K8S-EFK-02

EFK

Official site

GitHub

Downloads

Elasticsearch / Kibana / Filebeat / Logstash selection

efk namespace: zookeeper, kafka, es, kibana ...
dev namespace: filebeat
Data storage: NFS /data/nfs-volume/
graph TD
a(Filebeat log collection)-->|logging topic|b(Kafka cluster)
c(ZooKeeper cluster)-->|distributed coordination and scheduling|b
b-->d
  subgraph logstash
  d(receive input -> filter -> output)
  end
d-->e(Elasticsearch cluster - log storage and indexing)
e-->f(Kibana - log visualization and management)
Node counts
zookeeper: 1 or 3
kafka: 1 or 3
es: 1 or 3

Binary - ES

JDK 8 (future versions will require JDK 11)

mkdir /opt/src
mv elasticsearch-7.10.0-linux-x86_64.tar.gz /opt/src

# Extract and create the data directory
tar axf elasticsearch-7.10.0-linux-x86_64.tar.gz  -C  /opt/
ln -ns /opt/elasticsearch-7.10.0/ /opt/elasticsearch
mkdir /data/elasticsearch

# Create an unprivileged user
useradd -s /sbin/nologin -M es
chown -R es.es /opt/elasticsearch/*
chown -R es.es /data/elasticsearch

# File descriptor and memory-lock limits


cat >/etc/security/limits.d/es.conf<<EOF
es hard nofile 65535
es soft nofile 65535
es soft fsize unlimited
es hard memlock unlimited
es soft memlock unlimited
EOF

# Kernel tuning
sysctl -w vm.max_map_count=262144
echo "vm.max_map_count=262144" >> /etc/sysctl.conf

elasticsearch.yml

cat >>/opt/elasticsearch/config/elasticsearch.yml<<\EOF
cluster.initial_master_nodes: ["k8s-slave01"]
cluster.name: es.es.com
node.name: k8s-slave01
path.data: /data/elasticsearch/data
path.logs: /data/elasticsearch/logs
bootstrap.memory_lock: true            # lock the memory at startup instead of swapping
network.host: 10.0.0.3
http.port: 9200
EOF

Security configuration

Reference

# 1. Generate certificates that let the nodes communicate securely
bin/elasticsearch-certutil cert -out config/elastic-certificates.p12 -pass ""

# 2. Edit config/elasticsearch.yml
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: elastic-certificates.p12
    
# 3. Copy the credentials to the other two nodes
cp ../elasticsearch-7.1.0-master/config/* config/

# 4. Elasticsearch cluster passwords: once the master node is running, passwords can be set for the cluster
bin/elasticsearch-setup-passwords  auto

# 5. Kibana configuration, config/kibana.yml
elasticsearch.username: "elastic"     # simply use the superuser here
elasticsearch.password: "xxxx"

curl --user elastic:xxxx  localhost:9200/_cat/indices?v
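
Optionally, instead of reusing the elastic superuser everywhere, a dedicated account can be created through the security API once the passwords are set (a hedged sketch; the log_writer name and the role list are only examples):

curl --user elastic:xxxx -H 'Content-Type: application/json' \
  -X POST localhost:9200/_security/user/log_writer \
  -d '{"password":"ChangeMe123","roles":["kibana_admin","monitoring_user"]}'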

jvm.options

# In production, give the JVM heap at most 32 GB
-Xms512m
-Xmx512m

Start

su  -s /bin/bash -c "/opt/elasticsearch/bin/elasticsearch -d" es
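
Once it starts, a quick check that the node answers (the address matches network.host above; add --user elastic:xxxx if security was enabled):

curl http://10.0.0.3:9200/
curl http://10.0.0.3:9200/_cluster/health?pretty
tail -f /data/elasticsearch/logs/es.es.com.log    # the main log file is named after cluster.name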

k8s-ES single node

Official docs

Official image

Dockerfile

docker pull docker.elastic.co/elasticsearch/elasticsearch:7.10.0
# single node: docker run -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:7.10.0

# nfs 
mkdir /data/nfs-volume/elasticsearch

# Label the node
kubectl  label node k8s-slave02 efk=true

# Single-node configuration
kubectl create secret generic \
elasticsearch  -n efk \
--from-literal=elastic-passwd=whileiselastic
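
To confirm the secret landed as expected, decode it back (should print whileiselastic):

kubectl -n efk get secret elasticsearch -o jsonpath='{.data.elastic-passwd}' | base64 -d; echo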

cat >elasticsearch-single.yaml<<\EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: elasticsearch
  namespace: efk
data:
  elasticsearch.yml: |
    network.host: 0.0.0.0
    path.data: /data
    xpack.security.enabled: true
    http.port: 9200
---
apiVersion: v1
data:
  elastic-passwd: d2hpbGVpc2VsYXN0aWM=
kind: Secret
metadata:
  name: elasticsearch
  namespace: efk
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    name: elasticsearch
  name: elasticsearch
  namespace: efk
spec:
  replicas: 1
  serviceName: elasticsearch
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      name: elasticsearch
  template:
    metadata:
      labels:
        name: elasticsearch
    spec:
#      nodeSelector:
#        efk: "true"   # pin the pod to a specific node; adjust for your environment
      initContainers:
      - name: init
        image: alpine:3.6
        securityContext:
          privileged: true
        command: ["sh","-c", "sysctl -w vm.max_map_count=262144"]
        imagePullPolicy: IfNotPresent
      - name: fix-permissions
        image: alpine:3.6
        securityContext:
          privileged: true
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        command: [ "sh", "-c", "mkdir -p /mnt/$NAMESPACE/$POD_NAME && chown -R 1000:1000 /mnt/$NAMESPACE/$POD_NAME"]
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - name: es-data
          mountPath: /mnt
      containers:
      - name: elasticsearch
        securityContext:
          capabilities:
            add:
              - IPC_LOCK
              - SYS_RESOURCE
        image: docker.elastic.co/elasticsearch/elasticsearch:7.10.0
        command: ["bash", "-c", "ulimit -l unlimited && exec su elasticsearch docker-entrypoint.sh"]
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: ES_JAVA_OPTS
          value: "-Xms1g -Xmx1g"
        - name: discovery.type
          value: single-node
        - name: ELASTIC_PASSWORD
          valueFrom:
            secretKeyRef:
              name: elasticsearch
              key: elastic-passwd
        resources:
          limits:
            cpu: 1000m
            memory: 1.5Gi
          requests:
            cpu: 500m
            memory: 1Gi
        ports:
        - containerPort: 9200
          protocol: TCP
        - containerPort: 9300
          protocol: TCP
        volumeMounts:
        - name: es-config
          mountPath: "/usr/share/elasticsearch/config/elasticsearch.yml"
          subPath: elasticsearch.yml
        - name: es-data
          mountPath: "/data"
          subPathExpr: $(NAMESPACE)/$(POD_NAME)
      volumes:
      - name: es-config
        configMap:
          name: elasticsearch
          defaultMode: 0644
      - name: es-data
        nfs:
          server: 10.0.0.2
          path: /data/nfs-volume/elasticsearch/
---
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
  namespace: efk
spec:
  ports:
  - name: server
    port:  9200
    protocol: TCP
    targetPort: 9200
  - name: cluster
    port: 9300
    protocol: TCP
    targetPort: 9300
  selector:
    name: elasticsearch
  clusterIP: None

EOF
# Verify
curl -u elastic:whileiselastic elasticsearch:9200

curl http://elastic:whileiselastic@elasticsearch:9200
curl http://elastic:whileiselastic@elasticsearch.efk.svc.cluster.local:9200
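
The checks above rely on in-cluster DNS; from a workstation with kubectl access, a port-forward to the pod works as well (a sketch):

kubectl -n efk port-forward pod/elasticsearch-0 9200:9200 &
curl -u elastic:whileiselastic http://127.0.0.1:9200/_cluster/health?pretty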

k8s-ES cluster

# Generate certificates that let the nodes communicate securely
bin/elasticsearch-certutil cert -out elastic-certificates.p12 -pass ""

kubectl create secret generic \
elasticsearch  -n efk \
--from-literal=elastic-passwd=whileiselastic \
--from-file=elastic-cert=./elastic-certificates.p12

cat >elasticsearch-cluster.yaml<<\EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: elasticsearch
  namespace: efk
data:
  elasticsearch.yml: |
    cluster.name: "${NAMESPACE}"
    node.name: "${POD_NAME}"
    network.host: 0.0.0.0
    discovery.seed_hosts: ["elasticsearch.efk.svc.cluster.local"]
    cluster.initial_master_nodes: ["elasticsearch-0","elasticsearch-1","elasticsearch-2"]
    bootstrap.memory_lock: true
    path.data: /data
    path.logs: /data
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    name: elasticsearch
  name: elasticsearch
  namespace: efk
spec:
  replicas: 3
  serviceName: elasticsearch
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      name: elasticsearch
  template:
    metadata:
      labels:
        name: elasticsearch
    spec:
      nodeSelector:
        elasticsearch: "true"   ## 指定部署在哪个节点。需根据环境来修改
      initContainers:
      - name: init
        image: alpine:3.6
        securityContext:
          privileged: true
        command: ["sh","-c", "sysctl -w vm.max_map_count=262144"]
        imagePullPolicy: IfNotPresent
      - name: fix-permissions
        image: alpine:3.6
        securityContext:
          privileged: true
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        command: [ "sh", "-c", "mkdir -p /mnt/$NAMESPACE/$NODE_NAME && chown -R 1000:1000 /mnt"]
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - name: elasticsearch-data
          mountPath: /mnt
      containers:
      - name: elasticsearch
        securityContext:
          capabilities:
            add:
              - IPC_LOCK
              - SYS_RESOURCE
        image: docker.elastic.co/elasticsearch/elasticsearch:7.10.0
        command: ["bash", "-c", "ulimit -l unlimited && exec su elasticsearch docker-entrypoint.sh"]
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: ES_JAVA_OPTS
          value: "-Xms512m -Xmx512m"
        resources:
          limits:
            cpu: 1000m
            memory: 1Gi
          requests:
            cpu: 500m
            memory: 800Mi
        ports:
        - containerPort: 9200
          protocol: TCP
        - containerPort: 9300
          protocol: TCP
        volumeMounts:
        - name: es-config
          mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
          subPath: elasticsearch.yml
        - name: elasticsearch-data
          mountPath: /data
          subPathExpr: $(NAMESPACE)/$(POD_NAME)
      volumes:
      - name: es-config
        configMap:
          name: elasticsearch
          defaultMode: 0644
      - name: elasticsearch-data
        nfs:
          server: 10.0.0.2
          path: /data/nfs-volume/elasticsearch/
---
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
  namespace: efk
spec:
  ports:
  - name: server
    port:  9200
    protocol: TCP
    targetPort: 9200
  - name: cluster
    port: 9300
    protocol: TCP
    targetPort: 9300
  selector:
    name: elasticsearch
  clusterIP: None


EOF
# Verify
curl -u elastic:whileiselastic -X GET "elasticsearch:9200/_cat/nodes?v&pretty"


# To improve: cluster discovery
discovery.seed_providers
# provides the host list from a file that can be changed on the fly, without restarting the node (well suited to containerized environments)
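
A minimal sketch of the file-based provider: elasticsearch.yml selects it, and the host list lives in unicast_hosts.txt under the config directory, where it is re-read without a restart (the pod DNS names below assume the StatefulSet and headless Service from this note):

# elasticsearch.yml
discovery.seed_providers: file

# config/unicast_hosts.txt, one host[:transport-port] per line
cat >/usr/share/elasticsearch/config/unicast_hosts.txt<<EOF
elasticsearch-0.elasticsearch.efk.svc.cluster.local:9300
elasticsearch-1.elasticsearch.efk.svc.cluster.local:9300
elasticsearch-2.elasticsearch.efk.svc.cluster.local:9300
EOF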

Node roles for larger clusters

# By default every node in an Elasticsearch cluster is master-eligible, stores data, and can serve queries.

# Whether the node is eligible to become master
node.master:

# Whether the node stores data
node.data:

# Ingest nodes filter/transform documents, i.e. pre-process data (used in both the index and search phases)
node.ingest:

# Master-eligible + data + ingest (the default configuration)
node.master: true
node.data: true
node.ingest: true

# Dedicated master node
node.master: true
node.data: false
node.ingest: false

# Data node
node.master: false
node.data: true
node.ingest: false

# Coordinating/ingest node: point client code at these nodes
node.master: false
node.data: false
node.ingest: true

# Pure coordinating (query-only) node: it only accepts queries; for query-heavy clusters this helps keep data nodes from running out of memory
node.master: false
node.data: false
node.ingest: false

master nodes: ordinary servers are fine (moderate CPU and memory usage)
data nodes: mainly consume disk and memory
client | ingest nodes: ordinary servers are fine (give them more memory if heavy aggregations are expected)
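
To see which role each node actually took, _cat/nodes prints a role letter per node (m = master-eligible, d = data, i = ingest) and marks the elected master:

curl -u elastic:whileiselastic "elasticsearch:9200/_cat/nodes?v&h=name,node.role,master"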

k8s-ES visual management

# Install the elasticsearch-head extension from the Chrome Web Store for visualization; expose ES through an Ingress so the browser can reach it
kubectl create svc clusterip elasticsearch --tcp=9200:9200
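
The head plugin talks to the ES HTTP port from the browser, so 9200 must be reachable from outside the cluster; a hedged Ingress sketch (es.zs.com is only an example host):

cat >elasticsearch-ingress.yaml<<EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: elasticsearch
  namespace: efk
spec:
  rules:
  - host: es.zs.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: elasticsearch
            port:
              number: 9200
EOF
kubectl apply -f elasticsearch-ingress.yaml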

Elasticsearch basic concepts

Index

  • An index is Elasticsearch's logical store of data, and it can be split into smaller parts.
  • Think of an index as a table in a relational database. Its structure is prepared for fast, efficient full-text search; in particular, it does not store the original values.
  • Elasticsearch can keep an index on one machine or spread it across multiple servers; each index has one or more shards, and each shard can have multiple replicas.

Document

  • The main entity stored in Elasticsearch is the document. In relational-database terms, a document corresponds to a row in a table.
  • Like documents in MongoDB, Elasticsearch documents can have different structures, but in Elasticsearch the same field must have the same type.
  • A document consists of fields; a field may appear several times in one document, in which case it is a multivalued field. Field types can be text, numbers, dates, and so on, as well as complex types where a field contains sub-documents or arrays.

Mapping

  • Before documents are written to an index they are analyzed: how the input text is split into terms and which terms are filtered out. This behaviour is called the mapping, and the rules are usually defined by the user.

Document types

  • In Elasticsearch one index object can store objects used for different purposes. A blog application, for example, could store articles and comments.
  • Each document can have a different structure.
  • Different document types cannot give the same attribute different types. For example, a field named title must have the same type across all document types in the same index.

RESTful API

  • Elasticsearch provides a rich RESTful API covering basic CRUD, index creation, index deletion, and more.

Creating unstructured indices

  • In Lucene, creating an index requires defining field names and field types. Elasticsearch offers unstructured indices: data can be written to an index without defining the index structure first; under the hood Elasticsearch performs the structuring, transparently to the user.

Elasticsearch basic operations

Create an empty index

PUT /haoke
{
    "settings": {
        "index": {
            "number_of_shards": "2",
            "number_of_replicas": "0"
        }
    }
}
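
The same request sent with curl against the cluster deployed above (haoke is just the example index name):

curl -u elastic:whileiselastic -X PUT "elasticsearch:9200/haoke" \
  -H 'Content-Type: application/json' \
  -d '{"settings":{"index":{"number_of_shards":"2","number_of_replicas":"0"}}}'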

k8s-kibana

Dockerfile

Official container docs

Reference

docker pull docker.elastic.co/kibana/kibana:7.10.0
cat >kibana.yaml<<EOF
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: efk
  labels:
    name: kibana
spec:
  ports:
  - port: 5601
    protocol: TCP
    targetPort: ui
  selector:
    name: kibana
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: efk
  labels:
    name: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      name: kibana
  template:
    metadata:
      labels:
        name: kibana
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      containers:
      - name: kibana
        image: docker.elastic.co/kibana/kibana:7.10.0
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        env:
          - name: ELASTICSEARCH_HOSTS
            value: http://elasticsearch:9200
          - name: SERVER_NAME
            value: kibana.zs.com
          - name: I18N_LOCALE
            value: zh-CN
          - name: ELASTICSEARCH_USERNAME
            value: elastic
          - name: ELASTICSEARCH_PASSWORD
            valueFrom:
              secretKeyRef:
                name: elasticsearch
                key: elastic-passwd
          - name: PATH_DATA
            value: /data
        ports:
          - containerPort: 5601
            name: ui
            protocol: TCP
#          livenessProbe:
#            httpGet:
#              path: /api/status
#              port: ui
#            initialDelaySeconds: 5
#            timeoutSeconds: 10
#          readinessProbe:
#            httpGet:
#              path: /api/status
#              port: ui
#            initialDelaySeconds: 5
#            timeoutSeconds: 10
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kibana
  namespace: efk
spec:
  rules:
  - host: kibana.zs.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kibana
            port:
              number: 5601

EOF

# In Stack Management -> Kibana -> Index Patterns, create an index pattern
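
The same index pattern can also be created over Kibana's saved-objects API (a hedged sketch; kibana:5601 assumes in-cluster DNS, and k8s-dev-* matches the index written by Logstash below):

curl -u elastic:whileiselastic http://kibana:5601/api/status
curl -u elastic:whileiselastic -X POST http://kibana:5601/api/saved_objects/index-pattern \
  -H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
  -d '{"attributes":{"title":"k8s-dev-*","timeFieldName":"@timestamp"}}'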

k8s-filebeat

Official GitHub yaml

Official docs

docker pull elastic/filebeat:7.10.0 

Collect nginx access logs from the pod

# fields_under_root: if true, the custom fields are stored at the top level of the output document
# fields: optional fields that add extra information to the output
# multiline.pattern: merge lines into one event by matching the start of each line with a regex
apiVersion: v1
kind: Service
metadata:
  name: nginx-externalname
  namespace: dev
spec:
  type: ExternalName
  externalName: kafka.efk.svc.cluster.local
  ports:
  - name: kafka
    port: 9092
    targetPort: 9092
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
  namespace: dev
spec:
  type: ClusterIP
  ports:
  - name: http
    port: 80
    targetPort: 80
  clusterIP: None
  selector:
    name: nginx
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx
  namespace: dev
data:
  nginx.conf: |
    user  nginx;
    worker_processes  1;
    error_log  /var/log/nginx/error.log warn;
    pid        /var/run/nginx.pid;
    events {
        worker_connections  1024;
    }

    http {
        include       /etc/nginx/mime.types;
        default_type  application/octet-stream;
        map $http_x_forwarded_for $clientRealIp {
                "" $remote_addr;
                ~^(?P<firstAddr>[0-9\.]+),?.*$ $firstAddr;
                }
        log_format nginx_log_json '{'
                '"accessip_list":"$proxy_add_x_forwarded_for",'
                '"client_ip":"$clientRealIp",'
                '"http_host":"$host",'
                '"@timestamp":"$time_iso8601",'
                '"method":"$request_method",'
                '"url":"$request_uri",'
                '"status":"$status",'
                '"http_referer":"$http_referer",'
                '"body_bytes_sent":"$body_bytes_sent",'
                '"request_time":"$request_time",'
                '"http_user_agent":"$http_user_agent",'
                '"total_bytes_sent":"$bytes_sent",'
                '"server_ip":"$server_addr"'
                '}';
        access_log  /var/log/nginx/access.log  nginx_log_json;
        sendfile        on;
        #tcp_nopush     on;
        keepalive_timeout  65;
        #gzip  on;
        include /etc/nginx/conf.d/*.conf;
    }
  filebeat.yml: |
    filebeat.inputs:
    - type: log
      enabled: true
      fields_under_root: true
      fields:
        log_topic: k8s-dev-fb-nginx
      paths:
        - "/log/*.log"
        - "/log/*/*.log"
      scan_frequency: 120s
      max_bytes: 10485760
      multiline.type: pattern
      multiline.pattern: '^{'
      multiline.negate: true
      multiline.match: after
      multiline.max_lines: 100
    output.kafka:
      enabled: true
      hosts: ["nginx-externalname.dev.svc.cluster.local:9092"]
      topic: "%{[log_topic]}"
      partition.round_robin:
        reachable_only: false
      required_acks: 1
      max_message_bytes: 1000000

---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: dev
  name: nginx
  labels:
    name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      name: nginx
  template:
    metadata:
      labels:
        name: nginx
    spec:
      containers:
      - name: nginx
        image: ikubernetes/myapp:v7
        imagePullPolicy: IfNotPresent
        ports:
        - name: nginx
          containerPort: 80
          protocol: TCP
        volumeMounts:
        - name: timezone
          mountPath: /etc/localtime
        - name: log
          mountPath: /var/log/nginx
        - name: conf
          mountPath: "/etc/nginx/nginx.conf"
          readOnly: true
          subPath: nginx.conf
      - name: filebeat
        image: elastic/filebeat:7.10.0
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        volumeMounts:
        - name: log
          mountPath: /log
        - name: yml
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
      volumes:
      - name: timezone
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
      - name: log
        emptyDir: {}
      - name: yml
        configMap:
          defaultMode: 0644
          name: nginx
          items:
          - key: "filebeat.yml"
            path: "filebeat.yml"
      - name: conf
        configMap:
          defaultMode: 0644
          name: nginx
          items:
          - key: "nginx.conf"
            path: "nginx.conf"
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nginx
  namespace: dev
  annotations:
    nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
spec:
  rules:
  - host: nginx.zs.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: nginx
            port:
              number: 80
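
Once the pod is running, a couple of hedged checks that logs actually flow; the kafka-console-consumer.sh call assumes a stock Kafka image in the kafka-0 pod, which is set up outside this note:

# generate a request, then watch the filebeat sidecar ship it
curl -H 'Host: nginx.zs.com' http://<ingress-address>/
kubectl -n dev logs deploy/nginx -c filebeat --tail=20

# peek at the topic on the broker
kubectl -n efk exec -it kafka-0 -- kafka-console-consumer.sh \
  --bootstrap-server localhost:9092 --topic k8s-dev-fb-nginx --from-beginning --max-messages 5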

k8s-logstash

Docs

docker pull docker.elastic.co/logstash/logstash:7.10.0

apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash
  namespace: efk
data:
  logstash.yml: |
    #http.host: "0.0.0.0"
    #xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch.efk.svc.cluster.local:9200" ]
    #xpack.management.elasticsearch.username: logstash_admin_user
    #xpack.management.elasticsearch.password: t0p.s3cr3t
    #xpack.monitoring.enabled: true
    #path.config: /usr/share/logstash/pipeline/logstash.conf
  logstash.conf: |
    input {
        kafka {
            bootstrap_servers => "kafka.efk.svc.cluster.local:9092"
            topics_pattern => "k8s-dev-fb-.*"
            group_id => "logstash1"
            codec => json {
                charset => "UTF-8"
            }
            add_field => { "[@metadata][myid]" => "nginxaccess-log" }
        }
    }
    filter {
        if [@metadata][myid] == "nginxaccess-log" {
            mutate {
                gsub => ["message", "\\x", "\\\x"]
            }
            if ( 'method":"HEAD' in [message] ) {
                drop {}
            }
            json {
                source => "message"
                remove_field => "prospector"
                remove_field => "beat"
                remove_field => "source"
                remove_field => "input"
                remove_field => "offset"
                remove_field => "fields"
                remove_field => "host"
                remove_field => "@version"
                remove_field => "message"
            }
        }
    }
    output {
        if [@metadata][myid] == "nginxaccess-log" {
            elasticsearch {
                hosts => [ "elasticsearch.efk.svc.cluster.local:9200" ]
                index => "k8s-dev-%{+YYYY.MM.dd}"
                user => "elastic"
                password => "${ELASTIC_PASSWORD}"
            }
        }
    }

---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: efk
  name: logstash
  labels:
    name: logstash
spec:
  replicas: 1
  selector:
    matchLabels:
      name: logstash
  template:
    metadata:
      labels:
        name: logstash
    spec:
      containers:
      - name: logstash
        image: docker.elastic.co/logstash/logstash:7.10.0
        imagePullPolicy: IfNotPresent
        env:
        - name: ELASTIC_PASSWORD
          valueFrom:
            secretKeyRef:
              name: elasticsearch
              key: elastic-passwd
        volumeMounts:
        - name: yml
          mountPath: "/usr/share/logstash/config/logstash.yml"
          readOnly: true
          subPath: logstash.yml
        - name: conf
          mountPath: "/usr/share/logstash/pipeline/logstash.conf"
          readOnly: true
          subPath: logstash.conf
      volumes:
      - name: yml
        configMap:
          defaultMode: 0644
          name: logstash
          items:
          - key: "logstash.yml"
            path: "logstash.yml"
      - name: conf
        configMap:
          defaultMode: 0644
          name: logstash
          items:
          - key: "logstash.conf"
            path: "logstash.conf"
