一. Docker environment setup
https://www.jianshu.com/p/7a93894c4e62
二. Pull the images
The images can be pulled from the official Elastic registry, or from Docker Hub:
https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html
https://www.elastic.co/guide/en/kibana/current/docker.html
# Version 7.15.1 is used here
docker pull docker.elastic.co/elasticsearch/elasticsearch:7.15.1
docker pull docker.elastic.co/kibana/kibana:7.15.1
docker pull docker.elastic.co/logstash/logstash:7.15.1
docker pull docker.elastic.co/beats/filebeat:7.15.1
Check that the images were pulled successfully:
# docker images
[root@node1 elk]# docker images | grep elastic
docker.elastic.co/beats/filebeat 7.15.1 311985fdcf7c 13 months ago 489MB
docker.elastic.co/logstash/logstash 7.15.1 852762e1f73f 13 months ago 980MB
docker.elastic.co/elasticsearch/elasticsearch 7.15.1 fa601f7c24cb 13 months ago 792MB
docker.elastic.co/kibana/kibana 7.15.1 9871707dda25 13 months ago 1.19GB
三. Create a network for container-to-container communication
[root@node1 elk]# docker network create elk
8971f8fd750f0dd0a9fe67e444ec11ed5e2b0e0fc0991141840b244d183058d5
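To confirm the network exists, and later to see which containers have joined it, Docker's own network commands can be used:
# List networks and inspect the elk network and its attached containers
docker network ls | grep elk
docker network inspect elk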
四. Start Elasticsearch
1. Start the container
Parameter descriptions:
-d
Run the container in the background
--name
Set the container name
-p
Port mapping: host port 9200 is mapped to port 9200 in the container, so requests to localhost:9200 reach the container's 9200 (9300, the transport port, is mapped the same way)
-e
Environment variable: discovery.type=single-node runs Elasticsearch as a single node
--net
Specify the network the container communicates over
[root@node1 elk]# docker run -d --name es01-test -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" --net elk fa601f7c24cb
b582510a9e787c71cb25635377c85d7a3fa8fdc93e87c976b2cbd3a3e8560551
2. Verify it is running
[root@node1 elk]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
b582510a9e78 fa601f7c24cb "/bin/tini -- /usr/l…" 4 seconds ago Up 3 seconds 0.0.0.0:9200->9200/tcp, :::9200->9200/tcp, 0.0.0.0:9300->9300/tcp, :::9300->9300/tcp es01-test
[root@node1 elk]#
[root@node1 elk]# curl 'http://localhost:9200'
{
  "name" : "b582510a9e78",
  "cluster_name" : "docker-cluster",
  "cluster_uuid" : "Bd0G3r8rRXGc9JeRXc99BQ",
  "version" : {
    "number" : "7.15.1",
    "build_flavor" : "default",
    "build_type" : "docker",
    "build_hash" : "83c34f456ae29d60e94d886e455e6a3409bba9ed",
    "build_date" : "2021-10-07T21:56:19.031608185Z",
    "build_snapshot" : false,
    "lucene_version" : "8.9.0",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}
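Beyond the root endpoint, Elasticsearch's cluster health API gives a quick sanity check; on a fresh single node a green status (or yellow once indices with replicas exist) is expected:
curl 'http://localhost:9200/_cluster/health?pretty'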
五. Start Kibana
1. Start the container
Parameter descriptions:
-d
Run the container in the background
--name
Set the container name
-p
Port mapping: host port 5601 is mapped to port 5601 in the container, so requests to localhost:5601 reach Kibana
-e
Environment variable: ELASTICSEARCH_HOSTS=http://es01-test:9200 specifies the Elasticsearch address
--net
Specify the network the container communicates over
-e
ES_JAVA_OPTS sets the JVM heap with -Xmx (maximum) and -Xms (minimum) to keep startup memory usage in check; the default is 2g. Note that this variable is read by the Elasticsearch container rather than by Kibana; see the sketch after this list.
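A minimal sketch of passing ES_JAVA_OPTS when starting the Elasticsearch container (an alternative form of the run command from section 四; the heap sizes are illustrative and should be sized to the host's memory):
# Same as the earlier Elasticsearch run command, with an explicit JVM heap
docker run -d --name es01-test -p 9200:9200 -p 9300:9300 \
  -e "discovery.type=single-node" \
  -e "ES_JAVA_OPTS=-Xms512m -Xmx512m" \
  --net elk fa601f7c24cb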
[root@node1 elk]# docker run -d --name kib01-test -p 5601:5601 --net elk -e "ELASTICSEARCH_HOSTS=http://es01-test:9200" 9871707dda25
03926ce2f04e3c7ea0ac7d107b9ff6f429cbbdd259b81317e7fda5d32e224b50
[root@node1 kibana]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
03926ce2f04e 9871707dda25 "/bin/tini -- /usr/l…" 9 minutes ago Up 9 minutes 0.0.0.0:5601->5601/tcp, :::5601->5601/tcp kib01-test
Write the Elasticsearch address into Kibana's configuration file and mount that configuration when starting the container.
(1) Inspect Kibana's default configuration file
[root@node1 kibana]# docker exec -it 03926ce2f04e /bin/bash
bash-4.4$ ls
LICENSE.txt NOTICE.txt README.txt bin config data node node_modules package.json plugins src x-pack
bash-4.4$ cat config/kibana.yml
#
# ** THIS IS AN AUTO-GENERATED FILE **
#
# Default Kibana configuration for docker target
server.host: "0"
server.shutdownTimeout: "5s"
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
monitoring.ui.container.elasticsearch.enabled: true
bash-4.4$
(2) Create a configuration file on the host and point elasticsearch.hosts at the es container
[root@node1 kibana]# cat /opt/docker/elk/kibana/kibana.yml
server.host: "0"
server.shutdownTimeout: "5s"
elasticsearch.hosts: [ "http://es01-test:9200" ]
monitoring.ui.container.elasticsearch.enabled: true
# UI language
i18n.locale: "zh-CN"
(3) Restart Kibana with the configuration file mounted
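Because the container name kib01-test is reused, remove the previous container before starting the new one:
docker rm -f kib01-test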
docker run -v /opt/docker/elk/kibana/kibana.yml:/usr/share/kibana/config/kibana.yml -d --name kib01-test -p 5601:5601 --net elk -e "ELASTICSEARCH_HOSTS=http://es01-test:9200" 9871707dda25
2. Verify it is running
[root@node1 kibana]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
5367980ff867 9871707dda25 "/bin/tini -- /usr/l…" 14 minutes ago Up 4 minutes 0.0.0.0:5601->5601/tcp, :::5601->5601/tcp kib01-test
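Once Kibana finishes starting (this can take a minute or two), its status API should respond on the mapped port:
# Returns Kibana's status as JSON once the server is ready
curl -s http://localhost:5601/api/status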
六. Start Logstash
1. Start the container
(1) Edit the configuration
mkdir -p /opt/docker/elk/logstash
vim /opt/docker/elk/logstash/logstash.yml
vim /opt/docker/elk/logstash/logstash.conf
# logstash.yml
http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://es01-test:9200" ]
# logstash.conf: Filebeat sends the collected logs to Logstash on port 4567; Logstash then writes them to Elasticsearch
input {
  beats {
    port => 4567
  }
}
filter {
  # Only matched data are sent to output.
}
output {
  elasticsearch {
    hosts => ["http://es01-test:9200"]   # Elasticsearch host, can be an array.
    index => "logapp-%{+YYYY.MM}"        # The index to write data to.
  }
}
(2) Start the container
docker run -d -p 4567:4567 --net elk --name logstash01 -p 5044:5044 -p 5045:5045 \
-v /opt/docker/elk/logstash/logstash.conf:/usr/share/logstash/pipeline/logstash.conf \
-v /opt/docker/elk/logstash/logstash.yml:/usr/share/logstash/config/logstash.yml 852762e1f73f
2. Verify it is running
[root@node1 logstash]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
0b5d2dac2c70 852762e1f73f "/usr/local/bin/dock…" 4 minutes ago Up 3 seconds 0.0.0.0:4567->4567/tcp, :::4567->4567/tcp, 0.0.0.0:5044-5045->5044-5045/tcp, :::5044-5045->5044-5045/tcp, 9600/tcp logstash01
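If the Logstash container keeps restarting or the pipeline does not come up (for example, because of a typo in logstash.conf), its logs show the reason:
docker logs -f logstash01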
七. Start Filebeat
1. Start the container
(1) Edit the configuration file
[root@node1 filebeat]# pwd
/opt/docker/elk/filebeat
[root@node1 filebeat]#
[root@node1 filebeat]# cat filebeat.yml
filebeat.inputs:
- type: log
  paths:
    - /var/log/logapp/app.info.log
output.logstash:
  hosts: ["172.19.0.4:4567"]
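172.19.0.4 is the address the elk bridge network assigned to the Logstash container in this setup. Since the Filebeat container joins the same network (see the run command below) and Docker's user-defined networks resolve container names through built-in DNS, the container name can be used instead so the configuration does not depend on the assigned IP; a hedged alternative:
output.logstash:
  hosts: ["logstash01:4567"]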
(2) Create the log file to collect
Write some arbitrary log lines into the file:
[root@node1 logapp]# pwd
/opt/docker/elk/log/logapp
[root@node1 logapp]#
[root@node1 logapp]# cat app.info.log
test-2021-11-21
[root@node1 logapp]# echo 'my log test' >> app.info.log
[root@node1 logapp]#
[root@node1 logapp]# cat app.info.log
test-2021-11-21
my log test
[root@node1 logapp]#
(3) Start the container
-e: set an environment variable (here, the Kibana address)
-v: mount local files and configuration into the container
docker run -d \
-u root \
-v /opt/docker/elk/log/logapp:/var/log/logapp:rw \
-v /opt/docker/elk/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro \
-e setup.kibana.host=172.19.0.3:5601 \
--name filebeat01 \
--net elk 311985fdcf7c
2. Verify it is running
[root@node1 logapp]# docker ps | grep filebeat
4a1f92fb4d0e 311985fdcf7c "/usr/bin/tini -- /u…" 15 seconds ago Up 14 seconds filebeat01
[root@node1 logapp]#
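To verify the whole chain end to end, check that Logstash has created an index in Elasticsearch matching the logapp-%{+YYYY.MM} pattern from logstash.conf, and that the test log lines are searchable:
# List indices and confirm a logapp-* index exists
curl 'http://localhost:9200/_cat/indices?v' | grep logapp
# Search the collected log entries
curl 'http://localhost:9200/logapp-*/_search?pretty'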