1. Background
- In the previous tutorial we already set up an ELK + Filebeat + Redis logging platform; see the earlier post on building a log collection and analysis platform with ElasticSearch, Logstash, Kibana, Redis, and Filebeat.
- In practice, though, Filebeat almost always has to collect more than one log file: on a single server, nginx alone produces access.log and error.log, and you may also need to pick up Tomcat logs and so on. Filebeat therefore needs to be configured with multiple log inputs.
- Here I demonstrate collecting nginx's access.log and error.log at the same time and analyzing them in Kibana. Only the configuration files are shown; for installation steps, refer to the same earlier post. A prerequisite on the nginx side is sketched right after this list.
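The Logstash filter below parses each access-log line as JSON, and its geoip filter reads a remote_addr field from the result, so nginx must write its access log in a JSON log_format to /var/log/nginx/access-json.log. A minimal sketch of such a format follows; the format name json_log and the exact field set are illustrative assumptions, not part of the original setup.

# Hypothetical nginx.conf snippet: emit the access log as one JSON object per line.
http {
    log_format json_log '{"@timestamp":"$time_iso8601",'
                        '"remote_addr":"$remote_addr",'
                        '"request":"$request",'
                        '"status":"$status",'
                        '"body_bytes_sent":"$body_bytes_sent",'
                        '"http_referer":"$http_referer",'
                        '"http_user_agent":"$http_user_agent"}';
    access_log /var/log/nginx/access-json.log json_log;
}

Note that without escape=json (available since nginx 1.11.8), nginx escapes special characters in variable values as \x sequences, which is exactly why the Logstash filter below rewrites \x before running its json filter.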
2. Configure Filebeat
- Edit filebeat.yml:
vi /etc/filebeat/filebeat.yml
###################### Filebeat Configuration Example #########################
# This file is an example configuration file highlighting only the most common
# options. The filebeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html
# For more available modules and options, please see the filebeat.reference.yml sample
# configuration file.
#=========================== Filebeat inputs =============================
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access-json.log   # path of the log file to read
  tags: ["nginx-access"]
- type: log
  enabled: true
  paths:
    - /var/log/nginx/error.log         # path of the log file to read
  tags: ["nginx-error"]
#============================= Filebeat modules ===============================
filebeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml
  # Set to true to enable config reloading
  reload.enabled: false
  # Period on which files under path should be checked for changes
  #reload.period: 10s
#==================== Elasticsearch template setting ==========================
setup.template.settings:
  index.number_of_shards: 3
  #index.codec: best_compression
  #_source.enabled: false
#================================ Outputs =====================================
# Configure what output to use when sending the data collected by the beat.
#-------------------------- Redis output ------------------------------
output.redis:
  hosts: ["192.168.1.110:6379"]   # the Redis host to ship events to
  password: "123456"
  key: "filebeat:test16"          # Redis key (a list) under which the log events are stored
  db: 0
  timeout: 5
#================================ Processors =====================================
# Configure processors to enhance or manipulate events generated by the beat.
processors:
- add_host_metadata: ~
- add_cloud_metadata: ~
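Before restarting, it is worth sanity-checking the Filebeat side. A quick sketch, assuming a package install of Filebeat and a redis-cli that can reach 192.168.1.110:

# Validate the syntax of filebeat.yml
filebeat test config -c /etc/filebeat/filebeat.yml
# Verify Filebeat can reach the configured Redis output
filebeat test output -c /etc/filebeat/filebeat.yml
# Once Filebeat is running, confirm events are queuing up under the key
# (may show 0 if Logstash is already draining the list)
redis-cli -h 192.168.1.110 -a 123456 llen filebeat:test16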
3. Configure Logstash
1. Edit your own conf file. I am continuing from the previous tutorial, so the file is named nginx16-access.conf:
vi /etc/logstash/nginx16-access.conf
input {
  redis {
    data_type => "list"
    key => "filebeat:test16"
    host => "192.168.1.110"
    port => 6379
    password => "123456"
    threads => "8"
    db => 0
    #codec => json
  }
}
filter {
  # Before the message is parsed as JSON, use mutate to rewrite \x escape
  # sequences; otherwise JSON parsing fails with:
  # ParserError: Unrecognized character escape 'x' (code 120)
  mutate {
    gsub => ["message", "\\x", "\\\x"]
  }
if "nginx-access" in [tags]{
json {
source => "message"
remove_field => ["beat","message"]
}
}else if "nginx-error" in [tags]{
grok {
match => [
"message", "(?<time>\d{4}/\d{2}/\d{2}\s{1,}\d{2}:\d{2}:\d{2})\s{1,}\[%{DATA:err_severity}\]\s{1,}(%{NUMBER:pid:int}#%{NUMBER}:\s{1,}\*%{NUMBER}|\*%{NUMBER}) %{DATA:err_message}(?:,\s{1,}client:\s{1,}(?<client_ip>%{IP}|%{HOSTNAME}))(?:,\s{1,}server:\s{1,}%{IPORHOST:server})(?:, request: %{QS:request})?(?:, host: %{QS:client_ip})?(?:, referrer: \"%{URI:referrer})?",
"message", "(?<time>\d{4}/\d{2}/\d{2}\s{1,}\d{2}:\d{2}:\d{2})\s{1,}\[%{DATA:err_severity}\]\s{1,}%{GREEDYDATA:err_message}"]
}
date{
match=>["time","yyyy/MM/dd HH:mm:ss"]
target=>"logdate"
}
ruby{
code => "event.set('logdateunix',event.get('logdate').to_i)"
}
}
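  # Illustrative only (not from the original post): the first grok pattern above
  # targets error.log lines shaped roughly like
  #   2019/05/09 10:27:02 [error] 1903#0: *17 open() "/usr/share/nginx/html/favicon.ico"
  #     failed (2: No such file or directory), client: 192.168.1.1, server: localhost,
  #     request: "GET /favicon.ico HTTP/1.1", host: "192.168.1.110"
  # while the second, looser pattern is the fallback for anything else.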
  # Use the GeoIP database to geolocate the client IP. Only access events
  # carry the remote_addr field, so restrict the lookup to them.
  if "nginx-access" in [tags] {
    geoip {
      source => "remote_addr"   # field in the nginx JSON access log holding the client IP
      database => "/opt/GeoLite2-City/GeoLite2-City.mmdb"
      # Drop geoip fields we do not need in the output
      remove_field => ["[geoip][latitude]", "[geoip][longitude]", "[geoip][country_code]", "[geoip][country_code2]", "[geoip][country_code3]", "[geoip][timezone]", "[geoip][continent_code]", "[geoip][region_code]", "[geoip][ip]"]
      target => "geoip"
    }
    mutate {
      convert => [ "[geoip][coordinates]", "float" ]
    }
  }
}
output {
  if "nginx-access" in [tags] {
    elasticsearch {
      hosts => ["192.168.1.110:9200"]
      # The index name must start with logstash-; otherwise the default
      # logstash index template (and with it the geo_point mapping that the
      # Kibana map feature needs) will not apply.
      index => "logstash-test16-nginx-access-%{+yyyy.MM.dd}"
    }
  }
  if "nginx-error" in [tags] {
    elasticsearch {
      hosts => ["192.168.1.110:9200"]
      # Same naming rule as above: keep the logstash- prefix.
      index => "logstash-test16-nginx-error-%{+yyyy.MM.dd}"
    }
  }
}
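Before restarting Logstash you can have it validate this pipeline file first. A sketch, assuming the default package install path (adjust the bin path to your environment):

/usr/share/logstash/bin/logstash -f /etc/logstash/nginx16-access.conf --config.test_and_exit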
2. Once the configuration is done, restart Filebeat and Logstash.
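On a systemd-based system with package installs, for example:

systemctl restart filebeat
systemctl restart logstash

One caveat: the Logstash package only loads pipelines from /etc/logstash/conf.d/*.conf by default, while this file sits at /etc/logstash/nginx16-access.conf; either move it there or start Logstash manually with -f /etc/logstash/nginx16-access.conf.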
4. Configure Kibana to display the logs
1. Create the index patterns in Kibana (Management → Index Patterns): one for logstash-test16-nginx-access-* and one for logstash-test16-nginx-error-*.
2. View the log data in Discover, selecting the index pattern and an appropriate time range.
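If nothing shows up in Discover, first confirm the indices were actually created in Elasticsearch, for example:

curl -s 'http://192.168.1.110:9200/_cat/indices?v' | grep test16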