ELK-7.10版本集群官方RPM部署 metricbeat/elasticsearch-head插件

image.png
===============================================================================
1. 下载好如下软件包+插件+环境
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.10.0-x86_64.rpm
wget https://artifacts.elastic.co/downloads/kibana/kibana-7.10.0-x86_64.rpm
wget https://artifacts.elastic.co/downloads/logstash/logstash-7.10.0-x86_64.rpm
wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.10.0-x86_64.rpm
集群插件:
https://github.com/mobz/elasticsearch-head.git
cerebro插件:
https://github.com/lmenezes/cerebro/releases/download/v0.9.4/cerebro-0.9.4.zip
监控插件:
curl -L -O https://artifacts.elastic.co/downloads/beats/metricbeat/metricbeat-7.10.0-x86_64.rpm
安装环境
JDK1.8以上即可
自己官网下载
NPM环境
https://nodejs.org/dist/v12.18.3/node-v12.18.3-linux-x64.tar.xz
===============================================================================

安装jdk1.8版本
vim /etc/profile
export JAVA_HOME=/usr/local/jdk1.8.0_121
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin
source /etc/profile
java -version

安装npm最新版本
wget https://nodejs.org/dist/v12.18.3/node-v12.18.3-linux-x64.tar.xz
tar -xvf node-v12.18.3-linux-x64.tar.xz -C .
mv node-v12.18.3-linux-x64 /usr/local/nodejs
ln -s /usr/local/nodejs/bin/node /usr/bin/node #创建软连接,让node命令全局生效
ln -s /usr/local/nodejs/bin/npm /usr/bin/npm #创建软连接,让npm命令全局生效
vim /etc/profile
export NODE_HOME=/usr/local/nodejs
export PATH=$NODE_HOME/bin:$PATH
source /etc/profile
node -v
npm -v
设置镜像
npm config set registry https://registry.npm.taobao.org

安装elasticsearch-head插件
yum install -y git
git --version
git clone https://github.com/mobz/elasticsearch-head.git
cd elasticsearch-head
npm install cnpm -g --registry=https://registry.npm.taobao.org #因为npm安装非常非常慢,所以在这里先安装淘宝源地址
ln -s /usr/local/nodejs/bin/cnpm /usr/local/bin/cnpm #创建cnpm软链接,不然执行下面执行命令会报错
cnpm install #使用cnpm命令下载安装项目所需要的插件
vim _site/app.js #修改app.js 搜索localhost,将localhost修改为安装ElasticSearch服务器的ip

cd node_modules/grunt/bin
nohup ./grunt server >nohup.out  2>&1 &
image.png
关防火墙。
systemctl stop firewalld
systemctl disable firewalld
systemctl status firewalld

selinux配置
setenforce 0  
sed -i 's/^SELINUX=.*$/SELINUX=disabled/' /etc/selinux/config

vim /etc/sysctl.conf
vm.max_map_count=262144
sysctl -p

修改最大文件描述符
vim /etc/security/limits.conf
* soft nofile 655350
* hard nofile 655350
* soft nproc 40960
* hard nproc 40960

es安装
rpm --install elasticsearch-7.10.0-x86_64.rpm  备注:es-3台都装
rpm --install kibana-7.10.0-x86_64.rpm
rpm --install logstash-7.10.0-x86_64.rpm
rpm --install filebeat-7.10.0-x86_64.rpm


cat /etc/elasticsearch/elasticsearch.yml|grep -v "#"

cluster.name: test-es-cluster
node.name: test-elk-01
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 192.168.64.22
transport.tcp.port: 9300
http.port: 9200
discovery.seed_hosts: ["192.168.64.22:9300","192.168.64.23:9300","192.168.64.24:9300"]
cluster.initial_master_nodes: ["192.168.64.22:9300"]
node.master: true
node.data: true
node.ingest: false
node.ml: false
cluster.remote.connect: false
http.cors.enabled: true
http.cors.allow-origin: "*"

cluster.name: test-es-cluster
node.name: test-elk-02
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 192.168.64.23
transport.tcp.port: 9300
http.port: 9200
discovery.seed_hosts: ["192.168.64.22:9300","192.168.64.23:9300","192.168.64.24:9300"]
cluster.initial_master_nodes: ["192.168.64.22:9300"]
node.master: false
node.data: true
node.ingest: false
node.ml: false
cluster.remote.connect: false
http.cors.enabled: true
http.cors.allow-origin: "*"

cluster.name: test-es-cluster
node.name: test-elk-03
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 192.168.64.24
transport.tcp.port: 9300
http.port: 9200
discovery.seed_hosts: ["192.168.64.22:9300","192.168.64.23:9300","192.168.64.24:9300"]
cluster.initial_master_nodes: ["192.168.64.22:9300"]
node.master: false
node.data: true
node.ingest: false
node.ml: false
cluster.remote.connect: false
http.cors.enabled: true
http.cors.allow-origin: "*"

[root@elk-66 conf.d]# curl -XGET 'http://192.168.64.66:9200/_cat/nodes?v'
ip            heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
192.168.64.68           30          16   0    0.00    0.01     0.05 cdhstw    -      elk-68
192.168.64.66           38          37   1    0.10    0.15     0.13 cdhmstw   *      elk-66
192.168.64.67           62          15   0    0.00    0.03     0.08 cdhstw    -      elk-67
[root@elk-66 conf.d]# curl -XGET 'http://192.168.64.66:9200/_cat/nodes'
192.168.64.68 40 16 0 0.00 0.01 0.05 cdhstw  - elk-68
192.168.64.66 21 37 4 0.08 0.14 0.13 cdhmstw * elk-66
192.168.64.67 15 15 0 0.00 0.02 0.08 cdhstw  - elk-67
[root@elk-66 conf.d]# curl -i http://192.168.64.66:9200/_cluster/state/nodes?pretty
HTTP/1.1 200 OK
content-type: application/json; charset=UTF-8
content-length: 894

{
  "cluster_name" : "es-cluster",
  "cluster_uuid" : "LZso9rvUS3eeZdHs35KsZA",
  "nodes" : {
    "wNhKViCZRy2rJQfX11RNcg" : {
      "name" : "elk-68",
      "ephemeral_id" : "N2S8ggVPSMKEiPtCBug19A",
      "transport_address" : "192.168.64.68:9300",
      "attributes" : {
        "xpack.installed" : "true",
        "transform.node" : "true"
      }
    },
    "VSRB4svAStGm2Z1q90L00A" : {
      "name" : "elk-66",
      "ephemeral_id" : "ejQBrC-qTvCnjjML3dPYkQ",
      "transport_address" : "192.168.64.66:9300",
      "attributes" : {
        "xpack.installed" : "true",
        "transform.node" : "true"
      }
    },
    "rbi9mt6oSjCrO1luN6visQ" : {
      "name" : "elk-67",
      "ephemeral_id" : "P8uSRSxgT56rYSUuHP09hw",
      "transport_address" : "192.168.64.67:9300",
      "attributes" : {
        "xpack.installed" : "true",
        "transform.node" : "true"
      }
    }
  }
}

rpm --install metricbeat-7.10.0-x86_64.rpm 备注:装master主节点上即可
cd /usr/bin/
metricbeat modules list
metricbeat modules enable elasticsearch-xpack
[root@elk-66 metricbeat]# vim metricbeat.yml 
output.elasticsearch:
  # Array of hosts to connect to.
  hosts: ["http://192.168.64.66:9200","http://192.168.64.67:9200","http://192.168.64.68:9200"]
[root@elk-66 modules.d]# cat elasticsearch-xpack.yml 
# Module: elasticsearch
# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.10/metricbeat-module-elasticsearch.html

- module: elasticsearch
  xpack.enabled: true
  period: 10s
  hosts: ["http://192.168.64.66:9200","http://192.168.64.67:9200","http://192.168.64.68:9200"]
  #username: "user"
  #password: "secret"

sudo service metricbeat start
image.png
filebeat.yml 配置
filebeat.inputs:

# Each - is an input. Most options can be set at the input level, so
# you can use different inputs for various configurations.
# Below are the input specific configurations.
- type: log
  paths:
   - /var/log/nginx/access.log
  fields:
   log_source: nginx-access
- type: log
  paths:
   - /var/log/nginx/error.log
  fields:
   log_source: nginx-error

[root@elk-66 conf.d]# pwd
/etc/logstash/conf.d
[root@elk-66 conf.d]# cat nginx_find_log.conf 
 input {
   beats {
     port => 5044
   }
 }
 filter {
   if [fields][log_source]=="nginx-access"{
     grok {
       match => {
         "message" => '%{IP:clientip}\s*%{DATA}\s*%{DATA}\s*\[%{HTTPDATE:requesttime}\]\s*"%{WORD:requesttype}.*?"\s*%{NUMBER:status:int}\s*%{NUMBER:bytes_read:int}\s*"%{DATA:requesturl}"\s*%{QS:ua}'
      }
       overwrite => ["message"]
     }
   }
   if [fields][log_source]=="nginx-error"{
     grok {
       match => {
         "message" => '(?<time>.*?)\s*\[%{LOGLEVEL:loglevel}\]\s*%{DATA}:\s*%{DATA:errorinfo},\s*%{WORD}:\s*%{IP:clientip},\s*%{WORD}:%{DATA:server},\s*%{WORD}:\s*%{QS:request},\s*%{WORD}:\s*%{QS:upstream},\s*%{WORD}:\s*"%{IP:hostip}",\s*%{WORD}:\s*%{QS:referrer}'
       }
       overwrite => ["message"]
     }
   }
 }
 output {
   if [fields][log_source]=="nginx-access"{
     elasticsearch {
       hosts => ["http://192.168.64.66:9200"]
       action => "index"
       index => "nginx-access-%{+YYYY.MM.dd}"
    }
   }
   if [fields][log_source]=="nginx-error"{
     elasticsearch {
       hosts => ["http://192.168.64.66:9200"]
       action => "index"
       index => "nginx-error-%{+YYYY.MM.dd}"
    }
   }
   stdout { codec => rubydebug }
 }
image.png

image.png

启动停止的顺序:
启动:elasticsearch --- logstash --- filebeat --- kibana
停止:kibana --- filebeat --- logstash --- elasticsearch

nginx安装htpasswd 密码登录
# 安装工具包httpd-tools
yum install -y httpd-tools
[root@elk-66 passwd]# htpasswd -c "/etc/nginx/passwd/kibana.passwd" "admin"
New password: 
Re-type new password: 
Adding password for user admin

nginx的conf配置例如:
[root@elk-66 conf.d]# cat default.conf 
server {
        server_name 192.168.64.66; #这里一般是填写域名 然后把防火墙开启 只需呀放开80即可 但是我这里只是演示测试环境哈
        #listen 80;
        auth_basic "Restricted Access";
        auth_basic_user_file /etc/nginx/passwd/kibana.passwd;
        location / {
                proxy_pass http://192.168.64.66:5601;
                proxy_http_version 1.1;
                proxy_set_header Upgrade $http_upgrade;
                proxy_set_header Connection 'upgrade';
                proxy_set_header Host $host;
                proxy_cache_bypass $http_upgrade;
        }
}


nginx-json模板如下:
log_format json '{"@timestamp":"$time_iso8601",'
     '"host":"$server_addr",'
     '"clientip":"$remote_addr",'
     '"size":$body_bytes_sent,'
     '"responsetime":$request_time,'
     '"upstreamtime":"$upstream_response_time",'
     '"upstreamhost":"$upstream_addr",'
     '"http_host":"$host",'
     '"url":"$uri",'
     '"domain":"$host",'
     '"xff":"$http_x_forwarded_for",'
     '"referer":"$http_referer",'
     '"remote_user":"$remote_user",'
     '"request":"$request",'
     '"http_user_agent":"$http_user_agent",'
     '"requesturi":"$request_uri",'
     '"status":"$status"}';
access_log  /var/log/nginx/access.log  json;

image.png

image.png
收集策略 nginx上的用户日志 docker上的jar日志 kafka日常消息topics日志 
[root@elk-66 conf.d]# cat logstash-test.conf 
 input {
   beats {
     port => 5044
   }
 }
 filter {
   if [fields][log_source]=="nginx-access"{
     grok {
       match => {
         "message" => '%{IP:clientip}\s*%{DATA}\s*%{DATA}\s*\[%{HTTPDATE:requesttime}\]\s*"%{WORD:requesttype}.*?"\s*%{NUMBER:status:int}\s*%{NUMBER:bytes_read:int}\s*"%{DATA:requesturl}"\s*%{QS:ua}'
      }
       overwrite => ["message"]
     }
   }
   if [fields][log_source]=="nginx-error"{
     grok {
       match => {
         "message" => '(?<time>.*?)\s*\[%{LOGLEVEL:loglevel}\]\s*%{DATA}:\s*%{DATA:errorinfo},\s*%{WORD}:\s*%{IP:clientip},\s*%{WORD}:%{DATA:server},\s*%{WORD}:\s*%{QS:request},\s*%{WORD}:\s*%{QS:upstream},\s*%{WORD}:\s*"%{IP:hostip}",\s*%{WORD}:\s*%{QS:referrer}'
       }
       overwrite => ["message"]
     }
   }
 }
 output {
   if [fields][log_source]=="nginx-access"{
     elasticsearch {
       hosts => ["elk-66:9200","elk-67:9200","elk-68:9200"]
       action => "index"
       index => "nginx-access-%{+YYYY.MM.dd}"
    }
   }
   if [fields][log_source]=="nginx-error"{
     elasticsearch {
       hosts => ["elk-66:9200","elk-67:9200","elk-68:9200"]
       action => "index"
       index => "nginx-error-%{+YYYY.MM.dd}"
    }
   }
   if [log_source] == "docker-test-45" {
        elasticsearch {
        hosts => ["elk-66:9200","elk-67:9200","elk-68:9200"]
                index => "docker-test-45-%{+YYYY.MM.dd}"
        }
   }
   if [log_source] == "docker-test-46" {
    elasticsearch {
        hosts => ["elk-66:9200","elk-67:9200","elk-68:9200"]
        index => "docker-test-46-%{+YYYY.MM.dd}"
    }
   }
   stdout { codec => rubydebug }
 }

[root@docker-test-45 filebeat]# cat filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /home/*/logs/*.log
  fields:
    log_source: docker-test-45
  fields_under_root: true
  multiline.pattern: ^\d{4}-\d{1,2}-\d{1,2}
  multiline.negate: true
  multiline.match: after
  scan_frequency: 5s
  close_inactive: 1h
  ignore_older: 24h
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 1
output.logstash:
  hosts: ["192.168.64.66:5044"]
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
  - add_docker_metadata: 
      host: "unix:///var/run/docker.sock"                                               
      match_source: true                                                
      match_source_index: 3 
  - add_kubernetes_metadata: ~
logging.level: info

[root@elk-66 conf.d]# cat /etc/filebeat/filebeat.yml|grep -v "#"
filebeat.inputs:
- type: log
  paths:
   - /var/log/nginx/access.log
  fields:
   log_source: nginx-access
- type: log
  paths:
   - /var/log/nginx/error.log
  fields:
   log_source: nginx-error
  enabled: false
- type: filestream
  enabled: false
  paths:
    - /var/log/*.log
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 1
setup.dashboards.enabled: false
setup.kibana:
  host: "192.168.64.66:5601"
output.logstash:
  hosts: ["192.168.64.66:5044"]
processors:
  - add_host_metadata:
      when.not.contains.tags: forwarded
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~

image.png

image.png

image.png
cat application.conf
hosts = [
  {
    host = "http://192.168.64.66:9200"
    name = "es-cluster"
    headers-whitelist = [ "x-proxy-user", "x-proxy-roles", "X-Forwarded-For" ]
  }
]

nohup ./cerebro 2>&1 &
image.png
删除索引(也删除了对应的数据)

curl -XDELETE http://localhost:9200/axt_examination_log-2021.07.30 #删除单条索引
 
curl -XDELETE http://localhost:9200/axt_examination_log-2021.07.3{0..1} #删除连续多条索引
 
curl -XDELETE http://localhost:9200/axt_examination_log-2021.07.30,axt_examination_log-2021.08.30 #删除多条索引
有了这两个命令可以开始编写下面脚本

# cat es_delete_index.sh 
#!/bin/bash
#
# es_delete_index.sh — delete Elasticsearch indices older than 15 days.
# Talks to the unauthenticated local node on port 9200; each DELETE
# removes the index and all of the documents stored in it.

source /etc/profile

# Date suffix used by the index names, e.g. 2021.07.30, for 15 days ago.
date=$(date -d "15 days ago" +%Y.%m.%d)

log_name='
axt_resources_log
axt_user_log
axt_data_log
axt_crm_log
axt_statistics_log
axt_mhcz_log
axt_future_log
axt_examination_log
axt_usercenter_log
'

# $log_name is intentionally unquoted: word-splitting turns the
# newline-separated list into one loop item per index name.
for i in $log_name
do
  curl -XDELETE "http://localhost:9200/${i}-${date}"
done
最后编辑于
©著作权归作者所有,转载或内容合作请联系作者
  • 序言:七十年代末,一起剥皮案震惊了整个滨河市,随后出现的几起案子,更是在滨河造成了极大的恐慌,老刑警刘岩,带你破解...
    沈念sama阅读 199,271评论 5 466
  • 序言:滨河连续发生了三起死亡事件,死亡现场离奇诡异,居然都是意外死亡,警方通过查阅死者的电脑和手机,发现死者居然都...
    沈念sama阅读 83,725评论 2 376
  • 文/潘晓璐 我一进店门,熙熙楼的掌柜王于贵愁眉苦脸地迎上来,“玉大人,你说我怎么就摊上这事。” “怎么了?”我有些...
    开封第一讲书人阅读 146,252评论 0 328
  • 文/不坏的土叔 我叫张陵,是天一观的道长。 经常有香客问我,道长,这世上最难降的妖魔是什么? 我笑而不...
    开封第一讲书人阅读 53,634评论 1 270
  • 正文 为了忘掉前任,我火速办了婚礼,结果婚礼上,老公的妹妹穿的比我还像新娘。我一直安慰自己,他们只是感情好,可当我...
    茶点故事阅读 62,549评论 5 359
  • 文/花漫 我一把揭开白布。 她就那样静静地躺着,像睡着了一般。 火红的嫁衣衬着肌肤如雪。 梳的纹丝不乱的头发上,一...
    开封第一讲书人阅读 47,985评论 1 275
  • 那天,我揣着相机与录音,去河边找鬼。 笑死,一个胖子当着我的面吹牛,可吹牛的内容都是我干的。 我是一名探鬼主播,决...
    沈念sama阅读 37,471评论 3 390
  • 文/苍兰香墨 我猛地睁开眼,长吁一口气:“原来是场噩梦啊……” “哼!你这毒妇竟也来了?” 一声冷哼从身侧响起,我...
    开封第一讲书人阅读 36,128评论 0 254
  • 序言:老挝万荣一对情侣失踪,失踪者是张志新(化名)和其女友刘颖,没想到半个月后,有当地人在树林里发现了一具尸体,经...
    沈念sama阅读 40,257评论 1 294
  • 正文 独居荒郊野岭守林人离奇死亡,尸身上长有42处带血的脓包…… 初始之章·张勋 以下内容为张勋视角 年9月15日...
    茶点故事阅读 35,233评论 2 317
  • 正文 我和宋清朗相恋三年,在试婚纱的时候发现自己被绿了。 大学时的朋友给我发了我未婚夫和他白月光在一起吃饭的照片。...
    茶点故事阅读 37,235评论 1 328
  • 序言:一个原本活蹦乱跳的男人离奇死亡,死状恐怖,灵堂内的尸体忽然破棺而出,到底是诈尸还是另有隐情,我是刑警宁泽,带...
    沈念sama阅读 32,940评论 3 316
  • 正文 年R本政府宣布,位于F岛的核电站,受9级特大地震影响,放射性物质发生泄漏。R本人自食恶果不足惜,却给世界环境...
    茶点故事阅读 38,528评论 3 302
  • 文/蒙蒙 一、第九天 我趴在偏房一处隐蔽的房顶上张望。 院中可真热闹,春花似锦、人声如沸。这庄子的主人今日做“春日...
    开封第一讲书人阅读 29,623评论 0 19
  • 文/苍兰香墨 我抬头看了看天上的太阳。三九已至,却和暖如春,着一层夹袄步出监牢的瞬间,已是汗流浃背。 一阵脚步声响...
    开封第一讲书人阅读 30,858评论 1 255
  • 我被黑心中介骗来泰国打工, 没想到刚下飞机就差点儿被人妖公主榨干…… 1. 我叫王不留,地道东北人。 一个月前我还...
    沈念sama阅读 42,245评论 2 344
  • 正文 我出身青楼,却偏偏与公主长得像,于是被迫代替她去往敌国和亲。 传闻我的和亲对象是个残疾皇子,可洞房花烛夜当晚...
    茶点故事阅读 41,790评论 2 339

推荐阅读更多精彩内容