从其他服务器抓取日志存到ELK
业务服务器上配置logstash
[root@tzgsqvapp01 app]# tar zxvf logstash-2.4.0.tar.gz
[root@tzgsqvapp01 app]# rm -rf logstash-2.4.0.tar.gz
[root@tzgsqvapp01 app]# ln -s logstash-2.4.0 logstash
[root@tzgsqvapp01 app]# useradd logstash -M -s /sbin/nologin
[root@tzgsqvapp01 app]# mkdir /opt/logs/logstash/
[root@tzgsqvapp01 app]# chown -R logstash:logstash /opt/logs/logstash/
[root@tzgsqvapp01 app]# chown -R logstash:logstash /opt/app/logstash-2.4.0/
[root@tzgsqvapp01 app]# mkdir /opt/app/logstash/conf.d
[root@tzgsqvapp01 app]# chown -R logstash:logstash /opt/app/logstash/conf.d
[root@tzgsqvapp01 app]# vi /etc/init.d/logstash
#!/bin/sh
# Init script for logstash
# Maintained by Elasticsearch
# Generated by pleaserun.
# Implemented based on LSB Core 3.1:
# * Sections: 20.2, 20.3
#
# NOTE: the BEGIN/END INIT INFO block below is not decorative — it is parsed
# by chkconfig/insserv to compute runlevels and start/stop ordering.
### BEGIN INIT INFO
# Provides: logstash
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description:
# Description: Starts Logstash as a daemon.
### END INIT INFO
PATH=/sbin:/usr/sbin:/bin:/usr/bin
export PATH
# The script chroots, adjusts ulimits and writes under /var/run, so root is
# mandatory.
if [ `id -u` -ne 0 ]; then
echo "You need root privileges to run this script"
exit 1
fi
name=logstash
# PID of the backgrounded logstash process, written by start().
pidfile="/var/run/$name.pid"
# Daemon identity and layout; these match the useradd/mkdir/chown steps in
# the provisioning transcript above.
LS_USER=logstash
LS_GROUP=logstash
LS_HOME=/opt/app/logstash
LS_HEAP_SIZE="500m"
LS_LOG_DIR=/opt/logs/logstash
LS_LOG_FILE="${LS_LOG_DIR}/$name.log"
LS_CONF_DIR=/opt/app/logstash/conf.d
# Raised open-files limit and lowest CPU priority for the agent.
LS_OPEN_FILES=16384
LS_NICE=19
LS_OPTS=""
# Allow the defaults above to be overridden per-host; sysconfig (RHEL) wins
# over default (Debian) because it is sourced last.
[ -r /etc/default/$name ] && . /etc/default/$name
[ -r /etc/sysconfig/$name ] && . /etc/sysconfig/$name
program=/opt/app/logstash/bin/logstash
# logstash 2.x CLI: "agent" mode, -f config dir, -l log file.
args="agent -f ${LS_CONF_DIR} -l ${LS_LOG_FILE} ${LS_OPTS}"
start() {
  # Launch logstash as $LS_USER:$LS_GROUP in the background and record its
  # PID in $pidfile. Returns 0 unconditionally (the fork is fire-and-forget).
  LS_JAVA_OPTS="${LS_JAVA_OPTS} -Djava.io.tmpdir=${LS_HOME}"
  HOME=${LS_HOME}
  export PATH HOME LS_HEAP_SIZE LS_JAVA_OPTS LS_USE_GC_LOGGING

  # chroot --userspec does not pick up supplemental groups when switching
  # user:group, so collect them ourselves and pass them via --groups.
  SGROUPS=$(id -Gn "$LS_USER" | tr " " "," | sed 's/,$//'; echo '')
  # FIX: the test operand must be quoted ([ ! -z $SGROUPS ] only worked by
  # accident of two-argument test semantics when the variable was empty).
  if [ -n "$SGROUPS" ]; then
    EXTRA_GROUPS="--groups $SGROUPS"
  fi

  # Raise the fd limit while still root, before dropping privileges.
  ulimit -n ${LS_OPEN_FILES}

  # Run the program. $EXTRA_GROUPS is deliberately unquoted: it must split
  # into the two words "--groups <list>" (or into nothing when unset).
  nice -n ${LS_NICE} chroot --userspec $LS_USER:$LS_GROUP $EXTRA_GROUPS / sh -c "
    cd $LS_HOME
    ulimit -n ${LS_OPEN_FILES}
    exec \"$program\" $args
  " > "${LS_LOG_DIR}/$name.stdout" 2> "${LS_LOG_DIR}/$name.err" &

  # Write the pidfile from the parent. If the forked process wrote it, a
  # status query could race against the write.
  echo $! > "$pidfile"
  echo "$name started."
  return 0
}
stop() {
  # Gracefully stop the daemon: SIGTERM, poll for up to 5 seconds, and only
  # escalate to SIGKILL when explicitly opted in via KILL_ON_STOP_TIMEOUT=1.
  if status ; then
    pid=$(cat "$pidfile")
    echo "Killing $name (pid $pid) with SIGTERM"
    kill -TERM "$pid"
    # Wait for it to exit.
    for i in 1 2 3 4 5 ; do
      echo "Waiting $name (pid $pid) to die..."
      status || break
      sleep 1
    done
    if status ; then
      # FIX: default to 0 — when KILL_ON_STOP_TIMEOUT is unset, the original
      # [ "" -eq 1 ] printed "integer expression expected" on every timeout.
      if [ "${KILL_ON_STOP_TIMEOUT:-0}" -eq 1 ] ; then
        echo "Timeout reached. Killing $name (pid $pid) with SIGKILL. This may result in data loss."
        kill -KILL "$pid"
        echo "$name killed with SIGKILL."
      else
        echo "$name stop failed; still running."
      fi
    else
      echo "$name stopped."
    fi
  fi
}
status() {
  # LSB-style status: 0 = running, 2 = dead but pidfile exists,
  # 3 = not running (no pidfile).
  [ -f "$pidfile" ] || return 3
  pid=$(cat "$pidfile")
  if kill -0 "$pid" > /dev/null 2>&1 ; then
    # A process with this pid is alive. It may not be ours — that is the
    # inherent limitation of plain pidfiles.
    # TODO(sissel): verify the process identity; flock would be nice but it
    # forks rather than execs, which makes it awkward here.
    return 0
  fi
  return 2
}
force_stop() {
  # Attempt a graceful stop first, then SIGKILL anything still alive.
  if status ; then
    stop
    # NOTE: the && chain means a *successful* stop leaves status's non-zero
    # code as this function's return value — preserved original behavior.
    status && kill -KILL "$(cat "$pidfile")"
  fi
}
# Action dispatcher. Exit codes propagate from status()/start() so tools like
# systemd's sysv-generator and monit see meaningful LSB statuses.
case "$1" in
  start)
    # Refuse to start a second copy when one is already running.
    status
    code=$?
    if [ $code -eq 0 ]; then
      echo "$name is already running"
    else
      start
      code=$?
    fi
    exit $code
    ;;
  stop) stop ;;
  force-stop) force_stop ;;
  status)
    status
    code=$?
    if [ $code -eq 0 ] ; then
      echo "$name is running"
    else
      echo "$name is not running"
    fi
    exit $code
    ;;
  restart)
    stop && start
    ;;
  *)
    # FIX: $SCRIPTNAME was never defined anywhere in this script, so the
    # usage line printed an empty name; use the script's own basename.
    echo "Usage: ${0##*/} {start|stop|force-stop|status|restart}" >&2
    exit 3
    ;;
esac
exit $?
[root@tzgsqvapp01 app]# chmod +x /etc/init.d/logstash
[root@tzgsqvapp01 app]# chkconfig --add logstash
[root@tzgsqvapp01 app]# chkconfig logstash on
[root@tzgsqvapp01 app]# vim /opt/app/logstash/conf.d/logstash.conf
# Shipper pipeline: tail the Tomcat REST catalina logs and publish each event
# on a Redis pub/sub channel that the central ELK server subscribes to.
input {
file {
path => "/opt/logs/tomcat7-rest/catalina.*.out"
type => "tomcat-catalina-out"
# Multiline stitching (for stack traces) is disabled for now. If you enable
# it, note the grok pattern name is TIMESTAMP_ISO8601 — no extra zero.
#codec => multiline {
# pattern => "%{TIMESTAMP_ISO8601} "
# negate => true
# what => previous
#}
# Tag every event with the originating module for downstream routing.
add_field => {"module" => "rest" }
}
}
output {
if [type] == "tomcat-catalina-out" {
# data_type "channel" uses Redis PUBLISH; subscribers that are offline
# will miss events (unlike the "list" type, which queues them).
redis {
data_type => "channel"
host => "redisIP"
port => "6379"
key => "tomcat-catalina-out"
db => "0"
}
}
}
[root@tzgsqvapp01 app]# ln -sv /opt/app/jdk7/bin/java /usr/bin/java
[root@tzgsqvapp01 app]# systemctl daemon-reload
[root@tzgsqvapp01 app]# systemctl start logstash
[root@tzgsqvapp01 logstash]# cat logstash.log
{:timestamp=>"2017-01-13T22:59:26.174000+0800", :message=>"Pipeline main started"}
{:timestamp=>"2017-01-13T23:00:57.972000+0800", :message=>"SIGTERM received. Shutting down the agent.", :level=>:warn}
{:timestamp=>"2017-01-13T23:00:57.973000+0800", :message=>"stopping pipeline", :id=>"main"}
{:timestamp=>"2017-01-13T23:00:58.379000+0800", :message=>"Pipeline main has been shutdown"}
{:timestamp=>"2017-01-13T23:01:07.800000+0800", :message=>"Pipeline main started"}
[root@tzgsqvapp01 logstash]# ll
total 8
-rw-r--r-- 1 root root 0 Jan 13 23:00 logstash.err
-rw-r--r-- 1 logstash logstash 470 Jan 13 23:01 logstash.log
-rw-r--r-- 1 root root 141 Jan 13 23:01 logstash.stdout
[root@tzgsqvapp01 logstash]# cat logstash.stdout
Sending logstash logs to /opt/logs/logstash/logstash.log.
{:timestamp=>"2017-01-13T23:01:07.800000+0800", :message=>"Pipeline main started"}
ELK服务器上
[tzg@tzgsqvelk01 redis]$ bin/redis-cli
redisIP:6379> SUBSCRIBE tomcat-catalina-out
Reading messages... (press Ctrl-C to quit)
1) "subscribe"
2) "tomcat-catalina-out"
3) (integer) 1
1) "message"
2) "tomcat-catalina-out"
3) "{\"message\":\" 2017-01-13 23:10:01.797 [DubboMonitorSendTimer-thread-1] INFO com.alibaba.dubbo.monitor.dubbo.DubboMonitor[Slf4jLogger.java:42] [for_log_analysis_id]--[for_log_analysis_ip] [DUBBO] Send statistics to monitor zookeeper://zk01.tzg.sq:2181/com.alibaba.dubbo.monitor.MonitorService?anyhost=true&application=simple-monitor&check=false&delay=-1&dubbo=2.5.3&interface=com.alibaba.dubbo.monitor.MonitorService&methods=lookup,collect&owner=tzg&pid=1179&revision=2.5.3&side=provider×tamp=1484029892639, dubbo version: 2.5.3, current host: 172.16.5.13\",\"@version\":\"1\",\"@timestamp\":\"2017-01-13T15:10:02.118Z\",\"path\":\"/opt/logs/tomcat7-rest/catalina.2017-01-13.out\",\"host\":\"tzgsqvapp01\",\"type\":\"tomcat-catalina-out\",\"module\":\"rest\"}"
[root@tzgsqvelk01 app]# cat /etc/logstash/conf.d/logstash.conf
# Indexer pipeline on the ELK server: subscribe to the Redis channel fed by
# the shippers and write events both to Elasticsearch and to flat files.
input {
redis {
data_type => "channel"
key => "tomcat-catalina-out"
host => "redisIP"
port => 6379
db => 0
}
}
output {
elasticsearch {
hosts => "localhost:9200"
# FIX: string settings must be quoted in logstash config; the original
# bare words (user => elastic, password => 123456) fail config validation.
user => "elastic"
# SECURITY: plaintext credential in the config file — restrict file
# permissions or load it from a protected keystore/environment instead.
password => "123456"
}
# Also archive raw messages to disk, partitioned by date and module/type.
file {
codec => line { format => "%{message}"}
path => "/tzgData/logstash/%{+YYYY.MM.dd}/%{module}.%{type}"
}
}
[root@tzgsqvelk01 app]# systemctl start logstash