
8. Collecting logs with multiple Filebeat instances

2020-12-13  阿fong

I. My pain points

1. Logs are traditionally collected with Filebeat modules, but when one Linux server has several kinds of logs to collect, everything has to be pushed to Logstash, because Filebeat 7.x no longer supports multiple outputs in a single instance (a sketch of the rejected configuration follows this list).
2. Taking the system module as a typical example: after collecting this way, I often could not find the logs I wanted in Kibana.
3. First, the timestamp stored in ES is not the log's own timestamp but the time Filebeat collected it.
4. Second, when any other output is used, the Filebeat module does not parse the logs properly, because the module's ingest pipeline only runs when Filebeat writes directly to Elasticsearch.
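For reference, this is the shape of configuration that Filebeat 7.x rejects: a sketch with two output sections enabled at once (the Logstash address here is hypothetical). Filebeat refuses to start, complaining that more than one output namespace is configured.

output.elasticsearch:
  hosts: ["192.168.18.13:9200"]

# A second enabled output makes Filebeat 7.x fail at startup
output.logstash:
  hosts: ["192.168.18.14:5044"]    # hypothetical Logstash host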

II. Approach

1. Run multiple Filebeat instances on the Linux host: one outputs directly to ES, the other to Logstash.
2. It is also advisable to send the ES output through dedicated coordinating (load-balancing) nodes rather than straight to the data nodes; a sketch of such a node follows this list.
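For the second point, a minimal sketch of what a coordinating-only node's elasticsearch.yml might contain in 7.x (the node name is an assumption, and since 7.9 the node.roles: [] form is preferred over these legacy flags):

# elasticsearch.yml on the load-balancing node: no master, data, or ingest role
node.name: coordinating-01        # hypothetical node name
node.master: false
node.data: false
node.ingest: false
discovery.seed_hosts: ["192.168.18.13"]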

The inspiration came from https://zh.codepre.com/how-to-19067.html.
This problem bothered me for a long time: I searched all of Baidu without finding a suitable solution, since most write-ups only scratch the surface. I finally found that article on Bing, and I am very grateful to its author.

I use the systemd approach here.

III. Collecting module logs (system module as the example, output to ES)

1. Create the systemd unit for the extra Filebeat instance

# Copy the filebeat config directory
cp -r /etc/filebeat{,-elasticsearch}

# Create the systemd service
cat > /etc/systemd/system/filebeat-elasticsearch.service << "EOF"
[Unit]
Description=Filebeat sends log files directly to Elasticsearch.
Documentation=https://www.elastic.co/products/beats/filebeat
Wants=network-online.target
After=network-online.target

[Service]

Environment="BEAT_LOG_OPTS="
Environment="BEAT_CONFIG_OPTS=-c /etc/filebeat-elasticsearch/filebeat.yml"
Environment="BEAT_PATH_OPTS=--path.home /usr/share/filebeat --path.config /etc/filebeat-elasticsearch --path.data /var/lib/filebeat-elasticsearch --path.logs /var/log/filebeat-elasticsearch"
ExecStart=/usr/share/filebeat/bin/filebeat --environment systemd $BEAT_LOG_OPTS $BEAT_CONFIG_OPTS $BEAT_PATH_OPTS
Restart=always

[Install]
WantedBy=multi-user.target
EOF
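systemd will not see the new unit until it re-reads its configuration, so reload it and confirm the unit was picked up:

systemctl daemon-reload
# Print the unit file systemd actually loaded
systemctl cat filebeat-elasticsearch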

2. Edit /etc/filebeat-elasticsearch/filebeat.yml

filebeat.inputs:
- type: log
  enabled: false
  paths:
    - /var/log/*.log

- type: filestream
  enabled: false
  paths:
    - /var/log/*.log

filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false

setup.template.settings:
  index.number_of_shards: 1

setup.kibana:
  host: "192.168.18.13:5601"
setup.template.overwrite: true
setup.template.enabled: true
setup.ilm.enabled: false

output.elasticsearch:
  hosts: ["192.168.18.13:9200"]
  indices:
    - index: "os-linux-auth-%{+yyyy.MM.dd}"
      when.equals:
        event:
          dataset: "system.auth"
    - index: "os-linux-syslog-%{+yyyy.MM.dd}"
      when.equals:
        event:
          dataset: "system.syslog"

processors:
  - add_host_metadata:
      when.not.contains.tags: forwarded
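Before starting, the config and the connection to ES can be validated with Filebeat's built-in test subcommands, passing the same path options the unit file uses:

/usr/share/filebeat/bin/filebeat test config -c /etc/filebeat-elasticsearch/filebeat.yml --path.config /etc/filebeat-elasticsearch --path.data /var/lib/filebeat-elasticsearch --path.logs /var/log/filebeat-elasticsearch
/usr/share/filebeat/bin/filebeat test output -c /etc/filebeat-elasticsearch/filebeat.yml --path.config /etc/filebeat-elasticsearch --path.data /var/lib/filebeat-elasticsearch --path.logs /var/log/filebeat-elasticsearch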

3. Enable the system module and start the service

mv /etc/filebeat-elasticsearch/modules.d/system.yml.disabled /etc/filebeat-elasticsearch/modules.d/system.yml
systemctl start filebeat-elasticsearch
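If the service is healthy, the new indices should appear shortly; a quick check (assuming curl is available on the host):

systemctl status filebeat-elasticsearch --no-pager
curl -s '192.168.18.13:9200/_cat/indices/os-linux-*?v'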

4. View the logs in Kibana




IV. Collecting non-module logs (Tomcat as the example, output to Logstash via Kafka)

1. Prepare a log sample
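A hypothetical access-log line in the format the grok pattern in step 4.2 expects (an assumption on my part, since the original sample is not reproduced here):

192.168.18.1 - - [13/Dec/2020:15:30:01 +0800] "GET /index.jsp HTTP/1.1" 200 1024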


2. Create the filebeat-logstash instance

# Copy the filebeat config directory
cp -r /etc/filebeat{,-logstash}

# Create the systemd service
cat > /etc/systemd/system/filebeat-logstash.service << "EOF"
[Unit]
Description=Filebeat sends log files to Logstash.
Documentation=https://www.elastic.co/products/beats/filebeat
Wants=network-online.target
After=network-online.target

[Service]

Environment="BEAT_LOG_OPTS="
Environment="BEAT_CONFIG_OPTS=-c /etc/filebeat-logstash/filebeat.yml"
Environment="BEAT_PATH_OPTS=--path.home /usr/share/filebeat --path.config /etc/filebeat-logstash --path.data /var/lib/filebeat-logstash --path.logs /var/log/filebeat-logstash"
ExecStart=/usr/share/filebeat/bin/filebeat --environment systemd $BEAT_LOG_OPTS $BEAT_CONFIG_OPTS $BEAT_PATH_OPTS
Restart=always

[Install]
WantedBy=multi-user.target
EOF

3. Edit /etc/filebeat-logstash/filebeat.yml

filebeat.inputs:
- type: log
  enabled: false
  paths:
    - /var/log/*.log

- type: log
  enabled: true
  paths:
    - /etc/filebeat-logstash/testlog/localhost_access_log.*.txt
  fields:
    type_name: "web-tomcat-access"
  # Promote the custom fields above to the document root instead of nesting them under "fields"
  fields_under_root: true

- type: filestream
  enabled: false
  paths:
    - /var/log/*.log

filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false


setup.template.settings:
  index.number_of_shards: 1

#setup.kibana:
#  host: "192.168.18.13:5601"
setup.template.overwrite: true
setup.template.enabled: true
setup.ilm.enabled: false

output.kafka:
  hosts: ["192.168.18.15:9092","192.168.18.16:9092"]
  topics:
    - topic: "web-tomcat-access"
      when.equals:
        type_name: "web-tomcat-access"
  

processors:
  - add_host_metadata:
      when.not.contains.tags: forwarded
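Once this instance is running, it's easy to verify that events reach Kafka by consuming the topic directly (the script path assumes a tarball install under /opt/kafka; adjust to your layout):

/opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server 192.168.18.15:9092 --topic web-tomcat-access --from-beginning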

4. Edit the Logstash configuration
4.1 input.conf

input {
    kafka {
        bootstrap_servers => "192.168.18.15:9092,192.168.18.16:9092"
        topics => ["web-tomcat-access"]
        consumer_threads => 5
        codec => json
    }
}

4.2 tomcat_out.conf

filter {
    if "web-tomcat-access" in [type_name] {
        grok {
            match => ["message", "%{IPORHOST:client_ip} (%{USER:ident}|-) (%{USER:auth}|-) \[%{HTTPDATE:log_time}\] \"(?:%{WORD:verb} %{NOTSPACE:request_url}(?: HTTP/%{NUMBER:http_version})?|-)\" %{NUMBER:response} (%{NUMBER:bytes}|-)"]
        }
        date {
            match => [ "log_time", "dd/MMM/YYYY:HH:mm:ss Z" ]
            target => "@timestamp"
        }
        geoip {
            source => "client_ip"
            target => "geoip"
            database => "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/logstash-filter-geoip-6.0.3-java/vendor/GeoLite2-City.mmdb"
            add_field => ["[geoip][coordinates]", "%{[geoip][longitude]}"]
            add_field => ["[geoip][coordinates]", "%{[geoip][latitude]}"]
        }
    }
}

output {
    if "web-tomcat-access" in [type_name] {
        elasticsearch {
            hosts => ["192.168.18.13:9200"]
            index => "web-tomcat-%{+YYYY.MM.dd}"
        }
    }
}
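The pipeline files can be syntax-checked before a restart (assuming they live in the default /etc/logstash/conf.d directory):

/usr/share/logstash/bin/logstash --config.test_and_exit -f /etc/logstash/conf.d/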

5. Start Logstash and view the logs in Kibana
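Assuming Logstash was installed as a systemd service, a minimal start-and-verify sequence:

systemctl start logstash
curl -s '192.168.18.13:9200/_cat/indices/web-tomcat-*?v'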


