ELK日志管理系统搭建

2019-11-27  本文已影响0人  DH大黄

搭建ELK日志管理(Elasticsearch,Logstash,Kibana)

搭建环境要求

docker,docker-compose,rabbitmq

搭建流程(以centos操作系统为例)

vi docker-compose.yml
version: '3'
services:
  elasticsearch:
    image: elasticsearch:7.3.1
    environment:
      # Single-node mode: skips production bootstrap checks (cluster discovery)
      discovery.type: single-node
    ports:
      - "9200:9200"
      - "9300:9300"
      # NOTE(review): 9100 is the elasticsearch-head UI port, which this image
      # does not serve — confirm it is actually needed before keeping it
      - "9100:9100"
  logstash:
    image: logstash:7.3.1
    command: logstash -f /etc/logstash/conf.d/logstash.conf
    volumes:
      # Mount the Logstash pipeline and settings files from ./config
      - ./config:/etc/logstash/conf.d
      - ./config/logstash.yml:/etc/logstash/logstash.yml
    depends_on:
      - elasticsearch
    ports:
      - "5000:5000"
  kibana:
    image: kibana:7.3.1
    environment:
      # Kibana 7.x reads ELASTICSEARCH_HOSTS; ELASTICSEARCH_URL is 6.x-only
      # and is silently ignored by 7.x images. Inside the compose network the
      # service name resolves, so no external IP is needed.
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
    depends_on:
      - elasticsearch
    ports:
      - "5601:5601"
mkdir config
cd config
vi logstash.conf

input {
  # Input source: RabbitMQ. The exchange/queue settings below must match the
  # producing application's RabbitMQ configuration exactly.
  rabbitmq {
    host => "rabbitmq_ip"
    # Port must be a number, not a bare word (replace with your broker's port;
    # 5672 is the RabbitMQ default)
    port => 5672
    # Seconds between re-subscription attempts after a dropped connection
    # (numeric setting, not a string)
    subscription_retry_interval_seconds => 5
    vhost => "/"
    exchange => "rabbitmq_exchange"
    exchange_type => "fanout"
    queue => "rabbitmq_queue"
    durable => true
    auto_delete => false
    user => "rabbitmq_user"
    password => "rabbitmq_password"
  }
}
filter {
  # Parse log lines of the shape:
  #   <timestamp> <level> [service,trace,span,exportable] <pid> --- [thread] <class> : <message>
  # (presumably Spring Cloud Sleuth output — verify against the producing app)
  grok {
    match => { "message" => "%{TIMESTAMP_ISO8601:timestamp}\s+%{LOGLEVEL:severity}\s+\[%{DATA:service},%{DATA:trace},%{DATA:span},%{DATA:exportable}\]\s+%{DATA:pid}\s+---\s+\[%{DATA:thread}\]\s+%{DATA:class}\s+:\s+%{GREEDYDATA:rest}" }
  }
}
output {
  elasticsearch {
    # hosts takes a list of URIs; inside the compose network the service name
    # resolves, so no external IP is needed (replace if ES runs elsewhere)
    hosts => ["http://elasticsearch:9200"]
  }
}
vi logstash.yml

# Elasticsearch connection settings for Logstash's own monitoring
# Bind the Logstash HTTP API to all interfaces (the key was truncated to
# "st:" in the original — it must be http.host)
http.host: "0.0.0.0"
# In 7.x the setting is ...elasticsearch.hosts (a list of URIs); the singular
# ...elasticsearch.url form from 6.x was removed
xpack.monitoring.elasticsearch.hosts: ["http://elasticsearch:9200"]
xpack.monitoring.elasticsearch.username: elastic
xpack.monitoring.elasticsearch.password: changeme
cd ..
docker-compose up -d

搭建完成后访问 Kibana（http://yourip:5601/）

image-20191126153650639.png
image

然后输入条件即可分析日志

上一篇 下一篇

猜你喜欢

热点阅读