Deploying EFK (Elasticsearch, Fluentd, Kibana) with Docker

2020-07-01  wolfe404
| Comparison | Logstash | Fluentd |
| --- | --- | --- |
| Memory footprint | ~1 GB at startup | ~60 MB at startup |
| CPU usage | Higher | Lower |
| Plugin ecosystem | Rich | Rich |
| Generic log parsing | grok (regex-based) | Regular expressions |
| Structured log formats | JSON and other common formats | JSON and other common formats |
| Data filtering | Supported | Supported |
| Buffered output | Via plugins | Via plugins |
| Runtime | Implemented in JRuby, requires a JVM | Implemented in CRuby/C, requires Ruby |
| Threading | Multi-threaded | Multi-threading limited by Ruby's GIL |
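The memory and CPU figures above are easy to spot-check once the stack described below is running. A minimal snapshot with docker stats (container names are the ones defined in the docker-compose.yml later in this article):

# One-shot resource snapshot of the fluentd and es-master containers
docker stats fluentd es-master --no-stream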
Note: the deployment script below creates the required directories, grants read/write permissions on the data and log directories, moves the three config files (es-master.yml, kibana.yml, fluent.conf) into place, and then starts the stack with docker-compose.
#!/bin/bash
# Define terminal color codes
BLUE_COLOR="\033[36m"
RED_COLOR="\033[31m"
GREEN_COLOR="\033[32m"
VIOLET_COLOR="\033[35m"
RES="\033[0m"

echo -e "${BLUE_COLOR}# ######################################################################${RES}"
echo -e "${BLUE_COLOR}#                       Docker ELK Shell Script                        #${RES}"
echo -e "${BLUE_COLOR}#                       Email:                                                     #${RES}"
echo -e "${BLUE_COLOR}# ######################################################################${RES}"

# Create directories
echo -e "${BLUE_COLOR}---> create [elasticsearch] directory start.${RES}"
if [ ! -d "./elasticsearch/" ]; then
  mkdir -p ./elasticsearch/master/conf ./elasticsearch/master/data ./elasticsearch/master/logs
fi

echo -e "${RED_COLOR}---> create [kibana]directory start.${RES}"
if [ ! -d "./kibana/" ]; then
mkdir -p ./kibana/conf ./kibana/logs
fi

# Create the fluentd directories (the log directory needs read/write permission)
echo -e "${RED_COLOR}---> create [fluentd] directory start.${RES}"
if [ ! -d "./fluentd/" ]; then
  mkdir -p ./fluentd/conf ./fluentd/log
  chmod 777 ./fluentd/log
fi

echo -e "${BLUE_COLOR}===> create directory success.${RES}"

# Grant directory permissions (data and logs both need read/write access)
echo -e "${BLUE_COLOR}---> directory authorize start.${RES}"
if [ -d "./elasticsearch/" ]; then
  chmod 777 ./elasticsearch/master/data/ ./elasticsearch/master/logs/
fi

echo -e "${BLUE_COLOR}===> directory authorize success.${RES}"

# Move config files into place
echo -e "${BLUE_COLOR}---> move [elasticsearch] config file start.${RES}"
if [ -f "./es-master.yml" ]; then
  mv ./es-master.yml ./elasticsearch/master/conf
fi

echo -e "${RED_COLOR}---> move [kibana]config file start.${RES}"
if [ -f "./kibana.yml" ]; then
mv ./kibana.yml ./kibana/conf
fi

echo -e "${GREEN_COLOR}---> move [fluent]config file start.${RES}"
if [ -f "./fluent.conf" ]; then
mv ./fluent.conf ./fluentd/conf
fi

echo -e "${BLUE_COLOR}===> move config files success.${RES}"
echo -e "${GREEN_COLOR}>>>>>>>>>>>>>>>>>> The End <<<<<<<<<<<<<<<<<<${RES}"

# Deploy the stack
echo -e "${BLUE_COLOR}==================> Docker deploy Start <==================${RES}"
docker-compose up --build -d
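Assuming the script above is saved as deploy.sh (a name chosen here for illustration; the article does not name the file) next to docker-compose.yml and the three config files shown below, a typical run looks like this:

# Make the script executable and run it
chmod +x deploy.sh
./deploy.sh

# All four containers (httpd, fluentd, es-master, kibana) should be listed as Up
docker-compose ps

# Elasticsearch answers on port 9200 once it has finished starting
curl -s http://localhost:9200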

docker-compose.yml:

version: "3"
services:
  web: # test helper: once the stack is up, run curl localhost:1080 and view the log in Kibana's Discover page
    image: httpd
    container_name: httpd
    ports:
      - 1080:80 # avoid clashing with anything already on port 80
    links:
      - fluentd
    logging:
      driver: fluentd
      options:
        fluentd-address: localhost:24224
        tag: httpd.access
    environment:
      - "TZ=Asia/Shanghai"

  fluentd:
    image: fluent/fluentd
    container_name: fluentd
    user: root
    volumes:
      - ./fluentd/conf/fluent.conf:/fluentd/etc/fluent.conf
      - ./fluentd/log:/fluentd/log
    ports:
      - 24221:24221
      - 24222:24222
      - 24223:24223
      - 24224:24224
    environment:
      - "TZ=Asia/Shanghai"

  es-master:
    container_name: es-master
    hostname: es-master
    image: elasticsearch:7.1.1
    user: root
    ports:
      - 9200:9200
      - 9300:9300
    volumes:
      - ./elasticsearch/master/conf/es-master.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - ./elasticsearch/master/data:/usr/share/elasticsearch/data
      - ./elasticsearch/master/logs:/usr/share/elasticsearch/logs
    environment:
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - "TZ=Asia/Shanghai"

  kibana:
    container_name: kibana
    hostname: kibana
    image: kibana:7.1.1
    ports:
      - 5601:5601
    volumes:
      - ./kibana/conf/kibana.yml:/usr/share/kibana/config/kibana.yml
    environment:
      - "elasticsearch.hosts=http://ip:9200"
      - "TZ=Asia/Shanghai"
    depends_on:
      - "es-master"
es-master.yml:

# Cluster name
cluster.name: es-cluster
# Node name
node.name: es-master
# Whether this node can be elected master
node.master: true
# Whether this node stores data (enabled by default)
node.data: true
# Network binding
network.host: 0.0.0.0
# HTTP port for external clients
http.port: 9200
# TCP port for inter-node transport
transport.port: 9300
# Cluster discovery
discovery.seed_hosts:
  - es-master
# Nodes (by name or IP) eligible to become master; used in the first election
cluster.initial_master_nodes:
  - es-master
# Allow cross-origin requests
http.cors.enabled: true
http.cors.allow-origin: "*"
# X-Pack security (authentication) disabled
xpack.security.enabled: false
#http.cors.allow-headers: "Authorization"
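Once es-master is running, the standard cluster APIs confirm the configuration took effect:

# Single-node cluster health (green or yellow is expected here)
curl -s 'http://localhost:9200/_cluster/health?pretty'

# The node list should show es-master as the elected master
curl -s 'http://localhost:9200/_cat/nodes?v'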

fluent.conf:

<source>
  @type tcp
  @id   debug-input
  port  24221
  tag   debug
  <parse>
    @type json
  </parse>
</source>

<source>
  @type tcp
  @id   error-input
  port  24222
  tag   error
  <parse>
    @type json
  </parse>
</source>

<source>
  @type tcp
  @id   business-input
  port  24223
  tag   business
  <parse>
    @type json
  </parse>
</source>

<source>
  # The Docker fluentd logging driver speaks the forward protocol,
  # so this input uses @type forward rather than a tcp/json source
  @type forward
  @id   record-input
  port  24224
</source>

<filter record>
  @type parser
  key_name message
  reserve_data true
  remove_key_name_field true
  <parse>
    @type json
  </parse>
</filter>

<match fluent.**>
  @type stdout
  output_type json
</match>

<match **>
  @type elasticsearch
  host 192.168.3.101
  port 9200
  type_name docker
  logstash_format true
  logstash_prefix docker-${tag}-logs
  logstash_dateformat %Y-%m-%d
  flush_interval 5s
  include_tag_key true
</match>
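The TCP sources above accept newline-delimited JSON, so a single test record can be pushed in by hand, assuming netcat (nc) is available on the host (flag support varies between netcat variants):

# Send one JSON record to the "debug" source on port 24221
echo '{"level":"debug","message":"hello from nc"}' | nc -w 1 localhost 24221

# After the 5s flush interval, look for a new index matching the docker-...-logs prefix
curl -s 'http://localhost:9200/_cat/indices?v'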

kibana.yml:

# Server port
server.port: 5601
# Bind address
server.host: "0.0.0.0"
# Elasticsearch endpoint
elasticsearch.hosts: ["http://192.168.1.150:9200"]
# UI language (Simplified Chinese)
i18n.locale: "zh-CN"
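Kibana usually takes a minute or two to come up. Its status API is a convenient readiness check before opening the Discover page mentioned earlier:

# Reports an overall status of "green" once Kibana is ready
curl -s http://localhost:5601/api/status

# Then open http://localhost:5601, create an index pattern (e.g. docker-*) under
# Management -> Index Patterns, and browse the collected logs in Discover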
