Containerizing GrayLog + ElasticSearch
2019-06-22 Secret_Sun
Why containerize? Fast deployment is the point: on the cloud, start from an AMI (Packer can build AMIs directly; something I still need to learn), then Ansible + Docker brings the whole service up in one go, taking "Infrastructure as Code" all the way. It looks pretty slick.
Components
MongoDB --- stores GrayLog's metadata (configuration and state)
GrayLog --- replaces LogStash and ships with its own UI (can sit behind Auto Scaling in the cloud); a minimal GELF example follows this list
ElasticSearch --- log storage
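As a quick illustration of GrayLog standing in for LogStash: once a GELF HTTP input has been created in the GrayLog UI (the input and the 12201 port below are assumptions for illustration, not part of this deployment), an application can ship a message with nothing more than curl:
# assumes a GELF HTTP input listening on 12201; graylog.example.com is a placeholder
curl -X POST 'http://graylog.example.com:12201/gelf' \
-H 'Content-Type: application/json' \
-d '{"version": "1.1", "host": "app-01", "short_message": "hello graylog", "level": 6}'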
Versions
# pull directly from hub.docker.com
docker pull elasticsearch:6.8.0
docker pull graylog/graylog:3.0.2
docker pull mongo:3.6.12
Directory layout
- conf --- configuration files
- elasticsearch
- graylog
- plugin --- plugins
- elasticsearch
- graylog
- includes the GeoIP map & monitoring plugins (a bootstrap sketch of this layout follows)
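The playbooks below copy a handful of files out of this tree, so it helps to bootstrap it on the Ansible control machine up front. A minimal sketch (the file names are the ones referenced by the copy tasks later in the post):
# control-machine layout assumed by the playbooks in this post
mkdir -p conf/elasticsearch conf/graylog plugin/elasticsearch plugin/graylog
touch conf/mongodb.conf
touch conf/elasticsearch/elasticsearch.yml conf/elasticsearch/jvm.options conf/elasticsearch/log4j2.properties
touch conf/graylog/graylog.conf conf/graylog/log4j2.xml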
Host baseline parameter initialization
- ansible-playbook -C -s -e"host=xxxxx" -u secret.sun -i hosts-file init.yml
---
- hosts: "{{ host }}"
tasks:
- name: Set sysctl file limits
pam_limits:
dest: "/etc/security/limits.conf"
domain: '{{ item.limit_domain }}'
limit_type: "{{ item.limit_type }}"
limit_item: "{{ item.limit_item }}"
value: "{{ item.value }}"
with_items:
- { limit_domain: 'root',limit_type: 'soft',limit_item: 'nofile', value: '655360' }
- { limit_domain: 'root',limit_type: 'hard',limit_item: 'nofile', value: '655360' }
- { limit_domain: '*',limit_type: 'soft',limit_item: 'core', value: '0' }
- { limit_domain: '*',limit_type: 'hard',limit_item: 'core', value: '0' }
- { limit_domain: '*',limit_type: 'soft',limit_item: 'nproc', value: '655360' }
- { limit_domain: '*',limit_type: 'hard',limit_item: 'nproc', value: '655360' }
- { limit_domain: '*',limit_type: 'soft',limit_item: 'stack', value: 'unlimited' }
- { limit_domain: '*',limit_type: 'hard',limit_item: 'stack', value: 'unlimited' }
- { limit_domain: '*',limit_type: 'soft',limit_item: 'nofile', value: '655360' }
- { limit_domain: '*',limit_type: 'hard',limit_item: 'nofile', value: '655360' }
- { limit_domain: '*',limit_type: 'soft',limit_item: 'memlock', value: 'unlimited' }
- { limit_domain: '*',limit_type: 'hard',limit_item: 'memlock', value: 'unlimited' }
tags: change-limits
- name: Set systemd limits
lineinfile:
dest: "/etc/systemd/system.conf"
line: "{{ item.line }}"
state: present
with_items:
- {line: 'DefaultLimitNOFILE=65535' }
- {line: 'DefaultLimitNPROC=65535' }
- {line: 'DefaultLimitMEMLOCK=infinity' }
tags: change-limits
- name: SystemCtl daemon-reload
command: systemctl daemon-reload
tags: daemon-reload
# - name: Close Iptables
# systemd:
# name: firewalld
# enabled: False
# state: stopped
# tags: close-iptables
- name: Change /etc/sysctl.conf
sysctl: name={{ item.key }} value={{ item.value }}
with_items:
- { key: "vm.max_map_count", value: "655360" }
- { key: "vm.swappiness", value: "0" }
- { key: "net.ipv4.tcp_fin_timeout", value: "3" }
- { key: "net.ipv4.tcp_max_tw_buckets", value: "300000" }
- { key: "net.ipv4.tcp_max_orphans", value: "655360" }
- { key: "net.ipv4.ip_forward", value: "1" }
- { key: "net.bridge.bridge-nf-call-iptables", value: "1" }
- { key: "fs.file-max", value: "655360" }
tags: Change-sysctl-conf
- name: Load sysctl
command: sysctl -p
tags: load-sysctl
# Ubuntu 16.04.5 LTS
- name: Add alicloud software source
apt_repository:
repo: '{{ item }}'
state: present
with_items:
- deb http://mirrors.aliyun.com/ubuntu/ xenial main restricted universe multiverse
- deb http://mirrors.aliyun.com/ubuntu/ xenial-security main restricted universe multiverse
- deb http://mirrors.aliyun.com/ubuntu/ xenial-updates main restricted universe multiverse
- deb http://mirrors.aliyun.com/ubuntu/ xenial-proposed main restricted universe multiverse
- deb http://mirrors.aliyun.com/ubuntu/ xenial-backports main restricted universe multiverse
- deb-src http://mirrors.aliyun.com/ubuntu/ xenial main restricted universe multiverse
- deb-src http://mirrors.aliyun.com/ubuntu/ xenial-security main restricted universe multiverse
- deb-src http://mirrors.aliyun.com/ubuntu/ xenial-updates main restricted universe multiverse
- deb-src http://mirrors.aliyun.com/ubuntu/ xenial-proposed main restricted universe multiverse
- deb-src http://mirrors.aliyun.com/ubuntu/ xenial-backports main restricted universe multiverse
tags: add_alicloud_source
- name: Apt Update
command: apt-get update
tags: add_alicloud_source
- name: Create Dir
command: mkdir -p /{{ item }}
with_items:
- data
tags: disk_xvdg
- name: mkfs.xfs -f Disk
command: mkfs.xfs -f {{ item }}
with_items:
- /dev/xvdg
tags: disk_xvdg
- name: Mount Disk
command: mount {{ item.disk }} {{ item.catalogue }}
with_items:
- { disk: '/dev/xvdg', catalogue: '/data' }
tags: disk_xvdg
- name: Write /etc/fstab
shell: echo "UUID=$(blkid {{ item.disk }} | awk -F\" '{print $2}') {{ item.catalogue }} xfs defaults,noatime,nodiratime,nobarrier 1 2" >> /etc/fstab
with_items:
- { disk: '/dev/xvdg', catalogue: '/data' }
tags: disk_xvdg
- name: Mount -a Disk
command: mount -a
tags: disk_xvdg
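After the play runs (drop the -C check-mode flag for the real run), a quick spot check on the target host is worth doing. A sketch, assuming the same device and mount point as above:
# verify limits, kernel parameters and the data disk
ulimit -n                  # expect 655360 in a fresh login shell
sysctl vm.max_map_count    # expect 655360 (Elasticsearch requirement)
sysctl vm.swappiness       # expect 0
df -h /data                # /dev/xvdg should be mounted on /data
grep /data /etc/fstab      # the UUID entry appended by the play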
MongoDB deployment
- ansible-playbook -C -s -e"host=xxxxx" -u secret.sun -i hosts-file mongodb_init.yml
---
- hosts: "{{ host }}"
tasks:
- name: Docker Pull MongoDB
command: docker pull mongo:3.6.12
tags: docker_pull
- name: Create Dir
command: mkdir -p /data/{{ item }}
with_items:
- configdb
- db
- logs
tags: create_base_dir
- stat: path=/data/logs/mongod.log
register: p
- name: Touch /data/logs/mongod.log
file: path=/data/logs/mongod.log
owner=root
group=sys
mode=0777
state={{ "file" if p.stat.exists else "touch"}}
tags: touch_mongod.log
- name: Apt Install mongodb-clients
command: apt-get install mongodb-clients -y
tags: install_mongodb_clients
- name: Scp MongoDB Conf
copy: src=conf/mongodb.conf dest=/data/configdb/mongodb.conf backup=yes
tags: scp_conf
- name: Docker Run
command: >
docker run -it -d -p 27017:27017
-v /data/configdb:/data/configdb
-v /data/db:/data/db
-v /data/logs:/data/logs
--name mongo
--restart=always
-m 6g
mongo:3.6.12 mongod --config /data/configdb/mongodb.conf --replSet graylog
tags: docker_run
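The playbook copies conf/mongodb.conf, but the file itself is not shown in the post. Below is only a minimal sketch of what the docker run invocation implies (log path and dbPath match the mounted volumes; everything else is an assumption, and the replica set name is already passed via --replSet on the command line):
# sketch of conf/mongodb.conf, written on the control machine
cat > conf/mongodb.conf <<'EOF'
systemLog:
  destination: file
  path: /data/logs/mongod.log
  logAppend: true
storage:
  dbPath: /data/db
net:
  bindIp: 0.0.0.0
  port: 27017
EOF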
- Database initialization
# log in to the database
mongo --host 127.0.0.1 --port 27017 -u root -p --authenticationDatabase admin
use admin
db.auth("root", "xxxxx")
use admin
db.createUser(
{
user: "root",
pwd: "123456",
roles: [ { role: "userAdminAnyDatabase", db: "admin" } ]
}
)
# initialize the replica set (each member below should point at a distinct MongoDB host; 127.0.0.1 is only a masked placeholder here)
rs.initiate({
_id: 'graylog',
members: [
{ _id: 0, host: '127.0.0.1:27017' },
{ _id: 1, host: '127.0.0.1:27017' },
{ _id: 2 ,host: '127.0.0.1:27017' }
]
})
rs.conf()
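Still in the same mongo shell session, confirm that the election has completed before wiring GrayLog to the replica set; in the rs.status() output, one member should report stateStr PRIMARY and the others SECONDARY:
rs.status()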
ElasticSearch
- Parameters
discovery_ip --- ES discovery IP list (use the master node addresses)
cluster_name --- cluster name
- master & search
# directory initialization
ansible-playbook -C -s -e"host=master,search" -u secret.sun -i hosts-file -t create_es_base_dir es_disk_init.yml
# install master
ansible-playbook -C -s -e"host=master discovery_ip=['xxxx','xxxx','xxxx'] cluster_name=xxxx" -u secret.sun -i hosts-file -t docker_pull,run_base,change_conf_master es_init.yml
# install search
ansible-playbook -C -s -e"host=search discovery_ip=['xxxx','xxxx','xxxx'] cluster_name=xxxx" -u secret.sun -i hosts-file -t docker_pull,run_base,change_conf_search es_init.yml
- data
# directory initialization
ansible-playbook -C -s -e"host=data" -u secret.sun -i hosts-file es_disk_init.yml
# install data
ansible-playbook -C -s -e"host=data discovery_ip=['xxxx','xxxx','xxxx'] cluster_name=xxxx" -u secret.sun -i hosts-file -t docker_pull,run_base,change_conf_data es_init.yml
- Update configuration
#master
ansible-playbook -C -s -e"host=master discovery_ip=['xxxx','xxxx','xxxx'] cluster_name=xxxx" -u secret.sun -i hosts-file -t run_base,change_conf_master es_init.yml
#search
ansible-playbook -C -s -e"host=search discovery_ip=['xxxx','xxxx','xxxx'] cluster_name=xxxx" -u secret.sun -i hosts-file -t run_base,change_conf_search es_init.yml
#data
ansible-playbook -C -s -e"host=data discovery_ip=['xxxx','xxxx','xxxx'] cluster_name=xxxx" -u secret.sun -i hosts-file -t run_base,change_conf_data es_init.yml
- Start (a health-check sketch follows the commands)
#master & search
ansible-playbook -C -s -e"host=master,search" -u secret.sun -i hosts-file -t docker_run_master_search es_init.yml
#data
ansible-playbook -C -s -e"host=data" -u secret.sun -i hosts-file -t docker_run_data es_init.yml
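Once all three roles are up, a quick check against any search node confirms that the nodes have joined one cluster. A sketch, with the address masked like the rest of the post:
# one row per node; node.role shows the master/data flags, master marks the elected master
curl -s 'http://xxxx:9200/_cat/nodes?v&h=ip,node.role,master,name'
# status should be green (yellow is acceptable while replicas are still allocating)
curl -s 'http://xxxx:9200/_cluster/health?pretty'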
- PlayBook
# es_disk_init.yml
---
- hosts: "{{ host }}"
tasks:
- name: Create Dir
command: mkdir -p /{{ item }}
with_items:
- data1
tags: disk_nvme0n1
- name: mkfs.xfs -f Disk
command: mkfs.xfs -f {{ item }}
with_items:
- /dev/nvme0n1
tags: disk_nvme0n1
- name: Mount Disk
command: mount {{ item.disk }} {{ item.catalogue }}
with_items:
- { disk: '/dev/nvme0n1', catalogue: '/data1' }
tags: disk_nvme0n1
- name: Write /etc/fstab
shell: echo "UUID=$(blkid {{ item.disk }} | awk -F\" '{print $2}') {{ item.catalogue }} xfs defaults,noatime,nodiratime,nobarrier 1 2" >> /etc/fstab
with_items:
- { disk: '/dev/nvme0n1', catalogue: '/data1' }
tags: disk_nvme0n1
- name: Mount -a Disk
command: mount -a
tags: disk_nvme0n1
- name: Create Es Conf & Logs & Plugins Dir
file: path=/data/elasticsearch/{{ item }} state=directory mode=0777
with_items:
- logs
- config
- plugins
- data
tags: create_es_base_dir
- name: Create Es Data Dir
file: path=/data1/elasticsearch/{{ item }} state=directory mode=0777
with_items:
- data
tags: create_es_data_dir
# es_init.yml
---
- hosts: "{{ host }}"
tasks:
- name: Docker Pull ElasticSearch
command: docker pull elasticsearch:6.8.0
tags: docker_pull
- name: Scp Base Config
copy: src=conf/elasticsearch/{{ item }} dest=/data/elasticsearch/config/{{ item }} backup=yes
with_items:
- elasticsearch.yml
- jvm.options
- log4j2.properties
tags: run_base
- name: Change Hostname elasticsearch.yml
command: sed -i "s/LOCALHOST/{{ ansible_hostname }}/g" /data/elasticsearch/config/elasticsearch.yml
tags: run_base
- name: Change Ip elasticsearch.yml
command: sed -i "s/LOCALIP/{{ ansible_ens3.ipv4.address }}/g" /data/elasticsearch/config/elasticsearch.yml
tags: run_base
- name: Change Cluster Name elasticsearch.yml
command: sed -i "s/CLUSTER-NAME/{{ cluster_name }}/g" /data/elasticsearch/config/elasticsearch.yml
tags: run_base
- name: Change Discovery Ip List
command: sed -i "s/DISCOVERY-IP-LIST/{{ discovery_ip }}/g" /data/elasticsearch/config/elasticsearch.yml
tags: run_base
- name: Change elasticsearch.yml (Master)
command: sed -i "s/{{ item.key }}/{{ item.value }}/g" {{ item.filename }}
with_items:
- { key: 'NODE-MASTER', value: 'true', filename: '/data/elasticsearch/config/elasticsearch.yml' }
- { key: 'NODE-DATA', value: 'false', filename: '/data/elasticsearch/config/elasticsearch.yml' }
- { key: 'MEMORY-SIZE', value: '8g', filename: '/data/elasticsearch/config/jvm.options' }
tags: change_conf_master
- name: Change elasticsearch.yml (Search)
command: sed -i "s/{{ item.key }}/{{ item.value }}/g" {{ item.filename }}
with_items:
- { key: 'NODE-MASTER', value: 'false', filename: '/data/elasticsearch/config/elasticsearch.yml'}
- { key: 'NODE-DATA', value: 'false', filename: '/data/elasticsearch/config/elasticsearch.yml' }
- { key: 'MEMORY-SIZE', value: '8g', filename: '/data/elasticsearch/config/jvm.options' }
tags: change_conf_search
- name: Change elasticsearch.yml (Data)
command: sed -i "s/{{ item.key }}/{{ item.value }}/g" {{ item.filename }}
with_items:
- { key: 'NODE-MASTER', value: 'false', filename: '/data/elasticsearch/config/elasticsearch.yml' }
- { key: 'NODE-DATA', value: 'true', filename: '/data/elasticsearch/config/elasticsearch.yml' }
- { key: 'MEMORY-SIZE', value: '30g', filename: '/data/elasticsearch/config/jvm.options' }
tags: change_conf_data
- name: Docker Run Master & Search
command: >
docker run -it -d -p 9200:9200 -p 9300:9300
-v /data/elasticsearch/config:/usr/share/elasticsearch/config
-v /data/elasticsearch/plugins:/usr/share/elasticsearch/plugins
-v /data/elasticsearch/logs:/usr/share/elasticsearch/logs
-v /data/elasticsearch/data:/usr/share/elasticsearch/data
--name es
--network host
--restart=always
--cap-add=IPC_LOCK --cap-add=SYS_PTRACE --ulimit memlock=-1:-1 --ulimit nofile=655360:655360
elasticsearch:6.8.0
tags: docker_run_master_search
- name: Docker Run Data
command: >
docker run -it -d -p 9200:9200 -p 9300:9300
-v /data/elasticsearch/config:/usr/share/elasticsearch/config
-v /data/elasticsearch/plugins:/usr/share/elasticsearch/plugins
-v /data/elasticsearch/logs:/usr/share/elasticsearch/logs
-v /data1/elasticsearch/data:/usr/share/elasticsearch/data
--name es
--network host
--restart=always
--cap-add=IPC_LOCK --cap-add=SYS_PTRACE --ulimit memlock=-1:-1 --ulimit nofile=655360:655360
elasticsearch:6.8.0
tags: docker_run_data
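conf/elasticsearch/elasticsearch.yml itself is not reproduced in the post; judging from the sed tasks above, the template has to carry the LOCALHOST, LOCALIP, CLUSTER-NAME, DISCOVERY-IP-LIST, NODE-MASTER and NODE-DATA placeholders, and jvm.options the MEMORY-SIZE placeholder. A minimal sketch of such a template (any setting not implied by the playbook is an assumption, not the author's file):
# sketch of conf/elasticsearch/elasticsearch.yml with the placeholders the playbook rewrites
cat > conf/elasticsearch/elasticsearch.yml <<'EOF'
cluster.name: CLUSTER-NAME
node.name: LOCALHOST
network.host: LOCALIP
node.master: NODE-MASTER
node.data: NODE-DATA
discovery.zen.ping.unicast.hosts: DISCOVERY-IP-LIST
path.data: /usr/share/elasticsearch/data
path.logs: /usr/share/elasticsearch/logs
bootstrap.memory_lock: true
EOF
# jvm.options in the same directory is expected to contain -XmsMEMORY-SIZE and -XmxMEMORY-SIZE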
GrayLog
- Parameters
tf_node_type --- node role: true (master/management node), false (worker node)
http_external_uri --- the externally visible URI served through the reverse proxy
es_http_list --- ES HTTP endpoints (the search nodes are sufficient)
mongodb_host --- MongoDB host address(es)
- Service
# initialization
ansible-playbook -C -s -e"host=xxxx" -u secret.sun -i hosts-file -t docker_pull,run_base gl_init.yml
# install
ansible-playbook -C -s -e"host=xxxx tf_node_type=false http_external_uri='https://xxxx/' es_http_list='http://xxxx:9200,http://xxxx:9200,http://xxxx:9200' mongodb_host='mongodb://xxxx:27017,xxxx:27017,xxxx:27017/graylog?replicaSet=graylog'" -u secret.sun -i hosts-file -t run_change_conf gl_init.yml
# start (a health-check sketch follows this block)
ansible-playbook -C -s -e"host=xxxx" -u secret.sun -i hosts-file -t docker_run gl_init.yml
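After the start step, check that the node actually comes up. A sketch (lbstatus is GrayLog's built-in, unauthenticated load balancer health check):
docker logs -f gl                                     # wait for the "Graylog server up and running." line
curl -s http://127.0.0.1:9000/api/system/lbstatus     # expect: ALIVE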
- PlayBook
# gl_init.yml
---
- hosts: "{{ host }}"
tasks:
- name: Docker Pull Graylog
command: docker pull stag-reg.llsops.com/dba/graylog:3.0.2
tags: docker_pull
- name: Create GrayLog Dir
file: path=/data/graylog/{{ item }} state=directory mode=0777
with_items:
- config
- contentpacks
- data
- journal
- log
- logs
- plugin
- plugins
tags: run_base
# https://dev.maxmind.com/geoip/geoip2/geolite2/
- name: Scp Free GeoLite2
copy: src=plugin/graylog/{{ item.key }}/{{ item.value }} dest=/data/graylog/contentpacks/{{ item.value }} mode=0777 backup=no
with_items:
- { key: 'GeoLite2-ASN_20190604', value: 'GeoLite2-ASN.mmdb' }
- { key: 'GeoLite2-City_20190604', value: 'GeoLite2-City.mmdb' }
- { key: 'GeoLite2-Country_20190604', value: 'GeoLite2-Country.mmdb' }
tags: run_base
- name: Scp Plugin
copy: src=plugin/graylog/{{ item }} dest=/data/graylog/plugin/{{ item }} backup=no
with_items:
- metrics-reporter-prometheus-1.5.0.jar
tags: run_base
- name: Scp Base Config
copy: src=conf/graylog/{{ item }} dest=/data/graylog/config/{{ item }} mode=0777 backup=yes
with_items:
- graylog.conf
- log4j2.xml
tags: run_change_conf
- name: Change graylog.conf
command: sed -i "s/TF-NODE-TYPE/{{ tf_node_type }}/g" /data/graylog/config/graylog.conf
tags: run_change_conf
- name: Change graylog.conf elasticsearch_hosts
command: sed -i "s+ES-HTTP-LIST+{{ es_http_list }}+g" /data/graylog/config/graylog.conf
tags: run_change_conf
- name: Change graylog.conf mongodb_uri
command: sed -i "s+MONGODB-HOST+{{ mongodb_host }}+g" /data/graylog/config/graylog.conf
tags: run_change_conf
- name: Change graylog.conf http_external_uri
command: sed -i "s+HTTP-EXTERNAL-URI+{{ http_external_uri }}+g" /data/graylog/config/graylog.conf
tags: run_change_conf
- name: Docker Run
command: >
docker run -it -p 9000:9000
-e "GRAYLOG_SERVER_JAVA_OPTS=-Xms8g -Xmx8g"
-v /data/graylog/config:/usr/share/graylog/data/config
-v /data/graylog/contentpacks:/usr/share/graylog/data/contentpacks
-v /data/graylog/data:/usr/share/graylog/data/data
-v /data/graylog/journal:/usr/share/graylog/data/journal
-v /data/graylog/log:/usr/share/graylog/data/log
-v /data/graylog/plugin/:/usr/share/graylog/data/plugin
-h {{ ansible_hostname }}
--name gl
--network host
--restart always
--ulimit memlock=-1:-1 --ulimit nofile=655360:655360
-d graylog:3.0.2
tags: docker_run
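As with elasticsearch.yml, conf/graylog/graylog.conf is not included in the post; the sed tasks above require it to contain the TF-NODE-TYPE, ES-HTTP-LIST, MONGODB-HOST and HTTP-EXTERNAL-URI placeholders. A minimal sketch of such a template (password_secret and root_password_sha2 are mandatory in GrayLog; the values below are placeholders, not the author's):
# sketch of conf/graylog/graylog.conf with the placeholders the playbook rewrites
cat > conf/graylog/graylog.conf <<'EOF'
is_master = TF-NODE-TYPE
password_secret = <generate with: pwgen -N 1 -s 96>
root_password_sha2 = <generate with: echo -n yourpassword | shasum -a 256>
http_bind_address = 0.0.0.0:9000
http_external_uri = HTTP-EXTERNAL-URI
elasticsearch_hosts = ES-HTTP-LIST
mongodb_uri = MONGODB-HOST
message_journal_dir = /usr/share/graylog/data/journal
EOF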
Architecture diagram
Tips
Anyone who runs log infrastructure knows the drill: sooner or later someone reports that "the logs are gone". As long as production and Kafka are healthy, it almost always turns out the user simply never queried them properly; there is no cure for that. When logs are so numerous and so long that they trip an ES bug, there is no cure either. Nobody looks at this system while it works, but the moment something breaks and a log is missing, it is your fault; even after spending enormous effort proving it was not our problem, it is still exhausting. Take care out there...