
Using canal to incrementally sync MySQL data into ES

2019-06-14 · Okami_


Setting up the environment

Overview of the approach

canal-server poses as a MySQL replica and parses the binlog; canal-adapter consumes the parsed change events and writes them into Elasticsearch, giving near-real-time incremental sync.

MySQL configuration

Enable binlog in ROW mode by adding these lines under the [mysqld] section of my.cnf (restart MySQL afterwards):

log-bin=mysql-bin #adding this line is enough
binlog-format=ROW #canal requires ROW mode
server_id=1 #required for MySQL replication; must not collide with canal's slaveId
Then create a MySQL user for canal with replication privileges:

CREATE USER canal IDENTIFIED BY 'canal';
GRANT SELECT, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'canal'@'%';
-- GRANT ALL PRIVILEGES ON *.* TO 'canal'@'%' ;
FLUSH PRIVILEGES;
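
A quick sanity check from a MySQL client that the binlog settings took effect (an extra check, not part of the original notes):

mysql -ucanal -pcanal -e "SHOW VARIABLES LIKE 'log_bin'; SHOW VARIABLES LIKE 'binlog_format'; SHOW MASTER STATUS;"
# log_bin should be ON and binlog_format should be ROW;
# MASTER STATUS shows the binlog file/position canal will start reading from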

Installing ES

Download the package

wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.8.0.tar.gz

Create a system user

# create the user
adduser es
# set a password for the new user
passwd es
# switch to the new user
su es
# create the install directory and move the package into it
mkdir -p /home/es/elasticsearch
mv elasticsearch-6.8.0.tar.gz /home/es/elasticsearch

Extract the package

cd /home/es/elasticsearch
tar -xzvf elasticsearch-6.8.0.tar.gz

Edit the configuration file

cd elasticsearch-6.8.0
vi config/elasticsearch.yml
#cluster name; must be identical on every node of the same cluster
cluster.name: okami-application
#name of this node
node.name: node-1
#this node is eligible to become master
node.master: true
#this node can store data
node.data: true
#number of shards
#index.number_of_shards: 5
#number of replicas
#index.number_of_replicas: 3
#IP address to bind to, IPv4 or IPv6
network.bind_host: 0.0.0.0
#IP address other nodes use to reach this node
network.publish_host: 192.168.10.1
#sets bind_host and publish_host at the same time
network.host: 192.168.10.1
#port for node-to-node transport
transport.tcp.port: 9300
#whether to compress node-to-node TCP traffic
transport.tcp.compress: true
#HTTP port for external clients
http.port: 9200
#maximum size of an HTTP request body
http.max_content_length: 100mb
#whether to expose the HTTP service externally
http.enabled: true
#how many master-eligible nodes this node must see; defaults to 1, for larger clusters set it higher (2-4)
discovery.zen.minimum_master_nodes: 1
#ping timeout when discovering other nodes in the cluster
discovery.zen.ping_timeout: 120s
#whether to enable multicast node discovery
#discovery.zen.ping.multicast.enabled: true
#initial list of master nodes used to discover other nodes that join the cluster
discovery.zen.ping.unicast.hosts: ["192.168.10.1:9300"]

path.data: /usr/hdp/2.5.0.0-1245/esdata
path.logs: /usr/hdp/2.5.0.0-1245/eslog

http.cors.enabled: true
http.cors.allow-origin: "*"
#--------------------------------------------------------------------------------
#index.analysis.analyzer.ik.type: "ik"

Start ES

Check that Java 8 is available, then start ES in the background:

[es@xxx ~]$ java -version
java version "1.8.0_172"
Java(TM) SE Runtime Environment (build 1.8.0_172-b11)
Java HotSpot(TM) 64-Bit Server VM (build 25.172-b11, mixed mode)
[es@xxx ~]$ /home/es/elasticsearch/elasticsearch-6.8.0/bin/elasticsearch -d
[es@xxx ~]$ curl http://127.0.0.1:9200
{
  "name" : "node-1",
  "cluster_name" : "okami-application",
  "cluster_uuid" : "Q00-w01oQT6vsXx7E6KIeA",
  "version" : {
    "number" : "6.8.0",
    "build_flavor" : "default",
    "build_type" : "tar",
    "build_hash" : "65b6179",
    "build_date" : "2019-05-15T20:06:13.172855Z",
    "build_snapshot" : false,
    "lucene_version" : "7.7.0",
    "minimum_wire_compatibility_version" : "5.6.0",
    "minimum_index_compatibility_version" : "5.0.0"
  },
  "tagline" : "You Know, for Search"
}

Install and deploy the other hosts

Repeat the same installation on the remaining nodes, changing node.name and the publish/host addresses to each node's own IP, and listing all master-eligible nodes in discovery.zen.ping.unicast.hosts.

Check the cluster deployment

[es@xxx ~]$ curl http://127.0.0.1:9200/_cluster/health
{"cluster_name":"okami-application","status":"green","timed_out":false,"number_of_nodes":3,"number_of_data_nodes":3,"active_primary_shards":0,"active_shards":0,"relocating_shards":0,"initializing_shards":0,"unassigned_shards":0,"delayed_unassigned_shards":0,"number_of_pending_tasks":0,"number_of_in_flight_fetch":0,"task_max_waiting_in_queue_millis":0,"active_shards_percent_as_number":100.0}

Problems encountered during installation
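
Two bootstrap checks typically fail when starting ES as a non-root user on a fresh machine; if the process exits right away, these fixes (run as root) are the usual suspects:

# "max virtual memory areas vm.max_map_count [65530] is too low"
sysctl -w vm.max_map_count=262144
# "max file descriptors [4096] for elasticsearch process is too low"
# -> raise the limit for the es user in /etc/security/limits.conf:
#    es soft nofile 65536
#    es hard nofile 65536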

Installing canal-server

Download canal

wget https://github.com/alibaba/canal/releases/download/canal-1.1.3/canal.deployer-1.1.3.tar.gz
mkdir -p /opt/app/canal
mv canal.deployer-1.1.3.tar.gz /opt/app/canal

Extract

cd /opt/app/canal
tar zxvf canal.deployer-1.1.3.tar.gz

Edit the configuration file

The server-level settings live in conf/canal.properties:

canal.id = 1 # unique id of this canal server; currently of no practical significance
canal.ip = 192.111.112.103 # local IP the canal server binds to; if unset, a local IP is picked automatically
canal.port = 11111 # port for the canal server's socket service
canal.metrics.pull.port = 11112
canal.zkServers = 192.168.1.111:2181 # zookeeper ensemble the canal server connects to

# flush data to zk
canal.zookeeper.flush.period = 1000 # how often canal flushes state to zookeeper, in milliseconds
canal.withoutNetty = false
# tcp, kafka, RocketMQ
canal.serverMode = tcp
# flush meta cursor/parse position to file
canal.file.data.dir = ${canal.conf.dir}
canal.file.flush.period = 1000
## memory store RingBuffer size, should be Math.pow(2,n)
canal.instance.memory.buffer.size = 16384
## memory store RingBuffer used memory unit size , default 1kb
canal.instance.memory.buffer.memunit = 1024 
## memory store gets mode, MEMSIZE or ITEMSIZE
canal.instance.memory.batch.mode = MEMSIZE
canal.instance.memory.rawEntry = true

## detecting config
canal.instance.detecting.enable = false
#canal.instance.detecting.sql = insert into retl.xdual values(1,now()) on duplicate key update x=now()
canal.instance.detecting.sql = select 1
canal.instance.detecting.interval.time = 3
canal.instance.detecting.retry.threshold = 3
canal.instance.detecting.heartbeatHaEnable = false

# support maximum transaction size, more than the size of the transaction will be cut into multiple transactions delivery
canal.instance.transaction.size =  1024
# mysql fallback connected to new master should fallback times
canal.instance.fallbackIntervalInSeconds = 60

# network config
canal.instance.network.receiveBufferSize = 16384
canal.instance.network.sendBufferSize = 16384
canal.instance.network.soTimeout = 30

# binlog filter config
canal.instance.filter.druid.ddl = true
canal.instance.filter.query.dcl = false
canal.instance.filter.query.dml = false
canal.instance.filter.query.ddl = false
canal.instance.filter.table.error = false
canal.instance.filter.rows = false
canal.instance.filter.transaction.entry = false

# binlog format/image check
canal.instance.binlog.format = ROW,STATEMENT,MIXED 
canal.instance.binlog.image = FULL,MINIMAL,NOBLOB

# binlog ddl isolation
canal.instance.get.ddl.isolation = false

# parallel parser config
canal.instance.parser.parallel = true
## concurrent thread number, default 60% available processors, suggest not to exceed Runtime.getRuntime().availableProcessors()
#canal.instance.parser.parallelThreadSize = 16
## disruptor ringbuffer size, must be power of 2
canal.instance.parser.parallelBufferSize = 256

# table meta tsdb info
canal.instance.tsdb.enable = false
canal.instance.tsdb.dir = ${canal.file.data.dir:../conf}/${canal.instance.destination:}
canal.instance.tsdb.url = jdbc:h2:${canal.instance.tsdb.dir}/h2;CACHE_SIZE=1000;MODE=MYSQL;
canal.instance.tsdb.dbUsername = canal
canal.instance.tsdb.dbPassword = password
# dump snapshot interval, default 24 hour
canal.instance.tsdb.snapshot.interval = 24
# purge snapshot expire , default 360 hour(15 days)
canal.instance.tsdb.snapshot.expire = 360

# aliyun ak/sk , support rds/mq
canal.aliyun.accessKey =
canal.aliyun.secretKey =

#################################################
#########               destinations            ############# 
#################################################
canal.destinations = example_01,example_02  # list of instances deployed on this server
# conf root dir
canal.conf.dir = ../conf
# auto scan instance dir add/remove and start/stop instance
canal.auto.scan = true
canal.auto.scan.interval = 5

#canal.instance.tsdb.spring.xml = classpath:spring/tsdb/h2-tsdb.xml
#canal.instance.tsdb.spring.xml = classpath:spring/tsdb/mysql-tsdb.xml

canal.instance.global.mode = spring # how each instance's config is loaded
canal.instance.global.lazy = false
#canal.instance.global.manager.address = 127.0.0.1:1099
#canal.instance.global.spring.xml = classpath:spring/memory-instance.xml
#canal.instance.global.spring.xml = classpath:spring/file-instance.xml
canal.instance.global.spring.xml = classpath:spring/default-instance.xml

##################################################
#########                    MQ                      #############
##################################################
canal.mq.servers = 127.0.0.1:6667
canal.mq.retries = 0
canal.mq.batchSize = 16384
canal.mq.maxRequestSize = 1048576
canal.mq.lingerMs = 100
canal.mq.bufferMemory = 33554432
canal.mq.canalBatchSize = 50
canal.mq.canalGetTimeout = 100
canal.mq.flatMessage = true
canal.mq.compressionType = none
canal.mq.acks = all
# use transaction for kafka flatMessage batch produce
canal.mq.transaction = false
#canal.mq.properties. =

Create a directory under conf/ for each instance named in canal.destinations; each needs its own instance.properties (the stock conf/example/instance.properties works as a template):

mkdir conf/example_01
mkdir conf/example_02
vi conf/example_01/instance.properties
canal.instance.mysql.slaveId=99
canal.instance.gtidon=false

# position info
canal.instance.master.address=
canal.instance.master.journal.name=
canal.instance.master.position=
canal.instance.master.timestamp=
canal.instance.master.gtid=

# rds oss binlog
canal.instance.rds.accesskey=
canal.instance.rds.secretkey=
canal.instance.rds.instanceId=

# table meta tsdb info
canal.instance.tsdb.enable=false

# username/password
canal.instance.dbUsername=username
canal.instance.dbPassword=password
canal.instance.defaultDatabaseName=dbName
canal.instance.connectionCharset = UTF-8
# enable druid Decrypt database password
canal.instance.enableDruid=false

# table regex
canal.instance.filter.regex=.*\\..*

# mq config
canal.mq.topic=example
# dynamic topic route by schema or table regex
#canal.mq.dynamicTopic=mytest1.user,mytest2\\..*,.*\\..*
canal.mq.partition=0
# hash partition config
#canal.mq.partitionsNum=3
#canal.mq.partitionHash=test.table:id^name,.*\\..*

Configuration notes

Examples for canal.instance.filter.regex:

1.  All tables: .*   or  .*\\..*
2.  All tables in the canal schema: canal\\..*
3.  Tables in canal whose names start with canal: canal\\.canal.*
4.  A single table in the canal schema: canal.test1
5.  Several rules combined: canal\\..*,mysql.test1,mysql.test2 (comma separated)

Start
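
The deployer ships with standard scripts under bin/ (per canal's QuickStart):

sh bin/startup.sh
# and to stop it:
# sh bin/stop.sh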

View the logs
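
The server log and the per-instance logs are separate, following canal's default layout:

tail -f logs/canal/canal.log
tail -f logs/example_01/example_01.log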

Installing canal-adapter

Download the package

wget https://github.com/alibaba/canal/releases/download/canal-1.1.3/canal.adapter-1.1.3.tar.gz

Extract

tar xzvf canal.adapter-1.1.3.tar.gz

Edit the configuration file

The adapter's main configuration lives in conf/application.yml:

server:
  port: 8081
spring:
  jackson:
    date-format: yyyy-MM-dd HH:mm:ss
    time-zone: GMT+8
    default-property-inclusion: non_null

canal.conf:
  mode: tcp
  zookeeperHosts: 192.111.111.173:2181
#  mqServers: 127.0.0.1:9092 #or rocketmq
#  flatMessage: true
  batchSize: 500
  syncBatchSize: 1000
  retries: 0
  timeout:
  accessKey:
  secretKey:
  srcDataSources:
    defaultDS:
      url: jdbc:mysql://192.168.1.100:3306/test?useUnicode=true
      username: username
      password: password
    defaultDS2:
      url: jdbc:mysql://192.168.1.101:3306/test?useUnicode=true
      username: username
      password: password
  canalAdapters:
  - instance: example_01
    groups:
    - groupId: g1
      outerAdapters:
      - name: logger
      - name: es
        hosts: 192.168.1.110:9300
        properties:
          cluster.name: okami-application
  - instance: example_02
    groups:
    - groupId: g1
      outerAdapters:
      - name: logger
      - name: es
        hosts: 192.168.1.111:9300
        properties:
          cluster.name: okami-application

Next, define one ES mapping file per instance:

vi conf/es/example_01.yml
dataSourceKey: defaultDS
destination: example_01
groupId: g1
esMapping:
  _index: indexName
  _type: typeName
  _id: _id
  upsert: true
#  pk: id
  sql: "select a.id as _id, a.name as _name, a.role_id as _role_id, b.role_name as _role_name,
        a.c_time as _c_time from user a
        left join role b on b.id=a.role_id"
#  objFields:
#    _labels: array:;
#  etlCondition: "where a.c_time>='{0}'"
  commitBatch: 3000

And the mapping for the second instance:

vi conf/es/example_02.yml
dataSourceKey: defaultDS2
destination: example_02
groupId: g1
esMapping:
  _index: indexName
  _type: typeName
  _id: _id
  upsert: true
#  pk: id
  sql: "select a.id as _id, a.name as _name, a.role_id as _role_id, b.role_name as _role_name,
        a.c_time as _c_time from user a
        left join role b on b.id=a.role_id"
#  objFields:
#    _labels: array:;
#  etlCondition: "where a.c_time>='{0}'"
  commitBatch: 3000

Configuration notes
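
_index and _type name the target index/type; the column aliased as _id supplies the document id; upsert: true turns writes into upserts; every other alias in the sql becomes a field of the document. As a rough illustration (hypothetical row data, assuming indexName is a real index), after inserting a user row with id 1 you would see something like:

curl http://192.168.1.110:9200/indexName/typeName/1
# => ... "_id": "1", "_source": {"_name": "okami", "_role_id": 2, "_role_name": "admin", "_c_time": "..."}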

Start
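
The adapter uses the same script layout as the server:

sh bin/startup.sh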

View the logs

tail -f logs/adapter/adapter.log 

Managing via HTTP requests
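
The adapter exposes a small management REST API on its server.port (8081 above). Listing every subscribed destination and its sync status (GET /destinations, per the ClientAdapter docs) returns the JSON below:

curl http://127.0.0.1:8081/destinations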

  [
      {
          "destination": "example_01",
          "status": "on"
      },
      {
          "destination": "example_02",
          "status": "on"
      }
  ]
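
Querying a single destination's sync switch (GET /syncSwitch/{destination}; the response key really is spelled "stauts" upstream):

curl http://127.0.0.1:8081/syncSwitch/example_01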
{
    "stauts": "off"
}
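
Toggling sync on or off (PUT /syncSwitch/{destination}/on or .../off); the Chinese message below reads "instance example_01: sync enabled successfully":

curl -X PUT http://127.0.0.1:8081/syncSwitch/example_01/on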
{
    "code": 20000,
    "message": "实例: example_01 开启同步成功"
}