Adding a Custom Interceptor to Flume NG

2018-07-27 · 阿甘骑士
Business scenario: pick specific entries out of the nginx logs and feed them into Kafka. To keep the load on Kafka manageable, we made two optimizations.
1. Filter the data
<!-- Maven dependency -->
<!-- we use Flume 1.6.0 -->
<dependency>
    <groupId>org.apache.flume</groupId>
    <artifactId>flume-ng-core</artifactId>
    <version>1.6.0</version>
</dependency>
// Keep only events for the two endpoints of interest
package deng.yb.flume_ng_Interceptor;

import java.util.ArrayList;
import java.util.List;

import org.apache.commons.codec.Charsets;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.interceptor.Interceptor;

public class MyInterceptor implements Interceptor {
    /**
     * Request line of the EPP endpoint
     */
    private static final String EPP_REQUEST = "POST /api/sky_server_data_app/track/user_time HTTP/1.1";

    /**
     * Request line of the APP endpoint
     */
    private static final String APP_REQUEST = "POST /api/sky_server_data_app/code/app_code HTTP/1.1";

    public void close() {
        // nothing to release
    }

    public void initialize() {
        // no setup required
    }

    public Event intercept(Event event) {
        String body = new String(event.getBody(), Charsets.UTF_8);
        // Keep the event only if it hits one of the two endpoints;
        // returning null tells Flume to drop it. The body is unchanged,
        // so it does not need to be re-encoded.
        if (body.contains(EPP_REQUEST) || body.contains(APP_REQUEST)) {
            return event;
        }
        return null;
    }

    public List<Event> intercept(List<Event> events) {
        // Filter a batch: keep only the events that pass the per-event check
        List<Event> intercepted = new ArrayList<>(events.size());
        for (Event event : events) {
            Event interceptedEvent = intercept(event);
            if (interceptedEvent != null) {
                intercepted.add(interceptedEvent);
            }
        }
        return intercepted;
    }

    public static class Builder implements Interceptor.Builder {

        public void configure(Context arg0) {
            // no configuration needed
        }

        public Interceptor build() {
            return new MyInterceptor();
        }

    }
}
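To sanity-check the filtering logic before wiring the interceptor into an agent, here is a minimal local sketch. It only uses Flume's own EventBuilder helper; the sample log lines are made up for illustration.

package deng.yb.flume_ng_Interceptor;

import org.apache.commons.codec.Charsets;
import org.apache.flume.Event;
import org.apache.flume.event.EventBuilder;

public class MyInterceptorDemo {
    public static void main(String[] args) {
        MyInterceptor interceptor = new MyInterceptor();

        // Contains the EPP request line, so it should pass through
        Event kept = EventBuilder.withBody(
                "10.0.0.1 - - \"POST /api/sky_server_data_app/track/user_time HTTP/1.1\" 200",
                Charsets.UTF_8);

        // Unrelated request line, so intercept(...) should return null
        Event dropped = EventBuilder.withBody(
                "10.0.0.1 - - \"GET /healthcheck HTTP/1.1\" 200",
                Charsets.UTF_8);

        System.out.println(interceptor.intercept(kept) != null);    // true
        System.out.println(interceptor.intercept(dropped) != null); // false
    }
}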

Register the interceptor on the source in the Flume agent configuration (agent epplog, source r1):

epplog.sources.r1.interceptors=i1
epplog.sources.r1.interceptors.i1.type=deng.yb.flume_ng_Interceptor.MyInterceptor$Builder
2. Kafka load balancing
As the Flume documentation explains, the Kafka Sink uses the topic and key properties from the FlumeEvent headers to send events to Kafka. If topic exists in the headers, the event will be sent to that specific topic, overriding the topic configured for the sink. If key exists in the headers, the key will be used by Kafka to partition the data between the topic partitions. Events with the same key will be sent to the same partition. If the key is null, events will be sent to random partitions.
To give every event a unique key header, chain Flume's built-in UUIDInterceptor after the custom one:

epplog.sources.r1.interceptors=i1 i2
epplog.sources.r1.interceptors.i1.type=deng.yb.flume_ng_Interceptor.MyInterceptor$Builder

epplog.sources.r1.interceptors.i2.type=org.apache.flume.sink.solr.morphline.UUIDInterceptor$Builder
epplog.sources.r1.interceptors.i2.headerName=key
epplog.sources.r1.interceptors.i2.preserveExisting=false
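Why the unique key helps: Kafka's default partitioner hashes a non-null key to pick the partition, so distinct UUID keys scatter events roughly uniformly across all partitions instead of funneling everything to one. A conceptual sketch of that mapping (simplified for illustration; Kafka's real partitioner uses murmur2 hashing):

import java.util.Arrays;
import java.util.concurrent.ThreadLocalRandom;

public class PartitionSketch {
    // Simplified stand-in for Kafka's default partitioner: a non-null key
    // always maps to the same partition; a null key gets a random one.
    static int partitionFor(byte[] key, int numPartitions) {
        if (key == null) {
            return ThreadLocalRandom.current().nextInt(numPartitions);
        }
        return (Arrays.hashCode(key) & Integer.MAX_VALUE) % numPartitions;
    }

    public static void main(String[] args) {
        // Three distinct keys, three partitions (as created for epplog1 below)
        for (String key : new String[] { "uuid-a", "uuid-b", "uuid-c" }) {
            System.out.println(key + " -> partition "
                    + partitionFor(key.getBytes(), 3));
        }
    }
}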
# Choose the partition count based on the number of brokers, ideally an integer multiple of the broker count
kafka-topics --create --zookeeper bi-slave1:2181,bi-slave2:2181,bi-master:2181 --replication-factor 1 --partitions 3 --topic epplog1
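To verify the partition layout after creation (same kafka-topics wrapper as above):

kafka-topics --describe --zookeeper bi-slave1:2181,bi-slave2:2181,bi-master:2181 --topic epplog1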