spark

Spark数据倾斜解决

2020-05-25  本文已影响0人  专职掏大粪

关于处理倾斜的demo程序-github

解决方案四:两阶段聚合(局部聚合+全局聚合)

    public static void main(String[] args) {
        SparkSession spark = SparkSession
                .builder()
                .appName("JavaWordCount")
                .master("local[*]")
                .getOrCreate();

        JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());

        int slices = 3;
        // Input lines are "word count"; "hello" is deliberately skewed (4 of 6 rows).
        List<String> list1 = Arrays.asList(
                "hello 1",
                "hello 2",
                "hello 3",
                "hello 4",
                "you 1",
                "me 2");

        // Parse each line into a (word, count) pair.
        JavaPairRDD<String, Long> rdd = jsc.parallelize(list1, slices).mapToPair(
                new PairFunction<String, String, Long>() {
                    private static final long serialVersionUID = 1L;

                    @Override
                    public Tuple2<String, Long> call(String s) throws Exception {
                        String[] arr = s.split(" ");
                        return new Tuple2<String, Long>(arr[0], Long.parseLong(arr[1]));
                    }
                }
        );

        // Step 1: prepend a random prefix in [0, 10) to every key so that one hot
        // key is scattered over up to 10 distinct shuffle keys (and partitions).
        JavaPairRDD<String, Long> randomPrefixRdd = rdd.mapToPair(
                new PairFunction<Tuple2<String, Long>, String, Long>() {
                    private static final long serialVersionUID = 1L;

                    // One Random per function instance instead of one per record.
                    private final Random random = new Random();

                    @Override
                    public Tuple2<String, Long> call(Tuple2<String, Long> tuple) throws Exception {
                        int prefix = random.nextInt(10);
                        return new Tuple2<String, Long>(prefix + "_" + tuple._1, tuple._2);
                    }
                }
        );

        // Step 2: local (partial) aggregation on the prefixed keys.
        JavaPairRDD<String, Long> localAggRdd = randomPrefixRdd.reduceByKey(
                new Function2<Long, Long, Long>() {
                    private static final long serialVersionUID = 1L;

                    @Override
                    public Long call(Long v1, Long v2) throws Exception {
                        return v1 + v2;
                    }
                });

        // Step 3: strip the random prefix to restore the original key.
        JavaPairRDD<String, Long> removedRandomPrefixRdd = localAggRdd.mapToPair(
                new PairFunction<Tuple2<String, Long>, String, Long>() {
                    private static final long serialVersionUID = 1L;

                    @Override
                    public Tuple2<String, Long> call(Tuple2<String, Long> tuple)
                            throws Exception {
                        // substring after the FIRST '_' (not split) keeps original
                        // keys that themselves contain '_' intact.
                        String originalKey = tuple._1.substring(tuple._1.indexOf('_') + 1);
                        return new Tuple2<String, Long>(originalKey, tuple._2);
                    }
                });

        // Step 4: global aggregation on the restored keys; each key now carries at
        // most 10 partially-aggregated rows, so this shuffle is no longer skewed.
        JavaPairRDD<String, Long> globalAggRdd = removedRandomPrefixRdd.reduceByKey(
                new Function2<Long, Long, Long>() {
                    private static final long serialVersionUID = 1L;

                    @Override
                    public Long call(Long v1, Long v2) throws Exception {
                        return v1 + v2;
                    }
                });

        List<Tuple2<String, Long>> output = globalAggRdd.collect();
        for (Tuple2<?, ?> tuple : output) {
            System.out.println(tuple._1() + ": " + tuple._2());
        }
        spark.stop();

    }

解决方案五:将reduce join转为map join

 SparkSession spark = SparkSession
                .builder()
                .appName("JavaWordCount")
                .master("local[*]")
                .getOrCreate();

        JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());
        SparkContext sc = spark.sparkContext();

        int slices = 3;
        // Student records: "id<two spaces>name". This is the SMALL side of the join.
        List<String> list1 = Arrays.asList(
                "1  郑祥楷1",
                "2  王佳豪1",
                "3  刘鹰2",
                "4  宋志华3",
                "5  刘帆4",
                "6  OLDLi5"
        );

        // Class records: "id<one space>className". This is the LARGE side.
        List<String> list2 = Arrays.asList(
                "1 1807bd-bj",
                "2 1807bd-sz",
                "3 1807bd-wh",
                "4 1807bd-xa",
                "7 1805bd-bj"
        );

        JavaPairRDD<Long, String> rdd1 = jsc.parallelize(list1, slices).mapToPair(
                new PairFunction<String, Long, String>() {
                    private static final long serialVersionUID = 1L;

                    @Override
                    public Tuple2<Long, String> call(String s) throws Exception {
                        String[] arr = s.split("  ");
                        return new Tuple2<Long, String>(Long.parseLong(arr[0]), arr[1]);
                    }
                }
        );

        JavaPairRDD<Long, String> rdd2 = jsc.parallelize(list2, slices).mapToPair(
                new PairFunction<String, Long, String>() {
                    private static final long serialVersionUID = 1L;

                    @Override
                    public Tuple2<Long, String> call(String s) throws Exception {
                        String[] arr = s.split(" ");
                        return new Tuple2<Long, String>(Long.parseLong(arr[0]), arr[1]);
                    }
                }
        );
        // Collect the smaller RDD to the driver, then broadcast it so each
        // executor holds exactly one read-only copy. This replaces the shuffle a
        // regular join would require (classic map-side / broadcast join).
        List<Tuple2<Long, String>> rdd1Data = rdd1.collect();
        final Broadcast<List<Tuple2<Long, String>>> rdd1DataBroadcast = jsc.broadcast(rdd1Data);
        // Perform the "join" as a map-side lookup instead of a join operator.
        JavaPairRDD<Long, Tuple2<String, String>> joinedRdd = rdd2.mapToPair(
                new PairFunction<Tuple2<Long, String>, Long, Tuple2<String, String>>() {
                    private static final long serialVersionUID = 1L;

                    // Lookup table built lazily ONCE per task from the broadcast
                    // value. The original rebuilt this map for every single record
                    // (O(|rdd1|) work per record).
                    private transient Map<Long, String> rdd1DataMap;

                    @Override
                    public Tuple2<Long, Tuple2<String, String>> call(Tuple2<Long, String> tuple)
                            throws Exception {
                        if (rdd1DataMap == null) {
                            rdd1DataMap = new HashMap<Long, String>();
                            for (Tuple2<Long, String> data : rdd1DataBroadcast.value()) {
                                rdd1DataMap.put(data._1, data._2);
                            }
                        }
                        Long key = tuple._1;
                        String value = tuple._2;
                        // NOTE: a key missing from rdd1 yields null here (left-outer
                        // semantics), whereas a real inner join would drop the row.
                        String rdd1Value = rdd1DataMap.get(key);
                        return new Tuple2<Long, Tuple2<String, String>>(key, new Tuple2<String, String>(value, rdd1Value));
                    }
                });

        List<Tuple2<Long, Tuple2<String, String>>> output = joinedRdd.collect();
        for (Tuple2<?, ?> tuple : output) {
            Tuple2<String, String> tuple2 = (Tuple2<String, String>)tuple._2();
            System.out.println(tuple._1() + ": " + tuple2._1+ ": " +tuple2._2());
        }
        spark.stop();

解决方案六:采样倾斜key并分拆join操作

// Step 1: sample ~10% of rdd1, which contains the few heavily skewed keys.
JavaPairRDD<Long, String> sampledRDD = rdd1.sample(false, 0.1);

// Count each key's occurrences in the sample and sort descending to find the
// hottest key(s). We take just the single hottest key as a demo; taking the
// top-n hottest keys works the same way.
JavaPairRDD<Long, Long> mappedSampledRDD = sampledRDD.mapToPair(
        new PairFunction<Tuple2<Long,String>, Long, Long>() {
            private static final long serialVersionUID = 1L;
            @Override
            public Tuple2<Long, Long> call(Tuple2<Long, String> tuple)
                    throws Exception {
                return new Tuple2<Long, Long>(tuple._1, 1L);
            }
        });
JavaPairRDD<Long, Long> countedSampledRDD = mappedSampledRDD.reduceByKey(
        new Function2<Long, Long, Long>() {
            private static final long serialVersionUID = 1L;
            @Override
            public Long call(Long v1, Long v2) throws Exception {
                return v1 + v2;
            }
        });
// Swap to (count, key) so sortByKey(false) orders by descending count.
JavaPairRDD<Long, Long> reversedSampledRDD = countedSampledRDD.mapToPair(
        new PairFunction<Tuple2<Long,Long>, Long, Long>() {
            private static final long serialVersionUID = 1L;
            @Override
            public Tuple2<Long, Long> call(Tuple2<Long, Long> tuple)
                    throws Exception {
                return new Tuple2<Long, Long>(tuple._2, tuple._1);
            }
        });
final Long skewedUserid = reversedSampledRDD.sortByKey(false).take(1).get(0)._2;

// Step 2: split rdd1 into the skewed key's rows ...
JavaPairRDD<Long, String> skewedRDD = rdd1.filter(
        new Function<Tuple2<Long,String>, Boolean>() {
            private static final long serialVersionUID = 1L;
            @Override
            public Boolean call(Tuple2<Long, String> tuple) throws Exception {
                return tuple._1.equals(skewedUserid);
            }
        });
// ... and all the ordinary (non-skewed) keys.
JavaPairRDD<Long, String> commonRDD = rdd1.filter(
        new Function<Tuple2<Long,String>, Boolean>() {
            private static final long serialVersionUID = 1L;
            @Override
            public Boolean call(Tuple2<Long, String> tuple) throws Exception {
                return !tuple._1.equals(skewedUserid);
            }
        });

// Step 3: from rdd2 (the side with an even key distribution), keep only the
// skewed key's rows and replicate each one 100x, deterministically tagged with
// every prefix "0_".."99_", so each random prefix added below finds a match.
JavaPairRDD<String, Row> skewedRdd2 = rdd2.filter(
         new Function<Tuple2<Long,Row>, Boolean>() {
            private static final long serialVersionUID = 1L;
            @Override
            public Boolean call(Tuple2<Long, Row> tuple) throws Exception {
                return tuple._1.equals(skewedUserid);
            }
        }).flatMapToPair(new PairFlatMapFunction<Tuple2<Long,Row>, String, Row>() {
            private static final long serialVersionUID = 1L;
            @Override
            public Iterator<Tuple2<String, Row>> call(
                    Tuple2<Long, Row> tuple) throws Exception {
                List<Tuple2<String, Row>> list = new ArrayList<Tuple2<String, Row>>(100);
                for(int i = 0; i < 100; i++) {
                    list.add(new Tuple2<String, Row>(i + "_" + tuple._1, tuple._2));
                }
                // Spark 2.x PairFlatMapFunction returns an Iterator; the original
                // used the pre-2.0 Iterable signature.
                return list.iterator();
            }
        });

// Step 4: tag each skewed-key row of rdd1 with a random prefix in [0, 100),
// join against the expanded skewedRdd2, then strip the prefix back off.
JavaPairRDD<Long, Tuple2<String, Row>> joinedRDD1 = skewedRDD.mapToPair(
        new PairFunction<Tuple2<Long,String>, String, String>() {
            private static final long serialVersionUID = 1L;
            @Override
            public Tuple2<String, String> call(Tuple2<Long, String> tuple)
                    throws Exception {
                Random random = new Random();
                int prefix = random.nextInt(100);
                return new Tuple2<String, String>(prefix + "_" + tuple._1, tuple._2);
            }
        })
        // The original joined an undefined "skewedUserid2infoRDD"; the expanded
        // RDD built in step 3 is the intended join target.
        .join(skewedRdd2)
        .mapToPair(new PairFunction<Tuple2<String,Tuple2<String,Row>>, Long, Tuple2<String, Row>>() {
                        private static final long serialVersionUID = 1L;
                        @Override
                        public Tuple2<Long, Tuple2<String, Row>> call(
                            Tuple2<String, Tuple2<String, Row>> tuple)
                            throws Exception {
                            long key = Long.valueOf(tuple._1.split("_")[1]);
                            return new Tuple2<Long, Tuple2<String, Row>>(key, tuple._2);
                        }
                    });

// Step 5: the non-skewed keys can be joined with rdd2 directly — no skew there.
JavaPairRDD<Long, Tuple2<String, Row>> joinedRDD2 = commonRDD.join(rdd2);

// Step 6: union the two partial results to obtain the complete join result.
JavaPairRDD<Long, Tuple2<String, Row>> joinedRDD = joinedRDD1.union(joinedRDD2);

解决方案七:使用随机前缀和扩容RDD进行join

方案缺点:该方案更多的是缓解数据倾斜,而不是彻底避免数据倾斜。而且需要对整个RDD进行扩容,对内存资源要求很高

SparkSession spark = SparkSession
                .builder()
                .appName("JavaWordCount")
                .master("local[*]")
                .getOrCreate();

        JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());
        SparkContext sc = spark.sparkContext();

        int slices = 3;
        // Skewed side: ids 1 and 2 dominate ("id<two spaces>name").
        List<String> list2 = Arrays.asList(
                "1  郑祥楷1",
                "1  郑祥楷1",
                "1  郑祥楷1",
                "1  郑祥楷1",
                "1  郑祥楷1",
                "1  郑祥楷1",
                "1  郑祥楷1",
                "1  郑祥楷1",
                "1  郑祥楷1",
                "1  郑祥楷1",
                "1  郑祥楷1",
                "1  郑祥楷1",
                "2  王佳豪1",
                "2  王佳豪1",
                "2  王佳豪1",
                "2  王佳豪1",
                "2  王佳豪1",
                "2  王佳豪1",
                "2  王佳豪1",
                "2  王佳豪1",
                "2  王佳豪1",
                "3  刘鹰2",
                "4  宋志华3",
                "5  刘帆4",
                "6  OLDLi5"
        );

        // Evenly distributed side: "id<one space>className".
        List<String> list1 = Arrays.asList(
                "1 1807bd-bj",
                "2 1807bd-sz",
                "3 1807bd-wh",
                "4 1807bd-xa",
                "7 1805bd-bj"
        );

        JavaPairRDD<Long, String> rdd1 = jsc.parallelize(list1, slices).mapToPair(
                new PairFunction<String, Long, String>() {
                    private static final long serialVersionUID = 1L;

                    @Override
                    public Tuple2<Long, String> call(String s) throws Exception {
                        String[] arr = s.split(" ");
                        return new Tuple2<Long, String>(Long.parseLong(arr[0]), arr[1]);
                    }
                }
        );

        JavaPairRDD<Long, String> rdd2 = jsc.parallelize(list2, slices).mapToPair(
                new PairFunction<String, Long, String>() {
                    private static final long serialVersionUID = 1L;

                    @Override
                    public Tuple2<Long, String> call(String s) throws Exception {
                        String[] arr = s.split("  ");
                        return new Tuple2<Long, String>(Long.parseLong(arr[0]), arr[1]);
                    }
                }
        );

        // Step 1: expand the evenly distributed RDD 100x, deterministically
        // tagging the copies "0_".."99_" so every random prefix below matches.
        JavaPairRDD<String, String> expandedRDD = rdd1.flatMapToPair(
                new PairFlatMapFunction<Tuple2<Long, String>, String, String>() {
                    private static final long serialVersionUID = 1L;

                    @Override
                    public Iterator<Tuple2<String, String>> call(Tuple2<Long, String> tuple)
                            throws Exception {
                        List<Tuple2<String, String>> list = new ArrayList<Tuple2<String, String>>(100);
                        for (int i = 0; i < 100; i++) {
                            list.add(new Tuple2<String, String>(i + "_" + tuple._1, tuple._2));
                        }
                        return list.iterator();
                    }
                });

        // Step 2: tag every record of the skewed RDD with a random prefix in
        // [0, 100) so one hot key spreads over up to 100 shuffle keys.
        JavaPairRDD<String, String> mappedRDD = rdd2.mapToPair(
                new PairFunction<Tuple2<Long, String>, String, String>() {
                    private static final long serialVersionUID = 1L;

                    // One Random per function instance instead of one per record.
                    private final Random random = new Random();

                    @Override
                    public Tuple2<String, String> call(Tuple2<Long, String> tuple)
                            throws Exception {
                        int prefix = random.nextInt(100);
                        return new Tuple2<String, String>(prefix + "_" + tuple._1, tuple._2);
                    }
                });

        // (The original collected both RDDs to the driver here into unused raw
        // lists for debugging, forcing two extra full computations; removed.)

        // Step 3: join the two prefixed RDDs ...
        JavaPairRDD<String, Tuple2<String, String>> joinedRDD = mappedRDD.join(expandedRDD);

        // ... and strip the prefix from the joined keys to restore original ids.
        JavaPairRDD<String, Tuple2<String, String>> resultRDD = joinedRDD.mapToPair(
                new PairFunction<Tuple2<String, Tuple2<String, String>>, String, Tuple2<String, String>>() {
                    private static final long serialVersionUID = 1L;

                    @Override
                    public Tuple2<String, Tuple2<String, String>> call(Tuple2<String, Tuple2<String, String>> tuple) throws Exception {
                        String[] arr = tuple._1.split("_");
                        return new Tuple2<String, Tuple2<String, String>>(arr[1], tuple._2);
                    }
                }
        );

        List<Tuple2<String, Tuple2<String, String>>> output = resultRDD.collect();
        for (Tuple2<?, ?> tuple : output) {
            Tuple2<String, String> tuple2 = (Tuple2<String, String>) tuple._2();
            System.out.println(tuple._1() + ": " + tuple2._1 + ": " + tuple2._2());
        }
        spark.stop();

转自:
https://tech.meituan.com/2016/05/12/spark-tuning-pro.html
https://blog.51cto.com/14048416/2338651

上一篇 下一篇

猜你喜欢

热点阅读