hive动态分区和占比优化案例

2018-06-04  本文已影响255人  miss幸运

-- MySQL target table for the monthly online-product investigation-rate report.
-- Populated from the Hive export via LOAD DATA (see the load steps below).
-- Fixes: added the missing statement terminator, made the CREATE idempotent,
-- and used numeric literals (not strings) as defaults for numeric columns.
CREATE TABLE IF NOT EXISTS rpt_invest_online_prod_rate_mon (
    date_type           varchar(2)    NOT NULL DEFAULT ''  COMMENT '日期类型',
    product_cd          varchar(2)    NOT NULL DEFAULT ''  COMMENT '产品代码',
    all_pro_intpc_cnt   int(15)       NOT NULL DEFAULT 0   COMMENT '全流程进件量',
    all_pro_intpc_rate  decimal(18,2) NOT NULL DEFAULT 0   COMMENT '全流程进件量占比',
    manual_invest_cnt   int(15)       NOT NULL DEFAULT 0   COMMENT '人工信审进件量',
    manual_invest_rate  decimal(18,2) NOT NULL DEFAULT 0   COMMENT '人工信审进件量占比',
    data_dt             varchar(16)   NOT NULL DEFAULT ''  COMMENT '数据日期',
    PRIMARY KEY (date_type, product_cd, data_dt)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='报表名称';
-- report name ("报表名称")

-- Dimension table: rejection-channel code -> channel name.
-- Fixes: added the missing statement terminator and made the CREATE idempotent.
CREATE TABLE IF NOT EXISTS bcm_dim_rej_chnl_cd_ins (
    rej_chnl_cd  varchar(22) NOT NULL DEFAULT '' COMMENT '渠道代码',
    rej_chnl_nm  varchar(22) NOT NULL DEFAULT '' COMMENT '渠道名称',
    PRIMARY KEY (rej_chnl_cd, rej_chnl_nm)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='拒绝渠道代码';

-- Hive external staging table for the monthly report.
-- Partitioned by date type and date so each run can overwrite its own slice;
-- EXTERNAL means DROP removes only metadata, the HDFS files survive.
drop table if exists rpt_invest_online_prod_rate_mon;
CREATE EXTERNAL TABLE rpt_invest_online_prod_rate_mon (
    date_type           string,
    product_cd          string,
    all_pro_intpc_cnt   int,
    all_pro_intpc_rate  decimal(18,2),
    manual_invest_cnt   int,
    manual_invest_rate  decimal(18,2),
    data_dt             string
)
PARTITIONED BY (
    dt_type string COMMENT 'dt_type',
    dt      string COMMENT 'by date'
)
ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
-- STORED AS TEXTFILE is Hive's shorthand for the TextInputFormat /
-- HiveIgnoreKeyTextOutputFormat pair the original spelled out.
STORED AS TEXTFILE
LOCATION 'hdfs://hadb/db/rpt_invest_online_prod_rate_mon';

-- Populate the dt_type='01' slice; the dt partition value is taken dynamically
-- from the last SELECT column. Fixes: one `set` per line, added the missing
-- statement terminator, comments translated/expanded. Logic unchanged.
set hive.exec.dynamic.partition=true;
set hive.exec.dynamic.partition.mode=nonstrict;

insert overwrite table rpt_invest_online_prod_rate_mon partition (dt_type='01', dt)
select
    T.date_type                -- date type (constant '01')
   ,T.product_cd               -- product code
   ,T.all_pro_intpc_cnt        -- full-process intake count
   -- Product's share of total intake.
   -- NOTE(review): both rate windows partition only by product_cd, so the
   -- ratio spans ALL dates in the range even though rows are per (product, dt).
   -- If a per-date share was intended, partition by product_cd, data_dt over
   -- partition by data_dt — confirm with the report owner.
   -- (Hive yields NULL, not an error, when the grand total is 0.)
   ,sum(T.all_pro_intpc_cnt) over (partition by T.product_cd)
      / sum(T.all_pro_intpc_cnt) over () as all_pro_intpc_rate
   ,T.manual_invest_cnt        -- manual credit-review intake count
   ,sum(T.manual_invest_cnt) over (partition by T.product_cd)
      / sum(T.manual_invest_cnt) over () as manual_invest_rate
   ,T.data_dt                  -- data date (stored column)
   ,T.data_dt as dt            -- dynamic partition value
from (
    select
        '01' as date_type
       ,t1.product as product_cd
       -- intake rows whose creation date equals their partition date
       ,sum(case when substr(t1.create_time,1,10) = t1.dt then 1 else 0 end) as all_pro_intpc_cnt
       -- subset of those with a manual reviewer assigned (guards the literal
       -- string 'null' and empty string that appear in the raw data)
       ,sum(case when substr(t1.create_time,1,10) = t1.dt
                  and t3.chushen_userid is not null
                  and t3.chushen_userid <> 'null'
                  and t3.chushen_userid <> ''
             then 1 else 0 end) as manual_invest_cnt
       ,t1.dt as data_dt
    from (
        select into_id, id, product, create_time, dt
        from v_eagle_jsd_intopieces
        where dt between '2018-04-07' and '2018-05-21'
          and product in ('1001','1002','1004','1201','1202','1301')
    ) t1  -- intake rows
    left join (
        select distinct ip_id, chushen_userid, dt
        from eagle_jsd_riskcon
        where dt between '2018-04-07' and '2018-05-21'
    ) t3  -- risk-control rows
      on t3.ip_id = t1.id and t1.dt = t3.dt
    group by t1.product, t1.dt
) T;
# Export the Hive result and load it into the MySQL report table.
# Fixes: the "1\"…"4\" step markers were not valid shell syntax (now comments);
# deprecated "hadoop dfs" replaced by "hdfs dfs"; mysql-session statements moved
# into comments; the last line had a missing space before -u.

# Step 1: merge all dt=* partition files under dt_type=01 into one local file.
hdfs dfs -getmerge /rmdb/rpt_invest_online_prod_rate_mon/dt_type=01/dt* /home/etl_ket_azk/app/dt/aaa1.txt

# Step 2: connect to MySQL.
# SECURITY: a plaintext password after -p is visible in `ps` output and shell
# history; prefer a bare -p (interactive prompt) or a ~/.my.cnf option file.
mysql -h 10.100.22.23 -u etl_prod -petl_5IB3

# Steps 3-4: run inside the mysql session, not in the shell:
#   use report_strategy;
#   load data local infile '/home/etl_ket_azk/app/dt/aaa1.txt'
#     into table report_strategy.rpt_invest_online_prod_rate_mon
#     FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';

# Alternative connection (NOTE(review): 10.100.22.1111 is not a valid IPv4
# address — presumably 10.100.22.111; verify before use).
mysql -h 10.100.22.1111 -u root -proot

上一篇下一篇

猜你喜欢

热点阅读