
Pandas Data Processing and Feature Extraction

2018-05-11  leobupt

1. Purpose

Get familiar with data processing in pandas.
The operations below are somewhat involved; these are mainly notes for myself, so they are a bit rough.
If anything is unclear, leave a comment pointing it out and I will take some time to tidy it up.

1.1 Sample data:

User_id,Merchant_id,Action,Coupon_id,Discount_rate,Date_received,Date
13740231,18907,2,100017492,500:50,20160513,null
13740231,34805,1,null,null,null,20160321
14336199,18907,0,null,null,null,20160618
15137031,25104,2,100120354,150:50,20160613,null
15137031,44706,2,100071973,50:5,20160317,null

1.2 Field descriptions
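The meanings below are inferred from the sample above and from the code that follows (not an official data dictionary):

User_id: user ID
Merchant_id: merchant ID
Action: user action type (values 0, 1, 2; see the groupby examples below)
Coupon_id: coupon ID, 'null' when no coupon is involved
Discount_rate: either a plain discount rate or "x:y", meaning y off a purchase of at least x
Date_received: date the coupon was received, 'null' if none
Date: consumption date, 'null' if no purchase was made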

1.3 Environment setup

import pandas as pd
import numpy as np   # numpy is used in the missing-value and groupby sections below

df = pd.read_csv("example.csv")

2. Data processing

2.1 Column transformations

2.1.1 Single-column transformations

# Replace the literal string "null" in the Date column with 0
df['newDate'] = df.Date.replace("null", 0)

# Bucket the Date column by date
def mapDate(x):
    if x == "null":
        return 0
    elif int(x) <= 20160501:
        return 1
    else:
        return 2

df.Date.apply(mapDate)
# Flag rows whose Action value falls in [0, 15]
df['Action'].apply(lambda x: 1 if 0 <= x <= 15 else 0)
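The same bucketing can also be done without apply; a minimal vectorized sketch, assuming df has been loaded as above:

date_num = pd.to_numeric(df.Date, errors='coerce')   # "null" becomes NaN
df['dateBucket'] = np.where(date_num.isna(), 0,
                            np.where(date_num <= 20160501, 1, 2))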
# Convert Discount_rate to a single discount figure
def discountFuc(x):
    list_discount = x.split(":")
    if x == "null":
        return 0.1
    elif len(list_discount) == 2:
        return round(float(list_discount[1]) / float(list_discount[0]), 3)
    else:
        return float(list_discount[0])

df.Discount_rate.apply(discountFuc)
>>>
0    0.100
1    0.100
2    0.100
3    0.333
4    0.100
Name: Discount_rate, dtype: float64
# Spend threshold from an "x:y" style Discount_rate (assumes "null" rows were filtered out beforehand)
def setMax(x):
    l = x.split(":")
    if len(l) == 2:
        return float(l[0])
    else:
        return 100   # default threshold for plain-rate coupons

# Discount amount from an "x:y" style Discount_rate
def setMin(x):
    l = x.split(":")
    if len(l) == 2:
        return float(l[1])
    else:
        return 100 * (1 - float(l[0]))   # amount off a 100 purchase at the given rate
# Or split with vectorized string methods; rows without ":" get NaN in column 1
df.Discount_rate.str.split(':', expand=True)[1].astype('float32')
# Splitting one column into two, e.g. a column that looks like this:
df.head()
>>>
20180101:2010101
20180102:2010102

# Define the split function:
def splitD(x):
    if x == 'null':
        return 0, 0
    elif x.find(':') >= 0:
        maxValue = float(x.split(':')[0])
        minValue = float(x.split(':')[1])
    else:   # otherwise fall back to default values
        maxValue = 50
        minValue = 50 * float(x)
    return maxValue, minValue

# Apply the split as follows:
# Note: index=df.index keeps the new frame aligned with the original index, which makes later concat etc. easier
df[['maxValue', 'minValue']] = pd.DataFrame(df.Discount_rate.apply(splitD).values.tolist(), index=df.index, dtype='float32')
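An equivalent pattern, as a small sketch: if the applied function returns a pd.Series, the result expands into columns and keeps the original index automatically.

df[['maxValue', 'minValue']] = df.Discount_rate.apply(lambda x: pd.Series(splitD(x)))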

2.1.2 Combining two columns

df.User_id + df.Merchant_id
# e.g. return one value or another depending on a condition over several columns
df['result'] = df.apply(lambda x: x.User_id if x.Date == 'null' else x.Merchant_id, axis=1).astype('float32')
# Dividing one column by another; there may be N such pairs, so wrap it in a function to avoid repeating code:
def getRate(df, xStr, yStr):
    return df[yStr] / (df[xStr] + 0.01)   # the 0.01 avoids a zero denominator

# With the helper above, column-ratio features become a one-liner
getRate(df, 'col1', 'col2')
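A variant of the same helper, as a sketch, that leaves NaN instead of nudging the denominator by 0.01:

def getRateSafe(df, xStr, yStr):
    # zero denominators become NaN rather than being shifted
    return df[yStr] / df[xStr].replace(0, np.nan)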

2.2 Deduplication

df.nunique()
df.User_id.drop_duplicates().head()
# Equivalent to: select User_id, count(distinct Merchant_id) from t1 group by User_id
# First group by (User_id, Merchant_id), then count the distinct Merchant_id per User_id
(df.groupby(['User_id', 'Merchant_id'])['User_id'].count()
   .reset_index(name='count1')[['User_id', 'Merchant_id']]
   .groupby('User_id')['Merchant_id'].count()
   .reset_index(name='mid_count'))
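The same distinct count can be written more directly with nunique; a minimal sketch:

df.groupby('User_id')['Merchant_id'].nunique().reset_index(name='mid_count')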

2.3 Sorting

df.sort_values(by='User_id', ascending=False).head()
# reset_index(name=...) names the aggregated column, which can then be sorted on
(df[df['Date_received'].astype(str) != 'null']
   .groupby('User_id')['Date_received'].count()
   .reset_index(name='count')
   .sort_values(by='count', ascending=False).head())
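Sorting on several keys at once, a small illustrative sketch:

df.sort_values(by=['User_id', 'Merchant_id'], ascending=[False, True]).head()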

2.4 Missing values

df.iloc[2, 0] = np.nan
np.isnan(df.User_id).any()   # is there any NaN in the column?
np.isnan(df.User_id)
df_tmp[~df_tmp.columnname.isnull()]   # keep rows where columnname is not null
# One option is to train a model on the rows whose value is known and predict the missing ones;
# then fill them in, for example:
df.loc[(df.Age.isnull()), 'Age'] = predictAge

# More fill examples:
df2 = pd.DataFrame(np.random.randint(2, 10, 3))
df2.loc[0:1, 0] = 1090   # result: replaces the first two rows (label-based slicing is inclusive)
# df2.loc[0:2, 0] = 1090  # try this variant; result: replaces all three rows
# df2.loc[1, 0] = 1090    # try this variant; result: replaces only the row at label 1
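The built-in fill/drop helpers cover the simpler cases; a small sketch, reusing the maxValue column created in 2.1.1:

df['maxValue'] = df['maxValue'].fillna(df['maxValue'].median())   # fill NaN with the column median
df.dropna(subset=['maxValue'])                                    # or drop rows where the column is NaN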

2.5 Column dtype conversion

a = [['a', '1.2', '4.2'], ['b', '70', '0.03'], ['x', '5', '0']]
df = pd.DataFrame(a, columns=['one', 'two', 'three'])
df
>>>
  one  two three
0   a  1.2   4.2
1   b   70  0.03
2   x    5     0

df.dtypes
>>>
one      object
two      object
three    object
df[['two', 'three']] = df[['two', 'three']].astype(float)

# pandas also offered a whole-DataFrame conversion method (convert_objects, since deprecated and removed):
df2 = df.convert_objects(convert_numeric=True)
df2.dtypes
>>>
one       object
two      float64
three    float64
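convert_objects no longer exists in current pandas; a rough equivalent using pd.to_numeric, applied column by column, might look like this (a sketch, not the original code):

df2 = df.copy()
for col in ['two', 'three']:
    df2[col] = pd.to_numeric(df2[col], errors='coerce')   # values that cannot be parsed become NaN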
# Rename selected columns
df.rename(index=str, columns={0: 'click_times', 1: 'pay_times', 2: 'get_times'})

2.6 groupby

# Group by User_id and partValue
# Note: reset_index(name='count') names the grouped count column 'count'
df_new = (df[df.Coupon_id != 'null']
            .groupby(['User_id', 'partValue'])['Merchant_id'].count()
            .reset_index(name='count').head())
>>>
   User_id partValue  count
0        4      0-50      1
1       35      0-50      1
2       35   200-500      1
3       35    50-200      2
4       36      0-50      2

# Pivot the partValue values into columns, one column per value
df_new.pivot_table(values='count', index='User_id', columns='partValue', fill_value=0).head()
>>>
partValue  0-50  200-500  50-200
User_id
4             1        0       0
35            1        1       2
36            2        0       0
# After grouping by User_id, run three different aggregations on discountRate within each group
df_new = df.groupby('User_id')['discountRate'].agg({'discount_avg': np.average, 'discount_max': np.max, 'discount_min': np.min})
>>>
         discount_avg  discount_max  discount_min
User_id
4            0.833389      0.833389      0.833389
35           0.800128      0.900005      0.500500
36           0.791757      0.833389      0.750125
64           0.900010      0.900010      0.900010
110          0.900008      0.900010      0.900005
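Passing a renaming dict to Series.agg like this was deprecated in later pandas releases; the same result with named aggregation would look roughly like this:

df_new = df.groupby('User_id')['discountRate'].agg(
    discount_avg='mean', discount_max='max', discount_min='min')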

# Turn df_new's index back into a regular column
df_new.reset_index(level='User_id')
>>>
   User_id  discount_avg  discount_max  discount_min
0        4      0.833389      0.833389      0.833389
1       35      0.800128      0.900005      0.500500
2       36      0.791757      0.833389      0.750125
3       64      0.900010      0.900010      0.900010
4      110      0.900008      0.900010      0.900005

# Custom functions can also be used when aggregating
# Number of times a merchant's coupons were received but not redeemed: get_times_noconsume
def getTimesNoConsume(df):
    return df[df.Action == 2].Action.count()

# Number of times a merchant's coupons were received and then redeemed: get_times_consume
def getTimesConsume(df):
    return df[df.Action == 1].Action.count()

# These helpers look at a whole group, so apply them via groupby().apply();
# the original also referenced a getTimes (total receipts) helper, assumed to be defined along the same lines
df.groupby('Merchant_id').apply(lambda g: pd.Series({
    'get_times_noconsume': getTimesNoConsume(g),
    'get_times_consume': getTimesConsume(g)}))
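A roughly equivalent vectorized version of the same per-merchant counts, as a sketch:

pd.DataFrame({
    'get_times_noconsume': df[df.Action == 2].groupby('Merchant_id').size(),
    'get_times_consume': df[df.Action == 1].groupby('Merchant_id').size(),
}).fillna(0)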

2.7 Filtering

# Filter rows by index
df.filter(items=[184, 417, 687], axis=0)
# Filter rows by the values of a column
df[df.col1 > v]

# Filter columns whose names match a regex
# e.g. keep columns named "Survived" or starting with "Age_" or "Fare_"
df.filter(regex='Survived|Age_.*|Fare_.*')
df[~np.isnan(df.date_received)].count()
df.A[pd.notnull(df.B)]
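Two other filters that come up often, as a sketch: membership filtering with isin, and expression filtering with query.

df[df.Merchant_id.isin([18907, 25104])]   # keep rows whose Merchant_id is in a given set
df.query('User_id > 15000000')            # filter with a query expression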

2.8 Dropping columns

# Drop a column and return a new df
df.drop(['NewDistance'], axis=1)
# Delete a column from df in place
del df['NewDistance']
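Several columns can also be dropped in one call, e.g. reusing the columns created earlier (a sketch):

df.drop(columns=['maxValue', 'minValue'])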

2.9 Creating a DataFrame

dict_s0 = {'S': 10, 'C': 11, 'Q': 12, 'S1': 100}
dict_s1 = {'S': 1, 'C': 2, 'Q': 3}
s0 = pd.Series(dict_s0)
s1 = pd.Series(dict_s1)
# Building the DataFrame aligns the two Series on their index; the comparison below makes that clear
df2 = pd.DataFrame({'s0': s0, 's1': s1})
>>>
     s0   s1
C    11  2.0
Q    12  3.0
S    10  1.0
S1  100  NaN
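Other common constructors, for comparison (a sketch): from a list of records and from a dict of lists.

pd.DataFrame([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}])   # list of dicts, one per row
pd.DataFrame({'a': [1, 3], 'b': [2, 4]})             # dict of column lists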

3. Operations across multiple DataFrames

3.1 Joining tables: join | merge | concat

# df_list holds several DataFrames sharing the same key; join them all on User_id
from functools import reduce   # reduce needs an explicit import on Python 3

df_list = [df1, df2, df3, df4]
df_join_more = reduce(lambda left, right: pd.merge(left, right, on='User_id', how='left').fillna(0), df_list)
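When the goal is to stack rows rather than join on a key, pd.concat is the usual tool; a minimal sketch:

pd.concat([df1, df2], axis=0, ignore_index=True)   # stack rows and rebuild the index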

4. Factoring out repeated code

# Factor the repeated groupby/aggregate pattern into a helper
def getFeature2(df, groupby_col, agg_col, rename_col):
    return df.groupby(groupby_col)[agg_col].apply(np.average).reset_index(name=rename_col)

# Then just call it wherever needed
getFeature2(df, 'Coupon_id', 'date_received_week_day', 'avg_week_day')
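The same helper can be reused for other group/column pairs, for example (reusing the discountRate column from section 2.6):

getFeature2(df, 'Merchant_id', 'discountRate', 'avg_discount')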