
Hands-on Data Analysis with R

2019-03-18  Oodelay
Data types in R

Adapted from Data Mining with R (DMwR)

Data description: river water samples were collected over the course of one year, and their chemical properties were measured together with the frequencies of seven harmful algae (a1-a7).
Goal: build models that predict the algae and identify the factors that influence them.

1. Data preparation

rm(list = ls()) # clear any leftover objects so we start with a clean workspace

if(!require('DMwR')){
  install.packages('DMwR')
  library('DMwR')
}

data(algae)

# take a first look at the data set
head(algae)
summary(algae)
str(algae)
Figure 1
Figure 2
Figure 1 shows that the data contain missing values and outliers.
Figure 2 shows that three variables ('season', 'size', 'speed') are factors and the rest are numeric.

2. Exploratory visualization

2.1 Checking each variable for irregular values
Almost every variable has outliers, most of them unusually large values.

par(mfrow = c(3,5)) # split the plotting area into a 3 x 5 grid so several plots share one device
for(i in 4:18){ # numeric variables only (columns 4-18)
  boxplot(algae[,i],xlab = names(algae)[i])
}
par(mfrow = c(1,1)) # restore the default layout
Figure 3

2.2 Correlations between variables

library('PerformanceAnalytics')
chart.Correlation(algae[,4:18], histogram=TRUE,pch='+') # 'Figure 4'


library('dplyr')
library('Hmisc')

# compute pairwise correlations and their significance for the numeric variables; unlike 'chart.Correlation', this returns the correlation values themselves
res <- algae[,4:18] %>% as.matrix() %>% rcorr(type = 'pearson') 
library('corrplot')
corrplot(res$r, p.mat = res$P, type = "upper", order = "hclust", tl.col = "black", tl.srt = 45) # 'Figure 5'
Figure 4
Figure 5
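Unlike the plot, the matrices returned by rcorr() can be reshaped into a flat table of variable pairs, which makes it easy to list the strongly correlated ones. A minimal sketch (the 0.7 cutoff is an arbitrary choice for illustration):

# turn the correlation (res$r) and p-value (res$P) matrices into one row per variable pair
flat_cor <- function(r, p) {
  ut <- upper.tri(r)
  data.frame(var1 = rownames(r)[row(r)[ut]],
             var2 = colnames(r)[col(r)[ut]],
             r = r[ut], p = p[ut])
}
cor_tab <- flat_cor(res$r, res$P)
subset(cor_tab, abs(r) > 0.7 & p < 0.05) # 0.7 is an arbitrary cutoff; e.g. the oPO4-PO4 pair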

2.3 Correlation between a pair of variables
As seen above, "oPO4" and "PO4" are highly correlated, with r = 0.91.

library("ggpubr")
ggscatter(algae, x = "oPO4", y = "PO4", 
          facet.by = 'season',scales ='free', # comment this line out to show the overall (unfaceted) relationship
          size = 3,color = 'black', # point appearance
          add.params = list(color = 'red', lty = 2, lwd = 2), # appearance of the fitted line
          add = "reg.line", conf.int = TRUE,  # add the regression line and its confidence band
          cor.coef = TRUE, cor.method = "pearson",cor.coef.size = 5, # print the correlation coefficient on the plot
          xlab = "oPO4", ylab = "PO4")
Figure 6

2.4 Distribution of individual variables

library('car')
par(mfrow=c(1,2))
hist(algae$mxPH, prob=T, xlab='', main='Histogram of maximum pH value',ylim=0:1) # histogram on the density scale
lines(density(algae$mxPH,na.rm=T),col='blue') # overlay a kernel density curve
rug(jitter(algae$mxPH),col = 'blue') # add a rug along the x axis showing how densely the values are packed
qqPlot(algae$mxPH,main='Normal QQ plot of maximum pH',col.lines = 'purple') # normal QQ plot in the second panel
par(mfrow=c(1,1))
Figure 7
boxplot(algae$oPO4,ylab='Orthophosphate (oPO4)',notch = T)
rug(jitter(algae$oPO4),side=2) # side = 2 draws the rug along the y axis
abline(h=mean(algae$oPO4,na.rm=T),lty=2,col = 'red') # mean line
abline(h=median(algae$oPO4,na.rm=T),lty=2,col = 'blue') # median line
abline(h=quantile(algae$oPO4,0.25,na.rm=T),lty=2,col = 'pink') # first-quartile line
abline(h=quantile(algae$oPO4,0.75,na.rm=T),lty=2,col = 'purple') # third-quartile line
Figure 8
plot(algae$NH4,xlab='')
abline(h=mean(algae$NH4,na.rm=T),lty=1,col = 'red')
abline(h=mean(algae$NH4,na.rm=T)+sd(algae$NH4,na.rm=T),lty=2,col= 'green')
abline(h=median(algae$NH4,na.rm=T),lty=3,col='blue')
# pick outliers by hand: click points on the plot and identify() returns their row indices
identify(algae$NH4) 
algae[c(20,34,35,88,89,133,153),'NH4']
Figure 9
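identify() needs an interactive graphics device. If you only want the row numbers, a simple programmatic rule such as "more than three standard deviations above the mean" flags the most extreme NH4 values (the threshold is an arbitrary choice for illustration):

# flag NH4 values lying more than 3 standard deviations above the mean (arbitrary threshold)
nh4.out <- which(algae$NH4 > mean(algae$NH4, na.rm = TRUE) + 3 * sd(algae$NH4, na.rm = TRUE))
nh4.out
algae[nh4.out, 'NH4']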
library(lattice)
bwplot(size ~ a1, data=algae,ylab='River Size',xlab='Algal A1')

library(Hmisc)
bwplot(size ~ a1, data=algae,panel=panel.bpplot, 
       probs=seq(.01,.49,by=.01), datadensity=TRUE,
       ylab='River Size',xlab='Algal A1')

The left plot makes the outliers easy to spot; the right plot shows how concentrated the data are over different value ranges.


Figure 10
histogram(~ mxPH | season,data=algae)

algae$season <- factor(algae$season,levels=c('spring','summer','autumn','winter'))

histogram(~ mxPH | size*speed,data=algae) 

stripplot(size ~ mxPH | speed, data=algae, jitter=T)
minO2 <- equal.count(na.omit(algae$mnO2),
                     number=3,overlap=1/10) # number sets how many intervals, overlap how much adjacent intervals overlap at their boundaries

stripplot(season ~ a3|minO2,scales = 'free',
          data=algae[!is.na(algae$mnO2),],
          strip = strip.custom(strip.names = TRUE, strip.levels = TRUE))
Figure 11

3. Handling the missing values in the data

3.1 Where the missing values are

library('mice')
md.pattern(algae,rotate.names = T) # visualize the missing-data pattern (missing cells are shown in red)
# count the missing values per row and per column
apply(algae,1,function (x) sum(is.na(x)))
apply(algae,2,function(x) sum(is.na(x)))
Figure 12

3.2 Dropping missing values directly (suitable when they are only a small fraction of the data)

# rows that contain missing values
algae.na <- algae[!complete.cases(algae),]
md.pattern(algae.na,rotate.names = T)
nrow(algae.na)

# rows with no missing values
algae.clean <- na.omit(algae) # equivalent to algae <- algae[complete.cases(algae),]
md.pattern(algae.clean,rotate.names = T)
nrow(algae.clean)

# set a threshold and return the rows whose share of missing values exceeds it
manyNAs(algae,0.2)
algae <- algae[-manyNAs(algae),]
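manyNAs() is a DMwR helper; roughly the same row indices can be obtained in base R by counting NAs per row (the exact rounding rule inside manyNAs may differ slightly). Run after the removal above, the check should come back empty:

# base-R sketch of the manyNAs() idea: rows whose share of NAs exceeds 20%
na.share <- rowSums(is.na(algae)) / ncol(algae)
which(na.share > 0.2) # integer(0) now that those rows have been dropped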

# fill a variable with the mean or median of its observed values
algae[48,'mxPH'] <- mean(algae$mxPH,na.rm=T) # a single row
algae[is.na(algae$Chla),'Chla'] <- median(algae$Chla,na.rm=T) # all rows where Chla is missing

3.3 Rule-based imputation

library('dplyr')
algae.Imp <- algae %>% impute(fun = median) # alternatives: fun = mean, or a constant such as fun = 0.4
algae.ctrl <- algae %>% centralImputation() # numeric variables get the median, factor variables the mode
algae.knn <- algae %>% knnImputation(k = 5, meth = 'weighAvg') # find the k nearest cases by Euclidean distance and take a weighted average
library('randomForest')
algae.rf <- rfImpute(season ~ .,algae,iter = 6, ntree = 300) # the response (season) must contain no NAs
algae <- algae[-manyNAs(algae),]
# show the correlations symbolically; 'oPO4' and 'PO4' stand out as highly correlated
algae[,4:18] %>% cor(use = 'complete.obs') %>% symnum()
lm(PO4 ~ oPO4,data=algae) # regress PO4 on oPO4 and use the fitted coefficients to fill in missing PO4 values
algae[28,'PO4'] <- 42.897 + 1.293 * algae[28,'oPO4']

data(algae)
algae <- algae[-manyNAs(algae),]
fillPO4 <- function(oP) {
   if (is.na(oP)) 
     return(NA)
   else 
     return(42.897 + 1.293 * oP)
}
algae[is.na(algae$PO4),'PO4'] <- 
    sapply(algae[is.na(algae$PO4),'oPO4'],fillPO4)
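The 42.897 and 1.293 used above are the intercept and slope of lm(PO4 ~ oPO4) copied by hand. A sketch that reads them from the fitted model instead, so the fill rule stays in sync with the data:

# refit the regression and reuse its coefficients instead of hard-coding them
po4.lm <- lm(PO4 ~ oPO4, data = algae)
cf <- coef(po4.lm) # cf[1] = intercept, cf[2] = slope
na.po4 <- is.na(algae$PO4) & !is.na(algae$oPO4)
algae[na.po4, 'PO4'] <- cf[1] + cf[2] * algae[na.po4, 'oPO4']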

4. Cluster analysis

# clear the workspace and reload the data
rm(list = ls())
require('DMwR')
data("algae")

4.1 Data preparation and a first look at clustering

# cluster analysis is extremely sensitive to missing values, so impute them first with k-nearest neighbours
algae.knn <- algae %>% knnImputation(k = 10, meth = 'weighAvg') # find the 10 nearest cases by Euclidean distance and take a weighted average

# get a rough idea of how many clusters the data support
if(!require('factoextra')){
  install.packages('factoextra')
  library('factoextra')
}
fviz_nbclust(algae.knn[,-c(1:3)],kmeans,method = 'wss') +  # 'wss': within sum of squares
  geom_vline(xintercept = 4, lty = 2)

A first look suggests about 4 clusters.

Figure 13
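fviz_nbclust() with method = 'wss' is, in essence, plotting the total within-cluster sum of squares that kmeans() reports for each candidate number of clusters. The same curve can be computed by hand, which makes the elbow criterion explicit (a minimal sketch):

# total within-cluster sum of squares for k = 1..10, the quantity behind the elbow plot
set.seed(1234)
wss <- sapply(1:10, function(k)
  kmeans(algae.knn[,-c(1:3)], centers = k, nstart = 25)$tot.withinss)
plot(1:10, wss, type = 'b', xlab = 'Number of clusters k',
     ylab = 'Total within-cluster sum of squares')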

4.2 Hierarchical clustering

dis = dist(algae.knn[,-c(1:3)],method = 'euclidean')
dis_hc = hclust(dis,method = 'ward.D2')

library('scales');show_col(hue_pal()(4)) # show some of ggplot2's default hue colours

fviz_dend(dis_hc, k=4, cex = 0.5, color_labels_by_k = T, rect = F,
          k_colors = c('#F8766D','#A3A500','#00BF7D','#00B0F6'))
# clearly the clustering is poor; outlying points distort it
Figure 14

4.3 k-means clustering (1)

library('ggfortify')
autoplot(kmeans(algae.knn[,4:18], 4),data = algae.knn, size = 3, label = F, 
         label.size = 4,frame = T,frame.type = 't') # again, the clustering is poor
Figure 15

4.4 k-means clustering (2)

# too many variables for this kind of display
# if(!require('GGally'))(
#   install.packages('GGally')
# )
# rest = kmeans(algae.knn[,-(1:3)],centers = 4)
# ggpairs(algae.knn[,-(1:3)], cex = 0.5,mapping = aes(color = as.character(rest$cluster)))
# again, the clustering is poor

# with many variables, compare the attribute profiles of the clusters from a global perspective
if(!require(flexclust)){
  install.packages("flexclust")
  library(flexclust)
}
clk4 <- cclust(algae.knn[,-c(1:3)], k=4);clk4
barchart(clk4,legend=TRUE)
# again, the clustering is poor, because outliers are present
Figure 16

5. Handling outliers

5.1 Capping (winsorizing) outliers
That is, set a lower and an upper limit for the data: values above the upper limit are replaced by the upper limit, and values below the lower limit by the lower limit.

# clear the workspace and reload the data
rm(list = ls())
require('DMwR')
data(algae)

# impute missing values with k-nearest neighbours
clean_algae = knnImputation(algae, k = 10)

# define a function that, for a numeric variable, replaces values below the 1st percentile with the 1st percentile and values above the 90th percentile with the 90th percentile
block = function(x,lower = T, upper = T){
  if(lower){
    q1 <- quantile(x,0.01)
    x[x<=q1] = q1
  }
  if(upper){
    q90 <- quantile(x,0.90)
    x[x>=q90] = q90
  }
  return(x)
}
# apply the capping function to every numeric variable
clean_algae_blk = sapply(clean_algae[,-c(1:3)],block) %>% cbind(clean_algae[,1:3],.)

5.2 Re-examining the distributions after capping the outliers

library('reshape2') # melt() comes from reshape2
library('ggplot2')
clean_algae_blk %>% melt() %>%
  ggplot(aes(NULL,value)) +
  labs(x = '',y ='') +
  geom_boxplot(aes(fill = variable),show.legend = F) +
  facet_wrap(variable ~. , scales = 'free_y')
Figure 17
if(!require('factoextra')){
  install.packages('factoextra')
  library('factoextra')
}
# after capping the outliers, the elbow plot suggests about 5 clusters
fviz_nbclust(clean_algae_blk[,-c(1:3)],kmeans,method = 'wss') +  # 'wss': within sum of squares
  geom_vline(xintercept = 5, lty = 2)

dis = dist(clean_algae_blk[,-c(1:3)],method = 'euclidean')
dis_hc = hclust(dis,method = 'ward.D2')

library('scales');show_col(hue_pal()(5)) # show some of ggplot2's default hue colours

# hierarchical clustering: after capping the outliers, the clusters are much cleaner
fviz_dend(dis_hc, k=5, cex = 0.5, color_labels_by_k = T, rect = F,
          k_colors = c('#F8766D','#A3A500','#00BF7D','#00B0F6','#E76BF3'))

# k-means clustering also improves markedly
library('ggfortify')
autoplot(kmeans(clean_algae_blk[,4:18], 5),data = clean_algae_blk, size = 3, label = F, 
         label.size = 4,frame = T,frame.type = 't')

# too many variables for this kind of display
# if(!require('GGally'))(
#   install.packages('GGally')
# )
# rest = kmeans(clean_algae_blk[,-(1:3)],centers = 5)
# ggpairs(clean_algae_blk[,-(1:3)], cex = 0.5,mapping = aes(color = as.character(rest$cluster)))

# with many variables, focus on the overall cluster profiles
if(!require(flexclust)){
  install.packages("flexclust")
  library(flexclust)
}
clk5 <- cclust(clean_algae_blk[,-c(1:3)], k=5);clk5
barchart(clk5,legend=TRUE)
Figure 18
###################################################
### Multiple Linear Regression
###################################################

data(algae)
algae <- algae[-manyNAs(algae), ]
clean.algae <- knnImputation(algae, k = 10)


lm.a1 <- lm(a1 ~ .,data=clean.algae[,1:12])
summary(lm.a1)
anova(lm.a1)


lm2.a1 <- update(lm.a1, . ~ . - season)
summary(lm2.a1)
anova(lm.a1,lm2.a1)


final.lm <- step(lm.a1)
summary(final.lm)
# check for collinearity: VIF values below 2 indicate weak collinearity among the predictors
vif(final.lm)
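As a reminder of what vif() reports: the VIF of a predictor is 1/(1 - R²), where R² comes from regressing that predictor on all the other predictors in the model. A sketch of the by-hand computation for the numeric predictors kept by step() (car::vif reports generalized VIFs for factor terms, so only the numeric ones are compared here):

# VIF by hand: regress each numeric predictor of the final model on the remaining predictors
preds <- attr(terms(final.lm), 'term.labels')
num.preds <- preds[sapply(clean.algae[preds], is.numeric)]
sapply(num.preds, function(v) {
  r2 <- summary(lm(reformulate(setdiff(preds, v), response = v),
                   data = clean.algae))$r.squared
  1 / (1 - r2) # compare with the corresponding entries of vif(final.lm)
})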


###################################################
### Regression Trees
###################################################
library(rpart)
data(algae)
algae <- algae[-manyNAs(algae), ]
rt.a1 <- rpart(a1 ~ .,data=algae[,1:12])

rt.a1
summary(rt.a1)

prettyTree(rt.a1,
           compress = T, # use a more compact layout
           branch = 1, # shape of the branches
           margin   = 0.05, # white space between the tree and the plot border
           cex = 0.8, # text size
           fwidth = 0.5, # width of the node boxes
           fheight = 0.5, # height of the node boxes
           center = 0.1) # position of the text within the boxes


printcp(rt.a1)


rt2.a1 <- prune(rt.a1,cp=0.01) # prune the tree: the larger cp is, the simpler the resulting tree
rt2.a1
prettyTree(rt2.a1)
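The cp = 0.01 used above is simply rpart's default. printcp() also reports, for every candidate cp, the cross-validated error (xerror) and its standard error (xstd), and a common way to choose the pruning point is the one-standard-error rule: keep the smallest tree whose xerror is within one xstd of the minimum. A sketch of that selection (this is essentially what rpartXse(), used below, automates):

# pick cp by the 1-SE rule from the cross-validation results stored in the tree
cp.tab <- rt.a1$cptable
best <- which.min(cp.tab[, 'xerror'])
thresh <- cp.tab[best, 'xerror'] + cp.tab[best, 'xstd']
cp.1se <- cp.tab[which(cp.tab[, 'xerror'] <= thresh)[1], 'CP']
rt.1se <- prune(rt.a1, cp = cp.1se)
prettyTree(rt.1se)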

# grow and post-prune the tree in one step with rpartXse
# set.seed(1234) # just to ensure the same results as in the book
# (rt.xse <- rpartXse(a1 ~ .,data=algae[,1:12]))
# prettyTree(rt.xse)

first.tree <- rpart(a1 ~ .,data=algae[,1:12])
prettyTree(first.tree)
# snip off the branches growing out of nodes 4 and 7; rpart numbers nodes so that the root is 1 and the children of node n are 2n and 2n+1
snip.tre <- snip.rpart(first.tree,c(4,7)) 
prettyTree(snip.tre)

prettyTree(first.tree)
snip.rpart(first.tree) # interactive: click a node in the plot twice and the branches below it are removed


###################################################
### Model Evaluation and Selection
###################################################
rm(list = ls())

data("algae")
algae <- algae[-manyNAs(algae), ]

clean.algae = knnImputation(algae,k = 10)
lm.a1 <- lm(a1 ~ .,data=clean.algae[,1:12])
final.lm = step(lm.a1)


rt.a1 <- rpart(a1 ~ .,data=algae[,1:12])


lm.predictions.a1 <- predict(final.lm,clean.algae)
rt.predictions.a1 <- predict(rt.a1,algae)

#mae: mean absolute error
(mae.a1.lm <- mean(abs(lm.predictions.a1-algae[,'a1'])))
(mae.a1.rt <- mean(abs(rt.predictions.a1-algae[,'a1'])))

#mse: mean squared error
(mse.a1.lm <- mean((lm.predictions.a1-algae[,'a1'])^2))
(mse.a1.rt <- mean((rt.predictions.a1-algae[,'a1'])^2))

#nmse: normalized mean squared error
(nmse.a1.lm <- mean((lm.predictions.a1-algae[,'a1'])^2)/
                mean((mean(algae[,'a1'])-algae[,'a1'])^2))
(nmse.a1.rt <- mean((rt.predictions.a1-algae[,'a1'])^2)/
                mean((mean(algae[,'a1'])-algae[,'a1'])^2))
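NMSE divides the model's MSE by the MSE of a baseline that always predicts the mean of the observed values, so scores below 1 mean the model improves on that trivial predictor, while scores near or above 1 mean it does not. A small helper keeps the repeated expressions above in one place (a sketch):

# normalized mean squared error: model MSE relative to the predict-the-mean baseline
nmse <- function(pred, obs) mean((pred - obs)^2) / mean((mean(obs) - obs)^2)
nmse(lm.predictions.a1, algae[,'a1'])
nmse(rt.predictions.a1, algae[,'a1'])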


regr.eval(algae[,'a1'],rt.predictions.a1,train.y=algae[,'a1'])


old.par <- par(mfrow=c(1,2))
plot(lm.predictions.a1,algae[,'a1'],main="Linear Model",
     xlab="Predictions",ylab="True Values")
abline(0,1,lty=2) #intercept = 0, slope = 1
plot(rt.predictions.a1,algae[,'a1'],main="Regression Tree",
     xlab="Predictions",ylab="True Values")
abline(0,1,lty=2)
par(old.par)



plot(lm.predictions.a1,algae[,'a1'],main="Linear Model",
     xlab="Predictions",ylab="True Values")
abline(0,1,lty=2)
algae[identify(lm.predictions.a1,algae[,'a1']),]


# algal frequencies cannot be negative, so truncate negative predictions at zero
sensible.lm.predictions.a1 <- ifelse(lm.predictions.a1 < 0,0,lm.predictions.a1)
regr.eval(algae[,'a1'],lm.predictions.a1,stats=c('mae','mse'))
regr.eval(algae[,'a1'],sensible.lm.predictions.a1,stats=c('mae','mse'))


# user-defined learner functions for experimentalComparison(): each fits a model on the
# training fold, predicts the test fold and returns the NMSE
cv.rpart <- function(form,train,test,...) {
  m <- rpartXse(form,train,...)
  p <- predict(m,test)
  mse <- mean((p-resp(form,test))^2)
  c(nmse=mse/mean((mean(resp(form,train))-resp(form,test))^2))
}
cv.lm <- function(form,train,test,...) {
  m <- lm(form,train,...)
  p <- predict(m,test)
  p <- ifelse(p < 0,0,p)
  mse <- mean((p-resp(form,test))^2)
  c(nmse=mse/mean((mean(resp(form,train))-resp(form,test))^2))
}


res <- experimentalComparison(
            c(dataset(a1 ~ .,clean.algae[,1:12],'a1')),
            c(variants('cv.lm'), 
              variants('cv.rpart',se=c(0,0.5,1))),
            cvSettings(3,10,1234)) # 3 repetitions of 10-fold cross-validation, seed 1234


summary(res)


plot(res)


getVariant('cv.rpart.v1',res)


DSs <- sapply(names(clean.algae)[12:18],
         function(x,names.attrs) { 
           f <- as.formula(paste(x,"~ ."))
           dataset(f,clean.algae[,c(names.attrs,x)],x) 
         },
         names(clean.algae)[1:11])

res.all <- experimentalComparison(
                  DSs,
                  c(variants('cv.lm'),
                    variants('cv.rpart',se=c(0,0.5,1))
                   ),
                  cvSettings(5,10,1234))


plot(res.all)


bestScores(res.all)


library(randomForest)
cv.rf <- function(form,train,test,...) {
  m <- randomForest(form,train,...)
  p <- predict(m,test)
  mse <- mean((p-resp(form,test))^2)
  c(nmse=mse/mean((mean(resp(form,train))-resp(form,test))^2))
}
res.all <- experimentalComparison(
                  DSs,
                  c(variants('cv.lm'),
                    variants('cv.rpart',se=c(0,0.5,1)),
                    variants('cv.rf',ntree=c(200,500,700))
                   ),
                  cvSettings(5,10,1234))


bestScores(res.all)


compAnalysis(res.all,against='cv.rf.v3',
               datasets=c('a1','a2','a4','a6'))



###################################################
### Predictions for the 7 algae
###################################################
bestModelsNames <- sapply(bestScores(res.all),
                          function(x) x['nmse','system'])
learners <- c(rf='randomForest',rpart='rpartXse') 
funcs <- learners[sapply(strsplit(bestModelsNames,'\\.'),
                        function(x) x[2])]
parSetts <- lapply(bestModelsNames,
                   function(x) getVariant(x,res.all)@pars)

bestModels <- list()
for(a in 1:7) {
  form <- as.formula(paste(names(clean.algae)[11+a],'~ .'))
  bestModels[[a]] <- do.call(funcs[a],
          c(list(form,clean.algae[,c(1:11,11+a)]),parSetts[[a]]))
}


clean.test.algae <- knnImputation(test.algae,k=10,distData=algae[,1:11])


preds <- matrix(ncol=7,nrow=140)
for(i in 1:nrow(clean.test.algae)) 
  preds[i,] <- sapply(1:7,
                      function(x) 
                        predict(bestModels[[x]],clean.test.algae[i,])
                     )


# NMSE of each model on the test set, using the training-set means as the baseline predictor
avg.preds <- apply(algae[,12:18],2,mean)
apply( ((algae.sols-preds)^2),2,mean) / apply( (scale(algae.sols,avg.preds,F)^2),2,mean)