Machine Learning -- Supervised -- Linear Regression

2021-11-05  小贝学生信

Linear regression is arguably one of the simplest machine learning algorithms. Its most notable strength is interpretability, and it also lays the groundwork for understanding the more sophisticated algorithms that follow.

1. A simple understanding of linear regression
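
For context (standard notation, stated here for completeness rather than taken from the original post): simple linear regression models the response as a linear function of a single predictor, and ordinary least squares picks the coefficients that minimize the squared residuals:

$$\text{Sale\_Price}_i = \beta_0 + \beta_1\,\text{Gr\_Liv\_Area}_i + \varepsilon_i,$$

with $(\hat\beta_0, \hat\beta_1)$ chosen to minimize $\sum_i \varepsilon_i^2$.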

ames <- AmesHousing::make_ames()  # load the Ames housing data (assumes the AmesHousing package)
set.seed(123)                     # make the split reproducible
split <- rsample::initial_split(ames, prop = 0.7,
                                strata = "Sale_Price")
ames_train <- rsample::training(split)  # training set
ames_test  <- rsample::testing(split)   # test set
model1 <- lm(Sale_Price ~ Gr_Liv_Area, data = ames_train)
summary(model1)

# Call:
#   lm(formula = Sale_Price ~ Gr_Liv_Area, data = ames_train)
# 
# Residuals:   (1) distribution of the residuals (observed minus fitted) over all points
#   Min      1Q  Median      3Q     Max 
# -489196  -30736   -1944   21861  332565 
# 
# Coefficients: (2) estimated regression coefficients and their significance (p-values)
#   Estimate Std. Error t value Pr(>|t|)    
# (Intercept) 12183.914   3915.674   3.112  0.00189 ** 
#   Gr_Liv_Area   112.905      2.482  45.492  < 2e-16 ***
#   ---
#   Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
# #################### (3) residual standard error (RSE, an RMSE-like metric)
# Residual standard error: 56840 on 2047 degrees of freedom 
# Multiple R-squared:  0.5027,  Adjusted R-squared:  0.5025 
# F-statistic:  2070 on 1 and 2047 DF,  p-value: < 2.2e-16

names(summary(model1))
# [1] "call"          "terms"         "residuals"     "coefficients"  "aliased"      
# [6] "sigma"         "df"            "r.squared"     "adj.r.squared" "fstatistic"   
# [11] "cov.unscaled"

summary(model1)$r.squared
#[1] 0.502737

Use coef() to view the model coefficients:

coef(model1)
# (Intercept) Gr_Liv_Area 
# 12183.9139    112.9054

Use sigma() to extract the model's residual standard error (often reported as its RMSE):

sigma(model1)
# [1] 56835.05
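
As a sanity check on what sigma() reports: the residual standard error divides the residual sum of squares by the residual degrees of freedom (n − p), while a raw RMSE divides by n, so the two differ slightly. A minimal sketch using the fitted model1:

# RSE: sqrt(RSS / residual degrees of freedom) -- matches sigma(model1)
sqrt(sum(residuals(model1)^2) / df.residual(model1))
# raw RMSE: sqrt(mean squared residual) -- slightly smaller than the RSE
sqrt(mean(residuals(model1)^2))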

Use confint() to obtain 95% confidence intervals for the model coefficients:

confint(model1, level = 0.95) 
#             2.5 %     97.5 %
# (Intercept) 4504.7933 19863.0344
# Gr_Liv_Area  108.0382   117.7727

2. A more complex linear regression

model2 <- lm(Sale_Price ~ Gr_Liv_Area + Year_Built, data = ames_train)
model2  # printing the model produces the output below
# Call:
# lm(formula = Sale_Price ~ Gr_Liv_Area + Year_Built, data = ames_train)
# 
# Coefficients:
# (Intercept)  Gr_Liv_Area   Year_Built  
# -2.131e+06    9.536e+01    1.100e+03

The two predictors may influence each other, i.e., there may be an interaction effect; that is not modeled here, but a sketch of how to add one follows.
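
If an interaction were of interest, it can be added directly in the model formula; a minimal sketch using the same variables (model2_int is an illustrative name, not from the original post):

# `*` expands to both main effects plus their interaction term
model2_int <- lm(Sale_Price ~ Gr_Liv_Area * Year_Built, data = ames_train)
coef(model2_int)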

# include all available predictor variables
model3 <- lm(Sale_Price ~ ., data = ames_train)
broom::tidy(model3)
# # A tibble: 303 x 5
# term                                               estimate std.error statistic p.value
# <chr>                                                 <dbl>     <dbl>     <dbl>   <dbl>
#   1 (Intercept)                                       -9038908. 10336981.   -0.874   0.382 
# 2 MS_SubClassOne_Story_1945_and_Older                   3020.     3350.    0.901   0.368 
# 3 MS_SubClassOne_Story_with_Finished_Attic_All_Ages     7518.    10579.    0.711   0.477 
# 4 MS_SubClassOne_and_Half_Story_Unfinished_All_Ages    18045.    12498.    1.44    0.149 
# 5 MS_SubClassOne_and_Half_Story_Finished_All_Ages       8258.     5696.    1.45    0.147 
# 6 MS_SubClassTwo_Story_1946_and_Newer                   7284.     4907.    1.48    0.138 
# 7 MS_SubClassTwo_Story_1945_and_Older                  10683.     5523.    1.93    0.0532
# 8 MS_SubClassTwo_and_Half_Story_All_Ages               19066.    11001.    1.73    0.0832
# 9 MS_SubClassSplit_or_Multilevel                       -2217.    10282.   -0.216   0.829 
# 10 MS_SubClassSplit_Foyer                                 574.     6891.    0.0832  0.934 
# # ... with 293 more rows
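
The tidy output has 303 rows because lm() expands each factor (such as MS_SubClass) into dummy variables, one column per non-reference level. The size of the expanded design matrix can be checked with base R's model.matrix() (X is an illustrative name):

# rows = training observations; columns = intercept + numeric predictors + factor-level dummies
X <- model.matrix(Sale_Price ~ ., data = ames_train)
dim(X)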

3. Evaluating and comparing models with 10-fold cross-validation

library(caret)
set.seed(123) 
cv_model1 <- train(
  form = Sale_Price ~ Gr_Liv_Area,
  data = ames_train,
  method = "lm",
  trControl = trainControl(method = "cv", number = 10)
)
cv_model1$results
# intercept     RMSE  Rsquared      MAE   RMSESD RsquaredSD    MAESD
# 1      TRUE 56854.32 0.5103902 38802.52 4416.879 0.08540552 2272.675

# apply the model to the test set
predictions <- predict(cv_model1, ames_test)
RMSE(predictions, ames_test$Sale_Price)
#[1] 55821.4
predictions <- predict(model1, ames_test)
RMSE(predictions, ames_test$Sale_Price)
#[1] 55821.4

Note: the coefficient values of cv_model1 are exactly the same as those of the earlier model1; the difference is that the cross-validated RMSE, R², and related metrics are more representative of out-of-sample performance (verified below).
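
This can be checked directly: caret refits the chosen model on the full training set and stores it as finalModel, so the estimates can be compared side by side:

# the final refit kept by caret is an ordinary lm object
coef(cv_model1$finalModel)
coef(model1)  # identical estimates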

# model 2 CV
set.seed(123)
cv_model2 <- train(
  Sale_Price ~ Gr_Liv_Area + Year_Built,
  data = ames_train,
  method = "lm",
  trControl = trainControl(method = "cv", number = 10)
)

# model 3 CV
set.seed(123)
cv_model3 <- train(
  Sale_Price ~ .,
  data = ames_train,
  method = "lm",
  trControl = trainControl(method = "cv", number = 10)
)
summary(resamples(list(
  model1 = cv_model1,
  model2 = cv_model2,
  model3 = cv_model3
)))

# As the resamples summary above shows, model 3 performs best (lowest RMSE)
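
The original post displays this comparison as a figure; one way to reproduce such a plot is caret's lattice bwplot() method for resamples objects, sketched here with the same model list:

# box-and-whisker plot of the 10 per-fold RMSE values for each model
results <- resamples(list(model1 = cv_model1, model2 = cv_model2, model3 = cv_model3))
bwplot(results, metric = "RMSE")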

4. Assumptions of linear regression

Linear regression rests on several assumptions, including linear relationships, constant error variance, independent errors, more observations than predictors, and little or no multicollinearity. With 300+ dummy-expanded predictors, multicollinearity is a real concern here, which motivates the dimension-reduction regressions below (PCR and PLS).
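
Before turning to PCR and PLS, base R already provides the standard diagnostic plots for checking these assumptions (residuals vs. fitted, normal Q-Q, scale-location, residuals vs. leverage); a minimal sketch for model1:

# 2x2 panel of the standard lm diagnostic plots
par(mfrow = c(2, 2))
plot(model1)
par(mfrow = c(1, 1))  # reset the plotting layout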

4.1 PCR (principal components regression)

# perform 10-fold cross validation on a PCR model tuning the 
# number of principal components to use as predictors from 1-100
set.seed(123)
cv_model_pcr <- train(
  Sale_Price ~ ., 
  data = ames_train, 
  method = "pcr",
  trControl = trainControl(method = "cv", number = 10),
  preProcess = c("zv", "center", "scale"),
  tuneLength = 100
)

# model with lowest RMSE
cv_model_pcr$bestTune
##    ncomp
## 97    97
library(tidyverse)  # for %>% and pull()
# results for model with lowest RMSE
cv_model_pcr$results %>%
  dplyr::filter(ncomp == pull(cv_model_pcr$bestTune))
##   ncomp     RMSE  Rsquared      MAE   RMSESD RsquaredSD    MAESD
## 1    97 30135.51 0.8615453 20143.42 5191.887 0.03764501 1696.534

# plot cross-validated RMSE
ggplot(cv_model_pcr)

4.2 PLS (partial least squares)

Unlike PCR, PLS uses the response when constructing its components, so it typically needs fewer components to reach a given accuracy.

# perform 10-fold cross validation on a PLS model tuning the 
# number of principal components to use as predictors from 1-30
set.seed(123)
cv_model_pls <- train(
  Sale_Price ~ ., 
  data = ames_train, 
  method = "pls",
  trControl = trainControl(method = "cv", number = 10),
  preProcess = c("zv", "center", "scale"),
  tuneLength = 30
)

# model with lowest RMSE
cv_model_pls$bestTune
##    ncomp
## 20    20

# results for model with lowest RMSE
cv_model_pls$results %>%
  dplyr::filter(ncomp == pull(cv_model_pls$bestTune))
##   ncomp     RMSE  Rsquared      MAE   RMSESD RsquaredSD   MAESD
## 1    20 25459.51 0.8998194 16022.68 5243.478 0.04278512 1665.61

# plot cross-validated RMSE
ggplot(cv_model_pls)

5. Discussion of feature variables

5.1 Which features contribute most to the linear regression model?

library(vip)
#The importance measure is normalized from 100 (most important) to 0 (least important). 
vip(cv_model_pls, num_features = 20, method = "model")

5.2 Partial dependence plots (PDPs)

A PDP shows how the model's average prediction changes as a single feature varies across its range, holding the other features at their observed values.

pdp::partial(cv_model_pls, "Gr_Liv_Area", grid.resolution = 20, plot = TRUE)