
Implementing Gradient Descent for Multivariate Linear Regression in Python (Part 2)

2018-05-03  MambaHJ


Data Standardization

  1. First, take the three columns we need as the training set (the min-max scaling loop at the end is explained below):
trainData = cReader[['mpg','displacement','acceleration']].copy()
trainData.insert(0,'ones',1)          # bias column of ones for the intercept term
cols = trainData.shape[1]
X = trainData.iloc[:,0:cols-1]        # features: ones, mpg, displacement
Y = trainData.iloc[:,cols-1:cols]     # target: acceleration
X = np.mat(X.values)
Y = np.mat(Y.values)
for i in range(1,3):                  # min-max scale the two feature columns
    X[:,i] = (X[:,i] - X[:,i].min()) / (X[:,i].max() - X[:,i].min())
Y[:,0] = (Y[:,0] - Y[:,0].min()) / (Y[:,0].max() - Y[:,0].min())
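
The loop above can also be written without per-column indexing. This vectorized variant is my own sketch (not from the original post), assuming the same np.mat layout:

mins = X[:,1:3].min(axis=0)                    # columnwise minima of the feature columns
maxs = X[:,1:3].max(axis=0)
X[:,1:3] = (X[:,1:3] - mins) / (maxs - mins)   # elementwise ops broadcast row-wise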

Print the first five rows of trainData:


(Figure: the first five rows of trainData)

Code Notes

Generally speaking, we don't need to center or standardize the data when doing linear regression. In most cases the features come in different measurement units, and centering or standardizing them does not change the result of the regression, because the estimated parameters β appropriately convert the units of each explanatory variable x into the units of the response variable y. Standardizing the data does, however, make the results more interpretable: if, say, β1 = 0.6 and β2 = 0.3, we can read this as the first feature being twice as important as the second.
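
To make the unit-conversion point concrete, here is a small self-contained sketch (my own example, not from the original post; it uses only NumPy) showing that min-max scaling a feature rescales its coefficient while the fitted predictions stay identical:

import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(0, 50, size=100)               # feature in raw units
y = 3.0*x + 5.0 + rng.normal(0, 1, 100)        # response

A = np.column_stack([np.ones(100), x])
beta_raw = np.linalg.lstsq(A, y, rcond=None)[0]

x_std = (x - x.min()) / (x.max() - x.min())    # min-max scaled copy of the feature
A_std = np.column_stack([np.ones(100), x_std])
beta_std = np.linalg.lstsq(A_std, y, rcond=None)[0]

# The slope absorbs the unit change: beta_std[1] equals beta_raw[1]*(x.max()-x.min()),
# yet both models make the same predictions.
print(np.allclose(A @ beta_raw, A_std @ beta_std))   # True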

The last three lines of the code above standardize the data with the following formula:


x* = (x − min(x)) / (max(x) − min(x))   (min-max, a.k.a. deviation, standardization)

Running the Regression

  2. First, compute the regression coefficients with least squares as a baseline, and then give the cost function used by gradient descent in code:
theta_n = (X.T*X).I*X.T*Y   # normal equation: closed-form least-squares solution
print(theta_n)

Printed result: [[ 0.58007057] [-0.0378746 ] [-0.35473364]] — these are θ0, θ1, θ2 respectively.
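
In math form, that one line is the normal equation:

$$\hat{\theta} = (X^{\top}X)^{-1}X^{\top}Y$$

It solves the least-squares problem in closed form, with no learning rate or iteration count, which makes it a useful baseline to check the gradient-descent result against.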

  3. Define the cost function:
import pandas as pd
import numpy as np

# Cost function costFunc: halved mean squared error over the m training samples
def costFunc(X,Y,theta):
    inner = np.power((X*theta.T)-Y,2)
    return np.sum(inner)/(2*len(X))
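
Written out, the function computes

$$J(\theta) = \frac{1}{2m}\sum_{i=1}^{m}\left(x^{(i)}\theta^{\top} - y^{(i)}\right)^{2}$$

where m = len(X) is the number of samples and x^(i) is the i-th row of X (including the bias column of ones). The 1/2 factor simply cancels the 2 that appears when differentiating.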
  4. The gradient descent algorithm. Before the implementation, a quick visual intuition: the snippet below plots the parabola y = (x − 20)² together with its tangent lines at x = 10 (slope −20) and x = −30 (slope −100); stepping against the sign of the slope always moves downhill toward the minimum.
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(-100,100,200)     # dense grid so the parabola renders smoothly
y = (x-20)**2                     # the function being minimized
k = 2*(x-20)                      # its derivative (the gradient in one dimension)
y1 = -20*(x-10)+100               # tangent line at x = 10  (slope k = -20)
y2 = -100*(x+30)+2500             # tangent line at x = -30 (slope k = -100)

plt.plot(x,y)
plt.plot(x,y1,label='k=-20')
plt.plot(x,y2,label='k=-100')

leg = plt.legend(loc='upper right',ncol=1)

plt.show()
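
The slopes in the plot come directly from the derivative:

$$f(x) = (x-20)^{2},\qquad f'(x) = 2(x-20),\qquad f'(10) = -20,\quad f'(-30) = -100$$

The farther x is from the minimum at 20, the steeper the tangent, and stepping against the sign of the slope always moves downhill. Gradient descent applies exactly this idea to J(θ).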
def gradientDescent(X,Y,theta,alpha,iters):
    temp = np.mat(np.zeros(theta.shape))   # buffer so all θ_j update simultaneously
    cost = np.zeros(iters)                 # cost history, one entry per iteration
    thetaNums = int(theta.shape[1])        # number of parameters
    for i in range(iters):
        error = (X*theta.T-Y)              # prediction error under the current θ
        for j in range(thetaNums):
            derivativeInner = np.multiply(error,X[:,j])
            temp[0,j] = theta[0,j] - (alpha*np.sum(derivativeInner)/len(X))

        theta = temp
        cost[i] = costFunc(X,Y,theta)

    return theta,cost
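
Each pass of the inner loop applies the simultaneous-update rule

$$\theta_j := \theta_j - \alpha\,\frac{1}{m}\sum_{i=1}^{m}\left(x^{(i)}\theta^{\top} - y^{(i)}\right)x_j^{(i)}$$

where α is the learning rate and m = len(X). The temp buffer ensures every θ_j is computed from the same old θ before the whole vector is replaced.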
  5. Run the regression:
theta = np.mat([0,0,0])    # initialize θ at zero
iters = 100000             # number of iterations
alpha = 0.001              # learning rate
finalTheta,cost = lg.gradientDescent(X,Y,theta,alpha,iters)
print(finalTheta)
print(cost)
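
A fixed iteration count works, but as an alternative sketch (hypothetical — gradientDescentTol and tol below are my own names, not part of lgScript.py), you can stop early once the cost stops improving; it reuses costFunc from above:

def gradientDescentTol(X, Y, theta, alpha, iters, tol=1e-9):
    cost_prev = costFunc(X, Y, theta)
    for i in range(iters):
        error = X*theta.T - Y
        theta = theta - (alpha/len(X))*(error.T*X)   # vectorized form of the j-loop
        cost = costFunc(X, Y, theta)
        if abs(cost_prev - cost) < tol:              # improvement below tolerance: stop
            break
        cost_prev = cost
    return theta, cost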

x1 = np.linspace(X[:,1].min(),X[:,1].max(),100)
x2 = np.linspace(X[:,2].min(),X[:,2].max(),100)

x1,x2 = np.meshgrid(x1,x2)
f = finalTheta[0,0] + finalTheta[0,1]*x1 + finalTheta[0,2]*x2   # fitted plane

fig = plt.figure()
Ax = Axes3D(fig)
Ax.plot_surface(x1, x2, f, rstride=1, cstride=1, cmap=cm.viridis,label='prediction')

Ax.scatter(X[:100,1],X[:100,2],Y[:100,0],c='y')
Ax.scatter(X[100:250,1],X[100:250,2],Y[100:250,0],c='r')
Ax.scatter(X[250:,1],X[250:,2],Y[250:,0],c='b')

Ax.set_zlabel('acceleration')  # axis labels
Ax.set_ylabel('displacement')
Ax.set_xlabel('mpg')

plt.show()

finalTheta, i.e., the final θ found by gradient descent:

[[ 15.47017446   2.99065096  -3.31870705]]

The least-squares estimate, for comparison (it differs from the value in step 2 because the Y normalization is commented out in lgScript.py, so Y stays in its raw units):

[[ 17.74518558]
 [ -0.63629322]
 [ -5.95952521]]

The cost values over the iterations:

[ 124.67279846  124.37076615  124.06949161 ...,    2.77449658    2.77449481
    2.77449304]
fig, bx = plt.subplots(figsize=(8,6))
bx.plot(np.arange(iters), cost, 'r')   # cost history returned by gradientDescent
bx.set_xlabel('Iterations')
bx.set_ylabel('Cost')
bx.set_title('Error vs. Training Epoch')
plt.show()
(Figure: gradient descent cost curve)

As the figure shows, the cost keeps decreasing and its curve flattens out as the iterations accumulate, approaching the optimal solution.

  6. The complete linearRegression.py:
import pandas as pd
import numpy as np

# Cost function: halved mean squared error over the m training samples
def costFunc(X,Y,theta):
    inner = np.power((X*theta.T)-Y,2)
    return np.sum(inner)/(2*len(X))

# Batch gradient descent; returns the final θ and the cost history
def gradientDescent(X,Y,theta,alpha,iters):
    temp = np.mat(np.zeros(theta.shape))
    cost = np.zeros(iters)
    thetaNums = int(theta.shape[1])
    for i in range(iters):
        error = (X*theta.T-Y)
        for j in range(thetaNums):
            derivativeInner = np.multiply(error,X[:,j])
            temp[0,j] = theta[0,j] - (alpha*np.sum(derivativeInner)/len(X))

        theta = temp
        cost[i] = costFunc(X,Y,theta)

    return theta,cost
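
A minimal usage sketch for the module (a hypothetical standalone check of my own, not in the original post) on a tiny synthetic dataset where the exact answer is known:

import numpy as np
import linearRegression as lg

# y = 1 + 2*x1 + 3*x2 exactly, so the cost should approach zero
X = np.mat([[1, 0.0, 0.0],
            [1, 1.0, 0.0],
            [1, 0.0, 1.0],
            [1, 1.0, 1.0]])
Y = X * np.mat([[1.0], [2.0], [3.0]])

theta, cost = lg.gradientDescent(X, Y, np.mat([0.0, 0.0, 0.0]), 0.1, 5000)
print(theta)      # close to [[1. 2. 3.]]
print(cost[-1])   # near zero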
  7. The complete lgScript.py:
from io import StringIO
from urllib import request
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import ssl
import pandas as pd
import numpy as np
import linearRegression as lg

# Skip certificate verification so the UCI dataset can be fetched over HTTPS
ssl._create_default_https_context = ssl._create_unverified_context

names =["mpg","cylinders","displacement","horsepower",
        "weight","acceleration","model year","origin","car name"]

url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
s = request.urlopen(url).read().decode('utf8')

dataFile = StringIO(s)
cReader = pd.read_csv(dataFile,delim_whitespace=True,names=names)

ax = plt.subplot(111, projection='3d')  # create a 3D plot
ax.scatter(cReader["mpg"][:100],cReader["displacement"][:100],cReader["acceleration"][:100],c='y')
ax.scatter(cReader["mpg"][100:250],cReader["displacement"][100:250],cReader["acceleration"][100:250],c='r')
ax.scatter(cReader["mpg"][250:],cReader["displacement"][250:],cReader["acceleration"][250:],c='b')

ax.set_zlabel('acceleration')  # axis labels
ax.set_ylabel('displacement')
ax.set_xlabel('mpg')
plt.show()

plt.scatter(cReader["mpg"],cReader["displacement"])
plt.xlabel('mpg')
plt.ylabel('displacement')
plt.show()

trainData = cReader[['mpg','displacement','acceleration']].copy()
trainData.insert(0,'ones',1)

print(trainData.head(5))
cols = trainData.shape[1]
X = trainData.iloc[:,0:cols-1]
Y = trainData.iloc[:,cols-1:cols]
X = np.mat(X.values)
Y = np.mat(Y.values)

for i in range(1,3):
    X[:,i] = (X[:,i] - X[:,i].min()) / (X[:,i].max() - X[:,i].min())
print(X[:5,:3])
#Y[:,0] = (Y[:,0] - min(Y[:,0])) / (max(Y[:,0]) - min(Y[:,0]))   # left disabled: Y stays in raw units
print(Y[:5,0])

theta_n = (X.T*X).I*X.T*Y   # normal-equation baseline
print(theta_n)

theta = np.mat([0,0,0])    # initial θ
iters = 100000
alpha = 0.001

finalTheta,cost = lg.gradientDescent(X,Y,theta,alpha,iters)
print(finalTheta)
print(cost)

x1 = np.linspace(X[:,1].min(),X[:,1].max(),100)
x2 = np.linspace(X[:,2].min(),X[:,2].max(),100)


x1,x2 = np.meshgrid(x1,x2)
f = finalTheta[0,0] + finalTheta[0,1]*x1 + finalTheta[0,2]*x2

fig = plt.figure()
Ax = Axes3D(fig)
Ax.plot_surface(x1, x2, f, rstride=1, cstride=1, cmap=cm.viridis,label='prediction')

Ax.scatter(X[:100,1],X[:100,2],Y[:100,0],c='y')
Ax.scatter(X[100:250,1],X[100:250,2],Y[100:250,0],c='r')
Ax.scatter(X[250:,1],X[250:,2],Y[250:,0],c='b')

Ax.set_zlabel('acceleration')  # axis labels
Ax.set_ylabel('displacement')
Ax.set_xlabel('mpg')

plt.show()

fig, bx = plt.subplots(figsize=(8,6))
bx.plot(np.arange(iters), cost, 'r') 
bx.set_xlabel('Iterations') 
bx.set_ylabel('Cost') 
bx.set_title('Error vs. Training Epoch') 
plt.show()

Summary

This series is my notes and exercises from Andrew Ng's machine learning course. Since my regular coursework is heavy, updates will be irregular; the next post will probably implement logistic regression in Python. If anything here falls short, I'd be glad to discuss it.
