Code for Assignment 2 of Andrew Ng's Machine Learning Course
2020-10-17
自学java的菜鸟小赵
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
path = './ex2data1.txt'
data = pd.read_csv(path, header=None, names=['Exam 1', 'Exam 2', 'Admitted'])
print(data.head())
'''
Plot the data
----------------------------------------------------
'''
plt.figure(figsize=(20,8),dpi=80)
# Select the rows where Admitted is 1 (positive) and 0 (negative)
positive = data[data['Admitted'].isin([1])]
negative = data[data['Admitted'].isin([0])]
plt.scatter(positive['Exam 1'], positive['Exam 2'], s=50, c='b', marker='o', label='Admitted')
plt.scatter(negative['Exam 1'], negative['Exam 2'], s=50, c='r', marker='x', label='Not Admitted')
plt.legend()
plt.xlabel('Exam 1 Score')
plt.ylabel('Exam 2 Score')
plt.show()
'''
Define the hypothesis of the logistic regression model:
h_\theta(x) = g(\theta^T x)
g(z) = \frac{1}{1 + e^{-z}}
so h_\theta(x) = \frac{1}{1 + e^{-\theta^T x}}
----------------------------------------------------
'''
# Define the sigmoid function g(z)
def sigmoid(z):
    return 1 / (1 + np.exp(-z))
# nums = np.arange(-10, 10, step=1)
# #print(nums)
# fig, ax = plt.subplots(figsize=(12,8))
# ax.plot(nums, sigmoid(nums), 'r')
# plt.show()
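A quick numeric check of sigmoid (this snippet is not part of the original post; the expected values follow directly from the definition of g(z)):
print(sigmoid(0))                    # 0.5
print(sigmoid(np.array([-10, 10])))  # close to [0, 1]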
'''
Write the cost function
J(\theta) = \frac{1}{m} \sum_{i=1}^{m} \left[ -y^{(i)} \log(h_\theta(x^{(i)})) - (1 - y^{(i)}) \log(1 - h_\theta(x^{(i)})) \right]
'''
def cost(theta, X, y):
    first = np.multiply(-y, np.log(sigmoid(X * theta.T)))
    second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))
    return np.sum(first - second) / (len(X))
# Add a column of ones so the intercept term is handled by the matrix multiplication
data.insert(0, 'Ones', 1)
# set X (training data) and y (target variable)
cols = data.shape[1]
X = data.iloc[:,0:cols-1]
y = data.iloc[:,cols-1:cols]
# convert to numpy arrays and initialize the parameter array theta
X = np.asmatrix(np.array(X.values))
y = np.asmatrix(np.array(y.values))
theta = np.asmatrix(np.zeros(3))
print(cost(theta, X, y))
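With theta initialized to all zeros, h_\theta(x) = 0.5 for every example, so the cost should evaluate to -log(0.5) = ln 2 ≈ 0.6931, which is a quick sanity check for the implementation.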
Output
0.6931471805599453
Gradient descent for the logistic regression model
'''
Gradient descent for the logistic regression model
'''
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import scipy.optimize as opt
path = './ex2data1.txt'
data = pd.read_csv(path, header=None, names=['Exam 1', 'Exam 2', 'Admitted'])
def sigmoid(z):
    return 1 / (1 + np.exp(-z))
def cost(theta, X, y):
    theta = np.asmatrix(theta)
    X = np.asmatrix(X)
    y = np.asmatrix(y)
    first = np.multiply(-y, np.log(sigmoid(X * theta.T)))
    second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))
    return np.sum(first - second) / (len(X))
'''
Formula: \frac{\partial J(\theta)}{\partial \theta_j} = \frac{1}{m} \sum_{i=1}^{m} \left( h_\theta(x^{(i)}) - y^{(i)} \right) x_j^{(i)}
Note that we do not actually perform gradient descent in this function; we only compute a single gradient step.
In the original exercise, an Octave function called "fminunc" is used to find the optimal parameters given
functions that compute the cost and the gradient.
Since we are using Python, we can do the same thing with SciPy's "optimize" namespace.
'''
def gradient(theta, X, y):
    theta = np.asmatrix(theta)
    X = np.asmatrix(X)
    y = np.asmatrix(y)
    parameters = int(theta.ravel().shape[1])
    grad = np.zeros(parameters)
    error = sigmoid(X * theta.T) - y
    for i in range(parameters):
        term = np.multiply(error, X[:, i])
        grad[i] = np.sum(term) / len(X)
    return grad
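The loop above fills in one partial derivative at a time. The same gradient can be written as a single matrix product, (1/m) * X^T (h_\theta(X) - y); here is a vectorized sketch under the same shape conventions as gradient() (this helper is not part of the original post):
def gradient_vectorized(theta, X, y):
    # Equivalent to gradient(): grad = (1/m) * X^T (sigmoid(X theta^T) - y)
    theta = np.asmatrix(theta)
    X = np.asmatrix(X)
    y = np.asmatrix(y)
    error = sigmoid(X * theta.T) - y                   # (m, 1) column of residuals
    return np.array((X.T * error).T / len(X)).ravel()  # 1-D array, like gradient()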
data.insert(0, 'Ones', 1)
# set X (training data) and y (target variable)
cols = data.shape[1]
X = data.iloc[:,0:cols-1]
y = data.iloc[:,cols-1:cols]
X = np.array(X.values)
y = np.array(y.values)
theta = np.zeros(3)
# Gradient evaluated at the initial all-zero parameters (a single gradient step, not full gradient descent)
print(gradient(theta, X, y))
# Search for the optimal parameters
result = opt.fmin_tnc(func=cost, x0=theta, fprime=gradient, args=(X, y))
print(result)
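fmin_tnc belongs to SciPy's older optimization API. The same search can also be expressed through the unified scipy.optimize.minimize interface; the call below is a sketch, not part of the original post, and assumes the TNC method is acceptable here:
res = opt.minimize(fun=cost, x0=theta, args=(X, y), method='TNC', jac=gradient)
print(res.x)  # optimized theta, comparable to result[0] above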
'''
We need to write a function that uses the learned parameters theta to produce predictions for a dataset X.
We can then use this function to score the training accuracy of our classifier.
Hypothesis of the logistic regression model:
h_\theta(x) = \frac{1}{1 + e^{-\theta^T x}}
When h_\theta(x) >= 0.5, predict y = 1.
When h_\theta(x) < 0.5, predict y = 0.
'''
def predict(theta, X):
    probability = sigmoid(X * theta.T)
    return [1 if x >= 0.5 else 0 for x in probability]
theta_min = np.asmatrix(result[0])
predictions = predict(theta_min, X)
print(predictions)
# Compare each prediction with the corresponding true label
correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0)) else 0 for (a, b) in zip(predictions, y)]
# Percentage of correct predictions, as an integer percent
accuracy = sum(correct) * 100 // len(correct)
print('accuracy = {0}%'.format(accuracy))
Output
[ -0.1 -12.00921659 -11.26284221]
(array([-25.16131872, 0.20623159, 0.20147149]), 36, 0)
[0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1]
accuracy = 89%
NIT NF F GTG
0 1 6.931471805599453E-01 2.71082898E+02
1 3 6.318123602631309E-01 7.89087138E-01
2 5 5.892425219642520E-01 7.39226619E+01
3 7 4.227823930158082E-01 1.85265695E+01
4 9 4.072926893605728E-01 1.68671152E+01
5 11 3.818854750866844E-01 1.07735074E+01
6 13 3.786234674835032E-01 2.31584968E+01
tnc: stepmx = 1000
7 16 2.389267462640103E-01 3.00821259E+00
8 18 2.047203869708881E-01 1.52226931E-01
9 20 2.046713881812257E-01 6.62493556E-02
10 22 2.035303165211531E-01 9.30777856E-04
tnc: fscale = 32.7776
11 24 2.035293524146259E-01 8.07260547E-06
12 26 2.035251117218610E-01 1.80205062E-04
13 28 2.034984109123940E-01 5.02855383E-04
14 30 2.034978382440665E-01 9.92013360E-06
15 32 2.034977907560623E-01 3.77753950E-06
16 34 2.034977388673018E-01 1.94755458E-05
17 36 2.034977015894743E-01 2.34969209E-13
tnc: |pg| = 1.47886e-08 -> local minimum
17 36 2.034977015894743E-01 2.34969209E-13
tnc: Local minima reach (|pg| ~= 0)
Regularized logistic regression
'''
Regularized logistic regression
'''
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import scipy.optimize as opt
from sklearn import linear_model
path = 'ex2data2.txt'
data2 = pd.read_csv(path, header=None, names=['Test 1', 'Test 2', 'Accepted'])
print(data2.head())
def sigmoid(z):
    return 1 / (1 + np.exp(-z))
# positive = data2[data2['Accepted'].isin([1])]
# negative = data2[data2['Accepted'].isin([0])]
# plt.figure(figsize=(20,8),dpi=80)
# plt.scatter(positive['Test 1'], positive['Test 2'], s=50, c='b', marker='o', label='Accepted')
# plt.scatter(negative['Test 1'], negative['Test 2'], s=50, c='r', marker='x', label='Rejected')
# plt.legend(loc=2)
# plt.xlabel('Test 1 Score')
# plt.ylabel('Test 2 Score')
# plt.show()
degree = 5
x1 = data2['Test 1']
x2 = data2['Test 2']
data2.insert(3, 'Ones', 1)
for i in range(1, degree):
    for j in range(0, i):
        data2['F' + str(i) + str(j)] = np.power(x1, i-j) * np.power(x2, j)
data2.drop('Test 1', axis=1, inplace=True)
data2.drop('Test 2', axis=1, inplace=True)
print(data2.head())
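With degree = 5, the double loop above creates the polynomial features F_ij = x1^(i-j) * x2^j for i = 1..4 and j = 0..i-1, i.e. x1, x1^2, x1*x2, x1^3, x1^2*x2, x1*x2^2, x1^4, x1^3*x2, x1^2*x2^2 and x1*x2^3. These ten terms plus the Ones column give the 11 parameters initialized in theta2 below. Because j never reaches i, no pure power of x2 is generated, which differs slightly from the mapFeature mapping used in the original Octave exercise.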
'''
Regularized cost function:
J(\theta) = \frac{1}{m} \sum_{i=1}^{m} \left[ -y^{(i)} \log(h_\theta(x^{(i)})) - (1 - y^{(i)}) \log(1 - h_\theta(x^{(i)})) \right] + \frac{\lambda}{2m} \sum_{j=1}^{n} \theta_j^2
'''
def costReg(theta, X, y, learningRate):
    theta = np.asmatrix(theta)
    X = np.asmatrix(X)
    y = np.asmatrix(y)
    first = np.multiply(-y, np.log(sigmoid(X * theta.T)))
    second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))
    # 'learningRate' plays the role of the regularization parameter lambda; theta[0] is not penalized
    reg = (learningRate / (2 * len(X))) * np.sum(np.power(theta[:,1:theta.shape[1]], 2))
    return np.sum(first - second) / len(X) + reg
'''
Regularized gradient, used to search for the optimal parameters theta
'''
def gradientReg(theta, X, y, learningRate):
    theta = np.asmatrix(theta)
    X = np.asmatrix(X)
    y = np.asmatrix(y)
    parameters = int(theta.ravel().shape[1])
    grad = np.zeros(parameters)
    error = sigmoid(X * theta.T) - y
    for i in range(parameters):
        term = np.multiply(error, X[:, i])
        if (i == 0):
            # The intercept term is not regularized
            grad[i] = np.sum(term) / len(X)
        else:
            grad[i] = (np.sum(term) / len(X)) + ((learningRate / len(X)) * theta[:, i])
    return grad
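As in the unregularized case, the loop can be replaced by one matrix expression; the sketch below is not part of the original post, assumes the same shapes as gradientReg(), and leaves the intercept out of the penalty:
def gradientReg_vectorized(theta, X, y, learningRate):
    theta = np.asmatrix(theta)
    X = np.asmatrix(X)
    y = np.asmatrix(y)
    error = sigmoid(X * theta.T) - y
    grad = np.array((X.T * error).T / len(X)).ravel()          # unregularized gradient
    reg = (learningRate / len(X)) * np.asarray(theta).ravel()  # penalty term
    reg[0] = 0                                                 # do not regularize the intercept term
    return grad + reg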
# set X and y (remember from above that we moved the label to column 0)
cols = data2.shape[1]
X2 = data2.iloc[:,1:cols]
y2 = data2.iloc[:,0:1]
# convert to numpy arrays and initialize the parameter array theta
X2 = np.array(X2.values)
y2 = np.array(y2.values)
theta2 = np.zeros(11)
# Regularization strength (called learningRate in the functions above)
learningRate = 1
print('costReg\n')
print(costReg(theta2, X2, y2, learningRate))
# Gradient step evaluated at the all-zero initial parameters
print('gradientReg\n')
print(gradientReg(theta2, X2, y2, learningRate))
'''
--------------------------------------------------------------------
'''
# Use opt.fmin_tnc to find the optimal parameters
result2 = opt.fmin_tnc(func=costReg, x0=theta2, fprime=gradientReg, args=(X2, y2, learningRate))
print('result2\n')
print(result2)
def predict(theta, X):
    probability = sigmoid(X * theta.T)
    return [1 if x >= 0.5 else 0 for x in probability]
theta_min = np.asmatrix(result2[0])
predictions = predict(theta_min, X2)
# zip pairs each prediction with the corresponding label
correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0)) else 0 for (a, b) in zip(predictions, y2)]
# Count the correct predictions and express them as an integer percentage
accuracy = sum(correct) * 100 // len(correct)
print('accuracy = {}%'.format(accuracy))
# For comparison, fit scikit-learn's logistic regression (from the linear_model module)
model = linear_model.LogisticRegression(penalty='l2', C=1.0)
model.fit(X2, y2.ravel())
print(model.score(X2, y2))
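model.score returns the mean accuracy on the training data, so it is directly comparable with the percentage printed above; 0.661 corresponds to about 66% of the training examples being classified correctly.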
Output
Test 1 Test 2 Accepted
0 0.051267 0.69956 1
1 -0.092742 0.68494 1
2 -0.213710 0.69225 1
3 -0.375000 0.50219 1
4 -0.513250 0.46564 1
NIT NF F GTG
0 1 6.931471805599454E-01 5.27766673E-03
1 6 6.236965134089864E-01 1.11875260E-04
tnc: fscale = 94.5438
2 11 6.226856678772246E-01 3.17597740E-07
3 14 6.226825655509072E-01 4.27912545E-09
tnc: fscale = 15287
4 19 6.226824391039327E-01 8.26318930E-12
tnc: fscale = 347877
tnc: |fn-fn-1] = 2.75217e-10 -> convergence
5 22 6.226824388287158E-01 3.19371100E-13
tnc: Converged (|f_n-f_(n-1)| ~= 0)
Accepted Ones F10 F20 ... F40 F41 F42 F43
0 1 1 0.051267 0.002628 ... 0.000007 0.000094 0.001286 0.017551
1 1 1 -0.092742 0.008601 ... 0.000074 -0.000546 0.004035 -0.029801
2 1 1 -0.213710 0.045672 ... 0.002086 -0.006757 0.021886 -0.070895
3 1 1 -0.375000 0.140625 ... 0.019775 -0.026483 0.035465 -0.047494
4 1 1 -0.513250 0.263426 ... 0.069393 -0.062956 0.057116 -0.051818
[5 rows x 12 columns]
costReg
0.6931471805599454
gradientReg
[0.00847458 0.01878809 0.05034464 0.01150133 0.01835599 0.00732393
0.00819244 0.03934862 0.00223924 0.01286005 0.00309594]
result2
(array([ 0.53010247, 0.29075567, -1.60725764, -0.58213819, 0.01781027,
-0.21329507, -0.40024142, -1.3714414 , 0.02264304, -0.95033581,
0.0344085 ]), 22, 1)
accuracy = 66%
0.6610169491525424