Introduction to Machine Learning with Python, Study Notes (6): Linear Models (Classification)



1. Binary classification

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import mglearn
# Suppress warnings to keep the output clean
import warnings
warnings.filterwarnings("ignore", category=Warning)
# Split the data into training and test sets
from sklearn.model_selection import train_test_split
# Two linear classification algorithms: LogisticRegression and LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC

X, y = mglearn.datasets.make_forge()

fig, axes = plt.subplots(1, 2, figsize=(10, 3))

for model, ax in zip([LinearSVC(), LogisticRegression()], axes):
    clf = model.fit(X, y)
    mglearn.plots.plot_2d_separator(clf, X, fill=False, eps=0.5,
                                    ax=ax, alpha=.7)
    mglearn.discrete_scatter(X[:, 0], X[:, 1], y, ax=ax)
    ax.set_title(clf.__class__.__name__)
    ax.set_xlabel("Feature 0")
    ax.set_ylabel("Feature 1")
    ax.legend()

[Figure: decision boundaries of LinearSVC and LogisticRegression on the forge dataset]
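Both classifiers predict by the sign of the linear score w·x + b, which scikit-learn exposes as decision_function. A minimal sketch, reusing clf (the LogisticRegression fitted last in the loop above):

scores = clf.decision_function(X[:5])   # linear scores w·x + b
print((scores > 0).astype(int))         # positive score -> class 1
print(clf.predict(X[:5]))               # same labels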
mglearn also provides a helper that illustrates how the regularization parameter C affects a LinearSVC: a small C means strong regularization and a smoother boundary, while a large C tries to classify every training point correctly.

mglearn.plots.plot_linear_svc_regularization()
[Figure: LinearSVC decision boundaries for three values of C]
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(
    cancer.data, cancer.target, stratify=cancer.target, random_state=42)
logreg = LogisticRegression().fit(X_train, y_train)
print("Training set score: {:.3f}".format(logreg.score(X_train, y_train)))
print("Test set score: {:.3f}".format(logreg.score(X_test, y_test)))
Training set score: 0.953
Test set score: 0.958
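For binary problems, LogisticRegression converts the linear score into a probability with the logistic sigmoid 1 / (1 + e^(-score)). A quick sketch checking this against predict_proba:

scores = logreg.decision_function(X_test[:5])
probs = 1 / (1 + np.exp(-scores))                     # logistic sigmoid
print(np.allclose(probs, logreg.predict_proba(X_test[:5])[:, 1]))  # True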
logreg100 = LogisticRegression(C=100).fit(X_train, y_train)
print("Training set score: {:.3f}".format(logreg100.score(X_train, y_train)))
print("Test set score: {:.3f}".format(logreg100.score(X_test, y_test)))
Training set score: 0.972
Test set score: 0.965
logreg001 = LogisticRegression(C=0.01).fit(X_train, y_train)
print("Training set score: {:.3f}".format(logreg001.score(X_train, y_train)))
print("Test set score: {:.3f}".format(logreg001.score(X_test, y_test)))
Training set score: 0.934
Test set score: 0.930
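As C shrinks, the regularization term dominates and the coefficients are pushed toward zero. A quick sketch comparing the L2 norm of the coefficient vectors of the three models fitted above:

for name, model in [("C=0.01", logreg001), ("C=1", logreg), ("C=100", logreg100)]:
    print(name, "||w|| = {:.2f}".format(np.linalg.norm(model.coef_)))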
plt.plot(logreg.coef_.T, 'o', label="C=1")
plt.plot(logreg100.coef_.T, '^', label="C=100")
plt.plot(logreg001.coef_.T, 'v', label="C=0.01")

# Label the x-axis ticks with the feature names
plt.xticks(range(cancer.data.shape[1]), cancer.feature_names, rotation=90)

# Draw a horizontal reference line at zero
plt.hlines(0, 0, cancer.data.shape[1])
plt.ylim(-5, 5)
plt.xlabel("Coefficient index")
plt.ylabel("Coefficient magnitude")
plt.legend()
plt.show()
[Figure: coefficients learned by logistic regression on the Breast Cancer dataset for C=0.01, 1, and 100]
for C, marker in zip([0.001, 1, 100], ['o', '^', 'v']):
    # penalty="l1" selects L1 regularization; the liblinear solver supports it
    # (newer scikit-learn versions default to lbfgs, which does not)
    lr_l1 = LogisticRegression(C=C, penalty="l1", solver="liblinear").fit(X_train, y_train)
    print("C={:.3f}".format(C))
    print("Training set score: {:.3f}".format(lr_l1.score(X_train, y_train)))
    print("Test set score: {:.3f}".format(lr_l1.score(X_test, y_test)))
    print("---------------------------------------")
    plt.plot(lr_l1.coef_.T, marker, label="C={:.3f}".format(C))

# Label the x-axis ticks with the feature names
plt.xticks(range(cancer.data.shape[1]), cancer.feature_names, rotation=90)

# Draw a horizontal reference line at zero
plt.hlines(0, 0, cancer.data.shape[1])
plt.ylim(-5, 5)
plt.xlabel("Coefficient index")
plt.ylabel("Coefficient magnitude")
plt.legend()
plt.show()
C=0.001
Training set score: 0.913
Test set score: 0.923
---------------------------------------
C=1.000
Training set score: 0.960
Test set score: 0.958
---------------------------------------
C=100.000
Training set score: 0.986
Test set score: 0.979
---------------------------------------
[Figure: coefficients of L1-penalized logistic regression for C=0.001, 1, and 100]
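Unlike the L2 penalty, L1 drives many coefficients to exactly zero, so the model effectively performs feature selection. A short sketch counting the features each of the three models above actually uses:

for C in [0.001, 1, 100]:
    lr_l1 = LogisticRegression(C=C, penalty="l1", solver="liblinear").fit(X_train, y_train)
    print("C={}: {} of {} features used".format(
        C, np.sum(lr_l1.coef_ != 0), cancer.data.shape[1]))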

2. Multiclass classification

A common way to extend a binary classification algorithm to the multiclass case is the one-vs.-rest approach: a separate binary classifier is trained for each class, separating that class from all the others. To make a prediction, every classifier scores the point, and the class whose classifier returns the highest score wins.

from sklearn.datasets import make_blobs
X, y = make_blobs(random_state=42)
mglearn.discrete_scatter(X[:, 0], X[:, 1], y)
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
plt.legend(["Class 0", "Class 1", "Class 2"])
plt.show()
[Figure: the two-dimensional blobs dataset with three classes]
linear_svm = LinearSVC().fit(X, y)
# coef_ has shape (3, 2): each row holds the coefficient vector for one of the
# three classes, and each column holds the coefficient for one of the two features
print("Coefficient shape: {}".format(linear_svm.coef_.shape))
# intercept_ has shape (3,): a one-dimensional array with one intercept per class
print("Intercept shape: {}".format(linear_svm.intercept_.shape))
Coefficient shape: (3, 2)
Intercept shape: (3,)
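With one coefficient vector and one intercept per class, the one-vs.-rest prediction is just the argmax over the three linear scores. A minimal sketch reproducing predict by hand:

scores = X @ linear_svm.coef_.T + linear_svm.intercept_   # shape (n_samples, 3)
manual_pred = np.argmax(scores, axis=1)                   # best-scoring class wins
print(np.all(manual_pred == linear_svm.predict(X)))       # True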
mglearn.discrete_scatter(X[:, 0], X[:, 1], y)
line = np.linspace(-15, 15)
# Each class's decision line satisfies coef[0]*x0 + coef[1]*x1 + intercept = 0;
# solving for x1 gives x1 = -(x0*coef[0] + intercept) / coef[1]
for coef, intercept, color in zip(linear_svm.coef_, linear_svm.intercept_, ['b', 'r', 'g']):
    plt.plot(line, -(line * coef[0] + intercept) / coef[1], color)
plt.xlim(-10, 8)
plt.ylim(-10, 15)
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
plt.legend(["Class 0", "Class 1", "Class 2", "Line class 0", "Line class 1",
            "Line class 2"], loc=(1.01, 0.3))
plt.show()
[Figure: decision lines of the three one-vs.-rest classifiers]
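Each line is the set of points where that class's score coef[0]*x0 + coef[1]*x1 + intercept equals zero. A quick sketch confirming this for class 0 at an arbitrary x0:

coef0, intercept0 = linear_svm.coef_[0], linear_svm.intercept_[0]
x0 = 0.0
x1 = -(x0 * coef0[0] + intercept0) / coef0[1]      # point on class 0's line
print(coef0[0] * x0 + coef0[1] * x1 + intercept0)  # ~0.0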
mglearn.plots.plot_2d_classification(linear_svm, X, fill=True, alpha=.3)
mglearn.discrete_scatter(X[:, 0], X[:, 1], y)
line = np.linspace(-15, 15)
# Same decision lines as above: x1 = -(x0*coef[0] + intercept) / coef[1]
for coef, intercept, color in zip(linear_svm.coef_, linear_svm.intercept_, ['b', 'r', 'g']):
    plt.plot(line, -(line * coef[0] + intercept) / coef[1], color)
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
plt.legend(["Class 0", "Class 1", "Class 2", "Line class 0", "Line class 1",
            "Line class 2"], loc=(1.01, 0.3))
plt.show()
[Figure: multiclass decision regions, with each point assigned to the class whose classifier scores highest]

3. Strengths, weaknesses, and parameters

The main parameter of linear models is the regularization strength: alpha for the regression models and C for LinearSVC and LogisticRegression. A large alpha or a small C means a simpler, more strongly regularized model. The other decision is the penalty: L1 yields a sparse model that uses only a few features (helpful for interpretability), while L2 is the usual default. Linear models are very fast to train and predict, scale to very large and sparse datasets, and are relatively easy to interpret, although in low-dimensional spaces more complex models may generalize better.
