大数据 / 机器学习 / 人工智能 · 模式识别/机器学习精华专题 · 机器学习和人工智能入门

《机器学习实战》第二章:k-近邻算法

2019-04-30  本文已影响3人  山雾幻华

k-近邻算法(kNN):采用测量不同特征值之间的距离方法进行分类

#导入库
import numpy as np
import operator
#创建数据集合
def createDataSet():
    """Build the toy sample matrix and its class labels.

    Returns:
        samples: (4, 2) float array of 2-D points.
        labels: list of class labels, one per row of ``samples``.
    """
    samples = np.array([
        [1.0, 1.1],
        [1.0, 1.0],
        [0.0, 0.0],
        [0.0, 0.1],
    ])
    return samples, list('AABB')
#欧式距离的k-近邻算法
def classify0(inX, dataSet, labels, k):
    """k-nearest-neighbour classification by Euclidean distance.

    Args:
        inX: single query point, list or np.array of length d.
        dataSet: sample matrix, np.array of shape (n, d).
        labels: sample labels, list or np.array of length n.
        k: number of nearest neighbours that vote.

    Returns:
        The label receiving the most votes among the k nearest samples.
    """
    # Broadcasting subtracts the query from every sample row at once,
    # replacing the explicit np.tile of the book's version.
    deltas = dataSet - np.asarray(inX)
    # Row-wise Euclidean distance: sqrt of the sum of squared differences.
    distances = np.sqrt((deltas ** 2).sum(axis=1))
    # Sample indices ordered from nearest to farthest.
    nearest = distances.argsort()
    # Tally the labels of the k closest samples.
    votes = {}
    for idx in nearest[:k]:
        label = labels[idx]
        votes[label] = votes.get(label, 0) + 1
    # max() returns the first key (in insertion order) with the highest
    # count — the same tie-breaking as a stable reverse sort on counts.
    return max(votes, key=votes.get)
#输入归一化
def autoNorm(dataSet):
    """Min-max normalise each column: newValue = (old - min) / (max - min).

    Maps every feature into [0, 1].

    params dataSet: np.array of shape (n, d) to normalise column-wise.
    returns normDataSet: normalised feature matrix, same shape as dataSet.
    returns ranges: per-column (max - min), unmodified even when zero.
    returns minVals: per-column minimum.
    """
    minVals = dataSet.min(0)
    maxVals = dataSet.max(0)
    ranges = maxVals - minVals
    # FIX: guard against constant columns (range == 0). Dividing by 1
    # leaves those columns at 0 instead of producing NaN/inf as the
    # original did. `ranges` itself is returned unchanged.
    safeRanges = np.where(ranges == 0, 1, ranges)
    # Broadcasting replaces the explicit np.tile calls (and the dead
    # np.zeros allocation) of the original.
    normDataSet = (dataSet - minVals) / safeRanges
    return normDataSet, ranges, minVals
#算法测试
def handwritingclasstest(data_test,data_test_label,data_train,data_train_label,k):
    """Evaluate classify0 on a labelled test set and print the error rate.

    params data_test: test feature matrix, one sample per row
    params data_test_label: labels for the test set
    params data_train: training feature matrix
    params data_train_label: labels for the training set
    params k: the k of the k-nearest-neighbour algorithm
    """
    # FIX: the original's final print referenced `numTestVecs`, a name
    # never defined in this function (NameError on every call). Define
    # it here as the number of test samples.
    numTestVecs = data_test.shape[0]
    errorCount = 0.0
    for i in range(numTestVecs):
        classifierResult = classify0(data_test[i, :], data_train, data_train_label, k)
        print(i, "the classifier came back with: %d, the real answer is: %d" % (classifierResult, data_test_label[i]))
        if classifierResult != data_test_label[i]:
            errorCount += 1.0
    print("错误率:%f%%" %(errorCount/float(numTestVecs)*100))
    print(errorCount)
from sklearn.neighbors import KNeighborsClassifier as kNN
def kNNClassTest(data_test,data_test_label,data_train,data_train_label,k):
    """Evaluate a scikit-learn k-NN classifier and print the error rate.

    params data_test: test feature matrix, one sample per row
    params data_test_label: labels for the test set
    params data_train: training feature matrix
    params data_train_label: labels for the training set
    params k: the k of the k-nearest-neighbour algorithm
    """
    # Build the k-NN classifier and fit it on the training data.
    model = kNN(n_neighbors = k, algorithm = 'auto')
    model.fit(data_train, data_train_label)
    # Number of test samples.
    mTest = data_test.shape[0]
    # Predict every test sample in a single call.
    predictions = model.predict(data_test)
    # Count misclassifications against the true labels.
    errorCount = 0.0
    for i in range(mTest):
        print("分类返回结果为%d\t真实结果为%d" % (predictions[i], data_test_label[i]))
        if predictions[i] != data_test_label[i]:
            errorCount += 1.0
    print("总共错了%d个数据\n错误率为%f%%" % (errorCount, errorCount/mTest * 100))
# 导入数据
def file2matrix(filename):
    """Parse a tab-separated dating-data file into features and labels.

    Each line holds three numeric features followed by a label that is
    either already a digit string or one of the text labels below.

    params filename: path to the data file.
    returns returnMat: (n, 3) float matrix of features.
    returns classLabelVector: list of int labels (an unrecognised text
        label maps to None, matching the original dict .get lookup).
    """
    love_dictionary = {'largeDoses':3, 'smallDoses':2, 'didntLike':1}
    # FIX: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open(filename) as fr:
        arrayOLines = fr.readlines()
    returnMat = np.zeros((len(arrayOLines), 3))        # prepare matrix to return
    classLabelVector = []                              # prepare labels to return
    for index, line in enumerate(arrayOLines):
        listFromLine = line.strip().split('\t')
        # First three fields are the numeric features.
        returnMat[index, :] = listFromLine[0:3]
        lastField = listFromLine[-1]
        if lastField.isdigit():
            classLabelVector.append(int(lastField))
        else:
            classLabelVector.append(love_dictionary.get(lastField))
    return returnMat, classLabelVector
#测试函数
def datingClassTest():
    """Hold out a fraction of the dating data set and report the error
    rate of classify0 on it (prints per-sample results)."""
    path = r'E:\C_all\Desktop\ML仓库\机器学习实战\machinelearninginaction3x-master\Ch02\datingTestSet2.txt'
    hoRatio = 0.50      # fraction of the data held out for testing
    datingDataMat, datingLabels = file2matrix(path)
    normMat, ranges, minVals = autoNorm(datingDataMat)
    total = normMat.shape[0]
    numTestVecs = int(total*hoRatio)
    # The first numTestVecs rows are queries; the rest is the training set.
    trainMat = normMat[numTestVecs:total, :]
    trainLabels = datingLabels[numTestVecs:total]
    errorCount = 0.0
    for i in range(numTestVecs):
        classifierResult = classify0(normMat[i, :], trainMat, trainLabels, 3)
        print("the classifier came back with: %d, the real answer is: %d" % (classifierResult, datingLabels[i]))
        if classifierResult != datingLabels[i]:
            errorCount += 1.0
    print("the total error rate is: %f" % (errorCount / float(numTestVecs)))
    print(errorCount)
datingClassTest()

the total error rate is: 0.068000
34.0

#应用
def classifyPerson():
    """Interactively classify a dating candidate from three features
    typed in by the user."""
    path = r'E:\C_all\Desktop\ML仓库\机器学习实战\machinelearninginaction3x-master\Ch02\datingTestSet2.txt'
    # Index 0/1/2 corresponds to predicted label 1/2/3.
    resultList = ['not at all', 'in small doses', 'in large doses']
    # Prompt for the three features.
    percentTats = float(input("percentage of time spent playing video games?"))
    ffMiles = float(input("frequent flier miles earned per year?"))
    iceCream = float(input("liters of ice cream consumed per year?"))
    datingDataMat, datingLabels = file2matrix(path)
    normMat, ranges, minVals = autoNorm(datingDataMat)
    # Feature order must match the columns of the data file.
    inArr = np.array([ffMiles, percentTats, iceCream])
    # Normalise the query with the training-set statistics before classifying.
    classifierResult = classify0((inArr - minVals) / ranges, normMat, datingLabels, 3)
    # Labels are 1-based; resultList is 0-based.
    print("You will probably like this person: %s" % resultList[classifierResult - 1])
classifyPerson()

percentage of time spent playing video games?10
frequent flier miles earned per year?10000
liters of ice cream consumed per year?0.5
You will probably like this person: in small doses

# Data import: download/load MNIST via the (deprecated) TF 1.x tutorial helper.
from tensorflow.examples.tutorials.mnist import input_data
# Fetch MNIST into /MNIST_data/ (downloads on first run).
mnist = input_data.read_data_sets("/MNIST_data/", one_hot=False)
# one_hot=False keeps labels as integer class ids (the original comment
# claimed one_hot=True, which did not match the call above).


# Test function
def handwritingclasstest():
    """Run classify0 over the whole MNIST test set (module-level `mnist`)
    and print the error rate.

    NOTE(review): this redefines the earlier handwritingclasstest that
    took explicit data arguments — consider renaming one of them.
    """
    numTest = mnist.test.images.shape[0]
    errorCount = 0.0
    for i in range(numTest):
        classifierResult = classify0(mnist.test.images[i, :], mnist.train.images, mnist.train.labels, 10)
        print(i, "the classifier came back with: %d, the real answer is: %d" % (classifierResult, mnist.test.labels[i]))
        if classifierResult != mnist.test.labels[i]:
            errorCount += 1.0
    print("the total error rate is: %f" % (errorCount / float(numTest)))
    print(errorCount)
handwritingclasstest()

the total error rate is: 0.033200
332.0

上一篇 下一篇

猜你喜欢

热点阅读