k-MEANS

2018-01-05  VaultHunter

The difference between a Python list and an np.array / matrix:

data=[[1,2,3,4],
      [2,1,3,4],
      [1,0,0,1]]

data[:,0]
A list index must be an integer (or a slice), while (:, 0) here is a tuple, so the line above raises a TypeError. Only a NumPy matrix or array can be indexed this way, which is why we usually convert the data to a matrix first and then take data[:, 0]:

data = mat([[1,2,3,4],
            [2,1,3,4],
            [1,0,0,1]])
feature1 = data[:, 0]
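
To see the difference side by side, here is a minimal check (the error message in the comment is CPython 3's wording):

from numpy import mat

data = [[1, 2, 3, 4],
        [2, 1, 3, 4],
        [1, 0, 0, 1]]
try:
    data[:, 0]                # tuple index into a plain list
except TypeError as e:
    print(e)                  # "list indices must be integers or slices, not tuple"

feature1 = mat(data)[:, 0]    # works: column 0 as a 3x1 matrix
print(feature1)               # [[1] [2] [1]]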

kMeans.py

#!/usr/bin/python
# -*- coding: utf-8 -*-
from numpy import *
import time
import matplotlib.pyplot as plt
def loadDataSet(filename):
    '''
    Read a tab-delimited text file and return the data as a list of rows.
    :param filename: path to the data file
    :return: list of lists of floats
    '''
    dataMat = []
    fr = open(filename)
    for line in fr.readlines():
        curLine = line.strip().split('\t')
        fltLine = list(map(float, curLine))  # map() is lazy in Python 3, so materialize it into a list
        dataMat.append(fltLine)
    return dataMat

def distEclud(vecA, vecB):
    '''
    Compute the Euclidean distance between two row vectors.
    :param vecA:
    :param vecB:
    :return: the Euclidean distance as a scalar
    '''
    return sqrt(sum(power(vecA - vecB, 2)))
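# quick check: distEclud(mat([[0, 0]]), mat([[3, 4]]))
# returns sqrt(3**2 + 4**2) = 5.0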

def randCent(dataSet, k):
    '''
    Initialize k centroids at random positions within the data's bounds.
    :param dataSet:
    :param k:
    :return: k x n matrix of random centroids
    '''
    n = shape(dataSet)[1]
    centroids = mat(zeros((k, n)))
    for j in range(n):  # for each feature, draw k values uniformly within [min, max]
        minJ = min(dataSet[:, j])
        rangeJ = float(max(dataSet[:, j]) - minJ)
        centroids[:, j] = minJ + rangeJ * random.rand(k, 1)
    return centroids
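# note: each coordinate is drawn uniformly from [minJ, minJ + rangeJ], so every
# random centroid lies inside the data's bounding box; e.g.
# randCent(mat([[1., 2.], [3., 4.], [5., 0.]]), 2) returns a 2x2 matrix whose
# first column lies in [1, 5] and whose second column lies in [0, 4]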

def kMeans(dataSet, k, distMeas=distEclud, createCent=randCent):
    m = shape(dataSet)[0]  # number of samples (80 in the demo data, each with 2 features)
    clusterAssment = mat(zeros((m, 2)))  # per point: col 0 = cluster index, col 1 = squared error to its centroid
    centroids = createCent(dataSet, k)  # initialize the centroids
    clusterChanged = True
    while clusterChanged:
        clusterChanged = False
        for i in range(m):  # assign each of the m samples to a cluster
            minDist = inf; minIndex = -1
            for j in range(k):  # distance from sample i to each of the k centroids; keep the minimum and its index
                distJI = distMeas(centroids[j, :], dataSet[i, :])
                if distJI < minDist:
                    minDist = distJI; minIndex = j
            if clusterAssment[i, 0] != minIndex:  # clusterAssment starts as an m x 2 all-zero matrix
                clusterChanged = True
            clusterAssment[i, :] = minIndex, minDist**2
        print(centroids)
        for cent in range(k):  # recalculate centroids
            ptsInClust = dataSet[nonzero(clusterAssment[:, 0].A == cent)[0]]  # rows of dataSet assigned to cluster cent; .A turns the matrix into an array
            centroids[cent, :] = mean(ptsInClust, axis=0)  # the mean of a cluster's points is its new centroid
    return centroids, clusterAssment


def showCluster(dataSet, k, centroids, clusterAssment):
    numSamples, dim = dataSet.shape
    if dim != 2:
        print ("Sorry! I can not draw because the dimension of your data is not 2!")
        return 1

    mark = ['or', 'ob', 'og', 'ok', '^r', '+r', 'sr', 'dr', '<r', 'pr']
    if k > len(mark):
        print ("Sorry! Your k is too large! ")
        return 1


    # draw all samples
    for i in range(numSamples):
        markIndex = int(clusterAssment[i, 0])  # choose the marker (color/shape) for this sample by its cluster
        plt.plot(dataSet[i, 0], dataSet[i, 1], mark[markIndex])

    mark = ['Dr', 'Db', 'Dg', 'Dk', '^b', '+b', 'sb', 'db', '<b', 'pb']
    # draw the centroids
    for i in range(k):
        plt.plot(centroids[i, 0], centroids[i, 1], mark[i], markersize = 12)

    plt.show()
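
One edge case the kMeans above leaves unhandled: if a cluster ends up with no points assigned to it, mean() over the empty selection yields nan and that centroid is lost. A minimal guard, as a sketch (reseeding with randCent is one possible choice, not the book's), would swap the recalculation loop for:

        for cent in range(k):  # recalculate centroids
            ptsInClust = dataSet[nonzero(clusterAssment[:, 0].A == cent)[0]]
            if len(ptsInClust) == 0:
                centroids[cent, :] = randCent(dataSet, 1)  # reseed an emptied cluster instead of producing nan
            else:
                centroids[cent, :] = mean(ptsInClust, axis=0)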

demo.py

import kMeans
from numpy import *
datMat = mat(kMeans.loadDataSet('testSet.txt'))
myCentroids, myClusterAssements = kMeans.kMeans(datMat, 4)
print(shape(myClusterAssements))

kMeans.showCluster(datMat, 4, myCentroids, myClusterAssements)
[Figure_1.png / Figure_2.png: scatter plots of two kMeans runs on testSet.txt; the run in Figure_2 settles into a poor local optimum]

As Figure_2 shows, the k-means algorithm above can get stuck in a local optimum. The second column of clusterAssment stores each point's squared error to its assigned centroid; summing these errors gives the SSE (Sum of Squared Errors), the standard measure of clustering quality (lower is better).
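
Since those squared errors are already stored in the second column, the total SSE is a one-liner; continuing with the names from demo.py:

totalSSE = sum(myClusterAssements[:, 1])  # total sum of squared errors
print(totalSSE)
# SSE of a single cluster, e.g. cluster 0:
sse0 = sum(myClusterAssements[nonzero(myClusterAssements[:, 0].A == 0)[0], 1])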

Ways to improve the result (option 1 is sketched below):
1: merge the two closest centroids
2: merge the two centroids whose merger increases the total SSE the least
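
A sketch of option 1, reusing distEclud and the from numpy import * context of kMeans.py: scan all centroid pairs and return the closest one; merging would then replace that pair with its mean.

def closestCentroidPair(centroids, distMeas=distEclud):
    '''Return the indices (i, j) of the two closest centroids.'''
    k = shape(centroids)[0]
    bestPair = (-1, -1); bestDist = inf
    for i in range(k):
        for j in range(i + 1, k):
            d = distMeas(centroids[i, :], centroids[j, :])
            if d < bestDist:
                bestDist = d; bestPair = (i, j)
    return bestPair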

This leads to the bisecting k-means algorithm:
1. Start by treating all the data as a single cluster.
2. For each cluster, run k-means with k=2 on its points as a trial split.
3. Compute the SSE of each trial split and actually split the cluster whose split lowers the total SSE the most; repeat until the required K is reached.

def biKmeans(dataSet, k, distMeas=distEclud):
    m = shape(dataSet)[0]
    clusterAssment = mat(zeros((m,2)))
    centroid0 = mean(dataSet, axis=0).tolist()[0]
    centList =[centroid0] #create a list with one centroid
    for j in range(m):#calc initial Error
        clusterAssment[j,1] = distMeas(mat(centroid0), dataSet[j,:])**2
    while (len(centList) < k):
        lowestSSE = inf
        for i in range(len(centList)):
            ptsInCurrCluster = dataSet[nonzero(clusterAssment[:,0].A==i)[0],:]#get the data points currently in cluster i
            centroidMat, splitClustAss = kMeans(ptsInCurrCluster, 2, distMeas)
            sseSplit = sum(splitClustAss[:,1])  # compare the SSE to the current minimum
            sseNotSplit = sum(clusterAssment[nonzero(clusterAssment[:,0].A!=i)[0],1])
            print "sseSplit, and notSplit: ",sseSplit,sseNotSplit
            if (sseSplit + sseNotSplit) < lowestSSE:
                bestCentToSplit = i
                bestNewCents = centroidMat
                bestClustAss = splitClustAss.copy()
                lowestSSE = sseSplit + sseNotSplit
        bestClustAss[nonzero(bestClustAss[:,0].A == 1)[0],0] = len(centList) #change 1 to 3,4, or whatever
        bestClustAss[nonzero(bestClustAss[:,0].A == 0)[0],0] = bestCentToSplit
        print('the bestCentToSplit is: ', bestCentToSplit)
        print('the len of bestClustAss is: ', len(bestClustAss))
        centList[bestCentToSplit] = bestNewCents[0,:].tolist()[0]#replace a centroid with two best centroids
        centList.append(bestNewCents[1,:].tolist()[0])
        clusterAssment[nonzero(clusterAssment[:,0].A == bestCentToSplit)[0],:]= bestClustAss#reassign new clusters, and SSE
    return mat(centList), clusterAssment
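
Usage mirrors demo.py. A sketch, assuming biKmeans has been added to kMeans.py and that a second data file exists (testSet2.txt is an assumed name here, following the book's convention):

import kMeans
from numpy import *
datMat3 = mat(kMeans.loadDataSet('testSet2.txt'))  # assumed data file
centList, myNewAssments = kMeans.biKmeans(datMat3, 3)
kMeans.showCluster(datMat3, 3, centList, myNewAssments)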

The idea behind k-means is fairly easy to grasp:
1. Create initial centroids. 2. Compute each point's distance to every centroid and assign the point to the nearest one. 3. Take the mean of each cluster's points as its new centroid, and repeat steps 2 and 3 until no assignment changes.
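
The same three steps can also be written compactly with vectorized NumPy; a self-contained sketch of one iteration (independent of kMeans.py above, and assuming no cluster goes empty):

import numpy as np

def kmeans_step(X, centroids):
    '''One assign-and-update iteration on an (m, n) array X with (k, n) centroids.'''
    # step 2: squared distance from every point to every centroid -> (m, k)
    d2 = ((X[:, None, :] - centroids[None, :, :]) ** 2).sum(axis=2)
    labels = d2.argmin(axis=1)  # index of the nearest centroid per point
    # step 3: mean of each cluster's points becomes its new centroid
    newCents = np.array([X[labels == c].mean(axis=0) for c in range(len(centroids))])
    return newCents, labels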
