K-Means
2018-12-06
JasonChiu17
Principle
- Clustering is unsupervised learning: similar objects are grouped into the same cluster, and the more similar the objects within each cluster, the better the clustering result.
- First, randomly pick K initial points as centroids.
- Then, assign every point in the dataset to a cluster: for each point, find the nearest centroid and assign the point to that centroid's cluster.
- Next, update each cluster's centroid to the mean of all points assigned to it, and repeat the assignment/update steps until no assignment changes.
Pros:
- Easy to implement
Cons:
- May converge to a local minimum of the SSE objective (written out after this list); convergence is slow on large datasets
Applicable data types:
- Numeric data
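K-Means can be read as a heuristic for minimizing the within-cluster sum of squared errors (SSE). With clusters $C_1,\dots,C_K$ and centroids $\mu_1,\dots,\mu_K$, the objective is

$$\mathrm{SSE}=\sum_{i=1}^{K}\sum_{x\in C_i}\lVert x-\mu_i\rVert_2^2$$

Both the assignment step and the centroid-update step can only keep or decrease this value, which is why the iteration in the code below terminates, though possibly at a local minimum.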
import numpy as np
# Load a whitespace-separated text file into a list of float lists
def loadDataSet(fileName):
    dataList = []
    fr = open(fileName)
    for line in fr.readlines():
        curLine = line.strip().split()
        fltLine = list(map(float, curLine))  # convert every element from str to float
        dataList.append(fltLine)
    return dataList
dataList = loadDataSet('../../Reference Code/Ch10/testSet.txt')
# Visualize the data distribution
import matplotlib.pyplot as plt
def data2show(dataArr):
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(dataArr[:, 0], dataArr[:, 1], label='raw data')
    ax.legend()
    ax.set_title('Data Distribution')
    ax.set_xlabel('x1')
    ax.set_ylabel('x2')
    plt.show()
dataArr = np.array(dataList)
data2show(dataArr)
(Figure: scatter plot of the raw data in testSet.txt, output_3_0.png)
# Distance measure (similarity): Euclidean distance
def distEclud(vecA, vecB):
    distance = np.sqrt(np.sum(np.power(vecA - vecB, 2)))
    return distance
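For reference, distEclud is just the Euclidean distance between two row vectors $a$ and $b$:

$$d(a,b)=\sqrt{\sum_{j}(a_j-b_j)^2}$$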
# Build k random centroids within the range of the data
def randCent(dataSet, k):
    n = dataSet.shape[1]
    centroids = np.mat(np.zeros((k, n)))
    # for each column, draw k random values inside that column's range
    for j in range(n):
        minJ = np.min(dataSet[:, j])                        # column minimum
        rangeJ = float(np.max(dataSet[:, j]) - minJ)        # column range
        centroids[:, j] = minJ + rangeJ * np.random.rand(k, 1)  # np.random.rand samples over [0, 1)
    '''
    np.random.rand(3,1)
    array([[0.73830851],
           [0.55393815],
           [0.4770937 ]])
    '''
    return centroids
# K-Means clustering
def kMeans(dataSet, k, distMeas=distEclud, createCent=randCent):
    m = dataSet.shape[0]
    clusterAssment = np.mat(np.zeros((m, 2)))  # per sample: cluster index and squared error
    centroids = createCent(dataSet, k)         # initialize the centroids
    clusterChanged = True                      # keep iterating while any assignment changes
    while clusterChanged:
        clusterChanged = False
        # assign every sample to its nearest centroid
        for i in range(m):
            minDist = np.inf
            minIndex = -1
            for j in range(k):
                distJI = distMeas(centroids[j, :], dataSet[i, :])
                if distJI < minDist:
                    minDist = distJI
                    minIndex = j
            # once no assignment changes, the loop stops
            if clusterAssment[i, 0] != minIndex:
                clusterChanged = True
            clusterAssment[i, :] = minIndex, minDist**2
        # print(centroids)
        # update each centroid to the mean of the samples in its cluster
        for cent in range(k):
            ptsInClust = dataSet[np.nonzero(clusterAssment[:, 0].A == cent)[0]]  # all samples in this cluster
            centroids[cent, :] = np.mean(ptsInClust, axis=0)
    return centroids, clusterAssment
# Plot the clustering result
def res2show(dataMat, centroids, clusterAssment):
    k = len(centroids)
    names = locals()  # local namespace as a dict, used here purely to hold per-cluster arrays
    dataMarkerList = ['*', 's', 'o', 'x']
    fig = plt.figure()
    ax = fig.add_subplot(111)
    for i in range(k):
        # centroid of cluster i
        names['centroids' + str(i)] = centroids[i, :].A
        # samples of cluster i
        names['data' + str(i)] = dataMat[clusterAssment.A[:, 0] == i].A
        # plot the centroid
        ax.scatter(names['centroids' + str(i)][:, 0], names['centroids' + str(i)][:, 1], marker='+', s=200, label=str(i))
        # plot the samples
        ax.scatter(names['data' + str(i)][:, 0], names['data' + str(i)][:, 1], marker=dataMarkerList[i], label='cluster ' + str(i))
    ax.legend(loc='best')
    ax.set_title('Result with K-Means')
    plt.show()
dataList = loadDataSet('../../Reference Code/Ch10/testSet.txt')
dataMat = np.mat(dataList)
centroids,clusterAssment = kMeans(dataMat,4,distMeas=distEclud,createCent=randCent)
res2show(dataMat,centroids,clusterAssment)
(Figure: K-Means clustering result on testSet.txt with k=4, output_9_0.png)
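As a quick sanity check (a minimal snippet using only what is defined above), the total SSE of a run can be read directly off clusterAssment, since kMeans stores each point's squared distance to its centroid in the second column:

print('total SSE:', np.sum(clusterAssment[:, 1]))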
Bisecting K-Means
- Start with all points in a single cluster.
- Split one cluster in two with a k=2 K-Means run, choosing the cluster whose split reduces the total SSE the most (roughly, the cluster with the largest SSE); the exact criterion is written out after this list.
- Repeat, at each step splitting the cluster whose split reduces the total SSE the most.
- Stop once the number of clusters reaches the chosen K.
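At every step the code below evaluates, for each existing cluster $i$, the total error that would result from bisecting it, and splits the cluster giving the smallest total:

$$i^{*}=\arg\min_{i}\big(\mathrm{SSE}_{\text{split}}(i)+\mathrm{SSE}_{\text{notSplit}}(i)\big)$$

where $\mathrm{SSE}_{\text{split}}(i)$ is the SSE of cluster $i$ after a k=2 K-Means run on its points, and $\mathrm{SSE}_{\text{notSplit}}(i)$ is the SSE of all the other clusters, left unchanged.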
def biKmeans(dataSet, k, distMeas=distEclud):
    m = dataSet.shape[0]
    clusterAssment = np.mat(np.zeros((m, 2)))
    centroid0 = np.mean(dataSet, axis=0).tolist()[0]  # initial centroid: column-wise mean of all points
    centList = [centroid0]
    # initial squared error of every point w.r.t. the single centroid
    for j in range(m):
        clusterAssment[j, 1] = distMeas(np.mat(centroid0), dataSet[j, :])**2
    # keep splitting until there are k clusters
    while (len(centList) < k):
        lowestSSE = np.inf  # initialize the lowest SSE as infinity
        # try splitting each existing cluster
        for i in range(len(centList)):
            ptsInCurrCluster = dataSet[np.nonzero(clusterAssment[:, 0].A == i)[0], :]  # all samples of cluster i
            centroidMat, splitClustAss = kMeans(ptsInCurrCluster, k=2, distMeas=distMeas)  # bisect cluster i with a k=2 K-Means run
            sseSplit = np.sum(splitClustAss[:, 1])  # SSE of cluster i after the split
            sseNotSplit = np.sum(clusterAssment[np.nonzero(clusterAssment[:, 0].A != i)[0], 1])  # SSE of the other clusters
            print('sseSplit, and notsplit:', sseSplit, sseNotSplit)
            # keep the split that yields the smallest total SSE
            if (sseSplit + sseNotSplit) < lowestSSE:
                bestCentToSplit = i
                bestNewCents = centroidMat
                bestClustAss = splitClustAss.copy()
                lowestSSE = sseSplit + sseNotSplit
        # relabel the two new clusters and write them back into the assignment table
        bestClustAss[np.nonzero(bestClustAss[:, 0].A == 1)[0], 0] = len(centList)
        bestClustAss[np.nonzero(bestClustAss[:, 0].A == 0)[0], 0] = bestCentToSplit
        clusterAssment[np.nonzero(clusterAssment[:, 0].A == bestCentToSplit)[0], :] = bestClustAss
        print('the bestCentToSplit is:', bestCentToSplit)
        print('the len of bestClustAss is:', len(bestClustAss))
        # update the centroid list
        centList[bestCentToSplit] = bestNewCents[0, :].tolist()[0]
        centList.append(bestNewCents[1, :].tolist()[0])
    return np.mat(centList), clusterAssment
K-Means vs. bisecting K-Means
# K-Means
dataMat2 = np.mat(loadDataSet('../../Reference Code/Ch10/testSet2.txt'))
centroids, clusterAssment = kMeans(dataMat2, 3, distMeas=distEclud, createCent=randCent)
res2show(dataMat2, centroids, clusterAssment)
(Figure: K-Means result on testSet2.txt with k=3, output_13_0.png)
# Bisecting K-Means
dataMat3 = np.mat(loadDataSet('../../Reference Code/Ch10/testSet2.txt'))
centList, clusterAssment = biKmeans(dataMat3, 3)
res2show(dataMat3, centList, clusterAssment)
sseSplit, and notsplit: 453.0334895807502 0.0
the bestCentToSplit is: 0
the len of bestClustAss is: 60
sseSplit, and notsplit: 12.753263136887313 423.8762401366249
sseSplit, and notsplit: 77.59224931775066 29.15724944412535
the bestCentToSplit is: 1
the len of bestClustAss is: 40
(Figure: bisecting K-Means result on testSet2.txt with k=3, output_14_1.png)
Clustering geographic coordinates
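The distance measure used below, distSLC, is the spherical law of cosines: for two points with latitudes $\varphi_A,\varphi_B$ and longitudes $\lambda_A,\lambda_B$ (converted to radians) and Earth radius $R\approx 6371$ km,

$$d=R\cdot\arccos\big(\sin\varphi_A\sin\varphi_B+\cos\varphi_A\cos\varphi_B\cos(\lambda_B-\lambda_A)\big)$$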
# numpy (np) and matplotlib.pyplot (plt) were already imported above
# Great-circle distance between two points on the Earth's surface (spherical law of cosines);
# each row vector is [longitude, latitude] in degrees
def distSLC(vecA, vecB):
    a = np.sin(vecA[0, 1] * np.pi / 180) * np.sin(vecB[0, 1] * np.pi / 180)
    b = np.cos(vecA[0, 1] * np.pi / 180) * np.cos(vecB[0, 1] * np.pi / 180) * \
        np.cos(np.pi * (vecB[0, 0] - vecA[0, 0]) / 180)
    return np.arccos(a + b) * 6371.0  # Earth radius in km

def clusterClubs(numClust=5):
    # read the data: column 4 is longitude, column 3 is latitude
    datList = []
    for line in open('../../Reference Code/Ch10/places.txt').readlines():
        lineArr = line.split('\t')
        datList.append([float(lineArr[4]), float(lineArr[3])])
    datMat = np.mat(datList)
    # bisecting K-Means with the spherical distance measure
    myCentroids, clustAssing = biKmeans(datMat, numClust, distMeas=distSLC)
    fig = plt.figure()
    rect = [0.1, 0.1, 0.8, 0.8]  # rect = [left, bottom, width, height]
    scatterMarkers = ['s', 'o', '^', '8', 'p',
                      'd', 'v', 'h', '>', '<']
    axprops = dict(xticks=[], yticks=[])
    ax0 = fig.add_axes(rect, label='ax0', **axprops)
    # draw on top of a map of Portland
    imgP = plt.imread('../../Reference Code/Ch10/Portland.png')
    ax0.imshow(imgP)
    ax1 = fig.add_axes(rect, label='ax1', frameon=False)
    for i in range(numClust):
        ptsInCurrCluster = datMat[np.nonzero(clustAssing[:, 0].A == i)[0], :]
        markerStyle = scatterMarkers[i % len(scatterMarkers)]  # cycle through the markers via the remainder
        # plot this cluster's samples
        ax1.scatter(ptsInCurrCluster[:, 0].flatten().A[0], ptsInCurrCluster[:, 1].flatten().A[0], marker=markerStyle, s=90)
    # plot all centroids
    ax1.scatter(myCentroids[:, 0].flatten().A[0], myCentroids[:, 1].flatten().A[0], marker='+', s=300)
    plt.show()
clusterClubs(5)
sseSplit, and notsplit: 3431.621150934527 0.0
the bestCentToSplit is: 0
the len of bestClustAss is: 69
sseSplit, and notsplit: 1230.242092893483 1062.0271973570536
sseSplit, and notsplit: 608.5836216116304 2369.5939535774737
the bestCentToSplit is: 0
the len of bestClustAss is: 53
sseSplit, and notsplit: 197.3863640526132 1892.3442135171072
sseSplit, and notsplit: 515.6100922622932 1230.242092893483
sseSplit, and notsplit: 471.8115193432218 1461.952274090483
the bestCentToSplit is: 1
the len of bestClustAss is: 16
sseSplit, and notsplit: 170.75670898476076 1345.9271084223471
sseSplit, and notsplit: 53.299046126034725 1437.9528665605217
sseSplit, and notsplit: 471.8115193432218 915.5351689957225
sseSplit, and notsplit: 98.33199611762291 1538.141411488738
the bestCentToSplit is: 2
the len of bestClustAss is: 35
(Figure: five clusters of locations plotted over the Portland map, output_17_1.png)