Import packages
import scipy.io as sio # load mat
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
## data1
mat1 = sio.loadmat(r'D:\python_try\5. AndrewNg_ML\data\kmeans and pca\ex7data1.mat')
data1 = pd.DataFrame(mat1.get('X'), columns=['X1', 'X2'])
## data2
mat2 = sio.loadmat(r'D:\python_try\5. AndrewNg_ML\data\kmeans and pca\ex7data2.mat')
data2 = pd.DataFrame(mat2.get('X'), columns=['X1', 'X2'])
Plot data2
sns.lmplot(x='X1', y='X2', data=data2, fit_reg=False)
plt.show()
Algorithm steps
1. Randomly pick k samples as the initial centroids
2. Assign each sample to its nearest centroid
3. Recompute each centroid as the mean of the samples assigned to it
Repeat steps 2 and 3 until the centroids no longer change
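In symbols (standard K-means notation, not from the original post): with $x^{(i)}$ a sample, $\mu_k$ the $k$-th centroid and $c^{(i)}$ the cluster assigned to sample $i$,

$$c^{(i)} := \arg\min_k \lVert x^{(i)} - \mu_k \rVert$$
$$\mu_k := \frac{1}{\lvert C_k \rvert} \sum_{i \in C_k} x^{(i)}$$
$$J = \frac{1}{m} \sum_{i=1}^{m} \lVert x^{(i)} - \mu_{c^{(i)}} \rVert$$

Note that the cost() function further down averages the plain Euclidean distance; the course's distortion measure uses the squared distance, but the argmin in the assignment step is the same either way.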
Randomly generate k initial centroids
Idea: draw k rows with DataFrame.sample(k)
def random_init(data, k):
    # sample k rows at random; to_numpy() converts the result to an ndarray
    return data.sample(k).to_numpy()
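A quick sanity check (the variable name is just for illustration):

init_centroids = random_init(data2, 3)   # ndarray of shape (3, 2): three random rows of data2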
Assign clusters
Idea: for each sample, compute its distance to every centroid and assign it to the nearest one
# Find the nearest centroid for a single sample
def find_cluster(x, centroids):
    distances = np.apply_along_axis(func1d=np.linalg.norm, axis=1, arr=x - centroids)
    return np.argmin(distances)

# Cluster labels for the whole data set
def assign_cluster(data, centroids):
    return np.apply_along_axis(lambda x: find_cluster(x, centroids), axis=1, arr=data.to_numpy())
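For example, labelling every sample in data2 with the random centroids from the sketch above (C0 is an illustrative name):

C0 = assign_cluster(data2, init_centroids)   # array of length m with values in {0, 1, 2}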
Compute the mean of each cluster to get the new centroids
# Add a cluster-label column C to the data
def combineDataC(data, C):
    dataC = data.copy()
    dataC['C'] = C
    return dataC

# New centroids: group by C, take the mean, drop C, convert to an ndarray
def newCentroids(data, C):
    dataC = combineDataC(data, C)
    return dataC.groupby('C', as_index=False).mean().sort_values(by='C').drop('C', axis=1).to_numpy()

# Cost function: average distance of each sample to its assigned centroid
def cost(data, centroids, C):
    m = data.shape[0]              # number of samples
    dataCentroids = centroids[C]   # centroid coordinates for every row
    distances = np.apply_along_axis(func1d=np.linalg.norm, axis=1,
                                    arr=data.to_numpy() - dataCentroids)
    return distances.sum() / m
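Continuing the illustrative example from the previous sketch, one update step and its cost (note that with an unlucky random init a cluster can end up empty, which this simple implementation does not guard against):

new_centroids = newCentroids(data2, C0)   # mean of each labelled group
cost(data2, new_centroids, C0)            # average point-to-centroid distance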
Execution steps
# One K-means pass: iterate until the relative cost change is below tol or epoch is reached
def kMeansIter(data, k, epoch=100, tol=0.0001):
    # generate the initial centroids
    centroids = random_init(data, k)
    costProgress = []   # cost after each iteration
    for i in range(epoch):
        # print('running epoch {}'.format(i))
        C = assign_cluster(data, centroids)    # assignment step
        centroids = newCentroids(data, C)      # update step
        costProgress.append(cost(data, centroids, C))
        if len(costProgress) > 1:
            # stop when the relative improvement falls below the tolerance
            if np.abs(costProgress[-1] - costProgress[-2]) / costProgress[-1] < tol:
                break
    return C, centroids, costProgress[-1]
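A single run, e.g. on data2 with k = 3 (illustrative variable names):

C, centroids, finalCost = kMeansIter(data2, 3)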
# Run the K-means pass n_init times for a given k and keep the run with the lowest cost
def kMeans(data, k, epoch=100, n_init=10):
    tries = [kMeansIter(data, k, epoch) for _ in range(n_init)]
    leastCostIndex = np.argmin([t[-1] for t in tries])
    return tries[leastCostIndex]
CList = []          # best labels for each k
centroidsList = []  # best centroids for each k
costList = []       # lowest cost for each k
for k in range(2, 10):
    kFinalC, kFinalCentroids, kFinalCost = kMeans(data2, k)
    CList.append(kFinalC)
    centroidsList.append(kFinalCentroids)
    costList.append(kFinalCost)
plt.plot(range(2, 10), costList, 'bo-', markersize=20)
plt.show()
Use the elbow method to choose k = 3
Cluster with k = 3
k = 3  # the chosen value of k
bestC = CList[k-2]
bestCentroids = centroidsList[k-2]
dataWithC = combineDataC(data2, bestC)
Plot the clusters
fig, ax = plt.subplots(figsize=(8,6))
ax.scatter(dataWithC['X1'], dataWithC['X2'], s=50, c=dataWithC['C'], cmap='Accent', label='data')
ax.scatter(pd.DataFrame(bestCentroids)[0], pd.DataFrame(bestCentroids)[1], s=100, c='r', label='centroids')
ax.legend()
plt.show()
scikit-learn KMeans (see the official documentation)
Call KMeans() to check its parameter settings
KMeans()
sk_kmeans = KMeans(n_clusters=3)
sk_kmeans.fit(data2)
sk_kmeans.cluster_centers_  # cluster centers
sk_kmeans.labels_           # cluster label of each sample
sk_kmeans.inertia_          # sum of squared distances from each sample to its closest center
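A fitted model can also label new points via predict(); the two points below are made up purely for illustration:

sk_kmeans.predict(np.array([[3.0, 3.0], [6.0, 2.5]]))   # index of the nearest learned center for each point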
Plot the scikit-learn result
SKdataWithC = combineDataC(data2, sk_kmeans.labels_)   # attach the sklearn labels to data2
fig, ax = plt.subplots(figsize=(8,6))
ax.scatter(SKdataWithC['X1'], SKdataWithC['X2'], s=50, c=sk_kmeans.labels_, cmap='Accent', label='data')
ax.scatter(pd.DataFrame(sk_kmeans.cluster_centers_)[0], pd.DataFrame(sk_kmeans.cluster_centers_)[1], s=100, c='r', label='centroids')
ax.legend()
plt.show()
from skimage import io
pic = io.imread(r'D:\python_try\5. AndrewNg_ML\data\kmeans and pca\bird_small.png')/255.
pic.shape   # (128, 128, 3)
Reshape to a 2D array of pixels
data3 = pic.reshape(128*128, 3)
model = KMeans(n_clusters=16, n_init=100)   # n_jobs was removed from KMeans in recent scikit-learn
model.fit(data3)
picCentroids = model.cluster_centers_
picC = model.labels_
compressionPic = picCentroids[picC].reshape((128, 128, 3))
fig, ax = plt.subplots(1, 2, figsize=(12,8))
ax[0].imshow(pic)
ax[1].imshow(compressionPic)
plt.show()
The original image uses thousands of colors; K-means compression reduces that to just 16.
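A rough back-of-the-envelope estimate of the savings (assuming 24-bit colors and a 4-bit label per pixel for 16 clusters; these numbers are not from the original post):

original_bits   = 128 * 128 * 24            # 393,216 bits: one 24-bit color per pixel
compressed_bits = 128 * 128 * 4 + 16 * 24   # 65,920 bits: 4-bit labels plus the 16-color palette
print(original_bits / compressed_bits)      # roughly a 6x reduction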