
PCA Python Implementation


PCA implementation:

Reference blog: https://blog.csdn.net/u013719780/article/details/78352262
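The implementation below takes the textbook eigendecomposition route: form the sample covariance matrix of X, eigendecompose it, and project onto the top-k eigenvectors (k = 2 in the code). In LaTeX notation:

\Sigma = \frac{1}{n-1}(X - \bar{X})^\top (X - \bar{X}), \qquad \Sigma v_i = \lambda_i v_i, \qquad X_{\mathrm{trans}} = X V_k

where V_k stacks the k eigenvectors with the largest eigenvalues.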

 

from __future__ import print_function
from sklearn import datasets
import matplotlib.pyplot as plt
import matplotlib.cm as cmx
import matplotlib.colors as colors
import numpy as np
# matplotlib inline


# Shuffle samples and labels together
def shuffle_data(X, y, seed=None):
    if seed:
        np.random.seed(seed)
    idx = np.arange(X.shape[0])
    np.random.shuffle(idx)
    return X[idx], y[idx]


# Normalize dataset X (unit Lp norm per sample)
def normalize(X, axis=-1, p=2):
    lp_norm = np.atleast_1d(np.linalg.norm(X, p, axis))
    lp_norm[lp_norm == 0] = 1
    return X / np.expand_dims(lp_norm, axis)


# Standardize dataset X (zero mean, unit variance per column)
def standardize(X):
    X_std = np.zeros(X.shape)
    mean = X.mean(axis=0)
    std = X.std(axis=0)
    # When dividing, always guard against a zero denominator:
    # X_std = (X - X.mean(axis=0)) / X.std(axis=0)
    for col in range(np.shape(X)[1]):
        if std[col]:
            X_std[:, col] = (X[:, col] - mean[col]) / std[col]
    return X_std


# Split the dataset into training and test sets
def train_test_split(X, y, test_size=0.2, shuffle=True, seed=None):
    if shuffle:
        X, y = shuffle_data(X, y, seed)
    n_train_samples = int(X.shape[0] * (1 - test_size))
    x_train, x_test = X[:n_train_samples], X[n_train_samples:]
    y_train, y_test = y[:n_train_samples], y[n_train_samples:]
    return x_train, x_test, y_train, y_test


# Compute the covariance matrix of X (or the cross-covariance of X and Y)
def calculate_covariance_matrix(X, Y=np.empty((0, 0))):
    if not Y.any():
        Y = X
    n_samples = np.shape(X)[0]
    covariance_matrix = (1 / (n_samples - 1)) * (X - X.mean(axis=0)).T.dot(Y - Y.mean(axis=0))
    return np.array(covariance_matrix, dtype=float)


# Compute the variance of each column of X
def calculate_variance(X):
    n_samples = np.shape(X)[0]
    variance = (1 / n_samples) * np.diag((X - X.mean(axis=0)).T.dot(X - X.mean(axis=0)))
    return variance


# Compute the standard deviation of each column of X
def calculate_std_dev(X):
    std_dev = np.sqrt(calculate_variance(X))
    return std_dev


# Compute the correlation matrix
def calculate_correlation_matrix(X, Y=np.empty([0])):
    if not Y.any():
        Y = X
    # First compute the covariance matrix
    covariance_matrix = calculate_covariance_matrix(X, Y)
    # Then the standard deviations of X and Y
    std_dev_X = np.expand_dims(calculate_std_dev(X), 1)
    std_dev_y = np.expand_dims(calculate_std_dev(Y), 1)
    correlation_matrix = np.divide(covariance_matrix, std_dev_X.dot(std_dev_y.T))
    return np.array(correlation_matrix, dtype=float)


class PCA():
    """
    Principal component analysis (PCA), an unsupervised learning algorithm.
    """
    def __init__(self):
        self.eigen_values = None
        self.eigen_vectors = None
        self.k = 2

    def transform(self, X):
        """
        Reduce the dimensionality of the original dataset X via PCA.
        """
        covariance = calculate_covariance_matrix(X)
        # Solve for eigenvalues and eigenvectors
        self.eigen_values, self.eigen_vectors = np.linalg.eig(covariance)
        # Sort eigenvalues in descending order. Note that eigenvectors are
        # stored column-wise: column k of self.eigen_vectors is the eigenvector
        # for the k-th eigenvalue in self.eigen_values.
        idx = self.eigen_values.argsort()[::-1]
        eigenvalues = self.eigen_values[idx][:self.k]
        eigenvectors = self.eigen_vectors[:, idx][:, :self.k]
        # Project the original dataset X onto the low-dimensional space
        X_transformed = X.dot(eigenvectors)
        return X_transformed


def main():
    # Load the dataset
    data = datasets.load_iris()
    X = data.data
    y = data.target
    # Project the dataset X onto the low-dimensional space
    X_trans = PCA().transform(X)
    x1 = X_trans[:, 0]
    x2 = X_trans[:, 1]
    print(X[0:2])
    cmap = plt.get_cmap('viridis')
    colors = [cmap(i) for i in np.linspace(0, 1, len(np.unique(y)))]
    class_distr = []
    # Plot the different class distributions
    for i, l in enumerate(np.unique(y)):
        _x1 = x1[y == l]
        _x2 = x2[y == l]
        _y = y[y == l]
        class_distr.append(plt.scatter(_x1, _x2, color=colors[i]))
    # Add a legend (one entry per unique class)
    plt.legend(class_distr, np.unique(y), loc=1)
    # Axis labels
    plt.xlabel('Principal Component 1')
    plt.ylabel('Principal Component 2')
    plt.show()


if __name__ == "__main__":
    main()
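As an optional sanity check not in the original post, the hand-rolled PCA above can be compared with scikit-learn's PCA. Our transform projects uncentered data, and eigenvectors are only defined up to sign, so the two projections should agree after centering and up to a per-component sign flip. A minimal sketch, assuming the PCA class above is defined in the same session:

# Sanity check: compare the hand-rolled PCA with scikit-learn's.
from sklearn import datasets
from sklearn.decomposition import PCA as SklearnPCA
import numpy as np

X = datasets.load_iris().data
ours = PCA().transform(X)                          # class defined above, k = 2
ref = SklearnPCA(n_components=2).fit_transform(X)  # sklearn centers X internally
# Center our projection so both live in the same coordinates,
# then compare each component allowing for a sign flip.
ours_centered = ours - ours.mean(axis=0)
for col in range(2):
    match = (np.allclose(ours_centered[:, col], ref[:, col], atol=1e-8) or
             np.allclose(ours_centered[:, col], -ref[:, col], atol=1e-8))
    print('component', col, 'matches up to sign:', match)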

  

kPCA

1. Kernel Principal Component Analysis (kPCA):

1) In the real world, not all data are linearly separable.
2) Converting such data into a linear problem with LDA or PCA is not a good approach.

3) Linearly separable vs. non-linearly separable

 

2. Introducing kernel PCA:

kPCA maps non-linear data into a higher-dimensional space, where standard PCA can then be applied to project it onto another, lower-dimensional space.
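Crucially, the mapping phi to the high-dimensional space is never computed explicitly. The kernel trick replaces every inner product between mapped samples with a kernel evaluation on the original inputs, so only the n x n kernel matrix is needed. In LaTeX notation:

K_{ij} = \phi(x_i)^\top \phi(x_j) = k(x_i, x_j), \qquad k_{\mathrm{RBF}}(x_i, x_j) = \exp\left(-\gamma \lVert x_i - x_j \rVert^2\right)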

3. Principle:
Define a nonlinear mapping function that forms nonlinear combinations of the original features, mapping the original d-dimensional dataset into a higher, k-dimensional feature space. Commonly used kernels (sketched in code after this list):

1) Polynomial kernel
2) Hyperbolic tangent (sigmoid) kernel
3) Radial basis function (RBF) kernel, i.e. the Gaussian kernel
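As a rough illustration (not from the original post), here are minimal sketches of the three kernels as plain Python functions. The hyperparameter names (degree, coef0, gamma) follow the common convention also used by scikit-learn but are illustrative here:

import numpy as np

def polynomial_kernel(x, y, degree=3, coef0=1.0):
    # k(x, y) = (x . y + c)^d
    return (np.dot(x, y) + coef0) ** degree

def tanh_kernel(x, y, kappa=1.0, coef0=0.0):
    # Hyperbolic tangent (sigmoid) kernel: k(x, y) = tanh(kappa * x . y + c)
    return np.tanh(kappa * np.dot(x, y) + coef0)

def rbf_kernel(x, y, gamma=1.0):
    # RBF (Gaussian) kernel: k(x, y) = exp(-gamma * ||x - y||^2)
    return np.exp(-gamma * np.sum((x - y) ** 2))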


Algorithm flow of kPCA with an RBF kernel:

1) Compute the kernel (similarity) matrix K, with entries K_ij = exp(-gamma * ||x_i - x_j||^2).
2) Center K: K' = K - 1_N K - K 1_N + 1_N K 1_N, where 1_N is an N x N matrix with every entry equal to 1/N. Centering is needed because the mapped features are not guaranteed to have zero mean in the feature space.
3) Collect the eigenvectors of the centered kernel matrix corresponding to the k largest eigenvalues. Unlike standard PCA, these eigenvectors are already the projected samples, not projection axes.

Python code:

from scipy.spatial.distance import pdist, squareform
from numpy.linalg import eigh
import numpy as np


def rbf_kernel_pca(X, gamma, n_components):
    """
    RBF kernel PCA implementation.

    Parameters
    ------------
    X: {NumPy ndarray}, shape = [n_samples, n_features]
    gamma: float
        Tuning parameter of the RBF kernel
    n_components: int
        Number of principal components to return

    Returns
    ------------
    X_pc: {NumPy ndarray}, shape = [n_samples, k_features]
        Projected dataset
    """
    # Calculate pairwise squared Euclidean distances
    # in the MxN dimensional dataset.
    sq_dists = pdist(X, 'sqeuclidean')
    # Convert pairwise distances into a square matrix.
    mat_sq_dists = squareform(sq_dists)
    # Compute the symmetric kernel matrix.
    K = np.exp(-gamma * mat_sq_dists)
    # Center the kernel matrix.
    N = K.shape[0]
    one_n = np.ones((N, N)) / N
    K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
    # Obtain eigenpairs from the centered kernel matrix;
    # numpy.linalg.eigh returns them in ascending order.
    eigvals, eigvecs = eigh(K)
    # Collect the top k eigenvectors (projected samples)
    X_pc = np.column_stack([eigvecs[:, -i]
                            for i in range(1, n_components + 1)])
    return X_pc


import matplotlib.pyplot as plt
from sklearn.datasets import make_moons

X, y = make_moons(n_samples=100, random_state=123)
plt.scatter(X[y == 0, 0], X[y == 0, 1], color='red', marker='^', alpha=0.5)
plt.scatter(X[y == 1, 0], X[y == 1, 1], color='blue', marker='o', alpha=0.5)
plt.tight_layout()
# plt.savefig('./figures/half_moon_1.png', dpi=300)
plt.show()

# Standard PCA applied directly
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

scikit_pca = PCA(n_components=2)
X_spca = scikit_pca.fit_transform(X)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_spca[y == 0, 0], X_spca[y == 0, 1],
              color='red', marker='^', alpha=0.5)
ax[0].scatter(X_spca[y == 1, 0], X_spca[y == 1, 1],
              color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_spca[y == 0, 0], np.zeros((50, 1)) + 0.02,
              color='red', marker='^', alpha=0.5)
ax[1].scatter(X_spca[y == 1, 0], np.zeros((50, 1)) - 0.02,
              color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.tight_layout()
# plt.savefig('./figures/half_moon_2.png', dpi=300)
plt.show()

# Kernel PCA
from matplotlib.ticker import FormatStrFormatter

X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_kpca[y == 0, 0], X_kpca[y == 0, 1],
              color='red', marker='^', alpha=0.5)
ax[0].scatter(X_kpca[y == 1, 0], X_kpca[y == 1, 1],
              color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_kpca[y == 0, 0], np.zeros((50, 1)) + 0.02,
              color='red', marker='^', alpha=0.5)
ax[1].scatter(X_kpca[y == 1, 0], np.zeros((50, 1)) - 0.02,
              color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
ax[0].xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
ax[1].xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
plt.tight_layout()
# plt.savefig('./figures/half_moon_3.png', dpi=300)
plt.show()

# scikit-learn KernelPCA
from sklearn.decomposition import KernelPCA

X, y = make_moons(n_samples=100, random_state=123)
scikit_kpca = KernelPCA(n_components=2, kernel='rbf', gamma=15)
X_skernpca = scikit_kpca.fit_transform(X)
plt.scatter(X_skernpca[y == 0, 0], X_skernpca[y == 0, 1],
            color='red', marker='^', alpha=0.5)
plt.scatter(X_skernpca[y == 1, 0], X_skernpca[y == 1, 1],
            color='blue', marker='o', alpha=0.5)
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.tight_layout()
# plt.savefig('./figures/scikit_kpca.png', dpi=300)
plt.show()

  

Reference: https://blog.csdn.net/weixin_40604987/article/details/79632888

 

Reposted from: https://www.cnblogs.com/Allen-rg/p/11414972.html
