当前位置:   article > 正文

kennard-stone算法实现样本集划分(ks算法)

ks算法

目录

一、 Kennard-Stone算法原理(KS算法)

二、Kennard-Stone算法作用

三、代码

四、对选出来的train样本使用T-SNE算法进行绘制

五、参考链接


一、 Kennard-Stone算法原理(KS算法)

KS算法原理:把所有的样本都看作训练集候选样本,依次从中挑选样本进训练集。首先选择彼此间欧氏距离最远的两个样本进入训练集;其后,对剩余的每一个样本,计算它到训练集内各已选样本的欧氏距离并取其中的最小值,再在所有剩余样本中选出该最小距离最大的样本加入训练集(即"最大化最小距离"准则,每次迭代选入一个样本),重复上述步骤直到训练集样本数量达到要求。

欧氏距离计算公式:

$$d(X_p, X_q) = \sqrt{\sum_{i=1}^{N} \left(x_{p,i} - x_{q,i}\right)^2}$$

其中 $X_p$、$X_q$ 表示两个不同的样本,$N$ 代表样本的光谱波点数量(即特征维数)。

二、Kennard-Stone算法作用

Kennard-Stone算法作用:用于数据集的划分,使用算法,将输入的数据集划分为训练集、测试集,并同时输出训练集和测试集在原样本集中的编号信息,方便样本的查找。

三、代码

版本1、返回样本索引

  1. # select samples using Kennard-Stone algorithm
  2. import numpy as np
  3. # --- input ---
  4. # X : dataset of X-variables (samples x variables)
  5. # k : number of samples to be selected
  6. #
  7. # --- output ---
  8. # selected_sample_numbers : selected sample numbers (training data)
  9. # remaining_sample_numbers : remaining sample numbers (test data)
  10. def kennardstonealgorithm(x_variables, k):
  11. x_variables = np.array(x_variables)
  12. original_x = x_variables
  13. distance_to_average = ((x_variables - np.tile(x_variables.mean(axis=0), (x_variables.shape[0], 1))) ** 2).sum(
  14. axis=1)
  15. max_distance_sample_number = np.where(distance_to_average == np.max(distance_to_average))
  16. max_distance_sample_number = max_distance_sample_number[0][0]
  17. selected_sample_numbers = list()
  18. selected_sample_numbers.append(max_distance_sample_number)
  19. remaining_sample_numbers = np.arange(0, x_variables.shape[0], 1)
  20. x_variables = np.delete(x_variables, selected_sample_numbers, 0)
  21. remaining_sample_numbers = np.delete(remaining_sample_numbers, selected_sample_numbers, 0)
  22. for iteration in range(1, k):
  23. selected_samples = original_x[selected_sample_numbers, :]
  24. min_distance_to_selected_samples = list()
  25. for min_distance_calculation_number in range(0, x_variables.shape[0]):
  26. distance_to_selected_samples = ((selected_samples - np.tile(x_variables[min_distance_calculation_number, :],
  27. (selected_samples.shape[0], 1))) ** 2).sum(
  28. axis=1)
  29. min_distance_to_selected_samples.append(np.min(distance_to_selected_samples))
  30. max_distance_sample_number = np.where(
  31. min_distance_to_selected_samples == np.max(min_distance_to_selected_samples))
  32. max_distance_sample_number = max_distance_sample_number[0][0]
  33. selected_sample_numbers.append(remaining_sample_numbers[max_distance_sample_number])
  34. x_variables = np.delete(x_variables, max_distance_sample_number, 0)
  35. remaining_sample_numbers = np.delete(remaining_sample_numbers, max_distance_sample_number, 0)
  36. return selected_sample_numbers, remaining_sample_numbers
  37. np.random.seed(0)
  38. a = np.random.random((100,125))
  39. b = np.random.randint(0,5,(100,))
  40. selected_sample_numbers, remaining_sample_numbers = kennardstonealgorithm(a,80)
  41. print(remaining_sample_numbers)

版本2、直接返回划分好的训练和测试样本

  1. import numpy as np
  2. def ks(x, y, test_size=0.2):
  3. """
  4. :param x: shape (n_samples, n_features)
  5. :param y: shape (n_sample, )
  6. :param test_size: the ratio of test_size (float)
  7. :return: spec_train: (n_samples, n_features)
  8. spec_test: (n_samples, n_features)
  9. target_train: (n_sample, )
  10. target_test: (n_sample, )
  11. """
  12. M = x.shape[0]
  13. N = round((1 - test_size) * M)
  14. samples = np.arange(M)
  15. D = np.zeros((M, M))
  16. for i in range((M - 1)):
  17. xa = x[i, :]
  18. for j in range((i + 1), M):
  19. xb = x[j, :]
  20. D[i, j] = np.linalg.norm(xa - xb)
  21. maxD = np.max(D, axis=0)
  22. index_row = np.argmax(D, axis=0)
  23. index_column = np.argmax(maxD)
  24. m = np.zeros(N)
  25. m[0] = np.array(index_row[index_column])
  26. m[1] = np.array(index_column)
  27. m = m.astype(int)
  28. dminmax = np.zeros(N)
  29. dminmax[1] = D[m[0], m[1]]
  30. for i in range(2, N):
  31. pool = np.delete(samples, m[:i])
  32. dmin = np.zeros((M - i))
  33. for j in range((M - i)):
  34. indexa = pool[j]
  35. d = np.zeros(i)
  36. for k in range(i):
  37. indexb = m[k]
  38. if indexa < indexb:
  39. d[k] = D[indexa, indexb]
  40. else:
  41. d[k] = D[indexb, indexa]
  42. dmin[j] = np.min(d)
  43. dminmax[i] = np.max(dmin)
  44. index = np.argmax(dmin)
  45. m[i] = pool[index]
  46. m_complement = np.delete(np.arange(x.shape[0]), m)
  47. spec_train = x[m, :]
  48. target_train = y[m]
  49. spec_test = x[m_complement, :]
  50. target_test = y[m_complement]
  51. return spec_train, spec_test, target_train, target_test
  52. np.random.seed(0)
  53. a = np.random.random((100,125))
  54. b = np.random.randint(0,5,(100,))
  55. print(b)
  56. spec_train, spec_test, target_train, target_test = ks(a,b)
  57. print(spec_train.shape,target_train.shape)
  58. print(spec_test.shape,target_test.shape)

四、对选出来的train样本使用T-SNE算法进行绘制

  1. # -*- coding: utf-8 -*- %reset -f
  2. import numpy as np
  3. import matplotlib.pyplot as plt
  4. from sklearn.manifold import TSNE
  5. # --- input ---
  6. # X : dataset of X-variables (samples x variables)
  7. # k : number of samples to be selected
  8. #
  9. # --- output ---
  10. # selected_sample_numbers : selected sample numbers (training data)
  11. # remaining_sample_numbers : remaining sample numbers (test data)
  12. def kennardstonealgorithm(x_variables, k):
  13. x_variables = np.array(x_variables)
  14. original_x = x_variables
  15. distance_to_average = ((x_variables - np.tile(x_variables.mean(axis=0), (x_variables.shape[0], 1))) ** 2).sum(
  16. axis=1)
  17. max_distance_sample_number = np.where(distance_to_average == np.max(distance_to_average))
  18. max_distance_sample_number = max_distance_sample_number[0][0]
  19. selected_sample_numbers = list()
  20. selected_sample_numbers.append(max_distance_sample_number)
  21. remaining_sample_numbers = np.arange(0, x_variables.shape[0], 1)
  22. x_variables = np.delete(x_variables, selected_sample_numbers, 0)
  23. remaining_sample_numbers = np.delete(remaining_sample_numbers, selected_sample_numbers, 0)
  24. for iteration in range(1, k):
  25. selected_samples = original_x[selected_sample_numbers, :]
  26. min_distance_to_selected_samples = list()
  27. for min_distance_calculation_number in range(0, x_variables.shape[0]):
  28. distance_to_selected_samples = ((selected_samples - np.tile(x_variables[min_distance_calculation_number, :],
  29. (selected_samples.shape[0], 1))) ** 2).sum(
  30. axis=1)
  31. min_distance_to_selected_samples.append(np.min(distance_to_selected_samples))
  32. max_distance_sample_number = np.where(
  33. min_distance_to_selected_samples == np.max(min_distance_to_selected_samples))
  34. max_distance_sample_number = max_distance_sample_number[0][0]
  35. selected_sample_numbers.append(remaining_sample_numbers[max_distance_sample_number])
  36. x_variables = np.delete(x_variables, max_distance_sample_number, 0)
  37. remaining_sample_numbers = np.delete(remaining_sample_numbers, max_distance_sample_number, 0)
  38. return selected_sample_numbers, remaining_sample_numbers
  39. # 对样本进行预处理并画图
  40. def plot_embedding(data, title):
  41. """
  42. :param data:数据集
  43. :param label:样本标签
  44. :param title:图像标题
  45. :return:图像
  46. """
  47. x_min, x_max = np.min(data, 0), np.max(data, 0)
  48. data = (data - x_min) / (x_max - x_min) # 对数据进行归一化处理
  49. fig = plt.figure() # 创建图形实例
  50. ax = plt.subplot(111) # 创建子图
  51. # 遍历所有样本
  52. for i in range(data.shape[0]):
  53. # 在图中为每个数据点画出标签
  54. plt.text(data[i, 0], data[i, 1], str(0), color=plt.cm.Set1(0 / 10),
  55. fontdict={'weight': 'bold', 'size': 7})
  56. plt.xticks() # 指定坐标的刻度
  57. plt.yticks()
  58. plt.title(title, fontsize=14)
  59. # 返回值
  60. return fig
  61. if __name__ == '__main__':
  62. np.random.seed(0)
  63. data = np.random.random((100,125))
  64. y = np.random.randint(0,5,(100,))
  65. number_of_selected_samples = 80
  66. idxs_selected_sample, idxs_remaining_sample = kennardstonealgorithm(data, number_of_selected_samples)
  67. data_slt = data[idxs_selected_sample]
  68. tsne = TSNE(n_components=2, init='pca', random_state=0)
  69. reslut = tsne.fit_transform(data_slt)
  70. fig = plot_embedding(reslut, 't-SNE Embedding of digits')
  71. plt.show()

五、参考链接

GitHub - hkaneko1985/kennardstonealgorithm: Sample selection using Kennard-Stone algorighm

KS算法样本集划分

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/从前慢现在也慢/article/detail/777870
推荐阅读
相关标签
  

闽ICP备14008679号