# k-nearest neighbors (KNN): a new point is classified from the labels of its
# nearest points in the training set. The n_neighbors parameter takes values
# such as [1, 3, 6, 9, ...]; smaller values give a more complex model, while
# larger values give a smoother decision boundary.
# (Based on "Introduction to Machine Learning with Python".)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
import mglearn
# Generate the small synthetic "forge" dataset (2 features, 2 classes).
x, y = mglearn.datasets.make_forge()

# Split into training and test sets with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0)

# Fit a KNN classifier that votes among the 3 nearest training points.
knn3 = KNeighborsClassifier(n_neighbors=3)
knn3.fit(x_train, y_train)

# Predict the test set and report the mean accuracy.
y_pred = knn3.predict(x_test)
print(knn3.score(x_test, y_test))
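
# A minimal sketch of the complexity claim in the comment above: refit the
# classifier for a few n_neighbors values (the list [1, 3, 6, 9] is only an
# illustrative choice) and compare training vs. test accuracy on the same split.
for n in [1, 3, 6, 9]:
    clf = KNeighborsClassifier(n_neighbors=n)
    clf.fit(x_train, y_train)
    print(f"n_neighbors={n}: "
          f"train accuracy={clf.score(x_train, y_train):.2f}, "
          f"test accuracy={clf.score(x_test, y_test):.2f}")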