```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix

# Load the dataset
iris = datasets.load_iris()
X = iris.data[:, :2]        # use only the first two features
y = (iris.target != 0) * 1  # collapse the labels into a binary problem

# Standardize the features
# (note: fitting on the full dataset leaks test-set statistics; see the stricter variant below)
scaler = StandardScaler()
X = scaler.fit_transform(X)

# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Train the logistic regression model
clf = LogisticRegression()
clf.fit(X_train, y_train)

# Predict on the test set
y_pred = clf.predict(X_test)

# Accuracy
accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy: {accuracy}")

# Classification report
print("Classification Report:")
print(classification_report(y_test, y_pred))

# Confusion matrix
print("Confusion Matrix:")
print(confusion_matrix(y_test, y_pred))

# Plot the decision boundary
def plot_decision_boundary(clf, X, y):
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.01),
                         np.arange(y_min, y_max, 0.01))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, alpha=0.8)
    plt.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k', marker='o')
    plt.xlabel('Feature 1')
    plt.ylabel('Feature 2')
    plt.title('Logistic Regression Decision Boundary')
    plt.show()

plot_decision_boundary(clf, X, y)
```
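
One caveat in the script above: the scaler is fit on the full dataset before splitting, so statistics from the test set leak into preprocessing. A minimal sketch of the stricter pipeline, reusing `iris` and `y` from above and fitting the scaler on the training split only:

```python
# Split the *raw* features first, then fit the scaler on the training split only
X_train_raw, X_test_raw, y_train, y_test = train_test_split(
    iris.data[:, :2], y, test_size=0.3, random_state=42)

scaler = StandardScaler().fit(X_train_raw)  # statistics come from training data only
X_train = scaler.transform(X_train_raw)
X_test = scaler.transform(X_test_raw)       # test set reuses the training statistics
```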

Logistic regression is inherently a binary classification algorithm: the model produces a single probability for the positive class, so on its own it can only separate two categories. Multi-class problems are handled by extensions such as one-vs-rest or multinomial (softmax) regression, which is why the Iris labels are collapsed to two classes in the script above.
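The binary restriction comes from the model itself: logistic regression passes a linear score w·x + b through the sigmoid to get P(y = 1 | x) = 1 / (1 + e^-(w·x + b)). A minimal sketch reproducing the fitted model's probabilities by hand (assumes `clf` and `X_test` from the script above):

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

# Linear score w·x + b, squashed to a probability for the positive class
z = X_test @ clf.coef_.ravel() + clf.intercept_
p = sigmoid(z)

# Matches sklearn's own probability estimates for class 1
assert np.allclose(p, clf.predict_proba(X_test)[:, 1])
```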
The purpose of standardization is to rescale the feature data to a common range (typically zero mean and unit standard deviation), removing the scale differences between features. This matters for many machine learning algorithms, especially gradient-descent-based ones such as logistic regression and neural networks: standardization speeds up convergence and can improve model performance.
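As a sanity check, `StandardScaler` is just the per-column z-score transform z = (x − μ) / σ. A minimal sketch on a small synthetic array (the array values here are illustrative):

```python
import numpy as np
from sklearn.preprocessing import StandardScaler

X_demo = np.array([[1.0, 100.0],
                   [2.0, 200.0],
                   [3.0, 300.0]])

# Per-column z-score: subtract the column mean, divide by the column std
manual = (X_demo - X_demo.mean(axis=0)) / X_demo.std(axis=0)

# Identical to what StandardScaler computes (it also uses the population std)
assert np.allclose(manual, StandardScaler().fit_transform(X_demo))
```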