赞
踩
目录
前几天偶然发现了一个超棒的人工智能学习网站,内容通俗易懂,讲解风趣幽默,简直让人欲罢不能。忍不住分享给大家,点击这里立刻跳转,开启你的AI学习之旅吧!
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score

# Load the iris dataset (150 samples, 4 features, 3 classes).
iris = load_iris()
X = iris.data
y = iris.target

# Split into train/test sets (70/30); fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Build a k-nearest-neighbors classifier with k=3 and fit it.
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)

# Predict labels for the held-out test set.
y_pred = knn.predict(X_test)

# Report classification accuracy as a percentage.
accuracy = accuracy_score(y_test, y_pred)
print(f"分类准确率: {accuracy * 100:.2f}%")
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

# Synthesize data from y = 4 + 3x plus Gaussian noise; fixed seed so the
# sample is reproducible.
np.random.seed(0)
X = 2 * np.random.rand(100, 1)
y = 4 + 3 * X + np.random.randn(100, 1)

# 80/20 train/test split with a fixed seed.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Fit ordinary least squares on the training data.
model = LinearRegression()
model.fit(X_train, y_train)

# Predict on the held-out points.
y_pred = model.predict(X_test)

# Plot test points (black) and the fitted line (blue).
plt.scatter(X_test, y_test, color='black')
plt.plot(X_test, y_pred, color='blue', linewidth=3)
plt.show()
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt

# Generate 300 2-D points around 4 Gaussian centers.
X, _ = make_blobs(n_samples=300, centers=4, cluster_std=0.60, random_state=0)

# Fit KMeans. random_state is pinned so cluster assignments are reproducible
# (the original relied on the nondeterministic default seed), and n_init is
# set explicitly to avoid the FutureWarning in recent scikit-learn releases.
kmeans = KMeans(n_clusters=4, n_init=10, random_state=0)
kmeans.fit(X)
y_kmeans = kmeans.predict(X)

# Plot points colored by cluster, with centroids overlaid in red.
plt.scatter(X[:, 0], X[:, 1], c=y_kmeans, s=50, cmap='viridis')
centers = kmeans.cluster_centers_
plt.scatter(centers[:, 0], centers[:, 1], c='red', s=200, alpha=0.75)
plt.show()
from sklearn.decomposition import PCA
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt

# Load the iris dataset (4-dimensional feature space, 3 classes).
iris = load_iris()
X = iris.data
y = iris.target

# Project the 4-D features onto the top 2 principal components.
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X)

# Scatter the projected points, colored by true class label.
plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y)
plt.xlabel('First Principal Component')
plt.ylabel('Second Principal Component')
plt.show()
import gym
import numpy as np

env = gym.make('CartPole-v1')
n_actions = env.action_space.n

# CartPole observations are 4 continuous floats, so they cannot index a
# Q-table directly. (The original code sized the table by the observation
# *dimension* and indexed it with a float array, which is incorrect.)
# Discretize each observation dimension into a fixed number of bins.
n_bins = 10
# Practical clipping bounds per dimension; the velocity terms are unbounded
# in the environment spec, so use conventional finite limits.
state_bounds = [(-4.8, 4.8), (-4.0, 4.0), (-0.418, 0.418), (-4.0, 4.0)]
bin_edges = [np.linspace(lo, hi, n_bins - 1) for lo, hi in state_bounds]


def discretize(obs):
    """Map a continuous observation to a tuple of bin indices."""
    return tuple(int(np.digitize(o, edges)) for o, edges in zip(obs, bin_edges))


q_table = np.zeros((n_bins,) * 4 + (n_actions,))
learning_rate = 0.1
discount_factor = 0.99
epsilon = 1.0
epsilon_decay = 0.99
episodes = 1000

for episode in range(episodes):
    reset_out = env.reset()
    # gym >= 0.26 returns (obs, info); older versions return obs only.
    obs = reset_out[0] if isinstance(reset_out, tuple) else reset_out
    state = discretize(obs)
    done = False
    while not done:
        # Epsilon-greedy action selection.
        if np.random.rand() < epsilon:
            action = int(np.random.choice(n_actions))
        else:
            action = int(np.argmax(q_table[state]))

        step_out = env.step(action)
        if len(step_out) == 5:
            # gym >= 0.26: (obs, reward, terminated, truncated, info)
            next_obs, reward, terminated, truncated, _ = step_out
            done = terminated or truncated
        else:
            # legacy API: (obs, reward, done, info)
            next_obs, reward, done, _ = step_out
        next_state = discretize(next_obs)

        # Off-policy TD (Q-learning) update toward the greedy backup value.
        best_next = np.max(q_table[next_state])
        q_table[state + (action,)] += learning_rate * (
            reward + discount_factor * best_next - q_table[state + (action,)]
        )
        state = next_state

    # Decay exploration over episodes.
    epsilon *= epsilon_decay

print("训练完成!")
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical

# Load MNIST and flatten each 28x28 image into a 784-vector scaled to [0, 1].
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784).astype('float32') / 255
X_test = X_test.reshape(10000, 784).astype('float32') / 255
# One-hot encode the 10 digit classes.
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

# Simple fully-connected network: 784 -> 128 -> 64 -> 10.
model = Sequential([
    Dense(128, activation='relu', input_shape=(784,)),
    Dense(64, activation='relu'),
    Dense(10, activation='softmax')
])

# Categorical cross-entropy pairs with the softmax output layer.
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Train for 10 epochs, reporting validation metrics on the test split.
model.fit(X_train, y_train, epochs=10, batch_size=32, validation_data=(X_test, y_test))
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical

# Load CIFAR-10 (32x32 RGB images, 10 classes) and scale pixels to [0, 1].
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train = X_train.astype('float32') / 255
X_test = X_test.astype('float32') / 255
# One-hot encode the 10 class labels.
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

# Two conv+pool stages followed by a small dense classifier head.
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Flatten(),
    Dense(64, activation='relu'),
    Dense(10, activation='softmax')
])

# Categorical cross-entropy pairs with the softmax output layer.
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Train for 10 epochs, reporting validation metrics on the test split.
model.fit(X_train, y_train, epochs=10, batch_size=32, validation_data=(X_test, y_test))
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, SimpleRNN, Dense
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing import sequence

# Load the IMDB sentiment dataset, keeping the 10k most frequent words,
# and pad/truncate every review to a fixed length of 500 tokens.
max_features = 10000
maxlen = 500
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=max_features)
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)

# Embedding -> SimpleRNN -> sigmoid for binary sentiment classification.
model = Sequential([
    Embedding(max_features, 32),
    SimpleRNN(32),
    Dense(1, activation='sigmoid')
])

# Binary cross-entropy pairs with the single sigmoid output unit.
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train for 10 epochs, reporting validation metrics on the test split.
model.fit(X_train, y_train, epochs=10, batch_size=32, validation_data=(X_test, y_test))
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader

# Normalize pixels to [-1, 1] so real images match the generator's Tanh range.
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])
# Download the MNIST training set and wrap it in a shuffled mini-batch loader.
mnist = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
dataloader = DataLoader(mnist, batch_size=64, shuffle=True)

# Generator network for the GAN.
class Generator(nn.Module):
    """Fully-connected generator: 100-dim noise vector -> 1x28x28 image in [-1, 1]."""

    def __init__(self):
        super(Generator, self).__init__()
        # Widening MLP: 100 -> 256 -> 512 -> 1024 -> 784.
        self.main = nn.Sequential(
            nn.Linear(100, 256),
            nn.ReLU(True),
            nn.Linear(256, 512),
            nn.ReLU(True),
            nn.Linear(512, 1024),
            nn.ReLU(True),
            nn.Linear(1024, 28*28),
            nn.Tanh()  # outputs in [-1, 1], matching the normalized real images
        )

    def forward(self, x):
        # Reshape the flat 784-vector into image format (N, 1, 28, 28).
        return self.main(x).view(-1, 1, 28, 28)

# Discriminator network for the GAN.
class Discriminator(nn.Module):
    """Fully-connected discriminator: 1x28x28 image -> probability of being real."""

    def __init__(self):
        super(Discriminator, self).__init__()
        # Narrowing MLP: 784 -> 1024 -> 512 -> 256 -> 1.
        self.main = nn.Sequential(
            nn.Linear(28*28, 1024),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(1024, 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(256, 1),
            nn.Sigmoid()  # probability in [0, 1] for BCELoss
        )

    def forward(self, x):
        # Flatten images to 784-vectors before the linear stack.
        return self.main(x.view(-1, 28*28))

# Instantiate the two networks.
generator = Generator()
discriminator = Discriminator()

# Binary cross-entropy loss; separate Adam optimizers for each network.
criterion = nn.BCELoss()
optimizer_g = optim.Adam(generator.parameters(), lr=0.0002)
optimizer_d = optim.Adam(discriminator.parameters(), lr=0.0002)

# Adversarial training loop.
for epoch in range(100):
    for i, (real_images, _) in enumerate(dataloader):
        batch_size = real_images.size(0)
        real_labels = torch.ones(batch_size, 1)
        fake_labels = torch.zeros(batch_size, 1)

        # ---- Train the discriminator ----
        optimizer_d.zero_grad()
        real_outputs = discriminator(real_images)
        d_loss_real = criterion(real_outputs, real_labels)
        d_loss_real.backward()

        z = torch.randn(batch_size, 100)
        fake_images = generator(z)
        # Detach so discriminator gradients do not flow into the generator.
        # (The original backpropagated through the generator here, wasting
        # computation and accumulating stale generator gradients each D step.)
        fake_outputs = discriminator(fake_images.detach())
        d_loss_fake = criterion(fake_outputs, fake_labels)
        d_loss_fake.backward()

        optimizer_d.step()

        # ---- Train the generator ----
        optimizer_g.zero_grad()
        z = torch.randn(batch_size, 100)
        fake_images = generator(z)
        fake_outputs = discriminator(fake_images)
        # The generator tries to make the discriminator output "real" (1).
        g_loss = criterion(fake_outputs, real_labels)
        g_loss.backward()

        optimizer_g.step()

    print(f'Epoch [{epoch+1}/100], d_loss: {d_loss_real.item() + d_loss_fake.item()}, g_loss: {g_loss.item()}')
# 省略的代码用于数据加载和预处理,后续构建和训练CNN模型
# 模型结构与前述CNN示例类似,但数据集和目标任务不同
# 省略的代码用于定义和训练深度Q网络(DQN)模型,自动驾驶任务环境为CarRacing-v0
# 省略的代码用于加载时间序列数据、构建和训练LSTM模型
# 省略的代码用于数据加载、特征工程和模型训练
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。