# "赞" / "踩" (like / dislike) — leftover vote-button labels from the web page
# this code was copied from; not part of the program.
import os
import time
import traceback
import requests
import re
from PIL import Image
def img_get(save_path, word, epoch):
    """
    Crawl images from Baidu image search and save them as JPEG files.

    :param save_path: directory to save images into (created if missing)
    :param word: search keyword
    :param epoch: number of result pages to fetch (~60 images per page)
    :return: None
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36 Edg/88.0.705.56'
    }
    max_pages = int(epoch)
    page_offset = 0  # Baidu 'pn' pagination offset, advances 20 per page
    img_index = 0    # sequential file name for saved images
    # Create the target directory once, up front, instead of per image.
    if not os.path.isdir(save_path):
        os.makedirs(save_path)
    while True:
        time.sleep(1)  # throttle requests to avoid being blocked
        url = "https://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word={}&pn={}&ct=&ic=0&lm=-1&width=0&height=0".format(
            word, page_offset)
        try:
            # FIX: added timeout + status check — the original could hang
            # forever or parse an error page.
            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()
            urls = re.findall('"objURL":"(.*?)"', response.text)
        except requests.RequestException:
            traceback.print_exc()  # skip this result page on network failure
            urls = []
        for img_url in urls:
            print(img_index)  # progress: index of the image being saved
            try:
                # FIX: per-image error handling — one dead link must not
                # abort the whole crawl (the original crashed here).
                img_resp = requests.get(img_url, headers=headers, timeout=10)
                img_resp.raise_for_status()
            except requests.RequestException:
                continue
            with open(os.path.join(save_path, "{}.jpg".format(img_index)), 'wb') as f:
                f.write(img_resp.content)
            img_index = img_index + 1
        page_offset = page_offset + 20
        if (page_offset / 20) >= max_pages:
            break
def img_chuli(root_path):
    """
    Clean a folder of downloaded images: delete files that PIL cannot open
    (corrupt / non-image downloads) and images smaller than 50x50 px.

    :param root_path: directory containing the image files
    :return: None
    """
    # FIX: collect bad paths across ALL files. The original reset this list
    # inside the loop, so only the last file's entries were ever deleted,
    # and it leaked state through `global`.
    bad_paths = []
    for name in os.listdir(root_path):
        path = os.path.join(root_path, name)
        print("正在删除文件夹:", path)
        if not os.path.isfile(path):
            continue  # FIX: skip sub-directories; the original `return` aborted the whole scan
        try:
            with Image.open(path) as img:  # Image.open raises on unreadable files
                w, h = img.size
            if w < 50 or h < 50:  # tiny images are almost always junk thumbnails
                bad_paths.append(path)
                print('成功保存特小图片路径:{}'.format(name))
        except Exception:
            # Unreadable / corrupt image: mark it for removal.
            # (The original also called img.close() here, which could itself
            # raise because img may never have been assigned.)
            bad_paths.append(path)
            print('成功保存错误图片路径:{}'.format(name))
    print("开始删除需删除的图片")
    for bad in bad_paths:
        os.remove(bad)
        print("已删除:", bad)
def img_sort(root_path, word):
    """
    Renumber images sequentially and move them into root_path/word.

    Each file becomes <n>_<word>.jpg inside the new sub-folder. Note that
    os.rename MOVES the file — the original location is emptied.

    :param root_path: directory containing the loose image files
    :param word: category name; used as destination sub-folder and file suffix
    :return: None
    """
    entries = os.listdir(root_path)  # snapshot BEFORE creating the destination folder
    save_path = os.path.join(root_path, word)
    if not os.path.isdir(save_path):
        os.makedirs(save_path)
    counter = 0
    for entry in entries:
        src = os.path.join(os.path.abspath(root_path), entry)
        if not os.path.isfile(src):
            continue  # FIX: skip directories; the original `return` aborted the whole sort
        counter += 1  # FIX: only count real files so numbering has no gaps
        new_name = os.path.join(os.path.abspath(save_path),
                                str(counter) + '_{}.jpg'.format(word))
        os.rename(src, new_name)  # NOTE: rename moves (removes) the original
def img_class(root_path, save_path=None):
    """
    Write one label .txt file per image, grouped by class sub-folder.

    For every image <cls>/<img>.jpg under root_path, a file <cls>/<img>.txt
    is written under save_path whose content is the class (folder) name.

    :param root_path: directory whose sub-folders are class names containing images
    :param save_path: directory to write label folders into; defaults to
                      root_path. (FIX: the original referenced an undefined
                      global `save_path` and raised NameError when called.)
    :return: None
    """
    if save_path is None:
        save_path = root_path
    print("分类存放")
    for name in os.listdir(root_path):  # each sub-folder is one class
        class_dir = os.path.join(root_path, name)
        if not os.path.isdir(class_dir):
            continue  # ignore stray files at the top level
        txt_dir = os.path.join(save_path, name)  # label sub-folder for this class
        os.makedirs(txt_dir, exist_ok=True)
        for img_name in os.listdir(class_dir):
            label_name = img_name.split(".jpg")[0] + '.txt'  # matching label file name
            with open(os.path.join(txt_dir, label_name), "w") as f:
                f.write(name)  # label content is the class (folder) name
                print(f.name)
def getimg(save_path, word, epoch):
    """
    End-to-end image collection pipeline for one keyword.

    Crawls images from Baidu, removes unreadable/undersized files, then
    renumbers the survivors into a category sub-folder.

    :param save_path: directory to store the downloaded images
    :param word: keyword to search and download
    :param epoch: number of crawl rounds (one round is ~60 images)
    :return: None
    """
    # Step 1: crawl raw images.
    img_get(save_path, word, epoch)
    # Step 2: drop files that are corrupt or too small.
    img_chuli(save_path)
    # Step 3: rename sequentially and move into save_path/word.
    img_sort(save_path, word)
    # img_class(save_path) is intentionally not part of this pipeline.
if __name__ == "__main__":
    # Example run: download one round (~60 images) of dog pictures.
    save_path = "D:/pytorch/img"  # destination folder for the crawl
    word = "狗"                   # search keyword ("dog")
    epoch = 1                     # number of crawl rounds
    getimg(save_path, word, epoch)
import time
from torch.utils.tensorboard import SummaryWriter
from torchvision.datasets import ImageFolder
from torchvision import transforms
from torch.utils.data import DataLoader
import torchvision.models as models
import torch.nn as nn
import torch
def train(path_train, path_valid, model_name):
    """
    Fine-tune an ImageNet-pretrained ResNet-18 on an ImageFolder dataset and
    save the model with the best validation accuracy to model/<model_name>.pth.

    :param path_train: training image folder (one sub-folder per class)
    :param path_valid: validation image folder (same layout)
    :param model_name: name used for the TensorBoard log dir and the saved .pth file
    :return: None
    """
    gpu_avai = torch.cuda.is_available()
    print("是否使用GPU训练:{}".format(gpu_avai))  # report whether the GPU is used
    if gpu_avai:
        print("GPU名称为:{}".format(torch.cuda.get_device_name()))
    # NOTE: heavy data augmentation can hurt results and slows training;
    # 2-3x augmentation is usually enough.
    normalize = transforms.Normalize(mean=[.5, .5, .5], std=[.5, .5, .5])  # scale to roughly [-1, 1]
    transform = transforms.Compose([
        transforms.Resize((64, 64)),
        transforms.ToTensor(),
        normalize
    ])
    dataset_train = ImageFolder(path_train, transform=transform)  # training set
    dataset_valid = ImageFolder(path_valid, transform=transform)  # validation/test set
    print(dataset_train.class_to_idx)  # class name -> label index mapping
    print(dataset_valid.class_to_idx)
    train_data_size = len(dataset_train)
    test_data_size = len(dataset_valid)
    print("训练数据集的长度为:{}".format(train_data_size))
    print("测试数据集的长度为:{}".format(test_data_size))
    # Standard torch data loaders.
    dataloader_train = DataLoader(dataset_train, batch_size=4, shuffle=True, num_workers=0, drop_last=True)
    dataloader_test = DataLoader(dataset_valid, batch_size=4, shuffle=True, num_workers=0, drop_last=True)
    # 2. Model: transfer learning from pretrained weights.
    model_ft = models.resnet18(pretrained=True)
    in_features = model_ft.fc.in_features
    # FIX: the classifier head now follows the dataset's class count instead
    # of being hard-coded to 6, so the saved model matches the actual labels.
    num_classes = len(dataset_train.classes)
    model_ft.fc = nn.Sequential(nn.Linear(in_features, 36),
                                nn.Linear(36, num_classes))
    # To freeze the convolutional layers instead of fine-tuning everything:
    # for i, para in enumerate(model_ft.parameters()):
    #     if i < 18:
    #         para.requires_grad = False
    if gpu_avai:
        model_ft = model_ft.cuda()  # move the model to the GPU
    # 3. Loss and optimizer.
    loss_fn = nn.CrossEntropyLoss()
    if gpu_avai:
        loss_fn = loss_fn.cuda()  # move the loss to the GPU
    learn_rate = 0.01  # learning rate (tunable)
    optimizer = torch.optim.SGD(model_ft.parameters(), lr=learn_rate, momentum=0.01)
    total_train_step = 0
    total_test_step = 0
    epoch = 50  # number of training epochs
    writer = SummaryWriter("logs/" + model_name)
    os.makedirs("model", exist_ok=True)  # FIX: torch.save below fails if this dir is missing
    best_acc = -1
    ss_time = time.time()
    for i in range(epoch):
        start_time = time.time()
        print("--------第{}轮训练开始---------".format(i + 1))
        model_ft.train()
        for data in dataloader_train:
            imgs, targets = data
            if gpu_avai:
                imgs = imgs.cuda()
                targets = targets.cuda()
            outputs = model_ft(imgs)
            loss = loss_fn(outputs, targets)
            optimizer.zero_grad()  # reset gradients
            loss.backward()        # back-propagate
            optimizer.step()       # apply the update
            total_train_step = total_train_step + 1
            if total_train_step % 100 == 0:  # periodic progress report
                end_time = time.time()
                print("使用GPU训练100次的时间为:{}".format(end_time - start_time))
                print("训练次数:{},loss:{}".format(total_train_step, loss.item()))
        model_ft.eval()
        total_test_loss = 0
        total_accuracy = 0
        with torch.no_grad():  # no gradient tracking during validation
            for data in dataloader_test:
                imgs, targets = data
                if gpu_avai:
                    imgs = imgs.cuda()
                    targets = targets.cuda()
                outputs = model_ft(imgs)
                loss = loss_fn(outputs, targets)
                total_test_loss = total_test_loss + loss.item()
                accuracy = (outputs.argmax(1) == targets).sum()
                total_accuracy = total_accuracy + accuracy
        print("整体测试集上的loss:{}(越小越好,与上面的loss无关此为测试集的总loss)".format(total_test_loss))
        print("整体测试集上的正确率:{}(越大越好)".format(total_accuracy / len(dataset_valid)))
        # FIX: the value logged here is accuracy, so tag it as such
        # (it was previously mislabeled "valid_loss").
        writer.add_scalar("valid_acc", (total_accuracy / len(dataset_valid)), (i + 1))
        total_test_step = total_test_step + 1
        if total_accuracy > best_acc:  # keep only the best model across epochs
            print("已修改模型")
            best_acc = total_accuracy
            torch.save(model_ft, "model/{}.pth".format(model_name))
    ee_time = time.time()
    zong_time = ee_time - ss_time
    print(
        "训练总共用时:{}h:{}m:{}s".format(int(zong_time // 3600), int((zong_time % 3600) // 60),
                                     int(zong_time % 60)))  # total wall-clock time
    writer.close()
if __name__ == '__main__':
    # Train the cat/dog/mouse classifier from the two image folders below.
    path_train = 'D:/pytorch/train'  # training images, one folder per class
    path_valid = 'D:/pytorch/valid'  # validation images, same layout
    train(path_train, path_valid, '猫狗小白鼠')
import os
import torch
import torchvision
from PIL import Image
from torch import nn
def run(root_path, model_name, data_class):
    """
    Classify every image in a folder with a previously trained model and
    print the predicted class for each file.

    :param root_path: directory of images to classify
    :param model_name: name of the saved model file (loads model/<model_name>.pth)
    :param data_class: class names ordered by training-time folder index,
                       e.g. ['小白鼠', '狗', '猫']
    :return: None
    """
    # FIX: load the model and build the transform ONCE. The original rebuilt
    # both inside the loop (one full model load per image) and also
    # constructed an unused ResNet-18 that was immediately discarded —
    # torch.load restores the complete pickled model on its own.
    model = torch.load("{}.pth".format('model/' + model_name), map_location=torch.device("cpu"))
    model.eval()
    transform = torchvision.transforms.Compose([torchvision.transforms.Resize((64, 64)),
                                                torchvision.transforms.ToTensor()])
    i = 0  # counter of processed images
    for name in os.listdir(root_path):
        i = i + 1
        image_path = os.path.join(root_path, name)
        # FIX: convert('RGB') forces 3 channels so grayscale/RGBA images
        # no longer break the reshape below.
        image = Image.open(image_path).convert('RGB')
        image = transform(image)
        # Batch of one, same spatial size the model was trained with.
        image = torch.reshape(image, (1, 3, 64, 64))
        with torch.no_grad():
            output = model(image)
        print("第{}张图片【{}】预测为:{}".format(i, name, data_class[int(output.argmax(1))]))
if __name__ == '__main__':
    # Classify every image in the test folder with the saved model.
    root_path = "D:/pytorch/hik"         # folder of images to classify
    model_name = "model"                 # loads model/model.pth
    data_class = ['小白鼠', '狗', '猫']   # index order must match training folders
    run(root_path, model_name, data_class)
from utils_img.getimg import getimg
from utils_img.run import run
from utils_img.train import train
if __name__ == "__main__":
    # Full pipeline demo: crawl -> train -> predict.
    # The crawl and train steps are commented out; only prediction runs.
    save_path = "D:/pytorch/img"  # where crawled images would be stored
    word = "猫"                   # crawl keyword ("cat")
    epoch = 1                     # number of crawl rounds
    # getimg(save_path, word, epoch)
    # Training configuration (disabled).
    model_name = "猫狗小白鼠"
    path_train = 'D:/pytorch/train'
    path_valid = 'D:/pytorch/valid'
    # train(path_train, path_valid, model_name)
    # Prediction: classify everything in root_path with the saved model.
    root_path = "D:/pytorch/img"
    data_class = ['小白鼠', '狗', '猫']  # index order must match training folders
    run(root_path, model_name, data_class)
# Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。
# (site footer from the scraped web page; not part of the program)