Python has many handy utility functions, and using them well can greatly simplify your code. This post collects commonly used tricks so they can be looked up quickly when needed again; it will be updated continuously.
Offline SciPy installers:
Windows: https://www.lfd.uci.edu/~gohlke/pythonlibs/#scipy
Windows + Linux: https://sourceforge.net/projects/scipy.mirror/files/v1.7.2/
data = np.genfromtxt('./sonar.txt', delimiter=',', usecols=np.arange(0, 60))
Read a txt file with numpy's genfromtxt:
delimiter: the field separator
usecols: which columns to read
Generate a (2, 2) matrix of values uniformly distributed in [0, 1):
u = np.random.uniform(0, 1, (2, 2))
Generate k distinct random integers in [0, 60) (uses the standard-library random module):
Index = random.sample(range(0, 60), k)
cx = max(label_list, key=label_list.count)  # the element that appears most often in label_list
nonzero_index = np.nonzero(arr)  # indices of the non-zero elements of arr
More practical examples of this function can be found at:
https://www.cnblogs.com/pengzhonglian/p/11613336.html
Import the library:
import matplotlib.pyplot as plt
plt.figure(1)
plt.scatter(x0[:, 0], x0[:, 1], c='r', marker='o', label='类别一')  # scatter draws a scatter plot
plt.scatter(x1[:, 0], x1[:, 1], c='g', marker='o', label='类别二')
plt.xlabel('x轴标签')
plt.ylabel('y轴标签')
plt.title('图片标题')
plt.legend(loc=2)  # put the legend in the upper-left corner
plt.rcParams['font.sans-serif'] = ['SimHei']  # allow Chinese text to render
plt.savefig('./保存名')  # save the figure to disk
plt.show()  # display the figure
If there is no nesting, copy() is enough;
if there is nesting, you must use copy.deepcopy(obj).
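A minimal sketch of the difference (the nested list below is just illustrative data):
import copy
a = [[1, 2], [3, 4]]  # a nested list
shallow = a.copy()  # shallow copy: the inner lists are still shared with a
deep = copy.deepcopy(a)  # deep copy: the inner lists are duplicated as well
a[0][0] = 99
print(shallow[0][0])  # 99 -- the shallow copy sees the change
print(deep[0][0])  # 1 -- the deep copy does not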
This comes up a lot. There are two ways to do it: write it by hand, or call the corresponding numpy routine.
I prefer writing it by hand, which gives more control over the result.
# Euclidean distance between points x and y
def dist_cal(x, y):
return ((x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2) ** 0.5
Shuffle a sequence in place:
np.random.shuffle(rand_ch)
Cumulative sum of a sequence, commonly used in roulette-wheel selection:
q = p.cumsum()
For example, if p is [1, 2, 3], then q is [1, 3, 6].
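A small sketch of how the cumulative sums drive roulette-wheel selection (the fitness values are made up for illustration):
import numpy as np
fitness = np.array([1, 2, 3])
p = fitness / fitness.sum()  # selection probabilities
q = p.cumsum()  # cumulative sums: [1/6, 3/6, 6/6]
r = np.random.rand()  # random number in [0, 1)
selected = np.searchsorted(q, r)  # first index whose cumulative sum reaches r
print(selected)  # higher-fitness individuals are picked more often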
Generate a random float:
np.random.rand()
Generate a random integer:
np.random.randint()
Arguments set the size and range; rand() draws floats from [0, 1) by default, and randint() needs at least an upper bound.
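A quick sketch of both calls with explicit sizes and ranges:
import numpy as np
print(np.random.rand())  # one float in [0, 1)
print(np.random.rand(2, 3))  # a 2x3 array of floats in [0, 1)
print(np.random.randint(0, 10))  # one integer in [0, 10)
print(np.random.randint(0, 10, 5))  # five integers in [0, 10)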
index = np.argwhere(ind_a == 1)  # indices where ind_a equals 1
Given location = [(x1, y1), (x2, y2)],
x and y can be separated like this:
x, y = zip(*location)
Simple and very handy.
ls = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]  # the list
index = [2, 3, 6]  # indices to pick
[ls[i] for i in index]
Reversing with [::-1]:
a = np.array([[24, 20, 10, 22, 21, 4, 27, 6, 25, 1, 0, 28, 2, 17, 14, 7, 12, 16, 8, 23, 9, 3, 13, 11, 19, 18, 26, 5, 15],
              [24, 20, 10, 22, 21, 4, 27, 6, 25, 1, 0, 28, 2, 17, 14, 7, 12, 16, 8, 23, 9, 3, 13, 11, 19, 18, 26, 5, 15]])
a[0,1:4] = a[0,1:4][::-1]
Result: in a[0], the 20, 10, 22 become 22, 10, 20.
Dividing a list by a number directly raises an error; a neat workaround is a list comprehension.
Divide every element by 10:
my_list = [x / 10 for x in my_list]
I ran into this problem: two lists whose elements correspond one to one; when one list is sorted, the other should be reordered the same way so the pairing is preserved.
The actual scenario I had:
one list stores article titles, the other stores publication dates, and both must be sorted together by date:
title_list = ['文章1标题', '文章2']
time_List = ['2021-2-12', '2020-3-18']
title_time = zip(title_list, time_List)
sorted_title_time = sorted(title_time, key=lambda x: x[1])
result = zip(*sorted_title_time)
title_list, time_List = [list(x) for x in result]
print(title_list)
print(time_List)
Main idea: pack the two lists together with zip, sort, then unpack again with zip(*...).
I hit this need while practicing web scraping: to deter crawlers, some sites insert anomalous values into otherwise regular data, so a naive crawler raises an error and all the work so far is lost.
To prevent that, use exception handling to skip over the bad items:
for item in List:
    try:
        ...  # the work to do for each item goes here
    except Exception:
        continue  # skip the problematic item and move on
String slicing is routine, but here is a scenario I ran into: extracting the web page links from a string.
The helper function below can be called directly.
# slice the concrete article link out of an <a> tag string
def split_link(string):
start_string = 'http'
end_string = '.html'
sub_str = ""
start = string.find(start_string)
# as long as start is not -1, 'http' was found
while start != -1:
# find where the link ends
end = string.find(end_string, start)
# slice the string: end position = start of the end marker + its length
sub_str = string[start:end + len(end_string)]
# find the next starting position
# if there is no next start, the loop ends
start = string.find(start_string, end)
return sub_str
import time
print(time.strftime("%Y-%m-%d"))
Use astype to convert the element type of a numpy array.
For example, to integers:
X.astype(int)
To display Chinese text in legends and labels, add globally:
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ["SimHei"]
plt.rcParams["axes.unicode_minus"] = False
Use subplot to position each subplot and figsize to adjust the overall figure size:
plt.figure(figsize=(20, 15))
plt.subplot(2, 2, 1)
for i in range(len(label_pred)):
    plt.scatter(smile['smile'][i][0], smile['smile'][i][1], color=colors[label_pred[i]])
plt.title("原始数据")
plt.subplot(2, 2, 2)
for i in range(len(y)):
    plt.scatter(smile['smile'][i][0], smile['smile'][i][1], color=colors[y[i]])
plt.title("聚类后数据")
A slightly different way of writing it:
# create the subplots and display
fig, ax = plt.subplots(1, 3, figsize=(20, 20))
ax[0].imshow(img)
ax[0].set_title("子图标题1")
ax[1].imshow(out_img)
ax[1].set_title("子图标题2")
ax[2].imshow(out_img2)
ax[2].set_title("子图标题3")
plt.show()
fig.savefig(r"组合图名称.png")
import time
begin_time = time.time()
# the code being timed goes here
end_time = time.time()
print("程序花费时间{}秒".format(end_time-begin_time))
# plot a line chart
def plot_pic(x, y):
plt.plot(x, y, linewidth=1, color="orange", marker="o")
plt.xlabel("num_bits")
plt.ylabel("ACC (%)")
plt.savefig("./result.png")
plt.show()
with open(r'./result.txt', mode='a', encoding='utf-8') as f:
f.write(str(reward) + "\n")
# take the index of the largest value in each row
y_pred = []
for row in y_test:
y = np.argmax(row)
y_pred.append(y)
Reorder array axes with transpose: (3, 320, 640) -> (320, 640, 3)
print(img.shape) # (3, 320, 640)
print(img.transpose((1, 2, 0)).shape) # (320, 640, 3)
Note: the 0, 1, 2 refer to the axis indices of the original array.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
pipreqs . --encoding=utf8 --force
Create a new txt file, read the original file line by line, and add information to every line in bulk:
ff = open(r'D:\Desktop\ailab\task6\pa\submission.txt', 'w')  # open the output file in write mode
with open(r'D:\Desktop\ailab\task6\pa\pa_result.txt', 'r') as f:  # open the source file read-only
line = f.readlines()
for line_list in line:
line_new = line_list.replace('\n', '')  # strip the trailing newline
line_new = 'cat_12_test/' + line_new + '\n'
print(line_new)
ff.write(line_new)  # write into the new file
import os
path = r'F:\jittor提交\test_label'  # folder whose files will be renamed
pre_name = 'T'  # prefix to prepend to each file name
for filename in os.listdir(path):
os.chdir(path)
os.rename(filename, pre_name + filename)
Note: the conversion does not delete the original tif files; to remove them, run rm *.tif on Linux.
import cv2
import numpy as np
import os
def tif_to_png(image_path,save_path):
"""
:param image_path: *.tif image path
:param save_path: *.png image path
:return:
"""
img = cv2.imread(image_path, 3)
filename = image_path.split('/')[-1].split('.')[0]
# print(filename)
save_path = save_path + '/' + filename + '.png'
cv2.imwrite(save_path, img)
if __name__ == '__main__':
root_path = r'dataset/preprocessed/test_1024_200_1.0/images/'
save_path = r'dataset/preprocessed/test_1024_200_1.0/images'
image_files = os.listdir(root_path)
for image_file in image_files:
tif_to_png(root_path + image_file, save_path)
import os
def Read_all_images_file_DesignateName():
images_file_path = './images'
images_file_name = os.listdir(images_file_path)
images = []
for i in images_file_name:
images_file_names = i.split('.')[0]
images.append(images_file_names)
return images
def Read_all_labels_file_DesignateName():
labels_file_path = './labelTxt'
labels_file_name = os.listdir(labels_file_path)
labels = []
for l in labels_file_name:
labels_file_names = l.split('.')[0]
labels.append(labels_file_names)
return labels
if __name__ == '__main__':
images = Read_all_images_file_DesignateName()
labels = Read_all_labels_file_DesignateName()
set_images = set(images)
set_labels = set(labels)
print(set_images ^ set_labels)  # file stems that exist in one folder but not the other
Crop one large image into multiple small tiles:
from PIL import Image
import os.path
# the folder to traverse
rootdir = r'dataset/ceshi/images'
dis = 480
leap = 480
for parent, dirnames, filenames in os.walk(rootdir):  # walk over every image
filenames.sort()
for filename in filenames:
currentPath = os.path.join(parent, filename)
img = Image.open(currentPath)
width = img.size[0]
height = img.size[1]
Flag = True
i = j = 0
num = 0
for i in range(0, width, leap):
for j in range(0, height, leap):
box = (i, j, i+dis, j+dis)
image = img.crop(box)  # crop out the tile
image.save(r"dataset/ceshi/crop" + '/' + filename.split(".")[0] + "__" + str(num) + ".png")
num += 1
import pickle
# write
with open("H.pkl", "wb") as f:
pickle.dump(H, f, protocol=0)
# read
with open("H.pkl", "rb") as f:
H = pickle.load(f)
The protocol argument selects the serialization format. Protocol 0 is a human-readable ASCII format, so you can roughly make out the saved object when you open the file; the higher, binary protocols are more efficient but look like gibberish in a text editor (the smaller size comes from the binary encoding, not from compression). Recent Python 3 versions support protocols 0 through 5, and the default is a binary protocol rather than 0.
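A small sketch comparing protocol 0 with the highest available protocol (the dictionary is dummy data):
import os
import pickle
data = {"weights": list(range(1000))}  # dummy object to serialize
with open("h_p0.pkl", "wb") as f:
    pickle.dump(data, f, protocol=0)  # human-readable ASCII stream
with open("h_hi.pkl", "wb") as f:
    pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)  # compact binary stream
print(os.path.getsize("h_p0.pkl"), os.path.getsize("h_hi.pkl"))  # the binary file is noticeably smaller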
import os
import shutil
if __name__ == '__main__':
dir_path = "result/h"
img_path = "D:/Data/First"
copy_path = "D:/Data/ceshi"
task_list = []
for i in os.listdir(dir_path):
task_list.append(os.path.join('result', 'h', i))
for test_img_param in task_list:
file_name = test_img_param.split('.pkl')[0][-12:]
shutil.copy(os.path.join(img_path, file_name), os.path.join(copy_path, file_name))
Another, simpler version:
import os
import shutil
from tqdm import tqdm
rootdir = r"E:\Dataset1"
outputdir = r"E:\Dataset2"
if __name__ == '__main__':
for parent, dirnames, filenames in tqdm(os.walk(rootdir)):
for filename in filenames:
currentPath = os.path.join(parent, filename)
# the condition used to separate files
flag = currentPath[-5]
if flag == "T":
shutil.copy(currentPath, outputdir)
if __name__ == '__main__':
a = "(0.9, 0.999)"
b = eval(a)  # the string "(0.9, 0.999)" becomes the tuple (0.9, 0.999)
print(b)
print(type(b))
Used to strip redundant optimizer state from a model checkpoint after training stops early:
import torch
import os
from utils.general import LOGGER
def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()
# Strip optimizer from 'f' to finalize training, optionally save as 's'
x = torch.load(f, map_location=torch.device('cpu'))
if x.get('ema'):
x['model'] = x['ema'] # replace model with ema
for k in 'optimizer', 'best_fitness', 'ema', 'updates': # keys
x[k] = None
x['epoch'] = -1
x['model'].half() # to FP16
for p in x['model'].parameters():
p.requires_grad = False
torch.save(x, s or f)
mb = os.path.getsize(s or f) / 1E6 # filesize
LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB")
if __name__ == '__main__':
strip_optimizer('weights/best.pt', 'weights/best.pt')
import os
import cv2
from tqdm import tqdm
datadir = "E:/dataset"
Resize_size = 1024, 1024
path = os.path.join(datadir)
img_list = os.listdir(path)
for i in tqdm(img_list):
img_array = cv2.imread(os.path.join(path, i))
new_array = cv2.resize(img_array, Resize_size)
img_name = str(i)
save_path = datadir + '/' + str(i)
cv2.imwrite(save_path, new_array)
smap = imread("img.png");
img = rgb2gray(smap);
[m, n] = size(img);
set (gcf, 'Position', [0,0,n,m]);
imshow(img,'border','tight','initialmagnification','fit');
colormap(jet);
import os
import shutil
if __name__ == '__main__':
label_path = r"E:\Dataset\VOCdevkit\VOC2007\labels"
img_path = r"E:\Dataset\VOCdevkit\VOC2007\JPEGImages"
copy_path = r"E:\Dataset\VOCdevkit\VOC2007\output"
file_list = []
for i in os.listdir(label_path):
file_list.append(i[:-4])
for i in os.listdir(img_path):
if i[:-4] in file_list:
shutil.copy(os.path.join(img_path, i), os.path.join(copy_path, i))
Used in a project to separate out the visible-light data.
# Version 1
import os
import shutil
if __name__ == '__main__':
label_path = r"E:\Dataset\数据集\打标签任务1\无重名\labels"
img_path = r"E:\Dataset\数据集\打标签任务1\有重名重命名\img"
img_copy_path = r"E:\Dataset\数据集\可见光数据\img"
label_copy_path = r"E:\Dataset\数据集\可见光数据\label"
file_list = []
for xml_file in os.listdir(label_path):
if xml_file[-8:-4] == "Zoom" or xml_file[-8:-4] == "Wide":
file_list.append(xml_file[:-4])
for i in os.listdir(img_path):
if i[:-4] in file_list:
shutil.copy(os.path.join(img_path, i), os.path.join(img_copy_path, i))
shutil.copy(os.path.join(label_path, i[:-3] + "xml"), os.path.join(label_copy_path, i[:-3] + "xml"))
# Version 2
import os
import shutil
from tqdm import tqdm
imgdir = r"E:\Dataset\数据集\打标签任务2\DJI_202309131328_010_新建航点飞行3"
label_path = r"E:\Dataset\数据集\打标签任务2\DJI_202309131328_010_新建航点飞行3_labels"
img_copy_path = r"E:\Dataset\数据集\可见光数据\原始未裁剪\img"
label_copy_path = r"E:\Dataset\数据集\可见光数据\原始未裁剪\labels"
if __name__ == '__main__':
xml_list = []
for xml_file in os.listdir(label_path):
if xml_file[-5] == "W" or xml_file[-5] == "Z":
xml_list.append(xml_file[:-4])
shutil.copy(os.path.join(label_path, xml_file), os.path.join(label_copy_path, xml_file))
for parent, dirnames, filenames in tqdm(os.walk(imgdir)):
for filename in filenames:
currentPath = os.path.join(parent, filename)
if currentPath.split("\\")[-1].split(".")[0] in xml_list and currentPath.split("\\")[-1].split(".")[-1] == "JPG":
shutil.copy(currentPath, img_copy_path)
'''
Rename images that have duplicate names
'''
import os
import shutil
from tqdm import tqdm
rootdir = r"E:\Dataset\数据集\打标签任务1\有重名"
img_outputdir = r"E:\Dataset\数据集\打标签任务1\有重名重命名\img"
xml_outputdir = r"E:\Dataset\数据集\打标签任务1\有重名重命名\labels"
if __name__ == '__main__':
for parent, dirnames, filenames in tqdm(os.walk(rootdir)):
for filename in filenames:
currentPath = os.path.join(parent, filename)
parent_name = currentPath.split('\\')[-2]
file_name = currentPath.split('\\')[-1]
if file_name[-3:] == "xml":
# copy&rename xml
new_xmlname = parent_name + "_" + file_name
shutil.copy(currentPath, xml_outputdir)
os.rename(os.path.join(xml_outputdir, file_name), os.path.join(xml_outputdir, new_xmlname))
# copy&rename img
new_imgname = parent_name + "_" + file_name[:-3] + "jpg"
if os.path.exists(currentPath[:-3] + "jpg"):
shutil.copy(currentPath[:-3] + "jpg", img_outputdir)
os.rename(os.path.join(img_outputdir, file_name[:-3] + "jpg"), os.path.join(img_outputdir, new_imgname))
else:
shutil.copy(currentPath[:-3] + "tif", img_outputdir)
os.rename(os.path.join(img_outputdir, file_name[:-3] + "tif"), os.path.join(img_outputdir, new_imgname))
import os
import glob
import xml.etree.ElementTree as ET
def convert(size, box):
dw = 1.0 / size[0]
dh = 1.0 / size[1]
x = (box[0] + box[1]) / 2.0
y = (box[2] + box[3]) / 2.0
w = box[1] - box[0]
h = box[3] - box[2]
x = x * dw
w = w * dw
y = y * dh
h = h * dh
return (x, y, w, h)
if __name__ == '__main__':
# paths to the xml files and to the txt files to be written
txt_save_path = r'E:\Dataset\数据集\红外数据\labels'
xml_root_path = r'E:\Dataset\数据集\红外数据\labels_xml'
classes = ['car', 'person']
if not os.path.exists(txt_save_path):
os.makedirs(txt_save_path)
xml_paths = glob.glob(os.path.join(xml_root_path, '*.xml'))
for xml_id in xml_paths:
txt_id = os.path.join(txt_save_path, (xml_id.split('\\')[-1])[:-4] + '.txt')
txt = open(txt_id, 'w')
xml = open(xml_id, encoding='utf-8')
tree = ET.parse(xml)
root = tree.getroot()
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
for obj in root.iter('object'):
difficult = 0
if obj.find('difficult') != None:
difficult = obj.find('difficult').text
cls = obj.find('name').text
if cls not in classes or int(difficult) == 1:
continue
cls_id = classes.index(cls)
xmlbox = obj.find('bndbox')
b = (int(float(xmlbox.find('xmin').text)), int(float(xmlbox.find('xmax').text)),
int(float(xmlbox.find('ymin').text)), int(float(xmlbox.find('ymax').text)))
box = convert((w, h), b)
txt.write(str(cls_id) + ' ' + ' '.join([str(a) for a in box]) + '\n')
txt.close()
Similar to the above, except that for xml files the image width and height must be read and written, so the image folder also has to be specified.
import os
import xml.etree.ElementTree as ET
from PIL import Image
import numpy as np
img_path = 'data/img'  # image folder
labels_path = 'data/labels'  # folder with the txt labels
annotations_path = 'data/labels_xml'  # folder where the xml files will be written
labels = os.listdir(labels_path)
# classes
classes = ["car", "person"]  # class names
# image height, width, and depth
sh = sw = sd = 0
def write_xml(imgname, sw, sh, sd, filepath, labeldicts):
'''
imgname: image name without its extension
'''
# create the Annotation root node
root = ET.Element('Annotation')
# create the filename child node (no extension)
ET.SubElement(root, 'filename').text = str(imgname)
# create the size child node
sizes = ET.SubElement(root,'size')
ET.SubElement(sizes, 'width').text = str(sw)
ET.SubElement(sizes, 'height').text = str(sh)
ET.SubElement(sizes, 'depth').text = str(sd)
for labeldict in labeldicts:
objects = ET.SubElement(root, 'object')
ET.SubElement(objects, 'name').text = labeldict['name']
ET.SubElement(objects, 'pose').text = 'Unspecified'
ET.SubElement(objects, 'truncated').text = '0'
ET.SubElement(objects, 'difficult').text = '0'
bndbox = ET.SubElement(objects,'bndbox')
ET.SubElement(bndbox, 'xmin').text = str(int(labeldict['xmin']))
ET.SubElement(bndbox, 'ymin').text = str(int(labeldict['ymin']))
ET.SubElement(bndbox, 'xmax').text = str(int(labeldict['xmax']))
ET.SubElement(bndbox, 'ymax').text = str(int(labeldict['ymax']))
tree = ET.ElementTree(root)
tree.write(filepath, encoding='utf-8')
for label in labels:
with open(labels_path + "/" + label, 'r') as f:
img_id = os.path.splitext(label)[0]
contents = f.readlines()
labeldicts = []
for content in contents:
# NOTE: adjust the extension to your image format; .jpg is assumed here
img = np.array(Image.open(img_path + "/" + img_id + '.jpg'))
# the image's height, width, and depth
sh, sw, sd = img.shape[0], img.shape[1], img.shape[2]
content = content.strip('\n').split()
x = float(content[1])*sw
y = float(content[2])*sh
w = float(content[3])*sw
h = float(content[4])*sh
# coordinate conversion: x_center y_center width height -> xmin ymin xmax ymax
new_dict = {'name': classes[int(content[0])],
'difficult': '0',
'xmin': x+1-w/2,
'ymin': y+1-h/2,
'xmax': x+1+w/2,
'ymax': y+1+h/2
}
labeldicts.append(new_dict)
write_xml(img_id, sw, sh, sd, annotations_path + "/" + img_id + '.xml', labeldicts)
FFmpeg download: https://ffmpeg.org/download.html
Clipping command:
ffmpeg -i 20230713_164112_I.mp4 -ss 00:01:42 -to 00:02:06 -c copy output4.mp4
Combine the first three bands of the multispectral data directly into an RGB image:
import cv2
import numpy as np
# assuming the image files are named as follows
image_files = {
"450nm": "ms_450nm.tif",
"555nm": "ms_555nm.tif",
"660nm": "ms_660nm.tif",
"720nm": "ms_720nm.tif",
"750nm": "ms_750nm.tif",
"840nm": "ms_840nm.tif"
}
# read the images
img_450nm = cv2.imread(image_files["450nm"], cv2.IMREAD_GRAYSCALE)
img_555nm = cv2.imread(image_files["555nm"], cv2.IMREAD_GRAYSCALE)
img_660nm = cv2.imread(image_files["660nm"], cv2.IMREAD_GRAYSCALE)
# make sure all images have the same size
assert img_450nm.shape == img_555nm.shape == img_660nm.shape, "所有图像必须具有相同的尺寸"
# create an empty RGB image
rgb_image = np.zeros((img_450nm.shape[0], img_450nm.shape[1], 3), dtype=np.uint8)
# map each band to an RGB channel
rgb_image[:, :, 0] = img_450nm  # blue channel
rgb_image[:, :, 1] = img_555nm  # green channel
rgb_image[:, :, 2] = img_660nm  # red channel
# save the composite RGB image
cv2.imwrite("output_rgb_image.png", rgb_image)
print("RGB图像已保存为 output_rgb_image.png")
Register the bands first, then fuse them:
import cv2
import numpy as np
# assuming the image files are named as follows
image_files = {
"450nm": "ms_450nm.tif",
"555nm": "ms_555nm.tif",
"660nm": "ms_660nm.tif",
"720nm": "ms_720nm.tif",
"750nm": "ms_750nm.tif",
"840nm": "ms_840nm.tif"
}
def align_images(base_image, image_to_align):
# use the ORB detector and descriptor
orb = cv2.ORB_create()
# find keypoints and descriptors
keypoints1, descriptors1 = orb.detectAndCompute(base_image, None)
keypoints2, descriptors2 = orb.detectAndCompute(image_to_align, None)
# match descriptors with a brute-force matcher
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(descriptors1, descriptors2)
# sort matches by distance
matches = sorted(matches, key=lambda x: x.distance)
# keep the best 50 matches
good_matches = matches[:50]
# collect the matched point coordinates
points1 = np.zeros((len(good_matches), 2), dtype=np.float32)
points2 = np.zeros((len(good_matches), 2), dtype=np.float32)
for i, match in enumerate(good_matches):
points1[i, :] = keypoints1[match.queryIdx].pt
points2[i, :] = keypoints2[match.trainIdx].pt
# compute the homography
h, mask = cv2.findHomography(points2, points1, cv2.RANSAC)
# warp the image with the homography to register it
height, width = base_image.shape
aligned_image = cv2.warpPerspective(image_to_align, h, (width, height))
return aligned_image
# read the images
img_450nm = cv2.imread(image_files["450nm"], cv2.IMREAD_GRAYSCALE)
img_555nm = cv2.imread(image_files["555nm"], cv2.IMREAD_GRAYSCALE)
img_660nm = cv2.imread(image_files["660nm"], cv2.IMREAD_GRAYSCALE)
# register the other bands to the 450 nm band
img_555nm_aligned = align_images(img_450nm, img_555nm)
img_660nm_aligned = align_images(img_450nm, img_660nm)
# create an empty RGB image
rgb_image = np.zeros((img_450nm.shape[0], img_450nm.shape[1], 3), dtype=np.uint8)
# map each band to an RGB channel
rgb_image[:, :, 0] = img_450nm  # blue channel
rgb_image[:, :, 1] = img_555nm_aligned  # green channel
rgb_image[:, :, 2] = img_660nm_aligned  # red channel
# save the composite RGB image
cv2.imwrite("output_rgb_image.png", rgb_image)
print("RGB图像已保存为 output_rgb_image.png")
Extract the color statistics of image A and apply them to image B (a LUT-style color transfer):
import cv2
import numpy as np
def apply_color_transfer(source, target):
# convert both images to LAB color space
source_lab = cv2.cvtColor(source, cv2.COLOR_BGR2LAB)
target_lab = cv2.cvtColor(target, cv2.COLOR_BGR2LAB)
# compute the mean and standard deviation of each channel
source_mean, source_std = cv2.meanStdDev(source_lab)
target_mean, target_std = cv2.meanStdDev(target_lab)
# reshape so the statistics broadcast over the image
source_mean = source_mean[:, 0][np.newaxis, np.newaxis, :]
source_std = source_std[:, 0][np.newaxis, np.newaxis, :]
target_mean = target_mean[:, 0][np.newaxis, np.newaxis, :]
target_std = target_std[:, 0][np.newaxis, np.newaxis, :]
# normalize the target image
normalized_target = (target_lab - target_mean) / target_std
# apply the source image's mean and standard deviation
result_lab = normalized_target * source_std + source_mean
# clip pixel values to [0, 255]
result_lab = np.clip(result_lab, 0, 255).astype(np.uint8)
# increase brightness
l, a, b = cv2.split(result_lab)
l = cv2.add(l, 100)  # brightness boost; adjust the value as needed
result_lab = cv2.merge((l, a, b))
# convert back to BGR color space
result_bgr = cv2.cvtColor(result_lab, cv2.COLOR_LAB2BGR)
return result_bgr
# read the images
image_a = cv2.imread('output_rgb_image2.png')
image_b = cv2.imread('images/group1/kjg.png')
# resize image A to match image B
image_a_resized = cv2.resize(image_a, (image_b.shape[1], image_b.shape[0]))
# apply the color transfer
result_image = apply_color_transfer(image_a_resized, image_b)
# save the result; same size as the original image_b
cv2.imwrite('images/group1/hgp.png', result_image)
OMP: Error #15: Initializing libiomp5md.dll, but found libiomp5md.dll already initialized
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"