赞
踩
https://github.com/richzhang/PerceptualSimilarity
源代码链接如上
我的方法是首先安装lpips包
pip install lpips
安装成功之后,新建一个 .py 文件,命名为 lpips1.py
import cv2
import lpips
import torch
import torchvision.transforms as transforms

# Load the LPIPS metric models once, up front.
loss_fn_alex = lpips.LPIPS(net='alex')  # best forward scores
loss_fn_vgg = lpips.LPIPS(net='vgg')    # closer to "traditional" perceptual loss, when used for optimization


def _load_triplet(path):
    """Read a side-by-side composite image and split it into three crops.

    The composite is assumed to be three 512-px-wide panels laid out
    horizontally: (original, model output, ground-truth label).

    Returns a tuple of three float32 RGB tensors shaped (1, 3, H, 512)
    with values in [0, 1].
    """
    img = cv2.imread(path)
    if img is None:
        # cv2.imread silently returns None on a bad path; fail loudly instead.
        raise FileNotFoundError(f'cannot read image: {path}')
    # cv2 loads images as BGR, but the LPIPS backbones were trained on RGB.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # ToTensor converts HWC uint8 -> CHW float32 and rescales to [0, 1],
    # replacing the manual /255 and .to(torch.float32) steps.
    to_tensor = transforms.ToTensor()
    panels = (img[:, :512, :], img[:, 512:1024, :], img[:, 1024:1536, :])
    # unsqueeze(0) adds the batch dimension LPIPS expects: (N, 3, H, W).
    return tuple(to_tensor(p).unsqueeze(0) for p in panels)


test1_org, test1_res, test1_label = _load_triplet('1.png')
test2_org, test2_res, test2_label = _load_triplet('2.png')

# normalize=True tells LPIPS the inputs are in [0, 1] so it rescales them
# to the [-1, 1] range the networks expect; without it the distances are
# computed on mis-scaled inputs and are wrong.
with torch.no_grad():
    d11 = loss_fn_alex(test1_res, test1_label, normalize=True)
    d12 = loss_fn_alex(test1_res, test2_label, normalize=True)
    print('d11:', d11)
    print('d12:', d12)

    d22 = loss_fn_alex(test2_res, test2_label, normalize=True)
    d21 = loss_fn_alex(test2_res, test1_label, normalize=True)
    print('d22:', d22)
    print('d21:', d21)  # fixed label typo: was 'd121'
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。