Basic pix2pix GAN (PyTorch) code for T1 → T2 translation

This implementation is based on the pix2pix code in https://github.com/eriklindernoren/PyTorch-GAN/.

Starting from that repo, we need to re-process the dataset. Also, the original code trains the generator first and the discriminator second; in general the discriminator is trained first, because that order tends to be more stable and effective in both theory and practice, so we change the order (along with some other code). Some reasons:

1. The discriminator's task is relatively simple. It only has to separate real samples from generated ones, which is an easier classification problem than generation, since it just distinguishes two kinds of samples. Training it first ensures it has enough capacity to reliably tell real from fake.
2. The generator depends on the discriminator's feedback. The generator's goal is to produce realistic samples that fool the discriminator; training the discriminator first gives feedback about generated-sample quality, which the generator can then use to adjust and gradually improve.
3. Training stability. Early in GAN training, the generator's samples can be very unrealistic. If the generator were trained first, the discriminator would trivially reject these low-quality samples, which can make the gradient updates unstable. Updating the discriminator first lets the generator adapt to its feedback, which stabilizes training.
4. Avoiding mode collapse. GAN training can suffer from mode collapse, where the generator only learns to produce a few kinds of samples rather than the whole data distribution. Training the discriminator first exposes it to more sample diversity, helping the generator avoid collapsing onto a few modes.

Although discriminator-first is the common practice, it is not the only correct choice; depending on the problem and the dataset, other schedules (e.g., training the generator first) can also be tried. Which order to use depends on the specific setting and on experimental results. A minimal sketch of the swapped update order is given just before the code listings below.

The dataset is BraTS2020; its description and preprocessing are covered in my knowledge-base links. Training currently runs on the GPU of a personal computer, using only the first 200 subjects of the training set, with 20% split off as a test set.

During training, every fixed number of batches we use matplotlib to display the input T1, the generated T2, and the real T2, and we also plot the generator and discriminator losses.

Comparison shows that adding the pixel-wise L1 loss makes the generated results noticeably better.

[Result figure after 10 epochs of training]

The test results at this point:

PSNR mean: 21.1621928375993
PSNR std: 1.1501189362634836
NMSE mean: 0.14920212
NMSE std: 0.03501928
SSIM mean: 0.5401535398016223
SSIM std: 0.019281408927679166
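To make the swapped order concrete, here is a minimal sketch of a single training step with the discriminator updated before the generator. It reuses the names from pix2pix.py below (generator, discriminator, optimizer_D, optimizer_G, criterion_GAN, criterion_pixelwise, valid, fake); it illustrates the ordering only and is not the complete loop:

    # --- Discriminator step first ---
    optimizer_D.zero_grad()
    fake_B = generator(real_A)                                                # T1 -> fake T2
    loss_real = criterion_GAN(discriminator(real_B, real_A), valid)
    loss_fake = criterion_GAN(discriminator(fake_B.detach(), real_A), fake)   # detach: D step must not update G
    loss_D = 0.5 * (loss_real + loss_fake)
    loss_D.backward()
    optimizer_D.step()

    # --- Generator step second, against the freshly updated D ---
    optimizer_G.zero_grad()
    loss_GAN = criterion_GAN(discriminator(fake_B, real_A), valid)
    loss_G = loss_GAN + 100 * criterion_pixelwise(fake_B, real_B)             # adversarial + weighted pixel-wise L1
    loss_G.backward()
    optimizer_G.step()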
Code

dataloader.py

# dataloader for fine-tuning
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from torchvision.utils import save_image
import torch.utils.data as data
import numpy as np
from PIL import ImageEnhance, Image
import random
import os


def cv_random_flip(img, label):
    # left right flip
    flip_flag = random.randint(0, 2)
    if flip_flag == 1:
        img = np.flip(img, 0).copy()
        label = np.flip(label, 0).copy()
    if flip_flag == 2:
        img = np.flip(img, 1).copy()
        label = np.flip(label, 1).copy()
    return img, label


def randomCrop(image, label):
    border = 30
    image_width = image.size[0]
    image_height = image.size[1]
    crop_win_width = np.random.randint(image_width - border, image_width)
    crop_win_height = np.random.randint(image_height - border, image_height)
    random_region = (
        (image_width - crop_win_width) >> 1, (image_height - crop_win_height) >> 1,
        (image_width + crop_win_width) >> 1, (image_height + crop_win_height) >> 1)
    return image.crop(random_region), label.crop(random_region)


def randomRotation(image, label):
    rotate = random.randint(0, 1)
    if rotate == 1:
        rotate_time = random.randint(1, 3)
        image = np.rot90(image, rotate_time).copy()
        label = np.rot90(label, rotate_time).copy()
    return image, label


def colorEnhance(image):
    bright_intensity = random.randint(7, 13) / 10.0
    image = ImageEnhance.Brightness(image).enhance(bright_intensity)
    contrast_intensity = random.randint(4, 11) / 10.0
    image = ImageEnhance.Contrast(image).enhance(contrast_intensity)
    color_intensity = random.randint(7, 13) / 10.0
    image = ImageEnhance.Color(image).enhance(color_intensity)
    sharp_intensity = random.randint(7, 13) / 10.0
    image = ImageEnhance.Sharpness(image).enhance(sharp_intensity)
    return image


def randomGaussian(img, mean=0.002, sigma=0.002):
    def gaussianNoisy(im, mean=mean, sigma=sigma):
        for _i in range(len(im)):
            im[_i] += random.gauss(mean, sigma)
        return im

    flag = random.randint(0, 3)
    if flag == 1:
        width, height = img.shape
        img = gaussianNoisy(img[:].flatten(), mean, sigma)
        img = img.reshape([width, height])
    return img


def randomPeper(img):
    flag = random.randint(0, 3)
    if flag == 1:
        noiseNum = int(0.0015 * img.shape[0] * img.shape[1])
        for i in range(noiseNum):
            randX = random.randint(0, img.shape[0] - 1)
            randY = random.randint(0, img.shape[1] - 1)
            if random.randint(0, 1) == 0:
                img[randX, randY] = 0
            else:
                img[randX, randY] = 1
    return img


class BraTS_Train_Dataset(data.Dataset):
    def __init__(self, source_modal, target_modal, img_size,
                 image_root, data_rate, sort=False, argument=False, random=False):
        self.source = source_modal
        self.target = target_modal
        self.modal_list = ['t1', 't2']
        self.image_root = image_root
        self.data_rate = data_rate
        self.images = [self.image_root + f for f in os.listdir(self.image_root) if f.endswith('.npy')]
        self.images.sort(key=lambda x: int(x.split(image_root)[1].split('.npy')[0]))
        self.img_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Resize(img_size)])
        self.gt_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Resize(img_size, Image.NEAREST)])
        self.sort = sort
        self.argument = argument
        self.random = random
        self.subject_num = len(self.images) // 60  # 60 slices were saved per subject
        if self.random == True:
            # shuffle at subject level, keeping each subject's 60 slices together
            subject = np.arange(self.subject_num)
            np.random.shuffle(subject)
            self.LUT = []
            for i in subject:
                for j in range(60):
                    self.LUT.append(i * 60 + j)
        # print('slice number:', self.__len__())

    def __getitem__(self, index):
        if self.random == True:
            index = self.LUT[index]
        npy = np.load(self.images[index])
        img = npy[self.modal_list.index(self.source), :, :]
        gt = npy[self.modal_list.index(self.target), :, :]
        if self.argument == True:
            img, gt = cv_random_flip(img, gt)
            img, gt = randomRotation(img, gt)
            img = img * 255
            img = Image.fromarray(img.astype(np.uint8))
            img = colorEnhance(img)
            img = img.convert('L')
        img = self.img_transform(img)
        gt = self.img_transform(gt)
        return img, gt

    def __len__(self):
        return int(len(self.images) * self.data_rate)


def get_loader(batchsize, shuffle, pin_memory=True, source_modal='t1', target_modal='t2',
               img_size=256, img_root='data/train/', data_rate=0.1, num_workers=8, sort=False,
               argument=False, random=False):
    dataset = BraTS_Train_Dataset(source_modal=source_modal, target_modal=target_modal,
                                  img_size=img_size, image_root=img_root, data_rate=data_rate, sort=sort,
                                  argument=argument, random=random)
    data_loader = data.DataLoader(dataset=dataset, batch_size=batchsize, shuffle=shuffle,
                                  pin_memory=pin_memory, num_workers=num_workers)
    return data_loader


# if __name__ == '__main__':
#     data_loader = get_loader(batchsize=1, shuffle=True, pin_memory=True, source_modal='t1',
#                              target_modal='t2', img_size=256, num_workers=8,
#                              img_root='data/train/', data_rate=0.1, argument=True, random=False)
#     length = len(data_loader)
#     print('length of data_loader:', length)
#     # turn data_loader into an iterator
#     data_iter = iter(data_loader)
#     # fetch the first batch
#     batch = next(data_iter)
#     # print the shapes of the first batch
#     print('first batch shape:', batch[0].shape)  # source image tensor
#     print('first batch shape:', batch[1].shape)  # target image tensor
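The commented-out __main__ block above already hints at how to smoke-test the loader; a slightly extended hedged version (assuming the preprocessed .npy slices are already in data/train/; num_workers=0 is used so it also runs on Windows) would be:

    # Quick sanity check for the loader (assumes data/train/ holds the preprocessed .npy slices).
    loader = get_loader(batchsize=1, shuffle=False, source_modal='t1', target_modal='t2',
                        img_size=256, img_root='data/train/', data_rate=0.1, num_workers=0)
    img, gt = next(iter(loader))
    print(img.shape, gt.shape)                  # expected: torch.Size([1, 1, 256, 256]) for each
    print(float(img.min()), float(img.max()))   # slices were normalized to [0, 1] in processing.py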
models.py

import torch.nn as nn
import torch.nn.functional as F
import torch


def weights_init_normal(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm2d') != -1:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)


##############################
#           U-NET
##############################


class UNetDown(nn.Module):
    def __init__(self, in_size, out_size, normalize=True, dropout=0.0):
        super(UNetDown, self).__init__()
        layers = [nn.Conv2d(in_size, out_size, 4, 2, 1, bias=False)]
        if normalize:
            layers.append(nn.InstanceNorm2d(out_size))
        layers.append(nn.LeakyReLU(0.2))
        if dropout:
            layers.append(nn.Dropout(dropout))
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        return self.model(x)


class UNetUp(nn.Module):
    def __init__(self, in_size, out_size, dropout=0.0):
        super(UNetUp, self).__init__()
        layers = [
            nn.ConvTranspose2d(in_size, out_size, 4, 2, 1, bias=False),
            nn.InstanceNorm2d(out_size),
            nn.ReLU(inplace=True),
        ]
        if dropout:
            layers.append(nn.Dropout(dropout))
        self.model = nn.Sequential(*layers)

    def forward(self, x, skip_input):
        x = self.model(x)
        x = torch.cat((x, skip_input), 1)
        return x


class GeneratorUNet(nn.Module):
    def __init__(self, in_channels=3, out_channels=3):
        super(GeneratorUNet, self).__init__()

        self.down1 = UNetDown(in_channels, 64, normalize=False)
        self.down2 = UNetDown(64, 128)
        self.down3 = UNetDown(128, 256)
        self.down4 = UNetDown(256, 512, dropout=0.5)
        self.down5 = UNetDown(512, 512, dropout=0.5)
        self.down6 = UNetDown(512, 512, dropout=0.5)
        self.down7 = UNetDown(512, 512, dropout=0.5)
        self.down8 = UNetDown(512, 512, normalize=False, dropout=0.5)

        self.up1 = UNetUp(512, 512, dropout=0.5)
        self.up2 = UNetUp(1024, 512, dropout=0.5)
        self.up3 = UNetUp(1024, 512, dropout=0.5)
        self.up4 = UNetUp(1024, 512, dropout=0.5)
        self.up5 = UNetUp(1024, 256)
        self.up6 = UNetUp(512, 128)
        self.up7 = UNetUp(256, 64)

        self.final = nn.Sequential(
            nn.Upsample(scale_factor=2),
            nn.ZeroPad2d((1, 0, 1, 0)),
            nn.Conv2d(128, out_channels, 4, padding=1),
            nn.Tanh(),
        )

    def forward(self, x):
        # U-Net generator with skip connections from encoder to decoder
        d1 = self.down1(x)
        d2 = self.down2(d1)
        d3 = self.down3(d2)
        d4 = self.down4(d3)
        d5 = self.down5(d4)
        d6 = self.down6(d5)
        d7 = self.down7(d6)
        d8 = self.down8(d7)
        u1 = self.up1(d8, d7)
        u2 = self.up2(u1, d6)
        u3 = self.up3(u2, d5)
        u4 = self.up4(u3, d4)
        u5 = self.up5(u4, d3)
        u6 = self.up6(u5, d2)
        u7 = self.up7(u6, d1)
        return self.final(u7)


##############################
#        Discriminator
##############################


class Discriminator(nn.Module):
    def __init__(self, in_channels=3):
        super(Discriminator, self).__init__()

        def discriminator_block(in_filters, out_filters, normalization=True):
            """Returns downsampling layers of each discriminator block"""
            layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]
            if normalization:
                layers.append(nn.InstanceNorm2d(out_filters))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
            return layers

        self.model = nn.Sequential(
            *discriminator_block(in_channels * 2, 64, normalization=False),
            *discriminator_block(64, 128),
            *discriminator_block(128, 256),
            *discriminator_block(256, 512),
            nn.ZeroPad2d((1, 0, 1, 0)),
            nn.Conv2d(512, 1, 4, padding=1, bias=False)
        )

    def forward(self, img_A, img_B):
        # Concatenate image and condition image by channels to produce input
        img_input = torch.cat((img_A, img_B), 1)
        return self.model(img_input)
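pix2pix.py below computes the PatchGAN target shape as (1, H // 2**4, W // 2**4). A quick hedged check (dummy single-channel 256x256 inputs assumed) that the discriminator's output really matches that 16x-downsampled patch grid:

    import torch
    from models import Discriminator

    D = Discriminator(in_channels=1)
    a = torch.randn(1, 1, 256, 256)   # real/fake target (T2)
    b = torch.randn(1, 1, 256, 256)   # condition image (T1)
    print(D(a, b).shape)              # expected: torch.Size([1, 1, 16, 16]) == (1, 256 // 2**4, 256 // 2**4)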
pix2pix.py

import argparse
import os
import numpy as np
import math
import itertools
import time
import datetime
import sys
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
from torchvision.utils import save_image

from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable

from models import *
from dataloader import *

import torch.nn as nn
import torch.nn.functional as F
import torch

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--epoch', type=int, default=0, help='epoch to start training from')
    parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
    parser.add_argument('--dataset_name', type=str, default='basta2020', help='name of the dataset')
    parser.add_argument('--batch_size', type=int, default=2, help='size of the batches')
    parser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')
    parser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')
    parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of second order momentum of gradient')
    parser.add_argument('--decay_epoch', type=int, default=100, help='epoch from which to start lr decay')
    parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
    parser.add_argument('--img_height', type=int, default=256, help='size of image height')
    parser.add_argument('--img_width', type=int, default=256, help='size of image width')
    parser.add_argument('--channels', type=int, default=3, help='number of image channels')
    parser.add_argument('--sample_interval', type=int, default=500, help='interval between sampling of images from generators')
    parser.add_argument('--checkpoint_interval', type=int, default=10, help='interval between model checkpoints')
    opt = parser.parse_args()
    print(opt)

    os.makedirs('images/%s' % opt.dataset_name, exist_ok=True)
    os.makedirs('saved_models/%s' % opt.dataset_name, exist_ok=True)

    cuda = True if torch.cuda.is_available() else False

    # Loss functions
    criterion_GAN = torch.nn.MSELoss()
    criterion_pixelwise = torch.nn.L1Loss()

    # Loss weight of L1 pixel-wise loss between translated image and real image
    lambda_pixel = 100

    # Calculate output of image discriminator (PatchGAN)
    patch = (1, opt.img_height // 2 ** 4, opt.img_width // 2 ** 4)

    # Initialize generator and discriminator
    generator = GeneratorUNet(in_channels=1, out_channels=1)
    discriminator = Discriminator(in_channels=1)

    if cuda:
        generator = generator.cuda()
        discriminator = discriminator.cuda()
        criterion_GAN.cuda()
        criterion_pixelwise.cuda()
    if opt.epoch != 0:
        # Load pretrained models
        generator.load_state_dict(torch.load('saved_models/%s/generator_%d.pth' % (opt.dataset_name, opt.epoch)))
        discriminator.load_state_dict(torch.load('saved_models/%s/discriminator_%d.pth' % (opt.dataset_name, opt.epoch)))
    else:
        # Initialize weights
        generator.apply(weights_init_normal)
        discriminator.apply(weights_init_normal)

    # Optimizers
    optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
    optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))

    # Configure dataloaders
    transforms_ = [
        transforms.Resize((opt.img_height, opt.img_width), Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ]

    dataloader = get_loader(batchsize=4, shuffle=True, pin_memory=True, source_modal='t1',
                            target_modal='t2', img_size=256, num_workers=8,
                            img_root='data/train/', data_rate=0.1, argument=True, random=False)

    # dataloader = DataLoader(
    #     ImageDataset('../../data/%s' % opt.dataset_name, transforms_=transforms_),
    #     batch_size=opt.batch_size,
    #     shuffle=True,
    #     num_workers=opt.n_cpu,
    # )
    # val_dataloader = DataLoader(
    #     ImageDataset('../../data/%s' % opt.dataset_name, transforms_=transforms_, mode='val'),
    #     batch_size=10,
    #     shuffle=True,
    #     num_workers=1,
    # )

    # Tensor type
    Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor

    # def sample_images(batches_done):
    #     """Saves a generated sample from the validation set"""
    #     imgs = next(iter(val_dataloader))
    #     real_A = Variable(imgs['B'].type(Tensor))
    #     real_B = Variable(imgs['A'].type(Tensor))
    #     fake_B = generator(real_A)
    #     img_sample = torch.cat((real_A.data, fake_B.data, real_B.data), -2)
    #     save_image(img_sample, 'images/%s/%s.png' % (opt.dataset_name, batches_done), nrow=5, normalize=True)

    # ----------
    #  Training
    # ----------

    prev_time = time.time()

    # Empty lists to record the loss values
    losses_G = []
    losses_D = []

    for epoch in range(opt.epoch, opt.n_epochs):
        for i, batch in enumerate(dataloader):

            # Model inputs
            real_A = Variable(batch[0].type(Tensor))
            real_B = Variable(batch[1].type(Tensor))
            # print(real_A, real_B)

            # Adversarial ground truths
            valid = Variable(Tensor(np.ones((real_A.size(0), *patch))), requires_grad=False)
            fake = Variable(Tensor(np.zeros((real_A.size(0), *patch))), requires_grad=False)

            # ---------------------
            #  Train Discriminator
            # ---------------------

            optimizer_D.zero_grad()

            # Real loss
            pred_real = discriminator(real_B, real_A)
            loss_real = criterion_GAN(pred_real, valid)

            # Fake loss
            fake_B = generator(real_A)
            pred_fake = discriminator(fake_B.detach(), real_A)
            loss_fake = criterion_GAN(pred_fake, fake)

            # Total loss
            loss_D = 0.5 * (loss_real + loss_fake)

            loss_D.backward()
            optimizer_D.step()

            # ------------------
            #  Train Generators
            # ------------------

            optimizer_G.zero_grad()

            # GAN loss
            pred_fake = discriminator(fake_B, real_A)
            loss_GAN = criterion_GAN(pred_fake, valid)
            # Pixel-wise loss
            loss_pixel = criterion_pixelwise(fake_B, real_B)

            # Total loss
            loss_G = loss_GAN + lambda_pixel * loss_pixel  # push D's prediction on fakes towards 1

            loss_G.backward()
            optimizer_G.step()

            # --------------
            #  Log Progress
            # --------------

            # Determine approximate time left
            batches_done = epoch * len(dataloader) + i
            batches_left = opt.n_epochs * len(dataloader) - batches_done
            time_left = datetime.timedelta(seconds=batches_left * (time.time() - prev_time))
            prev_time = time.time()

            # Print log
            sys.stdout.write(
                '\r[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f, pixel: %f, adv: %f] ETA: %s'
                % (
                    epoch,
                    opt.n_epochs,
                    i,
                    len(dataloader),
                    loss_D.item(),
                    loss_G.item(),
                    loss_pixel.item(),
                    loss_GAN.item(),
                    time_left,
                )
            )

            # Every 200 batches, show input T1, generated T2, and real T2 side by side
            mat = [real_A, fake_B, real_B]
            if (batches_done + 1) % 200 == 0:
                plt.figure(dpi=400)
                for k, img in enumerate(mat):  # use k, not i, to avoid shadowing the batch index
                    ax = plt.subplot(1, 3, k + 1)  # get position
                    img = img.permute([0, 2, 3, 1])  # b c h w -> b h w c
                    if img.shape[0] != 1:  # if the batch holds several images, keep only one
                        img = img[1]
                    img = img.squeeze(0)  # b h w c -> h w c
                    if img.shape[2] == 1:
                        img = img.repeat(1, 1, 3)  # process gray img
                    img = img.cpu()
                    ax.imshow(img.data)
                    ax.set_xticks([])
                    ax.set_yticks([])
                plt.show()

            if (batches_done + 1) % 20 == 0:  # record the losses every 20 batches
                losses_G.append(loss_G.item())
                losses_D.append(loss_D.item())

            if (batches_done + 1) % 200 == 0:
                # Plot the recorded loss curves
                plt.figure(figsize=(10, 5))
                plt.plot(range(int((batches_done + 1) / 20)), losses_G, label='Generator Loss')
                plt.plot(range(int((batches_done + 1) / 20)), losses_D, label='Discriminator Loss')
                plt.xlabel('Iteration (x20 batches)')
                plt.ylabel('Loss')
                plt.title('GAN Training Loss Curve')
                plt.legend()
                plt.show()

            # # If at sample interval save image
            # if batches_done % opt.sample_interval == 0:
            #     sample_images(batches_done)

        if opt.checkpoint_interval != -1 and epoch % opt.checkpoint_interval == 0:
            # Save model checkpoints
            torch.save(generator.state_dict(), 'saved_models/%s/generator_%d.pth' % (opt.dataset_name, epoch))
            torch.save(discriminator.state_dict(), 'saved_models/%s/discriminator_%d.pth' % (opt.dataset_name, epoch))
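The pixel-wise L1 term mentioned earlier is exactly the lambda_pixel * loss_pixel term in the loop above. With λ = lambda_pixel = 100 and criterion_GAN being the MSE (LSGAN-style) loss, the generator objective reads

$$\mathcal{L}_G = \mathbb{E}\big[(D(G(x), x) - 1)^2\big] + \lambda \, \lVert G(x) - y \rVert_1,$$

where x is the input T1 slice and y the real T2 slice. The heavily weighted L1 term pulls the translation towards the ground truth pixel by pixel, while the adversarial term keeps the output sharp.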
processing.py (data preprocessing)

import numpy as np
from matplotlib import pylab as plt
import nibabel as nib
import random
import glob
import os
from PIL import Image
import imageio


def normalize(image, mask=None, percentile_lower=0.2, percentile_upper=99.8):
    if mask is None:
        mask = image != image[0, 0, 0]
    cut_off_lower = np.percentile(image[mask != 0].ravel(), percentile_lower)
    cut_off_upper = np.percentile(image[mask != 0].ravel(), percentile_upper)
    res = np.copy(image)
    res[(res < cut_off_lower) & (mask != 0)] = cut_off_lower
    res[(res > cut_off_upper) & (mask != 0)] = cut_off_upper
    res = res / res.max()  # 0-1
    return res


def visualize(t1_data, t2_data, flair_data, t1ce_data, gt_data):
    plt.figure(figsize=(8, 8))
    plt.subplot(231)
    plt.imshow(t1_data[:, :], cmap='gray')
    plt.title('Image t1')
    plt.subplot(232)
    plt.imshow(t2_data[:, :], cmap='gray')
    plt.title('Image t2')
    plt.subplot(233)
    plt.imshow(flair_data[:, :], cmap='gray')
    plt.title('Image flair')
    plt.subplot(234)
    plt.imshow(t1ce_data[:, :], cmap='gray')
    plt.title('Image t1ce')
    plt.subplot(235)
    plt.imshow(gt_data[:, :])
    plt.title('GT')
    plt.show()


def visualize_to_gif(t1_data, t2_data, t1ce_data, flair_data):
    transversal = []
    coronal = []
    sagittal = []
    slice_num = t1_data.shape[2]
    for i in range(slice_num):
        sagittal_plane = np.concatenate((t1_data[:, :, i], t2_data[:, :, i],
                                         t1ce_data[:, :, i], flair_data[:, :, i]), axis=1)
        coronal_plane = np.concatenate((t1_data[i, :, :], t2_data[i, :, :],
                                        t1ce_data[i, :, :], flair_data[i, :, :]), axis=1)
        transversal_plane = np.concatenate((t1_data[:, i, :], t2_data[:, i, :],
                                            t1ce_data[:, i, :], flair_data[:, i, :]), axis=1)
        transversal.append(transversal_plane)
        coronal.append(coronal_plane)
        sagittal.append(sagittal_plane)
    imageio.mimsave('./transversal_plane.gif', transversal, duration=0.01)
    imageio.mimsave('./coronal_plane.gif', coronal, duration=0.01)
    imageio.mimsave('./sagittal_plane.gif', sagittal, duration=0.01)
    return


if __name__ == '__main__':
    t1_list = sorted(glob.glob('../data/BraTS2020_TrainingData/MICCAI_BraTS2020_TrainingData/*/*t1.*'))
    t2_list = sorted(glob.glob('../data/BraTS2020_TrainingData/MICCAI_BraTS2020_TrainingData/*/*t2.*'))

    data_len = len(t1_list)
    train_len = int(data_len * 0.8)
    test_len = data_len - train_len
    train_path = '../data/train/'
    test_path = '../data/test/'
    os.makedirs(train_path, exist_ok=True)
    os.makedirs(test_path, exist_ok=True)

    for i, (t1_path, t2_path) in enumerate(zip(t1_list, t2_list)):
        print('preprocessing the', i + 1, 'th subject')
        t1_img = nib.load(t1_path)  # (240, 240, 155)
        t2_img = nib.load(t2_path)
        # to numpy
        t1_data = t1_img.get_fdata()
        t2_data = t2_img.get_fdata()
        t1_data = normalize(t1_data)  # normalize to [0,1]
        t2_data = normalize(t2_data)
        tensor = np.stack([t1_data, t2_data])  # (2, 240, 240, 155)
        if i < train_len:
            for j in range(60):
                Tensor = tensor[:, 10:210, 25:225, 50 + j]
                np.save(train_path + str(60 * i + j + 1) + '.npy', Tensor)
        else:
            for j in range(60):
                Tensor = tensor[:, 10:210, 25:225, 50 + j]
                np.save(test_path + str(60 * (i - train_len) + j + 1) + '.npy', Tensor)
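For reference, a hedged sanity check of the slicing above: each saved .npy file holds a 2-channel (t1, t2) center crop, and exactly 60 axial slices (indices 50..109) are kept per subject, which is why the dataloader computes subject_num as len(self.images) // 60:

    import numpy as np

    # Dummy volume pair standing in for one normalized BraTS subject, shape (2, 240, 240, 155).
    tensor = np.zeros((2, 240, 240, 155))
    slices = [tensor[:, 10:210, 25:225, 50 + j] for j in range(60)]
    print(slices[0].shape, len(slices))  # (2, 200, 200) and 60 slices per subject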
testutil.py

# -*- coding: utf-8 -*-
# Time     : 2023/9/23 17:21
# Author   : Tom
# File     : testutil.py
# Software : PyCharm
import argparse

from math import log10, sqrt
import numpy as np
from skimage.metrics import structural_similarity as ssim


def psnr(res, gt):
    mse = np.mean((res - gt) ** 2)
    if (mse == 0):
        return 100
    max_pixel = 1
    psnr = 20 * log10(max_pixel / sqrt(mse))
    return psnr


def nmse(res, gt):
    Norm = np.linalg.norm((gt * gt), ord=2)
    if np.all(Norm == 0):
        return 0
    else:
        nmse = np.linalg.norm(((res - gt) * (res - gt)), ord=2) / Norm
        return nmse


test.py

# -*- coding: utf-8 -*-
# Time     : 2023/9/23 16:14
# Author   : Tom
# File     : test.py
# Software : PyCharm

import torch
from models import *
from dataloader import *
from testutil import *

if __name__ == '__main__':
    images_save = 'images_save/'
    slice_num = 4
    os.makedirs(images_save, exist_ok=True)
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = GeneratorUNet(in_channels=1, out_channels=1)
    data_loader = get_loader(batchsize=4, shuffle=True, pin_memory=True, source_modal='t1',
                             target_modal='t2', img_size=256, num_workers=8,
                             img_root='data/test/', data_rate=1, argument=True, random=False)
    model = model.to(device)
    model.load_state_dict(torch.load('saved_models/basta2020/generator_0.pth',
                                     map_location=torch.device(device)), strict=False)
    PSNR = []
    NMSE = []
    SSIM = []
    for i, (img, gt) in enumerate(data_loader):
        batch_size = img.size()[0]
        img = img.to(device, dtype=torch.float)
        gt = gt.to(device, dtype=torch.float)
        with torch.no_grad():
            pred = model(img)
        for j in range(batch_size):
            save_image([pred[j]], images_save + str(i * batch_size + j + 1) + '.png', normalize=True)
            print(images_save + str(i * batch_size + j + 1) + '.png')
        pred, gt = pred.cpu().detach().numpy().squeeze(), gt.cpu().detach().numpy().squeeze()
        for j in range(batch_size):
            PSNR.append(psnr(pred[j], gt[j]))
            NMSE.append(nmse(pred[j], gt[j]))
            # note: recent scikit-image versions may require an explicit data_range for float inputs
            SSIM.append(ssim(pred[j], gt[j]))
    PSNR = np.asarray(PSNR)
    NMSE = np.asarray(NMSE)
    SSIM = np.asarray(SSIM)
    PSNR = PSNR.reshape(-1, slice_num)
    NMSE = NMSE.reshape(-1, slice_num)
    SSIM = SSIM.reshape(-1, slice_num)
    PSNR = np.mean(PSNR, axis=1)
    print(PSNR.size)
    NMSE = np.mean(NMSE, axis=1)
    SSIM = np.mean(SSIM, axis=1)
    print('PSNR mean:', np.mean(PSNR), 'PSNR std:', np.std(PSNR))
    print('NMSE mean:', np.mean(NMSE), 'NMSE std:', np.std(NMSE))
    print('SSIM mean:', np.mean(SSIM), 'SSIM std:', np.std(SSIM))
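A minimal hedged demo of the metric helpers above (synthetic arrays only, to show the expected scale; psnr returns 100 for identical inputs because of the mse == 0 guard in testutil.py):

    import numpy as np
    from testutil import psnr, nmse

    gt = np.random.rand(256, 256).astype(np.float32)
    noisy = np.clip(gt + np.random.normal(0, 0.05, gt.shape), 0, 1).astype(np.float32)

    print(psnr(gt, gt))      # 100: the mse == 0 guard in testutil.py
    print(psnr(noisy, gt))   # roughly 26 dB for sigma = 0.05 noise
    print(nmse(noisy, gt))   # small positive value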