ResNet Neural Network

Contents:
- Defining the ResNet Block
- Defining ResNet18
- Loading the dataset, training, and testing

Defining the ResNet Block

What the ResNet block is for: it is a residual block, the basic building unit of ResNet. Its main purpose is to tackle the exploding- and vanishing-gradient problems in deep networks, and to ease the degradation problem that shows up during training.

In a traditional network, each layer's output feeds directly into the next layer, so the gradient can shrink step by step during backpropagation; once the network is deep enough, it may vanish outright. A residual block therefore introduces a skip connection: the block's output is added back to the original input x, i.e. y = F(x) + x. Differentiating gives ∂y/∂x = ∂F/∂x + 1, one term more than in the plain network, and that extra identity term is precisely what keeps the gradient from vanishing.

```python
import torch
from torch import nn
from torch.nn import functional as F


class ResBlk(nn.Module):
    """ResNet basic block."""

    def __init__(self, ch_in, ch_out, stride):
        super(ResBlk, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=ch_in, out_channels=ch_out,
                               kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(ch_out)
        self.conv2 = nn.Conv2d(in_channels=ch_out, out_channels=ch_out,
                               kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(ch_out)

        self.extra = nn.Sequential()
        # Transform the input whenever the channel count or spatial size
        # changes, so the shortcut can be added to the main branch.
        if ch_out != ch_in or stride != 1:
            self.extra = nn.Sequential(
                # [b, ch_in, h, w] => [b, ch_out, h/stride, w/stride]
                nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=stride),
                nn.BatchNorm2d(ch_out)
            )

    def forward(self, x):
        """
        :param x: [b, ch, h, w]
        :return: [b, ch_out, h', w']
        """
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Shortcut: x is [b, ch_in, h, w] while out is [b, ch_out, h', w'].
        # This addition is the essence of ResNet: it counters vanishing and
        # exploding gradients and the degradation problem.
        out = self.extra(x) + out
        return out
```

Run a quick test:

```python
def main():
    blk = ResBlk(64, 128, stride=4)
    tmp = torch.randn(2, 64, 32, 32)
    out = blk(tmp)
    print(out.shape)
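
# A hypothetical extra check (my addition, not in the original post): when
# ch_in == ch_out and stride == 1, self.extra stays an empty nn.Sequential(),
# i.e. an identity shortcut, so both channels and spatial size are preserved:
#   blk2 = ResBlk(64, 64, stride=1)
#   print(blk2(torch.randn(2, 64, 32, 32)).shape)  # torch.Size([2, 64, 32, 32])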

if __name__ == '__main__':
    main()
```

To clear up a few points that may be confusing in this test:

```python
blk = ResBlk(64, 128, stride=4)   # 64 is the input channel count, 128 the output channel count
tmp = torch.randn(2, 64, 32, 32)  # 2 is the number of samples, 64 the input channels, 32x32 the spatial size
out = blk(tmp)                    # run the tensor through the ResBlk
```

The output is torch.Size([2, 128, 8, 8]).
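Why does a 32×32 input come out as 8×8? The standard convolution output-size formula makes this quick to verify (this little derivation is my addition, not the original post's):

$$
H_{\text{out}} = \left\lfloor \frac{H_{\text{in}} + 2p - k}{s} \right\rfloor + 1
$$

For `conv1` with k = 3, p = 1, s = 4: ⌊(32 + 2 − 3) / 4⌋ + 1 = 7 + 1 = 8. `conv2` (k = 3, p = 1, s = 1) preserves the size, and the 1×1 shortcut convolution (k = 1, p = 0, s = 4) also yields ⌊31 / 4⌋ + 1 = 8, so both branches line up for the addition.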
Defining ResNet18

```python
class ResNet18(nn.Module):
    def __init__(self):
        super(ResNet18, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=3, padding=0),
            nn.BatchNorm2d(64)
        )
        # followed by 4 blocks
        # [b, 64, h, w] => [b, 128, h, w]
        self.blk1 = ResBlk(64, 128, stride=2)
        # [b, 128, h, w] => [b, 256, h, w]
        self.blk2 = ResBlk(128, 256, stride=2)
        # [b, 256, h, w] => [b, 512, h, w]
        self.blk3 = ResBlk(256, 512, stride=2)
        # [b, 512, h, w] => [b, 512, h, w]
        self.blk4 = ResBlk(512, 512, stride=2)

        self.outlayer = nn.Linear(512, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))

        x = self.blk1(x)
        x = self.blk2(x)
        x = self.blk3(x)
        x = self.blk4(x)

        # global average pooling: [b, 512, h, w] => [b, 512, 1, 1]
        x = F.adaptive_avg_pool2d(x, [1, 1])
        x = x.view(x.size(0), -1)
        x = self.outlayer(x)

        return x
```
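As a quick sanity check (my addition, mirroring the ResBlk test above), you can push a random CIFAR-sized batch through the network and confirm that the logits come out as one score per class:

```python
net = ResNet18()
x = torch.randn(2, 3, 32, 32)  # a fake batch: 2 RGB images of 32x32
print(net(x).shape)            # expected: torch.Size([2, 10])
```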
Loading the dataset, training, and testing

```python
import torch
import torchvision.transforms
from torch import nn, optim
from torchvision import datasets
from torch.utils.data import DataLoader
# from lenet5 import Lenet5
from learing_resnet import ResNet18

def main():
    batchsz = 32

    cifar_train = datasets.CIFAR10('data', train=True, transform=torchvision.transforms.Compose([
        torchvision.transforms.Resize((32, 32)),
        torchvision.transforms.ToTensor()
    ]), download=True)
    cifar_train = DataLoader(cifar_train, batch_size=batchsz, shuffle=True)

    cifar_test = datasets.CIFAR10('data', train=False, transform=torchvision.transforms.Compose([
        torchvision.transforms.Resize((32, 32)),
        torchvision.transforms.ToTensor()
    ]), download=True)
    cifar_test = DataLoader(cifar_test, batch_size=batchsz, shuffle=True)

    # x, label = next(iter(cifar_train))
    # print('x:', x.shape, 'label:', label.shape)

    device = torch.device('cuda')
    # model = Lenet5().to(device)
    model = ResNet18().to(device)

    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = optim.Adam(model.parameters(), lr=1e-3)

    for epoch in range(1000):
        model.train()
        for batchidx, (x, label) in enumerate(cifar_train):
            x, label = x.to(device), label.to(device)

            logits = model(x)
            loss = criterion(logits, label)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print(epoch, loss.item())

        total_correct = 0
        total_num = 0
        model.eval()
        with torch.no_grad():
            for x, label in cifar_test:
                x, label = x.to(device), label.to(device)

                logits = model(x)
                pred = logits.argmax(dim=1)
                total_correct += torch.eq(pred, label).float().sum().item()
                total_num += x.size(0)

            acc = total_correct / total_num
            print(epoch, acc)


if __name__ == '__main__':
    main()
```
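One optional tweak (my addition, not part of the original post): normalizing the CIFAR-10 images usually helps the network converge faster. The per-channel mean/std values below are the commonly quoted CIFAR-10 statistics; treat them as an assumption and verify before relying on them:

```python
normalize = torchvision.transforms.Normalize(
    mean=[0.4914, 0.4822, 0.4465],  # commonly cited CIFAR-10 channel means (assumed)
    std=[0.2023, 0.1994, 0.2010],   # commonly cited CIFAR-10 channel stds (assumed)
)
transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize((32, 32)),
    torchvision.transforms.ToTensor(),
    normalize,
])
```

If you adopt it, pass this `transform` to both `datasets.CIFAR10` calls in place of the Compose used above.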