1. Saving a Custom Model (Method 1)
For example, take the VGG16 architecture and append one extra linear layer so that the final output has 10 classes. torch.save(obj=module, f=path) takes the model object to save and the destination path. This method saves both the model structure and the model parameters, so the resulting file is relatively large.
import torch
import torchvision
from torch import nn

# Build VGG16 and append an extra linear layer so that the final output has 10 classes
vgg_16 = torchvision.models.vgg16(pretrained=False)
vgg_16.classifier.add_module(name='new_7', module=nn.Linear(in_features=1000, out_features=10))
print(vgg_16)

Output:

VGG(
  (features): Sequential(
    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU(inplace=True)
    (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU(inplace=True)
    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (6): ReLU(inplace=True)
    (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (8): ReLU(inplace=True)
    (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace=True)
    (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (13): ReLU(inplace=True)
    (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (15): ReLU(inplace=True)
    (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (18): ReLU(inplace=True)
    (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (20): ReLU(inplace=True)
    (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (22): ReLU(inplace=True)
    (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (25): ReLU(inplace=True)
    (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (27): ReLU(inplace=True)
    (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (29): ReLU(inplace=True)
    (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (avgpool): AdaptiveAvgPool2d(output_size=(7, 7))
  (classifier): Sequential(
    (0): Linear(in_features=25088, out_features=4096, bias=True)
    (1): ReLU(inplace=True)
    (2): Dropout(p=0.5, inplace=False)
    (3): Linear(in_features=4096, out_features=4096, bias=True)
    (4): ReLU(inplace=True)
    (5): Dropout(p=0.5, inplace=False)
    (6): Linear(in_features=4096, out_features=1000, bias=True)
    (new_7): Linear(in_features=1000, out_features=10, bias=True)
  )
)
torch.save(obj=vgg_16, f='VGG16_1.pth')

2. Loading a Custom Model (Method 1)
torch.load(f='VGG16_1.pth') takes the path where the model was saved and returns the restored model.
import torch

vgg16_1 = torch.load(f='VGG16_1.pth')
print(vgg16_1)

The printed architecture is identical to the output shown in Section 1, including the added new_7 layer, which confirms that both the structure and the parameters were restored.
Note: if the saved model was built from a class you wrote yourself, then after saving it with Method 1 you must also import the file that defines the model class when loading it, otherwise loading fails. The class is imported so that the model object can be reconstructed and checked against its definition during loading. For example: from <file containing the model class> import *, e.g. from mymodule import *. A minimal sketch of this pitfall follows.
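As a rough illustration of the point above (the file name mymodule.py, the script names, and the class MyModel are hypothetical, chosen only for this sketch):

# mymodule.py (hypothetical file that defines the custom model class)
import torch
from torch import nn

class MyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.layer = nn.Linear(10, 2)

    def forward(self, x):
        return self.layer(x)

# save.py (hypothetical): saving the whole model with Method 1
#   from mymodule import MyModel
#   model = MyModel()
#   torch.save(obj=model, f='my_model.pth')   # stores structure + parameters via pickle

# load.py (hypothetical): the module that defines MyModel must be importable here too,
# otherwise torch.load cannot rebuild the pickled object and raises an error
#   from mymodule import *
#   model = torch.load(f='my_model.pth')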
3. Saving a Custom Model (Method 2)
torch.save(vgg_16.state_dict(), 'VGG16_2.pth') saves the network in dictionary form, i.e. only the model parameters. This is the officially recommended way.
Save the VGG-16 network:
import torch
import torchvision
from torch import nn

vgg_16 = torchvision.models.vgg16(pretrained=False)
print(vgg_16)

The printed architecture is the stock VGG16: the features and avgpool blocks are identical to the printout in Section 1, and the classifier ends at the original 1000-class layer (there is no new_7 layer here):

  (classifier): Sequential(
    (0): Linear(in_features=25088, out_features=4096, bias=True)
    (1): ReLU(inplace=True)
    (2): Dropout(p=0.5, inplace=False)
    (3): Linear(in_features=4096, out_features=4096, bias=True)
    (4): ReLU(inplace=True)
    (5): Dropout(p=0.5, inplace=False)
    (6): Linear(in_features=4096, out_features=1000, bias=True)
  )
torch.save(vgg_16.state_dict(), 'VGG16_2.pth')
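To see what Method 2 actually writes to disk, it can help to inspect the state_dict before saving. A small sketch (the variable name state is just for illustration; the example key and shape are those of the first VGG16 convolution):

# state_dict() returns an ordered dict mapping parameter names to tensors;
# it contains no description of the network structure itself.
state = vgg_16.state_dict()
print(type(state))                       # <class 'collections.OrderedDict'>
print(list(state.keys())[:3])            # e.g. ['features.0.weight', 'features.0.bias', 'features.2.weight']
print(state['features.0.weight'].shape)  # torch.Size([64, 3, 3, 3])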
4. Loading a Custom Model (Method 2)

Load the saved VGG-16 weights:
import torch
import torchvision

vgg16 = torchvision.models.vgg16(pretrained=False)
vgg16.load_state_dict(torch.load('VGG16_2.pth'))
print(vgg16)

The printed architecture is identical to the one shown in Section 3: a standard VGG16 whose parameters have now been filled in from VGG16_2.pth.
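One caveat worth adding: because Method 2 stores only the parameters, the model you load them into must have exactly the same structure as the model that was saved. For the modified 10-class model from Section 1, that means re-adding the extra layer before calling load_state_dict. A rough sketch under that assumption (the file name VGG16_modified.pth is hypothetical):

import torch
import torchvision
from torch import nn

# Rebuild and save the modified 10-class model with Method 2
vgg_16 = torchvision.models.vgg16(pretrained=False)
vgg_16.classifier.add_module(name='new_7', module=nn.Linear(in_features=1000, out_features=10))
torch.save(vgg_16.state_dict(), 'VGG16_modified.pth')

# Loading this checkpoint into a plain VGG16 would raise a RuntimeError about
# unexpected keys ('classifier.new_7.weight', 'classifier.new_7.bias').
# Loading into a model rebuilt with the same new_7 layer works:
vgg_restored = torchvision.models.vgg16(pretrained=False)
vgg_restored.classifier.add_module(name='new_7', module=nn.Linear(in_features=1000, out_features=10))
vgg_restored.load_state_dict(torch.load('VGG16_modified.pth'))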