前言
本文主要通过实战的方式记录各种模型推理的方法 模型训练
首先我们先使用Pytorch训练一个最简单的十分类神经网络如下
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor

# Load the training data (downloaded to ./Datasets/ on first run).
training_data = datasets.FashionMNIST(
    root=r"./Datasets/",
    train=True,
    download=True,
    transform=ToTensor(),
)

# Load the validation data.
test_data = datasets.FashionMNIST(
    root=r"./Datasets/",
    train=False,
    download=True,
    transform=ToTensor(),
)

# Create data loaders.
batch_size = 16
train_dataloader = DataLoader(training_data, batch_size=batch_size)
test_dataloader = DataLoader(test_data, batch_size=batch_size)

# Get cpu or gpu device for training.
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using {device} device")
class NeuralNetwork(nn.Module):def __init__(self):super().__init__()self.flatten nn.Flatten()self.linear_relu_stack nn.Sequential(nn.Linear(28*28, 512),nn.ReLU(),nn.Linear(512, 512),nn.ReLU(),nn.Linear(512, 10))def forward(self, x):x self.flatten(x)logits self.linear_relu_stack(x)return logitsmodel NeuralNetwork().to(device)
# print(model)

# Define the loss function and optimizer.
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
def train(dataloader, model, loss_fn, optimizer):size len(dataloader.dataset)model.train()for batch, (X, y) in enumerate(dataloader):X, y X.to(device), y.to(device)# Compute prediction errorpred model(X)loss loss_fn(pred, y)# Backpropagationoptimizer.zero_grad()loss.backward()optimizer.step()if batch % 100 0:loss, current loss.item(), batch * len(X)print(floss: {loss:7f} [{current:5d}/{size:5d}])# 定义验证方法在验证数据集中进行验证
def test(dataloader, model, loss_fn):size len(dataloader.dataset)num_batches len(dataloader)model.eval()test_loss, correct 0, 0with torch.no_grad():for X, y in dataloader:X, y X.to(device), y.to(device)pred model(X)test_loss loss_fn(pred, y).item()correct (pred.argmax(1) y).type(torch.float).sum().item()test_loss / num_batchescorrect / sizeprint(fTest: \n Accuracy: {(100*correct):0.1f}%, Avg loss: {test_loss:8f} \n)epochs 100
# Run the train/evaluate cycle for every epoch.
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train(train_dataloader, model, loss_fn, optimizer)
    test(test_dataloader, model, loss_fn)
print("Done!")
模型推理
Pytorch模型
Pytorch官方入门文档所给出的模型持久化及加载方法,使用torch.save()方法对模型进行持久化,所保存的模型为动态图模型。如下:
# (Requires the training code above to have run first.)
import os

# Save the model (state_dict only, not the full module object).
model_path = "./model"
os.makedirs(model_path, exist_ok=True)
torch.save(model.state_dict(), os.path.join(model_path, "model.pth"))
print("Saved PyTorch Model State to model.pth")

# Load the model for inference: rebuild the architecture, then restore weights.
model = NeuralNetwork()
model.load_state_dict(torch.load("./model/model.pth"))

classes = [
    "T-shirt/top",
    "Trouser",
    "Pullover",
    "Dress",
    "Coat",
    "Sandal",
    "Shirt",
    "Sneaker",
    "Bag",
    "Ankle boot",
]

model.eval()
x, y = test_data[0][0], test_data[0][1]
with torch.no_grad():
    pred = model(x)
    predicted, actual = classes[pred[0].argmax(0)], classes[y]
    print(f"Predicted: {predicted}, Actual: {actual}")
TorchScript
TorchScript是一种从PyTorch代码创建可序列化和可优化模型的方法是一种静态图模型。TorchScript模型可以从Python进程中保存并加载到没有Python依赖的进程中。使用方法如下
# --- Save the model ---
import time

# Generate the IR via tracing; tracing needs a sample input.
dummy_input = torch.rand(1, 1, 28, 28)  # IR generation
with torch.no_grad():
    jit_model = torch.jit.trace(model, dummy_input)
# Serialize the traced model.
jit_model.save("./model/jit_model.pt")

# --- Load the model and run inference ---
# Load the serialized model (works without the Python class definition).
jit_model = torch.jit.load("./model/jit_model.pt")

x, y = test_data[0][0], test_data[0][1]
start_time = time.time()
pred = jit_model.forward(x)
print(f"spend time: {time.time()-start_time}")
print(pred[0].argmax(0))

参考文档
Save and Load the Model — PyTorch Tutorials 2.1.1cu121 documentation
TorchScript — PyTorch master documentation