Converting a BasicVSR model to JIT and running inference with C++ libtorch

Contents
Install the BasicVSR environment
  1. Download the source code
  2. Create a new conda environment
  3. Install PyTorch
  4. Install mim and mmcv-full
  5. Install mmedit
  6. Download the model file
  7. Test that it runs correctly
Convert to a JIT model
Inference with C++ libtorch
Results

Install the BasicVSR environment
1. Download the source code
git clone https://github.com/ckkelvinchan/BasicVSR_PlusPlus.git

2. Create a new conda environment
conda create -n BasicVSRPLUSPLUS python=3.8 -y
conda activate BasicVSRPLUSPLUS

3. Install PyTorch
Install a suitable version from the PyTorch website; I used the CUDA 11.6 build here.
conda install pytorch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 pytorch-cuda=11.6 -c pytorch -c nvidia

4. Install mim and mmcv-full
pip install openmim
mim install mmcv-full

5. Install mmedit
pip install mmedit

6. Download the model file
Download the model file and place it at chkpts/basicvsr_plusplus_reds4.pth.
7. Test that it runs correctly
python demo/restoration_video_demo.py configs/basicvsr_plusplus_reds4.py chkpts/basicvsr_plusplus_reds4.pth data/demo_000 results/demo_000

OK! The environment works, so we can move on to the conversion.
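If the demo does not run, an optional sanity check like the one below (not part of the repository, just a small script of my own) can help isolate environment problems: it only imports the key packages and reports their versions and the CUDA status.

import torch
import mmcv
import mmedit

print("torch:", torch.__version__)
print("mmcv:", mmcv.__version__)
print("mmedit:", mmedit.__version__)
print("CUDA available:", torch.cuda.is_available())
if torch.cuda.is_available():
    print("GPU:", torch.cuda.get_device_name(0))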
Convert to a JIT model
Create a conversion script under demo/:
import os
import cv2
import mmcv
import numpy as np
import torch
from mmedit.core import tensor2img
from mmedit.apis import init_model


def main():
    # Load the model and set it to evaluation mode
    model = init_model('configs/basicvsr_plusplus_reds4.py',
                       'chkpts/basicvsr_plusplus_reds4.pth',
                       device=torch.device('cuda', 0))
    model.eval()

    # Prepare a sample input
    src1 = cv2.imread('./data/img/00000000.png')
    src = cv2.cvtColor(src1, cv2.COLOR_BGR2RGB)
    src = torch.from_numpy(src / 255.).permute(2, 0, 1).float()
    src = src.unsqueeze(0)
    input_arg = torch.stack([src], dim=1)
    input_arg = input_arg.to(torch.device('cuda', 0))  # make sure the input is on the GPU

    # # Run inference with the original model (optional check)
    # with torch.no_grad():  # no gradients needed for inference
    #     result = model(input_arg, test_mode=True)['output'].cpu()
    # output_i = tensor2img(result)
    # mmcv.imwrite(output_i, './test.png')

    # Convert the model: trace the generator
    traced_model = torch.jit.trace(model.generator, input_arg)
    torch.jit.save(traced_model, 'basicvsrPP.pt')

    # Test the traced model
    res = traced_model(input_arg)
    out = tensor2img(res)
    mmcv.imwrite(out, './testoo.png')


if __name__ == '__main__':
    main()
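The C++ side only needs the saved basicvsrPP.pt file, so before switching languages it is worth confirming that the traced module loads and runs on its own, without the mmedit config. The sketch below is an optional check of mine, not part of the repository; the 180x320 frame size is only an assumption for illustration, and since torch.jit.trace records the graph for a concrete example input, it is safest to reuse the resolution of the frame that was used during tracing.

import torch

# Load the traced generator produced by the script above
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = torch.jit.load('basicvsrPP.pt', map_location=device)
model.eval()

# BasicVSR++ takes a 5-D input: (batch, num_frames, channels, height, width).
# The trace above used a single frame, so num_frames = 1 here.
dummy = torch.rand(1, 1, 3, 180, 320, device=device)  # assumed frame size
with torch.no_grad():
    out = model(dummy)
# The REDS4 model upscales 4x, so height and width should be 4x the input.
print('output shape:', out.shape)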
Inference with C++ libtorch

The header file lbk_video_super_resolution.hpp:

/*
 * Author: Liangbaikai
 * LastEditTime: 2024-03-29 11:28:42
 * Description: video super-resolution
 * Copyright (c) 2024 by Liangbaikai, All Rights Reserved.
 */
#pragma once
#include <iostream>
#include <torch/script.h>
#include <torch/torch.h>
#include <opencv2/opencv.hpp>
#include <vector>
#include <c10/cuda/CUDACachingAllocator.h>

namespace LIANGBAIKAI_BASE_MODEL_NAME
{
    class lbk_video_super_resolution_basicPP
    {
    public:
        lbk_video_super_resolution_basicPP() = default;
        virtual ~lbk_video_super_resolution_basicPP()
        {
            c10::cuda::CUDACachingAllocator::emptyCache();
            // cudaDeviceReset();
        }

        /**
         * description: initialization
         * param {string} modelpath  model file
         * param {int} gpuid  GPU id
         * return {*} 0 on success, -1 on failure
         */
        int init(const std::string &modelpath, int gpuid = 0)
        {
            try
            {
                _mymodule = std::make_unique<torch::jit::script::Module>(torch::jit::load(modelpath));
            }
            catch (const c10::Error &e)
            {
                std::cerr << "Error loading the model " << modelpath << std::endl;
                std::cerr << "Error " << e.what() << std::endl;
                return -1;
            }
            _gpuid = gpuid;
            if ((_gpuid < 0) || (!torch::cuda::is_available()))
            {
                _device = std::make_unique<torch::Device>(torch::kCPU);
                _mymodule->to(at::kCPU);
            }
            else
            {
                _device = std::make_unique<torch::Device>(torch::kCUDA, _gpuid);
                _mymodule->to(*_device); // move the module to the selected GPU
            }
            _mymodule->eval();
            _modelsuccess = true;
            return 0;
        }

        /**
         * description: inference
         * param {Mat} inputpic  input image
         * param {Mat} outputpic output image
         * param {bool} showlog  whether to print logs
         * return {*} 0 on success, -1 on failure
         */
        int inference(cv::Mat &inputpic, cv::Mat &outputpic, bool showlog = false)
        {
            if (inputpic.empty() || (inputpic.channels() != 3))
            {
                std::cout << "input data ERROR" << std::endl;
                return -1;
            }
            if (!_modelsuccess)
            {
                std::cout << "model has not been inited!" << std::endl;
                return -1;
            }
            // torch::DeviceGuard makes sure the current device is restored to its previous
            // state (CPU or GPU) once this scope ends.
            torch::DeviceGuard device_guard(*_device); // everything in this scope runs on the chosen device

            cv::transpose(inputpic, inputpic); // rotate clockwise
            // Convert the image to a tensor
            cv::Mat img_float;
            inputpic.convertTo(img_float, CV_32FC3, 1.0 / 255);
            torch::Tensor img_tensor = torch::from_blob(img_float.data, {img_float.rows, img_float.cols, 3}, torch::kFloat32).permute({2, 1, 0});
            img_tensor = (img_tensor - 0.5) / 0.5;
            img_tensor = (img_tensor + 1) / 2;
            img_tensor = torch::clamp(img_tensor, 0, 1);
            torch::Tensor src_unsqueezed = img_tensor.unsqueeze(0).to(*_device); // move the tensor to the device
            std::vector<torch::Tensor> tensors_to_stack = {src_unsqueezed};      // a vector containing src
            torch::Tensor input_arg = torch::stack(tensors_to_stack, 1);         // stack along dim 1 -> (1, T, C, H, W)
            if (showlog)
            {
                std::cout << input_arg.sizes() << std::endl;
            }

            torch::NoGradGuard no_grad; // disable gradient computation for inference
            auto output_dict = _mymodule->forward({input_arg});
            torch::Tensor output_data;
            if (output_dict.isTensor())
            {
                output_data = output_dict.toTensor().to(at::kCPU); // if it is a Tensor, fetch it with toTensor()
                if (showlog)
                {
                    std::cout << "out shape: " << output_data.sizes() << std::endl;
                }
            }
            else
            {
                if (showlog)
                {
                    std::cerr << "The IValue does not contain a Tensor." << std::endl;
                }
                return -1;
            }

            float *f = output_data.data_ptr<float>();
            int output_width = output_data.size(3);
            int output_height = output_data.size(4);
            int size_pic = output_width * output_height;
            std::vector<cv::Mat> rgbChannels(3);
            rgbChannels[0] = cv::Mat(output_width, output_height, CV_32FC1, f);
            rgbChannels[1] = cv::Mat(output_width, output_height, CV_32FC1, f + size_pic);
            rgbChannels[2] = cv::Mat(output_width, output_height, CV_32FC1, f + size_pic + size_pic);
            rgbChannels[0].convertTo(rgbChannels[0], CV_8UC1, 255);
            rgbChannels[1].convertTo(rgbChannels[1], CV_8UC1, 255);
            rgbChannels[2].convertTo(rgbChannels[2], CV_8UC1, 255);
            cv::merge(rgbChannels, outputpic);
            return 0;
        }

    private:
        bool _modelsuccess = false;
        int _gpuid = 0;
        std::unique_ptr<torch::Device> _device;
        std::unique_ptr<torch::jit::script::Module> _mymodule;
    };
}

A small test program:

#include <unistd.h>
#include <cstdlib>
#include "lbk_video_super_resolution.hpp"
using namespace LIANGBAIKAI_BASE_MODEL_NAME;
int main(int argc,char *argv[])
{
    if (argc < 5)
    {
        std::cout << "./test model gpu_id(-1 for CPU) input_image output_image" << std::endl;
        return -1;
    }
    std::string modelfile = argv[1];
    int gpuid = atoi(argv[2]);
    std::string imgfile = argv[3];
    std::string outfile = argv[4];
    cv::Mat src = cv::imread(imgfile);
    lbk_video_super_resolution_basicPP test;
    if (0 != test.init(modelfile, gpuid))
    {
        std::cout << "init failed" << std::endl;
        return -1;
    }
    cv::Mat out;
    int rec = test.inference(src, out, true);
    if (rec == 0)
    {
        cv::imwrite(outfile, out);
    }
    return 0;
}

Results