TensorRT & CUDA Self-Study Notes 007: Runtime Library and a Matrix Addition Demo
Runtime library
To be filled in tomorrow; heading off to prepare for an interview first.
Matrix addition demo
cudaMalloc and cudaMemcpy
They behave just like C's malloc and memcpy, except that they operate not on host-side memory but on the device-side "video memory" (the GPU's global memory).
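As a minimal sketch of that analogy (the variable names here are illustrative, not part of the demo below):

#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    const int N = 8;
    float host[N] = {0, 1, 2, 3, 4, 5, 6, 7};
    float back[N] = {0};

    // like malloc, but the returned pointer refers to device global memory
    float* dev = nullptr;
    cudaMalloc((void**)&dev, N * sizeof(float));

    // like memcpy, but the copy direction must be stated explicitly
    cudaMemcpy(dev, host, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(back, dev, N * sizeof(float), cudaMemcpyDeviceToHost);

    printf("round trip: %.1f ... %.1f\n", back[0], back[N - 1]);
    cudaFree(dev);  // the counterpart of free for device memory
    return 0;
}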
cudaSetDevice
cudaSetDevice is a CUDA API function that selects a device for the calling host thread: all subsequent CUDA operations issued by that host thread target the selected device.
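A small sketch of that per-thread behavior, assuming a machine with at least one CUDA device (two host threads each bind their own device):

#include <cuda_runtime.h>
#include <thread>
#include <cstdio>

// each host thread selects its own device; the binding is per-thread
void worker(int dev)
{
    cudaSetDevice(dev);           // subsequent CUDA calls on THIS thread target `dev`
    float* p = nullptr;
    cudaMalloc((void**)&p, 256);  // allocated on the device selected above
    printf("thread bound to device %d\n", dev);
    cudaFree(p);
}

int main()
{
    int n = 0;
    cudaGetDeviceCount(&n);
    std::thread t0(worker, 0);
    std::thread t1(worker, n > 1 ? 1 : 0);  // fall back to device 0 on single-GPU machines
    t0.join();
    t1.join();
    return 0;
}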
error_check
error_check is a checking function I wrote to verify whether a CUDA API call failed or reported an error; on failure, error_check prints the reason, the file path, and the line number for you.
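For reference, a common variant (hypothetical here, not part of the demo) wraps error_check in a macro so that __FILE__ and __LINE__ are filled in automatically at each call site:

// hypothetical convenience macro around error_check; the demo below
// passes __FILE__ and __LINE__ by hand instead
#define ERROR_CHECK(call) error_check((call), __FILE__, __LINE__)

// usage would then look like:
//   ERROR_CHECK(cudaSetDevice(0));
//   ERROR_CHECK(cudaMemcpy(dst, src, nBytes, cudaMemcpyHostToDevice));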
main.cu
#include "common/common.h"
#include <cstdlib> // rand, srand, malloc, free
#include <cstring> // memset
#include <ctime>   // time

void data_inital(float* data, int N)
{
    time_t t;
    srand((unsigned)time(&t));
    std::cout << "data: ";
    // fill the buffer with random values
    for (int i = 0; i < N; i++)
    {
        data[i] = (float)(rand() % 0xff) / 10.0f;
        std::cout << data[i] << " ";
    }
    std::cout << std::endl;
    return;
}

__global__ void add(float* a, float* b, float* c, int N)
{
    int threadID = threadIdx.y * blockDim.x + threadIdx.x;
    if (threadID < N)
    {
        c[threadID] = a[threadID] + b[threadID];
    }
}

int main(int argc, char** argv)
{
    int deviceCount {0};
    cudaDeviceProp deviceProp;
    int driverVersion {0};
    int runtimeVersion {0};
    device_information(&deviceCount, &deviceProp, &driverVersion, &runtimeVersion);
    std::cout << std::endl;

    // select the device for this host thread; all subsequent CUDA operations
    // issued by this thread target the selected device
    cudaError_t error = error_check(cudaSetDevice(0), __FILE__, __LINE__);
    if (error == cudaSuccess)
    {
        std::cout << "cudaSetDevice success!" << std::endl;
        std::cout << "set on device: " << deviceProp.name << std::endl;
    }
    else
    {
        std::cout << "cudaSetDevice failed!" << std::endl;
        return -1;
    }

    int numElem = 16;
    size_t nBytes = numElem * sizeof(float);

    // initialize host-side data buffers
    float *hostDataA, *hostDataB, *gpuRef;
    hostDataA = (float*)malloc(nBytes);
    hostDataB = (float*)malloc(nBytes);
    gpuRef = (float*)malloc(nBytes);
    if (hostDataA == NULL || hostDataB == NULL || gpuRef == NULL)
    {
        std::cout << "malloc failed!" << std::endl;
        return -1;
    }
    data_inital(hostDataA, numElem); // fill input A
    data_inital(hostDataB, numElem); // fill input B
    memset(gpuRef, 0, nBytes);

    // initialize device-side data buffers
    // note: cudaMalloc is declared __host__ __device__, i.e. both host and
    // device code may call this CUDA API function
    float *deviceDataA, *deviceDataB, *deviceDataC;
    cudaMalloc((float**)&deviceDataA, nBytes);
    cudaMalloc((float**)&deviceDataB, nBytes);
    cudaMalloc((float**)&deviceDataC, nBytes);
    if (deviceDataA == NULL || deviceDataB == NULL || deviceDataC == NULL)
    {
        std::cout << "cudaMalloc failed!" << std::endl;
        free(hostDataA);
        free(hostDataB);
        free(gpuRef);
        return -1;
    }

    // note: cudaMemcpy is declared __host__ only, i.e. only host code may
    // call this CUDA API function
    if (cudaSuccess == cudaMemcpy(deviceDataA, hostDataA, nBytes, cudaMemcpyHostToDevice)
        && cudaSuccess == cudaMemcpy(deviceDataB, hostDataB, nBytes, cudaMemcpyHostToDevice)
        && cudaSuccess == cudaMemcpy(deviceDataC, gpuRef, nBytes, cudaMemcpyHostToDevice))
    {
        std::cout << "successfully copy data from host to device " << deviceProp.name << std::endl;
    }
    else
    {
        std::cout << "copy data from host to device " << deviceProp.name << " failed!" << std::endl;
        free(hostDataA);
        free(hostDataB);
        free(gpuRef);
        return -1;
    }

    // launch the kernel
    dim3 block(4, 4);
    dim3 grid(1, 1);
    add<<<grid, block>>>(deviceDataA, deviceDataB, deviceDataC, numElem);

    // copy the result from device back to host
    cudaMemcpy(gpuRef, deviceDataC, nBytes, cudaMemcpyDeviceToHost);

    // print the result
    std::cout << "result: ";
    for (int i = 0; i < numElem; i++)
        std::cout << gpuRef[i] << " ";
    std::cout << std::endl;

    // release resources
    free(hostDataA);
    free(hostDataB);
    free(gpuRef);
    cudaFree(deviceDataA);
    cudaFree(deviceDataB);
    cudaFree(deviceDataC);
    cudaDeviceReset();
    return 0;
}
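One caveat: the kernel above builds threadID from threadIdx alone, so it is only correct for a single-block launch such as grid(1,1). A sketch (my addition, not part of the original demo) of the usual generalization once the grid has more than one block:

// illustrative variant, not the demo kernel: include the block offset
__global__ void add_multi_block(float* a, float* b, float* c, int N)
{
    int threadID = blockIdx.x * blockDim.x + threadIdx.x;  // global thread index
    if (threadID < N)
    {
        c[threadID] = a[threadID] + b[threadID];
    }
}

// launch with enough blocks to cover N elements, e.g.:
//   dim3 block(256);
//   dim3 grid((N + block.x - 1) / block.x);
//   add_multi_block<<<grid, block>>>(deviceDataA, deviceDataB, deviceDataC, N);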
common.h
#include <sys/time.h>
#include <iostream>
#include <cuda_runtime.h>
#include <stdio.h>

// checks whether the CUDA API call you made failed
cudaError_t error_check(cudaError_t status, const char* filePathName, int lineNumber)
{
    if (status != cudaSuccess)
    {
        std::cout << "CUDA API error " << cudaGetErrorName(status)
                  << " at " << filePathName << " in line " << lineNumber << std::endl;
        std::cout << "description: " << cudaGetErrorString(status) << std::endl;
        return status;
    }
    return status;
}

bool device_information(int* ptr_devicCount, cudaDeviceProp* ptr_deviceProp,
                        int* ptr_driverVersion, int* ptr_runtimeVersion)
{
    cudaGetDeviceCount(ptr_devicCount);
    if (*ptr_devicCount == 0)
    {
        std::cerr << "error: no devices supporting CUDA.\n";
        return false;
    }
    else
    {
        std::cout << "Detected " << *ptr_devicCount << " CUDA Capable device(s)\n";
    }
    for (int i {0}; i < *ptr_devicCount; i++)
    {
        cudaSetDevice(i);
        error_check(cudaGetDeviceProperties(ptr_deviceProp, i), __FILE__, __LINE__);
        std::cout << "Device " << i << " name: " << ptr_deviceProp->name << std::endl;
        error_check(cudaDriverGetVersion(ptr_driverVersion), __FILE__, __LINE__);
        error_check(cudaRuntimeGetVersion(ptr_runtimeVersion), __FILE__, __LINE__);
        std::cout << "CUDA Driver Version / Runtime Version: "
                  << *ptr_driverVersion / 1000 << "." << (*ptr_driverVersion % 100) / 10 << "." << *ptr_driverVersion % 10
                  << " / "
                  << *ptr_runtimeVersion / 1000 << "." << (*ptr_runtimeVersion % 100) / 10 << "." << *ptr_runtimeVersion % 10
                  << std::endl;
        std::cout << "CUDA Capability Major/Minor version number: " << ptr_deviceProp->major << "." << ptr_deviceProp->minor << std::endl;
        std::cout << "Total amount of global memory: " << ptr_deviceProp->totalGlobalMem << std::endl;
        std::cout << "Total amount of constant memory: " << ptr_deviceProp->totalConstMem << std::endl;
        std::cout << "Total amount of shared memory per block: " << ptr_deviceProp->sharedMemPerBlock << std::endl;
        std::cout << "Total number of registers available per block: " << ptr_deviceProp->regsPerBlock << std::endl;
        std::cout << "Warp size: " << ptr_deviceProp->warpSize << std::endl;
        std::cout << "Maximum number of threads per block: " << ptr_deviceProp->maxThreadsPerBlock << std::endl;
        std::cout << "Maximum sizes of each dimension of a block: "
                  << ptr_deviceProp->maxThreadsDim[0] << " x "
                  << ptr_deviceProp->maxThreadsDim[1] << " x "
                  << ptr_deviceProp->maxThreadsDim[2] << std::endl;
        std::cout << "Maximum sizes of each dimension of a grid: "
                  << ptr_deviceProp->maxGridSize[0] << " x "
                  << ptr_deviceProp->maxGridSize[1] << " x "
                  << ptr_deviceProp->maxGridSize[2] << std::endl;
        std::cout << "Maximum memory pitch: " << ptr_deviceProp->memPitch << std::endl;
        std::cout << "Texture alignment: " << ptr_deviceProp->textureAlignment << std::endl;
        std::cout << "Concurrent copy and execution: " << ptr_deviceProp->deviceOverlap << std::endl;
        std::cout << "Run time limit on kernels: " << ptr_deviceProp->kernelExecTimeoutEnabled << std::endl;
        std::cout << "Integrated: " << ptr_deviceProp->integrated << std::endl;
        std::cout << "Support host page-locked memory mapping: " << ptr_deviceProp->canMapHostMemory << std::endl;
        std::cout << "Alignment requirement for Surfaces: " << ptr_deviceProp->surfaceAlignment << std::endl;
        std::cout << "Device has ECC support: " << ptr_deviceProp->ECCEnabled << std::endl;
        std::cout << "Device is using TCC driver model: " << ptr_deviceProp->tccDriver << std::endl;
        std::cout << "Device supports Unified Addressing (UVA): " << ptr_deviceProp->unifiedAddressing << std::endl;
        std::cout << "Device supports Compute Preemption: " << ptr_deviceProp->computePreemptionSupported << std::endl;
    }
    return true;
}
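Assuming main.cu sits next to a common/ directory holding common.h (matching the #include above), building and running should look roughly like:

nvcc main.cu -o add_demo
./add_demo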