朴素实现方法 按照…背景网上很多关于矩阵乘的编程优化思路本着看理论分析万遍不如实际代码写一遍的想法大概过一下优化思路。
Matrix multiplication is defined as follows; we fix the matrix shapes and (row-major) storage as: A[M, K], B[K, N], C[M, N].
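Element-wise, each output entry is the dot product of a row of A and a column of B (0-based indices):

C[m][n] = sum_{k=0}^{K-1} A[m][k] * B[k][n],    0 <= m < M, 0 <= n < N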
CPU

Naive implementation: the conventional way to implement matrix multiplication is the triple for loop below.
#define OFFSET(row, col, ld) ((row) * (ld) + (col))

void cpuSgemm(float *a, float *b, float *c, const int M, const int N, const int K)
{
    for (int m = 0; m < M; m++) {
        for (int n = 0; n < N; n++) {
            float psum = 0.0;
            for (int k = 0; k < K; k++) {
                psum += a[OFFSET(m, k, K)] * b[OFFSET(k, n, N)];
            }
            c[OFFSET(m, n, N)] = psum;
        }
    }
}
Optimizing for contiguous memory access: B is stored contiguously along the N direction, so we can swap the 2nd and 3rd loops above. B is then read sequentially instead of striding across rows, which makes the accesses contiguous.
void cpuSgemm_1(float *a, float *b, float *c, const int M, const int N, const int K)
{
    for (int m = 0; m < M; m++) {
        for (int k = 0; k < K; k++) {
            for (int n = 0; n < N; n++) {
                c[OFFSET(m, n, N)] += a[OFFSET(m, k, K)] * b[OFFSET(k, n, N)];
            }
        }
    }
}

Data reordering / data reuse: changing the loop order from M-N-K to M-K-N means the K-direction accumulation can no longer be held in a register, which adds the cost of repeatedly reading and writing C. So we might as well transpose B up front and then keep the original M-N-K loop order; the O(K*N) transpose is amortized over the O(M*N*K) multiply-add work.
void cpuSgemm_2(float *a, float *b, float *c, const int M, const int N, const int K)
{
    // Transpose B into b1 so that both operands are read contiguously along K.
    float *b1 = (float*) malloc(sizeof(float) * K * N);
    for (int i = 0; i < K; i++) {
        for (int j = 0; j < N; j++) {
            b1[OFFSET(j, i, K)] = b[OFFSET(i, j, N)];
        }
    }
    for (int m = 0; m < M; m++) {
        for (int n = 0; n < N; n++) {
            float psum = 0.0;
            for (int k = 0; k < K; k++) {
                psum += a[OFFSET(m, k, K)] * b1[OFFSET(n, k, K)];
            }
            c[OFFSET(m, n, N)] = psum;
        }
    }
    free(b1);  // release the scratch transpose buffer
}
Performance: below are the timings of these methods on the CPU, with M = N = 512 and K = 256. Each optimization step reduces the running time. There are further CPU optimization ideas, such as loop unrolling, intrinsic (SIMD) functions, and cache-aware matrix blocking; this post does not implement them all, but a small unrolling sketch follows the timings below.
cpuSgemm, Time measured: 416889 microseconds.
cpuSgemm_1, Time measured: 405259 microseconds.
cpuSgemm_2, Time measured: 238786 microseconds.
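As one concrete taste of the ideas above, here is a minimal loop-unrolling sketch (not part of the measurements; cpuSgemm_unroll4 is an illustrative name). It unrolls the K loop of cpuSgemm_2 by a factor of 4 and assumes, for simplicity, that K is a multiple of 4 and that b1 is B already transposed to [N, K]:

void cpuSgemm_unroll4(float *a, float *b1, float *c, const int M, const int N, const int K)
{
    for (int m = 0; m < M; m++) {
        for (int n = 0; n < N; n++) {
            // Four independent accumulators shorten the dependency chain
            // and give the compiler room to vectorize.
            float s0 = 0.0f, s1 = 0.0f, s2 = 0.0f, s3 = 0.0f;
            const float *pa = &a[OFFSET(m, 0, K)];
            const float *pb = &b1[OFFSET(n, 0, K)];
            for (int k = 0; k < K; k += 4) {   // assumes K % 4 == 0
                s0 += pa[k]     * pb[k];
                s1 += pa[k + 1] * pb[k + 1];
                s2 += pa[k + 2] * pb[k + 2];
                s3 += pa[k + 3] * pb[k + 3];
            }
            c[OFFSET(m, n, N)] = (s0 + s1) + (s2 + s3);
        }
    }
}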
GPU
Grid thread-loop matrix multiplication: the output matrix C has M*N elements, and each is the sum of K products, so we can let each thread compute one element's sum of K products; this is the grid thread-loop version.
__global__ void matrix_multiply_gpu_0(float *a, float *b, float *c, int M, int N, int K)
{
    int tidx = threadIdx.x;
    int bidx = blockIdx.x;
    int idx = bidx * blockDim.x + tidx;   // global thread id == output element id
    int row = idx / N;
    int col = idx % N;
    if (row < M && col < N) {
        float tmp = 0.0;
        for (int k = 0; k < K; k++) {
            tmp += a[row*K + k] * b[k*N + col];
        }
        c[row*N + col] = tmp;
    }
}
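With one thread per output element, the launch must cover all M*N elements; in the full program at the end of this post the kernel is invoked as:

int threadnum = 64;
int blocks = (M*N + threadnum - 1) / threadnum;   // round up so every element is covered
matrix_multiply_gpu_0<<<blocks, threadnum>>>(d_a, d_b, d_c0, M, N, K);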
Block thread-loop matrix multiplication: the grid thread-loop version has a weakness: threads within one block may compute elements from different rows of C, so the block's threads touch different rows of A; the loads cannot be broadcast, and the extra memory traffic increases execution time. The fix: each block computes one row of C, with its threads sweeping the row at a fixed stride of blockDim.x; one block is launched per row, M blocks in total.
__global__ void matrix_multiply_gpu_1(float *a, float *b, float *c, int M, int N, int K)
{
    int tidx = threadIdx.x;
    int bidx = blockIdx.x;
    float tmp;
    for (; bidx < M; bidx += gridDim.x) {
        // Restart the column index for each row handled by this block.
        for (int col = tidx; col < N; col += blockDim.x) {
            tmp = 0.0;
            for (int k = 0; k < K; k++) {
                tmp += a[bidx*K + k] * b[k*N + col];
            }
            c[bidx*N + col] = tmp;
        }
    }
}

Row shared-memory matrix multiplication: shared memory sits at the same level as the L1 cache, with access latency about an order of magnitude lower than global memory's. Replacing global-memory accesses with shared memory is one of the most important GPU optimizations, and the key to profiting from it is data reuse: the more times the staged data is reused, the bigger the payoff. In the block thread-loop kernel, all threads of a block use the same row of A, and that row is multiplied against every column of B, i.e. reused N times. So we can load the row of A into shared memory once and read it from there during the computation. Note that the code assumes TILE_WIDTH == K.
#define TILE_WIDTH 256

__global__ void matrix_multiply_gpu_2(float *a, float *b, float *c, int M, int N, const int K)
{
    __shared__ float data[TILE_WIDTH];   // one full row of A (TILE_WIDTH == K)
    int tid = threadIdx.x;
    int row = blockIdx.x;
    int i, j;
    // Cooperatively stage row 'row' of A into shared memory.
    for (i = tid; i < K; i += blockDim.x) {
        data[i] = a[row*K + i];
    }
    __syncthreads();
    float tmp;
    for (j = tid; j < N; j += blockDim.x) {
        tmp = 0.0;
        for (int k = 0; k < K; k++) {
            tmp += data[k] * b[k*N + j];
        }
        c[row*N + j] = tmp;
    }
}

Tiled shared-memory matrix multiplication: by the same reasoning it is natural to reuse B as well, so we split both A and B into small checkerboard-style tiles and stage them from global memory into shared memory. Each TILE_SIZE x TILE_SIZE tile is read from global memory once but used TILE_SIZE times, which cuts data-access time and fully reuses the local data of the multiplication.
#define TILE_SIZE 32

__global__ void matrix_multiply_gpu_3(float *a, float *b, float *c, int M, int N, const int K)
{
    __shared__ float matA[TILE_SIZE][TILE_SIZE];
    __shared__ float matB[TILE_SIZE][TILE_SIZE];
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int Col = bx * TILE_SIZE + tx;
    int Row = by * TILE_SIZE + ty;
    float Pervalue = 0.0;
    // Walk the K dimension one tile at a time (assumes M, N, K are multiples of TILE_SIZE).
    for (int i = 0; i < K / TILE_SIZE; i++) {
        matA[ty][tx] = a[Row * K + (i * TILE_SIZE + tx)];
        matB[ty][tx] = b[Col + (i * TILE_SIZE + ty) * N];
        __syncthreads();
        for (int k = 0; k < TILE_SIZE; k++)
            Pervalue += matA[ty][k] * matB[k][tx];
        __syncthreads();
    }
    c[Row * N + Col] = Pervalue;
}
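In the full program the tiled kernel is launched with TILE_SIZE x TILE_SIZE thread blocks and a 2-D grid covering C:

int threadnum = 32;                            // == TILE_SIZE
dim3 gridSize(M / threadnum, N / threadnum);
dim3 blockSize(threadnum, threadnum);
matrix_multiply_gpu_3<<<gridSize, blockSize>>>(d_a, d_b, d_c3, M, N, K);

One caveat: Col comes from blockIdx.x and Row from blockIdx.y, so for M != N the grid would have to be dim3(N / threadnum, M / threadnum); it works here because M == N == 512.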
Performance
Timing each kernel with nvprof shows that every optimization step brings a visible performance improvement. Full code:
GitHub - Briwisdom/study_CUDA_examples: some demos for study CUDA program.
#include <iostream>
#include <chrono>
#include <cstdio>
#include <cstdlib>

using namespace std;

#define OFFSET(row, col, ld) ((row) * (ld) + (col))

void initDate(float *arr, int Len, bool randFlag = true)
{
    if (randFlag) {
        for (int i = 0; i < Len; i++) {
            arr[i] = rand() / 1000000;
        }
    } else {
        float value = 0.0;
        for (int i = 0; i < Len; i++) {
            arr[i] = value;
        }
    }
}

void compare_result(float *x, float *y, int n, const char *name)
{
    int cnt = 0;
    for (int i = 0; i < n; i++) {
        if (x[i] != y[i]) {
            cnt++;
            printf("x = %f, y = %f\n", x[i], y[i]);
        }
    }
    printf("%s, ", name);
    if (cnt == 0)
        printf("result matched.\n");
    else
        printf("something error! result not match number %d in total number: %d.\n", cnt, n);
}

void cpuSgemm(float *a, float *b, float *c, const int M, const int N, const int K)
{
    for (int m = 0; m < M; m++) {
        for (int n = 0; n < N; n++) {
            float psum = 0.0;
            for (int k = 0; k < K; k++) {
                psum += a[OFFSET(m, k, K)] * b[OFFSET(k, n, N)];
            }
            c[OFFSET(m, n, N)] = psum;
        }
    }
}

void cpuSgemm_1(float *a, float *b, float *c, const int M, const int N, const int K)
{
    for (int m = 0; m < M; m++) {
        for (int k = 0; k < K; k++) {
            for (int n = 0; n < N; n++) {
                c[OFFSET(m, n, N)] += a[OFFSET(m, k, K)] * b[OFFSET(k, n, N)];
            }
        }
    }
}

void cpuSgemm_2(float *a, float *b, float *c, const int M, const int N, const int K)
{
    float *b1 = (float*) malloc(sizeof(float) * K * N);
    for (int i = 0; i < K; i++) {
        for (int j = 0; j < N; j++) {
            b1[OFFSET(j, i, K)] = b[OFFSET(i, j, N)];
        }
    }
    for (int m = 0; m < M; m++) {
        for (int n = 0; n < N; n++) {
            float psum = 0.0;
            for (int k = 0; k < K; k++) {
                psum += a[OFFSET(m, k, K)] * b1[OFFSET(n, k, K)];
            }
            c[OFFSET(m, n, N)] = psum;
        }
    }
    free(b1);
}

void operation(void (*func)(float*, float*, float*, int, int, int), float *a, float *b, float *c, const int M, const int N, const int K, int repeat, const char *name)
{
    auto begin0 = std::chrono::high_resolution_clock::now();
    for (int i = 0; i < repeat; i++) {
        (*func)(a, b, c, M, N, K);
    }
    auto end0 = std::chrono::high_resolution_clock::now();
    auto elapsed0 = std::chrono::duration_cast<std::chrono::microseconds>(end0 - begin0);
    printf("%s, Time measured: %d microseconds.\n", name, int(elapsed0.count() / repeat));
}

__global__ void matrix_multiply_gpu_0(float *a, float *b, float *c, int M, int N, int K)
{
    int tidx = threadIdx.x;
    int bidx = blockIdx.x;
    int idx = bidx * blockDim.x + tidx;
    int row = idx / N;
    int col = idx % N;
    if (row < M && col < N) {
        float tmp = 0.0;
        for (int k = 0; k < K; k++) {
            tmp += a[row*K + k] * b[k*N + col];
        }
        c[row*N + col] = tmp;
    }
}

__global__ void matrix_multiply_gpu_1(float *a, float *b, float *c, int M, int N, int K)
{
    int tidx = threadIdx.x;
    int bidx = blockIdx.x;
    float tmp;
    for (; bidx < M; bidx += gridDim.x) {
        for (int col = tidx; col < N; col += blockDim.x) {
            tmp = 0.0;
            for (int k = 0; k < K; k++) {
                tmp += a[bidx*K + k] * b[k*N + col];
            }
            c[bidx*N + col] = tmp;
        }
    }
}

#define TILE_WIDTH 256
__global__ void matrix_multiply_gpu_2(float *a, float *b, float *c, int M, int N, const int K)
{
    __shared__ float data[TILE_WIDTH];
    int tid = threadIdx.x;
    int row = blockIdx.x;
    int i, j;
    for (i = tid; i < K; i += blockDim.x) {
        data[i] = a[row*K + i];
    }
    __syncthreads();
    float tmp;
    for (j = tid; j < N; j += blockDim.x) {
        tmp = 0.0;
        for (int k = 0; k < K; k++) {
            tmp += data[k] * b[k*N + j];
        }
        c[row*N + j] = tmp;
    }
}

#define TILE_SIZE 32
__global__ void matrix_multiply_gpu_3(float *a, float *b, float *c, int M, int N, const int K)
{
    __shared__ float matA[TILE_SIZE][TILE_SIZE];
    __shared__ float matB[TILE_SIZE][TILE_SIZE];
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int Col = bx * TILE_SIZE + tx;
    int Row = by * TILE_SIZE + ty;
    float Pervalue = 0.0;
    for (int i = 0; i < K / TILE_SIZE; i++) {
        matA[ty][tx] = a[Row * K + (i * TILE_SIZE + tx)];
        matB[ty][tx] = b[Col + (i * TILE_SIZE + ty) * N];
        __syncthreads();
        for (int k = 0; k < TILE_SIZE; k++)
            Pervalue += matA[ty][k] * matB[k][tx];
        __syncthreads();
    }
    c[Row * N + Col] = Pervalue;
}

int main()
{
    int M = 512;
    int N = 512;
    int K = 256;
    float *a  = (float*) malloc(M*K * sizeof(float));
    float *b  = (float*) malloc(N*K * sizeof(float));
    float *c  = (float*) malloc(M*N * sizeof(float));
    float *c1 = (float*) malloc(M*N * sizeof(float));
    float *c2 = (float*) malloc(M*N * sizeof(float));
    float *c_gpu_0 = (float*) malloc(M*N * sizeof(float));
    float *c_gpu_1 = (float*) malloc(M*N * sizeof(float));
    float *c_gpu_2 = (float*) malloc(M*N * sizeof(float));
    float *c_gpu_3 = (float*) malloc(M*N * sizeof(float));

    initDate(a, M*K);
    initDate(b, N*K);
    initDate(c,  M*N, false);
    initDate(c1, M*N, false);
    initDate(c2, M*N, false);
    initDate(c_gpu_0, M*N, false);
    initDate(c_gpu_1, M*N, false);
    initDate(c_gpu_2, M*N, false);
    initDate(c_gpu_3, M*N, false);

    // Ensure the results are right.
    cpuSgemm(a, b, c, M, N, K);
    cpuSgemm_1(a, b, c1, M, N, K);
    cpuSgemm_2(a, b, c2, M, N, K);
    compare_result(c, c1, M*N, "sgemm1");
    compare_result(c, c2, M*N, "sgemm2");

    // Test the performance.
    int repeat = 10;
    operation(cpuSgemm,   a, b, c,  M, N, K, repeat, "cpuSgemm");
    operation(cpuSgemm_1, a, b, c1, M, N, K, repeat, "cpuSgemm_1");
    operation(cpuSgemm_2, a, b, c2, M, N, K, repeat, "cpuSgemm_2");

    float *d_a, *d_b, *d_c0, *d_c1, *d_c2, *d_c3;
    cudaMalloc((void**)&d_a,  sizeof(float)*(M*K));
    cudaMalloc((void**)&d_b,  sizeof(float)*(N*K));
    cudaMalloc((void**)&d_c0, sizeof(float)*(M*N));
    cudaMalloc((void**)&d_c1, sizeof(float)*(M*N));
    cudaMalloc((void**)&d_c2, sizeof(float)*(M*N));
    cudaMalloc((void**)&d_c3, sizeof(float)*(M*N));
    cudaMemcpy(d_a, a, sizeof(float)*M*K, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(float)*N*K, cudaMemcpyHostToDevice);

    int threadnum = 64;
    int blocks = (M*N + threadnum - 1) / threadnum;
    cudaMemcpy(d_c0, c_gpu_0, sizeof(float)*M*N, cudaMemcpyHostToDevice);
    matrix_multiply_gpu_0<<<blocks, threadnum>>>(d_a, d_b, d_c0, M, N, K);
    cudaMemcpy(c_gpu_0, d_c0, sizeof(float)*M*N, cudaMemcpyDeviceToHost);
    compare_result(c, c_gpu_0, M*N, "gpu_0");
    cudaFree(d_c0);

    cudaMemcpy(d_c1, c_gpu_1, sizeof(float)*M*N, cudaMemcpyHostToDevice);
    matrix_multiply_gpu_1<<<M, threadnum>>>(d_a, d_b, d_c1, M, N, K);
    cudaMemcpy(c_gpu_1, d_c1, sizeof(float)*M*N, cudaMemcpyDeviceToHost);
    compare_result(c, c_gpu_1, M*N, "gpu_1");
    cudaFree(d_c1);

    cudaMemcpy(d_c2, c_gpu_2, sizeof(float)*M*N, cudaMemcpyHostToDevice);
    matrix_multiply_gpu_2<<<M, threadnum>>>(d_a, d_b, d_c2, M, N, K);
    cudaMemcpy(c_gpu_2, d_c2, sizeof(float)*M*N, cudaMemcpyDeviceToHost);
    compare_result(c, c_gpu_2, M*N, "gpu_2");
    cudaFree(d_c2);

    threadnum = 32;
    dim3 gridSize(M / threadnum, N / threadnum);
    dim3 blockSize(threadnum, threadnum);
    cudaMemcpy(d_c3, c_gpu_3, sizeof(float)*M*N, cudaMemcpyHostToDevice);
    matrix_multiply_gpu_3<<<gridSize, blockSize>>>(d_a, d_b, d_c3, M, N, K);
    cudaMemcpy(c_gpu_3, d_c3, sizeof(float)*M*N, cudaMemcpyDeviceToHost);
    compare_result(c, c_gpu_3, M*N, "gpu_3");
    cudaFree(d_c3);

    free(a); free(b); free(c); free(c1); free(c2);
    free(c_gpu_0); free(c_gpu_1); free(c_gpu_2); free(c_gpu_3);
    cudaFree(d_a);
    cudaFree(d_b);
}
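The post does not give a build recipe; with a standard CUDA toolchain something like the following should work (the file name sgemm.cu is an assumption):

nvcc -O3 sgemm.cu -o sgemm
./sgemm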