Computing matrix multiplication with shared memory (it doesn't actually speed things up much)

As the timings below show, the tiled shared-memory kernel actually comes out slower than the naive one (1069 ms vs. 486 ms for 10000 launches); at this small problem size, the extra synchronization and the bank conflicts caused by the transposed tile stores likely outweigh the savings in global-memory traffic.

#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"

#include
#include

#include

cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);

#define TILE_WIDTH 16
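
// The TIME_INIT / TIME_MARK / TIME_PRINT macros used further down come from a
// header the post does not show. A minimal stand-in (an assumption, not the
// author's original implementation) that records named wall-clock marks and
// prints the elapsed milliseconds between consecutive marks:
#include <chrono>
#include <string>
#include <utility>
#include <vector>

#define TIME_INIT \
    std::vector<std::pair<std::string, std::chrono::steady_clock::time_point>> time_marks_
#define TIME_MARK(name) \
    time_marks_.push_back({ (name), std::chrono::steady_clock::now() })
#define TIME_PRINT \
    for (size_t i_ = 1; i_ < time_marks_.size(); ++i_) \
        printf("%s: %lld ms\n", time_marks_[i_].first.c_str(), \
               (long long)std::chrono::duration_cast<std::chrono::milliseconds>( \
                   time_marks_[i_].second - time_marks_[i_ - 1].second).count())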

// Tiled matrix multiply using shared memory: C (m x k) = A (m x n) * B (n x k).
__global__ void MatrixMulKernel(int m, int n, int k, int *A, int *B, int *C)
{
    // Shared memory tiles; each block gets its own copy.
    __shared__ int ds_A[TILE_WIDTH][TILE_WIDTH];
    __shared__ int ds_B[TILE_WIDTH][TILE_WIDTH];

    // Shorthand for the coordinates; these six values are what differ
    // between the parallel threads.
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // Row and column of this thread's element in the result matrix. Threads
    // that fall outside C must NOT return early: every thread in the block
    // has to reach the __syncthreads() calls below, so out-of-range threads
    // keep running and simply skip the final store.
    int iy = by * TILE_WIDTH + ty;
    int ix = bx * TILE_WIDTH + tx;

    // Accumulator for C[iy][ix].
    int Cvalue = 0;

    // Read tiles of A and B in phases and accumulate the partial products.
    for (int t = 0; t < (n + TILE_WIDTH - 1) / TILE_WIDTH; ++t)
    {
        // Tiles are stored transposed ([tx][ty]); out-of-range elements are
        // padded with zeros so they contribute nothing to the sum.
        ds_A[tx][ty] = (iy < m && t * TILE_WIDTH + tx < n)
                           ? A[iy * n + t * TILE_WIDTH + tx] : 0;
        ds_B[tx][ty] = (t * TILE_WIDTH + ty < n && ix < k)
                           ? B[(t * TILE_WIDTH + ty) * k + ix] : 0;
        __syncthreads();

        for (int i = 0; i < TILE_WIDTH; ++i)
            Cvalue += ds_A[i][ty] * ds_B[tx][i]; // read operands from shared memory
        __syncthreads(); // don't let the next phase overwrite tiles still in use
    }

    // Write the result once, after all phases.
    if (iy < m && ix < k)
        C[iy * k + ix] = Cvalue;
}
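
// Why tiling helps in principle: without shared memory each thread issues
// 2*n global loads (one element of A and one of B per multiply); with
// TILE_WIDTH x TILE_WIDTH tiles each element is fetched from global memory
// once per tile and reused TILE_WIDTH times, cutting global traffic by a
// factor of TILE_WIDTH (16 here). For n = 200 the loop above runs
// ceil(200 / 16) = 13 tile phases. That the tiled kernel still loses in the
// benchmark below suggests the overhead (the __syncthreads() barriers and
// the bank conflicts from the transposed [tx][ty] stores) dominates at this
// small problem size.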

// Naive version without shared memory; the dimensions are hardwired to
// A (100 x 200), B (200 x 100), C (100 x 100).
__global__ void addKernel(int *c, const int *a, const int *b)
{
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix >= 100 || iy >= 100) {
        return;
    }

    int sum = 0;

    for (int i = 0; i != 200; ++i) {
        int ta = a[iy * 200 + i]; // A has 200 columns per row
        int tb = b[i * 100 + ix];
        sum += ta * tb;
    }
    c[iy * 100 + ix] = sum;
}
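
// A hypothetical CPU reference (not in the original post) that can be used
// to check the device result after the copy back, e.g. by calling
// verifyMatMul(a, b, c, 100, 200, 100) from main():
bool verifyMatMul(const int *a, const int *b, const int *c, int m, int n, int k)
{
    for (int r = 0; r < m; ++r) {
        for (int col = 0; col < k; ++col) {
            int ref = 0;
            for (int i = 0; i < n; ++i)
                ref += a[r * n + i] * b[i * k + col];
            if (c[r * k + col] != ref)
                return false; // mismatch against the CPU reference
        }
    }
    return true;
}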

int main()
{
    const int arow = 100;
    const int acol = 200;
    const int brow = 200;
    const int bcol = 100;

    const int arraySize = arow * acol;  // 20000; brow * bcol is also 20000

    int *a = new int[arraySize];
    int *b = new int[arraySize];
    int *c = new int[arraySize / 2];    // the result C is arow * bcol = 10000 ints

    // Every row of A is 0, 1, ..., acol-1.
    for (int j = 0; j != arow; ++j) {
        for (int i = 0; i != acol; ++i) {
            a[j * acol + i] = i;
        }
    }

    // Every row of B is 0, 1, ..., bcol-1.
    for (int j = 0; j != brow; ++j) {
        for (int i = 0; i != bcol; ++i) {
            b[j * bcol + i] = i;
        }
    }

    addWithCuda(c, a, b, arraySize);

    cudaDeviceReset();

printf("c0=%d c1=%d c\[3,50\]=%d \\n", c\[0\], c\[1\],c\[3\*100+50\]);
    delete[] a;
    delete[] b;
    delete[] c;

system("pause");
return 0;

}

// Helper that copies A and B to the GPU, benchmarks the two kernels,
// and copies the 100 x 100 result back into c.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;

    // Choose which GPU to run on; change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));

    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);

    // Launch one thread per element of the 100 x 100 result.
    int thread_x = 100;
    int thread_y = 100;
    dim3 block(TILE_WIDTH, TILE_WIDTH);
    int grid_w = (thread_x + block.x - 1) / block.x;
    int grid_h = (thread_y + block.y - 1) / block.y;
    dim3 grid(grid_w, grid_h);

    // Kernel launches are asynchronous, so these marks mostly measure how
    // long the launch queue takes to drain under 10000 back-to-back launches.
    TIME_INIT;
    TIME_MARK("t1");
    for (int i = 0; i != 10000; ++i)
        addKernel<<<grid, block>>>(dev_c, dev_a, dev_b);                      // 486 ms
    TIME_MARK("t2");
    for (int i = 0; i != 10000; ++i)
        MatrixMulKernel<<<grid, block>>>(100, 200, 100, dev_a, dev_b, dev_c); // 1069 ms
    TIME_MARK("t3");
    TIME_PRINT;

    cudaStatus = cudaGetLastError();
    cudaStatus = cudaDeviceSynchronize();
    // Only the first size/2 ints of dev_c hold the 100 x 100 result.
    cudaStatus = cudaMemcpy(c, dev_c, size / 2 * sizeof(int), cudaMemcpyDeviceToHost);

    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);

    return cudaStatus;
}
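
Every cudaStatus above is assigned but never inspected, so failures pass
silently. A common hardening pattern is to wrap each runtime call in a
checking macro; a minimal sketch (the CUDA_CHECK name is illustrative, not
from the post):

// Abort with the API error string and the failing file/line.
#define CUDA_CHECK(call)                                              \
    do {                                                              \
        cudaError_t err_ = (call);                                    \
        if (err_ != cudaSuccess) {                                    \
            fprintf(stderr, "CUDA error: %s at %s:%d\n",              \
                    cudaGetErrorString(err_), __FILE__, __LINE__);    \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)

// Usage: CUDA_CHECK(cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice));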

Reposted from: https://www.cnblogs.com/luoyinjie/p/10846113.html
