
CUDA C - CPU and GPU execution time using clock() and cudaEvent: is it correct?

I wrote a program that adds two 2-D arrays to compare the performance of the CPU and the GPU. I used the clock() function to measure the CPU execution time and cudaEvent to measure the kernel execution time on the GPU. Since I am learning CUDA through Udacity, I executed the program on their server and got these results:

Output: 
GPU: 0.001984 ms 
CPU : 30.000000 ms 

Now to my real question: these results look astonishingly fast for the GPU, and I am skeptical about whether they are accurate or whether I have made a mistake in my program.

Here is my program:

#include "stdio.h" 
#include<time.h> 
#define COLUMNS 900 
#define ROWS 900 
long a[ROWS][COLUMNS], b[ROWS][COLUMNS], c[ROWS][COLUMNS],d[ROWS][COLUMNS]; 
__global__ void add(long *a, long *b, long *c,long *d) 
{ 
int x = blockIdx.x; 
int y = blockIdx.y; 
int i = (COLUMNS*y) + x; 
c[i] = a[i] + b[i]; 
a[i]=d[i]; 
} 

int main()
{
    long *dev_a, *dev_b, *dev_c, *dev_d;
    float ms;
    clock_t startc, end;
    double cpu_time_used;
    cudaEvent_t start, stop;

    cudaMalloc((void **) &dev_a, ROWS*COLUMNS*sizeof(int));
    cudaMalloc((void **) &dev_b, ROWS*COLUMNS*sizeof(int));
    cudaMalloc((void **) &dev_c, ROWS*COLUMNS*sizeof(int));
    cudaMalloc((void **) &dev_d, ROWS*COLUMNS*sizeof(int));

    startc = clock();
    for (long y = 0; y < ROWS; y++)          // Fill Arrays
        for (long x = 0; x < COLUMNS; x++)
        {
            a[y][x] = x;
            b[y][x] = y;
            d[y][x] = rand() % 4;
            c[y][x] = a[y][x] + b[y][x];
        }
    end = clock();

    cpu_time_used = ((double) (end - startc)) / CLOCKS_PER_SEC;
    cpu_time_used *= 1000;

    cudaMemcpy(dev_a, a, ROWS*COLUMNS*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, ROWS*COLUMNS*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_d, d, ROWS*COLUMNS*sizeof(int), cudaMemcpyHostToDevice);

    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaEventRecord(stop, 0);

    add<<<dim3(1024,1024), dim3(128,128)>>>(dev_a, dev_b, dev_c, dev_d);

    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ms, start, stop);
    cudaMemcpy(c, dev_c, ROWS*COLUMNS*sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    printf("GPU: %f ms", ms);
    printf("\n CPU : %f ms", cpu_time_used);

    return 0;
}

Thank you all for the answers to my query. Here are the changes I made to my code and the updated results.

Updated code:

#include "stdio.h" 
#include <time.h> 
#include <sys/time.h> 
#include <unistd.h> 
#define COLUMNS 500 
#define ROWS 500 
long a[ROWS][COLUMNS], b[ROWS][COLUMNS], c[ROWS][COLUMNS],d[ROWS][COLUMNS]; 



__global__ void add(long *a, long *b, long *c, long *d)
{
    int x = blockIdx.x;
    int y = blockIdx.y;
    int i = (COLUMNS * y) + x;
    c[i] = a[i] + b[i];
    a[i] = d[i];
}
int main()
{
    long *dev_a, *dev_b, *dev_c, *dev_d;
    struct timeval startc, end;
    float ms;
    long mtime, seconds, useconds;
    // clock_t startc, end;
    // double cpu_time_used;
    long ns;
    cudaEvent_t start, stop;

    cudaMalloc((void **) &dev_a, ROWS*COLUMNS*sizeof(int));
    cudaMalloc((void **) &dev_b, ROWS*COLUMNS*sizeof(int));
    cudaMalloc((void **) &dev_c, ROWS*COLUMNS*sizeof(int));
    cudaMalloc((void **) &dev_d, ROWS*COLUMNS*sizeof(int));

    gettimeofday(&startc, NULL);
    for (long y = 0; y < ROWS; y++)          // Fill Arrays
        for (long x = 0; x < COLUMNS; x++)
        {
            a[y][x] = x;
            b[y][x] = y;
            d[y][x] = rand() % 4;
            c[y][x] = a[y][x] + b[y][x];
        }
    gettimeofday(&end, NULL);

    seconds = end.tv_sec - startc.tv_sec;
    useconds = end.tv_usec - startc.tv_usec;
    mtime = ((seconds) * 1000 + useconds/1000.0) + 0.5;

    for (long y = ROWS-1; y < ROWS; y++)     // Output Arrays
    {
        for (long x = COLUMNS-1; x < COLUMNS; x++)
        {
            // printf("\n[%ld][%ld]=%ld ", y, x, c[y][x]);
            // printf("[%d][%d]=%d ", y, x, d[y][x]);
        }
        printf("\n");
    }



    cudaMemcpy(dev_a, a, ROWS*COLUMNS*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, ROWS*COLUMNS*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_d, d, ROWS*COLUMNS*sizeof(int), cudaMemcpyHostToDevice);

    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    add<<<dim3(1024,1024), dim3(128,128)>>>(dev_a, dev_b, dev_c, dev_d);

    cudaThreadSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ms, start, stop);

    cudaMemcpy(c, dev_c, ROWS*COLUMNS*sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    //cpu_time_used = ((double) (end - start))/CLOCKS_PER_SEC;
    printf("GPU: %f ms", ms);
    printf("\n CPU : %ld ms", mtime);
    for (long y = ROWS-1; y < ROWS; y++)     // Output Arrays
    {
        for (long x = COLUMNS-1; x < COLUMNS; x++)
        {
            // printf("\n[%ld][%ld]=%ld ", y, x, c[y][x]);
            // printf("[%d][%d]=%d ", y, x, d[y][x]);
        }
        printf("\n");
    }
    return 0;
}

Output:

GPU: 0.011040 ms 
CPU : 9 ms 

Now can I safely say that it is correct?


The answers to [Timing CUDA operations](http://stackoverflow.com/questions/7876624/timing-cuda-operations) may be of interest to you. – JackOLantern 2015-02-24 21:06:18

Answer


You are correct to think the speedup is too fast and the CPU time too long. Time the CPU side using the method from C++ obtaining milliseconds time on Linux -- clock() doesn't seem to work properly. You may also need to move cudaEventRecord(stop, 0); to after the kernel.
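For reference, a minimal sketch of the usual event-timing pattern, with the stop event recorded after the launch; grid and block here are placeholders for whatever launch configuration you settle on, not values from the post:

cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);

cudaEventRecord(start, 0);                 // mark start on the default stream
add<<<grid, block>>>(dev_a, dev_b, dev_c, dev_d);
cudaEventRecord(stop, 0);                  // mark stop AFTER the kernel launch
cudaEventSynchronize(stop);                // block until the stop event completes

float ms = 0.0f;
cudaEventElapsedTime(&ms, start, stop);    // elapsed GPU time in milliseconds

cudaEventDestroy(start);
cudaEventDestroy(stop);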

I see 5 reads and writes in your kernel. Taking 5*4 Bytes*500*500/(1024^3*0.009), your memory is moving at roughly 0.517 GB/s, which is a small fraction of what is available. I would say your CPU version needs some work. In comparison, your GPU at 5*4 Bytes*500*500/(1024^3*0.01104e-3) is around 421 GB/s. I would say you aren't quite there.
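To make that arithmetic explicit, a throwaway host-side check; the 5 accesses per element and the 4-byte word size are the assumptions from the estimate above:

#include <stdio.h>

int main(void)
{
    const double bytes = 5.0 * 4.0 * 500.0 * 500.0;       // 5 accesses x 4 B x 500*500 elements
    const double gib   = 1024.0 * 1024.0 * 1024.0;

    printf("CPU: %.3f GB/s\n", bytes / gib / 0.009);      // 9 ms       -> ~0.517 GB/s
    printf("GPU: %.1f GB/s\n", bytes / gib / 0.01104e-3); // 0.01104 ms -> ~421.8 GB/s
    return 0;
}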

Now, on to the many errors....

#include "stdio.h" 
#include <time.h> 
#include <sys/time.h> 
#include <unistd.h> 
#include <cuda.h> 
#include <cuda_runtime.h> 

#define COLUMNS 500 
#define ROWS 500 
long a[ROWS*COLUMNS], b[ROWS*COLUMNS], c[ROWS*COLUMNS],d[ROWS*COLUMNS]; 



__global__ void add(long *a, long *b, long *c, long *d)
{
    int x = blockIdx.x;
    int y = blockIdx.y;
    int i = (COLUMNS * y) + x;
    c[i] = a[i] + b[i];
    a[i] = d[i];
}
int main()
{
    long *dev_a, *dev_b, *dev_c, *dev_d;
    struct timeval startc, end;
    float ms;
    long seconds, useconds;
    double mtime;
    cudaEvent_t start, stop;

    for (int i = 0; i < ROWS*COLUMNS; i++)
        d[i] = rand() % 4;

    for (int i = 0; i < ROWS; i++) {
        for (int j = 0; j < COLUMNS; j++) {
            a[i*COLUMNS+j] = j;
            b[i*COLUMNS+j] = i;
        }
    }

    cudaMalloc((void **) &dev_a, ROWS*COLUMNS*sizeof(int));
    cudaMalloc((void **) &dev_b, ROWS*COLUMNS*sizeof(int));
    cudaMalloc((void **) &dev_c, ROWS*COLUMNS*sizeof(int));
    cudaMalloc((void **) &dev_d, ROWS*COLUMNS*sizeof(int));

    gettimeofday(&startc, NULL);
    for (long i = 0; i < ROWS*COLUMNS; i++) {   // Fill Arrays
        c[i] = a[i] + b[i];
        a[i] = d[i];
    }
    gettimeofday(&end, NULL);

    seconds = end.tv_sec - startc.tv_sec;
    useconds = end.tv_usec - startc.tv_usec;
    mtime = useconds;
    mtime /= 1000;
    mtime += seconds*1000;

    for (long y = ROWS-1; y < ROWS; y++)     // Output Arrays
    {
        for (long x = COLUMNS-1; x < COLUMNS; x++)
        {
            // printf("\n[%ld][%ld]=%ld ", y, x, c[y][x]);
            // printf("[%d][%d]=%d ", y, x, d[y][x]);
        }
        printf("\n");
    }



    cudaMemcpy(dev_a, a, ROWS*COLUMNS*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, ROWS*COLUMNS*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_d, d, ROWS*COLUMNS*sizeof(int), cudaMemcpyHostToDevice);

    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    add<<<dim3(1024,1024), dim3(128,128)>>>(dev_a, dev_b, dev_c, dev_d);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ms, start, stop);

    cudaMemcpy(c, dev_c, ROWS*COLUMNS*sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    printf("GPUassert: %s\n", cudaGetErrorString(cudaGetLastError()));

    //cpu_time_used = ((double) (end - start))/CLOCKS_PER_SEC;
    double memXFers = 5*4*COLUMNS*ROWS;
    memXFers /= 1024*1024*1024;

    printf("GPU: %f ms bandwidth %g GB/s", ms, memXFers/(ms/1000.0));
    printf("\n CPU : %g ms bandwidth %g GB/s", mtime, memXFers/(mtime/1000.0));
    for (long y = ROWS-1; y < ROWS; y++)     // Output Arrays
    {
        for (long x = COLUMNS-1; x < COLUMNS; x++)
        {
            // printf("\n[%ld][%ld]=%ld ", y, x, c[y][x]);
            // printf("[%d][%d]=%d ", y, x, d[y][x]);
        }
        printf("\n");
    }

    return 0;
}

And the way my results look now (obviously not right)...

GPU: 0.001792 ms bandwidth 2598.56 GB/s 
CPU : 0.567 ms bandwidth 8.21272 GB/s 
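A 2598 GB/s figure is far beyond any real memory bus, which points at the launch itself: dim3(128,128) asks for 16384 threads per block, well over the 1024-thread-per-block hardware limit, so the kernel never runs and the events bracket almost nothing (the GPUassert line above should report this). Note also that the allocations and copies use sizeof(int) while the arrays are long, so on a typical 64-bit Linux system only half of each array is moved. A minimal sketch of a legal one-thread-per-element launch follows; the 16x16 block shape is an arbitrary choice, not something from the original post:

// 16*16 = 256 threads per block (limit is 1024), enough blocks to cover every element
dim3 block(16, 16);
dim3 grid((COLUMNS + block.x - 1) / block.x,
          (ROWS    + block.y - 1) / block.y);

add<<<grid, block>>>(dev_a, dev_b, dev_c, dev_d);
printf("launch: %s\n", cudaGetErrorString(cudaGetLastError()));

// the kernel must then index with both blockIdx and threadIdx:
// int x = blockIdx.x * blockDim.x + threadIdx.x;
// int y = blockIdx.y * blockDim.y + threadIdx.y;
// if (x < COLUMNS && y < ROWS) { int i = COLUMNS*y + x; ... }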

You *must* move 'cudaEventRecord(stop, 0);' to after the kernel. – ArchaeaSoftware 2015-02-25 08:35:44


First of all, thank you for the awesome reply. Yes, I have moved 'cudaEventRecord(stop, 0);' to after the kernel execution. – Avinash 2015-02-25 15:00:22


Thanks also for the timer function; I tried implementing it in my code and found the results promising. – Avinash 2015-02-25 15:37:12