在我的其中一個項目中,使用 CUB 的 DeviceReduce::ReduceByKey 時看到一些不正確的結果。但是,將相同的輸入/輸出用於 thrust::reduce_by_key 卻會產生預期結果。(標題:指定 gencode 時 CUB ReduceByKey 產生錯誤的結果)
#include "cub/cub.cuh"

#include <cuda.h>

#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <vector>
// Binary functor that adds two floats; callable from both host and device.
// The global instance `reduction_op` below is passed to
// cub::DeviceReduce::ReduceByKey as the segment reduction operator.
struct AddFunctor {
    __host__ __device__ __forceinline__
    float operator()(const float & lhs, const float & rhs) const {
        const float sum = lhs + rhs;
        return sum;
    }
} reduction_op;
// Abort with a readable message when a CUDA runtime call fails.
// The original repro ignored every cudaError_t (including the ones returned
// by cub::DeviceReduce::ReduceByKey), which can hide the real fault behind
// a later, unrelated-looking failure.
static void check(cudaError_t status, const char * what) {
    if (status != cudaSuccess) {
        std::cerr << what << " failed: " << cudaGetErrorString(status) << std::endl;
        std::exit(EXIT_FAILURE);
    }
}

// Repro for the CUB ReduceByKey issue: reduce 7680 values of 1.0f grouped
// into three key runs (4000 / 1000 / 2680 elements). Each aggregate should
// equal its run length, so the expected output is:
//   0, 4000
//   1, 1000
//   2, 2680
int main() {
    const int n = 7680;

    // Host keys: three consecutive runs -> three reduced segments.
    std::vector<uint64_t> keys_h(n);
    for (int i = 0; i < 4000; i++) keys_h[i] = 1;
    for (int i = 4000; i < 5000; i++) keys_h[i] = 2;
    for (int i = 5000; i < 7680; i++) keys_h[i] = 3;

    uint64_t * keys;
    check(cudaMalloc(&keys, sizeof(uint64_t) * n), "cudaMalloc(keys)");
    check(cudaMemcpy(keys, &keys_h[0], sizeof(uint64_t) * n, cudaMemcpyDefault),
          "cudaMemcpy(keys)");

    // Output buffers are sized for the worst case (n distinct keys).
    uint64_t * unique_keys;
    check(cudaMalloc(&unique_keys, sizeof(uint64_t) * n), "cudaMalloc(unique_keys)");

    // Host values: all ones, so each aggregate equals its run length.
    std::vector<float> values_h(n, 1.0f);
    float * values;
    check(cudaMalloc(&values, sizeof(float) * n), "cudaMalloc(values)");
    check(cudaMemcpy(values, &values_h[0], sizeof(float) * n, cudaMemcpyDefault),
          "cudaMemcpy(values)");

    float * aggregates;
    check(cudaMalloc(&aggregates, sizeof(float) * n), "cudaMalloc(aggregates)");

    // Device-side count of unique runs produced by ReduceByKey.
    int * remaining;
    check(cudaMalloc(&remaining, sizeof(int)), "cudaMalloc(remaining)");

    // First call with a NULL buffer only queries the temp-storage size.
    size_t size = 0;
    void * buffer = NULL;
    check(cub::DeviceReduce::ReduceByKey(
              buffer, size, keys, unique_keys, values, aggregates,
              remaining, reduction_op, n),
          "ReduceByKey (size query)");
    check(cudaMalloc(&buffer, size), "cudaMalloc(buffer)");
    check(cub::DeviceReduce::ReduceByKey(
              buffer, size, keys, unique_keys, values, aggregates,
              remaining, reduction_op, n),
          "ReduceByKey");

    // A blocking cudaMemcpy on the default stream synchronizes with the
    // preceding kernel work, so the results are ready once it returns.
    int remaining_h = 0;
    check(cudaMemcpy(&remaining_h, remaining, sizeof(int), cudaMemcpyDefault),
          "cudaMemcpy(remaining)");

    // Guard against a zero/garbage count before sizing the host vector.
    if (remaining_h > 0) {
        std::vector<float> aggregates_h(remaining_h);
        check(cudaMemcpy(&aggregates_h[0], aggregates,
                         sizeof(float) * remaining_h, cudaMemcpyDefault),
              "cudaMemcpy(aggregates)");
        for (int i = 0; i < remaining_h; i++) {
            std::cout << i << ", " << aggregates_h[i] << std::endl;
        }
    }

    cudaFree(buffer);
    cudaFree(keys);
    cudaFree(unique_keys);
    cudaFree(values);
    cudaFree(aggregates);
    cudaFree(remaining);
    return 0;
}
當我使用「-gencode arch=compute_35,code=sm_35」(針對 Kepler GTX Titan)時,它產生錯誤的結果;但當我完全不加這些標誌時,結果是正確的。
$ nvcc cub_test.cu
$ ./a.out
0, 4000
1, 1000
2, 2680
$ nvcc cub_test.cu -gencode arch=compute_35,code=sm_35
$ ./a.out
0, 4000
1, 1000
2, 768
我使用過一些其他的 CUB 調用都沒有問題,只有這一個行爲異常。我也嘗試在 GTX 1080 Ti 上(使用 compute_61,sm_61)運行這段代碼,並看到相同的行爲。
直接省略這些編譯器標誌就是正確的解決方案嗎?
在一台機器上嘗試過:
- CUDA 8.0
- Ubuntu 16.04
- GCC 5.4.0
- CUB 1.6.4
- Kepler GTX Titan(計算能力 3.5)
以及另一台:
- CUDA 8.0
- Ubuntu 16.04
- GCC 5.4.0
- CUB 1.6.4
- Pascal GTX 1080 Ti(計算能力 6.1)
我明天會在我們工作用的 Kepler GTX Titan 上嘗試重現這個問題。 – einpoklum