我對嵌入式和OpenCL相當陌生,目前我正在嘗試開發一個示例代碼以執行到支持OpenCL 1.1 EP的i.MX6q板。嘗試使用CL_MEM_USE_HOST_PTR在OpenCL 1.1中創建一個簡單的複製/粘貼值,爲什麼它不起作用?
我不得不從頭開始,所以我跟着一些教程(these tutorials)、OpenCL 1.1 參考頁面(the OpenCL 1.1 Reference pages)和一個 OpenCL 示例(this OpenCL example)來製作我的第一個OpenCL實現/應用程序。
基本上我想要做的是開發一個「性能測試」在板上運行。它包含兩個int數組(輸入和輸出),用隨機值填充第一個數組,並使用OpenCL工作項將其粘貼到輸出數組中。
我很困惑clEnqueue(讀/寫)緩衝功能和clCreateBuffer標誌(尤其是CL_MEM_USE_HOST_PTR)之間,所以我決定去看看,並用它來練習。
我的代碼可以正確編譯並正常運行;然而,當我讀取輸出數組的值時,它們仍然停留在0。
這裏是我的代碼(這是C++):
void buffer_copy(char* kernelfile)
{
cl_platform_id platform_id;
cl_device_id device_id;
cl_context context;
cl_command_queue cmd_queue;
cl_program program;
// Retrieving all the OpenCL data needed
// to start the performance test
platform_id = get_platform();
device_id = get_device(platform_id);
context = get_context(platform_id, device_id);
cmd_queue = get_command_queue(context, device_id);
program = get_program(context, kernelfile);
cl_mem buffer_input, buffer_output;
size_t buffer_width = 640, buffer_height = 480;
size_t buffer_size = buffer_width * buffer_height;
cl_kernel kernel;
cl_int err = 0;
char* options = "-Werror -cl-std=CL1.1";
int data_input[buffer_size];
int data_output[buffer_size];
// Assigning random values in the data_input array and
// initializing the data_output array to zero-values
srand(time(NULL));
for (size_t index = 0; index < buffer_size; ++index)
{
data_input[index] = rand();
data_output[index] = 0;
}
// Creating OpenCL buffers
buffer_input = clCreateBuffer(context, CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR, buffer_size * sizeof(int), data_input, &err);
assert(err == CL_SUCCESS);
buffer_output = clCreateBuffer(context, CL_MEM_WRITE_ONLY | CL_MEM_USE_HOST_PTR, buffer_size * sizeof(int), data_output, &err);
assert(err == CL_SUCCESS);
err = clBuildProgram(program, 1, &device_id, options, NULL, NULL);
assert(err == CL_SUCCESS);
kernel = clCreateKernel(program, "buffer_copy", &err);
assert(err == CL_SUCCESS);
clSetKernelArg(kernel, 0, sizeof(cl_mem), &buffer_input);
clSetKernelArg(kernel, 1, sizeof(cl_mem), &buffer_output);
size_t device_max_work_group_size;
size_t global_work_size, local_work_size;
size_t preferred_work_group_size_multiple;
cl_ulong global_mem_size, max_mem_alloc_size;
clGetDeviceInfo(device_id, CL_DEVICE_GLOBAL_MEM_SIZE, sizeof(cl_ulong), &global_mem_size, NULL);
clGetDeviceInfo(device_id, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof(cl_ulong), &max_mem_alloc_size, NULL);
clGetDeviceInfo(device_id, CL_DEVICE_MAX_WORK_GROUP_SIZE, sizeof(size_t), &device_max_work_group_size, NULL);
std::cout << "Global device memory size: " << global_mem_size << " bytes" << std::endl;
std::cout << "Device max memory allocation size: " << max_mem_alloc_size << " bytes" << std::endl;
std::cout << "Device max work group size: " << device_max_work_group_size << std::endl;
clGetKernelWorkGroupInfo(kernel, device_id, CL_KERNEL_WORK_GROUP_SIZE, sizeof(size_t), &global_work_size, NULL);
std::cout << "global_work_size value: " << global_work_size << std::endl;
clGetKernelWorkGroupInfo(kernel, device_id, CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, sizeof(size_t), &preferred_work_group_size_multiple, NULL);
local_work_size = global_work_size/preferred_work_group_size_multiple;
std::cout << "local_work_size value: " << local_work_size << std::endl;
cl_event events[2];
err = clEnqueueNDRangeKernel(cmd_queue, kernel, 1, NULL, &global_work_size, &local_work_size, 0, 0, &events[0]);
assert (err == CL_SUCCESS);
err = clEnqueueReadBuffer(cmd_queue, buffer_output, CL_TRUE, 0, buffer_size * sizeof(int), data_output, 0, NULL, &events[1]);
assert (err == CL_SUCCESS);
err = clWaitForEvents(2, events);
assert (err == CL_SUCCESS);
for (size_t index = 0; index < buffer_size; ++index)
{
if (data_input[index] != data_output[index])
{
std::cerr << "Error, values differ (at index " << index << ")." << std::endl;
break;
}
else
{
//std::cout << "data_input[index] =\t" << data_input[index] << std::endl;
//std::cout << "data_output[index] =\t" << data_output[index] << std::endl;
}
}
cl_ulong time_start, time_end;
double total_time;
clGetEventProfilingInfo(events[0], CL_PROFILING_COMMAND_START, sizeof(time_start), &time_start, NULL);
clGetEventProfilingInfo(events[1], CL_PROFILING_COMMAND_END, sizeof(time_end), &time_end, NULL);
total_time = time_end - time_start;
std::cout << "Execution time in milliseconds: " << (total_time/1000000.0) << " ms" << std::endl;
clReleaseKernel(kernel);
clReleaseProgram(program);
clReleaseMemObject(buffer_input);
clReleaseMemObject(buffer_output);
clReleaseCommandQueue(cmd_queue);
clReleaseContext(context);
}
而這裏是我的OpenCL內核:
/* One work-item per element: each work-item copies the single slot of
 * `input` addressed by its global id into the same slot of `output`. */
__kernel void buffer_copy(__global int* input, __global int* output)
{
    size_t gid = get_global_id(0);
    output[gid] = input[gid];
}
現在我只是想讓它工作,而不是優化它。我想我在這裏和那裏都錯過了很多優點,但我無法抓住它們。在我看來,我混淆了clCreateBuffer標誌。
難道你們能夠啓發我,幫助我嗎?
編輯:更新代碼+新的相關信息!
看起來值很好粘貼,但只根據內核工作組大小:CL_DEVICE_MAX_WORK_GROUP_SIZE返回1024,並且CL_KERNEL_WORK_GROUP_SIZE也返回1024(這也很奇怪)。所以我的數組的前1024個整數被很好地複製/粘貼,但之後它不再工作。爲了驗證這一點,我手動將global_work_group_size設置爲32,再次運行我的程序,然後只能正確粘貼前32個整數。我真的不明白這裏發生了什麼。
是的,我同意:您使用的全局大小和本地工作組大小都不正確。全局大小是您想要運行的線程(工作項)總數;本地工作組大小只有在您使用共享本地內存或其他工作組概念時纔重要。如果您是初學者,可以直接傳遞NULL,讓運行時爲您選擇一個(如果全局大小比較「友好」,例如是二的冪,或至少是CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE的倍數,效果會更好)。 – Dithermaster