I noticed a weird phenomenon that allows you to launch a host function using the triple angle bracket notation in CUDA. To test this, I wrote a simple kernel that copies data between two integer arrays. Note that I am running all of this code on a Tesla K40 and compiling with -gencode arch=compute_35,code=sm_35:
#ifndef HOST_LAUNCH_H
#define HOST_LAUNCH_H
using namespace std;
// Assumes input and output are both length 32
// Device kernel: element-wise copy of `input` into `output`.
// Launch with exactly one thread per element (the callers use <<<1,32>>>
// for length-32 arrays); there is no bounds check, so the grid must not
// over-cover the arrays.
__global__ void CopyKernel(const int* input, int* output) {
    const size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    output[idx] = input[idx];
}
// Copies 32 ints from `input` to `output` through device memory:
// allocate two device buffers, copy host-to-device, run CopyKernel with
// one 32-thread block, copy the result back.  Assumes both arrays are
// length 32.  The signature is kept as void(const int*, int*) so it
// remains compatible with the function-pointer tests.
//
// Fix over the original: CUDA runtime return codes are now checked so
// failures no longer pass silently, and the device buffers are always
// released even when an intermediate step fails.
__host__ void Copy(const int* input, int* output) {
    int* d_input = 0;
    int* d_output = 0;
    cudaError_t err = cudaMalloc((void**)&d_input, 32 * sizeof(int));
    if (err != cudaSuccess) return;  // nothing allocated yet
    err = cudaMalloc((void**)&d_output, 32 * sizeof(int));
    if (err == cudaSuccess)
        err = cudaMemcpy(d_input, input, 32 * sizeof(int), cudaMemcpyHostToDevice);
    if (err == cudaSuccess) {
        CopyKernel<<<1, 32>>>(d_input, d_output);
        // Launch errors (bad configuration etc.) are only visible via
        // cudaGetLastError, not as a return value of the launch itself.
        err = cudaGetLastError();
    }
    if (err == cudaSuccess) {
        // A blocking device-to-host cudaMemcpy on the default stream also
        // synchronizes with the kernel, so no explicit cudaDeviceSynchronize
        // is required before reading the result.
        cudaMemcpy(output, d_output, 32 * sizeof(int), cudaMemcpyDeviceToHost);
    }
    cudaFree(d_input);
    cudaFree(d_output);
}
#endif
I then wrote the following unit test:
#include "host_launch.h"
#include <assert.h>
using namespace std;
// Sanity check: a direct <<<>>> launch of CopyKernel moves a 32-element
// array through device memory intact.  Aborts via assert on failure.
__host__ void TestKernelLaunch() {
    const int kN = 32;
    int host_in[kN];
    int host_out[kN];
    for (int i = 0; i < kN; ++i) {
        host_in[i] = i;
        host_out[i] = 0;
    }
    int* dev_in = 0;
    int* dev_out = 0;
    cudaMalloc((void**)&dev_in, kN * sizeof(int));
    cudaMalloc((void**)&dev_out, kN * sizeof(int));
    cudaMemcpy(dev_in, host_in, kN * sizeof(int), cudaMemcpyHostToDevice);
    // Before the kernel runs, the host output buffer must be untouched.
    for (int i = 0; i < kN; ++i)
        assert(host_out[i] == 0);
    CopyKernel<<<1, kN>>>(dev_in, dev_out);
    cudaMemcpy(host_out, dev_out, kN * sizeof(int), cudaMemcpyDeviceToHost);
    // After the round trip each element must equal its index.
    for (int i = 0; i < kN; ++i)
        assert(host_out[i] == i);
    cudaFree(dev_in);
    cudaFree(dev_out);
}
// Attempted "launch" of the __host__ function Copy with <<<>>> syntax.
// With the launch line below uncommented this file does not compile:
// nvcc rejects configuring a call to a host function that is named
// directly ("a host function call cannot be configured").
// NOTE(review): this function is never called from main(), and as
// written (launch commented out) its final asserts would fail, since
// nothing ever writes d_output.
__host__ void TestHostLaunch() {
int input[32];
int output[32];
for(int i = 0; i < 32; i++) {
input[i] = i + 1;
output[i] = 0;
}
int* d_input = 0;
int* d_output = 0;
cudaMalloc((void**)&d_input, 32 * sizeof(int));
cudaMalloc((void**)&d_output, 32 * sizeof(int));
cudaMemcpy(d_input, input, 32 * sizeof(int), cudaMemcpyHostToDevice);
for(int i = 0; i < 32; i++) {
assert(output[i] == 0);
}
// The line the question is about: a direct triple-chevron launch of a
// __host__ function is a compile-time error.
//Copy<<<1,32>>>(d_input, d_output);
cudaMemcpy(output, d_output, 32 * sizeof(int), cudaMemcpyDeviceToHost);
for(int i = 0; i < 32; i++) {
assert(output[i] == i + 1);
}
cudaFree(d_input);
cudaFree(d_output);
}
// Launches `f` with <<<1,32>>> through a plain function pointer.  The
// pointer type void(*)(const int*, int*) carries no __global__/__host__
// information, so the compiler accepts this even when a __host__
// function such as Copy is passed in — unlike the direct-name case in
// TestHostLaunch, which is rejected at compile time.
// NOTE(review): "launching" a host function this way is not defined
// behavior for the CUDA runtime.  Presumably nvcc expands <<<>>> into a
// launch-configuration call followed by an ordinary indirect call, so a
// host function simply runs on the CPU (which would explain why the
// asserts pass and why it is far slower than a real kernel launch) —
// confirm against the compiler-generated code.
__host__ void TestFunctionPointerLaunch(void (*f)(const int*, int*)) {
int input[32];
int output[32];
for(int i = 0; i < 32; i++) {
input[i] = i + 2;
output[i] = 0;
}
int* d_input = 0;
int* d_output = 0;
cudaMalloc((void**)&d_input, 32 * sizeof(int));
cudaMalloc((void**)&d_output, 32 * sizeof(int));
cudaMemcpy(d_input, input, 32 * sizeof(int), cudaMemcpyHostToDevice);
for(int i = 0; i < 32; i++) {
assert(output[i] == 0);
}
// Compiles for both kernels and host functions; see note above.
f<<<1,32>>>(d_input, d_output);
cudaMemcpy(output, d_output, 32 * sizeof(int), cudaMemcpyDeviceToHost);
for(int i = 0; i < 32; i++) {
assert(output[i] == i + 2);
}
cudaFree(d_input);
cudaFree(d_output);
}
// Driver: exercise the direct kernel launch, then launches through a
// function pointer — once with a real __global__ kernel and once with a
// __host__ function.
int main() {
    TestKernelLaunch();
    TestFunctionPointerLaunch(CopyKernel);
    TestFunctionPointerLaunch(Copy);
    return 0;
}
If I uncomment the line:
//Copy<<<1,32>>>(d_input, d_output);
I get:
host_launch_unittest.cu(49): error: a host function call cannot be configured
But the equivalent is performed with:
f<<<1,32>>>(d_input, d_output);
in TestFunctionPointerLaunch, and it passes all of the assertions. I'm just wondering what the GPU is actually doing under the hood that makes this host function launch behave correctly. I wrote these tests to isolate the behavior, but also found that it works for more complicated kernels/host functions. Also, I decided to time these to see if they were somehow compiled to equivalent operations:
#include "host_launch.h"

#include <assert.h>
#include <iostream>
#include <vector>
using namespace std;
// Times `copy_count` back-to-back <<<1,32>>> launches of `f` with CUDA
// events and returns the mean time per launch in milliseconds.  Each
// launch i operates on its own 32-int slice of the device buffers, and
// the full result is verified against the expected identity copy.
//
// Fixes over the original: the two host buffers were stack VLAs of
// 32*copy_count ints (non-standard C++, and ~1.3 MB each at
// copy_count=10000 — a stack-overflow risk); they are now heap-backed
// vectors.  The CUDA events are also destroyed instead of leaked.
__host__ float MeanCopyTime(const int copy_count, void (*f)(const int*, int*)) {
    const size_t n = 32 * (size_t)copy_count;
    vector<int> input(n);
    vector<int> output(n, 0);
    for (size_t i = 0; i < n; i++)
        input[i] = (int)i;
    int* d_input = 0;
    int* d_output = 0;
    cudaMalloc((void**)&d_input, n * sizeof(int));
    cudaMalloc((void**)&d_output, n * sizeof(int));
    cudaMemcpy(d_input, input.data(), n * sizeof(int), cudaMemcpyHostToDevice);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < copy_count; i++)
        f<<<1,32>>>(d_input + i * 32, d_output + i * 32);
    cudaEventRecord(stop);
    // Block until the stop event (and therefore all timed work) is done.
    cudaEventSynchronize(stop);
    float msecs = 0;
    cudaEventElapsedTime(&msecs, start, stop);
    cudaMemcpy(output.data(), d_output, n * sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_input);
    cudaFree(d_output);
    for (size_t i = 0; i < n; i++)
        assert(output[i] == (int)i);
    return msecs / copy_count;
}
// Compare the mean per-launch cost of a genuine kernel against a host
// function "launched" through the triple-chevron syntax.
// The two MeanCopyTime calls stay in separate statements so their
// evaluation order is fixed regardless of C++ standard version.
int main() {
    const int kCopyCount = 10000;
    cout << endl;
    cout << "Average Kernel Launch Time: "
         << MeanCopyTime(kCopyCount, CopyKernel) << endl;
    cout << "Average Host Function Launch Time: "
         << MeanCopyTime(kCopyCount, Copy) << endl;
    cout << endl;
}
For my architecture, this returns:
Average Kernel Launch Time: 0.00420756
Average Host Function Launch Time: 0.169097
Again, any thoughts on what's happening here would be greatly appreciated.