Based on the n-body example from the NVIDIA GPU Computing SDK, I created two kernels for an n-body simulation. The first kernel, which does not take advantage of shared memory, is ~15% faster than the second kernel, which does. Why is the shared-memory kernel slower?
Kernel parameters: 8192 bodies, 128 threads per block, 64 blocks per grid. Device: GeForce GTX 560 Ti.
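For context, this is roughly how I time a kernel (a minimal sketch, assuming cudaEvent-based timing and that the kernel is visible in the same translation unit; initBodies and the dt value are placeholders, and the launch configuration matches the parameters above):

#include <cstdio>
#include <cuda_runtime.h>

int main()
{
    const int n = 8192;
    float4 *pos;
    cudaMalloc(&pos, n * sizeof(float4));
    // initBodies(pos, n);           // placeholder: fill positions, masses, velocities

    const float dt = 0.001f;         // placeholder timestep

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    points<<<64, 128>>>(pos, dt);    // 64 blocks x 128 threads = 8192 threads
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("kernel time: %.3f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(pos);
    return 0;
}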
First kernel:
#define N 8192
#define EPS2 0.001f
// Per-body velocities kept in device memory.
__device__ float4 vel[N];
__device__ float3 force(float4 bi, float4 bj, float3 ai)
{
    // Vector from body i to body j.
    float3 r;
    r.x = bj.x - bi.x;
    r.y = bj.y - bi.y;
    r.z = bj.z - bi.z;
    // Softened squared distance; EPS2 keeps the interaction finite at r = 0.
    float distSqr = r.x * r.x + r.y * r.y + r.z * r.z + EPS2;
    float distSixth = distSqr * distSqr * distSqr;
    float invDistCube = 1.0f / sqrtf(distSixth);
    // bj.w holds the mass of body j.
    float s = bj.w * invDistCube;
    ai.x += r.x * s;
    ai.y += r.y * s;
    ai.z += r.z * s;
    return ai;
}
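// In math terms, force() above accumulates the softened gravitational
// interaction
//     a_i += m_j * r_ij / (|r_ij|^2 + EPS2)^(3/2),   with r_ij = p_j - p_i,
// where the mass m_j of body j is stored in bj.w.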
__global__ void points(float4 *pos, float dt)
{
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    if (k >= N) return;
    float4 bi, bj, v;
    float3 ai;
    v = vel[k];
    bi = pos[k];
    ai = make_float3(0.0f, 0.0f, 0.0f);
    // Accumulate the force from every body, reading pos[] straight from
    // global memory.
    for (int i = 0; i < N; i++)
    {
        bj = pos[i];
        ai = force(bi, bj, ai);
    }
    // Semi-implicit Euler update: velocity first, then position with the
    // new velocity.
    v.x += ai.x * dt;
    v.y += ai.y * dt;
    v.z += ai.z * dt;
    bi.x += v.x * dt;
    bi.y += v.y * dt;
    bi.z += v.z * dt;
    pos[k] = bi;
    vel[k] = v;
}
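Both kernels do the same amount of work: each of the 8192 threads evaluates force() against all 8192 bodies, i.e. 8192^2 ≈ 67.1 million pairwise interactions per step. They differ only in where bj is read from: straight from global memory above, or from a shared-memory tile below.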
Second kernel:
#define N 8192
#define EPS2 0.001f
#define THREADS_PER_BLOCK 128
__device__ float4 vel[N];
// One tile of body positions staged in shared memory, one slot per thread.
__shared__ float4 shPosition[THREADS_PER_BLOCK];
// Identical to force() in the first kernel.
__device__ float3 force(float4 bi, float4 bj, float3 ai)
{
    float3 r;
    r.x = bj.x - bi.x;
    r.y = bj.y - bi.y;
    r.z = bj.z - bi.z;
    float distSqr = r.x * r.x + r.y * r.y + r.z * r.z + EPS2;
    float distSixth = distSqr * distSqr * distSqr;
    float invDistCube = 1.0f / sqrtf(distSixth);
    float s = bj.w * invDistCube;
    ai.x += r.x * s;
    ai.y += r.y * s;
    ai.z += r.z * s;
    return ai;
}
}
// Accumulates the forces from the THREADS_PER_BLOCK bodies currently staged
// in shared memory.
__device__ float3 accumulate_tile(float4 myPosition, float3 accel)
{
    for (int i = 0; i < THREADS_PER_BLOCK; i++)
    {
        accel = force(myPosition, shPosition[i], accel);
    }
    return accel;
}
__global__ void points(float4 *pos, float dt)
{
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    // Safe only because N is an exact multiple of the block size; otherwise
    // the early return would leave the __syncthreads() calls below reached
    // by only part of the block.
    if (k >= N) return;
    float4 bi, v;
    float3 ai;
    v = vel[k];
    bi = pos[k];
    ai = make_float3(0.0f, 0.0f, 0.0f);
    // Walk the bodies tile by tile: each thread loads one position into
    // shared memory, the whole block consumes the tile, then moves on.
    for (int tile = 0; tile < N / THREADS_PER_BLOCK; tile++)
    {
        int i = tile * blockDim.x + threadIdx.x;
        shPosition[threadIdx.x] = pos[i];
        __syncthreads();              // wait until the tile is fully loaded
        ai = accumulate_tile(bi, ai);
        __syncthreads();              // wait before the tile is overwritten
    }
    // Semi-implicit Euler update: velocity first, then position with the
    // new velocity.
    v.x += ai.x * dt;
    v.y += ai.y * dt;
    v.z += ai.z * dt;
    bi.x += v.x * dt;
    bi.y += v.y * dt;
    bi.z += v.z * dt;
    pos[k] = bi;
    vel[k] = v;
}
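For completeness, before comparing timings I check that both kernels produce the same positions (a hypothetical host-side check; points_global and points_shared are stand-in names, since the two kernels above would need distinct names to coexist in one file):

#include <cmath>
#include <cuda_runtime.h>

// Compare the positions produced by the two kernels after they have been
// copied back to the host, within a small tolerance (the single-precision
// results may differ slightly depending on how the compiler contracts the
// arithmetic).
bool samePositions(const float4 *a, const float4 *b, int n, float tol)
{
    for (int i = 0; i < n; i++)
    {
        if (fabsf(a[i].x - b[i].x) > tol ||
            fabsf(a[i].y - b[i].y) > tol ||
            fabsf(a[i].z - b[i].z) > tol)
            return false;
    }
    return true;
}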