I have a Metal compute kernel that takes two textures as arguments. However, I'm running into a problem where the kernel doesn't run. I have reduced the problem down to this simple kernel:
#include <metal_stdlib>
using namespace metal;

kernel void test_texture(texture2d<float, access::sample> tex1 [[texture(0)]],
                         texture2d<float, access::sample> tex2 [[texture(1)]],
                         device float *buf [[buffer(0)]],
                         uint idx [[thread_position_in_grid]])
{
    buf[idx] = 100;
}
And the following host code:
#import <Metal/Metal.h>

int main(int argc, const char * argv[]) {
    @autoreleasepool {
        const size_t max_buffer = 128000000;   // 128M floats, ~512 MB
        const size_t max_texture = 16384;      // maximum 2D texture dimension

        id<MTLDevice> device = MTLCreateSystemDefaultDevice();
        id<MTLLibrary> library = [device newDefaultLibrary];
        id<MTLCommandQueue> queue = [device newCommandQueue];

        id<MTLBuffer> buffer = [device newBufferWithLength:sizeof(float) * max_buffer
                                                   options:MTLResourceCPUCacheModeDefaultCache |
                                                           MTLResourceStorageModeManaged];

        MTLTextureDescriptor *textureDescriptor = [[MTLTextureDescriptor alloc] init];
        textureDescriptor.textureType = MTLTextureType2D;
        textureDescriptor.pixelFormat = MTLPixelFormatR32Float;
        textureDescriptor.width = max_texture;
        textureDescriptor.height = max_texture;
        textureDescriptor.depth = 1;
        textureDescriptor.mipmapLevelCount = 1;
        textureDescriptor.sampleCount = 1;
        textureDescriptor.arrayLength = 1;
        textureDescriptor.resourceOptions = MTLResourceStorageModePrivate | MTLResourceCPUCacheModeDefaultCache;
        textureDescriptor.cpuCacheMode = MTLCPUCacheModeDefaultCache;
        textureDescriptor.storageMode = MTLStorageModePrivate;
        textureDescriptor.usage = MTLTextureUsageShaderRead;

        id<MTLTexture> texture1 = [device newTextureWithDescriptor:textureDescriptor];
        id<MTLTexture> texture2 = [device newTextureWithDescriptor:textureDescriptor];

        MTLComputePipelineDescriptor *descriptor = [[MTLComputePipelineDescriptor alloc] init];
        descriptor.computeFunction = [library newFunctionWithName:@"test_texture"];
        descriptor.threadGroupSizeIsMultipleOfThreadExecutionWidth = YES;
        id<MTLComputePipelineState> pipeline = [device newComputePipelineStateWithDescriptor:descriptor
                                                                                     options:MTLPipelineOptionNone
                                                                                  reflection:NULL
                                                                                       error:NULL];

        id<MTLCommandBuffer> command_buffer = queue.commandBuffer;
        id<MTLComputeCommandEncoder> compute_encoder = [command_buffer computeCommandEncoder];
        [compute_encoder setComputePipelineState:pipeline];
        [compute_encoder setTexture:texture1 atIndex:0];
        [compute_encoder setTexture:texture2 atIndex:1];
        [compute_encoder setBuffer:buffer offset:0 atIndex:0];
        [compute_encoder dispatchThreads:MTLSizeMake(max_buffer, 1, 1)
                   threadsPerThreadgroup:MTLSizeMake(1024, 1, 1)];
        [compute_encoder endEncoding];

        // The buffer uses managed storage, so its contents must be
        // synchronized back to the CPU before reading.
        id<MTLBlitCommandEncoder> blit_encoder = [command_buffer blitCommandEncoder];
        [blit_encoder synchronizeResource:buffer];
        [blit_encoder endEncoding];

        [command_buffer commit];
        [command_buffer waitUntilCompleted];

        float *result = (float *)buffer.contents;
        NSLog(@"%f", result[0]);
    }
    return 0;
}
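I realize this code passes NULL for the pipeline-creation error and never inspects the command buffer, so any failure would be silent. A check I could add after waitUntilCompleted (an untested sketch using the status and error properties that MTLCommandBuffer exposes) would be:

    if (command_buffer.status == MTLCommandBufferStatusError) {
        NSLog(@"command buffer failed: %@", command_buffer.error);
    }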
If I comment out the second texture argument, I get the expected value when I read the result buffer. However, when I leave the second texture argument intact, it appears as if the kernel doesn't run, and the value in the result buffer comes out as zero. Is there a limit on the number of textures that can be sampled in a compute kernel on macOS? Or is the problem caused by my use of the maximum texture dimensions for both textures (am I running out of texture memory)?
Each texture is 16384 × 16384 × sizeof(float), i.e. 1 GiB, so the two textures plus the ~512 MB result buffer come to roughly 2.6 GB of GPU memory.
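To test the out-of-memory theory, I could compare that total against the device's limit (a rough sketch; recommendedMaxWorkingSetSize is a macOS-only MTLDevice property, and I'm not certain that exceeding it would produce this kind of silent failure):

    // Total bytes the dispatch needs resident at once.
    uint64_t textureBytes = (uint64_t)max_texture * max_texture * sizeof(float); // 1 GiB per texture
    uint64_t totalBytes = 2 * textureBytes + sizeof(float) * max_buffer;         // ~2.6 GB overall
    NSLog(@"requested %llu bytes, recommended working set %llu bytes",
          totalBytes, device.recommendedMaxWorkingSetSize);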