I want to do two post processing steps for my edge-detection algorithm: 1. Find edges via Sobel-Edge detection algorithm 2. Thin the edges via morphological thinning
So what I want to achieve is to render the whole scene into a separate framebuffer, sample the created texture to find all edges and finally sample the newly generated texture again to thin the edges.
So I assumed that I need three framebuffers (one for edge detection, one for thinning and the default framebuffer) to achieve this task. The first additional framebuffer uses three textures, namely for color, normals and depth. The second additional framebuffer should use the generated images of the first framebuffer to do the thinning and output it again.
If I use two framebuffers (default fb and edge detection fb) everything works fine. But as soon as I add an additional third fb, the texture generated by this third framebuffer is not displayed. Instead a black window is displayed.
So here is the code for the initialization of the first framebuffer:
private void GenerateFramebuffer()
{
//Generate framebuffer
framebuffer = GL.GenFramebuffer();
GL.BindFramebuffer(FramebufferTarget.Framebuffer, framebuffer);
// create a RGBA color texture
GL.GenTextures(1, out textureColorBuffer);
GL.BindTexture(TextureTarget.Texture2D, textureColorBuffer);
GL.TexImage2D(TextureTarget.Texture2D, 0, PixelInternalFormat.Rgba,
glControl1.Width, glControl1.Height,
0, (PixelFormat)PixelInternalFormat.Rgba, PixelType.UnsignedByte,
IntPtr.Zero);
GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMinFilter, (int)TextureMinFilter.Linear);
GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMagFilter, (int)TextureMinFilter.Linear);
GL.BindTexture(TextureTarget.Texture2D, 0);
// create a RGBA color texture for normals
... generated like texture above
// create a depth texture
... generated like texture above
////Create color attachment texture
GL.FramebufferTexture(FramebufferTarget.Framebuffer, FramebufferAttachment.ColorAttachment0, textureColorBuffer, 0);
GL.FramebufferTexture(FramebufferTarget.Framebuffer, FramebufferAttachment.ColorAttachment1, normalBuffer, 0);
GL.FramebufferTexture(FramebufferTarget.Framebuffer, FramebufferAttachment.DepthAttachment, depthBuffer, 0);
DrawBuffersEnum[] bufs = new DrawBuffersEnum[2] { (DrawBuffersEnum)FramebufferAttachment.ColorAttachment0, (DrawBuffersEnum)FramebufferAttachment.ColorAttachment1 };
GL.DrawBuffers(bufs.Length, bufs);
GL.BindFramebuffer(FramebufferTarget.Framebuffer, 0);
}
Everything works well, no problems appear. The second framebuffer is created equally, except that I need only one color attachment:
private void GenerateFramebuffer2()
{
//Generate framebuffer
framebuffer2 = GL.GenFramebuffer();
GL.BindFramebuffer(FramebufferTarget.Framebuffer, framebuffer2);
// create a RGBA color texture
GL.GenTextures(1, out edgeBuffer);
GL.BindTexture(TextureTarget.Texture2D, edgeBuffer);
GL.TexImage2D(TextureTarget.Texture2D, 0, PixelInternalFormat.Rgba,
glControl1.Width, glControl1.Height,
0, (PixelFormat)PixelInternalFormat.Rgba, PixelType.UnsignedByte,
IntPtr.Zero);
GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMinFilter, (int)TextureMinFilter.Linear);
GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMagFilter, (int)TextureMinFilter.Linear);
GL.BindTexture(TextureTarget.Texture2D, 0);
////Create color attachment texture
GL.FramebufferTexture(FramebufferTarget.Framebuffer, FramebufferAttachment.ColorAttachment0, edgeBuffer, 0);
DrawBuffersEnum[] bufs = new DrawBuffersEnum[1] { (DrawBuffersEnum)FramebufferAttachment.ColorAttachment0};
GL.DrawBuffers(bufs.Length, bufs);
//No need for renderbuffer object since we don't need stencil buffer yet
GL.BindFramebuffer(FramebufferTarget.Framebuffer, 0);
}
And now comes the rendering part: First I bind the first framebuffer and use the fragment shader to output normals and color values to the two attachments:
GL.BindFramebuffer(FramebufferTarget.Framebuffer, framebuffer);
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit); // We're not using stencil buffer so why bother with clearing?
GL.Enable(EnableCap.DepthTest);
Shader.Use();
Model.Draw();
GL.BindFramebuffer(FramebufferTarget.Framebuffer, 0);
Then I bind the second fb and use the textures that the previous framebuffer generated:
GL.BindFramebuffer(FramebufferTarget.Framebuffer, framebuffer2);
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit); // We're not using stencil buffer so why bother with clearing?
//Find edges and put them in separate texture (colors not needed here)
edgeDetectionShader.Use();
GL.ActiveTexture(TextureUnit.Texture1);
GL.BindTexture(TextureTarget.Texture2D, normalBuffer);
GL.Uniform1(GL.GetUniformLocation(edgeDetectionShader.program, "normalTexture"), 1);
GL.ActiveTexture(TextureUnit.Texture2);
GL.BindTexture(TextureTarget.Texture2D, depthBuffer);
GL.Uniform1(GL.GetUniformLocation(edgeDetectionShader.program, "depthTexture"), 2);
And finally I bind to the default framebuffer again and draw the generated textures onto a simple quad:
/////////////////////////////////////////////////////
// Bind to default framebuffer again and draw the
// quad plane with attched screen texture.
// //////////////////////////////////////////////////
GL.BindFramebuffer(FramebufferTarget.Framebuffer, 0);
// Clear all relevant buffers
GL.ClearColor(1, 1, 1, 1); // Set clear color to white
GL.Clear(ClearBufferMask.ColorBufferBit);
GL.Disable(EnableCap.DepthTest); // We don't care about depth information when rendering a single quad
//Combine edges with color values
finalImageProzessingShader.Use();
GL.BindVertexArray(quadVAO);
//Testing purpose: Send color texture to the shader
GL.ActiveTexture(TextureUnit.Texture0);
GL.BindTexture(TextureTarget.Texture2D, textureColorBuffer);
GL.Uniform1(GL.GetUniformLocation(finalImageProzessingShader.program, "screenTexture"), 0);
//Testing purpose: Send normal texture to the shader
GL.ActiveTexture(TextureUnit.Texture1);
GL.BindTexture(TextureTarget.Texture2D, normalBuffer);
GL.Uniform1(GL.GetUniformLocation(finalImageProzessingShader.program, "normalTexture"), 1);
//Testing purpose: Send depth texture to the shader
GL.ActiveTexture(TextureUnit.Texture3);
GL.BindTexture(TextureTarget.Texture2D, depthBuffer);
GL.Uniform1(GL.GetUniformLocation(finalImageProzessingShader.program, "depthTexture"), 2);
//Send the texture of fb2 to shader (generates black screen)
GL.ActiveTexture(TextureUnit.Texture0);
GL.BindTexture(TextureTarget.Texture2D, edgeBuffer);
GL.Uniform1(GL.GetUniformLocation(finalImageProzessingShader.program, "depthTexture"), 3);
GL.DrawArrays(PrimitiveType.Triangles, 0, 6);
GL.BindVertexArray(0);
So outputting color texture, depth texture or normal texture generated by framebuffer one works great, but if I output the texture generated by framebuffer two, I get a black screen. I also tried it with TextureUnit.Texture4 when using edgeBuffer, but this doesn't work either.
Is this even the right approach when I want to do 2 post-processing steps?