
I have an example of a compute shader generating a texture, which a fragment shader then renders onto a quad that takes up the whole window.

In the fragment shader code I see a uniform sampler2D, but how is the output of the compute shader actually passed to the fragment shader? Is it just by virtue of being bound? Wouldn't it be better practice to explicitly bind the texture (via a uniform or some other method) to the fragment/vertex shaders?

// Include standard headers
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <math.h>

// Include GLEW
#include <GL/glew.h>

//Glut
#include <GL/glut.h>

const GLchar* computeSource =
    "#version 430 core\n"
    "\n"
    "layout (local_size_x = 32, local_size_y = 16) in;\n"
    "\n"
    "layout (rgba32f) uniform image2D output_image;\n"
    "void main(void)\n"
    "{\n"
    "    imageStore(output_image,\n"
    "    ivec2(gl_GlobalInvocationID.xy),\n"
    "    vec4(vec2(gl_LocalInvocationID.xy) / vec2(gl_WorkGroupSize.xy), 0.0, 0.0));\n"
    "}\n";

const GLchar* vertexSource =
        "#version 430 core\n"
        "\n"
        "in vec4 vert;\n"
        "\n"
        "void main(void)\n"
        "{\n"
        "    gl_Position = vert;\n"
        "}\n";

const GLchar* fragmentSource =
        "#version 430 core\n"
        "\n"
        "layout (location = 0) out vec4 color;\n"
        "\n"
        "uniform sampler2D output_image;\n"
        "\n"
        "void main(void)\n"
        "{\n"
        "    color = texture(output_image, vec2(gl_FragCoord.xy) / vec2(textureSize(output_image, 0)));\n"
        "}\n";

GLuint vao;
GLuint vbo;
GLuint mytexture;
GLuint shaderProgram;
GLuint computeProgram;

void checkError(int line)
{
    GLenum err;

    do
    {
        err = glGetError();
        switch (err)
        {
            case GL_NO_ERROR:
                //printf("%d: No error\n", line);
                break;
            case GL_INVALID_ENUM:
                printf("%d: Invalid enum!\n", line);
                break;
            case GL_INVALID_VALUE:
                printf("%d: Invalid value\n", line);
                break;
            case GL_INVALID_OPERATION:
                printf("%d: Invalid operation\n", line);
                break;
            case GL_INVALID_FRAMEBUFFER_OPERATION:
                printf("%d: Invalid framebuffer operation\n", line);
                break;
            case GL_OUT_OF_MEMORY:
                printf("%d: Out of memory\n", line);
                break;
            default:
                printf("%d: glGetError default case. Should not happen!\n", line);
        }
    } while (err != GL_NO_ERROR);
}

void display()
{

    glUseProgram(computeProgram);
    glBindImageTexture(0, mytexture, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGBA32F);
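    // 8 x 16 work groups of 32 x 16 invocations each = 256 x 256 invocations, one per texel of the 256x256 texture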
    glDispatchCompute(8, 16, 1);

    glBindTexture(GL_TEXTURE_2D, mytexture);

    glClearColor(0.0f, 1.0f, 0.0f, 0.0f);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glUseProgram(shaderProgram);

    glDrawArrays(GL_TRIANGLE_FAN, 0, 4);

    glFlush();
    glutSwapBuffers();

}    


void reshape(int width,int height)
{
    double w2h = (height>0) ? (double)width/height : 1;
    //  Set viewport as entire window
    glViewport(0,0, width,height);
}



int main(int argc, char** argv)
{

    // Window Setup

    glutInitWindowSize(640, 400);
    glutInitWindowPosition (140, 140);
    glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE);
    glutInit(&argc, argv);

    glutCreateWindow( "OpenGL Application" );
    glutDisplayFunc(display);
    glutReshapeFunc(reshape);

    glewExperimental = true; // Needed for core profile
    if (glewInit() != GLEW_OK) {
        fprintf(stderr, "Failed to initialize GLEW\n");
        return -1;
    }

    glGenVertexArrays(1, &vao);
    glBindVertexArray(vao);
    glEnableVertexAttribArray(0);

    glGenBuffers(1, &vbo);

    GLfloat vertices[] = {
        // X    Y      Z     A
        -1.0f, -1.0f, 0.5f, 1.0f,
         1.0f, -1.0f, 0.5f, 1.0f,
         1.0f,  1.0f, 0.5f, 1.0f,
        -1.0f,  1.0f, 0.5f, 1.0f,
    };

    glBindBuffer(GL_ARRAY_BUFFER, vbo);

    glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
    glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, NULL);


    checkError(__LINE__);


    GLuint vertexShader = glCreateShader(GL_VERTEX_SHADER);
    glShaderSource(vertexShader, 1, &vertexSource, NULL);
    glCompileShader(vertexShader);

    GLuint fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
    glShaderSource(fragmentShader, 1, &fragmentSource, NULL);
    glCompileShader(fragmentShader);
    checkError(__LINE__);


    GLuint computeShader;
    computeProgram = glCreateProgram();
    computeShader = glCreateShader(GL_COMPUTE_SHADER);
    glShaderSource(computeShader, 1, &computeSource, NULL);
    glCompileShader(computeShader);
    glAttachShader(computeProgram, computeShader);
    glLinkProgram(computeProgram);

    glGenTextures(1, &mytexture);
    glBindTexture(GL_TEXTURE_2D, mytexture);
    glTexStorage2D(GL_TEXTURE_2D, 8, GL_RGBA32F, 256, 256);
    checkError(__LINE__);

    shaderProgram = glCreateProgram();
    glAttachShader(shaderProgram, vertexShader);
    glAttachShader(shaderProgram, fragmentShader);
    glBindFragDataLocation(shaderProgram, 0, "color");
    glLinkProgram(shaderProgram);
    checkError(__LINE__);

    glutMainLoop();

    return 0;
}
Is that code actually behaving as expected? I'd expect a glGetUniformLocation() & glUniform() pair somewhere in there but I'm not seeing it. – genpfault
Yeah... This code is modified from the OpenGL "red book". I really hate the book. They use macros and helper functions in places where it's a really bad idea to do so for clarity, and their explanations are very poor. See the Chapter 12 examples: github.com/openglredbook/examples/tree/master/src – Maxthecat

1 Answer


The main reason this is working is that uniform variables in shaders have a default value of 0. From the GLSL 4.5 spec, section 4.3.5:

All uniform variables are read-only and are initialized externally either at link time or through the API. The link-time initial value is either the value of the variable's initializer, if present, or 0 if no initializer is present.

The next part you need to understand is that the value of a sampler variable is the texture unit you want to sample from. Very similarly, the value of an image variable is the image unit used for the image access.
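
As an illustration (a minimal sketch rather than code from the question; texture unit 3 is just an arbitrary example), making the fragment shader sample from texture unit 3 instead would look like this:

    glActiveTexture(GL_TEXTURE3);                 // select texture unit 3
    glBindTexture(GL_TEXTURE_2D, mytexture);      // bind the texture to that unit
    glUseProgram(shaderProgram);
    glUniform1i(glGetUniformLocation(shaderProgram, "output_image"), 3);  // the sampler's value is the unit index, not the texture name

The key point is that the integer stored in a sampler uniform is a texture unit index, never the texture object's name.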

Putting these two pieces together, since you don't set values for these uniform variables, the sampler in the fragment shader will access the texture bound to texture unit 0. The image in the compute shader will access the image bound to image unit 0.

Fortunately for you, this is exactly what you need:

  • Since you never set the active texture unit with glActiveTexture(), this call:

    glBindTexture(GL_TEXTURE_2D, mytexture);
    

    will bind the texture to texture unit 0, which means that it will be sampled in your fragment shader.

  • In your call that binds the image:

    glBindImageTexture(0, mytexture, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGBA32F);
    

    you pass 0 as the first argument, which specifies the image unit you want to bind to. As a result, the compute shader will access this image.

IMHO, it's good style to always set the values of these uniform variables, even if the default happens to be sufficient. It makes the code more readable, and setting the uniform values becomes essential as soon as you use more than one texture/image. So for clarity, I would add something like this to your code:

GLint imgLoc = glGetUniformLocation(computeProgram, "output_image");
glUniform1i(imgLoc, 0);
...
GLint texLoc = glGetUniformLocation(shaderProgram, "output_image");
glUniform1i(texLoc, 0);

Note that the glUniform1i() calls need to be made while the corresponding program is active.
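
For example (a sketch using the program handles and uniform name from your code, and assuming unit 0 throughout), the calls could go right after the corresponding glLinkProgram() calls in main():

    glUseProgram(computeProgram);
    glUniform1i(glGetUniformLocation(computeProgram, "output_image"), 0);  // image unit 0

    glUseProgram(shaderProgram);
    glUniform1i(glGetUniformLocation(shaderProgram, "output_image"), 0);   // texture unit 0
    glUseProgram(0);

Since your shaders are written against #version 430, an alternative is to fix the units in the GLSL itself, e.g. layout (binding = 0, rgba32f) uniform image2D output_image; in the compute shader and layout (binding = 0) uniform sampler2D output_image; in the fragment shader, which removes the need for the glGetUniformLocation()/glUniform1i() calls altogether.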