0
votes

I have written a small OpenGL program using GLSL shaders. Here's a screenshot of my application:

enter image description here

Now, my objective is to mix my OpenGL frame and the one from my webcam into a single frame using OpenCV. Before doing this with GLSL shaders, I tried to write a very simple program drawing a colored triangle with some basic OpenGL functions (glBegin, glVertex, etc.).

/*
** Initializes GLFW, opens a 500x500 window and sets up a fixed-function
** perspective camera.
**
** Returns 0 on success, -1 if GLFW or the window could not be initialized.
*/
static int      initGL()
{
    if (!glfwInit())
        return (-1);

    if (!glfwOpenWindow(500, 500, 8, 8, 8, 0, 24, 0, GLFW_WINDOW)) {
        glfwTerminate();
        return (-1);
    }

    /* BUG FIX: GL_PROJECTION is a matrix-mode selector, not a capability.
    ** Passing it to glEnable() raises GL_INVALID_ENUM and leaves the
    ** current matrix mode unchanged; glMatrixMode() is the correct call. */
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluPerspective(45.0f, 500.0f / 500.0f, 0.1f, 100.0f);
    /* NOTE(review): this gluLookAt multiplies into the PROJECTION matrix;
    ** the view transform conventionally belongs on the modelview stack —
    ** confirm whether that is intended before moving it. */
    gluLookAt(0.0f, 0.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f);

    return (0);
}

static void     drawGL()
{
    glBegin(GL_TRIANGLES);
    glColor3ub(255, 0, 0);
    glVertex3f(-1.0f, 0.0f, 0.0f);
    glColor3ub(0, 255, 0);
    glVertex3f(0.0f, 1.0f, 0.0f);
    glColor3ub(0, 0, 255);
    glVertex3f(1.0f, 0.0f, 0.0f);
    glEnd();
}

int             main(void)
{
    initGL();

    CvCapture   *capture = cvCaptureFromCAM(CV_CAP_ANY);

    if (!capture) {
        fprintf(stderr, "ERROR: capture is NULL \n");
        getchar();
        return (-1);
    }

    while (1)
    {
        glClear(GL_COLOR_BUFFER_BIT);
        glEnable(GL_MODELVIEW);

        IplImage    *frame = cvQueryFrame(capture);

        if (!frame) {
            fprintf(stderr, "ERROR: frame is null...\n");
            getchar();
            break;
        }

        glDrawPixels(frame->width, frame->height, GL_RGB, GL_UNSIGNED_BYTE, frame->imageData);

        drawGL();

        glfwSwapBuffers();

        if ( (cvWaitKey(10) & 255) == 27 ) break;
    }

    cvReleaseCapture(&capture);
    cvDestroyWindow("mywindow");

    return (EXIT_SUCCESS);
}

Here's the render :

enter image description here

As you can see, the result is correct. Now, I wanted to try my first program using GLSL shaders with OpenCV, as shown below, but when my application is launched I get a black screen. I ran several tests and the problem seems to start when I call the glUseProgram function (i.e. when I bind the shader program — in my code it corresponds to the program->bind() call). But here I use the glDrawPixels function to load my video frame. I think it's not the right function to call if I want to use GLSL shaders. Here's a piece of my C++ code:

[...]

/*My GLSL shaders initialization*/

[...]
int                         main(int ac, char **av)
{
    bool                    isAlive = true;
    unsigned int            vaoID = 0;
    SDL_Event               event;
    GLuint                  textureID = 0;
    ShaderProgram           *program = NULL;

    if (!glfwInit())
        return (-1);

    if (!glfwOpenWindow(WIDTH, HEIGHT, 8, 8, 8, 0, 24, 0, GLFW_WINDOW)) {
        glfwTerminate();
        return (-1);
    }

    glEnable(GL_DEPTH_TEST);

    //Viewport initialization

    glViewport(0, 0, WIDTH, HEIGHT);

    //Vertex declaration

    VertexDeclaration *vDeclar = initVertexDeclaration();

    //Glew init component

    glewInit();

    //VBO initialization

    VertexBuffer *vBuffer = initVBO();

    //Shaders initialization

    program = initShaders("triangle-pf.vert", "triangle-pf.frag");

    //Texture initialization

    textureID = loadBMPTexture("Box.bmp");

    //VAO initialization

    initVAO(vaoID, vBuffer, textureID, vDeclar);

    //Screen capture Initialization

    CvCapture   *capture = cvCaptureFromCAM(CV_CAP_ANY);

    if (!capture) {
        fprintf(stderr, "ERROR: capture is NULL \n");
        getchar();
        return (-1);
    }

    //Main loop

    while (isAlive == true)
    {
        //eventListener(&event, &isAlive);

        if (glfwGetKey(GLFW_KEY_ESC))
            isAlive = false;

        glClearDepth(1.0f);
        glClearColor(0.13f, 0.12f, 0.13f, 1.0f);
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

        //---------------------------------------------------

        IplImage    *frame = cvQueryFrame(capture);

        if (!frame) {
            fprintf(stderr, "ERROR: frame is null...\n");
            getchar();
            break;
        }

        //---------------------------------------------------

        program->bind();

        //Projection matrix

        glm::mat4 ProjectionMatrix = glm::perspective(45.0f, 500.0f / 500.0f, 0.1f, 100.0f);

        //View matrix

        glm::mat4 ViewMatrix = glm::lookAt(glm::vec3(0.0f, 0.0f, 8.0f), glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f));

        //Model matrix

        glm::mat4 ModelMatrix = glm::mat4(1.0f);
        ModelMatrix = glm::translate(ModelMatrix, glm::vec3(0.0f, 0.0f, 0.0f));
        ModelMatrix = glm::rotate(ModelMatrix, angle, glm::vec3(1.0f, 1.0f, 1.0f));
        ModelMatrix = glm::scale(ModelMatrix, glm::vec3(1.0f, 1.0f, 1.0f));

        //Prepare matrix

        glm::mat4 ModelViewMatrix = ViewMatrix * ModelMatrix;
        glm::mat3 NormalMatrix = glm::mat3(glm::vec3(ModelViewMatrix[0]), glm::vec3(ModelViewMatrix[1]), glm::vec3(ModelViewMatrix[2]));
        glm::mat4 ModelViewProjectionMatrix = ProjectionMatrix * ModelViewMatrix;
        glm::vec4 LightPositionVec1 = ViewMatrix * glm::vec4(LightPosition1[0], LightPosition1[1], LightPosition1[2], LightPosition1[3]);

        //Send source light properties

        program->setUniform("LightInfos[0].La", glm::vec3(0.000000f, 0.000000f, 0.000000f));
        program->setUniform("LightInfos[0].Ld", glm::vec3(0.800000f, 0.800000f, 0.800000f));
        program->setUniform("LightInfos[0].Ls", glm::vec3(1.000000f, 1.000000f, 1.000000f));
        program->setUniform("LightInfos[0].Le", glm::vec3(0.200000f, 0.200000f, 0.200000f));

        /*program->setUniform("LightInfos[1].La", glm::vec3(0.000000f, 0.000000f, 0.000000f));
        program->setUniform("LightInfos[1].Ld", glm::vec3(0.800000f, 0.800000f, 0.800000f));
        program->setUniform("LightInfos[1].Ls", glm::vec3(0.000000f, 1.000000f, 1.000000f));
        program->setUniform("LightInfos[1].Le", glm::vec3(0.200000f, 0.200000f, 0.200000f));*/

        //Send model materials properties

        program->setUniform("MaterialInfos.Ka", glm::vec3(0.000000f, 0.000000f, 0.000000f));
        program->setUniform("MaterialInfos.Kd", glm::vec3(1.000000f, 1.000000f, 1.000000f));
        program->setUniform("MaterialInfos.Ks", glm::vec3(1.000000f, 1.000000f, 1.000000f));
        program->setUniform("MaterialInfos.Ke", glm::vec3(0.200000f, 0.000000f, 0.200000f));
        program->setUniform("MaterialInfos.Shininess", 10.0f);

        //Send light position

        program->setUniform("LightInfos[0].Position", LightPositionVec1);

        //Send matrix

        program->setUniform("ProjectionMatrix", ProjectionMatrix);
        program->setUniform("NormalMatrix", NormalMatrix);
        program->setUniform("ModelViewMatrix", ModelViewMatrix);
        program->setUniform("ModelMatrix", ModelMatrix);
        program->setUniform("MVP", ModelViewProjectionMatrix);

        glDrawPixels(frame->width, frame->height, GL_RGB, GL_UNSIGNED_BYTE, frame->imageData);

        //VAO binding

        glBindVertexArray(vaoID);

        //Render meshes

        glDrawArrays(GL_TRIANGLES, 0, vBuffer->getSize());

        glBindVertexArray(0);

        program->release();

        angle += 0.50f;

        glFlush();
        glfwSwapBuffers();
    }

    unsigned int vboID = vBuffer->getHandle();
    glDeleteBuffers(1, &vboID);
    glDeleteVertexArrays(1, &vaoID);
    return (0);
}

So I think the problem comes from glDrawPixels, which cannot be used together with GLSL shaders. I tried several possible approaches without any success. Maybe I have to send the video frame buffer directly to the pixel shader? In that case, how can I do that? I'm really lost. Can anyone help me?

1
"Mix my opengl frame and the one from my webcam in a unique frame using OpenCV," what does that mean? Can't you just render a quad in the background, and use the image data from OpenCV as a texture, then draw other 3D objects on top? I don't understand why you want to use a shader. What is the shader supposed to do?Andreas Haferburg
I use shaders to compute some light effects which are not possible to render with a basic Opengl usage. I just want to render my cube and have my real time video as a background (like the example with the triangle above). It's like a juxtaposition of the video frame (from my webcam) and the other frame with the cube. Do you see what I mean ?user1364743
Yes, that's called augmented reality. Do you need these light effects applied to your background or the 3D objects?Andreas Haferburg
My light effects are applied only on my 3D object like on my first picture. The background is just the video stream from the webcam. It's not a texture (picture number 2). The example with the triangle above works like that. I don't use any texture on the background, it's just the video stream. And I draw a simple triangle on it. I just want to have the same behaviour but using GLSL shaders.user1364743
You do know that you can use a shader for the 3D objects, but keep the old code for the background, right? I still don't understand why you would want to use a shader for the background, since you don't seem to need it.Andreas Haferburg

1 Answer

0
votes

The way you would set this up is to use your image data as a texture. This link might be a good starting point. You can then render a quad in the background, which fills the entire screen, and uses the texture to determine the color of each pixel. The advantage over glDrawPixels() is that if you scale the window (and the quad), the image from the video will get scaled accordingly.

If you need to, you can use the texture in a shader when rendering that quad: The fragment (pixel) shader gets called for each pixel, and you would use a sampler to get the color from the texture. You can then modify that color according to your lighting needs.

What you draw on top of that is up to you.