
I'm trying to implement SSAO following this tutorial:

http://www.learnopengl.com/#!Advanced-Lighting/SSAO

Here is what I end up with for my render textures.

[screenshot]

When I move the camera, the shadows seem to follow:

[screenshot]

It seems like I am missing some kind of matrix multiplication involving the camera (view) matrix.

CODE

gBuffer Vertex

#version 330 core
layout (location = 0) in vec3 vertexPosition;
layout (location = 1) in vec3 vertexNormal;

out vec3 position;
out vec3 normal;

uniform mat4 m;
uniform mat4 v;
uniform mat4 p;
uniform mat4 n;

void main()
{
    vec4 viewPos = v * m * vec4(vertexPosition, 1.0f);
    position = viewPos.xyz;
    gl_Position = p * viewPos;
    normal = vec3(n * vec4(vertexNormal, 0.0f));
}

gBuffer Fragment

#version 330 core
layout (location = 0) out vec4 gPosition;
layout (location = 1) out vec3 gNormal;
layout (location = 2) out vec4 gColor;

in vec3 position;
in vec3 normal;

const float NEAR = 0.1f;
const float FAR = 50.0f;
float LinearizeDepth(float depth)
{
    float z = depth * 2.0f - 1.0f;
    return (2.0 * NEAR * FAR) / (FAR + NEAR - z * (FAR - NEAR));
}

void main()
{   
    gPosition.xyz = position;
    gPosition.a = LinearizeDepth(gl_FragCoord.z);
    gNormal = normalize(normal);
    gColor.rgb = vec3(1.0f);
}

SSAO Vertex

#version 330 core

layout (location = 0) in vec3 vertexPosition;
layout (location = 1) in vec2 texCoords;

out vec2 UV;

void main(){
    gl_Position =  vec4(vertexPosition, 1.0f);
    UV = texCoords;
}

SSAO Fragment

#version 330 core

out float FragColor;
in vec2 UV;

uniform sampler2D gPositionDepth;
uniform sampler2D gNormal;
uniform sampler2D texNoise;
uniform vec3 samples[32];
uniform mat4 projection;

// parameters (you'd probably want to use them as uniforms to more easily tweak the effect)
int kernelSize = 32;
float radius = 1.0;

// tile noise texture over screen based on screen dimensions divided by noise size
const vec2 noiseScale = vec2(1024.0f/4.0f, 1024.0f/4.0f);

void main()
{
    // Get input for SSAO algorithm
    vec3 fragPos = texture(gPositionDepth, UV).xyz;
    vec3 normal = texture(gNormal, UV).rgb;
    vec3 randomVec = texture(texNoise, UV * noiseScale).xyz;
    // Create TBN change-of-basis matrix: from tangent-space to view-space
    vec3 tangent = normalize(randomVec - normal * dot(randomVec, normal));
    vec3 bitangent = cross(normal, tangent);
    mat3 TBN = mat3(tangent, bitangent, normal);
    // Iterate over the sample kernel and calculate occlusion factor
    float occlusion = 0.0;
    for(int i = 0; i < kernelSize; ++i)
    {
        // get sample position
        vec3 sample = TBN * samples[i]; // From tangent to view-space
        sample = fragPos + sample * radius;

        // project sample position (to sample texture) (to get position on screen/texture)
        vec4 offset = vec4(sample, 1.0);
        offset = projection * offset; // from view to clip-space
        offset.xyz /= offset.w; // perspective divide
        offset.xyz = offset.xyz * 0.5 + 0.5; // transform to range 0.0 - 1.0

        // get sample depth
        float sampleDepth = -texture(gPositionDepth, offset.xy).w; // Get depth value of kernel sample

        // range check & accumulate
        float rangeCheck = smoothstep(0.0, 1.0, radius / abs(fragPos.z - sampleDepth ));
        occlusion += (sampleDepth >= sample.z ? 1.0 : 0.0) * rangeCheck;
    }
    occlusion = 1.0 - (occlusion / kernelSize);
    FragColor = occlusion;
}

I've read around and saw that someone with a similar issue passed the view matrix into the SSAO shader and multiplied it into the sampleDepth:

float sampleDepth = (viewMatrix * -texture(gPositionDepth, offset.xy)).w;

But that seems to just make things worse.

Here's another view from above, where you can see the shadows move with the camera:

[screenshot]

If I position my camera in certain ways, things line up:

[screenshot]

1 Answer

Although I can only guess at the value of your normal matrix n in the gBuffer vertex shader, it looks like you are storing your normals in world space rather than in view space. Since the SSAO pass samples positions and normals that are expected to be in view space, this could (at least partially) explain the unexpected behavior. In that case, you either need to multiply your normals by your view matrix v before storing them in the gBuffer (potentially more efficient, but it may interfere with your other shading calculations) or transform them after retrieving them in the SSAO shader.
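
A minimal sketch of both options, assuming the normal matrix n currently only accounts for the model transform and that the SSAO shader can receive the view matrix as a uniform (viewMatrix is an illustrative name):

// Option 1: store view-space normals in the gBuffer.
// In the gBuffer vertex shader, derive the normal matrix from the
// modelview matrix instead of the model matrix alone.
mat3 normalMatrix = transpose(inverse(mat3(v * m)));
normal = normalMatrix * vertexNormal;

// Option 2: keep world-space normals in the gBuffer and rotate them
// into view space when reading them in the SSAO fragment shader.
uniform mat4 viewMatrix; // assumed to be uploaded by the application
// in main():
vec3 normal = normalize(mat3(viewMatrix) * texture(gNormal, UV).rgb);

Note that calling inverse() per vertex is relatively expensive; in practice you would usually compute the view-space normal matrix once on the CPU and upload it as n.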