I am semi-experienced in OpenGL and am currently working on simple lighting for a 2D game. My plan is as follows:
- Create a Uniform Buffer Object holding two arrays of vec4s describing every active light in the scene: one array for positions and one for colors.
- Take that uniform block as an input in my vertex shader, transform the data in some way, and copy it into an output interface block with the same layout as the uniform block (sorry if that's confusing or incorrect terminology - don't hesitate to ask for clarification).
- Use the interface block output by the vertex shader as an input to my fragment shader and perform the lighting calculations using its contents (see the sketch right after this list).
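To make the intended layout concrete, here is a minimal sketch of the C++-side mirror of the block (MAX_LIGHTS is just an illustrative constant, not something in my actual code; the full shaders appear further down):

// Mirrors the GLSL block
//   layout (std140) uniform BlockLight { vec4 pos[MAX_LIGHTS]; vec4 color[MAX_LIGHTS]; };
// Under std140 a vec4 array has a 16-byte array stride, so a tightly packed
// float array with 4 floats per light should line up with it exactly.
constexpr int MAX_LIGHTS = 14; // illustrative only

struct BlockLightData
{
    float pos[4 * MAX_LIGHTS];
    float color[4 * MAX_LIGHTS];
};
static_assert(sizeof(BlockLightData) == 2 * MAX_LIGHTS * 4 * sizeof(float), "no unexpected padding");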
This should be a simple task, but here's the catch: if the arrays in my uniform block are declared with a size greater than 14, my shaders seem to fail silently (no error string from either compilation or linking), which, strangely, also makes all other uniforms inaccessible.
Calling glGetIntegerv with GL_MAX_VERTEX_UNIFORM_COMPONENTS_ARB returns 4096, which I have taken to mean I should be able to store up to 4096 floats / (4 floats per vec4) / (2 vec4s per light) = 512 lights in a single uniform buffer.
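For reference, this is roughly how I query those limits (assuming a current GL context); the extra GL_MAX_UNIFORM_BLOCK_SIZE query reflects my possibly wrong understanding that it is the cap that actually applies to uniform blocks, as opposed to loose uniforms in the default block:

GLint maxVertexUniformComponents = 0;
GLint maxUniformBlockSize = 0;

// limit on components for loose (default-block) uniforms in the vertex stage;
// GL_MAX_VERTEX_UNIFORM_COMPONENTS is the core name for the _ARB token above
glGetIntegerv(GL_MAX_VERTEX_UNIFORM_COMPONENTS, &maxVertexUniformComponents);

// limit, in bytes, on the size of a single uniform block - as far as I know,
// this is the one that governs data accessed through a UBO
glGetIntegerv(GL_MAX_UNIFORM_BLOCK_SIZE, &maxUniformBlockSize);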
Here is the relevant C++ code (note that the float arrays are sized so each light occupies a full vec4 - I don't believe this is an alignment issue with the std140 layout I'm using):
// generate some test light data and upload it to a UBO
constexpr uint lightCount = 14;
renderer.GetShader().SetUniform1i("u_LightCount", lightCount);

// tightly packed: 4 floats per vec4, so no extra padding should be needed under std140
struct
{
    float pos[4 * lightCount] = { 0 };
    float color[4 * lightCount] = { 0 };
} lights;

// one light every 4 floats: pos = (i * 4, i * 4, 100, 1), color = opaque red
for (uint i = 0; i < 4 * lightCount; i += 4)
{
    lights.pos[i + 0] = i * 4;
    lights.pos[i + 1] = i * 4;
    lights.pos[i + 2] = 100.f;
    lights.pos[i + 3] = 1;

    lights.color[i + 0] = 1;
    lights.color[i + 1] = 0;
    lights.color[i + 2] = 0;
    lights.color[i + 3] = 1;
}

// create the UBO and upload the initial data
GLuint ubo = 0;
glGenBuffers(1, &ubo);
glBindBuffer(GL_UNIFORM_BUFFER, ubo);
glBufferData(GL_UNIFORM_BUFFER, sizeof(lights), &lights, GL_DYNAMIC_DRAW);
glBindBuffer(GL_UNIFORM_BUFFER, 0);

// associate the shader's "BlockLight" block with binding point 0
uint block = glGetUniformBlockIndex(renderer.GetShader().GetId(), "BlockLight");
GLuint bind = 0;
glUniformBlockBinding(renderer.GetShader().GetId(), block, bind);
I might be doing something weird with the binding here. The glBindBufferRange call below is required for the shaders to work, but I don't see how it differs from the plain glBindBuffer call.
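For what it's worth, my current understanding of the difference (please correct me if this is wrong) is that glBindBuffer only attaches the buffer to the generic GL_UNIFORM_BUFFER target so that glBufferData/glBufferSubData have something to operate on, whereas glBindBufferBase/glBindBufferRange attach it to an indexed binding point - the same index that glUniformBlockBinding associates with the shader's block:

// generic bind: makes `ubo` the target of upload calls, but does not expose it to any shader block
glBindBuffer(GL_UNIFORM_BUFFER, ubo);

// indexed bind: attaches `ubo` to binding point `bind`, which "BlockLight" was assigned above
glBindBufferBase(GL_UNIFORM_BUFFER, bind, ubo);
// ...or, equivalently for the whole buffer:
glBindBufferRange(GL_UNIFORM_BUFFER, bind, ubo, 0, sizeof(lights));

With that in mind, the per-frame update currently looks like this: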
// update light positions based on scroll wheel and upload to the UBO
lightDistance = math::clamp(lightDistance + engine.gl->GetMouseScroll().y, 0, 255);
for (uint i = 0; i < lightCount; i++)
    lights.pos[i * 4 + 2] = lightDistance;

glBindBuffer(GL_UNIFORM_BUFFER, ubo);
glBufferData(GL_UNIFORM_BUFFER, sizeof(lights), &lights, GL_DYNAMIC_DRAW);
glBindBufferRange(GL_UNIFORM_BUFFER, bind, ubo, 0, sizeof(lights));
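In case it is relevant, the alternative I understand to be more conventional would be to attach the buffer to the binding point once at setup and then update its contents in place each frame, roughly like this:

// one-time setup, after glUniformBlockBinding: attach the whole buffer to binding point `bind`
glBindBufferBase(GL_UNIFORM_BUFFER, bind, ubo);

// per frame: overwrite the existing data store instead of reallocating it
glBindBuffer(GL_UNIFORM_BUFFER, ubo);
glBufferSubData(GL_UNIFORM_BUFFER, 0, sizeof(lights), &lights);
glBindBuffer(GL_UNIFORM_BUFFER, 0);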
My vertex shader code:
#version 420 core

layout(location = 0) in vec2 i_Position;
layout(location = 1) in vec2 i_TexCoord;
layout(location = 2) in float i_TexIndex;

uniform int u_LightCount;
uniform vec2 u_Scale;
uniform vec2 u_Camera;

layout (std140) uniform BlockLight
{
    vec4 pos[14];
    vec4 color[14];
} u_Light;

out vec2 v_TexCoord;
out float v_TexIndex;

out BlockLight
{
    vec4 pos[14];
    vec4 color[14];
} v_Light;

void main()
{
    gl_Position = vec4((i_Position + u_Camera) * u_Scale, 0, 1);
    v_TexCoord = i_TexCoord;
    v_TexIndex = i_TexIndex;

    // copy the lights into the output interface block, offsetting their xy by the camera
    for (int i = 0; i < u_LightCount; i++)
    {
        v_Light.pos[i] = u_Light.pos[i] + vec4(u_Camera, 0, 1);
        v_Light.color[i] = u_Light.color[i];
    }
}
And the fragment shader:
#version 330 core

#define PIXEL_SIZE 5

layout(location = 0) out vec4 o_Color;

uniform int u_LightCount;
uniform int u_TextureFrames[32];
uniform vec2 u_Resolution;
uniform sampler2DArray u_Textures[32];

in float v_TexIndex;
in vec2 v_TexCoord;

in BlockLight
{
    vec4 pos[14];
    vec4 color[14];
} v_Light;

void main()
{
    // quantise the fragment position to PIXEL_SIZE blocks, relative to the screen centre
    vec2 fragPos = floor((gl_FragCoord.xy - u_Resolution / 2.0) / PIXEL_SIZE);

    // accumulate the contribution of every active light
    vec3 lightColor = vec3(0);
    for (int i = 0; i < u_LightCount; i++)
    {
        float x = length(v_Light.pos[i].xy - fragPos) * 2;
        float z = v_Light.pos[i].z;
        float intensity = (x > z ? 0 : clamp((z - sqrt(z * z - (x - z) * (x - z))) / 255, 0, 1));
        lightColor += v_Light.color[i].xyz * intensity;
    }
    lightColor = clamp(lightColor, vec3(0), vec3(1));

    int index = int(v_TexIndex);
    o_Color = vec4(lightColor, 1) * texture(u_Textures[index], vec3(v_TexCoord, u_TextureFrames[index]));
}
Intended output (only happens when <= 14 lights are used): [screenshot]
Error output (occurs when any of the array sizes in the shaders is raised above 14): [screenshot]
Is it possible that I've done something wrong or skipped a step in my OpenGL setup code? Am I misusing the UBO functions? Or is it actually a data padding/formatting error that I've somehow missed? Any help is much appreciated.