I'm making a game in C# / XNA. I'm currently working on the shader I'll be using for the terrain. I'm using a texture atlas for speed and efficiency but I'm experiencing texture/color bleeding between tiles: http://i.imgur.com/lZcESsn.png

I get this effect in both FX Composer and my game itself. Here is my shader:

//-----------------------------------------------------------------------------
// InstancedModel.fx
//
// Microsoft XNA Community Game Platform
// Copyright (C) Microsoft Corporation. All rights reserved.
//-----------------------------------------------------------------------------


// Camera settings.
float4x4 World : World < string UIWidget="None"; >;
float4x4 View : View < string UIWidget="None"; >;
float4x4 Projection : Projection < string UIWidget="None"; >;

// This sampler uses a simple Lambert lighting model.
float3 LightDirection = normalize(float3(-1, -1, -1));
float3 DiffuseLight = 1.25;
float3 AmbientLight = 0.25;

float TextureSide = 0; //0 = top, 1 = side, 2 = bottom
float2 TextureCoord;
texture Texture;
float2 TextureSize = 2.0;

sampler Sampler = sampler_state
{
    Texture = (Texture);
    MinFilter = Linear;
    MipFilter = Linear;
    MagFilter = Linear;
    AddressU = Clamp;
    AddressV = Clamp;
};


struct VertexShaderInput
{
    float4 Position : POSITION0;
    float3 Normal : NORMAL0;
    float2 TextureCoordinate : TEXCOORD0;
};


struct VertexShaderOutput
{
    float4 Position : POSITION0;
    float4 Color : COLOR0;
    float2 TextureCoordinate : TEXCOORD0;
};

// Vertex shader helper function shared between the two techniques.
VertexShaderOutput VertexShaderCommon(VertexShaderInput input, float4x4 instanceTransform, float2 atlasCoord, float4 colour)
{
    VertexShaderOutput output;

    // Apply the world and camera matrices to compute the output position.
    float4 worldPosition = mul(input.Position, instanceTransform);
    float4 viewPosition = mul(worldPosition, View);
    output.Position = mul(viewPosition, Projection);

    // Compute lighting, using a simple Lambert model.
    float3 worldNormal = mul(input.Normal, instanceTransform);    
    float diffuseAmount = max(-dot(worldNormal, LightDirection), 0);    
    float3 lightingResult = saturate(diffuseAmount * DiffuseLight + AmbientLight);    
    output.Color = float4(lightingResult, 1);
    output.Color = output.Color * colour;

    //calculate texture coords  
    float2 InputTextureCoords = input.TextureCoordinate;// / TextureSize;
    float2 InputAtlasCoords = atlasCoord;// / TextureSize;  

    float2 textCoordsActual = InputTextureCoords + InputAtlasCoords;

    output.TextureCoordinate = textCoordsActual;

    return output;
}


// Hardware instancing reads the per-instance world transform from a secondary vertex stream.
VertexShaderOutput HardwareInstancingVertexShader(VertexShaderInput input,
                                                  float4x4 instanceTransform : BLENDWEIGHT,
                                                  float2 atlasCoord1 : TEXCOORD1, float2 atlasCoord2 : TEXCOORD2, float2 atlasCoord3 : TEXCOORD3, 
                                                  float4 colour : COLOR1)
{
    // Pick which atlas coordinate set to use for this face.
    float2 atlasCoord = atlasCoord1;
    if (TextureSide == 2)
    {
        atlasCoord = atlasCoord2;
    }
    else if (TextureSide == 3)
    {
        atlasCoord = atlasCoord3;
    }
    return VertexShaderCommon(input, mul(World, transpose(instanceTransform)), atlasCoord, colour);
}


// When instancing is disabled we take the world transform from an effect parameter.
VertexShaderOutput NoInstancingVertexShader(VertexShaderInput input)
{
    return VertexShaderCommon(input, World, TextureCoord, float4(1, 1, 1, 1));
}

// Maps tile-space coordinates into atlas space by dividing by TextureSize.
float2 HalfPixileCorrectedCoords(float2 coords)
{
    float u = (coords.x) / TextureSize;
    float v = (coords.y) / TextureSize;

    return float2(u, v);
}

// Both techniques share this same pixel shader.
float4 PixelShaderFunction(VertexShaderOutput input) : COLOR0
{
    float2 outputTextureCoords = HalfPixileCorrectedCoords(input.TextureCoordinate);    
    return tex2D(Sampler, outputTextureCoords) * input.Color;
}


// Hardware instancing technique.
technique HardwareInstancing
{
    pass Pass1
    {
        VertexShader = compile vs_3_0 HardwareInstancingVertexShader();
        PixelShader = compile ps_3_0 PixelShaderFunction();
    }
}

// For rendering without instancing.
technique NoInstancing
{
    pass Pass1
    {
        VertexShader = compile vs_3_0 NoInstancingVertexShader();
        PixelShader = compile ps_3_0 PixelShaderFunction();
    }
}

My FX Composer HLSL profile: http://i.imgur.com/wNzmPXA.png

and the test atlas I'm using: (can't post it because I need more reputation; I can perhaps post it in a follow-up?)

I've done a lot of reading about this, and it seems that I either need to apply a "half pixel correction" or wrap/pad the pixels at the edges of each tile within the atlas. I've tried both of these with no success.
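
What I mean by the half pixel correction is roughly the following (a simplified C# sketch with placeholder names, not my exact code):

// using Microsoft.Xna.Framework;  (for Vector4)
// Shrinks a tile's UV rectangle by half a texel on every side so that
// bilinear filtering never samples a neighbouring tile.
Vector4 GetInsetTileUVs(int tileX, int tileY, int tilesPerRow, int atlasSize)
{
    float tileUvSize = 1f / tilesPerRow;   // UV size of one tile
    float halfTexel = 0.5f / atlasSize;    // half a texel in UV units

    float u0 = tileX * tileUvSize + halfTexel;
    float v0 = tileY * tileUvSize + halfTexel;
    float u1 = (tileX + 1) * tileUvSize - halfTexel;
    float v1 = (tileY + 1) * tileUvSize - halfTexel;

    return new Vector4(u0, v0, u1, v1);    // (min U, min V, max U, max V)
}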

Question: How do I solve the pixel bleeding issue I'm experiencing?

1 Answer


If you want to get nice seamless tiling textures using an atlas, you have to create a texture that is four times bigger than you would otherwise need, that is, (2 x width) x (2 x height) for each tile.

More specifically, each tile in the atlas should look like this: (image: the tile repeated in a 2 x 2 pattern)

The whole tile should be repeated twice in each direction, starting with its center at (u,v).

(u,v) are the coordinates of the tile in the atlas texture.
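
A rough sketch of how one of these padded cells could be generated in C#/XNA (the helper name, the row-major Color[] layout, and the use of GetData/SetData-style arrays are my assumptions, not part of any particular implementation):

// using Microsoft.Xna.Framework;
// using Microsoft.Xna.Framework.Graphics;
// Builds a (2 * width) x (2 * height) cell in which the source tile repeats,
// offset so that the tile's centre sits at the cell's top-left corner (u, v).
Color[] BuildPaddedTile(Color[] source, int width, int height)
{
    Color[] padded = new Color[(2 * width) * (2 * height)];
    for (int y = 0; y < 2 * height; y++)
    {
        for (int x = 0; x < 2 * width; x++)
        {
            int srcX = (x + width / 2) % width;    // wrap horizontally
            int srcY = (y + height / 2) % height;  // wrap vertically
            padded[y * (2 * width) + x] = source[srcY * width + srcX];
        }
    }
    return padded;
}

Each padded cell is then copied into the atlas, one per tile.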

However, the coordinates which you should use for this tile while texturing an object are:

(u0, v0) <---> (u1, v1)

You can calculate them as follows:

rw = tile_width / atlas_width
rh = tile_height / atlas_height
u0 = u + 0.5 * rw
v0 = v + 0.5 * rh
u1 = u0 + rw
v1 = v0 + rh
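
The same calculation in C# (a direct translation of the formulas above; the method and the Vector4 packing are just for illustration):

// using Microsoft.Xna.Framework;  (for Vector4)
static Vector4 TileUVRange(float u, float v,
                           float tileWidth, float tileHeight,
                           float atlasWidth, float atlasHeight)
{
    float rw = tileWidth / atlasWidth;
    float rh = tileHeight / atlasHeight;

    float u0 = u + 0.5f * rw;
    float v0 = v + 0.5f * rh;

    return new Vector4(u0, v0, u0 + rw, v0 + rh);  // (u0, v0, u1, v1)
}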

One of the main causes of color bleeding when using a texture atlas is mipmapping. When the mipmaps are created, the texture is downsampled and adjacent tiles are blended together, which causes artifacts. The method described above prevents this by providing a sufficient reserve of texture area around each tile.

Another reason you get artifacts when sampling the texture is texture filtering. The above method also helps with that, since there is always enough of the tile's texture surrounding any sample taken in the range (u0, v0) - (u1, v1).