3 votes

I'm working on an RGBA32 buffer (8 bits per component), and I need to multiply each component by a constant, then add the results of the multiplications together, like so:

Result = r*x + g*y + b*z + a*w (a dot product between the two vectors rgba and xyzw)

I'm trying to use Intel SSE intrinsics to speed up the process, but I don't know how to do such a thing without shuffling the input.

Is there any way to do this? Like building a register containing {x,y,z,w,x,y,z,w,x,y,z,w,x,y,z,w} and performing an 8-bit multiply with saturation?

The final objective is to multiply the RGBA vector by the corresponding color conversion matrix:

[ 66 129  25 0]   [R]
[-38 -74 112 0] * [G]
[112 -94 -18 0]   [B]
[  0   0   0 0]   [A]
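
In plain C, this is simply the following per pixel (a rough sketch of what I'd like to vectorize, with the coefficients taken from one row of the matrix above):

int dot_row(const unsigned char px[4], const int coef[4])
{
    /* px = {r, g, b, a}, coef = one row of the conversion matrix */
    return coef[0]*px[0] + coef[1]*px[1] + coef[2]*px[2] + coef[3]*px[3];
}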

Thanks

Edit 1: here's the final function, which uses floating-point math for better color precision and converts an RGBA image to YUV444 using SSE. It takes between 1.9 and 3.5 ms to convert a full-HD image on an Intel i5 3570K using a single thread (the function is easy to thread, as sketched after the code below, and that could yield a significant further speedup):

void SSE_rgba2YUV444_FP(char* a, char* y, char* u, char* v)
{
    __m128i mask = _mm_setr_epi8(0x00,0x04,0x08,0x0c, 0x01,0x05,0x09,0x0d, 0x02,0x06,0x0a,0x0e, 0x03,0x07,0x0b,0x0f); // Shuffle mask: each uint8 gives the position (as a byte offset) of the corresponding uint8 within the __m128i being shuffled
    float m[9] = {0.299, 0.587, 0.114, -0.1687, -0.3313, 0.5, 0.5, -0.4187, -0.0813};

    __m128i row[4];
    for(int i=0; i<4; i++) {
        row[i] = _mm_loadu_si128((__m128i*)&a[16*i]);
        row[i] = _mm_shuffle_epi8(row[i],mask);
    }
    // row[i] = {rrrrggggbbbbaaaa}, all as uint8_t
    __m128i t0 = _mm_unpacklo_epi32(row[0], row[1]); //t0 = {rrrrrrrrgggggggg}
    __m128i t1 = _mm_unpacklo_epi32(row[2], row[3]); //t1 = {rrrrrrrrgggggggg}
    __m128i t2 = _mm_unpackhi_epi32(row[0], row[1]); //t2 = {bbbbbbbbaaaaaaaa}
    __m128i t3 = _mm_unpackhi_epi32(row[2], row[3]); //t3 = {bbbbbbbbaaaaaaaa}
    row[0] = _mm_unpacklo_epi64(t0, t1); // row[0] = {rrrrrrrrrrrrrrrr}
    row[1] = _mm_unpackhi_epi64(t0, t1); // etc
    row[2] = _mm_unpacklo_epi64(t2, t3);

    __m128i v_lo[3], v_hi[3];
    for(int i=0; i<3; i++) {
        v_lo[i] = _mm_unpacklo_epi8(row[i],_mm_setzero_si128()); // Interleave each row with zeros, which widens the values
        v_hi[i] = _mm_unpackhi_epi8(row[i],_mm_setzero_si128()); // from 8 bits to 16 bits so we can work on them
    }

    __m128 v32_lo1[3], v32_hi1[3], v32_lo2[3], v32_hi2[3];
    for(int i=0; i<3; i++) {
        v32_lo1[i] = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_lo[i],_mm_setzero_si128()));
        v32_lo2[i] = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_lo[i],_mm_setzero_si128()));
        v32_hi1[i] = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_hi[i],_mm_setzero_si128()));
        v32_hi2[i] = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_hi[i],_mm_setzero_si128()));
    } // Now our RGB values are 32-bit floats

    __m128i yuv[3]; // {Y, U, V} 
    __m128 ylo1 = _mm_add_ps(_mm_mul_ps(v32_lo1[0], _mm_set1_ps(m[0])), _mm_add_ps(_mm_mul_ps(v32_lo1[1], _mm_set1_ps(m[1])), _mm_mul_ps(v32_lo1[2], _mm_set1_ps(m[2]))));
    __m128 ylo2 = _mm_add_ps(_mm_mul_ps(v32_lo2[0], _mm_set1_ps(m[0])), _mm_add_ps(_mm_mul_ps(v32_lo2[1], _mm_set1_ps(m[1])), _mm_mul_ps(v32_lo2[2], _mm_set1_ps(m[2]))));
    __m128 yhi1 = _mm_add_ps(_mm_mul_ps(v32_hi1[0], _mm_set1_ps(m[0])), _mm_add_ps(_mm_mul_ps(v32_hi1[1], _mm_set1_ps(m[1])), _mm_mul_ps(v32_hi1[2], _mm_set1_ps(m[2]))));
    __m128 yhi2 = _mm_add_ps(_mm_mul_ps(v32_hi2[0], _mm_set1_ps(m[0])), _mm_add_ps(_mm_mul_ps(v32_hi2[1], _mm_set1_ps(m[1])), _mm_mul_ps(v32_hi2[2], _mm_set1_ps(m[2]))));

    __m128i ylo1i = _mm_cvtps_epi32(ylo1);
    __m128i ylo2i = _mm_cvtps_epi32(ylo2);
    __m128i yhi1i = _mm_cvtps_epi32(yhi1);
    __m128i yhi2i = _mm_cvtps_epi32(yhi2);

    __m128i ylo = _mm_packus_epi32(ylo1i, ylo2i);
    __m128i yhi = _mm_packus_epi32(yhi1i, yhi2i);

    yuv[0] = _mm_packus_epi16(ylo, yhi);

    ylo1 = _mm_add_ps(_mm_add_ps(_mm_mul_ps(v32_lo1[0], _mm_set1_ps(m[3])), _mm_add_ps(_mm_mul_ps(v32_lo1[1], _mm_set1_ps(m[4])), _mm_mul_ps(v32_lo1[2], _mm_set1_ps(m[5])))), _mm_set1_ps(128.0f));
    ylo2 = _mm_add_ps(_mm_add_ps(_mm_mul_ps(v32_lo2[0], _mm_set1_ps(m[3])), _mm_add_ps(_mm_mul_ps(v32_lo2[1], _mm_set1_ps(m[4])), _mm_mul_ps(v32_lo2[2], _mm_set1_ps(m[5])))), _mm_set1_ps(128.0f));
    yhi1 = _mm_add_ps(_mm_add_ps(_mm_mul_ps(v32_hi1[0], _mm_set1_ps(m[3])), _mm_add_ps(_mm_mul_ps(v32_hi1[1], _mm_set1_ps(m[4])), _mm_mul_ps(v32_hi1[2], _mm_set1_ps(m[5])))), _mm_set1_ps(128.0f));
    yhi2 = _mm_add_ps(_mm_add_ps(_mm_mul_ps(v32_hi2[0], _mm_set1_ps(m[3])), _mm_add_ps(_mm_mul_ps(v32_hi2[1], _mm_set1_ps(m[4])), _mm_mul_ps(v32_hi2[2], _mm_set1_ps(m[5])))), _mm_set1_ps(128.0f));

    ylo1i = _mm_cvtps_epi32(ylo1);
    ylo2i = _mm_cvtps_epi32(ylo2);
    yhi1i = _mm_cvtps_epi32(yhi1);
    yhi2i = _mm_cvtps_epi32(yhi2);

    ylo = _mm_packus_epi32(ylo1i, ylo2i);
    yhi = _mm_packus_epi32(yhi1i, yhi2i);

    yuv[1] = _mm_packus_epi16(ylo, yhi);

    ylo1 = _mm_add_ps(_mm_add_ps(_mm_mul_ps(v32_lo1[0], _mm_set1_ps(m[6])), _mm_add_ps(_mm_mul_ps(v32_lo1[1], _mm_set1_ps(m[7])), _mm_mul_ps(v32_lo1[2], _mm_set1_ps(m[8])))), _mm_set1_ps(128.0f));
    ylo2 = _mm_add_ps(_mm_add_ps(_mm_mul_ps(v32_lo2[0], _mm_set1_ps(m[6])), _mm_add_ps(_mm_mul_ps(v32_lo2[1], _mm_set1_ps(m[7])), _mm_mul_ps(v32_lo2[2], _mm_set1_ps(m[8])))), _mm_set1_ps(128.0f));
    yhi1 = _mm_add_ps(_mm_add_ps(_mm_mul_ps(v32_hi1[0], _mm_set1_ps(m[6])), _mm_add_ps(_mm_mul_ps(v32_hi1[1], _mm_set1_ps(m[7])), _mm_mul_ps(v32_hi1[2], _mm_set1_ps(m[8])))), _mm_set1_ps(128.0f));
    yhi2 = _mm_add_ps(_mm_add_ps(_mm_mul_ps(v32_hi2[0], _mm_set1_ps(m[6])), _mm_add_ps(_mm_mul_ps(v32_hi2[1], _mm_set1_ps(m[7])), _mm_mul_ps(v32_hi2[2], _mm_set1_ps(m[8])))), _mm_set1_ps(128.0f));

    ylo1i = _mm_cvtps_epi32(ylo1);
    ylo2i = _mm_cvtps_epi32(ylo2);
    yhi1i = _mm_cvtps_epi32(yhi1);
    yhi2i = _mm_cvtps_epi32(yhi2);

    ylo = _mm_packus_epi32(ylo1i, ylo2i);
    yhi = _mm_packus_epi32(yhi1i, yhi2i);

    yuv[2] = _mm_packus_epi16(ylo, yhi);

    _mm_storeu_si128((__m128i*)y,yuv[0]);
    _mm_storeu_si128((__m128i*)u,yuv[1]);
    _mm_storeu_si128((__m128i*)v,yuv[2]);
}
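
As mentioned above, threading this is straightforward because every call converts an independent block of 16 pixels. A minimal sketch with OpenMP, assuming a tightly packed full-HD RGBA buffer and separate Y, U and V planes (the pixel count is a multiple of 16, so no tail handling is shown; rgba2YUV444_FP_threaded is just an illustrative name):

#include <omp.h>

void rgba2YUV444_FP_threaded(char* rgba, char* y, char* u, char* v, int width, int height)
{
    const int nblocks = (width * height) / 16;  // each SSE_rgba2YUV444_FP call handles 16 pixels
    #pragma omp parallel for
    for (int b = 0; b < nblocks; b++)
        SSE_rgba2YUV444_FP(rgba + 64*b, y + 16*b, u + 16*b, v + 16*b);
}
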
Z boson: I changed my answer to a more efficient version which uses _mm_maddubs_epi16.
Z boson: @PaulR, I answered the question using many of your comments. If you have time, do you think you could look it over and let me know if you have any suggestions? I don't have a lot of experience with 8-bit integers in SSE/AVX.
Paul R: @Zboson: looks good - it would be interesting to benchmark it against scalar code to see what sort of factor improvement you're getting.
Z boson: @PaulR, I originally wanted to do this without horizontal operators, by doing an AoS-to-SoA transpose and calculating u and v as well. But I think it's hard to beat _mm_maddubs_epi16: you get the multiplication, one of the two additions, and the 8-bit to 16-bit conversion for the price of one instruction. I have not checked the instruction tables. I know that _mm_hadd_epi32 should be avoided, but in this case I think the horizontal operators are the best solution.
Z boson: @PaulR, I fixed the problems. One was in the formula I compared against from another link. Another was using packs instead of packus, and the third was even more subtle: subtracting more than 128 was causing overflow below -32768. Using 64 instead fixed this (and I proved it always gets the right result). If you want to know more, see my edits.

2 Answers

3 votes

Here is a solution which finds Y, U, and V all at once and uses only vertical operators.

To do this I first transpose four pixels like this:

rgbargbargbargba -> rrrrggggbbbbaaaa

using the intrinsic _mm_shuffle_epi8 with a mask. I do this to 16 pixels and then transpose them again

from

row[0] : rrrrggggbbbbaaaa
row[1] : rrrrggggbbbbaaaa
row[2] : rrrrggggbbbbaaaa
row[3] : rrrrggggbbbbaaaa

to

row[0] : rrrrrrrrrrrrrrrr    
row[1] : gggggggggggggggg    
row[2] : bbbbbbbbbbbbbbbb

This is done the same way as transposing a 4x4 integer matrix like this:

__m128i t0 = _mm_unpacklo_epi32(row[0], row[1]);
__m128i t1 = _mm_unpacklo_epi32(row[2], row[3]);
__m128i t2 = _mm_unpackhi_epi32(row[0], row[1]);
__m128i t3 = _mm_unpackhi_epi32(row[2], row[3]);
row[0] = _mm_unpacklo_epi64(t0, t1);
row[1] = _mm_unpackhi_epi64(t0, t1);
row[2] = _mm_unpacklo_epi64(t2, t3);
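
(For reference, the first rgbargbargbargba -> rrrrggggbbbbaaaa step is the _mm_shuffle_epi8 with this mask, exactly as in rgba2yuv_SSE further down:)

__m128i mask = _mm_setr_epi8(0x00,0x04,0x08,0x0c, 0x01,0x05,0x09,0x0d, 0x02,0x06,0x0a,0x0e, 0x03,0x07,0x0b,0x0f);
for(int i=0; i<4; i++) {
    row[i] = _mm_loadu_si128((__m128i*)&a[16*i]);
    row[i] = _mm_shuffle_epi8(row[i],mask);
}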

Now I split each row into high and low halves and widen to 16 bits like this:

__m128i v_lo[3], v_hi[3];
for(int i=0; i<3; i++) {
    v_lo[i] = _mm_unpacklo_epi8(row[i],_mm_setzero_si128());
    v_hi[i] = _mm_unpackhi_epi8(row[i],_mm_setzero_si128());
}

Finally, I calculate Y, U, and V like this:

short m[9] = {66, 129, 25, -38, -74, 112, 112, -94, -18};
__m128i yuv[3];
for(int i=0; i<3; i++) {
    __m128i yuv_lo, yuv_hi;
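    // multiply the r, g, b planes by row i of the coefficient matrix and accumulate (16-bit lanes)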
    yuv_lo = _mm_add_epi16(_mm_add_epi16(
                   _mm_mullo_epi16(v_lo[0], _mm_set1_epi16(m[3*i+0])),
                   _mm_mullo_epi16(v_lo[1], _mm_set1_epi16(m[3*i+1]))),
                   _mm_mullo_epi16(v_lo[2], _mm_set1_epi16(m[3*i+2])));
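    // fixed-point rounding and offset: (sum + 128) >> 8, then + 16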
    yuv_lo = _mm_add_epi16(yuv_lo, _mm_set1_epi16(128));
    yuv_lo = _mm_srli_epi16(yuv_lo, 8);
    yuv_lo = _mm_add_epi16(yuv_lo, _mm_set1_epi16(16));

    yuv_hi = _mm_add_epi16(_mm_add_epi16(
                   _mm_mullo_epi16(v_hi[0], _mm_set1_epi16(m[3*i+0])),
                   _mm_mullo_epi16(v_hi[1], _mm_set1_epi16(m[3*i+1]))),
                   _mm_mullo_epi16(v_hi[2], _mm_set1_epi16(m[3*i+2])));
    yuv_hi = _mm_add_epi16(yuv_hi, _mm_set1_epi16(128));
    yuv_hi = _mm_srli_epi16(yuv_hi, 8);
    yuv_hi = _mm_add_epi16(yuv_hi, _mm_set1_epi16(16));

    yuv[i] = _mm_packus_epi16(yuv_lo,yuv_hi);
}
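
For the Y row this matches the usual scalar fixed-point reference (a sketch only; same +128 rounding, >>8 shift and +16 offset as above, while the U and V rows use the other two coefficient triples):

#include <stdint.h>

/* Scalar reference for one pixel's Y value, mirroring the 16-bit lanes above. */
static inline uint8_t ref_y(uint8_t r, uint8_t g, uint8_t b)
{
    return (uint8_t)(((66*r + 129*g + 25*b + 128) >> 8) + 16);
}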

For a working example of this code see my first answer and the function rgba2yuv_SSE.

3 votes

Here is a solution based on the comments by the OP and Paul R. The intrinsic _mm_maddubs_epi16 requires the second parameter to be signed, which is a problem for the 129 coefficient of g (it doesn't fit in a signed 8-bit value). However, we can get around this by rewriting the sum:

y = ((66-64)*r + (129-64)*g + (25-64)*b + (0-64)*a) + (64*r + 64*g + 64*b + 64*a)
  = (2*r + 65*g - 39*b - 64*a) + 64*(r + g + b + a)

(Check: 2+64 = 66, 65+64 = 129, -39+64 = 25, -64+64 = 0, so the two forms agree.)

Using this we only need 16-bit intermediates and we can calculate 16 y bytes at a time. For one pixel {r,g,b,a}, _mm_maddubs_epi16 with yk gives the two 16-bit sums 2*r + 65*g and -39*b - 64*a, a second _mm_maddubs_epi16 with all-ones gives r+g and b+a, the shift by 6 turns those into 64*(r+g) and 64*(b+a), and _mm_hadd_epi16 then adds the two halves of each pixel together:

Note that I originally used an offset of 128, but that overflows: the worst-case pair sum would be 255*((25-128) + (0-128)) = -58905, which is below -32768. With 64 the worst case is 255*((25-64) + (0-64)) = -26265, which fits in a signed 16-bit result.

__m128i yk = _mm_set1_epi32(0xc0d94102); // per-pixel coefficients 2, 65, -39, -64 (r, g, b, a)
__m128i y4[4];
for(int i=0; i<4; i++) {
    __m128i a4 = _mm_loadu_si128((__m128i*)&a[16*i]);
    __m128i t1 = _mm_maddubs_epi16(a4, yk);
    __m128i t2 = _mm_maddubs_epi16(a4, _mm_set1_epi8(1));
    t2 = _mm_slli_epi16(t2, 6);  //multiply by 64
    y4[i] = _mm_add_epi16(t1,t2);
}
short tmp[8];
_mm_storeu_si128((__m128i*)tmp, y4[0]);
__m128i y8_lo = _mm_hadd_epi16(y4[0], y4[1]);
__m128i y8_hi = _mm_hadd_epi16(y4[2], y4[3]);

y8_lo = _mm_add_epi16(y8_lo, _mm_set1_epi16(128));
y8_lo = _mm_srli_epi16(y8_lo, 8);
y8_lo = _mm_add_epi16(y8_lo, _mm_set1_epi16(16));

y8_hi = _mm_add_epi16(y8_hi, _mm_set1_epi16(128));
y8_hi = _mm_srli_epi16(y8_hi, 8);
y8_hi = _mm_add_epi16(y8_hi, _mm_set1_epi16(16));

__m128i y16 = _mm_packus_epi16(y8_lo,y8_hi);

Here is code showing this works. I compared the result with the formula (with modifications) from "how to perform rgb yuv conversion in C/C++", which is:

#define CLIP(X) ( (X) > 255 ? 255 : (X) < 0 ? 0 : X)
#define RGB2Y(R, G, B) CLIP(( (  66 * (0xff & R) + 129 * (0xff & G) +  25 * (0xff & B) + 128) >> 8) +  16)

The code:

#include <stdio.h>
#include <x86intrin.h>
#include <stdlib.h>

#define CLIP(X) ( (X) > 255 ? 255 : (X) < 0 ? 0 : X)
#define RGB2Y(R, G, B) CLIP(( (  66 * (0xff & R) + 129 * (0xff & G) +  25 * (0xff & B) + 128) >> 8) +  16)

void rgba2y_SSE_v1(char *a, char *b) {
    __m128i yk = _mm_setr_epi16(66,129,25,0, 66,129,25,0);
    __m128i out[4];
    for(int i=0; i<4; i++) {        
        __m128i a4, lo, hi;
        a4 = _mm_loadu_si128((__m128i*)&a[16*i]);
        lo = _mm_unpacklo_epi8(a4,_mm_setzero_si128());
        hi = _mm_unpackhi_epi8(a4,_mm_setzero_si128());

        lo = _mm_madd_epi16(lo,yk);
        lo = _mm_hadd_epi32(lo,lo);

        hi  = _mm_madd_epi16(hi,yk);
        hi  = _mm_hadd_epi32(hi,hi);
        out[i] = _mm_unpackhi_epi64(lo,hi);
    }
    __m128i out_lo = _mm_packus_epi32(out[0], out[1]);
    __m128i out_hi = _mm_packus_epi32(out[2], out[3]);

    out_lo = _mm_add_epi16(out_lo, _mm_set1_epi16(128));
    out_lo = _mm_srli_epi16(out_lo, 8);
    out_lo = _mm_add_epi16(out_lo, _mm_set1_epi16(16)); 

    out_hi = _mm_add_epi16(out_hi, _mm_set1_epi16(128));
    out_hi = _mm_srli_epi16(out_hi, 8);
    out_hi = _mm_add_epi16(out_hi, _mm_set1_epi16(16)); 

    __m128i y16 = _mm_packus_epi16(out_lo,out_hi);
    _mm_storeu_si128((__m128i*)b,y16);
}

void rgba2y_SSE_v2(char *a, char *b) {
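    // yk bytes, low byte first: 2, 65, -39, -64 (the r, g, b, a coefficients from the rewrite above)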
    __m128i yk = _mm_set1_epi32(0xc0d94102);
    __m128i y4[4];
    for(int i=0; i<4; i++) {
        __m128i a4 = _mm_loadu_si128((__m128i*)&a[16*i]);
        __m128i t1 = _mm_maddubs_epi16(a4, yk);
        __m128i t2 = _mm_maddubs_epi16(a4, _mm_set1_epi8(1));
        t2 = _mm_slli_epi16(t2, 6);
        y4[i] = _mm_add_epi16(t1,t2); 
    } 
    short tmp[8];
    _mm_storeu_si128((__m128i*)tmp, y4[0]);
    __m128i y8_lo = _mm_hadd_epi16(y4[0], y4[1]);
    __m128i y8_hi = _mm_hadd_epi16(y4[2], y4[3]);

    y8_lo = _mm_add_epi16(y8_lo, _mm_set1_epi16(128));
    y8_lo = _mm_srli_epi16(y8_lo, 8);
    y8_lo = _mm_add_epi16(y8_lo, _mm_set1_epi16(16)); 

    y8_hi = _mm_add_epi16(y8_hi, _mm_set1_epi16(128));
    y8_hi = _mm_srli_epi16(y8_hi, 8);
    y8_hi = _mm_add_epi16(y8_hi, _mm_set1_epi16(16)); 

    __m128i y16 = _mm_packus_epi16(y8_lo,y8_hi);
    _mm_storeu_si128((__m128i*)b,y16);
}

void rgba2yuv_SSE(char *a, char *b) {
    __m128i mask = _mm_setr_epi8(0x00,0x04,0x08,0x0c, 0x01,0x05,0x09,0x0d, 0x02,0x06,0x0a,0x0e, 0x03,0x07,0x0b,0x0f);
    short m[9] = {66, 129, 25, -38, -74, 112, 112, -94, -18};

    __m128i row[4];
    for(int i=0; i<4; i++) {
        row[i] = _mm_loadu_si128((__m128i*)&a[16*i]);
        row[i] = _mm_shuffle_epi8(row[i],mask);
    }
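    // each row[i] is now rrrrggggbbbbaaaa for its 4 pixels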

    __m128i t0 = _mm_unpacklo_epi32(row[0], row[1]);
    __m128i t1 = _mm_unpacklo_epi32(row[2], row[3]);
    __m128i t2 = _mm_unpackhi_epi32(row[0], row[1]);
    __m128i t3 = _mm_unpackhi_epi32(row[2], row[3]);
    row[0] = _mm_unpacklo_epi64(t0, t1);
    row[1] = _mm_unpackhi_epi64(t0, t1);
    row[2] = _mm_unpacklo_epi64(t2, t3);
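    // row[0] = 16 r's, row[1] = 16 g's, row[2] = 16 b's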

    __m128i v_lo[3], v_hi[3];
    for(int i=0; i<3; i++) {
        v_lo[i] = _mm_unpacklo_epi8(row[i],_mm_setzero_si128());
        v_hi[i] = _mm_unpackhi_epi8(row[i],_mm_setzero_si128());
    }
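    // v_lo/v_hi now hold the low/high 8 pixels of each plane, zero-extended to 16 bits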

    __m128i yuv[3];
    for(int i=0; i<3; i++) {
        __m128i yuv_lo, yuv_hi;
        yuv_lo = _mm_add_epi16(_mm_add_epi16(
                       _mm_mullo_epi16(v_lo[0], _mm_set1_epi16(m[3*i+0])),
                       _mm_mullo_epi16(v_lo[1], _mm_set1_epi16(m[3*i+1]))),
                       _mm_mullo_epi16(v_lo[2], _mm_set1_epi16(m[3*i+2])));
        yuv_lo = _mm_add_epi16(yuv_lo, _mm_set1_epi16(128));
        yuv_lo = _mm_srli_epi16(yuv_lo, 8);
        yuv_lo = _mm_add_epi16(yuv_lo, _mm_set1_epi16(16)); 

        yuv_hi = _mm_add_epi16(_mm_add_epi16(
                       _mm_mullo_epi16(v_hi[0], _mm_set1_epi16(m[3*i+0])),
                       _mm_mullo_epi16(v_hi[1], _mm_set1_epi16(m[3*i+1]))),
                       _mm_mullo_epi16(v_hi[2], _mm_set1_epi16(m[3*i+2])));
        yuv_hi = _mm_add_epi16(yuv_hi, _mm_set1_epi16(128));
        yuv_hi = _mm_srli_epi16(yuv_hi, 8);
        yuv_hi = _mm_add_epi16(yuv_hi, _mm_set1_epi16(16)); 

        yuv[i] = _mm_packus_epi16(yuv_lo,yuv_hi);
    }   
    _mm_storeu_si128((__m128i*)b,yuv[0]);
}



int main(void) {
    char rgba[64];
    char y1[16], y2[16], yuv[48];
    for(int i=0; i<64; i++) rgba[i] = rand()%256;
    rgba2y_SSE_v1(rgba,y1);
    rgba2y_SSE_v2(rgba,y2);
    rgba2yuv_SSE(rgba,yuv);

    printf("RGB2Y: "); for(int i=0; i<16; i++) printf("%x ", 0xff & RGB2Y(rgba[4*i+0], rgba[4*i+1], rgba[4*i+2])); printf("\n");
    printf("SSE_v1 "); for(int i=0; i<16; i++) printf("%x ", 0xff & y1[i]); printf("\n");
    printf("SSE_v2 "); for(int i=0; i<16; i++) printf("%x ", 0xff & y2[i]); printf("\n");
    printf("SSE_v3 "); for(int i=0; i<16; i++) printf("%x ", 0xff & yuv[i]); printf("\n");

}
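
Built and run with something like this (the file name is only an example; any setting that enables SSSE3 and SSE4.1 works, e.g. -march=native):

gcc -O2 -mssse3 -msse4.1 rgba2y_test.c -o rgba2y_test
./rgba2y_test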

The output:

RGB2Y: 99 ad 94 e3 9a a2 60 81 45 59 49 a5 aa 9b 60 4d 
SSE_v1 99 ad 94 e3 9a a2 60 81 45 59 49 a5 aa 9b 60 4d 
SSE_v2 99 ad 94 e3 9a a2 60 81 45 59 49 a5 aa 9b 60 4d 
SSE_v3 99 ad 94 e3 9a a2 60 81 45 59 49 a5 aa 9b 60 4d