#ifndef QDRAWINGPRIMITIVE_SSE2_P_H
#define QDRAWINGPRIMITIVE_SSE2_P_H

#include <private/qsimd_p.h>

#ifdef QT_HAVE_SSE2

QT_BEGIN_NAMESPACE

/* Multiply the components of pixelVector by alphaChannel.
   Each 32-bit lane of alphaChannel must be of the form 0x00AA00AA,
   colorMask must hold 0x00ff00ff in each 32-bit lane, and
   half must hold 0x0080 in each 16-bit lane. */
#define BYTE_MUL_SSE2(result, pixelVector, alphaChannel, colorMask, half) \
{ \
    /* 1. separate the colors into two vectors so each channel is on 16 bits: \
          each 32 bits of pixelVectorAG is 0x00AA00GG, of pixelVectorRB is 0x00RR00BB */ \
    __m128i pixelVectorAG = _mm_srli_epi16(pixelVector, 8); \
    __m128i pixelVectorRB = _mm_and_si128(pixelVector, colorMask); \
 \
    /* 2. multiply the vectors by the alpha channel */ \
    pixelVectorAG = _mm_mullo_epi16(pixelVectorAG, alphaChannel); \
    pixelVectorRB = _mm_mullo_epi16(pixelVectorRB, alphaChannel); \
 \
    /* 3. divide by 255 using the approximation x/255 ~= (x + x/256 + 0x80) / 256 */ \
    pixelVectorRB = _mm_add_epi16(pixelVectorRB, _mm_srli_epi16(pixelVectorRB, 8)); \
    pixelVectorRB = _mm_add_epi16(pixelVectorRB, half); \
    pixelVectorAG = _mm_add_epi16(pixelVectorAG, _mm_srli_epi16(pixelVectorAG, 8)); \
    pixelVectorAG = _mm_add_epi16(pixelVectorAG, half); \
 \
    /* divide RB by 256; for AG, clearing the low byte of each 16-bit lane \
       does the >>8 and the repositioning <<8 in a single step */ \
    pixelVectorRB = _mm_srli_epi16(pixelVectorRB, 8); \
    pixelVectorAG = _mm_andnot_si128(colorMask, pixelVectorAG); \
 \
    result = _mm_or_si128(pixelVectorAG, pixelVectorRB); \
}

/* Interpolate the pixels of srcVector and dstVector:
   result = (srcVector * alphaChannel + dstVector * oneMinusAlphaChannel) / 255 */
#define INTERPOLATE_PIXEL_255_SSE2(result, srcVector, dstVector, alphaChannel, oneMinusAlphaChannel, colorMask, half) { \
    /* interpolate AG */ \
    __m128i srcVectorAG = _mm_srli_epi16(srcVector, 8); \
    __m128i dstVectorAG = _mm_srli_epi16(dstVector, 8); \
    __m128i srcVectorAGalpha = _mm_mullo_epi16(srcVectorAG, alphaChannel); \
    __m128i dstVectorAGoneMinusAlphalpha = _mm_mullo_epi16(dstVectorAG, oneMinusAlphaChannel); \
    __m128i finalAG = _mm_add_epi16(srcVectorAGalpha, dstVectorAGoneMinusAlphalpha); \
    finalAG = _mm_add_epi16(finalAG, _mm_srli_epi16(finalAG, 8)); \
    finalAG = _mm_add_epi16(finalAG, half); \
    finalAG = _mm_andnot_si128(colorMask, finalAG); \
 \
    /* interpolate RB */ \
    __m128i srcVectorRB = _mm_and_si128(srcVector, colorMask); \
    __m128i dstVectorRB = _mm_and_si128(dstVector, colorMask); \
    __m128i srcVectorRBalpha = _mm_mullo_epi16(srcVectorRB, alphaChannel); \
    __m128i dstVectorRBoneMinusAlphalpha = _mm_mullo_epi16(dstVectorRB, oneMinusAlphaChannel); \
    __m128i finalRB = _mm_add_epi16(srcVectorRBalpha, dstVectorRBoneMinusAlphalpha); \
    finalRB = _mm_add_epi16(finalRB, _mm_srli_epi16(finalRB, 8)); \
    finalRB = _mm_add_epi16(finalRB, half); \
    finalRB = _mm_srli_epi16(finalRB, 8); \
 \
    /* combine the two channels */ \
    result = _mm_or_si128(finalAG, finalRB); \
}

/* Blend the pixels of src over dst, four pixels at a time.
   The vector constants nullVector, half, one, colorMask and alphaMask must be
   set up by the caller (see the sketch after this macro). */
#define BLEND_SOURCE_OVER_ARGB32_SSE2(dst, src, length, nullVector, half, one, colorMask, alphaMask) { \
    int x = 0; \
 \
    /* First, blend one pixel at a time until dst is 16-byte aligned. */ \
    ALIGNMENT_PROLOGUE_16BYTES(dst, x, length) { \
        quint32 s = src[x]; \
        if (s >= 0xff000000) \
            dst[x] = s; \
        else if (s != 0) \
            dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s)); \
    } \
 \
    for (; x < length-3; x += 4) { \
        const __m128i srcVector = _mm_loadu_si128((__m128i *)&src[x]); \
        const __m128i srcVectorAlpha = _mm_and_si128(srcVector, alphaMask); \
        if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVectorAlpha, alphaMask)) == 0xffff) { \
            /* all four pixels are fully opaque: plain copy */ \
            _mm_store_si128((__m128i *)&dst[x], srcVector); \
        } else if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVectorAlpha, nullVector)) != 0xffff) { \
            /* not all four pixels are fully transparent: blend */ \
            /* extract the alpha of each pixel into the 0x00AA00AA form \
               and compute (255 - alpha), so BYTE_MUL_SSE2 yields dst * (1 - alpha) */ \
            __m128i alphaChannel = _mm_srli_epi32(srcVector, 24); \
            alphaChannel = _mm_or_si128(alphaChannel, _mm_slli_epi32(alphaChannel, 16)); \
            alphaChannel = _mm_sub_epi16(one, alphaChannel); \
 \
            const __m128i dstVector = _mm_load_si128((__m128i *)&dst[x]); \
            __m128i destMultipliedByOneMinusAlpha; \
            BYTE_MUL_SSE2(destMultipliedByOneMinusAlpha, dstVector, alphaChannel, colorMask, half); \
 \
            /* result = src + dst * (1 - alpha) */ \
            const __m128i result = _mm_add_epi8(srcVector, destMultipliedByOneMinusAlpha); \
            _mm_store_si128((__m128i *)&dst[x], result); \
        } \
    } \
 \
    /* Epilogue: blend the remaining (at most three) pixels one at a time. */ \
    for (; x < length; ++x) { \
        quint32 s = src[x]; \
        if (s >= 0xff000000) \
            dst[x] = s; \
        else if (s != 0) \
            dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s)); \
    } \
}
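/* A minimal usage sketch: one way a caller could set up the vector constants
   that BLEND_SOURCE_OVER_ARGB32_SSE2 expects for a span of ARGB32
   premultiplied pixels. The function name and signature below are
   illustrative only, not part of this header.

       void blendSourceOverSpan(quint32 *dst, const quint32 *src, int length)
       {
           const __m128i nullVector = _mm_set1_epi32(0);          // all-transparent comparand
           const __m128i half       = _mm_set1_epi16(0x80);       // rounding term for /255
           const __m128i one        = _mm_set1_epi16(0xff);       // 255 in each 16-bit lane
           const __m128i colorMask  = _mm_set1_epi32(0x00ff00ff); // selects R and B bytes
           const __m128i alphaMask  = _mm_set1_epi32(0xff000000); // selects the alpha byte
           BLEND_SOURCE_OVER_ARGB32_SSE2(dst, src, length,
                                         nullVector, half, one, colorMask, alphaMask);
       }
*/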
/* Blend the pixels of src over dst, with the source first multiplied by a
   constant alpha. An integer named const_alpha (0..255) must be in scope at
   the point of use, and constAlphaVector must hold the same value in the
   0x00AA00AA form in each 32-bit lane. */
#define BLEND_SOURCE_OVER_ARGB32_WITH_CONST_ALPHA_SSE2(dst, src, length, nullVector, half, one, colorMask, constAlphaVector) \
{ \
    int x = 0; \
 \
    /* First, blend one pixel at a time until dst is 16-byte aligned. */ \
    ALIGNMENT_PROLOGUE_16BYTES(dst, x, length) { \
        quint32 s = src[x]; \
        if (s != 0) { \
            s = BYTE_MUL(s, const_alpha); \
            dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s)); \
        } \
    } \
 \
    for (; x < length-3; x += 4) { \
        __m128i srcVector = _mm_loadu_si128((__m128i *)&src[x]); \
        if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVector, nullVector)) != 0xffff) { \
            /* not all four pixels are transparent: multiply src by the constant \
               alpha, then blend the result over dst as in the macro above */ \
            BYTE_MUL_SSE2(srcVector, srcVector, constAlphaVector, colorMask, half); \
 \
            __m128i alphaChannel = _mm_srli_epi32(srcVector, 24); \
            alphaChannel = _mm_or_si128(alphaChannel, _mm_slli_epi32(alphaChannel, 16)); \
            alphaChannel = _mm_sub_epi16(one, alphaChannel); \
 \
            const __m128i dstVector = _mm_load_si128((__m128i *)&dst[x]); \
            __m128i destMultipliedByOneMinusAlpha; \
            BYTE_MUL_SSE2(destMultipliedByOneMinusAlpha, dstVector, alphaChannel, colorMask, half); \
 \
            /* result = src * const_alpha + dst * (1 - alpha) */ \
            const __m128i result = _mm_add_epi8(srcVector, destMultipliedByOneMinusAlpha); \
            _mm_store_si128((__m128i *)&dst[x], result); \
        } \
    } \
 \
    /* Epilogue: blend the remaining (at most three) pixels one at a time. */ \
    for (; x < length; ++x) { \
        quint32 s = src[x]; \
        if (s != 0) { \
            s = BYTE_MUL(s, const_alpha); \
            dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s)); \
        } \
    } \
}
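/* A minimal usage sketch for the constant-alpha variant. The macro reads an
   integer named const_alpha directly, so it must exist in the calling scope.
   The function name is illustrative only, and the (alpha * 255) >> 8
   rescaling is an assumption for an opacity arriving in the 0..256 range.

       void blendSourceOverSpanWithConstAlpha(quint32 *dst, const quint32 *src,
                                              int length, int alpha) // alpha in 0..256
       {
           const __m128i nullVector = _mm_set1_epi32(0);
           const __m128i half       = _mm_set1_epi16(0x80);
           const __m128i one        = _mm_set1_epi16(0xff);
           const __m128i colorMask  = _mm_set1_epi32(0x00ff00ff);
           const int const_alpha    = (alpha * 255) >> 8;       // map 0..256 to 0..255
           const __m128i constAlphaVector = _mm_set1_epi16(const_alpha); // 0x00AA00AA per pixel
           BLEND_SOURCE_OVER_ARGB32_WITH_CONST_ALPHA_SSE2(dst, src, length,
                   nullVector, half, one, colorMask, constAlphaVector);
       }
*/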
QT_END_NAMESPACE

#endif // QT_HAVE_SSE2

#endif // QDRAWINGPRIMITIVE_SSE2_P_H