Sunday, April 24, 2016

SSE vector wrapper type performance compared to pure __m128

I found an interesting Gamasutra article about SIMD pitfalls, which states that wrapper types cannot reach the performance of the "pure" __m128 type. I was skeptical, so I downloaded the project files and put together a comparable test case.

It turned out (to my surprise) that the wrapper version is indeed significantly slower. Since I don't want to argue from thin air, here are the test cases:

In the 1st case, Vec4 is a simple alias of the __m128 type with some operators:

#include <xmmintrin.h>
#include <emmintrin.h>

using Vec4 = __m128;

inline __m128 VLoad(float f)
{
    return _mm_set_ps(f, f, f, f);
};

inline Vec4& operator+=(Vec4 &va, Vec4 vb)
{
    return (va = _mm_add_ps(va, vb));
};

inline Vec4& operator*=(Vec4 &va, Vec4 vb)
{
    return (va = _mm_mul_ps(va, vb));
};

inline Vec4 operator+(Vec4 va, Vec4 vb)
{
    return _mm_add_ps(va, vb);
};

inline Vec4 operator-(Vec4 va, Vec4 vb)
{
    return _mm_sub_ps(va, vb);
};

inline Vec4 operator*(Vec4 va, Vec4 vb)
{
    return _mm_mul_ps(va, vb);
};
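
Since this Vec4 is literally __m128, it can be handed to any SSE intrinsic directly; a trivial illustration (the function name is made up for the example):

// Illustration only, not part of the benchmark.
inline Vec4 RsqrtExample()
{
    Vec4 v = VLoad(2.0f);
    return _mm_rsqrt_ps(v);   // Vec4 is __m128, so no conversion is involved
}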

In the 2nd case, Vec4 is a lightweight wrapper around __m128. It is not a complete wrapper, just a short sketch that reproduces the issue. The operators wrap exactly the same intrinsics; the only difference is that they take Vec4 by const reference, since the compiler does not accept the 16-byte-aligned struct as a by-value argument:

#include <xmmintrin.h>
#include <emmintrin.h>

struct Vec4
{
    __m128 simd;

    inline Vec4() = default;
    inline Vec4(const Vec4&) = default;
    inline Vec4& operator=(const Vec4&) = default;

    inline Vec4(__m128 s)
        : simd(s)
    {}

    inline operator __m128() const
    {
        return simd;
    }

    inline operator __m128&()
    {
        return simd;
    }

    inline operator const __m128&() const
    {
        return simd;
    }   
};

inline __m128 VLoad(float f)
{
    return _mm_set_ps(f, f, f, f);
};

inline Vec4& operator+=(Vec4 &va, const Vec4 &vb)
{
    return (va = _mm_add_ps(va.simd, vb.simd));
};

inline Vec4& operator*=(Vec4 &va, const Vec4 &vb)
{
    return (va = _mm_mul_ps(va.simd, vb.simd));
};

inline Vec4 operator+(const Vec4 &va, const Vec4 &vb)
{
    return _mm_add_ps(va.simd, vb.simd);
};

inline Vec4 operator-(const Vec4 &va, const Vec4 &vb)
{
    return _mm_sub_ps(va.simd, vb.simd);
};

inline Vec4 operator*(const Vec4 &va, const Vec4 &vb)
{
    return _mm_mul_ps(va.simd, vb.simd);
};
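
The conversion operators are there so that this Vec4 can still be mixed freely with raw intrinsics; a small illustration (again, the function name is made up):

// Illustration only, not part of the benchmark.
inline Vec4 SqrtExample()
{
    Vec4 v = VLoad(2.0f);
    __m128 raw = _mm_sqrt_ps(v);   // implicit conversion to __m128 feeds the intrinsic
    Vec4 back = raw;               // the converting Vec4(__m128) constructor
    v += back;                     // operator+= from above
    return v;
}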

And here is the test kernel, which performs differently depending on which version of Vec4 is used:

#include <xmmintrin.h>
#include <emmintrin.h>
#include "vmath.h"

using namespace VMATH;

struct EQSTATE
{
    // Filter #1 (Low band)

    Vec4  lf;       // Frequency
    Vec4  f1p0;     // Poles ...
    Vec4  f1p1;     
    Vec4  f1p2;
    Vec4  f1p3;

    // Filter #2 (High band)

    Vec4  hf;       // Frequency
    Vec4  f2p0;     // Poles ...
    Vec4  f2p1;
    Vec4  f2p2;
    Vec4  f2p3;

    // Sample history buffer

    Vec4  sdm1;     // Sample data minus 1
    Vec4  sdm2;     //                   2
    Vec4  sdm3;     //                   3

    // Gain Controls

    Vec4  lg;       // low  gain
    Vec4  mg;       // mid  gain
    Vec4  hg;       // high gain

};  

static const float cPi = 3.1415926535897932384626433832795f;
static float vsaf = (1.0f / 4294967295.0f);   // Very small amount (Denormal Fix)
static Vec4 vsa = VLoad(vsaf);
static Vec4 M_PI = VLoad(cPi);

Vec4 TestEQ(EQSTATE* es, Vec4& sample)
{
    // Locals

    Vec4  l,m,h;      // Low / Mid / High - Sample Values

    // Filter #1 (lowpass)

    es->f1p0  += (es->lf * (sample   - es->f1p0)) + vsa;
    es->f1p1  += (es->lf * (es->f1p0 - es->f1p1));
    es->f1p2  += (es->lf * (es->f1p1 - es->f1p2));
    es->f1p3  += (es->lf * (es->f1p2 - es->f1p3));

    l          = es->f1p3;

    // Filter #2 (highpass)

    es->f2p0  += (es->hf * (sample   - es->f2p0)) + vsa;
    es->f2p1  += (es->hf * (es->f2p0 - es->f2p1));
    es->f2p2  += (es->hf * (es->f2p1 - es->f2p2));
    es->f2p3  += (es->hf * (es->f2p2 - es->f2p3));

    h          = es->sdm3 - es->f2p3;

    // Calculate midrange (signal - (low + high))

    m          = es->sdm3 - (h + l);

    // Scale, Combine and store

    l         *= es->lg;
    m         *= es->mg;
    h         *= es->hg;

    // Shuffle history buffer 

    es->sdm3   = es->sdm2;
    es->sdm2   = es->sdm1;
    es->sdm1   = sample;                

    // Return result

    return l + m + h;
}

// make these globals so the compiler cannot optimize the calls away
static Vec4 sample[1024], result[1024];
static EQSTATE es;

#include <chrono>
#include <iostream>

int main()
{
    auto t0 = std::chrono::high_resolution_clock::now();

    for (int ii=0; ii<1024; ii++)
    {
        result[ii] = TestEQ(&es, sample[ii]);
    }

    auto t1 = std::chrono::high_resolution_clock::now();
    auto t = std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0).count();
    std::cout << "timing: " << t << '\n';

    std::cin.get();

    return 0;
}
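
As an aside, a single pass of 1024 calls timed once in nanoseconds is fairly noisy; a sketch of a steadier harness (same globals and headers, just repeating the pass and keeping the best run) could look like this:

int main()
{
    using clock = std::chrono::high_resolution_clock;
    long long best = -1;

    // Repeat the 1024-call pass many times and keep the fastest run,
    // which filters out most of the scheduling noise on such a short measurement.
    for (int run = 0; run < 1000; ++run)
    {
        auto t0 = clock::now();

        for (int ii = 0; ii < 1024; ii++)
        {
            result[ii] = TestEQ(&es, sample[ii]);
        }

        auto t1 = clock::now();
        auto t  = std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0).count();

        if (best < 0 || t < best)
            best = t;
    }

    std::cout << "best timing: " << best << '\n';
    std::cin.get();

    return 0;
}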

The above kernel with the 1st version of Vec4 produces the following assembler output:

;   COMDAT ?TestEQ@@YA?AT__m128@@PAUEQSTATE@@AAT1@@Z
_TEXT   SEGMENT
?TestEQ@@YA?AT__m128@@PAUEQSTATE@@AAT1@@Z PROC      ; TestEQ, COMDAT
; _es$dead$ = ecx
; _sample$ = edx
    vmovaps xmm0, XMMWORD PTR [edx]
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+16
    vmovaps xmm2, XMMWORD PTR ?es@@3UEQSTATE@@A
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?vsa@@3T__m128@@A
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+16
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+16, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+32
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+32
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+32, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+48
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+48
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+48, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+64
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm4, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+64
    vmovaps xmm2, XMMWORD PTR ?es@@3UEQSTATE@@A+80
    vmovaps xmm1, XMMWORD PTR ?es@@3UEQSTATE@@A+192
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+64, xmm4
    vmovaps xmm0, XMMWORD PTR [edx]
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+96
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?vsa@@3T__m128@@A
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+96
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+96, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+112
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+112
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+112, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+128
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+128
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+128, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+144
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+144
    vsubps  xmm2, xmm1, xmm0
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+144, xmm0
    vmovaps xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+176
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+192, xmm0
    vmovaps xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+160
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+176, xmm0
    vmovaps xmm0, XMMWORD PTR [edx]
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+160, xmm0
    vaddps  xmm0, xmm4, xmm2
    vsubps  xmm0, xmm1, xmm0
    vmulps  xmm1, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+224
    vmulps  xmm0, xmm2, XMMWORD PTR ?es@@3UEQSTATE@@A+240
    vaddps  xmm1, xmm1, xmm0
    vmulps  xmm0, xmm4, XMMWORD PTR ?es@@3UEQSTATE@@A+208
    vaddps  xmm0, xmm1, xmm0
    ret 0
?TestEQ@@YA?AT__m128@@PAUEQSTATE@@AAT1@@Z ENDP      ; TestEQ

And the 2nd version of Vec4 produces the following listing:

?TestEQ@@YA?AUVec4@VMATH@@PAUEQSTATE@@AAU12@@Z PROC ; TestEQ, COMDAT
; ___$ReturnUdt$ = ecx
; _es$dead$ = edx
    push    ebx
    mov ebx, esp
    sub esp, 8
    and esp, -8                 ; fffffff8H
    add esp, 4
    push    ebp
    mov ebp, DWORD PTR [ebx+4]
    mov eax, DWORD PTR _sample$[ebx]
    vmovaps xmm2, XMMWORD PTR ?es@@3UEQSTATE@@A
    vmovaps xmm1, XMMWORD PTR ?es@@3UEQSTATE@@A+192
    mov DWORD PTR [esp+4], ebp
    vmovaps xmm0, XMMWORD PTR [eax]
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+16
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?vsa@@3UVec4@VMATH@@A
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+16
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+16, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+32
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+32
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+32, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+48
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+48
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+48, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+64
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm4, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+64
    vmovaps xmm2, XMMWORD PTR ?es@@3UEQSTATE@@A+80
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+64, xmm4
    vmovaps xmm0, XMMWORD PTR [eax]
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+96
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?vsa@@3UVec4@VMATH@@A
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+96
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+96, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+112
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+112
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+112, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+128
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+128
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+128, xmm0
    vsubps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+144
    vmulps  xmm0, xmm0, xmm2
    vaddps  xmm0, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+144
    vsubps  xmm2, xmm1, xmm0
    vmovaps XMMWORD PTR ?es@@3UEQSTATE@@A+144, xmm0
    vaddps  xmm0, xmm2, xmm4
    vsubps  xmm0, xmm1, xmm0
    vmulps  xmm1, xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+224
    vmovdqu xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+176
    vmovdqu XMMWORD PTR ?es@@3UEQSTATE@@A+192, xmm0
    vmovdqu xmm0, XMMWORD PTR ?es@@3UEQSTATE@@A+160
    vmovdqu XMMWORD PTR ?es@@3UEQSTATE@@A+176, xmm0
    vmovdqu xmm0, XMMWORD PTR [eax]
    vmovdqu XMMWORD PTR ?es@@3UEQSTATE@@A+160, xmm0
    vmulps  xmm0, xmm4, XMMWORD PTR ?es@@3UEQSTATE@@A+208
    vaddps  xmm1, xmm0, xmm1
    vmulps  xmm0, xmm2, XMMWORD PTR ?es@@3UEQSTATE@@A+240
    vaddps  xmm0, xmm1, xmm0
    vmovaps XMMWORD PTR [ecx], xmm0
    mov eax, ecx
    pop ebp
    mov esp, ebx
    pop ebx
    ret 0
?TestEQ@@YA?AUVec4@VMATH@@PAUEQSTATE@@AAU12@@Z ENDP ; TestEQ

The assembly listings were generated with Visual Studio 2015 Update 2.

The assembly produced for the 2nd version is significantly longer and runs slower. The issue is not specific to Visual Studio, since Clang 3.8 produces similar results.

I tried to identify the cause of the issue, without success. There are suspicious candidates, such as the construction, the copy assignment operator, and the pass-by-reference arguments, all of which can unnecessarily move data from SSE registers back to memory; however, all my attempts to pin down or work around the issue were unsuccessful.
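
For illustration, this is the general direction those suspicions point in: let the wrapper cross the call boundary in registers. This is only a sketch (TestEQ_vc is a made-up name, and I am assuming MSVC's __vectorcall convention, also reachable with /Gv); I am not claiming it removes the penalty, but under __vectorcall a one-member aggregate like Vec4 should be eligible to come back in xmm0 instead of through the hidden return pointer visible in the second listing (___$ReturnUdt$ = ecx):

// Sketch, not the project code: the same kernel behind a __vectorcall boundary.
Vec4 __vectorcall TestEQ_vc(EQSTATE* es, Vec4 sample)
{
    // forwards to the kernel above; 'sample' is an lvalue, so it binds to Vec4&
    return TestEQ(es, sample);
}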

So what the hell is going on there?
