Category: Assembly

Applications and examples for Timing library

Hi,

as you already know, I’ve developed a small library to assess the performance of code. The aim of this library is to objectively report how long a function takes; it can help you choose between implementations of an algorithm, for example.
To illustrate this I will take the Fibonacci function as an example. I know at least two implementations of this function, one recursive and one iterative. The strong point of the recursive version is that it is easier to implement and to understand than the iterative one. But for large numbers I suspect the recursive version to be very slow, although I cannot say by how much.

// Recursive version: simple, but exponential in n.
int __inline__ fibonacci(int n)
{
      if (n < 3)
            return 1;
      return fibonacci(n - 1) + fibonacci(n - 2);
}

// Iterative version: linear in n. next is initialised and the loop runs
// up to c <= n so that both versions return the same value for n >= 1.
int __inline__ fibonacciOptim(int n)
{
      int first = 0, second = 1, next = 0, c;
      for (c = 0; c <= n; c++)
      {
            if (c <= 1)
                  next = c;
            else
            {
                  next = first + second;
                  first = second;
                  second = next;
            }
      }
      return next;
}

It is now time to use the Timing library.
First, clone the repository:

git clone https://github.com/fflayol/timingperf.git
cd timingperf

Then create a main that configures the library and registers the different versions of your code:

#include "performancetiming.hpp"
// Recursive version: simple, but exponential in n.
int __inline__ fibonacci(int n)
{
      if (n < 3)
            return 1;
      return fibonacci(n - 1) + fibonacci(n - 2);
}

// Iterative version: linear in n. next is initialised and the loop runs
// up to c <= n so that both versions return the same value for n >= 1.
int __inline__ fibonacciOptim(int n)
{
      int first = 0, second = 1, next = 0, c;
      for (c = 0; c <= n; c++)
      {
            if (c <= 1)
                  next = c;
            else
            {
                  next = first + second;
                  first = second;
                  second = next;
            }
      }
      return next;
}

int main(int argc, char **argv)
{
      timing::addFunction("FIB", fibonacci);            // register each candidate under a label
      timing::addFunction("FIB-OPTIM", fibonacciOptim);
      timing::setTimingFunction(2);
      timing::setNumberExecution(1000000);              // run each candidate 1,000,000 times
      timing::Execute(15);                              // call each registered function with argument 15
      timing::CalcResult();                             // print the comparison table
}
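
You can then compile and run the example:

g++ ex1.cpp -std=c++11 -o ex1.out
./ex1.out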

The result gives:

fflayol@local:~/Perso/timingperf$ ./ex1.out
 Begin [ FIB ] End [ FIB ] 
Begin [ FIB-OPTIM ] End [ FIB-OPTIM ] 
Begin [ FIB ] End [ FIB ] 
Begin [ FIB-OPTIM ] End [ FIB-OPTIM ]
|--------------------------------------------------------------------|
|   Name    |  Timer  | Duration |    Diff    |  Min  |     Diff     |
|-----------|---------|----------|------------|-------|--------------|
|       FIB |   RDTSC |     6326 |      100 % |  5906 |      100 %   |
| FIB-OPTIM |   RDTSC |      158 |   4003.8 % |   137 |  4310.95 %   |
|       FIB |  RDTSCP |     6354 |  99.5593 % |  5916 |    99.831 %  |
| FIB-OPTIM |  RDTSCP |      208 |  3041.35 % |   180 |  3281.11 %   |
|--------------------------------------------------------------------|

The difference here is very significant: our optimized version is roughly 40x faster (the Duration and Min columns are time-stamp-counter ticks, as read by RDTSC/RDTSCP) 🙂
Another nice feature is checking the quality of compiler optimization. You can change the optimization level with the -O option followed by a number (0 to 3, the higher the more aggressive).

g++ ex1.cpp -std=c++11 -o ex1.out ; g++ -O3 ex1.cpp -std=c++11 -o ex1-opt.out
|--------------------------------------------------------------------|
|   Name    |  Timer  | Duration |    Diff    |  Min  |     Diff     |
|-----------|---------|----------|------------|-------|--------------|
|       FIB |   RDTSC |     2253 |      100 % |  2080 |      100 %   |
| FIB-OPTIM |   RDTSC |       31 |  7267.74 % |    24 |  8666.67 %   |
|       FIB |  RDTSCP |     2304 |  97.7865 % |  2108 |  98.6717 %   |
| FIB-OPTIM |  RDTSCP |       51 |  4417.65 % |    45 |  4622.22 %   |
|--------------------------------------------------------------------|

With -O3 both versions are faster (about 3x for the recursive version and 5x for the iterative one), and the performance gap between them is even larger.

Shuffle in SSE
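
The example below multiplies two vectors of 32-bit integers with _mm_mul_epi32, which only handles lanes 0 and 2 of its operands, and uses _mm_shuffle_epi32 to reorder the lanes so that the remaining pair can be multiplied too — the kind of pattern used in REDC (Montgomery reduction) code.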

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

/* Compile with -msse4.1: _mm_mul_epi32 is an SSE4.1 instruction. */
void test(int32_t *X, int32_t *Y)
{
        __m128i *v1 = (__m128i *) X;
        __m128i *v2 = (__m128i *) Y;
        __m128i v3, v4;
        int32_t *rslt;
        int64_t *rslt64;

        rslt = (int32_t *) v1;
        printf("In test, V1 before MUL SHUFFLE: %d\t%d\t%d\t%d\n", rslt[0], rslt[1], rslt[2], rslt[3]);
        rslt = (int32_t *) v2;
        printf("In test, V2 before MUL SHUFFLE: %d\t%d\t%d\t%d\n", rslt[0], rslt[1], rslt[2], rslt[3]);

        /* _mm_mul_epi32 multiplies only the signed 32-bit elements in lanes 0
           and 2, yielding two 64-bit products. */
        v3 = _mm_mul_epi32(*v1, *v2);
        /* Shuffling with _MM_SHUFFLE(2, 3, 0, 1) swaps lanes (0,1) and (2,3),
           so the second multiply covers the other two lanes. */
        v4 = _mm_mul_epi32(_mm_shuffle_epi32(*v1, _MM_SHUFFLE(2, 3, 0, 1)),
                           _mm_shuffle_epi32(*v2, _MM_SHUFFLE(2, 3, 0, 1)));

        rslt64 = (int64_t *) &v3;
        printf("In REDC, product before SHUFFLE: %lld\t%lld\n", (long long) rslt64[0], (long long) rslt64[1]);

        rslt64 = (int64_t *) &v4;
        printf("In REDC, product after  SHUFFLE: %lld\t%lld\n", (long long) rslt64[0], (long long) rslt64[1]);

        rslt = (int32_t *) v1;
        printf("In REDC, 4-way vect before SHUFFLE: %d\t%d\t%d\t%d\n", rslt[0], rslt[1], rslt[2], rslt[3]);

        *v1 = _mm_shuffle_epi32(*v1, _MM_SHUFFLE(2, 3, 0, 1));
        rslt = (int32_t *) v1;
        printf("In REDC, 4-way vect after  SHUFFLE: %d\t%d\t%d\t%d\n", rslt[0], rslt[1], rslt[2], rslt[3]);
}

int main(int nb, char **argv)
{
    /* test() dereferences each pointer as a full 128-bit vector, so the
       inputs must be four 32-bit values each, 16-byte aligned. */
    int32_t a[4] __attribute__((aligned (16))) = {1234, 1234, 1234, 1234};
    int32_t b[4] __attribute__((aligned (16))) = {5678, 5678, 5678, 5678};
    test(a, b);
}

Quest for the ultimate timer framework

A lot of the material on this blog is about code optimization, and sometimes about very small improvements whose individual impact on performance is minimal; when such code is called many times it becomes difficult to check that an optimization is actually useful. Let’s take an example. Imagine you have a function that lasts one millisecond, and you have found two candidate ways to optimize it. If you are using a timer with a resolution of 0.5 milliseconds, you won’t be able to tell which of the two is better. The aim of this article is to help you understand the different ways of measuring execution time, and their precision.
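
Before diving in, here is a minimal sketch (plain C++11 with std::chrono, independent of the timing library) that estimates the precision you can actually observe from a timer, by busy-waiting until its reported value changes:

#include <chrono>
#include <cstdio>

int main() {
    using clock = std::chrono::steady_clock;
    // Busy-wait until the clock value changes to observe its effective tick.
    auto t0 = clock::now();
    auto t1 = t0;
    while (t1 == t0)
        t1 = clock::now();
    auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0).count();
    std::printf("smallest observable step: %lld ns\n", (long long) ns);
}

If the function you want to measure is shorter than this step, a single call cannot be timed reliably; you have to repeat it many times, which is exactly what the timing library does with setNumberExecution.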

Throughput of the algorithm

In this case, instead of timing one call, you measure how much work the program completes in a fixed amount of time; a minimal sketch is shown after the list below.

Pros:

  • Easy to implement
  • Can cover a full service run, or even the whole lifetime of the program

Cons:

  • Can be harder to reason about, as you invert the measurement (e.g. how many iterations were completed in one minute)
  • Initial conditions can be impossible to reproduce
  • The program itself has to maintain this instrumentation

Example:

  • Our ethminer with the -m option.
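
As announced above, here is a minimal sketch of this approach, assuming a hypothetical work() function as the workload: instead of timing one call, it counts how many calls fit in a fixed one-second window.

#include <chrono>
#include <cstdio>

// Hypothetical workload standing in for the real computation.
static double work(double x) { return x * 1.000001 + 0.5; }

int main() {
    using clock = std::chrono::steady_clock;
    const auto window = std::chrono::seconds(1);
    long long iterations = 0;
    volatile double sink = 1.0;              // keeps the loop from being optimized away
    const auto start = clock::now();
    while (clock::now() - start < window) {  // run for a fixed wall-clock window
        sink = work(sink);
        ++iterations;
    }
    std::printf("throughput: %lld iterations/s\n", iterations);
}

Note that reading the clock on every iteration adds overhead of its own; in practice you would do a batch of work between clock reads.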

Time with the Linux time command

The time command is a standard Unix/Linux command-line tool. It uses the system’s internal timers to report the time taken by a command.

time ls -l 

real	0m0.715s
user	0m0.000s
sys	0m0.004s
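
Here real is the elapsed wall-clock time, user the CPU time spent in user space, and sys the CPU time spent in the kernel on behalf of the process.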

You know that on this blog I like to use examples. Imagine you have a 3D application that performs a lot of complex mathematical calculations (square root, cosine, ...). You know that these functions are called a huge number of times (billions per second). As we have already seen, a small improvement in such a function can have a very strong impact on the whole program. Now suppose you know two ways to implement the calculation: in C, using the standard math library, or in assembler, which is harder to write but might achieve better performance. The method I’m going to present can also be used when you have two implementations of the same feature and you don’t know which one to choose. You need to measure how much faster the new version really is, and whether it is worth the pain, given that assembler code is much harder to maintain.

#include <math.h>
#include <immintrin.h>

inline double Calc_c(double x, double y, double z) {
    double tmpx = sqrt(x) * cos(x) / sin(x);
    double tmpy = sqrt(y) * cos(y) / sin(y);
    double tmpz = sqrt(z) * cos(z) / sin(z);
    return (tmpx + tmpy + tmpz) * tmpx + tmpy + tmpz;
}

inline double Calc_as(double x, double y, double z) {
    __m512d a1 = _mm512_set4_pd(x, y, z, 0.0);
    /* ... the vectorized computation was left unfinished in the original post ... */
    return 0.0; /* placeholder so the sketch compiles */
}

We expect the vectorized version to be faster, but by how much?


SHA3

A preimage attack exists on Keccak-512 reduced to 8 rounds, requiring 2^511.5 time and 2^508 memory.[2]
Zero-sum distinguishers exist for the full 24-round Keccak-f[1600], though they cannot be used to attack the hash function itself.[3]

SHA-3 (Secure Hash Algorithm 3) is the latest member of the Secure Hash Algorithm family of standards, released by NIST on August 5, 2015.[4][5] The reference implementation source code was dedicated to the public domain via the CC0 waiver.[6] Although part of the same series of standards, SHA-3 is internally quite different from the MD5-like structure of SHA-1 and SHA-2.

SHA-3 is a subset of the broader cryptographic primitive family Keccak (/ˈkɛtʃæk/, or /ˈkɛtʃɑːk/),[7][8] designed by Guido Bertoni, Joan Daemen, Michaël Peeters, and Gilles Van Assche.

Keccak is based on a novel approach called sponge construction. Sponge construction is based on a wide random function or random permutation, and allows inputting (“absorbing” in sponge terminology) any amount of data, and outputting (“squeezing”) any amount of data, while acting as a pseudorandom function with regard to all previous inputs. This leads to great flexibility.

SHA-3 uses the sponge construction,[13][23] in which data is “absorbed” into the sponge, then the result is “squeezed” out. In the absorbing phase, message blocks are XORed into a subset of the state, which is then transformed as a whole using a permutation function f. In the “squeeze” phase, output blocks are read from the same subset of the state, alternated with the state transformation function f. The size of the part of the state that is written and read is called the “rate” (denoted r), and the size of the part that is untouched by input/output is called the “capacity” (denoted c). The capacity determines the security of the scheme. The maximum security level is half the capacity.

Given an input bit string N, a padding function pad, a permutation function f that operates on bit blocks of width b, a rate r and an output length d, we have capacity c = b − r, and the sponge construction Z = sponge[f,pad,r](N,d), yielding a bit string Z of length d, works as follows:[24]:18

  1. pad the input N using the pad function, yielding a padded bit string P with a length divisible by r (such that n = len(P)/r is an integer)
  2. break P into n consecutive r-bit pieces P0, …, Pn−1
  3. initialize the state S to a string of b zero bits
  4. absorb the input into the state: for each block Pi,
     • extend Pi at the end by a string of c zero bits, yielding one of length b,
     • XOR that with S, and
     • apply the block permutation f to the result, yielding a new state S
  5. initialize Z to be the empty string
  6. while the length of Z is less than d:
     • append the first r bits of S to Z
     • if Z is still less than d bits long, apply f to S, yielding a new state S
  7. truncate Z to d bits
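
To make the construction concrete, here is a toy byte-level sketch in C. The permutation f below is a dummy stand-in for Keccak-f (this illustrates only the absorb/squeeze structure, it is not a secure hash); the 0x06 and 0x80 constants are the byte-level form of SHA-3’s pad10*1 padding together with its domain-separation bits.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define B 200      /* state width b = 1600 bits, in bytes */
#define R 136      /* rate r = 1088 bits (SHA3-256 geometry) */
#define C (B - R)  /* capacity c = 512 bits */

static void f(uint8_t s[B]) {            /* dummy permutation, illustration only */
    for (int i = 0; i < B; i++)
        s[i] = (uint8_t)(s[i] * 31 + s[(i + 1) % B] + 7);
}

/* sponge[f, pad10*1, r](N, d): absorb message m, squeeze out d bytes */
static void sponge(const uint8_t *m, size_t len, uint8_t *out, size_t d) {
    uint8_t S[B] = {0};

    /* absorb all full r-byte blocks */
    while (len >= R) {
        for (int i = 0; i < R; i++) S[i] ^= m[i];
        f(S);
        m += R; len -= R;
    }
    /* last (partial) block, padded with 10*1 plus SHA-3 domain bits */
    uint8_t blk[R] = {0};
    memcpy(blk, m, len);
    blk[len]   ^= 0x06;   /* domain-separation bits followed by the first pad bit */
    blk[R - 1] ^= 0x80;   /* final pad bit */
    for (int i = 0; i < R; i++) S[i] ^= blk[i];
    f(S);

    /* squeeze: read r bytes at a time, permuting in between */
    while (d > 0) {
        size_t n = d < R ? d : R;
        memcpy(out, S, n);
        out += n; d -= n;
        if (d > 0) f(S);
    }
}

int main(void) {
    uint8_t out[32];
    sponge((const uint8_t *)"abc", 3, out, sizeof out);
    for (int i = 0; i < 32; i++) printf("%02x", out[i]);
    printf("\n");
}

With r = 136 bytes and c = 64 bytes this matches the SHA3-256 geometry; in the real construction all of the security comes from the Keccak-f permutation that the dummy f replaces.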

The fact that the internal state S contains c additional bits of information in addition to what is output to Z prevents the length extension attacks that SHA-2, SHA-1, MD5 and other hashes based on the Merkle–Damgård construction are susceptible to.

In SHA-3, the state S consists of a 5 × 5 array of w = 64-bit words, b = 5 × 5 × w = 5 × 5 × 64 = 1600 bits total. Keccak is also defined for smaller power-of-2 word sizes w down to 1 bit (25 bits total state). Small state sizes can be used to test cryptanalytic attacks, and intermediate state sizes (from w = 8, 200 bits, to w = 32, 800 bits) can be used in practical, lightweight applications.[11][12]

For the SHA3-224, SHA3-256, SHA3-384, and SHA3-512 instances, r is greater than d, so there is no need for additional block permutations in the squeezing phase; the leading d bits of the state are the desired hash. However, SHAKE128 and SHAKE256 allow an arbitrary output length, which is useful in applications such as optimal asymmetric encryption padding.
Padding

To ensure the message can be evenly divided into r-bit blocks, padding is required. SHA-3 uses the pattern 10*1 in its padding function: a 1 bit, followed by zero or more 0 bits (maximum r − 1) and a final 1 bit.

The maximum of r − 1 0 bits occurs when the last message block is r − 1 bits long. Then another block is added after the initial 1 bit, containing r − 1 0 bits before the final 1 bit.

The two 1 bits will be added even if the length of the message is already divisible by r.[24]:5.1 In this case, another block is added to the message, containing a 1 bit, followed by a block of r − 2 0 bits and another 1 bit. This is necessary so that a message with length divisible by r ending in something that looks like padding does not produce the same hash as the message with those bits removed.

The initial 1 bit is required so messages differing only in a few additional 0 bits at the end do not produce the same hash.

The position of the final 1 bit indicates which rate r was used (multi-rate padding), which is required for the security proof to work for different hash variants. Without it, different hash variants of the same short message would be the same up to truncation.
The block permutation

The block transformation f, which is Keccak-f[1600] for SHA-3, is a permutation that uses XOR, AND and NOT operations, and is designed for easy implementation in both software and hardware.

It is defined for any power-of-two word size, w = 2ℓ bits. The main SHA-3 submission uses 64-bit words, ℓ = 6.

The state can be considered to be a 5 × 5 × w array of bits. Let a[i][j][k] be bit (5i + j) × w + k of the input, using a little-endian bit numbering convention and row-major indexing. That is, i selects the row, j the column, and k the bit.

Index arithmetic is performed modulo 5 for the first two dimensions and modulo w for the third.

The basic block permutation function consists of 12 + 2ℓ rounds of five steps, each individually very simple:

θ
Compute the parity of each of the 5w (320, when w = 64) 5-bit columns, and exclusive-or that into two nearby columns in a regular pattern. To be precise, a[i][j][k] ← a[i][j][k] ⊕ parity(a[0…4][j−1][k]) ⊕ parity(a[0…4][j+1][k−1]).

ρ
Bitwise rotate each of the 25 words by a different triangular number 0, 1, 3, 6, 10, 15, …. To be precise, a[0][0] is not rotated, and for all 0 ≤ t < 24, a[i][j][k] ← a[i][j][k−(t+1)(t+2)/2], where $\binom{i}{j} = \begin{pmatrix}3&2\\1&0\end{pmatrix}^{t}\binom{0}{1}$.

π
Permute the 25 words in a fixed pattern. a[j][2i+3j] ← a[i][j].

χ
Bitwise combine along rows, using x ← x ⊕ (¬y & z). To be precise, a[i][j][k] ← a[i][j][k] ⊕ (¬a[i][j+1][k] & a[i][j+2][k]). This is the only non-linear operation in SHA-3.

ι
Exclusive-or a round constant into one word of the state. To be precise, in round n, for 0 ≤ m ≤ ℓ, a[0][0][2^m−1] is exclusive-ORed with bit m + 7n of a degree-8 LFSR sequence. This breaks the symmetry that is preserved by the other steps.

Speed

The speed of SHA-3 hashing of long messages is dominated by the computation of f = Keccak-f[1600] and the XORing of S with the extended Pi, an operation on b = 1600 bits. However, since the last c bits of the extended Pi are 0 anyway, and XOR with 0 is a no-op, it is sufficient to perform XOR operations only for r bits (r = 1600 − 2 × 224 = 1152 bits for SHA3-224, 1088 bits for SHA3-256, 832 bits for SHA3-384 and 576 bits for SHA3-512). The lower r is (and, conversely, the higher c = b − r = 1600 − r), the less efficient but more secure the hashing becomes, since fewer bits of the message can be XORed into the state (a quick operation) before each application of the computationally expensive f.

Keccak[c](N, d) = sponge[Keccak-f[1600], pad10*1, r](N, d)[24]:20
Keccak-f[1600] = Keccak-p[1600, 24][24]:17

where:
  • c is the capacity
  • r is the rate = 1600 − c
  • N is the input bit string
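
As an illustration of how simple these steps are in software, here is a sketch of the θ step on a state held as 5 × 5 64-bit lanes (assuming w = 64; s[x][y] holds the lane in column x, row y, and the k−1 bit offset in the second parity becomes a rotate by one):

#include <stdint.h>
#include <stdio.h>

#define ROTL64(x, n) (((x) << (n)) | ((x) >> (64 - (n))))

/* theta: XOR each lane with the parity of the column to its left and the
   rotated parity of the column to its right; indices are modulo 5. */
static void theta(uint64_t s[5][5]) {
    uint64_t par[5], d[5];
    for (int x = 0; x < 5; x++)
        par[x] = s[x][0] ^ s[x][1] ^ s[x][2] ^ s[x][3] ^ s[x][4];
    for (int x = 0; x < 5; x++)
        d[x] = par[(x + 4) % 5] ^ ROTL64(par[(x + 1) % 5], 1);
    for (int x = 0; x < 5; x++)
        for (int y = 0; y < 5; y++)
            s[x][y] ^= d[x];
}

int main(void) {
    uint64_t s[5][5] = { { 1 } };   /* a single bit set in lane (0,0) */
    theta(s);
    printf("lane (1,0) after theta: %016llx\n", (unsigned long long) s[1][0]);
}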

SSE usage Tutorial

Hi all,

SSE usage is a bit tricky.

You have to see these registers as vectorial, not linear; their interpretation depends on the context.

For instance xmm0 (a 128-bit SSE register) can be seen as two 64-bit values, four 32-bit values, eight 16-bit values or sixteen 8-bit values.
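
A plain C union makes this easy to visualize; this small sketch prints the same 128 bits seen at different granularities:

#include <stdint.h>
#include <stdio.h>

/* The same 128 bits, viewed at different granularities. */
union xmm_view {
    uint64_t u64[2];
    uint32_t u32[4];
    uint16_t u16[8];
    uint8_t  u8[16];
};

int main(void) {
    union xmm_view v;
    v.u64[0] = 0x1122334455667788ULL;
    v.u64[1] = 0x99aabbccddeeff00ULL;
    printf("as 2 x 64 bits: %016llx %016llx\n",
           (unsigned long long) v.u64[0], (unsigned long long) v.u64[1]);
    printf("as 4 x 32 bits: %08x %08x %08x %08x\n",
           v.u32[0], v.u32[1], v.u32[2], v.u32[3]);
    printf("as 16 x 8 bits: first byte %02x, last byte %02x\n", v.u8[0], v.u8[15]);
    return 0;
}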

So what is the aim of all this?

If you want to add two arrays, the algorithm will be:

int a[4]; // let's say that sizeof(int) == 4, i.e. 32 bits
int b[4];
int c[4];

c[0] = a[0] + b[0];
c[1] = a[1] + b[1];
c[2] = a[2] + b[2];
c[3] = a[3] + b[3];

In assembler it gives:

       .loc 1 6 0
        mov     edx, DWORD PTR [rbp-48]
        mov     eax, DWORD PTR [rbp-32]
        add     eax, edx
        mov     DWORD PTR [rbp-16], eax
        .loc 1 7 0
        mov     edx, DWORD PTR [rbp-44]
        mov     eax, DWORD PTR [rbp-28]
        add     eax, edx
        mov     DWORD PTR [rbp-12], eax
        .loc 1 8 0
        mov     edx, DWORD PTR [rbp-40]
        mov     eax, DWORD PTR [rbp-24]
        add     eax, edx
        mov     DWORD PTR [rbp-8], eax
        .loc 1 9 0
        mov     edx, DWORD PTR [rbp-36]
        mov     eax, DWORD PTR [rbp-20]
        add     eax, edx
        mov     DWORD PTR [rbp-4], eax
        mov     eax, 0

For instance c[0] = a[0] + b[0] is generated like this:

       .loc 1 6 0
        mov     edx, DWORD PTR [rbp-48] ; edx <- a[0]
        mov     eax, DWORD PTR [rbp-32] ; eax <- b[0]
        add     eax, edx                ; eax <- a[0] + b[0]
        mov     DWORD PTR [rbp-16], eax ; c[0] = eax

And we are doing that 4 times. But thanks to the SSE extensions we can do it with fewer instructions.

The Streaming SIMD Extensions enhance the x86 architecture in four ways:

  1. 8 new 128-bit SIMD floating-point registers that can be directly addressed;
  2. 50 new instructions that work on packed floating-point data;
  3. 8 new instructions designed to control cacheability of all MMX and 32-bit x86 data types, including the ability to stream data to memory without polluting the caches, and to prefetch data before it is actually used;
  4. 12 new instructions that extend the instruction set.

This set enables the programmer to develop algorithms that can mix packed single-precision floating-point and integer data, using SSE and MMX instructions respectively.

Intel SSE provides eight 128-bit general-purpose registers, each of which can be directly addressed using the register names XMM0 to XMM7. Each register consists of four 32-bit single precision, floating-point numbers, numbered 0 through 3.

SSE instructions operate on either all or the least significant pairs of packed data operands in parallel. The packed instructions (with PS suffix) operate on a pair of operands, while scalar instructions (with SS suffix) always operate on the least significant pair of the two operands; for scalar operations, the three upper components from the first operand are passed through to the destination.

There are two ways to use SSE registers:

  • Scalar: the same instruction is applied 4 times, once per data element (the SS suffix)
  • Packed: one instruction operates on all 4 data elements at once (the PS suffix)

(thanks to Stefano Tommesani for the illustrations)
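
To see the difference, here is a small sketch using the SSE intrinsics from xmmintrin.h: _mm_add_ss performs a scalar add (lane 0 only, upper lanes passed through from the first operand), while _mm_add_ps performs a packed add on all four lanes.

#include <xmmintrin.h>
#include <stdio.h>

int main(void) {
    __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);     /* lanes 0..3 = 1,2,3,4 */
    __m128 b = _mm_set_ps(40.0f, 30.0f, 20.0f, 10.0f); /* lanes 0..3 = 10,20,30,40 */
    float r[4];

    _mm_storeu_ps(r, _mm_add_ss(a, b));  /* scalar: only lane 0 is added */
    printf("addss: %g %g %g %g\n", r[0], r[1], r[2], r[3]);

    _mm_storeu_ps(r, _mm_add_ps(a, b));  /* packed: all four lanes are added */
    printf("addps: %g %g %g %g\n", r[0], r[1], r[2], r[3]);
    return 0;
}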

So let’s return to our code. I think you can see where I am going. If we fill two registers with 4 values each, (a[0]..a[3]) in one register and (b[0]..b[3]) in the other, we can add them together and put the result in a third register. With this solution we do only one addition.

#include <immintrin.h>
#include <stdio.h>
#include <stdint.h>

void p128_hex_u8(__m128i in) {
    uint8_t v[16] __attribute__((aligned (16)));
    _mm_store_si128((__m128i*)v, in);
    printf("v16_u8: %x %x %x %x | %x %x %x %x | %x %x %x %x | %x %x %x %x\n",
           v[0], v[1],  v[2],  v[3],  v[4],  v[5],  v[6],  v[7],
           v[8], v[9], v[10], v[11], v[12], v[13], v[14], v[15]);
}

void p128_hex_u16(__m128i in) {
    uint16_t v[8] __attribute__((aligned (16)));
    _mm_store_si128((__m128i*)v, in);
    printf("v8_u16: %x %x %x %x,  %x %x %x %x\n", v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);
}

void p128_hex_u32(__m128i in) {
    uint32_t v[4] __attribute__((aligned (16)));
    _mm_store_si128((__m128i*)v, in);
    printf("v4_u32: %x %x %x %x\n", v[0], v[1], v[2], v[3]);
}

void p128_dec_u32(__m128i in) {
    uint32_t v[4] __attribute__((aligned (16)));
    _mm_store_si128((__m128i*)v, in);
    printf("v4_u32: %d %d %d %d\n", v[0], v[1], v[2], v[3]);
}

void p128_hex_u64(__m128i in) {
    unsigned long long v[2] __attribute__((aligned (16))); // matches %llx without <inttypes.h>
    _mm_store_si128((__m128i*)v, in);
    printf("v2_u64: %llx %llx\n", v[0], v[1]);
}
 
 
int main(){
    uint32_t a[4] = {1, 2, 3, 4}; // four 32-bit values
    uint32_t b[4] = {11, 12, 13, 14};
    uint32_t c[4];

    // scalar version: four separate additions
    c[0] = a[0] + b[0];
    c[1] = a[1] + b[1];
    c[2] = a[2] + b[2];
    c[3] = a[3] + b[3];
    printf("Result %d %d %d %d\n", c[0], c[1], c[2], c[3]);

    // SSE version: one addition on all four lanes at once.
    // _mm_set_epi32 takes its arguments from the most significant lane
    // down, hence the reversed order.
    __m128i a1 = _mm_set_epi32(a[3], a[2], a[1], a[0]);
    __m128i b1 = _mm_set_epi32(b[3], b[2], b[1], b[0]);
    __m128i c1 = _mm_add_epi32(a1, b1);
    p128_dec_u32(a1);
    p128_dec_u32(b1);
    p128_dec_u32(c1);
}

This is a very simple example; a modern compiler can often already auto-vectorize code like this on its own.