Ver a proveniência

further reduce the code in f_impl.h

master
Michael Hamburg há 8 anos
ascendente
cometimento
b4c402c238
10 ficheiros alterados com 36 adições e 72 eliminações
  1. +3
    -6
      src/p25519/arch_ref64/f_impl.h
  2. +5
    -10
      src/p25519/arch_x86_64/f_impl.h
  3. +3
    -6
      src/p448/arch_32/f_impl.h
  4. +5
    -10
      src/p448/arch_arm_32/f_impl.h
  5. +3
    -6
      src/p448/arch_neon_experimental/f_impl.h
  6. +3
    -6
      src/p448/arch_ref64/f_impl.h
  7. +4
    -8
      src/p448/arch_x86_64/f_impl.h
  8. +5
    -10
      src/p480/arch_x86_64/f_impl.h
  9. +3
    -6
      src/p521/arch_ref64/f_impl.h
  10. +2
    -4
      src/p521/arch_x86_64_r12/f_impl.h

+ 3
- 6
src/p25519/arch_ref64/f_impl.h Ver ficheiro

@@ -19,17 +19,15 @@ extern "C" {
/* -------------- Inline functions begin here -------------- */

void gf_add_RAW (gf out, const gf a, const gf b) {
-unsigned int i;
-for (i=0; i<5; i++) {
+for (unsigned int i=0; i<5; i++) {
out->limb[i] = a->limb[i] + b->limb[i];
}
gf_weak_reduce(out);
}

void gf_sub_RAW (gf out, const gf a, const gf b) {
-unsigned int i;
 uint64_t co1 = ((1ull<<51)-1)*2, co2 = co1-36;
-for (i=0; i<5; i++) {
+for (unsigned int i=0; i<5; i++) {
out->limb[i] = a->limb[i] - b->limb[i] + ((i==0) ? co2 : co1);
}
gf_weak_reduce(out);
@@ -43,8 +41,7 @@ void gf_bias (gf a, int amt) {
void gf_weak_reduce (gf a) {
uint64_t mask = (1ull<<51) - 1;
uint64_t tmp = a->limb[4] >> 51;
-int i;
-for (i=4; i>0; i--) {
+for (unsigned int i=4; i>0; i--) {
a->limb[i] = (a->limb[i] & mask) + (a->limb[i-1]>>51);
}
a->limb[0] = (a->limb[0] & mask) + tmp*19;


+ 5
- 10
src/p25519/arch_x86_64/f_impl.h Ver ficheiro

@@ -15,24 +15,20 @@
/* -------------- Inline functions begin here -------------- */

void gf_add_RAW (gf out, const gf a, const gf b) {
-unsigned int i;
-for (i=0; i<5; i++) {
+for (unsigned int i=0; i<5; i++) {
out->limb[i] = a->limb[i] + b->limb[i];
}
}

void gf_sub_RAW (gf out, const gf a, const gf b) {
-unsigned int i;
-uint64_t co1 = ((1ull<<51)-1)*2, co2 = co1-36;
-for (i=0; i<5; i++) {
-out->limb[i] = a->limb[i] - b->limb[i] + ((i==0) ? co2 : co1);
+for (unsigned int i=0; i<5; i++) {
+out->limb[i] = a->limb[i] - b->limb[i];
}
}

void gf_bias (gf a, int amt) {
a->limb[0] += ((uint64_t)(amt)<<52) - 38*amt;
-int i;
-for (i=1; i<5; i++) {
+for (unsigned int i=1; i<5; i++) {
a->limb[i] += ((uint64_t)(amt)<<52)-2*amt;
}
}
@@ -40,8 +36,7 @@ void gf_bias (gf a, int amt) {
void gf_weak_reduce (gf a) {
uint64_t mask = (1ull<<51) - 1;
uint64_t tmp = a->limb[4] >> 51;
-int i;
-for (i=4; i>0; i--) {
+for (unsigned int i=4; i>0; i--) {
a->limb[i] = (a->limb[i] & mask) + (a->limb[i-1]>>51);
}
a->limb[0] = (a->limb[0] & mask) + tmp*19;


+ 3
- 6
src/p448/arch_32/f_impl.h Ver ficheiro

@@ -20,8 +20,7 @@ extern "C" {
/* -------------- Inline functions begin here -------------- */

void gf_add_RAW (gf out, const gf a, const gf b) {
-unsigned int i;
-for (i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
+for (unsigned int i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
((uint32xn_t*)out)[i] = ((const uint32xn_t*)a)[i] + ((const uint32xn_t*)b)[i];
}
/*
@@ -33,8 +32,7 @@ void gf_add_RAW (gf out, const gf a, const gf b) {
}

void gf_sub_RAW (gf out, const gf a, const gf b) {
-unsigned int i;
-for (i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
+for (unsigned int i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
((uint32xn_t*)out)[i] = ((const uint32xn_t*)a)[i] - ((const uint32xn_t*)b)[i];
}
/*
@@ -58,9 +56,8 @@ void gf_bias (gf a, int amt) {
void gf_weak_reduce (gf a) {
uint64_t mask = (1ull<<28) - 1;
uint64_t tmp = a->limb[15] >> 28;
-int i;
 a->limb[8] += tmp;
-for (i=15; i>0; i--) {
+for (unsigned int i=15; i>0; i--) {
a->limb[i] = (a->limb[i] & mask) + (a->limb[i-1]>>28);
}
a->limb[0] = (a->limb[0] & mask) + tmp;


+ 5
- 10
src/p448/arch_arm_32/f_impl.h Ver ficheiro

@@ -20,26 +20,22 @@ extern "C" {
/* -------------- Inline functions begin here -------------- */

void gf_add_RAW (gf out, const gf a, const gf b) {
-unsigned int i;
-for (i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
+for (unsigned int i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
((uint32xn_t*)out)[i] = ((const uint32xn_t*)a)[i] + ((const uint32xn_t*)b)[i];
}
/*
-unsigned int i;
-for (i=0; i<sizeof(*out)/sizeof(out->limb[0]); i++) {
+for (unsigned int i=0; i<sizeof(*out)/sizeof(out->limb[0]); i++) {
out->limb[i] = a->limb[i] + b->limb[i];
}
*/
}

void gf_sub_RAW (gf out, const gf a, const gf b) {
-unsigned int i;
-for (i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
+for (unsigned int i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
((uint32xn_t*)out)[i] = ((const uint32xn_t*)a)[i] - ((const uint32xn_t*)b)[i];
}
/*
-unsigned int i;
-for (i=0; i<sizeof(*out)/sizeof(out->limb[0]); i++) {
+for (unsigned int i=0; i<sizeof(*out)/sizeof(out->limb[0]); i++) {
out->limb[i] = a->limb[i] - b->limb[i];
}
*/
@@ -58,9 +54,8 @@ void gf_bias (gf a, int amt) {
void gf_weak_reduce (gf a) {
uint64_t mask = (1ull<<28) - 1;
uint64_t tmp = a->limb[15] >> 28;
-int i;
 a->limb[8] += tmp;
-for (i=15; i>0; i--) {
+for (unsigned int i=15; i>0; i--) {
a->limb[i] = (a->limb[i] & mask) + (a->limb[i-1]>>28);
}
a->limb[0] = (a->limb[0] & mask) + tmp;


+ 3
- 6
src/p448/arch_neon_experimental/f_impl.h Ver ficheiro

@@ -26,15 +26,13 @@ extern "C" {
/* -------------- Inline functions begin here -------------- */

void gf_add_RAW (gf out, const gf a, const gf b) {
-unsigned int i;
-for (i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
+for (unsigned int i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
((uint32xn_t*)out)[i] = ((const uint32xn_t*)a)[i] + ((const uint32xn_t*)b)[i];
}
}

void gf_sub_RAW (gf out, const gf a, const gf b) {
-unsigned int i;
-for (i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
+for (unsigned int i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
((uint32xn_t*)out)[i] = ((const uint32xn_t*)a)[i] - ((const uint32xn_t*)b)[i];
}
/*
@@ -60,8 +58,7 @@ void gf_weak_reduce (gf a) {
uint32x2_t *aa = (uint32x2_t*) a, vmask = {(1ull<<28)-1, (1ull<<28)-1}, vm2 = {0,-1},
tmp = vshr_n_u32(aa[7],28);
-int i;
-for (i=7; i>=1; i--) {
+for (unsigned int i=7; i>=1; i--) {
aa[i] = vsra_n_u32(aa[i] & vmask, aa[i-1], 28);
}
aa[0] = (aa[0] & vmask) + vrev64_u32(tmp) + (tmp&vm2);


+ 3
- 6
src/p448/arch_ref64/f_impl.h Ver ficheiro

@@ -19,17 +19,15 @@ extern "C" {
/* -------------- Inline functions begin here -------------- */

void gf_add_RAW (gf out, const gf a, const gf b) {
-unsigned int i;
-for (i=0; i<8; i++) {
+for (unsigned int i=0; i<8; i++) {
out->limb[i] = a->limb[i] + b->limb[i];
}
gf_weak_reduce(out);
}

void gf_sub_RAW (gf out, const gf a, const gf b) {
-unsigned int i;
 uint64_t co1 = ((1ull<<56)-1)*2, co2 = co1-2;
-for (i=0; i<8; i++) {
+for (unsigned int i=0; i<8; i++) {
out->limb[i] = a->limb[i] - b->limb[i] + ((i==4) ? co2 : co1);
}
gf_weak_reduce(out);
@@ -43,9 +41,8 @@ void gf_bias (gf a, int amt) {
void gf_weak_reduce (gf a) {
uint64_t mask = (1ull<<56) - 1;
uint64_t tmp = a->limb[7] >> 56;
-int i;
 a->limb[4] += tmp;
-for (i=7; i>0; i--) {
+for (unsigned int i=7; i>0; i--) {
a->limb[i] = (a->limb[i] & mask) + (a->limb[i-1]>>56);
}
a->limb[0] = (a->limb[0] & mask) + tmp;


+ 4
- 8
src/p448/arch_x86_64/f_impl.h Ver ficheiro

@@ -18,8 +18,7 @@ extern "C" {
/* -------------- Inline functions begin here -------------- */

void gf_add_RAW (gf out, const gf a, const gf b) {
-unsigned int i;
-for (i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
+for (unsigned int i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
((uint64xn_t*)out)[i] = ((const uint64xn_t*)a)[i] + ((const uint64xn_t*)b)[i];
}
/*
@@ -31,8 +30,7 @@ void gf_add_RAW (gf out, const gf a, const gf b) {
}

void gf_sub_RAW (gf out, const gf a, const gf b) {
-unsigned int i;
-for (i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
+for (unsigned int i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
((uint64xn_t*)out)[i] = ((const uint64xn_t*)a)[i] - ((const uint64xn_t*)b)[i];
}
/*
@@ -59,8 +57,7 @@ void gf_bias (gf a, int amt) {
aa[2] += hi;
aa[3] += lo;
#else
-unsigned int i;
-for (i=0; i<sizeof(*a)/sizeof(uint64_t); i++) {
+for (unsigned int i=0; i<sizeof(*a)/sizeof(uint64_t); i++) {
a->limb[i] += (i==4) ? co2 : co1;
}
#endif
@@ -70,9 +67,8 @@ void gf_weak_reduce (gf a) {
/* PERF: use pshufb/palignr if anyone cares about speed of this */
uint64_t mask = (1ull<<56) - 1;
uint64_t tmp = a->limb[7] >> 56;
-int i;
 a->limb[4] += tmp;
-for (i=7; i>0; i--) {
+for (unsigned int i=7; i>0; i--) {
a->limb[i] = (a->limb[i] & mask) + (a->limb[i-1]>>56);
}
a->limb[0] = (a->limb[0] & mask) + tmp;


+ 5
- 10
src/p480/arch_x86_64/f_impl.h Ver ficheiro

@@ -18,8 +18,7 @@ extern "C" {
/* -------------- Inline functions begin here -------------- */

void gf_add_RAW (gf *out, const gf *a, const gf *b) {
-unsigned int i;
-for (i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
+for (unsigned int i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
((uint64xn_t*)out)[i] = ((const uint64xn_t*)a)[i] + ((const uint64xn_t*)b)[i];
}
/*
@@ -31,8 +30,7 @@ void gf_add_RAW (gf *out, const gf *a, const gf *b) {
}

void gf_sub_RAW (gf *out, const gf *a, const gf *b) {
-unsigned int i;
-for (i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
+for (unsigned int i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
((uint64xn_t*)out)[i] = ((const uint64xn_t*)a)[i] - ((const uint64xn_t*)b)[i];
}
/*
@@ -44,8 +42,7 @@ void gf_sub_RAW (gf *out, const gf *a, const gf *b) {
}

void gf_copy (gf *out, const gf *a) {
-unsigned int i;
-for (i=0; i<sizeof(*out)/sizeof(big_register_t); i++) {
+for (unsigned int i=0; i<sizeof(*out)/sizeof(big_register_t); i++) {
((big_register_t *)out)[i] = ((const big_register_t *)a)[i];
}
}
@@ -68,8 +65,7 @@ void gf_bias (
aa[2] += hi;
aa[3] += lo;
#else
-unsigned int i;
-for (i=0; i<sizeof(*a)/sizeof(uint64_t); i++) {
+for (unsigned int i=0; i<sizeof(*a)/sizeof(uint64_t); i++) {
a->limb[i] += (i==4) ? co2 : co1;
}
#endif
@@ -79,9 +75,8 @@ void gf_weak_reduce (gf *a) {
/* PERF: use pshufb/palignr if anyone cares about speed of this */
uint64_t mask = (1ull<<60) - 1;
uint64_t tmp = a->limb[7] >> 60;
-int i;
 a->limb[4] += tmp;
-for (i=7; i>0; i--) {
+for (unsigned int i=7; i>0; i--) {
a->limb[i] = (a->limb[i] & mask) + (a->limb[i-1]>>60);
}
a->limb[0] = (a->limb[0] & mask) + tmp;


+ 3
- 6
src/p521/arch_ref64/f_impl.h Ver ficheiro

@@ -17,17 +17,15 @@ extern "C" {
/* -------------- Inline functions begin here -------------- */

void gf_add_RAW (gf *out, const gf *a, const gf *b) {
-unsigned int i;
-for (i=0; i<9; i++) {
+for (unsigned int i=0; i<9; i++) {
out->limb[i] = a->limb[i] + b->limb[i];
}
gf_weak_reduce(out);
}

void gf_sub_RAW (gf *out, const gf *a, const gf *b) {
-unsigned int i;
 uint64_t co1 = ((1ull<<58)-1)*4, co2 = ((1ull<<57)-1)*4;
-for (i=0; i<9; i++) {
+for (unsigned int i=0; i<9; i++) {
out->limb[i] = a->limb[i] - b->limb[i] + ((i==8) ? co2 : co1);
}
gf_weak_reduce(out);
@@ -41,8 +39,7 @@ void gf_bias (gf *a, int amt) {
void gf_weak_reduce (gf *a) {
uint64_t mask = (1ull<<58) - 1;
uint64_t tmp = a->limb[8] >> 57;
-int i;
-for (i=8; i>0; i--) {
+for (unsigned int i=8; i>0; i--) {
a->limb[i] = (a->limb[i] & ((i==8) ? mask>>1 : mask)) + (a->limb[i-1]>>58);
}
a->limb[0] = (a->limb[0] & mask) + tmp;


+ 2
- 4
src/p521/arch_x86_64_r12/f_impl.h Ver ficheiro

@@ -32,15 +32,13 @@ static inline uint64x3_t timesW (uint64x3_t u) {
}

void gf_add_RAW (gf *out, const gf *a, const gf *b) {
-unsigned int i;
-for (i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
+for (unsigned int i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
((uint64xn_t*)out)[i] = ((const uint64xn_t*)a)[i] + ((const uint64xn_t*)b)[i];
}
}

void gf_sub_RAW (gf *out, const gf *a, const gf *b) {
-unsigned int i;
-for (i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
+for (unsigned int i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
((uint64xn_t*)out)[i] = ((const uint64xn_t*)a)[i] - ((const uint64xn_t*)b)[i];
}
}


Carregando…
Cancelar
Guardar