@@ -19,17 +19,15 @@ extern "C" {
 /* -------------- Inline functions begin here -------------- */
 
 void gf_add_RAW (gf out, const gf a, const gf b) {
-    unsigned int i;
-    for (i=0; i<5; i++) {
+    for (unsigned int i=0; i<5; i++) {
         out->limb[i] = a->limb[i] + b->limb[i];
     }
     gf_weak_reduce(out);
 }
 
 void gf_sub_RAW (gf out, const gf a, const gf b) {
-    unsigned int i;
     uint64_t co1 = ((1ull<<51)-1)*2, co2 = co1-36;
-    for (i=0; i<5; i++) {
+    for (unsigned int i=0; i<5; i++) {
         out->limb[i] = a->limb[i] - b->limb[i] + ((i==0) ? co2 : co1);
     }
     gf_weak_reduce(out);
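For reference, the subtraction bias above is a limbwise encoding of 2p for p = 2^255 - 19; the constants telescope:

    co2 + co1*(2^51 + 2^102 + 2^153 + 2^204)
      = (2^52 - 38) + (2^103 - 2^52) + (2^154 - 2^103) + (2^205 - 2^154) + (2^256 - 2^205)
      = 2^256 - 38
      = 2*(2^255 - 19) = 2p

so a - b + 2p can never underflow a limb, and the result stays congruent to a - b mod p.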
@@ -43,8 +41,7 @@ void gf_bias (gf a, int amt) {
 void gf_weak_reduce (gf a) {
     uint64_t mask = (1ull<<51) - 1;
     uint64_t tmp = a->limb[4] >> 51;
-    int i;
-    for (i=4; i>0; i--) {
+    for (unsigned int i=4; i>0; i--) {
         a->limb[i] = (a->limb[i] & mask) + (a->limb[i-1]>>51);
     }
     a->limb[0] = (a->limb[0] & mask) + tmp*19;
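As a sanity check on the carry chain, here is a standalone sketch of the same radix-2^51 reduction with a randomized bound test. The gf_sketch type, helper name, and the 53-bit input bound are illustrative assumptions, not part of the patch:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct { uint64_t limb[5]; } gf_sketch;

static void weak_reduce_51 (gf_sketch *a) {
    const uint64_t mask = (1ull<<51) - 1;
    uint64_t tmp = a->limb[4] >> 51;   /* carry out of bit 255 */
    for (unsigned int i=4; i>0; i--) {
        a->limb[i] = (a->limb[i] & mask) + (a->limb[i-1]>>51);
    }
    a->limb[0] = (a->limb[0] & mask) + tmp*19;  /* 2^255 mod p = 19 */
}

int main (void) {
    for (int trial=0; trial<1000; trial++) {
        gf_sketch x;
        for (int i=0; i<5; i++) {
            /* e.g. the sum of two weakly reduced elements: at most ~53 bits */
            x.limb[i] = (((uint64_t)rand() << 31) ^ (uint64_t)rand())
                      & ((1ull<<53) - 1);
        }
        weak_reduce_51(&x);
        for (int i=0; i<5; i++) {
            assert(x.limb[i] < (1ull<<52));  /* limbs back under 52 bits */
        }
    }
    return 0;
}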
@@ -15,24 +15,20 @@
 /* -------------- Inline functions begin here -------------- */
 
 void gf_add_RAW (gf out, const gf a, const gf b) {
-    unsigned int i;
-    for (i=0; i<5; i++) {
+    for (unsigned int i=0; i<5; i++) {
         out->limb[i] = a->limb[i] + b->limb[i];
     }
 }
 
 void gf_sub_RAW (gf out, const gf a, const gf b) {
-    unsigned int i;
-    uint64_t co1 = ((1ull<<51)-1)*2, co2 = co1-36;
-    for (i=0; i<5; i++) {
-        out->limb[i] = a->limb[i] - b->limb[i] + ((i==0) ? co2 : co1);
+    for (unsigned int i=0; i<5; i++) {
+        out->limb[i] = a->limb[i] - b->limb[i];
     }
 }
 
 void gf_bias (gf a, int amt) {
     a->limb[0] += ((uint64_t)(amt)<<52) - 38*amt;
-    int i;
-    for (i=1; i<5; i++) {
+    for (unsigned int i=1; i<5; i++) {
         a->limb[i] += ((uint64_t)(amt)<<52)-2*amt;
     }
 }
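In this file the bias lives in gf_bias rather than inline in the subtraction: limb 0 gains amt*(2^52 - 38) and limbs 1..4 gain amt*(2^52 - 2), i.e. exactly amt*2p by the telescoping identity above, so the residue mod p is unchanged while every limb becomes large enough that a raw subtraction cannot underflow.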
@@ -40,8 +36,7 @@ void gf_bias (gf a, int amt) {
 void gf_weak_reduce (gf a) {
     uint64_t mask = (1ull<<51) - 1;
     uint64_t tmp = a->limb[4] >> 51;
-    int i;
-    for (i=4; i>0; i--) {
+    for (unsigned int i=4; i>0; i--) {
         a->limb[i] = (a->limb[i] & mask) + (a->limb[i-1]>>51);
     }
     a->limb[0] = (a->limb[0] & mask) + tmp*19;
@@ -20,8 +20,7 @@ extern "C" {
 /* -------------- Inline functions begin here -------------- */
 
 void gf_add_RAW (gf out, const gf a, const gf b) {
-    unsigned int i;
-    for (i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
+    for (unsigned int i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
         ((uint32xn_t*)out)[i] = ((const uint32xn_t*)a)[i] + ((const uint32xn_t*)b)[i];
     }
     /*
@@ -33,8 +32,7 @@ void gf_add_RAW (gf out, const gf a, const gf b) {
 }
 
 void gf_sub_RAW (gf out, const gf a, const gf b) {
-    unsigned int i;
-    for (i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
+    for (unsigned int i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
         ((uint32xn_t*)out)[i] = ((const uint32xn_t*)a)[i] - ((const uint32xn_t*)b)[i];
     }
     /*
@@ -58,9 +56,8 @@ void gf_bias (gf a, int amt) {
 void gf_weak_reduce (gf a) {
     uint64_t mask = (1ull<<28) - 1;
     uint64_t tmp = a->limb[15] >> 28;
-    int i;
     a->limb[8] += tmp;
-    for (i=15; i>0; i--) {
+    for (unsigned int i=15; i>0; i--) {
         a->limb[i] = (a->limb[i] & mask) + (a->limb[i-1]>>28);
     }
     a->limb[0] = (a->limb[0] & mask) + tmp;
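The extra a->limb[8] += tmp; line is the Solinas fold for a prime of the shape p = 2^448 - 2^224 - 1, consistent with 16 limbs of 28 bits: since 2^448 = 2^224 + 1 (mod p), the carry tmp out of bit 448 re-enters the element once at limb 8 (bit 224 = 28*8) and once at limb 0.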
@@ -20,26 +20,22 @@ extern "C" {
 /* -------------- Inline functions begin here -------------- */
 
 void gf_add_RAW (gf out, const gf a, const gf b) {
-    unsigned int i;
-    for (i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
+    for (unsigned int i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
         ((uint32xn_t*)out)[i] = ((const uint32xn_t*)a)[i] + ((const uint32xn_t*)b)[i];
     }
     /*
-    unsigned int i;
-    for (i=0; i<sizeof(*out)/sizeof(out->limb[0]); i++) {
+    for (unsigned int i=0; i<sizeof(*out)/sizeof(out->limb[0]); i++) {
         out->limb[i] = a->limb[i] + b->limb[i];
     }
     */
 }
 
 void gf_sub_RAW (gf out, const gf a, const gf b) {
-    unsigned int i;
-    for (i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
+    for (unsigned int i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
         ((uint32xn_t*)out)[i] = ((const uint32xn_t*)a)[i] - ((const uint32xn_t*)b)[i];
     }
     /*
-    unsigned int i;
-    for (i=0; i<sizeof(*out)/sizeof(out->limb[0]); i++) {
+    for (unsigned int i=0; i<sizeof(*out)/sizeof(out->limb[0]); i++) {
         out->limb[i] = a->limb[i] - b->limb[i];
     }
     */
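One reason for the sizeof-based trip count in these loops: it is a compile-time constant, so the compiler can fully unroll them. A minimal sketch of the arithmetic, assuming 16 32-bit limbs and a 16-byte GCC/Clang vector type (the real headers define uint32xn_t per architecture; the names here are illustrative):

#include <stdint.h>

typedef uint32_t uint32xn_sketch __attribute__((vector_size(16))); /* 4 lanes */
typedef struct { uint32_t limb[16]; } gf_sketch;

_Static_assert(sizeof(gf_sketch) / sizeof(uint32xn_sketch) == 4,
               "a 64-byte field element is processed as 4 vector chunks");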
@@ -58,9 +54,8 @@ void gf_bias (gf a, int amt) {
 void gf_weak_reduce (gf a) {
     uint64_t mask = (1ull<<28) - 1;
     uint64_t tmp = a->limb[15] >> 28;
-    int i;
     a->limb[8] += tmp;
-    for (i=15; i>0; i--) {
+    for (unsigned int i=15; i>0; i--) {
         a->limb[i] = (a->limb[i] & mask) + (a->limb[i-1]>>28);
     }
     a->limb[0] = (a->limb[0] & mask) + tmp;
@@ -26,15 +26,13 @@ extern "C" {
 /* -------------- Inline functions begin here -------------- */
 
 void gf_add_RAW (gf out, const gf a, const gf b) {
-    unsigned int i;
-    for (i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
+    for (unsigned int i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
         ((uint32xn_t*)out)[i] = ((const uint32xn_t*)a)[i] + ((const uint32xn_t*)b)[i];
     }
 }
 
 void gf_sub_RAW (gf out, const gf a, const gf b) {
-    unsigned int i;
-    for (i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
+    for (unsigned int i=0; i<sizeof(*out)/sizeof(uint32xn_t); i++) {
         ((uint32xn_t*)out)[i] = ((const uint32xn_t*)a)[i] - ((const uint32xn_t*)b)[i];
     }
     /*
@@ -60,8 +58,7 @@ void gf_weak_reduce (gf a) {
     uint32x2_t *aa = (uint32x2_t*) a, vmask = {(1ull<<28)-1, (1ull<<28)-1},
         vm2 = {0,-1},
         tmp = vshr_n_u32(aa[7],28);
-    int i;
-    for (i=7; i>=1; i--) {
+    for (unsigned int i=7; i>=1; i--) {
         aa[i] = vsra_n_u32(aa[i] & vmask, aa[i-1], 28);
     }
     aa[0] = (aa[0] & vmask) + vrev64_u32(tmp) + (tmp&vm2);
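For readers without the NEON reference handy, a scalar model of the two-lane intrinsics used above; u32x2_model and the helper names are illustrative assumptions, not the patch's code:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t lane[2]; } u32x2_model;

/* vshr_n_u32(v, n): shift each lane right by n */
static u32x2_model model_vshr (u32x2_model v, int n) {
    return (u32x2_model){{ v.lane[0] >> n, v.lane[1] >> n }};
}

/* vsra_n_u32(a, v, n): shift-right-and-accumulate, a + (v >> n) per lane */
static u32x2_model model_vsra (u32x2_model a, u32x2_model v, int n) {
    return (u32x2_model){{ a.lane[0] + (v.lane[0] >> n),
                           a.lane[1] + (v.lane[1] >> n) }};
}

/* vrev64_u32(v): swap the two 32-bit lanes within the 64-bit vector */
static u32x2_model model_vrev64 (u32x2_model v) {
    return (u32x2_model){{ v.lane[1], v.lane[0] }};
}

int main (void) {
    /* one carry step: lanes just above 2^28 keep their low 28 bits and
     * hand the excess to the accumulating lane */
    u32x2_model v = {{ (1u<<28) + 5, (3u<<28) + 9 }};
    u32x2_model masked = {{ v.lane[0] & ((1u<<28)-1), v.lane[1] & ((1u<<28)-1) }};
    u32x2_model carried = model_vsra(masked, v, 28);     /* {6, 12} */
    u32x2_model swapped = model_vrev64(model_vshr(v, 28)); /* {3, 1} */
    printf("%u %u | %u %u\n", carried.lane[0], carried.lane[1],
           swapped.lane[0], swapped.lane[1]);
    return 0;
}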
@@ -19,17 +19,15 @@ extern "C" {
 /* -------------- Inline functions begin here -------------- */
 
 void gf_add_RAW (gf out, const gf a, const gf b) {
-    unsigned int i;
-    for (i=0; i<8; i++) {
+    for (unsigned int i=0; i<8; i++) {
         out->limb[i] = a->limb[i] + b->limb[i];
     }
     gf_weak_reduce(out);
 }
 
 void gf_sub_RAW (gf out, const gf a, const gf b) {
-    unsigned int i;
     uint64_t co1 = ((1ull<<56)-1)*2, co2 = co1-2;
-    for (i=0; i<8; i++) {
+    for (unsigned int i=0; i<8; i++) {
         out->limb[i] = a->limb[i] - b->limb[i] + ((i==4) ? co2 : co1);
     }
     gf_weak_reduce(out);
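Here the bias constants encode 2p for the Goldilocks prime p = 2^448 - 2^224 - 1 at radix 2^56:

    co1*(1 + 2^56 + ... + 2^392) - 2*2^224
      = (2^449 - 2) - 2^225
      = 2*(2^448 - 2^224 - 1) = 2p

which is why only limb 4, sitting at bit 224, takes the smaller constant co2 = co1 - 2.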
@@ -43,9 +41,8 @@ void gf_bias (gf a, int amt) {
 void gf_weak_reduce (gf a) {
     uint64_t mask = (1ull<<56) - 1;
     uint64_t tmp = a->limb[7] >> 56;
-    int i;
     a->limb[4] += tmp;
-    for (i=7; i>0; i--) {
+    for (unsigned int i=7; i>0; i--) {
         a->limb[i] = (a->limb[i] & mask) + (a->limb[i-1]>>56);
     }
     a->limb[0] = (a->limb[0] & mask) + tmp;
@@ -18,8 +18,7 @@ extern "C" {
 /* -------------- Inline functions begin here -------------- */
 
 void gf_add_RAW (gf out, const gf a, const gf b) {
-    unsigned int i;
-    for (i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
+    for (unsigned int i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
         ((uint64xn_t*)out)[i] = ((const uint64xn_t*)a)[i] + ((const uint64xn_t*)b)[i];
     }
     /*
@@ -31,8 +30,7 @@ void gf_add_RAW (gf out, const gf a, const gf b) {
 }
 
 void gf_sub_RAW (gf out, const gf a, const gf b) {
-    unsigned int i;
-    for (i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
+    for (unsigned int i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
         ((uint64xn_t*)out)[i] = ((const uint64xn_t*)a)[i] - ((const uint64xn_t*)b)[i];
     }
     /*
@@ -59,8 +57,7 @@ void gf_bias (gf a, int amt) {
     aa[2] += hi;
     aa[3] += lo;
 #else
-    unsigned int i;
-    for (i=0; i<sizeof(*a)/sizeof(uint64_t); i++) {
+    for (unsigned int i=0; i<sizeof(*a)/sizeof(uint64_t); i++) {
         a->limb[i] += (i==4) ? co2 : co1;
     }
 #endif
@@ -70,9 +67,8 @@ void gf_weak_reduce (gf a) {
     /* PERF: use pshufb/palignr if anyone cares about speed of this */
     uint64_t mask = (1ull<<56) - 1;
     uint64_t tmp = a->limb[7] >> 56;
-    int i;
     a->limb[4] += tmp;
-    for (i=7; i>0; i--) {
+    for (unsigned int i=7; i>0; i--) {
         a->limb[i] = (a->limb[i] & mask) + (a->limb[i-1]>>56);
     }
     a->limb[0] = (a->limb[0] & mask) + tmp;
@@ -18,8 +18,7 @@ extern "C" {
 /* -------------- Inline functions begin here -------------- */
 
 void gf_add_RAW (gf *out, const gf *a, const gf *b) {
-    unsigned int i;
-    for (i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
+    for (unsigned int i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
         ((uint64xn_t*)out)[i] = ((const uint64xn_t*)a)[i] + ((const uint64xn_t*)b)[i];
     }
     /*
@@ -31,8 +30,7 @@ void gf_add_RAW (gf *out, const gf *a, const gf *b) {
 }
 
 void gf_sub_RAW (gf *out, const gf *a, const gf *b) {
-    unsigned int i;
-    for (i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
+    for (unsigned int i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
         ((uint64xn_t*)out)[i] = ((const uint64xn_t*)a)[i] - ((const uint64xn_t*)b)[i];
     }
     /*
@@ -44,8 +42,7 @@ void gf_sub_RAW (gf *out, const gf *a, const gf *b) {
 }
 
 void gf_copy (gf *out, const gf *a) {
-    unsigned int i;
-    for (i=0; i<sizeof(*out)/sizeof(big_register_t); i++) {
+    for (unsigned int i=0; i<sizeof(*out)/sizeof(big_register_t); i++) {
         ((big_register_t *)out)[i] = ((const big_register_t *)a)[i];
     }
 }
@@ -68,8 +65,7 @@ void gf_bias (
     aa[2] += hi;
     aa[3] += lo;
 #else
-    unsigned int i;
-    for (i=0; i<sizeof(*a)/sizeof(uint64_t); i++) {
+    for (unsigned int i=0; i<sizeof(*a)/sizeof(uint64_t); i++) {
         a->limb[i] += (i==4) ? co2 : co1;
     }
 #endif
@@ -79,9 +75,8 @@ void gf_weak_reduce (gf *a) {
     /* PERF: use pshufb/palignr if anyone cares about speed of this */
     uint64_t mask = (1ull<<60) - 1;
     uint64_t tmp = a->limb[7] >> 60;
-    int i;
     a->limb[4] += tmp;
-    for (i=7; i>0; i--) {
+    for (unsigned int i=7; i>0; i--) {
         a->limb[i] = (a->limb[i] & mask) + (a->limb[i-1]>>60);
     }
     a->limb[0] = (a->limb[0] & mask) + tmp;
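The same fold appears here at radix 2^60: eight 60-bit limbs, with the top carry re-entering at limb 4 (bit 240) as well as limb 0, consistent with a prime of the shape 2^480 - 2^240 - 1.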
@@ -17,17 +17,15 @@ extern "C" {
 /* -------------- Inline functions begin here -------------- */
 
 void gf_add_RAW (gf *out, const gf *a, const gf *b) {
-    unsigned int i;
-    for (i=0; i<9; i++) {
+    for (unsigned int i=0; i<9; i++) {
         out->limb[i] = a->limb[i] + b->limb[i];
     }
     gf_weak_reduce(out);
 }
 
 void gf_sub_RAW (gf *out, const gf *a, const gf *b) {
-    unsigned int i;
     uint64_t co1 = ((1ull<<58)-1)*4, co2 = ((1ull<<57)-1)*4;
-    for (i=0; i<9; i++) {
+    for (unsigned int i=0; i<9; i++) {
         out->limb[i] = a->limb[i] - b->limb[i] + ((i==8) ? co2 : co1);
     }
     gf_weak_reduce(out);
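This file's bias telescopes to 4p for the Mersenne prime p = 2^521 - 1:

    co1*(1 + 2^58 + ... + 2^406) + co2*2^464
      = (2^466 - 4) + (2^523 - 2^466)
      = 2^523 - 4 = 4*(2^521 - 1) = 4p

with co1 = 2^60 - 4 on limbs 0..7 and co2 = 2^59 - 4 on the narrower top limb.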
@@ -41,8 +39,7 @@ void gf_bias (gf *a, int amt) {
 void gf_weak_reduce (gf *a) {
     uint64_t mask = (1ull<<58) - 1;
     uint64_t tmp = a->limb[8] >> 57;
-    int i;
-    for (i=8; i>0; i--) {
+    for (unsigned int i=8; i>0; i--) {
         a->limb[i] = (a->limb[i] & ((i==8) ? mask>>1 : mask)) + (a->limb[i-1]>>58);
     }
     a->limb[0] = (a->limb[0] & mask) + tmp;
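Because p = 2^521 - 1 is a Mersenne prime, the wrap multiplier is 1: nine 58-bit limbs give 522 bits, so the top limb is masked to 57 bits (mask>>1), and tmp = a->limb[8] >> 57 is the carry out of bit 521, added straight into limb 0.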
@@ -32,15 +32,13 @@ static inline uint64x3_t timesW (uint64x3_t u) {
 }
 
 void gf_add_RAW (gf *out, const gf *a, const gf *b) {
-    unsigned int i;
-    for (i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
+    for (unsigned int i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
         ((uint64xn_t*)out)[i] = ((const uint64xn_t*)a)[i] + ((const uint64xn_t*)b)[i];
     }
 }
 
 void gf_sub_RAW (gf *out, const gf *a, const gf *b) {
-    unsigned int i;
-    for (i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
+    for (unsigned int i=0; i<sizeof(*out)/sizeof(uint64xn_t); i++) {
         ((uint64xn_t*)out)[i] = ((const uint64xn_t*)a)[i] - ((const uint64xn_t*)b)[i];
     }
 }