You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 
 
 

1702 lines
47 KiB

  1. /* Copyright (c) 2015 Cryptography Research, Inc.
  2. * Released under the MIT License. See LICENSE.txt for license information.
  3. */
  4. /**
  5. * @file decaf.c
  6. * @author Mike Hamburg
  7. * @brief Decaf high-level functions.
  8. */
  9. #define _XOPEN_SOURCE 600 /* for posix_memalign */
  10. #define __STDC_WANT_LIB_EXT1__ 1 /* for memset_s */
  11. #include "decaf.h"
  12. #include <string.h>
  13. #include "field.h"
  14. #include "decaf_255_config.h"
  15. #define WBITS DECAF_WORD_BITS
  16. /* Rename table for eventual factoring into .c.inc, MSR ECC style */
  17. #define SCALAR_LIMBS DECAF_255_SCALAR_LIMBS
  18. #define SCALAR_BITS DECAF_255_SCALAR_BITS
  19. #define NLIMBS DECAF_255_LIMBS
  20. #define API_NS(_id) decaf_255_##_id
  21. #define API_NS2(_pref,_id) _pref##_decaf_255_##_id
  22. #define scalar_t decaf_255_scalar_t
  23. #define point_t decaf_255_point_t
  24. #define precomputed_s decaf_255_precomputed_s
  25. #define SER_BYTES DECAF_255_SER_BYTES
  26. #if WBITS == 64
  27. typedef __int128_t decaf_sdword_t;
  28. #define SC_LIMB(x) (x##ull)
  29. #elif WBITS == 32
  30. typedef int64_t decaf_sdword_t;
  31. #define SC_LIMB(x) (x##ull)&((1ull<<32)-1), (x##ull)>>32
  32. #else
  33. #error "Only supporting 32- and 64-bit platforms right now"
  34. #endif
//static const int QUADRATIC_NONRESIDUE = -1;
/* Qualifier shorthands: plain static void, noinline, and force-inline variants. */
#define sv static void
#define snv static void __attribute__((noinline))
#define siv static inline void __attribute__((always_inline))
/* Small field-element constants used by the formulas below. */
static const gf ZERO = {{{0}}}, ONE = {{{1}}}, TWO = {{{2}}};
/* Edwards curve constant d as used by the point formulas below. */
static const int EDWARDS_D = 121665;
/* The scalar modulus (little-endian limbs). */
static const scalar_t sc_p = {{{
    SC_LIMB(0x5812631a5cf5d3ed),
    SC_LIMB(0x14def9dea2f79cd6),
    SC_LIMB(0),
    SC_LIMB(0),
    SC_LIMB(0x1000000000000000)
}}};
const scalar_t API_NS(scalar_one) = {{{1}}}, API_NS(scalar_zero) = {{{0}}};
/* Montgomery-arithmetic constants, defined elsewhere (generated/pregen). */
extern const scalar_t sc_r2;
extern const decaf_word_t MONTGOMERY_FACTOR;
/* sqrt(9) = 3 from the curve spec. Not exported, but used by pregen tool. */
const unsigned char base_point_ser_for_pregen[SER_BYTES] = {
    3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
};
extern const point_t API_NS(point_base);
/* Projective Niels coordinates */
typedef struct { gf a, b, c; } niels_s, niels_t[1];
typedef struct { niels_t n; gf z; } pniels_s, pniels_t[1];
/* Precomputed base */
struct precomputed_s { niels_t table [DECAF_COMBS_N<<(DECAF_COMBS_T-1)]; };
extern const field_t API_NS(precomputed_base_as_fe)[];
const precomputed_s *API_NS(precomputed_base) =
    (const precomputed_s *) &API_NS(precomputed_base_as_fe);
/* Size/alignment of the opaque precomputed table, for callers that allocate one. */
const size_t API_NS2(sizeof,precomputed_s) = sizeof(precomputed_s);
const size_t API_NS2(alignof,precomputed_s) = 32;
/* Ask clang (>= 3.5) to vectorize the FOR_LIMB_V loops below. */
#ifdef __clang__
#if 100*__clang_major__ + __clang_minor__ > 305
#define VECTORIZE _Pragma("clang loop unroll(disable) vectorize(enable) vectorize_width(8)")
#endif
#endif
#ifndef VECTORIZE
#define VECTORIZE
#endif
/* Run op for each limb index i of a field element; _V variant hints vectorization. */
#define FOR_LIMB(i,op) { unsigned int i=0; for (i=0; i<NLIMBS; i++) { op; }}
#define FOR_LIMB_V(i,op) { unsigned int i=0; VECTORIZE for (i=0; i<NLIMBS; i++) { op; }}
  76. /** Copy x = y */
  77. siv gf_cpy(gf x, const gf y) { x[0] = y[0]; }
  78. /** Mostly-unoptimized multiply, but at least it's unrolled. */
  79. siv gf_mul (gf c, const gf a, const gf b) {
  80. field_mul((field_t *)c, (const field_t *)a, (const field_t *)b);
  81. }
  82. /** Dedicated square */
  83. siv gf_sqr (gf c, const gf a) {
  84. field_sqr((field_t *)c, (const field_t *)a);
  85. }
  86. /** Inverse square root using addition chain. */
  87. siv gf_isqrt(gf y, const gf x) {
  88. field_isr((field_t *)y, (const field_t *)x);
  89. }
  90. /** Inverse. TODO: adapt to 5-mod-8 fields? */
  91. sv gf_invert(gf y, const gf x) {
  92. gf t1, t2;
  93. gf_sqr(t1, x); // o^2
  94. gf_isqrt(t2, t1); // +-1/sqrt(o^2) = +-1/o
  95. gf_sqr(t1, t2);
  96. gf_mul(t2, t1, x); // not direct to y in case of alias.
  97. gf_cpy(y, t2);
  98. }
  99. /** Add mod p. Conservatively always weak-reduce. */
  100. snv gf_add ( gf_s *__restrict__ c, const gf a, const gf b ) {
  101. field_add((field_t *)c, (const field_t *)a, (const field_t *)b);
  102. }
  103. /** Subtract mod p. Conservatively always weak-reduce. */
  104. snv gf_sub ( gf c, const gf a, const gf b ) {
  105. field_sub((field_t *)c, (const field_t *)a, (const field_t *)b);
  106. }
  107. /** Add mod p. Conservatively always weak-reduce.) */
  108. siv gf_bias ( gf c, int amt) {
  109. field_bias((field_t *)c, amt);
  110. }
/** Subtract mod p. Bias by 2 and don't reduce */
siv gf_sub_nr ( gf_s *__restrict__ c, const gf a, const gf b ) {
    // FOR_LIMB_V(i, c->limb[i] = a->limb[i] - b->limb[i] + 2*P->limb[i] );
    ANALYZE_THIS_ROUTINE_CAREFULLY; //TODO
    field_sub_nr((field_t *)c, (const field_t *)a, (const field_t *)b);
    gf_bias(c, 2); /* keep limbs nonnegative after the raw subtraction */
    if (WBITS==32) field_weak_reduce((field_t*) c); // HACK FIXME
}
/** Subtract mod p. Bias by amt but don't reduce.
 * Callers pick amt based on how negative the intermediate can get. */
siv gf_sub_nr_x ( gf c, const gf a, const gf b, int amt ) {
    ANALYZE_THIS_ROUTINE_CAREFULLY; //TODO
    field_sub_nr((field_t *)c, (const field_t *)a, (const field_t *)b);
    gf_bias(c, amt);
    if (WBITS==32) field_weak_reduce((field_t*) c); // HACK FIXME
}
/** Add mod p. Don't reduce; the caller must tolerate grown limbs. */
siv gf_add_nr ( gf c, const gf a, const gf b ) {
    // FOR_LIMB_V(i, c->limb[i] = a->limb[i] + b->limb[i]);
    ANALYZE_THIS_ROUTINE_CAREFULLY; //TODO
    field_add_nr((field_t *)c, (const field_t *)a, (const field_t *)b);
}
/** Constant time, x = is_z ? z : y.
 * is_z is expected to be 0 or all-ones (decaf_bool_t mask convention);
 * it is broadcast to a wide register and the two inputs are merged
 * wordwise without any data-dependent branch. */
siv cond_sel(gf x, const gf y, const gf z, decaf_bool_t is_z) {
    big_register_t br_mask = br_set_to_mask(is_z);
    big_register_t *out = (big_register_t *)x;
    const big_register_t *y_ = (const big_register_t *)y, *z_ = (const big_register_t *)z;
    word_t k;
    for (k=0; k<sizeof(gf)/sizeof(big_register_t); k++) {
        out[k] = (~br_mask & y_[k]) | (br_mask & z_[k]);
    }
    /*
    constant_time_select(x,z,y,sizeof(gf),is_z);
    */
}
  145. /** Constant time, if (neg) x=-x; */
  146. sv cond_neg(gf x, decaf_bool_t neg) {
  147. gf y;
  148. gf_sub(y,ZERO,x);
  149. cond_sel(x,x,y,neg);
  150. }
/** Constant time, if (swap) (x,y) = (y,x);
 * XOR-swap masked by swap: when swap is 0 the stores are no-ops,
 * so no data-dependent branch is taken. */
siv cond_swap(gf x, gf_s *__restrict__ y, decaf_bool_t swap) {
    FOR_LIMB_V(i, {
        decaf_word_t s = (x->limb[i] ^ y->limb[i]) & swap;
        x->limb[i] ^= s;
        y->limb[i] ^= s;
    });
}
  159. /**
  160. * Mul by signed int. Not constant-time WRT the sign of that int.
  161. * Just uses a full mul (PERF)
  162. */
  163. siv gf_mlw(gf c, const gf a, int w) {
  164. if (w>0) {
  165. field_mulw((field_t *)c, (const field_t *)a, w);
  166. } else {
  167. field_mulw((field_t *)c, (const field_t *)a, -w);
  168. gf_sub(c,ZERO,c);
  169. }
  170. }
  171. /** Canonicalize */
  172. siv gf_canon ( gf a ) {
  173. field_strong_reduce((field_t *)a);
  174. }
/** Compare a==b. Returns an all-ones mask if equal, zero otherwise. */
static decaf_word_t __attribute__((noinline)) gf_eq(const gf a, const gf b) {
    gf c;
    gf_sub(c,a,b);
    gf_canon(c); /* canonical form: a==b iff every limb of c is now zero */
    decaf_word_t ret=0;
    FOR_LIMB(i, ret |= c->limb[i] );
    /* Hope the compiler is too dumb to optimize this, thus noinline */
    return ((decaf_dword_t)ret - 1) >> WBITS; /* 0 -> all-ones, nonzero -> 0 */
}
/** Inverse square root with verification: y <- 1/sqrt(x).
 * Returns all-ones iff y*y*x == 1 (x was a nonzero square), or
 * additionally when x == 0 if allow_zero is set. */
static decaf_bool_t gf_isqrt_chk(gf y, const gf x, decaf_bool_t allow_zero) {
    gf tmp0, tmp1;
    field_isr((field_t *)y, (const field_t *)x);
    gf_sqr(tmp0,y);
    gf_mul(tmp1,tmp0,x); /* tmp1 = x/y^-2 = x * (1/x) = 1 on success */
    return gf_eq(tmp1,ONE) | (allow_zero & gf_eq(tmp1,ZERO));
}
/** Return high bit of x = low bit of 2x mod p, as a 0/all-ones mask. */
static decaf_word_t hibit(const gf x) {
    gf y;
    gf_add(y,x,x); /* 2x */
    gf_canon(y);   /* canonicalize so the low bit is well-defined */
    return -(y->limb[0]&1);
}
/** {extra,accum} - sub +? p
 * Must have extra <= 1.
 * Computes out = accum - sub, adding p back once if that underflowed,
 * all in constant time. */
snv sc_subx(
    scalar_t out,
    const decaf_word_t accum[SCALAR_LIMBS],
    const scalar_t sub,
    const scalar_t p,
    decaf_word_t extra
) {
    /* Pass 1: limbwise subtraction with signed borrow propagation. */
    decaf_sdword_t chain = 0;
    unsigned int i;
    for (i=0; i<SCALAR_LIMBS; i++) {
        chain = (chain + accum[i]) - sub->limb[i];
        out->limb[i] = chain;
        chain >>= WBITS;
    }
    decaf_bool_t borrow = chain+extra; /* = 0 or -1 */
    /* Pass 2: conditionally add p, masked by the borrow. */
    chain = 0;
    for (i=0; i<SCALAR_LIMBS; i++) {
        chain = (chain + out->limb[i]) + (p->limb[i] & borrow);
        out->limb[i] = chain;
        chain >>= WBITS;
    }
}
/** Montgomery multiply: out = a*b/R mod the scalar modulus, where
 * R = 2^(WBITS*SCALAR_LIMBS). Interleaved multiply-and-reduce. */
snv sc_montmul (
    scalar_t out,
    const scalar_t a,
    const scalar_t b
) {
    unsigned int i,j;
    decaf_word_t accum[SCALAR_LIMBS+1] = {0};
    decaf_word_t hi_carry = 0;
    for (i=0; i<SCALAR_LIMBS; i++) {
        /* accum += a[i] * b */
        decaf_word_t mand = a->limb[i];
        const decaf_word_t *mier = b->limb;
        decaf_dword_t chain = 0;
        for (j=0; j<SCALAR_LIMBS; j++) {
            chain += ((decaf_dword_t)mand)*mier[j] + accum[j];
            accum[j] = chain;
            chain >>= WBITS;
        }
        accum[j] = chain;
        /* Montgomery step: pick mand so accum + mand*p has a zero low limb,
         * then shift the accumulator down one limb while adding mand*p. */
        mand = accum[0] * MONTGOMERY_FACTOR;
        chain = 0;
        mier = sc_p->limb;
        for (j=0; j<SCALAR_LIMBS; j++) {
            chain += (decaf_dword_t)mand*mier[j] + accum[j];
            if (j) accum[j-1] = chain;
            chain >>= WBITS;
        }
        chain += accum[j];
        chain += hi_carry;
        accum[j-1] = chain;
        hi_carry = chain >> WBITS;
    }
    /* Final conditional subtraction brings the result below the modulus. */
    sc_subx(out, accum, sc_p, sc_p, hi_carry);
}
  258. void API_NS(scalar_mul) (
  259. scalar_t out,
  260. const scalar_t a,
  261. const scalar_t b
  262. ) {
  263. sc_montmul(out,a,b);
  264. sc_montmul(out,out,sc_r2);
  265. }
  266. /* PERF: could implement this */
  267. siv sc_montsqr (
  268. scalar_t out,
  269. const scalar_t a
  270. ) {
  271. sc_montmul(out,a,a);
  272. }
/** Invert a scalar mod the scalar modulus.
 * NOTE(review): currently stubbed out (#if 0): it always returns 0
 * (failure) and never writes *out. The disabled addition chain below
 * was generated for a different field ("not updated for 25519"). */
decaf_bool_t API_NS(scalar_invert) (
    scalar_t out,
    const scalar_t a
) {
#if 0
    /* FIELD MAGIC. FIXME: not updated for 25519 */
    scalar_t chain[7], tmp;
    sc_montmul(chain[0],a,sc_r2);
    unsigned int i,j;
    /* Addition chain generated by a not-too-clever SAGE script. First part: compute a^(2^222-1) */
    const struct { uint8_t widx, sidx, sct, midx; } muls [] = {
        {2,0,1,0}, {3,2,1,0}, {4,3,1,0}, {5,4,1,0}, /* 0x3,7,f,1f */
        {1,5,1,0}, {1,1,3,3}, {6,1,9,1}, {1,6,1,0}, {6,1,18,6}, /* a^(2^37-1) */
        {1,6,37,6}, {1,1,37,6}, {1,1,111,1} /* a^(2^222-1) */
    };
    /* Second part: sliding window */
    const struct { uint8_t sct, midx; } muls1 [] = {
        {6, 5}, {4, 2}, {3, 0}, {2, 0}, {4, 0}, {8, 5},
        {2, 0}, {5, 3}, {4, 0}, {4, 0}, {5, 3}, {3, 2},
        {3, 2}, {3, 2}, {2, 0}, {3, 0}, {4, 2}, {2, 0},
        {4, 3}, {3, 2}, {2, 0}, {3, 2}, {5, 2}, {3, 2},
        {2, 0}, {3, 0}, {7, 0}, {5, 0}, {3, 2}, {3, 2},
        {4, 2}, {5, 0}, {5, 3}, {3, 0}, {2, 0}, {5, 2},
        {4, 3}, {4, 0}, {3, 2}, {7, 4}, {2, 0}, {2, 0},
        {2, 0}, {2, 0}, {3, 0}, {5, 2}, {5, 4}, {5, 2},
        {5, 0}, {2, 0}, {3, 0}, {3, 0}, {2, 0}, {2, 0},
        {2, 0}, {3, 2}, {2, 0}, {3, 2}, {5, 0}, {4, 0},
        {6, 4}, {4, 0}
    };
    for (i=0; i<sizeof(muls)/sizeof(muls[0]); i++) {
        sc_montsqr(tmp, chain[muls[i].sidx]);
        for (j=1; j<muls[i].sct; j++) {
            sc_montsqr(tmp, tmp);
        }
        sc_montmul(chain[muls[i].widx], tmp, chain[muls[i].midx]);
    }
    for (i=0; i<sizeof(muls1)/sizeof(muls1[0]); i++) {
        sc_montsqr(tmp, chain[1]);
        for (j=1; j<muls1[i].sct; j++) {
            sc_montsqr(tmp, tmp);
        }
        sc_montmul(chain[1], tmp, chain[muls1[i].midx]);
    }
    sc_montmul(out,chain[1],API_NS(scalar_one));
    for (i=0; i<sizeof(chain)/sizeof(chain[0]); i++) {
        API_NS(scalar_destroy)(chain[i]);
    }
    return ~API_NS(scalar_eq)(out,API_NS(scalar_zero));
#else
    (void)out;
    (void)a;
    return 0;
#endif
}
  327. void API_NS(scalar_sub) (
  328. scalar_t out,
  329. const scalar_t a,
  330. const scalar_t b
  331. ) {
  332. sc_subx(out, a->limb, b, sc_p, 0);
  333. }
/** out = a + b mod the scalar modulus (constant time). */
void API_NS(scalar_add) (
    scalar_t out,
    const scalar_t a,
    const scalar_t b
) {
    /* Limbwise add with carry, then conditionally subtract the modulus. */
    decaf_dword_t chain = 0;
    unsigned int i;
    for (i=0; i<SCALAR_LIMBS; i++) {
        chain = (chain + a->limb[i]) + b->limb[i];
        out->limb[i] = chain;
        chain >>= WBITS;
    }
    sc_subx(out, out->limb, sc_p, sc_p, chain);
}
/** out = a/2 mod p: if a is odd, add p (making the sum even),
 * then shift the whole number right by one bit. Constant time. */
snv sc_halve (
    scalar_t out,
    const scalar_t a,
    const scalar_t p
) {
    decaf_word_t mask = -(a->limb[0] & 1); /* all-ones iff a is odd */
    decaf_dword_t chain = 0;
    unsigned int i;
    for (i=0; i<SCALAR_LIMBS; i++) {
        chain = (chain + a->limb[i]) + (p->limb[i] & mask);
        out->limb[i] = chain;
        chain >>= WBITS;
    }
    /* Shift right one bit, pulling the next limb's low bit in at the top;
     * the final carry feeds the most significant limb. */
    for (i=0; i<SCALAR_LIMBS-1; i++) {
        out->limb[i] = out->limb[i]>>1 | out->limb[i+1]<<(WBITS-1);
    }
    out->limb[i] = out->limb[i]>>1 | chain<<(WBITS-1);
}
  366. void API_NS(scalar_set) (
  367. scalar_t out,
  368. decaf_word_t w
  369. ) {
  370. memset(out,0,sizeof(scalar_t));
  371. out->limb[0] = w;
  372. }
/** Constant-time comparison: returns all-ones if a == b, zero otherwise. */
decaf_bool_t API_NS(scalar_eq) (
    const scalar_t a,
    const scalar_t b
) {
    decaf_word_t diff = 0;
    unsigned int i;
    for (i=0; i<SCALAR_LIMBS; i++) {
        diff |= a->limb[i] ^ b->limb[i];
    }
    /* diff == 0 iff equal; convert to a 0/-1 mask without branching. */
    return (((decaf_dword_t)diff)-1)>>WBITS;
}
/* *** API begins here *** */
/** identity = (0,1), stored as (X:Y:Z:T) = (0:1:1:0) */
const point_t API_NS(point_identity) = {{{{{0}}},{{{1}}},{{{1}}},{{{0}}}}};
  387. static void gf_encode ( unsigned char ser[SER_BYTES], gf a ) {
  388. field_serialize(ser, (field_t *)a);
  389. }
/** Encode a point into its canonical SER_BYTES wire form (the inverse of
 * point_decode). Constant time; sign choices are made with cond_neg. */
void API_NS(point_encode)( unsigned char ser[SER_BYTES], const point_t p ) {
    /* Can shave off one mul here; not important but makes consistent with paper */
    gf a, b, c, d;
    gf_mlw ( a, p->y, 1-EDWARDS_D );
    gf_mul ( c, a, p->t ); /* -dYT, with EDWARDS_D = d-1 */
    gf_mul ( a, p->x, p->z );
    gf_sub ( d, c, a ); /* aXZ-dYT with a=-1 */
    gf_add ( a, p->z, p->y );
    gf_sub ( b, p->z, p->y );
    gf_mul ( c, b, a );
    gf_mlw ( b, c, -EDWARDS_D ); /* (a-d)(Z+Y)(Z-Y) */
    gf_isqrt ( a, b ); /* r in the paper */
    gf_mlw ( b, a, -EDWARDS_D ); /* u in the paper */
    gf_mul ( c, b, a ); /* ur */
    gf_mul ( a, c, d ); /* ur (aZX-dYT) */
    gf_add ( d, b, b ); /* 2u = -2au since a=-1 */
    gf_mul ( c, d, p->z ); /* 2uZ */
    cond_neg ( b, ~hibit(c) ); /* u <- -u if negative. */
    gf_mul ( c, b, p->y );
    gf_add ( a, a, c );
    cond_neg ( a, hibit(a) ); /* pick the canonical ("low") representative */
    gf_encode(ser, a);
}
  413. /**
  414. * Deserialize a bool, return TRUE if < p.
  415. */
  416. static decaf_bool_t gf_deser(gf s, const unsigned char ser[SER_BYTES]) {
  417. return field_deserialize((field_t *)s, ser);
  418. }
/** Decode a serialized point. Returns an all-ones mask on success; zero if
 * the encoding is non-canonical, out of range, not on the curve, or the
 * identity when allow_identity is not set. Constant time. */
decaf_bool_t API_NS(point_decode) (
    point_t p,
    const unsigned char ser[SER_BYTES],
    decaf_bool_t allow_identity
) {
    gf s, a, b, c, d;
    decaf_bool_t succ = gf_deser(s, ser), zero = gf_eq(s, ZERO);
    succ &= allow_identity | ~zero;
    succ &= ~hibit(s); /* canonical encodings use the "low" representative */
    gf_sqr ( a, s );
    gf_sub ( p->z, ONE, a ); /* 1-s^2 = 1+as^2 since a=-1 */
    gf_sqr ( b, p->z );
    gf_mlw ( c, a, 4-4*EDWARDS_D );
    gf_add ( c, c, b ); /* u = Z^2 - 4ds^2 with d = EDWARDS_D-1 */
    gf_mul ( b, c, a );
    succ &= gf_isqrt_chk ( d, b, DECAF_TRUE ); /* v <- 1/sqrt(us^2) */
    gf_mul ( b, c, d );
    cond_neg ( d, hibit(b) ); /* v <- -v if uv negative */
    gf_add ( p->x, s, s ); /* X = 2s */
    gf_mul ( c, d, s );
    gf_sub ( b, TWO, p->z );
    gf_mul ( a, b, c ); /* vs(2-Z) */
    gf_mul ( p->y,a,p->z ); /* Y = wZ */
    gf_mul ( p->t,a,p->x ); /* T = wX */
    /* s==0 gives Y=0; subtracting the -1 mask forces Y=1 for the identity. */
    p->y->limb[0] -= zero;
    /* TODO: do something safe-ish if ~succ? */
    return succ;
}
/** p = q - r. Same structure as point_add, with the (Y-X, Y+X) roles of r
 * and two signs exchanged to effect the negation. */
void API_NS(point_sub) (
    point_t p,
    const point_t q,
    const point_t r
) {
    gf a, b, c, d;
    gf_sub_nr ( b, q->y, q->x );
    gf_sub_nr ( d, r->y, r->x );
    gf_add_nr ( c, r->y, r->x );
    gf_mul ( a, c, b );
    gf_add_nr ( b, q->y, q->x );
    gf_mul ( p->y, d, b );
    gf_mul ( b, r->t, q->t );
    gf_mlw ( p->x, b, 2-2*EDWARDS_D );
    gf_add_nr ( b, a, p->y );
    gf_sub_nr ( c, p->y, a );
    gf_mul ( a, q->z, r->z );
    gf_add_nr ( a, a, a );
    gf_sub_nr ( p->y, a, p->x ); /* sign flipped relative to point_add */
    gf_add_nr ( a, a, p->x );
    gf_mul ( p->z, a, p->y );
    gf_mul ( p->x, p->y, c );
    gf_mul ( p->y, a, b );
    gf_mul ( p->t, b, c );
}
/** p = q + r, in extended coordinates (T must be valid on both inputs;
 * all four output coordinates are computed). */
void API_NS(point_add) (
    point_t p,
    const point_t q,
    const point_t r
) {
    gf a, b, c, d;
    gf_sub_nr ( b, q->y, q->x );
    gf_sub_nr ( c, r->y, r->x );
    gf_add_nr ( d, r->y, r->x );
    gf_mul ( a, c, b );
    gf_add_nr ( b, q->y, q->x );
    gf_mul ( p->y, d, b );
    gf_mul ( b, r->t, q->t );
    gf_mlw ( p->x, b, 2-2*EDWARDS_D );
    gf_add_nr ( b, a, p->y );
    gf_sub_nr ( c, p->y, a );
    gf_mul ( a, q->z, r->z );
    gf_add_nr ( a, a, a );
    gf_add_nr ( p->y, a, p->x );
    gf_sub_nr ( a, a, p->x );
    gf_mul ( p->z, a, p->y );
    gf_mul ( p->x, p->y, c );
    gf_mul ( p->y, a, b );
    gf_mul ( p->t, b, c );
}
/** p = 2q. When before_double is nonzero, skip computing T: the caller
 * will immediately double again, and doubling doesn't read T. */
snv point_double_internal (
    point_t p,
    const point_t q,
    decaf_bool_t before_double
) {
    gf a, b, c, d;
    gf_sqr ( c, q->x );
    gf_sqr ( a, q->y );
    gf_add_nr ( d, c, a );
    gf_add_nr ( p->t, q->y, q->x );
    gf_sqr ( b, p->t );
    gf_sub_nr_x ( b, b, d, 3 ); /* larger bias: this difference can be more negative */
    gf_sub_nr ( p->t, a, c );
    gf_sqr ( p->x, q->z );
    gf_add_nr ( p->z, p->x, p->x );
    gf_sub_nr_x ( a, p->z, p->t, 4 );
    gf_mul ( p->x, a, b );
    gf_mul ( p->z, p->t, a );
    gf_mul ( p->y, p->t, d );
    if (!before_double) gf_mul ( p->t, b, d );
}
  518. void API_NS(point_double)(point_t p, const point_t q) {
  519. point_double_internal(p,q,0);
  520. }
  521. void API_NS(point_negate) (
  522. point_t nega,
  523. const point_t a
  524. ) {
  525. gf_sub(nega->x, ZERO, a->x);
  526. gf_cpy(nega->y, a->y);
  527. gf_cpy(nega->z, a->z);
  528. gf_sub(nega->t, ZERO, a->t);
  529. }
/** Little-endian decode of up to nbytes bytes into a scalar.
 * Limbs beyond nbytes are zeroed; no modular reduction is performed. */
siv scalar_decode_short (
    scalar_t s,
    const unsigned char ser[SER_BYTES],
    unsigned int nbytes
) {
    unsigned int i,j,k=0;
    for (i=0; i<SCALAR_LIMBS; i++) {
        decaf_word_t out = 0;
        for (j=0; j<sizeof(decaf_word_t) && k<nbytes; j++,k++) {
            out |= ((decaf_word_t)ser[k])<<(8*j);
        }
        s->limb[i] = out;
    }
}
/** Decode SER_BYTES into a scalar, reducing mod the scalar modulus.
 * Returns all-ones iff the input was already fully reduced (< modulus). */
decaf_bool_t API_NS(scalar_decode)(
    scalar_t s,
    const unsigned char ser[SER_BYTES]
) {
    unsigned int i;
    scalar_decode_short(s, ser, SER_BYTES);
    /* accum ends at -1 iff s < modulus (the borrow survives to the top). */
    decaf_sdword_t accum = 0;
    for (i=0; i<SCALAR_LIMBS; i++) {
        accum = (accum + s->limb[i] - sc_p->limb[i]) >> WBITS;
    }
    API_NS(scalar_mul)(s,s,API_NS(scalar_one)); /* ham-handed reduce */
    return accum;
}
/** Securely zeroize size bytes at s, in a way the compiler should not
 * optimize away (memset_s when available, volatile stores otherwise). */
void decaf_bzero (
    void *s,
    size_t size
) {
#ifdef __STDC_LIB_EXT1__
    memset_s(s, size, 0, size);
#else
    /* Head: byte stores until word-aligned; middle: word stores; tail: bytes.
     * The volatile qualifier forces every store to be emitted. */
    const size_t sw = sizeof(decaf_word_t);
    volatile uint8_t *destroy = (volatile uint8_t *)s;
    for (; size && ((uintptr_t)destroy)%sw; size--, destroy++)
        *destroy = 0;
    for (; size >= sw; size -= sw, destroy += sw)
        *(volatile decaf_word_t *)destroy = 0;
    for (; size; size--, destroy++)
        *destroy = 0;
#endif
}
  574. void API_NS(scalar_destroy) (
  575. scalar_t scalar
  576. ) {
  577. decaf_bzero(scalar, sizeof(scalar_t));
  578. }
  579. static inline void ignore_result ( decaf_bool_t boo ) {
  580. (void)boo;
  581. }
/** Decode an arbitrary-length little-endian byte string into a scalar,
 * reduced mod the scalar modulus. Works in SER_BYTES chunks from the most
 * significant end, shifting the accumulator up a full chunk each round via
 * a Montgomery multiply by sc_r2 (t1 <- t1*R mod p). */
void API_NS(scalar_decode_long)(
    scalar_t s,
    const unsigned char *ser,
    size_t ser_len
) {
    if (ser_len == 0) {
        API_NS(scalar_copy)(s, API_NS(scalar_zero));
        return;
    }
    size_t i;
    scalar_t t1, t2;
    /* i = offset of the last (possibly short) chunk. */
    i = ser_len - (ser_len%SER_BYTES);
    if (i==ser_len) i -= SER_BYTES;
    scalar_decode_short(t1, &ser[i], ser_len-i);
    if (ser_len == sizeof(scalar_t)) {
        assert(i==0);
        /* ham-handed reduce */
        API_NS(scalar_mul)(s,t1,API_NS(scalar_one));
        API_NS(scalar_destroy)(t1);
        return;
    }
    while (i) {
        i -= SER_BYTES;
        sc_montmul(t1,t1,sc_r2); /* shift accumulator up one chunk */
        ignore_result( API_NS(scalar_decode)(t2, ser+i) );
        API_NS(scalar_add)(t1, t1, t2);
    }
    API_NS(scalar_copy)(s, t1);
    API_NS(scalar_destroy)(t1);
    /* NOTE(review): if the while loop never ran, t2 is uninitialized here;
     * destroying it only writes zeros, so this is harmless but untidy. */
    API_NS(scalar_destroy)(t2);
}
/** Serialize a scalar to little-endian bytes (SCALAR_LIMBS words' worth). */
void API_NS(scalar_encode)(
    unsigned char ser[SER_BYTES],
    const scalar_t s
) {
    unsigned int i,j,k=0;
    for (i=0; i<SCALAR_LIMBS; i++) {
        for (j=0; j<sizeof(decaf_word_t); j++,k++) {
            ser[k] = s->limb[i] >> (8*j);
        }
    }
}
  624. /* Operations on [p]niels */
  625. siv cond_neg_niels (
  626. niels_t n,
  627. decaf_bool_t neg
  628. ) {
  629. cond_swap(n->a, n->b, neg);
  630. cond_neg(n->c, neg);
  631. }
/** Convert a point to projective Niels form:
 * a = Y-X, b = Y+X, c = T*(2d-2), z = 2Z. */
static void pt_to_pniels (
    pniels_t b,
    const point_t a
) {
    gf_sub ( b->n->a, a->y, a->x );
    gf_add ( b->n->b, a->x, a->y );
    gf_mlw ( b->n->c, a->t, 2*EDWARDS_D-2 );
    gf_add ( b->z, a->z, a->z );
}
/** Convert projective Niels back to extended coordinates.
 * Note the statement order: e->y holds an intermediate (b-a) that is
 * consumed before being overwritten with the final Y. */
static void pniels_to_pt (
    point_t e,
    const pniels_t d
) {
    gf eu;
    gf_add ( eu, d->n->b, d->n->a ); /* b+a = 2y (scaled) */
    gf_sub ( e->y, d->n->b, d->n->a ); /* b-a = 2x (scaled) */
    gf_mul ( e->t, e->y, eu);
    gf_mul ( e->x, d->z, e->y );
    gf_mul ( e->y, d->z, eu );
    gf_sqr ( e->z, d->z );
}
/** Convert (affine) Niels form to extended coordinates with Z = 1.
 * b+a and b-a recover the scaled y and x respectively. */
snv niels_to_pt (
    point_t e,
    const niels_t n
) {
    gf_add ( e->y, n->b, n->a );
    gf_sub ( e->x, n->b, n->a );
    gf_mul ( e->t, e->y, e->x );
    gf_cpy ( e->z, ONE );
}
/** d += e (mixed addition with an affine Niels point).
 * When before_double is set, the final T is skipped because the caller
 * will immediately double, which does not read T. */
snv add_niels_to_pt (
    point_t d,
    const niels_t e,
    decaf_bool_t before_double
) {
    gf a, b, c;
    gf_sub_nr ( b, d->y, d->x );
    gf_mul ( a, e->a, b );
    gf_add_nr ( b, d->x, d->y );
    gf_mul ( d->y, e->b, b );
    gf_mul ( d->x, e->c, d->t );
    gf_add_nr ( c, a, d->y );
    gf_sub_nr ( b, d->y, a );
    gf_sub_nr ( d->y, d->z, d->x );
    gf_add_nr ( a, d->x, d->z );
    gf_mul ( d->z, a, d->y );
    gf_mul ( d->x, d->y, b );
    gf_mul ( d->y, a, c );
    if (!before_double) gf_mul ( d->t, b, c );
}
/** d -= e (mixed subtraction of an affine Niels point).
 * Identical to add_niels_to_pt except e->a/e->b are swapped and the
 * Z +/- X terms are exchanged, which effects the negation. */
snv sub_niels_from_pt (
    point_t d,
    const niels_t e,
    decaf_bool_t before_double
) {
    gf a, b, c;
    gf_sub_nr ( b, d->y, d->x );
    gf_mul ( a, e->b, b );
    gf_add_nr ( b, d->x, d->y );
    gf_mul ( d->y, e->a, b );
    gf_mul ( d->x, e->c, d->t );
    gf_add_nr ( c, a, d->y );
    gf_sub_nr ( b, d->y, a );
    gf_add_nr ( d->y, d->z, d->x );
    gf_sub_nr ( a, d->z, d->x );
    gf_mul ( d->z, a, d->y );
    gf_mul ( d->x, d->y, b );
    gf_mul ( d->y, a, c );
    if (!before_double) gf_mul ( d->t, b, c );
}
  702. sv add_pniels_to_pt (
  703. point_t p,
  704. const pniels_t pn,
  705. decaf_bool_t before_double
  706. ) {
  707. gf L0;
  708. gf_mul ( L0, p->z, pn->z );
  709. gf_cpy ( p->z, L0 );
  710. add_niels_to_pt( p, pn->n, before_double );
  711. }
  712. sv sub_pniels_from_pt (
  713. point_t p,
  714. const pniels_t pn,
  715. decaf_bool_t before_double
  716. ) {
  717. gf L0;
  718. gf_mul ( L0, p->z, pn->z );
  719. gf_cpy ( p->z, L0 );
  720. sub_niels_from_pt( p, pn->n, before_double );
  721. }
extern const scalar_t API_NS(point_scalarmul_adjustment);
/* TODO: get rid of big_register_t dependencies? */
/** Constant-time table lookup: copy entry idx of n_table entries (each
 * elem_bytes long; elem_bytes is assumed to be a multiple of
 * sizeof(big_register_t)) from table_ into out_. Every entry is read and
 * masked, so the memory access pattern is independent of idx. */
siv constant_time_lookup_xx (
    void *__restrict__ out_,
    const void *table_,
    decaf_word_t elem_bytes,
    decaf_word_t n_table,
    decaf_word_t idx
) {
    big_register_t big_one = br_set_to_mask(1), big_i = br_set_to_mask(idx);
    big_register_t *out = (big_register_t *)out_;
    const unsigned char *table = (const unsigned char *)table_;
    word_t j,k;
    /* Entry 0: initialize out, masked by (idx == 0). */
    big_register_t br_mask = br_is_zero(big_i);
    for (k=0; k<elem_bytes/sizeof(big_register_t); k++)
        out[k] = br_mask & *(const big_register_t*)(&table[k*sizeof(big_register_t)]);
    big_i-=big_one;
    /* Entries 1..n-1: OR in entry j, masked by (idx == j). */
    for (j=1; j<n_table; j++, big_i-=big_one) {
        br_mask = br_is_zero(big_i);
        for (k=0; k<elem_bytes/sizeof(big_register_t); k++) {
            out[k] |= br_mask & *(const big_register_t*)(&table[k*sizeof(big_register_t)+j*elem_bytes]);
        }
    }
}
/** Fill multiples[i] = (2i+1)*b in pniels form, for i in [0, ntable). */
snv prepare_fixed_window(
    pniels_t *multiples,
    const point_t b,
    int ntable
) {
    point_t tmp;
    pniels_t pn;
    int i;
    point_double_internal(tmp, b, 0);
    pt_to_pniels(pn, tmp); /* pn = 2b, the stride between table entries */
    pt_to_pniels(multiples[0], b);
    API_NS(point_copy)(tmp, b);
    for (i=1; i<ntable; i++) {
        add_pniels_to_pt(tmp, pn, 0);
        pt_to_pniels(multiples[i], tmp);
    }
}
/** a = scalar * b, constant time, via a signed fixed-window method
 * with a table of odd multiples of b. */
void API_NS(point_scalarmul) (
    point_t a,
    const point_t b,
    const scalar_t scalar
) {
    const int WINDOW = DECAF_WINDOW_BITS,
        WINDOW_MASK = (1<<WINDOW)-1,
        WINDOW_T_MASK = WINDOW_MASK >> 1,
        NTABLE = 1<<(WINDOW-1);
    /* Adjust then halve the scalar; the precomputed adjustment constant
     * makes the signed all-odd window recoding below come out right
     * (see the pregen tool). */
    scalar_t scalar1x;
    API_NS(scalar_add)(scalar1x, scalar, API_NS(point_scalarmul_adjustment));
    sc_halve(scalar1x,scalar1x,sc_p);
    /* Set up a precomputed table with odd multiples of b. */
    pniels_t pn, multiples[NTABLE];
    point_t tmp;
    prepare_fixed_window(multiples, b, NTABLE);
    /* Initialize. */
    int i,j,first=1;
    i = SCALAR_BITS - ((SCALAR_BITS-1) % WINDOW) - 1;
    for (; i>=0; i-=WINDOW) {
        /* Fetch another block of bits */
        decaf_word_t bits = scalar1x->limb[i/WBITS] >> (i%WBITS);
        if (i%WBITS >= WBITS-WINDOW && i/WBITS<SCALAR_LIMBS-1) {
            bits ^= scalar1x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
        }
        bits &= WINDOW_MASK;
        /* Split into sign (top bit) and magnitude, constant time. */
        decaf_word_t inv = (bits>>(WINDOW-1))-1;
        bits ^= inv;
        /* Add in from table. Compute t only on last iteration. */
        constant_time_lookup_xx(pn, multiples, sizeof(pn), NTABLE, bits & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv);
        if (first) {
            pniels_to_pt(tmp, pn);
            first = 0;
        } else {
            /* Using Hisil et al's lookahead method instead of extensible here
             * for no particular reason. Double WINDOW times, but only compute t on
             * the last one.
             */
            for (j=0; j<WINDOW-1; j++)
                point_double_internal(tmp, tmp, -1);
            point_double_internal(tmp, tmp, 0);
            add_pniels_to_pt(tmp, pn, i ? -1 : 0);
        }
    }
    /* Write out the answer */
    API_NS(point_copy)(a,tmp);
}
/** a = scalarb*b + scalarc*c, constant time: the same signed fixed-window
 * method as point_scalarmul, run on two tables sharing the doublings. */
void API_NS(point_double_scalarmul) (
    point_t a,
    const point_t b,
    const scalar_t scalarb,
    const point_t c,
    const scalar_t scalarc
) {
    const int WINDOW = DECAF_WINDOW_BITS,
        WINDOW_MASK = (1<<WINDOW)-1,
        WINDOW_T_MASK = WINDOW_MASK >> 1,
        NTABLE = 1<<(WINDOW-1);
    /* Adjust and halve both scalars for the signed all-odd recoding. */
    scalar_t scalar1x, scalar2x;
    API_NS(scalar_add)(scalar1x, scalarb, API_NS(point_scalarmul_adjustment));
    sc_halve(scalar1x,scalar1x,sc_p);
    API_NS(scalar_add)(scalar2x, scalarc, API_NS(point_scalarmul_adjustment));
    sc_halve(scalar2x,scalar2x,sc_p);
    /* Set up a precomputed table with odd multiples of b. */
    pniels_t pn, multiples1[NTABLE], multiples2[NTABLE];
    point_t tmp;
    prepare_fixed_window(multiples1, b, NTABLE);
    prepare_fixed_window(multiples2, c, NTABLE);
    /* Initialize. */
    int i,j,first=1;
    i = SCALAR_BITS - ((SCALAR_BITS-1) % WINDOW) - 1;
    for (; i>=0; i-=WINDOW) {
        /* Fetch another block of bits */
        decaf_word_t bits1 = scalar1x->limb[i/WBITS] >> (i%WBITS),
            bits2 = scalar2x->limb[i/WBITS] >> (i%WBITS);
        if (i%WBITS >= WBITS-WINDOW && i/WBITS<SCALAR_LIMBS-1) {
            bits1 ^= scalar1x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
            bits2 ^= scalar2x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
        }
        bits1 &= WINDOW_MASK;
        bits2 &= WINDOW_MASK;
        /* Split each digit into sign and magnitude, constant time. */
        decaf_word_t inv1 = (bits1>>(WINDOW-1))-1;
        decaf_word_t inv2 = (bits2>>(WINDOW-1))-1;
        bits1 ^= inv1;
        bits2 ^= inv2;
        /* Add in from table. Compute t only on last iteration. */
        constant_time_lookup_xx(pn, multiples1, sizeof(pn), NTABLE, bits1 & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv1);
        if (first) {
            pniels_to_pt(tmp, pn);
            first = 0;
        } else {
            /* Using Hisil et al's lookahead method instead of extensible here
             * for no particular reason. Double WINDOW times, but only compute t on
             * the last one.
             */
            for (j=0; j<WINDOW-1; j++)
                point_double_internal(tmp, tmp, -1);
            point_double_internal(tmp, tmp, 0);
            add_pniels_to_pt(tmp, pn, 0);
        }
        constant_time_lookup_xx(pn, multiples2, sizeof(pn), NTABLE, bits2 & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv2);
        add_pniels_to_pt(tmp, pn, i?-1:0);
    }
    /* Write out the answer */
    API_NS(point_copy)(a,tmp);
}
  872. decaf_bool_t API_NS(point_eq) ( const point_t p, const point_t q ) {
  873. /* equality mod 2-torsion compares x/y */
  874. gf a, b;
  875. gf_mul ( a, p->y, q->x );
  876. gf_mul ( b, q->y, p->x );
  877. return gf_eq(a,b);
  878. }
/** Elligator-style map from a SER_BYTES string to a curve point.
 * Returns the hint bits that invert_elligator_nonuniform needs to recover
 * the preimage: bit0 = ~square, bit1 = sign of t/s, bit2 = sign of r0,
 * bit3 = input was not reduced mod p. Constant time. */
unsigned char API_NS(point_from_hash_nonuniform) (
    point_t p,
    const unsigned char ser[SER_BYTES]
) {
    gf r0,r,a,b,c,dee,D,N,rN,e;
    decaf_bool_t over = ~gf_deser(r0,ser); /* all-ones iff input >= p */
    decaf_bool_t sgn_r0 = hibit(r0);
    gf_canon(r0);
    gf_sqr(a,r0);
    gf_sub(r,ZERO,a); /*gf_mlw(r,a,QUADRATIC_NONRESIDUE);*/
    gf_mlw(dee,ONE,EDWARDS_D);
    gf_mlw(c,r,EDWARDS_D);
    /* Compute D := (dr+a-d)(dr-ar-d) with a=1 */
    gf_sub(a,c,dee);
    gf_add(a,a,ONE);
    decaf_bool_t special_identity_case = gf_eq(a,ZERO);
    gf_sub(b,c,r);
    gf_sub(b,b,dee);
    gf_mul(D,a,b);
    /* compute N := (r+1)(a-2d) */
    gf_add(a,r,ONE);
    gf_mlw(N,a,1-2*EDWARDS_D);
    /* e = +-1/sqrt(+-ND) */
    gf_mul(rN,r,N);
    gf_mul(a,rN,D);
    decaf_bool_t square = gf_isqrt_chk(e,a,DECAF_FALSE);
    decaf_bool_t r_is_zero = gf_eq(r,ZERO);
    square |= r_is_zero;
    square |= special_identity_case;
    /* b <- t/s */
    cond_sel(c,r0,r,square); /* r? = sqr ? r : 1 */
    /* In two steps to avoid overflow on 32-bit arch */
    gf_mlw(a,c,1-2*EDWARDS_D);
    gf_mlw(b,a,1-2*EDWARDS_D);
    gf_sub(c,r,ONE);
    gf_mul(a,b,c); /* = r? * (r-1) * (a-2d)^2 with a=1 */
    gf_mul(b,a,e);
    cond_neg(b,~square);
    cond_sel(c,r0,ONE,square);
    gf_mul(a,e,c);
    gf_mul(c,a,D); /* 1/s except for sign. FUTURE: simplify using this. */
    gf_sub(b,b,c);
    /* a <- s = e * N * (sqr ? r : r0)
     * e^2 r N D = 1
     * 1/s = 1/(e * N * (sqr ? r : r0)) = e * D * (sqr ? 1 : r0)
     */
    gf_mul(a,N,r0);
    cond_sel(rN,a,rN,square);
    gf_mul(a,rN,e);
    gf_mul(c,a,b);
    /* Normalize/negate */
    decaf_bool_t neg_s = hibit(a)^~square;
    cond_neg(a,neg_s); /* ends up negative if ~square */
    decaf_bool_t sgn_t_over_s = hibit(b)^neg_s;
    sgn_t_over_s &= ~gf_eq(N,ZERO);
    sgn_t_over_s |= gf_eq(D,ZERO);
    /* b <- t */
    cond_sel(b,c,ONE,gf_eq(c,ZERO)); /* 0,0 -> 1,0 */
    /* isogenize */
    gf_sqr(c,a); /* s^2 */
    gf_add(a,a,a); /* 2s */
    gf_add(e,c,ONE);
    gf_mul(p->t,a,e); /* 2s(1+s^2) */
    gf_mul(p->x,a,b); /* 2st */
    gf_sub(a,ONE,c);
    gf_mul(p->y,e,a); /* (1+s^2)(1-s^2) */
    gf_mul(p->z,a,b); /* (1-s^2)t */
    return (~square & 1) | (sgn_t_over_s & 2) | (sgn_r0 & 4) | (over & 8);
}
/**
 * Invert the nonuniform Elligator map: recover a hash preimage of p.
 *
 * The hint byte selects among the possible preimages; its bits mirror the
 * return value of point_from_hash_nonuniform:
 *   bit 0: sign of s, bit 1: sign of t/s, bit 2: sign of r0.
 * Returns an all-ones mask on success, zero if no preimage exists for
 * this (point, hint) combination.
 */
decaf_bool_t
API_NS(invert_elligator_nonuniform) (
    unsigned char recovered_hash[DECAF_255_SER_BYTES],
    const point_t p,
    unsigned char hint
) {
    /* Expand each hint bit to an all-ones/all-zeros mask */
    decaf_bool_t sgn_s = -(hint & 1),
        sgn_t_over_s = -(hint>>1 & 1),
        sgn_r0 = -(hint>>2 & 1);
    gf a, b, c, d;

    /* Recover (s, t/s) from the extended coordinates of p */
    gf_mlw ( a, p->y, 1-EDWARDS_D );
    gf_mul ( c, a, p->t );
    gf_mul ( a, p->x, p->z );
    gf_sub ( d, c, a );
    gf_add ( a, p->z, p->y );
    gf_sub ( b, p->z, p->y );
    gf_mul ( c, b, a );
    gf_mlw ( b, c, -EDWARDS_D );
    gf_isqrt ( a, b );
    gf_mlw ( b, a, -EDWARDS_D );
    gf_mul ( c, b, a );
    gf_mul ( a, c, d );
    gf_add ( d, b, b );
    gf_mul ( c, d, p->z );
    cond_neg ( b, sgn_t_over_s^~hibit(c) );
    cond_neg ( c, sgn_t_over_s^~hibit(c) );
    gf_mul ( d, b, p->y );
    gf_add ( a, a, d );
    cond_neg( a, hibit(a)^sgn_s);

    /* ok, s = a; c = -t/s */
    gf_mul(b,c,a);
    gf_sub(b,ONE,b); /* t+1 */
    gf_sqr(c,a); /* s^2 */
    { /* identity adjustments */
        /* in case of identity, currently c=0, t=0, b=1, will encode to 1 */
        /* if hint is 0, -> 0 */
        /* if hint is to neg t/s, then go to infinity, effectively set s to 1 */
        decaf_bool_t is_identity = gf_eq(p->x,ZERO);
        cond_sel(c,c,ONE,is_identity & sgn_t_over_s);
        cond_sel(b,b,ZERO,is_identity & ~sgn_t_over_s & ~sgn_s); /* identity adjust */
    }
    gf_mlw(d,c,2*EDWARDS_D-1); /* $d = (2d-a)s^2 */
    gf_add(a,b,d); /* num? */
    gf_sub(d,b,d); /* den? */
    gf_mul(b,a,d); /* n*d */
    cond_sel(a,d,a,sgn_s);
    decaf_bool_t succ = gf_isqrt_chk(c,b,DECAF_TRUE);
    gf_mul(b,a,c);
    cond_neg(b, sgn_r0^hibit(b));
    /* Reject r0 == 0 with the "negative r0" hint: no such preimage */
    succ &= ~(gf_eq(b,ZERO) & sgn_r0);
    gf_encode(recovered_hash, b);
    /* TODO: deal with overflow flag */
    return succ;
}
  1002. unsigned char API_NS(point_from_hash_uniform) (
  1003. point_t pt,
  1004. const unsigned char hashed_data[2*SER_BYTES]
  1005. ) {
  1006. point_t pt2;
  1007. unsigned char ret1 =
  1008. API_NS(point_from_hash_nonuniform)(pt,hashed_data);
  1009. unsigned char ret2 =
  1010. API_NS(point_from_hash_nonuniform)(pt2,&hashed_data[SER_BYTES]);
  1011. API_NS(point_add)(pt,pt,pt2);
  1012. return ret1 | (ret2<<4);
  1013. }
  1014. decaf_bool_t
  1015. API_NS(invert_elligator_uniform) (
  1016. unsigned char partial_hash[2*SER_BYTES],
  1017. const point_t p,
  1018. unsigned char hint
  1019. ) {
  1020. point_t pt2;
  1021. API_NS(point_from_hash_nonuniform)(pt2,&partial_hash[SER_BYTES]);
  1022. API_NS(point_sub)(pt2,p,pt2);
  1023. return API_NS(invert_elligator_nonuniform)(partial_hash,pt2,hint);
  1024. }
  1025. decaf_bool_t API_NS(point_valid) (
  1026. const point_t p
  1027. ) {
  1028. gf a,b,c;
  1029. gf_mul(a,p->x,p->y);
  1030. gf_mul(b,p->z,p->t);
  1031. decaf_bool_t out = gf_eq(a,b);
  1032. gf_sqr(a,p->x);
  1033. gf_sqr(b,p->y);
  1034. gf_sub(a,b,a);
  1035. gf_sqr(b,p->t);
  1036. gf_mlw(c,b,1-EDWARDS_D);
  1037. gf_sqr(b,p->z);
  1038. gf_sub(b,b,c);
  1039. out &= gf_eq(a,b);
  1040. out &= ~gf_eq(p->z,ZERO);
  1041. return out;
  1042. }
  1043. void API_NS(point_debugging_2torque) (
  1044. point_t q,
  1045. const point_t p
  1046. ) {
  1047. gf_sub(q->x,ZERO,p->x);
  1048. gf_sub(q->y,ZERO,p->y);
  1049. gf_cpy(q->z,p->z);
  1050. gf_cpy(q->t,p->t);
  1051. }
/**
 * Batch field inversion using Montgomery's simultaneous-inversion trick:
 * one gf_invert plus O(n) multiplications.
 *
 * On return out[i] = 1/in[i]. Requires n > 1; out must not alias in
 * (enforced by __restrict__).
 */
static void gf_batch_invert (
    gf *__restrict__ out,
    /* const */ gf *in,
    unsigned int n
) {
    gf t1;
    assert(n>1);

    /* Forward pass: out[i] = in[0] * in[1] * ... * in[i-1], for i >= 1. */
    gf_cpy(out[1], in[0]);
    int i;
    for (i=1; i<(int) (n-1); i++) {
        gf_mul(out[i+1], out[i], in[i]);
    }
    /* out[0] = 1 / (in[0] * ... * in[n-1]) */
    gf_mul(out[0], out[n-1], in[n-1]);
    gf_invert(out[0], out[0]);

    /* Backward pass: peel off one inverse at a time.
     * Invariant entering iteration i: out[0] = 1/(in[0]*...*in[i]).
     */
    for (i=n-1; i>0; i--) {
        gf_mul(t1, out[i], out[0]);
        gf_cpy(out[i], t1);
        gf_mul(t1, out[0], in[i]);
        gf_cpy(out[0], t1);
    }
}
  1073. static void batch_normalize_niels (
  1074. niels_t *table,
  1075. gf *zs,
  1076. gf *zis,
  1077. int n
  1078. ) {
  1079. int i;
  1080. gf product;
  1081. gf_batch_invert(zis, zs, n);
  1082. for (i=0; i<n; i++) {
  1083. gf_mul(product, table[i]->a, zis[i]);
  1084. gf_canon(product);
  1085. gf_cpy(table[i]->a, product);
  1086. gf_mul(product, table[i]->b, zis[i]);
  1087. gf_canon(product);
  1088. gf_cpy(table[i]->b, product);
  1089. gf_mul(product, table[i]->c, zis[i]);
  1090. gf_canon(product);
  1091. gf_cpy(table[i]->c, product);
  1092. }
  1093. }
/**
 * Build a comb precomputation table for fixed-base scalar multiplication.
 *
 * Produces n sub-tables of 2^(t-1) normalized niels points each, covering
 * the scalar with n combs of t teeth spaced s bits apart
 * (assert: n*t*s >= SCALAR_BITS). Entries within each sub-table are
 * visited in Gray-code order so each differs from its neighbor by a
 * single add/subtract of a cached doubling.
 */
void API_NS(precompute) (
    precomputed_s *table,
    const point_t base
) {
    const unsigned int n = DECAF_COMBS_N, t = DECAF_COMBS_T, s = DECAF_COMBS_S;
    assert(n*t*s >= SCALAR_BITS);

    point_t working, start, doubles[t-1];
    API_NS(point_copy)(working, base);
    pniels_t pn_tmp;
    /* z coordinates collected here and batch-inverted at the end. */
    gf zs[n<<(t-1)], zis[n<<(t-1)];

    unsigned int i,j,k;

    /* Compute n tables */
    for (i=0; i<n; i++) {
        /* Doubling phase: accumulate the sum of this comb's teeth into
         * `start`, caching intermediate doublings for the Gray phase. */
        for (j=0; j<t; j++) {
            if (j) API_NS(point_add)(start, start, working);
            else API_NS(point_copy)(start, working);

            /* Nothing left to double after the very last tooth. */
            if (j==t-1 && i==n-1) break;

            point_double_internal(working, working,0);
            if (j<t-1) API_NS(point_copy)(doubles[j], working);

            for (k=0; k<s-1; k++)
                point_double_internal(working, working, k<s-2);
        }

        /* Gray-code phase: walk all 2^(t-1) sign patterns, storing each
         * combination into its table slot. */
        for (j=0;; j++) {
            int gray = j ^ (j>>1);
            int idx = (((i+1)<<(t-1))-1) ^ gray;

            pt_to_pniels(pn_tmp, start);
            memcpy(table->table[idx], pn_tmp->n, sizeof(pn_tmp->n));
            gf_cpy(zs[idx], pn_tmp->z);

            if (j >= (1u<<(t-1)) - 1) break;

            /* k = index of the single bit that flips in the Gray code. */
            int delta = (j+1) ^ ((j+1)>>1) ^ gray;

            for (k=0; delta>1; k++)
                delta >>=1;

            if (gray & (1<<k)) {
                API_NS(point_add)(start, start, doubles[k]);
            } else {
                API_NS(point_sub)(start, start, doubles[k]);
            }
        }
    }

    batch_normalize_niels(table->table,zs,zis,n<<(t-1));
}
/* Scalar offset applied before comb evaluation; defined in the generated
 * precomputed-table source. */
extern const scalar_t API_NS(precomputed_scalarmul_adjustment);

/** Constant-time lookup of table[idx] into ni: delegates to
 *  constant_time_lookup_xx with element size sizeof(niels_s), which does
 *  not leak idx through timing. */
siv constant_time_lookup_xx_niels (
    niels_s *__restrict__ ni,
    const niels_t *table,
    int nelts,
    int idx
) {
    constant_time_lookup_xx(ni, table, sizeof(niels_s), nelts, idx);
}
/**
 * Constant-time fixed-base scalar multiplication using the comb table
 * built by API_NS(precompute): out = scalar * (table's base point).
 *
 * The scalar is first adjusted and halved mod the group order so that
 * the signed-comb recoding below is valid for all inputs.
 */
void API_NS(precomputed_scalarmul) (
    point_t out,
    const precomputed_s *table,
    const scalar_t scalar
) {
    int i;
    unsigned j,k;
    const unsigned int n = DECAF_COMBS_N, t = DECAF_COMBS_T, s = DECAF_COMBS_S;

    scalar_t scalar1x;
    API_NS(scalar_add)(scalar1x, scalar, API_NS(precomputed_scalarmul_adjustment));
    sc_halve(scalar1x,scalar1x,sc_p);

    niels_t ni;

    /* Process bit position i of every comb, doubling once per position. */
    for (i=s-1; i>=0; i--) {
        if (i != (int)s-1) point_double_internal(out,out,0);

        for (j=0; j<n; j++) {
            /* Gather the t teeth of comb j at position i. */
            int tab = 0;

            for (k=0; k<t; k++) {
                unsigned int bit = i + s*(k + j*t);
                if (bit < SCALAR_BITS) {
                    tab |= (scalar1x->limb[bit/WBITS] >> (bit%WBITS) & 1) << k;
                }
            }

            /* Signed-window trick: fold the top tooth into a sign mask so
             * only 2^(t-1) table entries are needed. */
            decaf_bool_t invert = (tab>>(t-1))-1;
            tab ^= invert;
            tab &= (1<<(t-1)) - 1;

            constant_time_lookup_xx_niels(ni, &table->table[j<<(t-1)], 1<<(t-1), tab);

            cond_neg_niels(ni, invert);
            if ((i!=s-1)||j) {
                add_niels_to_pt(out, ni, j==n-1 && i);
            } else {
                niels_to_pt(out, ni);
            }
        }
    }
}
  1181. #if DECAF_USE_MONTGOMERY_LADDER
/** Return high bit of x/2 = low bit of x mod p, as an all-ones/zeros mask.
 *  NOTE: canonicalizes x in place (gf arguments are passed by reference). */
static inline decaf_word_t lobit(gf x) {
    gf_canon(x);
    return -(x->limb[0]&1);
}
/**
 * Variable-base scalar multiplication on serialized points, via an
 * augmented Montgomery ladder: scaled = scalar * decode(base).
 *
 * Returns an all-ones mask on success. Invalid encodings are detected
 * during the final recompression rather than up front, so short_circuit
 * is ignored in this (ladder) implementation. If allow_identity is
 * false, the identity encoding is rejected. Constant time with respect
 * to the scalar and the base point.
 */
decaf_bool_t API_NS(direct_scalarmul) (
    uint8_t scaled[SER_BYTES],
    const uint8_t base[SER_BYTES],
    const scalar_t scalar,
    decaf_bool_t allow_identity,
    decaf_bool_t short_circuit
) {
    /* The Montgomery ladder does not short-circuit return on invalid points,
     * since it detects them during recompress.
     */
    (void)short_circuit;

    gf s0, x0, xa, za, xd, zd, xs, zs, L0, L1;
    decaf_bool_t succ = gf_deser ( s0, base );
    succ &= allow_identity |~ gf_eq( s0, ZERO);

    /* Prepare the Montgomery ladder: Q = 1:0, P+Q = P */
    gf_sqr ( xa, s0 );
    gf_cpy ( x0, xa );
    gf_cpy ( za, ONE );
    gf_cpy ( xd, ONE );
    gf_cpy ( zd, ZERO );

    int j;
    decaf_bool_t pflip = 0;
    for (j=SCALAR_BITS-1; j>=0; j--) {
        /* Augmented Montgomery ladder: conditional swaps are deferred by
         * tracking the previous bit in pflip instead of swapping twice. */
        decaf_bool_t flip = -((scalar->limb[j/WBITS]>>(j%WBITS))&1);

        /* Differential add first... */
        gf_add_nr ( xs, xa, za );
        gf_sub_nr ( zs, xa, za );
        gf_add_nr ( xa, xd, zd );
        gf_sub_nr ( za, xd, zd );

        /* Select iff this bit differs from the previous one */
        cond_sel(L0,xa,xs,flip^pflip);
        cond_sel(L1,za,zs,flip^pflip);

        gf_mul ( xd, xa, zs );
        gf_mul ( zd, xs, za );
        gf_add_nr ( xs, xd, zd );
        gf_sub_nr ( zd, xd, zd );
        gf_mul ( zs, zd, s0 );
        gf_sqr ( xa, xs );
        gf_sqr ( za, zs );

        /* ... and then double */
        gf_sqr ( zd, L0 );
        gf_sqr ( L0, L1 );
        gf_sub_nr ( L1, zd, L0 );
        gf_mul ( xd, L0, zd );
        gf_mlw ( zd, L1, 1-EDWARDS_D );
        gf_add_nr ( L0, L0, zd );
        gf_mul ( zd, L0, L1 );

        pflip = flip;
    }
    cond_swap(xa,xd,pflip);
    cond_swap(za,zd,pflip);

    /* OK, time to reserialize! Should be easy (heh, but seriously, TODO: simplify) */
    gf xz_d, xz_a, xz_s, den, L2, L3;
    mask_t zcase, output_zero, sflip, za_zero;
    gf_mul(xz_s, xs, zs);
    gf_mul(xz_d, xd, zd);
    gf_mul(xz_a, xa, za);
    output_zero = gf_eq(xz_d, ZERO);
    xz_d->limb[0] -= output_zero; /* make xz_d always nonzero */
    zcase = output_zero | gf_eq(xz_a, ZERO);
    za_zero = gf_eq(za, ZERO);

    /* Curve test in zcase, compute x0^2 + (2d-4)x0 + 1
     * (we know that x0 = s0^2 is square).
     */
    gf_add(L0,x0,ONE);
    gf_sqr(L1,L0);
    gf_mlw(L0,x0,-4*EDWARDS_D);
    gf_add(L1,L1,L0);
    cond_sel(xz_a,xz_a,L1,zcase);

    /* Compute denominator = x0 xa za xd zd */
    gf_mul(L0, x0, xz_a);
    gf_mul(L1, L0, xz_d);
    gf_isqrt(den, L1);

    /* Check that the square root came out OK. */
    gf_sqr(L2, den);
    gf_mul(L3, L0, L2); /* x0 xa za den^2 = 1/xz_d, for later */
    gf_mul(L0, L1, L2);
    gf_add(L0, L0, ONE);
    succ &= ~hibit(s0) & ~gf_eq(L0, ZERO);

    /* Compute y/x for input and output point. */
    gf_mul(L1, x0, xd);
    gf_sub(L1, zd, L1);
    gf_mul(L0, za, L1); /* L0 = "opq" */
    gf_mul(L1, x0, zd);
    gf_sub(L1, L1, xd);
    gf_mul(L2, xa, L1); /* L2 = "pqr" */
    gf_sub(L1, L0, L2);
    gf_add(L0, L0, L2);
    gf_mul(L2, L1, den); /* L2 = y0 / x0 */
    gf_mul(L1, L0, den); /* L1 = yO / xO */
    sflip = (lobit(L1) ^ lobit(L2)) | za_zero;
    /* OK, done with y-coordinates */

    /* If xa==0 or za ==0: return 0
     * Else if za == 0: return s0 * (sflip ? zd : xd)^2 * L3
     * Else if zd == 0: return s0 * (sflip ? zd : xd)^2 * L3
     * Else if pflip: return xs * zs * (sflip ? zd : xd) * L3
     * Else: return s0 * xs * zs * (sflip ? zd : xd) * den
     */
    cond_sel(xd, xd, zd, sflip); /* xd = actual xd we care about */
    cond_sel(den,den,L3,pflip|zcase);
    cond_sel(xz_s,xz_s,xd,zcase);
    cond_sel(s0,s0,ONE,pflip&~zcase);
    cond_sel(s0,s0,ZERO,output_zero);

    gf_mul(L0,xd,den);
    gf_mul(L1,L0,s0);
    gf_mul(L0,L1,xz_s);
    cond_neg(L0,hibit(L0));
    gf_encode(scaled, L0);

    return succ;
}
  1297. #else /* DECAF_USE_MONTGOMERY_LADDER */
  1298. decaf_bool_t API_NS(direct_scalarmul) (
  1299. uint8_t scaled[SER_BYTES],
  1300. const uint8_t base[SER_BYTES],
  1301. const scalar_t scalar,
  1302. decaf_bool_t allow_identity,
  1303. decaf_bool_t short_circuit
  1304. ) {
  1305. point_t basep;
  1306. decaf_bool_t succ = API_NS(point_decode)(basep, base, allow_identity);
  1307. if (short_circuit & ~succ) return succ;
  1308. API_NS(point_scalarmul)(basep, basep, scalar);
  1309. API_NS(point_encode)(scaled, basep);
  1310. return succ;
  1311. }
  1312. #endif /* DECAF_USE_MONTGOMERY_LADDER */
  1313. /**
  1314. * @cond internal
  1315. * Control for variable-time scalar multiply algorithms.
  1316. */
/* One signed digit of a recoded scalar: `power` is the bit position at
 * which to apply it (-1 terminates the list), `addend` is the odd signed
 * digit value. Produced by recode_wnaf below. */
struct smvt_control {
  int power, addend;
};
/**
 * Recode a scalar into signed sliding-window (wNAF-style) form with
 * window size tableBits, writing (power, addend) digits into `control`
 * and terminating the list with power == -1.
 *
 * Returns the number of real digits written. Variable time in the
 * scalar -- only use for non-secret scalars.
 */
static int recode_wnaf (
    struct smvt_control *control, /* [nbits/(tableBits+1) + 3] */
    const scalar_t scalar,
    unsigned int tableBits
) {
    int current = 0, i, j;
    unsigned int position = 0;

    /* PERF: negate scalar if it's large
     * PERF: this is a pretty simplistic algorithm.  I'm sure there's a faster one...
     * PERF MINOR: not technically WNAF, since last digits can be adjacent.  Could be rtl.
     */
    for (i=SCALAR_BITS-1; i >= 0; i--) {
        int bit = (scalar->limb[i/WORD_BITS] >> (i%WORD_BITS)) & 1;
        current = 2*current + bit;

        /*
         * Sizing: |current| >= 2^(tableBits+1) -> |current| = 2^0
         * So current loses (tableBits+1) bits every time.  It otherwise gains
         * 1 bit per iteration.  The number of iterations is
         * (nbits + 2 + tableBits), and an additional control word is added at
         * the end.  So the total number of control words is at most
         * ceil((nbits+1) / (tableBits+1)) + 2 = floor((nbits)/(tableBits+1)) + 2.
         * There's also the stopper with power -1, for a total of +3.
         */
        if (current >= (2<<tableBits) || current <= -1 - (2<<tableBits)) {
            /* Emit the accumulated window as one odd digit. */
            int delta = (current + 1) >> 1; /* |delta| < 2^tablebits */
            current = -(current & 1);

            /* Strip trailing zeros of delta; they raise the digit's power. */
            for (j=i; (delta & 1) == 0; j++) {
                delta >>= 1;
            }
            control[position].power = j+1;
            control[position].addend = delta;
            position++;
            assert(position <= SCALAR_BITS/(tableBits+1) + 2);
        }
    }

    /* Flush any remaining accumulated digit. */
    if (current) {
        for (j=0; (current & 1) == 0; j++) {
            current >>= 1;
        }
        control[position].power = j;
        control[position].addend = current;
        position++;
        assert(position <= SCALAR_BITS/(tableBits+1) + 2);
    }

    /* Terminator. */
    control[position].power = -1;
    control[position].addend = 0;
    return position;
}
  1368. sv prepare_wnaf_table(
  1369. pniels_t *output,
  1370. const point_t working,
  1371. unsigned int tbits
  1372. ) {
  1373. point_t tmp;
  1374. int i;
  1375. pt_to_pniels(output[0], working);
  1376. if (tbits == 0) return;
  1377. API_NS(point_double)(tmp,working);
  1378. pniels_t twop;
  1379. pt_to_pniels(twop, tmp);
  1380. add_pniels_to_pt(tmp, output[0],0);
  1381. pt_to_pniels(output[1], tmp);
  1382. for (i=2; i < 1<<tbits; i++) {
  1383. add_pniels_to_pt(tmp, twop,0);
  1384. pt_to_pniels(output[i], tmp);
  1385. }
  1386. }
/* The generated fixed-base wNAF table is emitted as raw field elements;
 * reinterpret it here as an array of niels points. */
extern const field_t API_NS(precomputed_wnaf_as_fe)[];
static const niels_t *API_NS(wnaf_base) = (const niels_t *)API_NS(precomputed_wnaf_as_fe);

/* Byte size of the fixed wNAF table; hidden visibility, presumably
 * consumed by the table-generation tool -- TODO confirm. */
const size_t API_NS2(sizeof,precomputed_wnafs) __attribute((visibility("hidden")))
    = sizeof(niels_t)<<DECAF_WNAF_FIXED_TABLE_BITS;
void API_NS(precompute_wnafs) (
    niels_t out[1<<DECAF_WNAF_FIXED_TABLE_BITS],
    const point_t base
) __attribute__ ((visibility ("hidden")));

/**
 * Generate the fixed wNAF table for `base`: odd multiples
 * 1,3,5,...*base, batch-normalized into affine niels form.
 */
void API_NS(precompute_wnafs) (
    niels_t out[1<<DECAF_WNAF_FIXED_TABLE_BITS],
    const point_t base
) {
    pniels_t tmp[1<<DECAF_WNAF_FIXED_TABLE_BITS];
    gf zs[1<<DECAF_WNAF_FIXED_TABLE_BITS], zis[1<<DECAF_WNAF_FIXED_TABLE_BITS];
    int i;
    prepare_wnaf_table(tmp,base,DECAF_WNAF_FIXED_TABLE_BITS);
    /* Split each projective-niels entry into its niels part and its z,
     * collecting the z's for one batch inversion. */
    for (i=0; i<1<<DECAF_WNAF_FIXED_TABLE_BITS; i++) {
        memcpy(out[i], tmp[i]->n, sizeof(niels_t));
        gf_cpy(zs[i], tmp[i]->z);
    }
    batch_normalize_niels(out, zs, zis, 1<<DECAF_WNAF_FIXED_TABLE_BITS);
}
/**
 * Variable-time double scalar multiplication with one fixed base:
 * combo = scalar1 * (precomputed wnaf base) + scalar2 * base2.
 *
 * Both scalars are recoded to signed sliding-window form and the two
 * digit streams are interleaved in a single MSB-first pass. NOT constant
 * time -- only for non-secret scalars (e.g. signature verification).
 */
void API_NS(base_double_scalarmul_non_secret) (
    point_t combo,
    const scalar_t scalar1,
    const point_t base2,
    const scalar_t scalar2
) {
    const int table_bits_var = DECAF_WNAF_VAR_TABLE_BITS,
        table_bits_pre = DECAF_WNAF_FIXED_TABLE_BITS;
    struct smvt_control control_var[SCALAR_BITS/(table_bits_var+1)+3];
    struct smvt_control control_pre[SCALAR_BITS/(table_bits_pre+1)+3];

    int ncb_pre = recode_wnaf(control_pre, scalar1, table_bits_pre);
    int ncb_var = recode_wnaf(control_var, scalar2, table_bits_var);

    pniels_t precmp_var[1<<table_bits_var];
    prepare_wnaf_table(precmp_var, base2, table_bits_var);

    /* Initialize the accumulator from whichever stream has the highest
     * leading digit (both streams' digits are sorted by descending power). */
    int contp=0, contv=0, i = control_var[0].power;

    if (i < 0) {
        /* scalar2 == 0; fall back to the fixed-base stream alone. */
        API_NS(point_copy)(combo, API_NS(point_identity));
        return;
    } else if (i > control_pre[0].power) {
        pniels_to_pt(combo, precmp_var[control_var[0].addend >> 1]);
        contv++;
    } else if (i == control_pre[0].power && i >=0 ) {
        pniels_to_pt(combo, precmp_var[control_var[0].addend >> 1]);
        add_niels_to_pt(combo, API_NS(wnaf_base)[control_pre[0].addend >> 1], i);
        contv++; contp++;
    } else {
        i = control_pre[0].power;
        niels_to_pt(combo, API_NS(wnaf_base)[control_pre[0].addend >> 1]);
        contp++;
    }

    for (i--; i >= 0; i--) {
        int cv = (i==control_var[contv].power), cp = (i==control_pre[contp].power);
        point_double_internal(combo,combo,i && !(cv||cp));

        if (cv) {
            assert(control_var[contv].addend);

            /* Digits are odd; >>1 maps digit (2k+1) to table slot k. */
            if (control_var[contv].addend > 0) {
                add_pniels_to_pt(combo, precmp_var[control_var[contv].addend >> 1], i&&!cp);
            } else {
                sub_pniels_from_pt(combo, precmp_var[(-control_var[contv].addend) >> 1], i&&!cp);
            }
            contv++;
        }

        if (cp) {
            assert(control_pre[contp].addend);

            if (control_pre[contp].addend > 0) {
                add_niels_to_pt(combo, API_NS(wnaf_base)[control_pre[contp].addend >> 1], i);
            } else {
                sub_niels_from_pt(combo, API_NS(wnaf_base)[(-control_pre[contp].addend) >> 1], i);
            }
            contp++;
        }
    }

    /* Both digit streams must be fully consumed. */
    assert(contv == ncb_var); (void)ncb_var;
    assert(contp == ncb_pre); (void)ncb_pre;
}
/** Securely erase a point (decaf_bzero is the library's non-elidable zeroizer). */
void API_NS(point_destroy) (
    point_t point
) {
    decaf_bzero(point, sizeof(point_t));
}
  1469. decaf_bool_t decaf_memeq (
  1470. const void *data1_,
  1471. const void *data2_,
  1472. size_t size
  1473. ) {
  1474. const unsigned char *data1 = (const unsigned char *)data1_;
  1475. const unsigned char *data2 = (const unsigned char *)data2_;
  1476. unsigned char ret = 0;
  1477. for (; size; size--, data1++, data2++) {
  1478. ret |= *data1 ^ *data2;
  1479. }
  1480. return (((decaf_dword_t)ret) - 1) >> 8;
  1481. }
/** Securely erase a precomputed comb table (size taken from the exported
 *  sizeof constant, since precomputed_s is opaque to callers). */
void API_NS(precomputed_destroy) (
    precomputed_s *pre
) {
    decaf_bzero(pre, API_NS2(sizeof,precomputed_s));
}