/** @brief Decaf high-level functions. */

#define _XOPEN_SOURCE 600 /* for posix_memalign */
#include "word.h"
#include "field.h"

#include <decaf.h>

/* Template stuff */
#define API_NS(_id) $(c_ns)_##_id
#define SCALAR_BITS $(C_NS)_SCALAR_BITS
#define SCALAR_SER_BYTES $(C_NS)_SCALAR_BYTES
#define SCALAR_LIMBS $(C_NS)_SCALAR_LIMBS
#define scalar_t API_NS(scalar_t)
#define point_t API_NS(point_t)
#define precomputed_s API_NS(precomputed_s)
#define IMAGINE_TWIST $(imagine_twist)
#define COFACTOR $(cofactor)

/* Comb config: number of combs, n, t, s. */
#define COMBS_N $(combs.n)
#define COMBS_T $(combs.t)
#define COMBS_S $(combs.s)
#define DECAF_WINDOW_BITS $(window_bits)
#define DECAF_WNAF_FIXED_TABLE_BITS $(wnaf.fixed)
#define DECAF_WNAF_VAR_TABLE_BITS $(wnaf.var)

static const int EDWARDS_D = $(d);

static const scalar_t point_scalarmul_adjustment = {{{
    $(ser((2**(scalar_bits-1+window_bits - ((scalar_bits-1)%window_bits)) - 1) % q,64,"SC_LIMB"))
}}}, precomputed_scalarmul_adjustment = {{{
    $(ser((2**(combs.n*combs.t*combs.s) - 1) % q,64,"SC_LIMB"))
}}};

const uint8_t API_NS(x_base_point)[X_SER_BYTES] = { $(ser(mont_base,8)) };

#if COFACTOR==8
    static const gf SQRT_ONE_MINUS_D = {FIELD_LITERAL(
        $(ser(msqrt(1-d,modulus),gf_lit_limb_bits) if cofactor == 8 else "/* NONE */")
    )};
#endif

/* End of template stuff */

/* Sanity */
#if (COFACTOR == 8) && !IMAGINE_TWIST
/* FUTURE: Curve41417 doesn't have these properties. */
#error "Currently require IMAGINE_TWIST (and thus p=5 mod 8) for cofactor 8"
#endif

#if IMAGINE_TWIST && (P_MOD_8 != 5)
#error "Cannot use IMAGINE_TWIST except for p == 5 mod 8"
#endif

#if (COFACTOR != 8) && (COFACTOR != 4)
#error "COFACTOR must be 4 or 8"
#endif

#if IMAGINE_TWIST
extern const gf SQRT_MINUS_ONE;
#endif
#define WBITS DECAF_WORD_BITS /* NB this may be different from ARCH_WORD_BITS */

extern const point_t API_NS(point_base);

/* Projective Niels coordinates */
typedef struct { gf a, b, c; } niels_s, niels_t[1];
typedef struct { niels_t n; gf z; } __attribute__((aligned(sizeof(big_register_t))))
    pniels_s, pniels_t[1];

/* Precomputed base */
struct precomputed_s { niels_t table [COMBS_N<<(COMBS_T-1)]; };

extern const gf API_NS(precomputed_base_as_fe)[];
const precomputed_s *API_NS(precomputed_base) =
    (const precomputed_s *) &API_NS(precomputed_base_as_fe);

const size_t API_NS(sizeof_precomputed_s) = sizeof(precomputed_s);
const size_t API_NS(alignof_precomputed_s) = sizeof(big_register_t);
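
/* Inversion here is derived from the field layer's inverse-square-root
 * primitive: gf_invert computes 1/x as x * (1/sqrt(x^2))^2, reusing the
 * gf_isr addition chain instead of carrying a separate Fermat inversion
 * chain. */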
/** Inverse. */
static void
gf_invert(gf y, const gf x) {
    gf t1, t2;
    gf_sqr(t1, x); // o^2
    mask_t ret = gf_isr(t2, t1); // +-1/sqrt(o^2) = +-1/o
    (void)ret; assert(ret);
    gf_sqr(t1, t2);
    gf_mul(t2, t1, x); // not direct to y in case of alias.
    gf_copy(y, t2);
}

#if COFACTOR==8
/** Return high bit of x = low bit of 2x mod p */
static mask_t gf_lobit(const gf x) {
    gf y;
    gf_copy(y,x);
    gf_strong_reduce(y);
    return -(y->limb[0]&1);
}
#endif

/** identity = (0,1) */
const point_t API_NS(point_identity) = {{{{{0}}},{{{1}}},{{{1}}},{{{0}}}}};
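
/* deisogenize maps an Edwards point through the isogeny to the Jacobi
 * quartic and emits the (s, -t/s) coordinates the Decaf encoding is built
 * on.  The toggle_* masks choose among the equivalent images of the
 * cofactor-related points, letting the encoder pick the canonical
 * (non-negative) representative; for cofactor 8 this includes an optional
 * 4-torsion rotation. */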
void API_NS(deisogenize) (
    gf_s *__restrict__ s,
    gf_s *__restrict__ minus_t_over_s,
    const point_t p,
    mask_t toggle_hibit_s,
    mask_t toggle_hibit_t_over_s,
    mask_t toggle_rotation
) {
    #if COFACTOR == 4 && !IMAGINE_TWIST
        (void) toggle_rotation;

        gf b, d;
        gf_s *c = s, *a = minus_t_over_s;
        gf_mulw(a, p->y, 1-EDWARDS_D);
        gf_mul(c, a, p->t); /* -dYT, with EDWARDS_D = d-1 */
        gf_mul(a, p->x, p->z);
        gf_sub(d, c, a); /* aXZ-dYT with a=-1 */
        gf_add(a, p->z, p->y);
        gf_sub(b, p->z, p->y);
        gf_mul(c, b, a);
        gf_mulw(b, c, -EDWARDS_D); /* (a-d)(Z+Y)(Z-Y) */
        mask_t ok = gf_isr (a,b); /* r in the paper */
        (void)ok; assert(ok | gf_eq(b,ZERO));
        gf_mulw (b, a, -EDWARDS_D); /* u in the paper */
        gf_mul(c,a,d); /* r(aZX-dYT) */
        gf_mul(a,b,p->z); /* uZ */
        gf_add(a,a,a); /* 2uZ */
        gf_cond_neg(c, toggle_hibit_t_over_s ^ ~gf_hibit(a)); /* u <- -u if negative. */
        gf_cond_neg(a, toggle_hibit_t_over_s ^ ~gf_hibit(a)); /* t/s <-? -t/s */
        gf_add(d,c,p->y);
        gf_mul(s,b,d);
        gf_cond_neg(s, toggle_hibit_s ^ gf_hibit(s));
    #else
        /* More complicated because of rotation */
        /* MAGIC This code is wrong for certain non-Curve25519 curves;
         * check if it's because of Cofactor==8 or IMAGINE_ROTATION */
        gf c, d;
        gf_s *b = s, *a = minus_t_over_s;

        #if IMAGINE_TWIST
            gf x, t;
            gf_mul ( x, p->x, SQRT_MINUS_ONE);
            gf_mul ( t, p->t, SQRT_MINUS_ONE);
            gf_sub ( x, ZERO, x );
            gf_sub ( t, ZERO, t );

            gf_add ( a, p->z, x );
            gf_sub ( b, p->z, x );
            gf_mul ( c, a, b ); /* "zx" = Z^2 - aX^2 = Z^2 - X^2 */
        #else
            const gf_s *x = p->x, *t = p->t;
            /* Won't hit the gf_cond_sel below because COFACTOR==8 requires IMAGINE_TWIST for now. */
            gf_sqr ( a, p->z );
            gf_sqr ( b, p->x );
            gf_add ( c, a, b ); /* "zx" = Z^2 - aX^2 = Z^2 + X^2 */
        #endif

        gf_mul ( a, p->z, t ); /* "tz" = T*Z */
        gf_sqr ( b, a );
        gf_mul ( d, b, c ); /* (TZ)^2 * (Z^2-aX^2) */
        mask_t ok = gf_isr(b, d);
        (void)ok; assert(ok | gf_eq(d,ZERO));
        gf_mul ( d, b, a ); /* "osx" = 1 / sqrt(z^2-ax^2) */
        gf_mul ( a, b, c );
        gf_mul ( b, a, d ); /* 1/tz */

        mask_t rotate;
        #if (COFACTOR == 8)
            gf e;
            gf_sqr(e, p->z);
            gf_mul(a, e, b); /* z^2 / tz = z/t = 1/xy */
            rotate = gf_hibit(a) ^ toggle_rotation;
            /* Curve25519: cond select between zx * 1/tz or sqrt(1-d); y=-x */
            gf_mul ( a, b, c );
            gf_cond_sel ( a, a, SQRT_ONE_MINUS_D, rotate );
            gf_cond_sel ( x, p->y, x, rotate );
        #else
            (void)toggle_rotation;
            rotate = 0;
        #endif

        gf_mul ( c, a, d ); // new "osx"
        gf_mul ( a, c, p->z );
        gf_add ( a, a, a ); // 2 * "osx" * Z
        mask_t tg1 = rotate ^ toggle_hibit_t_over_s ^ ~gf_hibit(a);
        gf_cond_neg ( c, tg1 );
        gf_cond_neg ( a, rotate ^ tg1 );
        gf_mul ( d, b, p->z );
        gf_add ( d, d, c );
        gf_mul ( b, d, x ); /* here "x" = y unless rotate */
        gf_cond_neg ( b, toggle_hibit_s ^ gf_hibit(b) );
    #endif
}
void API_NS(point_encode)( unsigned char ser[SER_BYTES], const point_t p ) {
    gf s, mtos;
    API_NS(deisogenize)(s,mtos,p,0,0,0);
    gf_serialize(ser,s,0);
}
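
/* Decoding, roughly: from the candidate s, compute t^2 via the curve
 * equation, take one inverse square root to get e = 1/(t*s*(1-as^2)), and
 * from e derive both 1/t and t/s.  The sign of t is then fixed so that t/s
 * is non-negative (high bit clear), and the extended coordinates
 * (X:Y:Z:T) are reconstructed.  Failures (t^2 non-square, s == 0 with the
 * identity disallowed, bad 2/xy parity for cofactor 8) all fold into the
 * succ mask, keeping the whole decode constant-time. */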
decaf_error_t API_NS(point_decode) (
    point_t p,
    const unsigned char ser[SER_BYTES],
    decaf_bool_t allow_identity
) {
    gf s, a, b, c, d, e, f;
    mask_t succ = gf_deserialize(s, ser, 0);
    mask_t zero = gf_eq(s, ZERO);
    succ &= bool_to_mask(allow_identity) | ~zero;
    gf_sqr ( a, s );
    #if IMAGINE_TWIST
        gf_sub ( f, ONE, a ); /* f = 1-as^2 = 1-s^2 */
    #else
        gf_add ( f, ONE, a ); /* f = 1-as^2 = 1+s^2 */
    #endif
    succ &= ~ gf_eq( f, ZERO );
    gf_sqr ( b, f );
    gf_mulw ( c, a, 4*IMAGINE_TWIST-4*EDWARDS_D );
    gf_add ( c, c, b ); /* t^2 */
    gf_mul ( d, f, s ); /* s(1-as^2) for denoms */
    gf_sqr ( e, d );
    gf_mul ( b, c, e );

    succ &= gf_isr(e,b) | gf_eq(b,ZERO); /* e = 1/(t s (1-as^2)) */
    gf_mul ( b, e, d ); /* 1/t */
    gf_mul ( d, e, c ); /* d = t / (s(1-as^2)) */
    gf_mul ( e, d, f ); /* t/s */
    mask_t negtos = gf_hibit(e);
    gf_cond_neg(b, negtos);
    gf_cond_neg(d, negtos);

    #if IMAGINE_TWIST
        gf_add ( p->z, ONE, a); /* Z = 1+as^2 = 1-s^2 */
    #else
        gf_sub ( p->z, ONE, a); /* Z = 1-as^2 = 1-s^2 */
    #endif

    #if COFACTOR == 8
        gf_mul ( a, p->z, d); /* t(1+s^2) / s(1-s^2) = 2/xy */
        succ &= ~gf_lobit(a); /* = ~gf_hibit(a/2), since gf_hibit(x) = gf_lobit(2x) */
    #endif

    gf_mul ( a, f, b ); /* y = (1-as^2) / t */
    gf_mul ( p->y, p->z, a ); /* Y = yZ */
    #if IMAGINE_TWIST
        gf_add ( b, s, s );
        gf_mul(p->x, b, SQRT_MINUS_ONE); /* Curve25519 */
    #else
        gf_add ( p->x, s, s );
    #endif
    gf_mul ( p->t, p->x, a ); /* T = 2s (1-as^2)/t */

    p->y->limb[0] -= zero;

    assert(API_NS(point_valid)(p) | ~succ);
    return decaf_succeed_if(mask_to_bool(succ));
}
#if IMAGINE_TWIST
#define TWISTED_D (-(EDWARDS_D))
#else
#define TWISTED_D ((EDWARDS_D)-1)
#endif

#if TWISTED_D < 0
#define EFF_D (-(TWISTED_D))
#define NEG_D 1
#else
#define EFF_D TWISTED_D
#define NEG_D 0
#endif

void API_NS(point_sub) (
    point_t p,
    const point_t q,
    const point_t r
) {
    gf a, b, c, d;
    gf_sub_nr ( b, q->y, q->x );
    gf_sub_nr ( d, r->y, r->x );
    gf_add_nr ( c, r->y, r->x );
    gf_mul ( a, c, b );
    gf_add_nr ( b, q->y, q->x );
    gf_mul ( p->y, d, b );
    gf_mul ( b, r->t, q->t );
    gf_mulw ( p->x, b, 2*EFF_D );
    gf_add_nr ( b, a, p->y );
    gf_sub_nr ( c, p->y, a );
    gf_mul ( a, q->z, r->z );
    gf_add_nr ( a, a, a );
    #if NEG_D
        gf_sub_nr ( p->y, a, p->x );
        gf_add_nr ( a, a, p->x );
    #else
        gf_add_nr ( p->y, a, p->x );
        gf_sub_nr ( a, a, p->x );
    #endif
    gf_mul ( p->z, a, p->y );
    gf_mul ( p->x, p->y, c );
    gf_mul ( p->y, a, b );
    gf_mul ( p->t, b, c );
}

void API_NS(point_add) (
    point_t p,
    const point_t q,
    const point_t r
) {
    gf a, b, c, d;
    gf_sub_nr ( b, q->y, q->x );
    gf_sub_nr ( c, r->y, r->x );
    gf_add_nr ( d, r->y, r->x );
    gf_mul ( a, c, b );
    gf_add_nr ( b, q->y, q->x );
    gf_mul ( p->y, d, b );
    gf_mul ( b, r->t, q->t );
    gf_mulw ( p->x, b, 2*EFF_D );
    gf_add_nr ( b, a, p->y );
    gf_sub_nr ( c, p->y, a );
    gf_mul ( a, q->z, r->z );
    gf_add_nr ( a, a, a );
    #if NEG_D
        gf_add_nr ( p->y, a, p->x );
        gf_sub_nr ( a, a, p->x );
    #else
        gf_sub_nr ( p->y, a, p->x );
        gf_add_nr ( a, a, p->x );
    #endif
    gf_mul ( p->z, a, p->y );
    gf_mul ( p->x, p->y, c );
    gf_mul ( p->y, a, b );
    gf_mul ( p->t, b, c );
}
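
/* point_add/point_sub above and the doubling below are the
 * extended-coordinate formulas of Hisil et al. (the paper whose
 * "lookahead" trick is cited in the scalarmul loops), with EFF_D/NEG_D
 * folding the sign of TWISTED_D into adds vs. subs at preprocessor time.
 * The before_double flag skips the final multiply that produces the T
 * coordinate, which is safe when the next operation is a doubling, since
 * doubling never reads T. */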
static NOINLINE void
point_double_internal (
    point_t p,
    const point_t q,
    int before_double
) {
    gf a, b, c, d;
    gf_sqr ( c, q->x );
    gf_sqr ( a, q->y );
    gf_add_nr ( d, c, a );
    gf_add_nr ( p->t, q->y, q->x );
    gf_sqr ( b, p->t );
    gf_subx_nr ( b, b, d, 3 );
    gf_sub_nr ( p->t, a, c );
    gf_sqr ( p->x, q->z );
    gf_add_nr ( p->z, p->x, p->x );
    gf_subx_nr ( a, p->z, p->t, 4 );
    gf_mul ( p->x, a, b );
    gf_mul ( p->z, p->t, a );
    gf_mul ( p->y, p->t, d );
    if (!before_double) gf_mul ( p->t, b, d );
}

void API_NS(point_double)(point_t p, const point_t q) {
    point_double_internal(p,q,0);
}

void API_NS(point_negate) (
    point_t nega,
    const point_t a
) {
    gf_sub(nega->x, ZERO, a->x);
    gf_copy(nega->y, a->y);
    gf_copy(nega->z, a->z);
    gf_sub(nega->t, ZERO, a->t);
}

/* Operations on [p]niels */
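
/* A niels_t holds a point prepared for mixed addition:
 * (a, b, c) = (Y-X, Y+X, 2*TWISTED_D*T), with Z implicitly 1; a pniels_t
 * carries an explicit (pre-doubled) Z alongside.  Negation swaps Y-X with
 * Y+X and flips the sign of the T term, which is exactly what
 * cond_neg_niels does with one conditional swap and one conditional
 * negate. */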
static INLINE void
cond_neg_niels (
    niels_t n,
    mask_t neg
) {
    gf_cond_swap(n->a, n->b, neg);
    gf_cond_neg(n->c, neg);
}

static NOINLINE void pt_to_pniels (
    pniels_t b,
    const point_t a
) {
    gf_sub ( b->n->a, a->y, a->x );
    gf_add ( b->n->b, a->x, a->y );
    gf_mulw ( b->n->c, a->t, 2*TWISTED_D );
    gf_add ( b->z, a->z, a->z );
}

static NOINLINE void pniels_to_pt (
    point_t e,
    const pniels_t d
) {
    gf eu;
    gf_add ( eu, d->n->b, d->n->a );
    gf_sub ( e->y, d->n->b, d->n->a );
    gf_mul ( e->t, e->y, eu);
    gf_mul ( e->x, d->z, e->y );
    gf_mul ( e->y, d->z, eu );
    gf_sqr ( e->z, d->z );
}

static NOINLINE void
niels_to_pt (
    point_t e,
    const niels_t n
) {
    gf_add ( e->y, n->b, n->a );
    gf_sub ( e->x, n->b, n->a );
    gf_mul ( e->t, e->y, e->x );
    gf_copy ( e->z, ONE );
}

static NOINLINE void
add_niels_to_pt (
    point_t d,
    const niels_t e,
    int before_double
) {
    gf a, b, c;
    gf_sub_nr ( b, d->y, d->x );
    gf_mul ( a, e->a, b );
    gf_add_nr ( b, d->x, d->y );
    gf_mul ( d->y, e->b, b );
    gf_mul ( d->x, e->c, d->t );
    gf_add_nr ( c, a, d->y );
    gf_sub_nr ( b, d->y, a );
    gf_sub_nr ( d->y, d->z, d->x );
    gf_add_nr ( a, d->x, d->z );
    gf_mul ( d->z, a, d->y );
    gf_mul ( d->x, d->y, b );
    gf_mul ( d->y, a, c );
    if (!before_double) gf_mul ( d->t, b, c );
}

static NOINLINE void
sub_niels_from_pt (
    point_t d,
    const niels_t e,
    int before_double
) {
    gf a, b, c;
    gf_sub_nr ( b, d->y, d->x );
    gf_mul ( a, e->b, b );
    gf_add_nr ( b, d->x, d->y );
    gf_mul ( d->y, e->a, b );
    gf_mul ( d->x, e->c, d->t );
    gf_add_nr ( c, a, d->y );
    gf_sub_nr ( b, d->y, a );
    gf_add_nr ( d->y, d->z, d->x );
    gf_sub_nr ( a, d->z, d->x );
    gf_mul ( d->z, a, d->y );
    gf_mul ( d->x, d->y, b );
    gf_mul ( d->y, a, c );
    if (!before_double) gf_mul ( d->t, b, c );
}

static void
add_pniels_to_pt (
    point_t p,
    const pniels_t pn,
    int before_double
) {
    gf L0;
    gf_mul ( L0, p->z, pn->z );
    gf_copy ( p->z, L0 );
    add_niels_to_pt( p, pn->n, before_double );
}

static void
sub_pniels_from_pt (
    point_t p,
    const pniels_t pn,
    int before_double
) {
    gf L0;
    gf_mul ( L0, p->z, pn->z );
    gf_copy ( p->z, L0 );
    sub_niels_from_pt( p, pn->n, before_double );
}

static INLINE void
constant_time_lookup_xx (
    void *__restrict__ out_,
    const void *table_,
    word_t elem_bytes,
    word_t n_table,
    word_t idx
) {
    constant_time_lookup(out_,table_,elem_bytes,n_table,idx);
}

static NOINLINE void
prepare_fixed_window(
    pniels_t *multiples,
    const point_t b,
    int ntable
) {
    point_t tmp;
    pniels_t pn;
    int i;

    point_double_internal(tmp, b, 0);
    pt_to_pniels(pn, tmp);
    pt_to_pniels(multiples[0], b);
    API_NS(point_copy)(tmp, b);
    for (i=1; i<ntable; i++) {
        add_pniels_to_pt(tmp, pn, 0);
        pt_to_pniels(multiples[i], tmp);
    }

    decaf_bzero(pn,sizeof(pn));
    decaf_bzero(tmp,sizeof(tmp));
}
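
/* The fixed-window multiplies below use a signed all-odd digit recoding:
 * prepare_fixed_window fills the table with the odd multiples
 * b, 3b, 5b, ..., (2*NTABLE-1)*b, and the scalar is pre-biased by
 * point_scalarmul_adjustment and halved so that every WINDOW-bit digit can
 * be taken odd.  Digit signs are handled by the inv mask and
 * cond_neg_niels, and table reads go through constant_time_lookup, so no
 * secret-dependent branch or index is ever used. */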
void API_NS(point_scalarmul) (
    point_t a,
    const point_t b,
    const scalar_t scalar
) {
    const int WINDOW = DECAF_WINDOW_BITS,
        WINDOW_MASK = (1<<WINDOW)-1,
        WINDOW_T_MASK = WINDOW_MASK >> 1,
        NTABLE = 1<<(WINDOW-1);

    scalar_t scalar1x;
    API_NS(scalar_add)(scalar1x, scalar, point_scalarmul_adjustment);
    API_NS(scalar_halve)(scalar1x,scalar1x);

    /* Set up a precomputed table with odd multiples of b. */
    pniels_t pn, multiples[NTABLE];
    point_t tmp;
    prepare_fixed_window(multiples, b, NTABLE);

    /* Initialize. */
    int i,j,first=1;
    i = SCALAR_BITS - ((SCALAR_BITS-1) % WINDOW) - 1;

    for (; i>=0; i-=WINDOW) {
        /* Fetch another block of bits */
        word_t bits = scalar1x->limb[i/WBITS] >> (i%WBITS);
        if (i%WBITS >= WBITS-WINDOW && i/WBITS<SCALAR_LIMBS-1) {
            bits ^= scalar1x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
        }
        bits &= WINDOW_MASK;
        mask_t inv = (bits>>(WINDOW-1))-1;
        bits ^= inv;

        /* Add in from table.  Compute t only on last iteration. */
        constant_time_lookup_xx(pn, multiples, sizeof(pn), NTABLE, bits & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv);
        if (first) {
            pniels_to_pt(tmp, pn);
            first = 0;
        } else {
            /* Using Hisil et al's lookahead method instead of extensible here
             * for no particular reason.  Double WINDOW times, but only compute t on
             * the last one.
             */
            for (j=0; j<WINDOW-1; j++)
                point_double_internal(tmp, tmp, -1);
            point_double_internal(tmp, tmp, 0);
            add_pniels_to_pt(tmp, pn, i ? -1 : 0);
        }
    }

    /* Write out the answer */
    API_NS(point_copy)(a,tmp);

    decaf_bzero(scalar1x,sizeof(scalar1x));
    decaf_bzero(pn,sizeof(pn));
    decaf_bzero(multiples,sizeof(multiples));
    decaf_bzero(tmp,sizeof(tmp));
}

void API_NS(point_double_scalarmul) (
    point_t a,
    const point_t b,
    const scalar_t scalarb,
    const point_t c,
    const scalar_t scalarc
) {
    const int WINDOW = DECAF_WINDOW_BITS,
        WINDOW_MASK = (1<<WINDOW)-1,
        WINDOW_T_MASK = WINDOW_MASK >> 1,
        NTABLE = 1<<(WINDOW-1);

    scalar_t scalar1x, scalar2x;
    API_NS(scalar_add)(scalar1x, scalarb, point_scalarmul_adjustment);
    API_NS(scalar_halve)(scalar1x,scalar1x);
    API_NS(scalar_add)(scalar2x, scalarc, point_scalarmul_adjustment);
    API_NS(scalar_halve)(scalar2x,scalar2x);

    /* Set up a precomputed table with odd multiples of b. */
    pniels_t pn, multiples1[NTABLE], multiples2[NTABLE];
    point_t tmp;
    prepare_fixed_window(multiples1, b, NTABLE);
    prepare_fixed_window(multiples2, c, NTABLE);

    /* Initialize. */
    int i,j,first=1;
    i = SCALAR_BITS - ((SCALAR_BITS-1) % WINDOW) - 1;

    for (; i>=0; i-=WINDOW) {
        /* Fetch another block of bits */
        word_t bits1 = scalar1x->limb[i/WBITS] >> (i%WBITS),
               bits2 = scalar2x->limb[i/WBITS] >> (i%WBITS);
        if (i%WBITS >= WBITS-WINDOW && i/WBITS<SCALAR_LIMBS-1) {
            bits1 ^= scalar1x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
            bits2 ^= scalar2x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
        }
        bits1 &= WINDOW_MASK;
        bits2 &= WINDOW_MASK;
        mask_t inv1 = (bits1>>(WINDOW-1))-1;
        mask_t inv2 = (bits2>>(WINDOW-1))-1;
        bits1 ^= inv1;
        bits2 ^= inv2;

        /* Add in from table.  Compute t only on last iteration. */
        constant_time_lookup_xx(pn, multiples1, sizeof(pn), NTABLE, bits1 & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv1);
        if (first) {
            pniels_to_pt(tmp, pn);
            first = 0;
        } else {
            /* Using Hisil et al's lookahead method instead of extensible here
             * for no particular reason.  Double WINDOW times, but only compute t on
             * the last one.
             */
            for (j=0; j<WINDOW-1; j++)
                point_double_internal(tmp, tmp, -1);
            point_double_internal(tmp, tmp, 0);
            add_pniels_to_pt(tmp, pn, 0);
        }

        constant_time_lookup_xx(pn, multiples2, sizeof(pn), NTABLE, bits2 & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv2);
        add_pniels_to_pt(tmp, pn, i?-1:0);
    }

    /* Write out the answer */
    API_NS(point_copy)(a,tmp);

    decaf_bzero(scalar1x,sizeof(scalar1x));
    decaf_bzero(scalar2x,sizeof(scalar2x));
    decaf_bzero(pn,sizeof(pn));
    decaf_bzero(multiples1,sizeof(multiples1));
    decaf_bzero(multiples2,sizeof(multiples2));
    decaf_bzero(tmp,sizeof(tmp));
}
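
/* point_dual_scalarmul computes a1 = scalar1*b and a2 = scalar2*b in one
 * pass over a single doubling chain on b.  Rather than building tables of
 * multiples, it scatters the current power of b into per-digit accumulator
 * buckets (constant_time_insert keeps the store pattern secret); the
 * closing loop then combines the buckets by suffix sums, which in effect
 * adds (2i+1) times bucket i into each output. */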
void API_NS(point_dual_scalarmul) (
    point_t a1,
    point_t a2,
    const point_t b,
    const scalar_t scalar1,
    const scalar_t scalar2
) {
    const int WINDOW = DECAF_WINDOW_BITS,
        WINDOW_MASK = (1<<WINDOW)-1,
        WINDOW_T_MASK = WINDOW_MASK >> 1,
        NTABLE = 1<<(WINDOW-1);

    scalar_t scalar1x, scalar2x;
    API_NS(scalar_add)(scalar1x, scalar1, point_scalarmul_adjustment);
    API_NS(scalar_halve)(scalar1x,scalar1x);
    API_NS(scalar_add)(scalar2x, scalar2, point_scalarmul_adjustment);
    API_NS(scalar_halve)(scalar2x,scalar2x);

    /* Set up a precomputed table with odd multiples of b. */
    point_t multiples1[NTABLE], multiples2[NTABLE], working, tmp;
    pniels_t pn;

    API_NS(point_copy)(working, b);

    /* Initialize. */
    int i,j;

    for (i=0; i<NTABLE; i++) {
        API_NS(point_copy)(multiples1[i], API_NS(point_identity));
        API_NS(point_copy)(multiples2[i], API_NS(point_identity));
    }

    for (i=0; i<SCALAR_BITS; i+=WINDOW) {
        if (i) {
            for (j=0; j<WINDOW-1; j++)
                point_double_internal(working, working, -1);
            point_double_internal(working, working, 0);
        }

        /* Fetch another block of bits */
        word_t bits1 = scalar1x->limb[i/WBITS] >> (i%WBITS),
               bits2 = scalar2x->limb[i/WBITS] >> (i%WBITS);
        if (i%WBITS >= WBITS-WINDOW && i/WBITS<SCALAR_LIMBS-1) {
            bits1 ^= scalar1x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
            bits2 ^= scalar2x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
        }
        bits1 &= WINDOW_MASK;
        bits2 &= WINDOW_MASK;
        mask_t inv1 = (bits1>>(WINDOW-1))-1;
        mask_t inv2 = (bits2>>(WINDOW-1))-1;
        bits1 ^= inv1;
        bits2 ^= inv2;

        pt_to_pniels(pn, working);

        constant_time_lookup_xx(tmp, multiples1, sizeof(tmp), NTABLE, bits1 & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv1);
        /* add_pniels_to_pt(multiples1[bits1 & WINDOW_T_MASK], pn, 0); */
        add_pniels_to_pt(tmp, pn, 0);
        constant_time_insert(multiples1, tmp, sizeof(tmp), NTABLE, bits1 & WINDOW_T_MASK);

        constant_time_lookup_xx(tmp, multiples2, sizeof(tmp), NTABLE, bits2 & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv1^inv2);
        /* add_pniels_to_pt(multiples2[bits2 & WINDOW_T_MASK], pn, 0); */
        add_pniels_to_pt(tmp, pn, 0);
        constant_time_insert(multiples2, tmp, sizeof(tmp), NTABLE, bits2 & WINDOW_T_MASK);
    }

    if (NTABLE > 1) {
        API_NS(point_copy)(working, multiples1[NTABLE-1]);
        API_NS(point_copy)(tmp    , multiples2[NTABLE-1]);

        for (i=NTABLE-1; i>1; i--) {
            API_NS(point_add)(multiples1[i-1], multiples1[i-1], multiples1[i]);
            API_NS(point_add)(multiples2[i-1], multiples2[i-1], multiples2[i]);
            API_NS(point_add)(working, working, multiples1[i-1]);
            API_NS(point_add)(tmp, tmp, multiples2[i-1]);
        }

        API_NS(point_add)(multiples1[0], multiples1[0], multiples1[1]);
        API_NS(point_add)(multiples2[0], multiples2[0], multiples2[1]);
        point_double_internal(working, working, 0);
        point_double_internal(tmp, tmp, 0);
        API_NS(point_add)(a1, working, multiples1[0]);
        API_NS(point_add)(a2, tmp, multiples2[0]);
    } else {
        API_NS(point_copy)(a1, multiples1[0]);
        API_NS(point_copy)(a2, multiples2[0]);
    }

    decaf_bzero(scalar1x,sizeof(scalar1x));
    decaf_bzero(scalar2x,sizeof(scalar2x));
    decaf_bzero(pn,sizeof(pn));
    decaf_bzero(multiples1,sizeof(multiples1));
    decaf_bzero(multiples2,sizeof(multiples2));
    decaf_bzero(tmp,sizeof(tmp));
    decaf_bzero(working,sizeof(working));
}

decaf_bool_t API_NS(point_eq) ( const point_t p, const point_t q ) {
    /* equality mod 2-torsion compares x/y */
    gf a, b;
    gf_mul ( a, p->y, q->x );
    gf_mul ( b, q->y, p->x );
    mask_t succ = gf_eq(a,b);

    #if (COFACTOR == 8) && IMAGINE_TWIST
        gf_mul ( a, p->y, q->y );
        gf_mul ( b, q->x, p->x );
        #if !(IMAGINE_TWIST)
            gf_sub ( a, ZERO, a );
        #else
            /* Interesting note: the 4tor would normally be rotation.
             * But because of the *i twist, it's actually
             * (x,y) <-> (iy,ix)
             */
            /* No code, just a comment. */
        #endif
        succ |= gf_eq(a,b);
    #endif

    return mask_to_bool(succ);
}
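
/* Validity means all three of: the extended coordinate is consistent
 * (X*Y == Z*T), the point satisfies the twisted curve equation
 * -X^2 + Y^2 == Z^2 + TWISTED_D*T^2, and Z != 0. */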
decaf_bool_t API_NS(point_valid) (
    const point_t p
) {
    gf a,b,c;
    gf_mul(a,p->x,p->y);
    gf_mul(b,p->z,p->t);
    mask_t out = gf_eq(a,b);
    gf_sqr(a,p->x);
    gf_sqr(b,p->y);
    gf_sub(a,b,a);
    gf_sqr(b,p->t);
    gf_mulw(c,b,TWISTED_D);
    gf_sqr(b,p->z);
    gf_add(b,b,c);
    out &= gf_eq(a,b);
    out &= ~gf_eq(p->z,ZERO);
    return mask_to_bool(out);
}

void API_NS(point_debugging_torque) (
    point_t q,
    const point_t p
) {
    #if COFACTOR == 8
        gf tmp;
        gf_mul(tmp,p->x,SQRT_MINUS_ONE);
        gf_mul(q->x,p->y,SQRT_MINUS_ONE);
        gf_copy(q->y,tmp);
        gf_copy(q->z,p->z);
        gf_sub(q->t,ZERO,p->t);
    #else
        gf_sub(q->x,ZERO,p->x);
        gf_sub(q->y,ZERO,p->y);
        gf_copy(q->z,p->z);
        gf_copy(q->t,p->t);
    #endif
}

void API_NS(point_debugging_pscale) (
    point_t q,
    const point_t p,
    const uint8_t factor[SER_BYTES]
) {
    gf gfac,tmp;
    /* NB this means you'll never pscale by negative numbers for p521 */
    ignore_result(gf_deserialize(gfac,factor,0));
    gf_cond_sel(gfac,gfac,ONE,gf_eq(gfac,ZERO));
    gf_mul(tmp,p->x,gfac);
    gf_copy(q->x,tmp);
    gf_mul(tmp,p->y,gfac);
    gf_copy(q->y,tmp);
    gf_mul(tmp,p->z,gfac);
    gf_copy(q->z,tmp);
    gf_mul(tmp,p->t,gfac);
    gf_copy(q->t,tmp);
}
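
/* Montgomery's batch-inversion trick: accumulate running products of the
 * inputs, invert the single overall product, then sweep backwards
 * multiplying by the saved prefixes.  n field inversions thus cost one
 * gf_invert plus O(n) multiplications. */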
static void gf_batch_invert (
    gf *__restrict__ out,
    const gf *in,
    unsigned int n
) {
    gf t1;
    assert(n>1);

    gf_copy(out[1], in[0]);
    int i;
    for (i=1; i<(int) (n-1); i++) {
        gf_mul(out[i+1], out[i], in[i]);
    }
    gf_mul(out[0], out[n-1], in[n-1]);

    gf_invert(out[0], out[0]);

    for (i=n-1; i>0; i--) {
        gf_mul(t1, out[i], out[0]);
        gf_copy(out[i], t1);
        gf_mul(t1, out[0], in[i]);
        gf_copy(out[0], t1);
    }
}

static void batch_normalize_niels (
    niels_t *table,
    const gf *zs,
    gf *__restrict__ zis,
    int n
) {
    int i;
    gf product;
    gf_batch_invert(zis, zs, n);

    for (i=0; i<n; i++) {
        gf_mul(product, table[i]->a, zis[i]);
        gf_strong_reduce(product);
        gf_copy(table[i]->a, product);

        gf_mul(product, table[i]->b, zis[i]);
        gf_strong_reduce(product);
        gf_copy(table[i]->b, product);

        gf_mul(product, table[i]->c, zis[i]);
        gf_strong_reduce(product);
        gf_copy(table[i]->c, product);
    }

    decaf_bzero(product,sizeof(product));
}
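
/* Comb precomputation: the scalar is viewed as an n x t x s box of bits
 * (n combs of t teeth spaced s bits apart, with n*t*s >= SCALAR_BITS).
 * For each comb, the table stores all 2^(t-1) signed combinations of the
 * teeth applied to the base; walking the combinations in Gray-code order
 * means each new entry costs a single add or sub of a saved doubling.
 * The finished table is batch-normalized to affine niels form. */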
void API_NS(precompute) (
    precomputed_s *table,
    const point_t base
) {
    const unsigned int n = COMBS_N, t = COMBS_T, s = COMBS_S;
    assert(n*t*s >= SCALAR_BITS);

    point_t working, start, doubles[t-1];
    API_NS(point_copy)(working, base);
    pniels_t pn_tmp;

    gf zs[n<<(t-1)], zis[n<<(t-1)];

    unsigned int i,j,k;

    /* Compute n tables */
    for (i=0; i<n; i++) {

        /* Doubling phase */
        for (j=0; j<t; j++) {
            if (j) API_NS(point_add)(start, start, working);
            else API_NS(point_copy)(start, working);

            if (j==t-1 && i==n-1) break;

            point_double_internal(working, working,0);
            if (j<t-1) API_NS(point_copy)(doubles[j], working);

            for (k=0; k<s-1; k++)
                point_double_internal(working, working, k<s-2);
        }

        /* Gray-code phase */
        for (j=0;; j++) {
            int gray = j ^ (j>>1);
            int idx = (((i+1)<<(t-1))-1) ^ gray;

            pt_to_pniels(pn_tmp, start);
            memcpy(table->table[idx], pn_tmp->n, sizeof(pn_tmp->n));
            gf_copy(zs[idx], pn_tmp->z);

            if (j >= (1u<<(t-1)) - 1) break;
            int delta = (j+1) ^ ((j+1)>>1) ^ gray;

            for (k=0; delta>1; k++)
                delta >>=1;

            if (gray & (1<<k)) {
                API_NS(point_add)(start, start, doubles[k]);
            } else {
                API_NS(point_sub)(start, start, doubles[k]);
            }
        }
    }

    batch_normalize_niels(table->table,(const gf *)zs,zis,n<<(t-1));

    decaf_bzero(zs,sizeof(zs));
    decaf_bzero(zis,sizeof(zis));
    decaf_bzero(pn_tmp,sizeof(pn_tmp));
    decaf_bzero(working,sizeof(working));
    decaf_bzero(start,sizeof(start));
    decaf_bzero(doubles,sizeof(doubles));
}

static INLINE void
constant_time_lookup_xx_niels (
    niels_s *__restrict__ ni,
    const niels_t *table,
    int nelts,
    int idx
) {
    constant_time_lookup_xx(ni, table, sizeof(niels_s), nelts, idx);
}

void API_NS(precomputed_scalarmul) (
    point_t out,
    const precomputed_s *table,
    const scalar_t scalar
) {
    int i;
    unsigned j,k;
    const unsigned int n = COMBS_N, t = COMBS_T, s = COMBS_S;

    scalar_t scalar1x;
    API_NS(scalar_add)(scalar1x, scalar, precomputed_scalarmul_adjustment);
    API_NS(scalar_halve)(scalar1x,scalar1x);

    niels_t ni;

    for (i=s-1; i>=0; i--) {
        if (i != (int)s-1) point_double_internal(out,out,0);

        for (j=0; j<n; j++) {
            int tab = 0;

            for (k=0; k<t; k++) {
                unsigned int bit = i + s*(k + j*t);
                if (bit < SCALAR_BITS) {
                    tab |= (scalar1x->limb[bit/WBITS] >> (bit%WBITS) & 1) << k;
                }
            }

            mask_t invert = (tab>>(t-1))-1;
            tab ^= invert;
            tab &= (1<<(t-1)) - 1;

            constant_time_lookup_xx_niels(ni, &table->table[j<<(t-1)], 1<<(t-1), tab);

            cond_neg_niels(ni, invert);
            if ((i!=(int)s-1)||j) {
                add_niels_to_pt(out, ni, j==n-1 && i);
            } else {
                niels_to_pt(out, ni);
            }
        }
    }

    decaf_bzero(ni,sizeof(ni));
    decaf_bzero(scalar1x,sizeof(scalar1x));
}

void API_NS(point_cond_sel) (
    point_t out,
    const point_t a,
    const point_t b,
    decaf_bool_t pick_b
) {
    constant_time_select(out,a,b,sizeof(point_t),bool_to_mask(pick_b),0);
}

/* FUTURE: restore Curve25519 Montgomery ladder? */
decaf_error_t API_NS(direct_scalarmul) (
    uint8_t scaled[SER_BYTES],
    const uint8_t base[SER_BYTES],
    const scalar_t scalar,
    decaf_bool_t allow_identity,
    decaf_bool_t short_circuit
) {
    point_t basep;
    decaf_error_t succ = API_NS(point_decode)(basep, base, allow_identity);
    if (short_circuit && succ != DECAF_SUCCESS) return succ;
    API_NS(point_cond_sel)(basep, API_NS(point_base), basep, succ);
    API_NS(point_scalarmul)(basep, basep, scalar);
    API_NS(point_encode)(scaled, basep);
    API_NS(point_destroy)(basep);
    return succ;
}
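
/* What follows is a standard constant-time Montgomery ladder on the
 * x-line: each iteration is one combined differential add-and-double,
 * with gf_cond_swap driven by the XOR of successive scalar bits instead
 * of a branch.  The inline scalar conditioning clears the cofactor bits
 * of the low byte and forces the top bit, X25519/X448-style clamping. */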
decaf_error_t API_NS(x_direct_scalarmul) (
    uint8_t out[X_PUBLIC_BYTES],
    const uint8_t base[X_PUBLIC_BYTES],
    const uint8_t scalar[X_PRIVATE_BYTES]
) {
    gf x1, x2, z2, x3, z3, t1, t2;
    ignore_result(gf_deserialize(x1,base,1));
    gf_copy(x2,ONE);
    gf_copy(z2,ZERO);
    gf_copy(x3,x1);
    gf_copy(z3,ONE);

    int t;
    mask_t swap = 0;

    for (t = X_PRIVATE_BITS-1; t>=0; t--) {
        uint8_t sb = scalar[t/8];

        /* Scalar conditioning */
        if (t/8==0) sb &= -(uint8_t)COFACTOR;
        else if (t == X_PRIVATE_BITS-1) sb = -1;

        mask_t k_t = (sb>>(t%8)) & 1;
        k_t = -k_t; /* set to all 0s or all 1s */

        swap ^= k_t;
        gf_cond_swap(x2,x3,swap);
        gf_cond_swap(z2,z3,swap);
        swap = k_t;

        gf_add_nr(t1,x2,z2); /* A = x2 + z2 */
        gf_sub_nr(t2,x2,z2); /* B = x2 - z2 */
        gf_sub_nr(z2,x3,z3); /* D = x3 - z3 */
        gf_mul(x2,t1,z2); /* DA */
        gf_add_nr(z2,z3,x3); /* C = x3 + z3 */
        gf_mul(x3,t2,z2); /* CB */
        gf_sub_nr(z3,x2,x3); /* DA-CB */
        gf_sqr(z2,z3); /* (DA-CB)^2 */
        gf_mul(z3,x1,z2); /* z3 = x1(DA-CB)^2 */
        gf_add_nr(z2,x2,x3); /* (DA+CB) */
        gf_sqr(x3,z2); /* x3 = (DA+CB)^2 */
        gf_sqr(z2,t1); /* AA = A^2 */
        gf_sqr(t1,t2); /* BB = B^2 */
        gf_mul(x2,z2,t1); /* x2 = AA*BB */
        gf_sub_nr(t2,z2,t1); /* E = AA-BB */
        gf_mulw(t1,t2,-EDWARDS_D); /* E*-d = a24*E */
        gf_add_nr(t1,t1,z2); /* AA + a24*E */
        gf_mul(z2,t2,t1); /* z2 = E(AA+a24*E) */
    }

    /* Finish */
    gf_cond_swap(x2,x3,swap);
    gf_cond_swap(z2,z3,swap);
    gf_invert(z2,z2);
    gf_mul(x1,x2,z2);
    gf_serialize(out,x1,1);
    mask_t nz = ~gf_eq(x1,ZERO);

    decaf_bzero(x1,sizeof(x1));
    decaf_bzero(x2,sizeof(x2));
    decaf_bzero(z2,sizeof(z2));
    decaf_bzero(x3,sizeof(x3));
    decaf_bzero(z3,sizeof(z3));
    decaf_bzero(t1,sizeof(t1));
    decaf_bzero(t2,sizeof(t2));

    return decaf_succeed_if(mask_to_bool(nz));
}

void API_NS(x_base_scalarmul) (
    uint8_t out[X_PUBLIC_BYTES],
    const uint8_t scalar[X_PRIVATE_BYTES]
) {
    /* Scalar conditioning */
    uint8_t scalar2[X_PRIVATE_BYTES];
    memcpy(scalar2,scalar,sizeof(scalar2));
    scalar2[0] &= -(uint8_t)COFACTOR;

    scalar2[X_PRIVATE_BYTES-1] &= ~(-1<<((X_PRIVATE_BITS+7)%8));
    scalar2[X_PRIVATE_BYTES-1] |= 1<<((X_PRIVATE_BITS+7)%8);

    scalar_t the_scalar;
    API_NS(scalar_decode_long)(the_scalar,scalar2,sizeof(scalar2));

    /* We're gonna isogenize by 2, so divide by 2.
     *
     * Why by 2, even though it's a 4-isogeny?
     *
     * The isogeny map looks like
     * Montgomery <-2-> Jacobi <-2-> Edwards
     *
     * Since the Jacobi base point is the PREimage of the iso to
     * the Montgomery curve, and we're going
     * Jacobi -> Edwards -> Jacobi -> Montgomery,
     * we pick up only a factor of 2 over Jacobi -> Montgomery.
     */
    API_NS(scalar_halve)(the_scalar,the_scalar);

    #if COFACTOR==8
        /* If the base point isn't in the prime-order subgroup (PERF:
         * guarantee that it is?) then a 4-isogeny isn't necessarily
         * enough to clear the cofactor.  So add another doubling.
         */
        API_NS(scalar_halve)(the_scalar,the_scalar);
    #endif

    point_t p;
    API_NS(precomputed_scalarmul)(p,API_NS(precomputed_base),the_scalar);
    #if COFACTOR==8
        API_NS(point_double)(p,p);
    #endif

    /* Isogenize to Montgomery curve */
    gf_invert(p->t,p->x); /* 1/x */
    gf_mul(p->z,p->t,p->y); /* y/x */
    gf_sqr(p->y,p->z); /* (y/x)^2 */
    #if IMAGINE_TWIST
        gf_sub(p->y,ZERO,p->y);
    #endif
    gf_serialize(out,p->y,1);

    decaf_bzero(scalar2,sizeof(scalar2));
    API_NS(scalar_destroy)(the_scalar);
    API_NS(point_destroy)(p);
}

/**
 * @cond internal
 * Control for variable-time scalar multiply algorithms.
 */
struct smvt_control {
    int power, addend;
};
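
/* recode_wnaf rewrites the scalar as a sparse signed-digit sum: it emits
 * (power, addend) pairs, most significant first, with each addend odd and
 * |addend| < 2^(tableBits+1), such that the scalar equals
 * sum(addend << power).  The consumers below double up to each power and
 * add or subtract the table entry for |addend| >> 1, skipping runs of zero
 * bits outright; the scalar is consumed 16 bits at a time. */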
static int recode_wnaf (
    struct smvt_control *control, /* [nbits/(tableBits+1) + 3] */
    const scalar_t scalar,
    unsigned int tableBits
) {
    unsigned int table_size = SCALAR_BITS/(tableBits+1) + 3;
    int position = table_size - 1; /* at the end */

    /* place the end marker */
    control[position].power = -1;
    control[position].addend = 0;
    position--;

    /* PERF: Could negate scalar if it's large.  But then would need more cases
     * in the actual code that uses it, all for an expected reduction of like 1/5 op.
     * Probably not worth it.
     */

    uint64_t current = scalar->limb[0] & 0xFFFF;
    uint32_t mask = (1<<(tableBits+1))-1;

    unsigned int w;
    const unsigned int B_OVER_16 = sizeof(scalar->limb[0]) / 2;
    for (w = 1; w<(SCALAR_BITS-1)/16+3; w++) {
        if (w < (SCALAR_BITS-1)/16+1) {
            /* Refill the 16 high bits of current */
            current += (uint32_t)((scalar->limb[w/B_OVER_16]>>(16*(w%B_OVER_16)))<<16);
        }

        while (current & 0xFFFF) {
            assert(position >= 0);
            uint32_t pos = __builtin_ctz((uint32_t)current), odd = (uint32_t)current >> pos;
            int32_t delta = odd & mask;
            if (odd & 1<<(tableBits+1)) delta -= (1<<(tableBits+1));
            current -= delta << pos;
            control[position].power = pos + 16*(w-1);
            control[position].addend = delta;
            position--;
        }
        current >>= 16;
    }
    assert(current==0);

    position++;
    unsigned int n = table_size - position;
    unsigned int i;
    for (i=0; i<n; i++) {
        control[i] = control[i+position];
    }
    return n-1;
}

static void
prepare_wnaf_table(
    pniels_t *output,
    const point_t working,
    unsigned int tbits
) {
    point_t tmp;
    int i;
    pt_to_pniels(output[0], working);

    if (tbits == 0) return;

    API_NS(point_double)(tmp,working);
    pniels_t twop;
    pt_to_pniels(twop, tmp);

    add_pniels_to_pt(tmp, output[0],0);
    pt_to_pniels(output[1], tmp);

    for (i=2; i < 1<<tbits; i++) {
        add_pniels_to_pt(tmp, twop,0);
        pt_to_pniels(output[i], tmp);
    }

    API_NS(point_destroy)(tmp);
    decaf_bzero(twop,sizeof(twop));
}

extern const gf API_NS(precomputed_wnaf_as_fe)[];
static const niels_t *API_NS(wnaf_base) = (const niels_t *)API_NS(precomputed_wnaf_as_fe);
const size_t API_NS(sizeof_precomputed_wnafs) __attribute__((visibility("hidden")))
    = sizeof(niels_t)<<DECAF_WNAF_FIXED_TABLE_BITS;

void API_NS(precompute_wnafs) (
    niels_t out[1<<DECAF_WNAF_FIXED_TABLE_BITS],
    const point_t base
) __attribute__ ((visibility ("hidden")));

void API_NS(precompute_wnafs) (
    niels_t out[1<<DECAF_WNAF_FIXED_TABLE_BITS],
    const point_t base
) {
    pniels_t tmp[1<<DECAF_WNAF_FIXED_TABLE_BITS];
    gf zs[1<<DECAF_WNAF_FIXED_TABLE_BITS], zis[1<<DECAF_WNAF_FIXED_TABLE_BITS];
    int i;
    prepare_wnaf_table(tmp,base,DECAF_WNAF_FIXED_TABLE_BITS);
    for (i=0; i<1<<DECAF_WNAF_FIXED_TABLE_BITS; i++) {
        memcpy(out[i], tmp[i]->n, sizeof(niels_t));
        gf_copy(zs[i], tmp[i]->z);
    }
    batch_normalize_niels(out, (const gf *)zs, zis, 1<<DECAF_WNAF_FIXED_TABLE_BITS);

    decaf_bzero(tmp,sizeof(tmp));
    decaf_bzero(zs,sizeof(zs));
    decaf_bzero(zis,sizeof(zis));
}
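
/* Strauss-Shamir interleaving for the (public) verification equation:
 * both wNAF digit streams share one doubling chain, with the fixed base
 * served from the precomputed, normalized wnaf_base table and the
 * variable base from a per-call pniels table.  Nothing here is
 * constant-time, which is fine since the inputs are non-secret. */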
void API_NS(base_double_scalarmul_non_secret) (
    point_t combo,
    const scalar_t scalar1,
    const point_t base2,
    const scalar_t scalar2
) {
    const int table_bits_var = DECAF_WNAF_VAR_TABLE_BITS,
        table_bits_pre = DECAF_WNAF_FIXED_TABLE_BITS;
    struct smvt_control control_var[SCALAR_BITS/(table_bits_var+1)+3];
    struct smvt_control control_pre[SCALAR_BITS/(table_bits_pre+1)+3];

    int ncb_pre = recode_wnaf(control_pre, scalar1, table_bits_pre);
    int ncb_var = recode_wnaf(control_var, scalar2, table_bits_var);

    pniels_t precmp_var[1<<table_bits_var];
    prepare_wnaf_table(precmp_var, base2, table_bits_var);

    int contp=0, contv=0, i = control_var[0].power;

    if (i < 0) {
        API_NS(point_copy)(combo, API_NS(point_identity));
        return;
    } else if (i > control_pre[0].power) {
        pniels_to_pt(combo, precmp_var[control_var[0].addend >> 1]);
        contv++;
    } else if (i == control_pre[0].power && i >= 0) {
        pniels_to_pt(combo, precmp_var[control_var[0].addend >> 1]);
        add_niels_to_pt(combo, API_NS(wnaf_base)[control_pre[0].addend >> 1], i);
        contv++; contp++;
    } else {
        i = control_pre[0].power;
        niels_to_pt(combo, API_NS(wnaf_base)[control_pre[0].addend >> 1]);
        contp++;
    }

    for (i--; i >= 0; i--) {
        int cv = (i==control_var[contv].power), cp = (i==control_pre[contp].power);
        point_double_internal(combo,combo,i && !(cv||cp));

        if (cv) {
            assert(control_var[contv].addend);

            if (control_var[contv].addend > 0) {
                add_pniels_to_pt(combo, precmp_var[control_var[contv].addend >> 1], i&&!cp);
            } else {
                sub_pniels_from_pt(combo, precmp_var[(-control_var[contv].addend) >> 1], i&&!cp);
            }
            contv++;
        }

        if (cp) {
            assert(control_pre[contp].addend);

            if (control_pre[contp].addend > 0) {
                add_niels_to_pt(combo, API_NS(wnaf_base)[control_pre[contp].addend >> 1], i);
            } else {
                sub_niels_from_pt(combo, API_NS(wnaf_base)[(-control_pre[contp].addend) >> 1], i);
            }
            contp++;
        }
    }

    /* This function is non-secret, but whatever this is cheap. */
    decaf_bzero(control_var,sizeof(control_var));
    decaf_bzero(control_pre,sizeof(control_pre));
    decaf_bzero(precmp_var,sizeof(precmp_var));

    assert(contv == ncb_var); (void)ncb_var;
    assert(contp == ncb_pre); (void)ncb_pre;
}

void API_NS(point_destroy) (
    point_t point
) {
    decaf_bzero(point, sizeof(point_t));
}

void API_NS(precomputed_destroy) (
    precomputed_s *pre
) {
    decaf_bzero(pre, API_NS(sizeof_precomputed_s));
}