You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 
 

1616 lines
46 KiB

  1. /**
  2. * @file curve25519/decaf.c
  3. * @author Mike Hamburg
  4. *
  5. * @copyright
  6. * Copyright (c) 2015-2016 Cryptography Research, Inc. \n
  7. * Released under the MIT License. See LICENSE.txt for license information.
  8. *
  9. * @brief Decaf high-level functions.
  10. *
  11. * @warning This file was automatically generated in Python.
  12. * Please do not edit it.
  13. */
  14. #define _XOPEN_SOURCE 600 /* for posix_memalign */
  15. #include "word.h"
  16. #include "field.h"
  17. #include <decaf.h>
  18. #include <decaf/ed255.h>
  19. /* Template stuff */
  20. #define API_NS(_id) decaf_255_##_id
  21. #define SCALAR_BITS DECAF_255_SCALAR_BITS
  22. #define SCALAR_SER_BYTES DECAF_255_SCALAR_BYTES
  23. #define SCALAR_LIMBS DECAF_255_SCALAR_LIMBS
  24. #define scalar_t API_NS(scalar_t)
  25. #define point_t API_NS(point_t)
  26. #define precomputed_s API_NS(precomputed_s)
  27. #define IMAGINE_TWIST 1
  28. #define COFACTOR 8
  29. /* Comb config: number of combs, n, t, s. */
  30. #define COMBS_N 3
  31. #define COMBS_T 5
  32. #define COMBS_S 17
  33. #define DECAF_WINDOW_BITS 4
  34. #define DECAF_WNAF_FIXED_TABLE_BITS 5
  35. #define DECAF_WNAF_VAR_TABLE_BITS 3
  36. #define EDDSA_USE_SIGMA_ISOGENY 1
  37. static const int EDWARDS_D = -121665;
  38. static const scalar_t point_scalarmul_adjustment = {{{
  39. SC_LIMB(0xd6ec31748d98951c), SC_LIMB(0xc6ef5bf4737dcf70), SC_LIMB(0xfffffffffffffffe), SC_LIMB(0x0fffffffffffffff)
  40. }}}, precomputed_scalarmul_adjustment = {{{
  41. SC_LIMB(0x977f4a4775473484), SC_LIMB(0x6de72ae98b3ab623), SC_LIMB(0xffffffffffffffff), SC_LIMB(0x0fffffffffffffff)
  42. }}};
  43. const uint8_t decaf_x25519_base_point[DECAF_X25519_PUBLIC_BYTES] = { 0x09 };
  44. #if COFACTOR==8 || EDDSA_USE_SIGMA_ISOGENY
  45. static const gf SQRT_ONE_MINUS_D = {FIELD_LITERAL(
  46. 0x6db8831bbddec, 0x38d7b56c9c165, 0x016b221394bdc, 0x7540f7816214a, 0x0a0d85b4032b1
  47. )};
  48. #endif
  49. /* End of template stuff */
  50. /* Sanity */
  51. #if (COFACTOR == 8) && !IMAGINE_TWIST
  52. /* FUTURE MAGIC: Curve41417 doesn't have these properties. */
  53. #error "Currently require IMAGINE_TWIST (and thus p=5 mod 8) for cofactor 8"
  54. #endif
  55. #if IMAGINE_TWIST && (P_MOD_8 != 5)
  56. #error "Cannot use IMAGINE_TWIST except for p == 5 mod 8"
  57. #endif
  58. #if (COFACTOR != 8) && (COFACTOR != 4)
  59. #error "COFACTOR must be 4 or 8"
  60. #endif
  61. #if IMAGINE_TWIST
  62. extern const gf SQRT_MINUS_ONE;
  63. #endif
  64. #define WBITS DECAF_WORD_BITS /* NB this may be different from ARCH_WORD_BITS */
  65. extern const point_t API_NS(point_base);
  66. /* Projective Niels coordinates */
  67. typedef struct { gf a, b, c; } niels_s, niels_t[1];
  68. typedef struct { niels_t n; gf z; } VECTOR_ALIGNED pniels_s, pniels_t[1];
  69. /* Precomputed base */
  70. struct precomputed_s { niels_t table [COMBS_N<<(COMBS_T-1)]; };
  71. extern const gf API_NS(precomputed_base_as_fe)[];
  72. const precomputed_s *API_NS(precomputed_base) =
  73. (const precomputed_s *) &API_NS(precomputed_base_as_fe);
  74. const size_t API_NS(sizeof_precomputed_s) = sizeof(precomputed_s);
  75. const size_t API_NS(alignof_precomputed_s) = sizeof(big_register_t);
  76. /** Inverse. */
  77. static void
  78. gf_invert(gf y, const gf x) {
  79. gf t1, t2;
  80. gf_sqr(t1, x); // o^2
  81. mask_t ret = gf_isr(t2, t1); // +-1/sqrt(o^2) = +-1/o
  82. (void)ret; assert(ret);
  83. gf_sqr(t1, t2);
  84. gf_mul(t2, t1, x); // not direct to y in case of alias.
  85. gf_copy(y, t2);
  86. }
  87. /** Return high bit of x = low bit of 2x mod p */
  88. static mask_t gf_lobit(const gf x) {
  89. gf y;
  90. gf_copy(y,x);
  91. gf_strong_reduce(y);
  92. return -(y->limb[0]&1);
  93. }
  94. /** identity = (0,1) */
  95. const point_t API_NS(point_identity) = {{{{{0}}},{{{1}}},{{{1}}},{{{0}}}}};
/* Forward declaration: shared by point_encode and the elligator-inversion code. */
void API_NS(deisogenize) (
    gf_s *__restrict__ s,
    gf_s *__restrict__ minus_t_over_s,
    const point_t p,
    mask_t toggle_hibit_s,
    mask_t toggle_hibit_t_over_s,
    mask_t toggle_rotation
);

/**
 * Map a point on the internal (possibly twisted) curve through the isogeny to
 * its decaf coordinates: s (whose serialization is the canonical encoding)
 * and -t/s.  The toggle_* masks conditionally flip the sign/rotation choices
 * so a caller can enumerate alternative preimages; the plain encoding path
 * passes 0 for all three.  All branches on secret data are masked selects /
 * conditional negations, so this runs in constant time.
 */
void API_NS(deisogenize) (
    gf_s *__restrict__ s,
    gf_s *__restrict__ minus_t_over_s,
    const point_t p,
    mask_t toggle_hibit_s,
    mask_t toggle_hibit_t_over_s,
    mask_t toggle_rotation
) {
#if COFACTOR == 4 && !IMAGINE_TWIST
    (void) toggle_rotation;          /* no 4-torsion rotation in this configuration */

    gf b, d;
    gf_s *c = s, *a = minus_t_over_s;  /* compute directly into the outputs */
    gf_mulw(a, p->y, 1-EDWARDS_D);
    gf_mul(c, a, p->t);              /* -dYT, with EDWARDS_D = d-1 */
    gf_mul(a, p->x, p->z);
    gf_sub(d, c, a);                 /* aXZ-dYT with a=-1 */
    gf_add(a, p->z, p->y);
    gf_sub(b, p->z, p->y);
    gf_mul(c, b, a);
    gf_mulw(b, c, -EDWARDS_D);       /* (a-d)(Z+Y)(Z-Y) */
    mask_t ok = gf_isr (a,b);        /* r in the paper */
    (void)ok; assert(ok | gf_eq(b,ZERO));
    gf_mulw (b, a, -EDWARDS_D);      /* u in the paper */
    gf_mul(c,a,d);                   /* r(aZX-dYT) */
    gf_mul(a,b,p->z);                /* uZ */
    gf_add(a,a,a);                   /* 2uZ */

    /* Canonicalize the sign of t/s (optionally flipped by the toggle). */
    mask_t tg = toggle_hibit_t_over_s ^ ~gf_hibit(minus_t_over_s);
    gf_cond_neg(minus_t_over_s, tg); /* t/s <-? -t/s */
    gf_cond_neg(c, tg);              /* u <- -u if negative. */
    gf_add(d,c,p->y);
    gf_mul(s,b,d);
    gf_cond_neg(s, toggle_hibit_s ^ gf_hibit(s));
#else
    /* More complicated because of rotation */
    /* MAGIC This code is wrong for certain non-Curve25519 curves;
     * check if it's because of Cofactor==8 or IMAGINE_ROTATION */

    gf c, d;
    gf_s *b = s, *a = minus_t_over_s;

#if IMAGINE_TWIST
    /* Untwist x and t (divide by the quadratic non-residue i) first. */
    gf x, t;
    gf_div_qnr(x,p->x);
    gf_div_qnr(t,p->t);
    gf_add ( a, p->z, x );
    gf_sub ( b, p->z, x );
    gf_mul ( c, a, b );              /* "zx" = Z^2 - aX^2 = Z^2 - X^2 */
#else
    const gf_s *x = p->x, *t = p->t;
    /* Won't hit the gf_cond_sel below because COFACTOR==8 requires IMAGINE_TWIST for now. */
    gf_sqr ( a, p->z );
    gf_sqr ( b, p->x );
    gf_add ( c, a, b );              /* "zx" = Z^2 - aX^2 = Z^2 + X^2 */
#endif

    gf_mul ( a, p->z, t );           /* "tz" = T*Z */
    gf_sqr ( b, a );
    gf_mul ( d, b, c );              /* (TZ)^2 * (Z^2-aX^2) */
    mask_t ok = gf_isr(b, d);        /* one inverse square root serves both quotients */
    (void)ok; assert(ok | gf_eq(d,ZERO));
    gf_mul ( d, b, a );              /* "osx" = 1 / sqrt(z^2-ax^2) */
    gf_mul ( a, b, c );
    gf_mul ( b, a, d );              /* 1/tz */

    mask_t rotate;
#if (COFACTOR == 8)
    /* Decide (in constant time) whether to apply the 4-torsion rotation. */
    gf e;
    gf_sqr(e, p->z);
    gf_mul(a, e, b);                 /* z^2 / tz = z/t = 1/xy */
    rotate = gf_hibit(a) ^ toggle_rotation;
    /* Curve25519: cond select between zx * 1/tz or sqrt(1-d); y=-x */
    gf_mul ( a, b, c );
    gf_cond_sel ( a, a, SQRT_ONE_MINUS_D, rotate );
    gf_cond_sel ( x, p->y, x, rotate );
#else
    (void)toggle_rotation;
    rotate = 0;
#endif

    gf_mul ( c, a, d );              /* new "osx" */
    gf_mul ( a, c, p->z );
    gf_add ( minus_t_over_s, a, a ); /* 2 * "osx" * Z */
    gf_mul ( d, b, p->z );

    /* Canonicalize the sign of t/s (optionally flipped by the toggle). */
    mask_t tg = toggle_hibit_t_over_s ^~ gf_hibit(minus_t_over_s);
    gf_cond_neg ( minus_t_over_s, tg );
    gf_cond_neg ( c, rotate ^ tg );
    gf_add ( d, d, c );
    gf_mul ( s, d, x );              /* here "x" = y unless rotate */
    gf_cond_neg ( s, toggle_hibit_s ^ gf_hibit(s) );
#endif
}
  190. void API_NS(point_encode)( unsigned char ser[SER_BYTES], const point_t p ) {
  191. gf s, mtos;
  192. API_NS(deisogenize)(s,mtos,p,0,0,0);
  193. gf_serialize(ser,s,0);
  194. }
/**
 * Decode a SER_BYTES decaf encoding into a point.
 *
 * Returns DECAF_SUCCESS iff the input is a canonical field element that maps
 * to a valid point; the identity (s == 0) is accepted only when
 * allow_identity is set.  Runs in constant time: all validity conditions are
 * accumulated into the mask `succ` and checked once at the end.
 */
decaf_error_t API_NS(point_decode) (
    point_t p,
    const unsigned char ser[SER_BYTES],
    decaf_bool_t allow_identity
) {
    gf s, a, b, c, d, e, f;
    mask_t succ = gf_deserialize(s, ser, 0);  /* fails on non-canonical input */
    mask_t zero = gf_eq(s, ZERO);
    succ &= bool_to_mask(allow_identity) | ~zero;
    gf_sqr ( a, s );
#if IMAGINE_TWIST
    gf_sub ( f, ONE, a );            /* f = 1-as^2 = 1-s^2*/
#else
    gf_add ( f, ONE, a );            /* f = 1-as^2 = 1+s^2 */
#endif
    succ &= ~ gf_eq( f, ZERO );
    gf_sqr ( b, f );
    gf_mulw ( c, a, 4*IMAGINE_TWIST-4*EDWARDS_D );
    gf_add ( c, c, b );              /* t^2 */
    gf_mul ( d, f, s );              /* s(1-as^2) for denoms */
    gf_sqr ( e, d );
    gf_mul ( b, c, e );
    /* One inverse square root recovers both 1/t and 1/(s(1-as^2)). */
    succ &= gf_isr(e,b) | gf_eq(b,ZERO);  /* e = 1/(t s (1-as^2)) */
    gf_mul ( b, e, d );              /* 1/t */
    gf_mul ( d, e, c );              /* d = t / (s(1-as^2)) */
    gf_mul ( e, d, f );              /* t/s */
    /* Canonicalize: choose the representative with t/s non-negative. */
    mask_t negtos = gf_hibit(e);
    gf_cond_neg(b, negtos);
    gf_cond_neg(d, negtos);
#if IMAGINE_TWIST
    gf_add ( p->z, ONE, a);          /* Z = 1+as^2 = 1-s^2 */
#else
    gf_sub ( p->z, ONE, a);          /* Z = 1+as^2 = 1-s^2 */
#endif
#if COFACTOR == 8
    /* Reject the coset representative that lands in the wrong 8-torsion class. */
    gf_mul ( a, p->z, d);            /* t(1+s^2) / s(1-s^2) = 2/xy */
    succ &= ~gf_lobit(a);            /* = ~gf_hibit(a/2), since gf_hibit(x) = gf_lobit(2x) */
#endif
    gf_mul ( a, f, b );              /* y = (1-s^2) / t */
    gf_mul ( p->y, p->z, a );        /* Y = yZ */
#if IMAGINE_TWIST
    gf_add ( b, s, s );
    gf_mul(p->x, b, SQRT_MINUS_ONE); /* Curve25519 */
#else
    gf_add ( p->x, s, s );
#endif
    gf_mul ( p->t, p->x, a );        /* T = 2s (1-as^2)/t */

    /* s == 0 decodes to the identity (0,1): patch Y up from 0 to 1. */
    p->y->limb[0] -= zero;

    assert(API_NS(point_valid)(p) | ~succ);
    return decaf_succeed_if(mask_to_bool(succ));
}
  246. #if IMAGINE_TWIST
  247. #define TWISTED_D (-(EDWARDS_D))
  248. #else
  249. #define TWISTED_D ((EDWARDS_D)-1)
  250. #endif
  251. #if TWISTED_D < 0
  252. #define EFF_D (-(TWISTED_D))
  253. #define NEG_D 1
  254. #else
  255. #define EFF_D TWISTED_D
  256. #define NEG_D 0
  257. #endif
/**
 * p = q - r, using the extended twisted-Edwards addition formulas with r's
 * coordinates negated implicitly (a-b swap relative to point_add).
 *
 * The "k+e" comments track carry headroom after each unreduced add/sub so the
 * gf_*_nr (no-reduce) operations never overflow a limb.
 */
void API_NS(point_sub) (
    point_t p,
    const point_t q,
    const point_t r
) {
    gf a, b, c, d;
    gf_sub_nr ( b, q->y, q->x );     /* 3+e */
    gf_sub_nr ( d, r->y, r->x );     /* 3+e */
    gf_add_nr ( c, r->y, r->x );     /* 2+e */
    gf_mul ( a, c, b );
    gf_add_nr ( b, q->y, q->x );     /* 2+e */
    gf_mul ( p->y, d, b );
    gf_mul ( b, r->t, q->t );
    gf_mulw ( p->x, b, 2*EFF_D );
    gf_add_nr ( b, a, p->y );        /* 2+e */
    gf_sub_nr ( c, p->y, a );        /* 3+e */
    gf_mul ( a, q->z, r->z );
    gf_add_nr ( a, a, a );           /* 2+e */
    if (GF_HEADROOM <= 3) gf_weak_reduce(a); /* or 1+e */
#if NEG_D
    gf_sub_nr ( p->y, a, p->x );     /* 4+e or 3+e */
    gf_add_nr ( a, a, p->x );        /* 3+e or 2+e */
#else
    gf_add_nr ( p->y, a, p->x );     /* 3+e or 2+e */
    gf_sub_nr ( a, a, p->x );        /* 4+e or 3+e */
#endif
    gf_mul ( p->z, a, p->y );
    gf_mul ( p->x, p->y, c );
    gf_mul ( p->y, a, b );
    gf_mul ( p->t, b, c );
}
/**
 * p = q + r, extended twisted-Edwards addition (mirror of point_sub).
 *
 * The "k+e" comments track carry headroom after each unreduced add/sub so the
 * gf_*_nr (no-reduce) operations never overflow a limb.
 */
void API_NS(point_add) (
    point_t p,
    const point_t q,
    const point_t r
) {
    gf a, b, c, d;
    gf_sub_nr ( b, q->y, q->x );     /* 3+e */
    gf_sub_nr ( c, r->y, r->x );     /* 3+e */
    gf_add_nr ( d, r->y, r->x );     /* 2+e */
    gf_mul ( a, c, b );
    gf_add_nr ( b, q->y, q->x );     /* 2+e */
    gf_mul ( p->y, d, b );
    gf_mul ( b, r->t, q->t );
    gf_mulw ( p->x, b, 2*EFF_D );
    gf_add_nr ( b, a, p->y );        /* 2+e */
    gf_sub_nr ( c, p->y, a );        /* 3+e */
    gf_mul ( a, q->z, r->z );
    gf_add_nr ( a, a, a );           /* 2+e */
    if (GF_HEADROOM <= 3) gf_weak_reduce(a); /* or 1+e */
#if NEG_D
    gf_add_nr ( p->y, a, p->x );     /* 3+e or 2+e */
    gf_sub_nr ( a, a, p->x );        /* 4+e or 3+e */
#else
    gf_sub_nr ( p->y, a, p->x );     /* 4+e or 3+e */
    gf_add_nr ( a, a, p->x );        /* 3+e or 2+e */
#endif
    gf_mul ( p->z, a, p->y );
    gf_mul ( p->x, p->y, c );
    gf_mul ( p->y, a, b );
    gf_mul ( p->t, b, c );
}
/**
 * p = 2q.  When before_double is nonzero the T coordinate is left stale,
 * which is safe (and saves a multiply) when the next operation is another
 * doubling that does not read T.
 */
static DECAF_NOINLINE void
point_double_internal (
    point_t p,
    const point_t q,
    int before_double
) {
    gf a, b, c, d;
    gf_sqr ( c, q->x );
    gf_sqr ( a, q->y );
    gf_add_nr ( d, c, a );           /* 2+e */
    gf_add_nr ( p->t, q->y, q->x );  /* 2+e */
    gf_sqr ( b, p->t );
    gf_subx_nr ( b, b, d, 3 );       /* 4+e */
    gf_sub_nr ( p->t, a, c );        /* 3+e */
    gf_sqr ( p->x, q->z );
    gf_add_nr ( p->z, p->x, p->x );  /* 2+e */
    gf_subx_nr ( a, p->z, p->t, 4 ); /* 6+e */
    if (GF_HEADROOM == 5) gf_weak_reduce(a); /* or 1+e */
    gf_mul ( p->x, a, b );
    gf_mul ( p->z, p->t, a );
    gf_mul ( p->y, p->t, d );
    /* T is only needed if the result will be used as a full extended point. */
    if (!before_double) gf_mul ( p->t, b, d );
}
/** Public doubling: p = 2q with all four coordinates valid. */
void API_NS(point_double)(point_t p, const point_t q) {
    point_double_internal(p,q,0);
}
  346. void API_NS(point_negate) (
  347. point_t nega,
  348. const point_t a
  349. ) {
  350. gf_sub(nega->x, ZERO, a->x);
  351. gf_copy(nega->y, a->y);
  352. gf_copy(nega->z, a->z);
  353. gf_sub(nega->t, ZERO, a->t);
  354. }
  355. /* Operations on [p]niels */
/**
 * Conditionally negate a Niels point in constant time.  Since a = y-x and
 * b = y+x, negation (x -> -x, and the odd c term flips) is a swap of a and b
 * plus a sign flip of c, both gated on the mask `neg`.
 */
static DECAF_INLINE void
cond_neg_niels (
    niels_t n,
    mask_t neg
) {
    gf_cond_swap(n->a, n->b, neg);
    gf_cond_neg(n->c, neg);
}
/** Convert an extended point to projective Niels form:
 *  (a,b,c,z) = (Y-X, Y+X, 2d'T, 2Z). */
static DECAF_NOINLINE void pt_to_pniels (
    pniels_t b,
    const point_t a
) {
    gf_sub ( b->n->a, a->y, a->x );
    gf_add ( b->n->b, a->x, a->y );
    gf_mulw ( b->n->c, a->t, 2*TWISTED_D );
    gf_add ( b->z, a->z, a->z );
}
/** Convert projective Niels form back to an extended point.
 *  Recovers X,Y from b+-a = 2Y, 2X (the factor 2 cancels projectively). */
static DECAF_NOINLINE void pniels_to_pt (
    point_t e,
    const pniels_t d
) {
    gf eu;
    gf_add ( eu, d->n->b, d->n->a ); /* 2Y */
    gf_sub ( e->y, d->n->b, d->n->a ); /* 2X */
    gf_mul ( e->t, e->y, eu);        /* T = 4XY */
    gf_mul ( e->x, d->z, e->y );
    gf_mul ( e->y, d->z, eu );
    gf_sqr ( e->z, d->z );
}
/** Convert an affine Niels point (Z implicitly 1) to an extended point. */
static DECAF_NOINLINE void
niels_to_pt (
    point_t e,
    const niels_t n
) {
    gf_add ( e->y, n->b, n->a );     /* 2Y */
    gf_sub ( e->x, n->b, n->a );     /* 2X */
    gf_mul ( e->t, e->y, e->x );
    gf_copy ( e->z, ONE );
}
/**
 * d += e (mixed addition with an affine Niels point), in place.
 * When before_double is nonzero, T is left stale for a following doubling.
 * "k+e" comments track carry headroom for the no-reduce operations.
 */
static DECAF_NOINLINE void
add_niels_to_pt (
    point_t d,
    const niels_t e,
    int before_double
) {
    gf a, b, c;
    gf_sub_nr ( b, d->y, d->x );     /* 3+e */
    gf_mul ( a, e->a, b );
    gf_add_nr ( b, d->x, d->y );     /* 2+e */
    gf_mul ( d->y, e->b, b );
    gf_mul ( d->x, e->c, d->t );
    gf_add_nr ( c, a, d->y );        /* 2+e */
    gf_sub_nr ( b, d->y, a );        /* 3+e */
    gf_sub_nr ( d->y, d->z, d->x );  /* 3+e */
    gf_add_nr ( a, d->x, d->z );     /* 2+e */
    gf_mul ( d->z, a, d->y );
    gf_mul ( d->x, d->y, b );
    gf_mul ( d->y, a, c );
    if (!before_double) gf_mul ( d->t, b, c );
}
/**
 * d -= e (mixed subtraction with an affine Niels point), in place.
 * Identical to add_niels_to_pt with e->a/e->b swapped and the Z terms'
 * add/sub exchanged, which is how a Niels point is negated.
 */
static DECAF_NOINLINE void
sub_niels_from_pt (
    point_t d,
    const niels_t e,
    int before_double
) {
    gf a, b, c;
    gf_sub_nr ( b, d->y, d->x );     /* 3+e */
    gf_mul ( a, e->b, b );
    gf_add_nr ( b, d->x, d->y );     /* 2+e */
    gf_mul ( d->y, e->a, b );
    gf_mul ( d->x, e->c, d->t );
    gf_add_nr ( c, a, d->y );        /* 2+e */
    gf_sub_nr ( b, d->y, a );        /* 3+e */
    gf_add_nr ( d->y, d->z, d->x );  /* 2+e */
    gf_sub_nr ( a, d->z, d->x );     /* 3+e */
    gf_mul ( d->z, a, d->y );
    gf_mul ( d->x, d->y, b );
    gf_mul ( d->y, a, c );
    if (!before_double) gf_mul ( d->t, b, c );
}
  437. static void
  438. add_pniels_to_pt (
  439. point_t p,
  440. const pniels_t pn,
  441. int before_double
  442. ) {
  443. gf L0;
  444. gf_mul ( L0, p->z, pn->z );
  445. gf_copy ( p->z, L0 );
  446. add_niels_to_pt( p, pn->n, before_double );
  447. }
  448. static void
  449. sub_pniels_from_pt (
  450. point_t p,
  451. const pniels_t pn,
  452. int before_double
  453. ) {
  454. gf L0;
  455. gf_mul ( L0, p->z, pn->z );
  456. gf_copy ( p->z, L0 );
  457. sub_niels_from_pt( p, pn->n, before_double );
  458. }
/**
 * Fill multiples[i] with (2i+1)*b, i = 0..ntable-1, in projective Niels form
 * (the odd multiples needed by the signed fixed-window ladder).  Scratch
 * state is zeroized before returning since b may be secret-derived.
 */
static DECAF_NOINLINE void
prepare_fixed_window(
    pniels_t *multiples,
    const point_t b,
    int ntable
) {
    point_t tmp;
    pniels_t pn;
    int i;

    point_double_internal(tmp, b, 0);
    pt_to_pniels(pn, tmp);           /* pn = 2b, the step between odd multiples */
    pt_to_pniels(multiples[0], b);
    API_NS(point_copy)(tmp, b);
    for (i=1; i<ntable; i++) {
        add_pniels_to_pt(tmp, pn, 0);
        pt_to_pniels(multiples[i], tmp);
    }

    decaf_bzero(pn,sizeof(pn));
    decaf_bzero(tmp,sizeof(tmp));
}
/**
 * a = scalar * b, constant time.
 *
 * Uses a signed fixed-window ladder: the scalar is first adjusted and halved
 * so every window digit is odd (point_scalarmul_adjustment compensates), then
 * each WINDOW-bit digit selects one of NTABLE precomputed odd multiples via a
 * constant-time table lookup, with constant-time negation for negative digits.
 */
void API_NS(point_scalarmul) (
    point_t a,
    const point_t b,
    const scalar_t scalar
) {
    const int WINDOW = DECAF_WINDOW_BITS,
        WINDOW_MASK = (1<<WINDOW)-1,
        WINDOW_T_MASK = WINDOW_MASK >> 1,
        NTABLE = 1<<(WINDOW-1);

    /* Recode so that all digits are odd. */
    scalar_t scalar1x;
    API_NS(scalar_add)(scalar1x, scalar, point_scalarmul_adjustment);
    API_NS(scalar_halve)(scalar1x,scalar1x);

    /* Set up a precomputed table with odd multiples of b. */
    pniels_t pn, multiples[NTABLE];
    point_t tmp;
    prepare_fixed_window(multiples, b, NTABLE);

    /* Initialize. */
    int i,j,first=1;
    i = SCALAR_BITS - ((SCALAR_BITS-1) % WINDOW) - 1;

    for (; i>=0; i-=WINDOW) {
        /* Fetch another block of bits */
        word_t bits = scalar1x->limb[i/WBITS] >> (i%WBITS);
        if (i%WBITS >= WBITS-WINDOW && i/WBITS<SCALAR_LIMBS-1) {
            /* Window straddles a limb boundary: pull in the high part. */
            bits ^= scalar1x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
        }
        bits &= WINDOW_MASK;
        mask_t inv = (bits>>(WINDOW-1))-1;  /* all-ones if digit is "negative" */
        bits ^= inv;

        /* Add in from table.  Compute t only on last iteration. */
        constant_time_lookup(pn, multiples, sizeof(pn), NTABLE, bits & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv);
        if (first) {
            pniels_to_pt(tmp, pn);
            first = 0;
        } else {
           /* Using Hisil et al's lookahead method instead of extensible here
            * for no particular reason.  Double WINDOW times, but only compute t on
            * the last one.
            */
            for (j=0; j<WINDOW-1; j++)
                point_double_internal(tmp, tmp, -1);
            point_double_internal(tmp, tmp, 0);
            add_pniels_to_pt(tmp, pn, i ? -1 : 0);
        }
    }

    /* Write out the answer */
    API_NS(point_copy)(a,tmp);

    /* Zeroize secret-dependent scratch. */
    decaf_bzero(scalar1x,sizeof(scalar1x));
    decaf_bzero(pn,sizeof(pn));
    decaf_bzero(multiples,sizeof(multiples));
    decaf_bzero(tmp,sizeof(tmp));
}
/**
 * a = scalarb * b + scalarc * c, constant time.
 *
 * Same signed fixed-window recoding as point_scalarmul, but with two tables;
 * each window position performs the shared doublings once and then adds the
 * selected multiple from each table.
 */
void API_NS(point_double_scalarmul) (
    point_t a,
    const point_t b,
    const scalar_t scalarb,
    const point_t c,
    const scalar_t scalarc
) {
    const int WINDOW = DECAF_WINDOW_BITS,
        WINDOW_MASK = (1<<WINDOW)-1,
        WINDOW_T_MASK = WINDOW_MASK >> 1,
        NTABLE = 1<<(WINDOW-1);

    /* Recode both scalars so that all digits are odd. */
    scalar_t scalar1x, scalar2x;
    API_NS(scalar_add)(scalar1x, scalarb, point_scalarmul_adjustment);
    API_NS(scalar_halve)(scalar1x,scalar1x);
    API_NS(scalar_add)(scalar2x, scalarc, point_scalarmul_adjustment);
    API_NS(scalar_halve)(scalar2x,scalar2x);

    /* Set up a precomputed table with odd multiples of b. */
    pniels_t pn, multiples1[NTABLE], multiples2[NTABLE];
    point_t tmp;
    prepare_fixed_window(multiples1, b, NTABLE);
    prepare_fixed_window(multiples2, c, NTABLE);

    /* Initialize. */
    int i,j,first=1;
    i = SCALAR_BITS - ((SCALAR_BITS-1) % WINDOW) - 1;

    for (; i>=0; i-=WINDOW) {
        /* Fetch another block of bits */
        word_t bits1 = scalar1x->limb[i/WBITS] >> (i%WBITS),
               bits2 = scalar2x->limb[i/WBITS] >> (i%WBITS);
        if (i%WBITS >= WBITS-WINDOW && i/WBITS<SCALAR_LIMBS-1) {
            /* Window straddles a limb boundary: pull in the high parts. */
            bits1 ^= scalar1x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
            bits2 ^= scalar2x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
        }
        bits1 &= WINDOW_MASK;
        bits2 &= WINDOW_MASK;
        mask_t inv1 = (bits1>>(WINDOW-1))-1;  /* all-ones if digit negative */
        mask_t inv2 = (bits2>>(WINDOW-1))-1;
        bits1 ^= inv1;
        bits2 ^= inv2;

        /* Add in from table.  Compute t only on last iteration. */
        constant_time_lookup(pn, multiples1, sizeof(pn), NTABLE, bits1 & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv1);
        if (first) {
            pniels_to_pt(tmp, pn);
            first = 0;
        } else {
           /* Using Hisil et al's lookahead method instead of extensible here
            * for no particular reason.  Double WINDOW times, but only compute t on
            * the last one.
            */
            for (j=0; j<WINDOW-1; j++)
                point_double_internal(tmp, tmp, -1);
            point_double_internal(tmp, tmp, 0);
            add_pniels_to_pt(tmp, pn, 0);
        }
        constant_time_lookup(pn, multiples2, sizeof(pn), NTABLE, bits2 & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv2);
        add_pniels_to_pt(tmp, pn, i?-1:0);
    }

    /* Write out the answer */
    API_NS(point_copy)(a,tmp);

    /* Zeroize secret-dependent scratch. */
    decaf_bzero(scalar1x,sizeof(scalar1x));
    decaf_bzero(scalar2x,sizeof(scalar2x));
    decaf_bzero(pn,sizeof(pn));
    decaf_bzero(multiples1,sizeof(multiples1));
    decaf_bzero(multiples2,sizeof(multiples2));
    decaf_bzero(tmp,sizeof(tmp));
}
/**
 * a1 = scalar1 * b and a2 = scalar2 * b, simultaneously, constant time.
 *
 * Dual of the fixed-window ladder: instead of looking up multiples of b, it
 * walks b upward through the window positions and accumulates each window's
 * contribution into per-digit bucket points (multiples1/2), then combines the
 * buckets with the standard weighted-bucket summation at the end.
 */
void API_NS(point_dual_scalarmul) (
    point_t a1,
    point_t a2,
    const point_t b,
    const scalar_t scalar1,
    const scalar_t scalar2
) {
    const int WINDOW = DECAF_WINDOW_BITS,
        WINDOW_MASK = (1<<WINDOW)-1,
        WINDOW_T_MASK = WINDOW_MASK >> 1,
        NTABLE = 1<<(WINDOW-1);

    /* Recode both scalars so that all digits are odd. */
    scalar_t scalar1x, scalar2x;
    API_NS(scalar_add)(scalar1x, scalar1, point_scalarmul_adjustment);
    API_NS(scalar_halve)(scalar1x,scalar1x);
    API_NS(scalar_add)(scalar2x, scalar2, point_scalarmul_adjustment);
    API_NS(scalar_halve)(scalar2x,scalar2x);

    /* Set up a precomputed table with odd multiples of b. */
    point_t multiples1[NTABLE], multiples2[NTABLE], working, tmp;
    pniels_t pn;

    API_NS(point_copy)(working, b);

    /* Initialize the buckets to the identity. */
    int i,j;
    for (i=0; i<NTABLE; i++) {
        API_NS(point_copy)(multiples1[i], API_NS(point_identity));
        API_NS(point_copy)(multiples2[i], API_NS(point_identity));
    }

    for (i=0; i<SCALAR_BITS; i+=WINDOW) {
        if (i) {
            /* Advance working to 2^WINDOW * (previous working). */
            for (j=0; j<WINDOW-1; j++)
                point_double_internal(working, working, -1);
            point_double_internal(working, working, 0);
        }

        /* Fetch another block of bits */
        word_t bits1 = scalar1x->limb[i/WBITS] >> (i%WBITS),
               bits2 = scalar2x->limb[i/WBITS] >> (i%WBITS);
        if (i%WBITS >= WBITS-WINDOW && i/WBITS<SCALAR_LIMBS-1) {
            /* Window straddles a limb boundary: pull in the high parts. */
            bits1 ^= scalar1x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
            bits2 ^= scalar2x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
        }
        bits1 &= WINDOW_MASK;
        bits2 &= WINDOW_MASK;
        mask_t inv1 = (bits1>>(WINDOW-1))-1;  /* all-ones if digit negative */
        mask_t inv2 = (bits2>>(WINDOW-1))-1;
        bits1 ^= inv1;
        bits2 ^= inv2;

        pt_to_pniels(pn, working);

        /* Accumulate +-working into the digit's bucket, via constant-time
         * read-modify-write so the accessed index is not leaked. */
        constant_time_lookup(tmp, multiples1, sizeof(tmp), NTABLE, bits1 & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv1);
        /* add_pniels_to_pt(multiples1[bits1 & WINDOW_T_MASK], pn, 0); */
        add_pniels_to_pt(tmp, pn, 0);
        constant_time_insert(multiples1, tmp, sizeof(tmp), NTABLE, bits1 & WINDOW_T_MASK);


        constant_time_lookup(tmp, multiples2, sizeof(tmp), NTABLE, bits2 & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv1^inv2);  /* undo inv1's negation, apply inv2's */
        /* add_pniels_to_pt(multiples2[bits2 & WINDOW_T_MASK], pn, 0); */
        add_pniels_to_pt(tmp, pn, 0);
        constant_time_insert(multiples2, tmp, sizeof(tmp), NTABLE, bits2 & WINDOW_T_MASK);
    }

    /* Combine the buckets: sum_k (2k+1) * bucket[k], computed with suffix sums. */
    if (NTABLE > 1) {
        API_NS(point_copy)(working, multiples1[NTABLE-1]);
        API_NS(point_copy)(tmp    , multiples2[NTABLE-1]);

        for (i=NTABLE-1; i>1; i--) {
            API_NS(point_add)(multiples1[i-1], multiples1[i-1], multiples1[i]);
            API_NS(point_add)(multiples2[i-1], multiples2[i-1], multiples2[i]);
            API_NS(point_add)(working, working, multiples1[i-1]);
            API_NS(point_add)(tmp, tmp, multiples2[i-1]);
        }

        API_NS(point_add)(multiples1[0], multiples1[0], multiples1[1]);
        API_NS(point_add)(multiples2[0], multiples2[0], multiples2[1]);
        point_double_internal(working, working, 0);
        point_double_internal(tmp, tmp, 0);
        API_NS(point_add)(a1, working, multiples1[0]);
        API_NS(point_add)(a2, tmp, multiples2[0]);
    } else {
        API_NS(point_copy)(a1, multiples1[0]);
        API_NS(point_copy)(a2, multiples2[0]);
    }

    /* Zeroize secret-dependent scratch. */
    decaf_bzero(scalar1x,sizeof(scalar1x));
    decaf_bzero(scalar2x,sizeof(scalar2x));
    decaf_bzero(pn,sizeof(pn));
    decaf_bzero(multiples1,sizeof(multiples1));
    decaf_bzero(multiples2,sizeof(multiples2));
    decaf_bzero(tmp,sizeof(tmp));
    decaf_bzero(working,sizeof(working));
}
/**
 * Constant-time equality of decaf points.  Points are compared modulo the
 * cofactor torsion, so distinct internal representatives of the same decaf
 * element compare equal.
 */
decaf_bool_t API_NS(point_eq) ( const point_t p, const point_t q ) {
    /* equality mod 2-torsion compares x/y */
    gf a, b;
    gf_mul ( a, p->y, q->x );
    gf_mul ( b, q->y, p->x );
    mask_t succ = gf_eq(a,b);

#if (COFACTOR == 8) && IMAGINE_TWIST
    /* With cofactor 8, also accept the 4-torsion rotated representative. */
    gf_mul ( a, p->y, q->y );
    gf_mul ( b, q->x, p->x );
#if !(IMAGINE_TWIST)
    /* NOTE(review): unreachable here — the enclosing #if requires
     * IMAGINE_TWIST.  Retained from the code-generation template. */
    gf_sub ( a, ZERO, a );
#else
   /* Interesting note: the 4tor would normally be rotation.
    * But because of the *i twist, it's actually
    * (x,y) <-> (iy,ix)
    */
   /* No code, just a comment. */
#endif
    succ |= gf_eq(a,b);
#endif

    return mask_to_bool(succ);
}
/**
 * Constant-time validity check for an extended point:
 *   XY == ZT  (extended-coordinate invariant),
 *   -X^2 + Y^2 == Z^2 + d'T^2  (curve equation, a = -1), and
 *   Z != 0.
 */
decaf_bool_t API_NS(point_valid) (
    const point_t p
) {
    gf a,b,c;
    gf_mul(a,p->x,p->y);
    gf_mul(b,p->z,p->t);
    mask_t out = gf_eq(a,b);
    gf_sqr(a,p->x);
    gf_sqr(b,p->y);
    gf_sub(a,b,a);                   /* Y^2 - X^2 */
    gf_sqr(b,p->t);
    gf_mulw(c,b,TWISTED_D);
    gf_sqr(b,p->z);
    gf_add(b,b,c);                   /* Z^2 + d'T^2 */
    out &= gf_eq(a,b);
    out &= ~gf_eq(p->z,ZERO);
    return mask_to_bool(out);
}
/**
 * Debugging helper: q = p composed with a nontrivial torsion element, i.e.
 * a different internal representative of the same decaf point.  With
 * cofactor 8 this is the order-4 rotation (x,y) -> (iy,ix); otherwise it is
 * the order-2 negation of both coordinates.
 */
void API_NS(point_debugging_torque) (
    point_t q,
    const point_t p
) {
#if COFACTOR == 8
    gf tmp;
    gf_mul(tmp,p->x,SQRT_MINUS_ONE);
    gf_mul(q->x,p->y,SQRT_MINUS_ONE);
    gf_copy(q->y,tmp);
    gf_copy(q->z,p->z);
    gf_sub(q->t,ZERO,p->t);
#else
    gf_sub(q->x,ZERO,p->x);
    gf_sub(q->y,ZERO,p->y);
    gf_copy(q->z,p->z);
    gf_copy(q->t,p->t);
#endif
}
/**
 * Debugging helper: q = p with all projective coordinates multiplied by a
 * nonzero factor deserialized from `factor` — a different projective
 * representation of the same point.  A zero factor is replaced by one.
 */
void API_NS(point_debugging_pscale) (
    point_t q,
    const point_t p,
    const uint8_t factor[SER_BYTES]
) {
    gf gfac,tmp;
    /* NB this means you'll never pscale by negative numbers for p521 */
    ignore_result(gf_deserialize(gfac,factor,0));
    gf_cond_sel(gfac,gfac,ONE,gf_eq(gfac,ZERO));  /* avoid scaling by zero */
    gf_mul(tmp,p->x,gfac);
    gf_copy(q->x,tmp);
    gf_mul(tmp,p->y,gfac);
    gf_copy(q->y,tmp);
    gf_mul(tmp,p->z,gfac);
    gf_copy(q->z,tmp);
    gf_mul(tmp,p->t,gfac);
    gf_copy(q->t,tmp);
}
/**
 * Batch inversion (Montgomery's trick): out[i] = 1/in[i] for i = 0..n-1,
 * using one field inversion plus O(n) multiplications.
 *
 * Forward pass stores the running prefix products in out[1..n-1], inverts the
 * total product into out[0], then the backward pass peels off one input at a
 * time.  Requires n > 1 and all in[i] nonzero.
 */
static void gf_batch_invert (
    gf *__restrict__ out,
    const gf *in,
    unsigned int n
) {
    gf t1;
    assert(n>1);

    gf_copy(out[1], in[0]);
    int i;
    for (i=1; i<(int) (n-1); i++) {
        gf_mul(out[i+1], out[i], in[i]);  /* out[i+1] = in[0]*...*in[i] */
    }
    gf_mul(out[0], out[n-1], in[n-1]);    /* product of all inputs */

    gf_invert(out[0], out[0]);

    for (i=n-1; i>0; i--) {
        gf_mul(t1, out[i], out[0]);       /* 1/in[i] */
        gf_copy(out[i], t1);
        gf_mul(t1, out[0], in[i]);        /* drop in[i] from the inverse product */
        gf_copy(out[0], t1);
    }
}
/**
 * Normalize a table of Niels points: divide each entry's a, b, c by its
 * stored Z (zs[i]), using one batch inversion, and strongly reduce so the
 * table entries are canonical.  `zis` is caller-provided scratch for the
 * inverses.
 */
static void batch_normalize_niels (
    niels_t *table,
    const gf *zs,
    gf *__restrict__ zis,
    int n
) {
    int i;
    gf product;
    gf_batch_invert(zis, zs, n);

    for (i=0; i<n; i++) {
        gf_mul(product, table[i]->a, zis[i]);
        gf_strong_reduce(product);
        gf_copy(table[i]->a, product);

        gf_mul(product, table[i]->b, zis[i]);
        gf_strong_reduce(product);
        gf_copy(table[i]->b, product);

        gf_mul(product, table[i]->c, zis[i]);
        gf_strong_reduce(product);
        gf_copy(table[i]->c, product);
    }

    decaf_bzero(product,sizeof(product));
}
/**
 * Build the comb precomputation table for `base`, for use with
 * precomputed_scalarmul.  The table holds n combs of 2^(t-1) normalized
 * Niels entries each, covering digits of stride s (COMBS_N/T/S).
 *
 * Phase 1 (doubling) computes the comb "teeth" spacings; phase 2 walks a
 * Gray code so each successive table entry differs from the previous by a
 * single add/subtract of a cached doubling.  Scratch is zeroized at the end.
 */
void API_NS(precompute) (
    precomputed_s *table,
    const point_t base
) {
    const unsigned int n = COMBS_N, t = COMBS_T, s = COMBS_S;
    assert(n*t*s >= SCALAR_BITS);    /* combs must cover every scalar bit */

    point_t working, start, doubles[t-1];
    API_NS(point_copy)(working, base);
    pniels_t pn_tmp;

    gf zs[n<<(t-1)], zis[n<<(t-1)];  /* VLA scratch for batch normalization */

    unsigned int i,j,k;

    /* Compute n tables */
    for (i=0; i<n; i++) {

        /* Doubling phase */
        for (j=0; j<t; j++) {
            if (j) API_NS(point_add)(start, start, working);
            else API_NS(point_copy)(start, working);

            if (j==t-1 && i==n-1) break;

            point_double_internal(working, working,0);
            if (j<t-1) API_NS(point_copy)(doubles[j], working);

            for (k=0; k<s-1; k++)
                point_double_internal(working, working, k<s-2);
        }

        /* Gray-code phase */
        for (j=0;; j++) {
            int gray = j ^ (j>>1);
            int idx = (((i+1)<<(t-1))-1) ^ gray;

            pt_to_pniels(pn_tmp, start);
            memcpy(table->table[idx], pn_tmp->n, sizeof(pn_tmp->n));
            gf_copy(zs[idx], pn_tmp->z);

            if (j >= (1u<<(t-1)) - 1) break;
            int delta = (j+1) ^ ((j+1)>>1) ^ gray;  /* single bit that flips next */

            for (k=0; delta>1; k++)
                delta >>=1;

            if (gray & (1<<k)) {
                API_NS(point_add)(start, start, doubles[k]);
            } else {
                API_NS(point_sub)(start, start, doubles[k]);
            }
        }
    }

    batch_normalize_niels(table->table,(const gf *)zs,zis,n<<(t-1));

    decaf_bzero(zs,sizeof(zs));
    decaf_bzero(zis,sizeof(zis));
    decaf_bzero(pn_tmp,sizeof(pn_tmp));
    decaf_bzero(working,sizeof(working));
    decaf_bzero(start,sizeof(start));
    decaf_bzero(doubles,sizeof(doubles));
}
/** Constant-time lookup of table[idx] into ni: scans all nelts entries so the
 *  memory-access pattern is independent of idx. */
static DECAF_INLINE void
constant_time_lookup_niels (
    niels_s *__restrict__ ni,
    const niels_t *table,
    int nelts,
    int idx
) {
    constant_time_lookup(ni, table, sizeof(niels_s), nelts, idx);
}
/**
 * out = scalar * (the base precomputed into `table`), constant time.
 *
 * Comb method: the adjusted, halved scalar is read in n combs of t teeth
 * spaced s bits apart (COMBS_N/T/S).  Each comb digit is sign/magnitude
 * recoded; the magnitude selects a table entry via constant-time lookup and
 * the sign is applied with a constant-time conditional negation.
 */
void API_NS(precomputed_scalarmul) (
    point_t out,
    const precomputed_s *table,
    const scalar_t scalar
) {
    int i;
    unsigned j,k;
    const unsigned int n = COMBS_N, t = COMBS_T, s = COMBS_S;

    /* Recode so that all comb digits are odd. */
    scalar_t scalar1x;
    API_NS(scalar_add)(scalar1x, scalar, precomputed_scalarmul_adjustment);
    API_NS(scalar_halve)(scalar1x,scalar1x);

    niels_t ni;

    for (i=s-1; i>=0; i--) {
        if (i != (int)s-1) point_double_internal(out,out,0);

        for (j=0; j<n; j++) {
            /* Gather the t teeth of comb j at bit offset i. */
            int tab = 0;

            for (k=0; k<t; k++) {
                unsigned int bit = i + s*(k + j*t);
                if (bit < SCALAR_BITS) {
                    tab |= (scalar1x->limb[bit/WBITS] >> (bit%WBITS) & 1) << k;
                }
            }

            mask_t invert = (tab>>(t-1))-1;  /* all-ones if digit negative */
            tab ^= invert;
            tab &= (1<<(t-1)) - 1;

            constant_time_lookup_niels(ni, &table->table[j<<(t-1)], 1<<(t-1), tab);

            cond_neg_niels(ni, invert);
            if ((i!=(int)s-1)||j) {
                add_niels_to_pt(out, ni, j==n-1 && i);
            } else {
                niels_to_pt(out, ni);  /* first contribution initializes out */
            }
        }
    }

    /* Zeroize secret-dependent scratch. */
    decaf_bzero(ni,sizeof(ni));
    decaf_bzero(scalar1x,sizeof(scalar1x));
}
/** Constant-time select: out = pick_b ? b : a.  Timing and memory access are
 *  independent of pick_b. */
void API_NS(point_cond_sel) (
    point_t out,
    const point_t a,
    const point_t b,
    decaf_bool_t pick_b
) {
    constant_time_select(out,a,b,sizeof(point_t),bool_to_mask(pick_b),0);
}
  904. /* FUTURE: restore Curve25519 Montgomery ladder? */
  905. decaf_error_t API_NS(direct_scalarmul) (
  906. uint8_t scaled[SER_BYTES],
  907. const uint8_t base[SER_BYTES],
  908. const scalar_t scalar,
  909. decaf_bool_t allow_identity,
  910. decaf_bool_t short_circuit
  911. ) {
  912. point_t basep;
  913. decaf_error_t succ = API_NS(point_decode)(basep, base, allow_identity);
  914. if (short_circuit && succ != DECAF_SUCCESS) return succ;
  915. API_NS(point_cond_sel)(basep, API_NS(point_base), basep, succ);
  916. API_NS(point_scalarmul)(basep, basep, scalar);
  917. API_NS(point_encode)(scaled, basep);
  918. API_NS(point_destroy)(basep);
  919. return succ;
  920. }
/**
 * Multiply p by the cofactor and serialize it in Ed25519 wire format:
 * little-endian coordinate with the sign of the other coordinate in the
 * top bit.
 *
 * The input lives on the (twisted) internal curve; the preprocessor
 * branches move it to the untwisted Edwards curve via the sigma 4-isogeny,
 * the imaginary twist, or the plain 4-isogeny, depending on build config
 * (this file is generated with EDDSA_USE_SIGMA_ISOGENY == 1).
 */
void API_NS(point_mul_by_cofactor_and_encode_like_eddsa) (
    uint8_t enc[DECAF_EDDSA_25519_PUBLIC_BYTES],
    const point_t p
) {
    /* The point is now on the twisted curve.  Move it to untwisted. */
    gf x, y, z, t;
    point_t q;
#if COFACTOR == 8
    API_NS(point_double)(q,p);
#else
    API_NS(point_copy)(q,p);
#endif

#if EDDSA_USE_SIGMA_ISOGENY
    {
        /* Use 4-isogeny like ed25519:
         *   2*x*y*sqrt(d/a-1)/(ax^2 + y^2 - 2)
         *   (y^2 - ax^2)/(y^2 + ax^2)
         * with a = -1, d = -EDWARDS_D:
         *   -2xy*sqrt(EDWARDS_D-1)/(2z^2-y^2+x^2)
         *   (y^2+x^2)/(y^2-x^2)
         */
        gf u;
        gf_sqr ( x, q->x ); // x^2
        gf_sqr ( t, q->y ); // y^2
        gf_add( u, x, t ); // x^2 + y^2
        gf_add( z, q->y, q->x );
        gf_sqr ( y, z);
        gf_sub ( y, u, y ); // -2xy
        gf_sub ( z, t, x ); // y^2 - x^2
        gf_sqr ( x, q->z );
        gf_add ( t, x, x);
        gf_sub ( t, t, z); // 2z^2 - y^2 + x^2
        gf_mul ( x, y, z ); // 2xy(y^2-x^2)
        gf_mul ( y, u, t ); // (x^2+y^2)(2z^2-y^2+x^2)
        gf_mul ( u, z, t );
        gf_copy( z, u );
        gf_mul ( u, x, SQRT_ONE_MINUS_D );
        gf_copy( x, u );
        decaf_bzero(u,sizeof(u));
    }
#elif IMAGINE_TWIST
    {
        API_NS(point_double)(q,q);
        API_NS(point_double)(q,q);
        gf_mul_qnr(x, q->x);
        gf_copy(y, q->y);
        gf_copy(z, q->z);
    }
#else
    {
        /* 4-isogeny: 2xy/(y^2+x^2), (y^2-x^2)/(2z^2-y^2+x^2) */
        gf u;
        gf_sqr ( x, q->x );
        gf_sqr ( t, q->y );
        gf_add( u, x, t );
        gf_add( z, q->y, q->x );
        gf_sqr ( y, z);
        gf_sub ( y, u, y );
        gf_sub ( z, t, x );
        gf_sqr ( x, q->z );
        gf_add ( t, x, x);
        gf_sub ( t, t, z);
        gf_mul ( x, t, y );
        gf_mul ( y, z, u );
        gf_mul ( z, u, t );
        decaf_bzero(u,sizeof(u));
    }
#endif

    /* Affinize */
    gf_invert(z,z);
    gf_mul(t,x,z);
    gf_mul(x,y,z);

    /* Encode.  NOTE(review): indexes with PRIVATE_BYTES into a PUBLIC_BYTES
     * buffer — these appear to be equal for this curve; confirm against
     * the header. */
    enc[DECAF_EDDSA_25519_PRIVATE_BYTES-1] = 0;
    gf_serialize(enc, x, 1);
    enc[DECAF_EDDSA_25519_PRIVATE_BYTES-1] |= 0x80 & gf_lobit(t);

    /* Scrub temporaries. */
    decaf_bzero(x,sizeof(x));
    decaf_bzero(y,sizeof(y));
    decaf_bzero(z,sizeof(z));
    decaf_bzero(t,sizeof(t));
    API_NS(point_destroy)(q);
}
/**
 * Decode an Ed25519-format point (little-endian y, sign of x in the top
 * bit) onto the internal curve, discarding any cofactor component.
 *
 * Recovers x from the curve equation via an inverse square root, picks the
 * root whose low bit matches the encoded sign bit, then applies the inverse
 * of the isogeny used by point_mul_by_cofactor_and_encode_like_eddsa.
 *
 * @return DECAF_SUCCESS iff enc deserializes to a valid curve point.
 */
decaf_error_t API_NS(point_decode_like_eddsa_and_ignore_cofactor) (
    point_t p,
    const uint8_t enc[DECAF_EDDSA_25519_PUBLIC_BYTES]
) {
    uint8_t enc2[DECAF_EDDSA_25519_PUBLIC_BYTES];
    memcpy(enc2,enc,sizeof(enc2));

    /* Extract and clear the sign bit of x. */
    mask_t low = ~word_is_zero(enc2[DECAF_EDDSA_25519_PRIVATE_BYTES-1] & 0x80);
    enc2[DECAF_EDDSA_25519_PRIVATE_BYTES-1] &= ~0x80;

    mask_t succ = gf_deserialize(p->y, enc2, 1);
#if 7 == 0
    /* Generated code: the constant appears to be (field bits) % 8.  It is
     * nonzero for this curve, so the spare-byte-must-be-zero check is
     * compiled out. */
    succ &= word_is_zero(enc2[DECAF_EDDSA_25519_PRIVATE_BYTES-1]);
#endif

    gf_sqr(p->x,p->y);
    gf_sub(p->z,ONE,p->x); /* num = 1-y^2 */
#if EDDSA_USE_SIGMA_ISOGENY
    gf_mulw(p->t,p->z,EDWARDS_D); /* d-dy^2 */
    gf_mulw(p->x,p->z,EDWARDS_D-1); /* num = (1-y^2)(d-1) */
    gf_copy(p->z,p->x);
#else
    gf_mulw(p->t,p->x,EDWARDS_D); /* dy^2 */
#endif
    gf_sub(p->t,ONE,p->t); /* denom = 1-dy^2 or 1-d + dy^2 */

    gf_mul(p->x,p->z,p->t);
    succ &= gf_isr(p->t,p->x); /* 1/sqrt(num * denom); fails if no root */

    gf_mul(p->x,p->t,p->z); /* sqrt(num / denom) */
    /* Negate x if its low bit disagrees with the encoded sign bit. */
    gf_cond_neg(p->x,~gf_lobit(p->x)^low);
    gf_copy(p->z,ONE);

#if EDDSA_USE_SIGMA_ISOGENY
    {
        /* Use 4-isogeny like ed25519:
         *   2*x*y/sqrt(1-d/a)/(ax^2 + y^2 - 2)
         *   (y^2 - ax^2)/(y^2 + ax^2)
         * (MAGIC: above formula may be off by a factor of -a
         * or something somewhere; check it for other a)
         *
         * with a = -1, d = -EDWARDS_D:
         *   -2xy/sqrt(1-EDWARDS_D)/(2z^2-y^2+x^2)
         *   (y^2+x^2)/(y^2-x^2)
         */
        gf a, b, c, d;
        gf_sqr ( c, p->x );
        gf_sqr ( a, p->y );
        gf_add ( d, c, a ); // x^2 + y^2
        gf_add ( p->t, p->y, p->x );
        gf_sqr ( b, p->t );
        gf_sub ( b, b, d ); // 2xy
        gf_sub ( p->t, a, c ); // y^2 - x^2
        gf_sqr ( p->x, p->z );
        gf_add ( p->z, p->x, p->x );
        gf_sub ( a, p->z, p->t ); // 2z^2 - y^2 + x^2
        gf_mul ( c, a, SQRT_ONE_MINUS_D );
        gf_mul ( p->x, b, p->t); // (2xy)(y^2-x^2)
        gf_mul ( p->z, p->t, c ); // (y^2-x^2)sd(2z^2 - y^2 + x^2)
        gf_mul ( p->y, d, c ); // (y^2+x^2)sd(2z^2 - y^2 + x^2)
        gf_mul ( p->t, d, b );
        decaf_bzero(a,sizeof(a));
        decaf_bzero(b,sizeof(b));
        decaf_bzero(c,sizeof(c));
        decaf_bzero(d,sizeof(d));
    }
#elif IMAGINE_TWIST
    {
        gf_mul(p->t,p->x,SQRT_MINUS_ONE);
        gf_copy(p->x,p->t);
        gf_mul(p->t,p->x,p->y);
    }
#else
    {
        /* 4-isogeny 2xy/(y^2-ax^2), (y^2+ax^2)/(2-y^2-ax^2) */
        gf a, b, c, d;
        gf_sqr ( c, p->x );
        gf_sqr ( a, p->y );
        gf_add ( d, c, a );
        gf_add ( p->t, p->y, p->x );
        gf_sqr ( b, p->t );
        gf_sub ( b, b, d );
        gf_sub ( p->t, a, c );
        gf_sqr ( p->x, p->z );
        gf_add ( p->z, p->x, p->x );
        gf_sub ( a, p->z, d );
        gf_mul ( p->x, a, b );
        gf_mul ( p->z, p->t, a );
        gf_mul ( p->y, p->t, d );
        gf_mul ( p->t, b, d );
        decaf_bzero(a,sizeof(a));
        decaf_bzero(b,sizeof(b));
        decaf_bzero(c,sizeof(c));
        decaf_bzero(d,sizeof(d));
    }
#endif

    decaf_bzero(enc2,sizeof(enc2));
    assert(API_NS(point_valid)(p) || ~succ);

    return decaf_succeed_if(mask_to_bool(succ));
}
/**
 * X25519 Diffie-Hellman: out = clamp(scalar) * base on the Montgomery
 * curve, via the constant-time Montgomery ladder (conditional swaps only;
 * no secret-dependent branches or indices).
 *
 * @return DECAF_FAILURE iff the result is zero (identity / small-order
 *         input), DECAF_SUCCESS otherwise.
 */
decaf_error_t decaf_x25519 (
    uint8_t out[X_PUBLIC_BYTES],
    const uint8_t base[X_PUBLIC_BYTES],
    const uint8_t scalar[X_PRIVATE_BYTES]
) {
    gf x1, x2, z2, x3, z3, t1, t2;
    ignore_result(gf_deserialize(x1,base,1));
    /* Ladder state: (x2:z2) = identity, (x3:z3) = base point. */
    gf_copy(x2,ONE);
    gf_copy(z2,ZERO);
    gf_copy(x3,x1);
    gf_copy(z3,ONE);

    int t;
    mask_t swap = 0;

    for (t = X_PRIVATE_BITS-1; t>=0; t--) {
        uint8_t sb = scalar[t/8];

        /* Scalar conditioning (clamping): clear the low cofactor bits of
         * the bottom byte, and read the top bit as set. */
        if (t/8==0) sb &= -(uint8_t)COFACTOR;
        else if (t == X_PRIVATE_BITS-1) sb = -1;

        mask_t k_t = (sb>>(t%8)) & 1;
        k_t = -k_t; /* set to all 0s or all 1s */

        /* Lazy conditional swap: only swap when the bit changes. */
        swap ^= k_t;
        gf_cond_swap(x2,x3,swap);
        gf_cond_swap(z2,z3,swap);
        swap = k_t;

        /* One combined ladder step (double + differential add). */
        gf_add_nr(t1,x2,z2); /* A = x2 + z2 */        /* 2+e */
        gf_sub_nr(t2,x2,z2); /* B = x2 - z2 */        /* 3+e */
        gf_sub_nr(z2,x3,z3); /* D = x3 - z3 */        /* 3+e */
        gf_mul(x2,t1,z2);    /* DA */
        gf_add_nr(z2,z3,x3); /* C = x3 + z3 */        /* 2+e */
        gf_mul(x3,t2,z2);    /* CB */
        gf_sub_nr(z3,x2,x3); /* DA-CB */              /* 3+e */
        gf_sqr(z2,z3);       /* (DA-CB)^2 */
        gf_mul(z3,x1,z2);    /* z3 = x1(DA-CB)^2 */
        gf_add_nr(z2,x2,x3); /* (DA+CB) */            /* 2+e */
        gf_sqr(x3,z2);       /* x3 = (DA+CB)^2 */
        gf_sqr(z2,t1);       /* AA = A^2 */
        gf_sqr(t1,t2);       /* BB = B^2 */
        gf_mul(x2,z2,t1);    /* x2 = AA*BB */
        gf_sub_nr(t2,z2,t1); /* E = AA-BB */          /* 3+e */
        gf_mulw(t1,t2,-EDWARDS_D); /* E*-d = a24*E */
        gf_add_nr(t1,t1,z2); /* AA + a24*E */         /* 2+e */
        gf_mul(z2,t2,t1);    /* z2 = E(AA+a24*E) */
    }

    /* Finish: undo the pending swap and affinize. */
    gf_cond_swap(x2,x3,swap);
    gf_cond_swap(z2,z3,swap);
    gf_invert(z2,z2);
    gf_mul(x1,x2,z2);
    gf_serialize(out,x1,1);
    mask_t nz = ~gf_eq(x1,ZERO);

    /* Scrub all ladder state. */
    decaf_bzero(x1,sizeof(x1));
    decaf_bzero(x2,sizeof(x2));
    decaf_bzero(z2,sizeof(z2));
    decaf_bzero(x3,sizeof(x3));
    decaf_bzero(z3,sizeof(z3));
    decaf_bzero(t1,sizeof(t1));
    decaf_bzero(t2,sizeof(t2));

    return decaf_succeed_if(mask_to_bool(nz));
}
/* Thanks Johan Pascal */
/**
 * Convert an Ed25519 public key to an X25519 public key by mapping the
 * Edwards y-coordinate to the Montgomery u-coordinate.  The x sign bit is
 * discarded (u depends only on y).
 */
void decaf_ed25519_convert_public_key_to_x25519 (
    uint8_t x[DECAF_X25519_PUBLIC_BYTES],
    const uint8_t ed[DECAF_EDDSA_25519_PUBLIC_BYTES]
) {
    gf y;
    {
        uint8_t enc2[DECAF_EDDSA_25519_PUBLIC_BYTES];
        memcpy(enc2,ed,sizeof(enc2));

        /* retrieve y from the ed compressed point (mask off the sign bit) */
        enc2[DECAF_EDDSA_25519_PUBLIC_BYTES-1] &= ~0x80;
        ignore_result(gf_deserialize(y, enc2, 0));
        decaf_bzero(enc2,sizeof(enc2));
    }

    {
        gf n,d;

#if EDDSA_USE_SIGMA_ISOGENY
        /* u = (1+y)/(1-y) */
        gf_add(n, y, ONE);         /* n = y+1 */
        gf_sub(d, ONE, y);         /* d = 1-y */
        gf_invert(d, d);           /* d = 1/(1-y) */
        gf_mul(y, n, d);           /* u = (y+1)/(1-y) */
        gf_serialize(x,y,1);
#else /* EDDSA_USE_SIGMA_ISOGENY */
        /* u = y^2 * (1-dy^2) / (1-y^2) */
        gf_sqr(n,y);               /* y^2 */
        gf_sub(d,ONE,n);           /* 1-y^2 */
        gf_invert(d,d);            /* 1/(1-y^2) */
        gf_mul(y,n,d);             /* y^2 / (1-y^2) */
        gf_mulw(d,n,EDWARDS_D);    /* dy^2 */
        gf_sub(d, ONE, d);         /* 1-dy^2 */
        gf_mul(n, y, d);           /* y^2 * (1-dy^2) / (1-y^2) */
        gf_serialize(x,n,1);
#endif /* EDDSA_USE_SIGMA_ISOGENY */
        /* Scrub temporaries. */
        decaf_bzero(y,sizeof(y));
        decaf_bzero(n,sizeof(n));
        decaf_bzero(d,sizeof(d));
    }
}
/** Thin alias for decaf_x25519_derive_public_key (same contract). */
void decaf_x25519_generate_key (
    uint8_t out[X_PUBLIC_BYTES],
    const uint8_t scalar[X_PRIVATE_BYTES]
) {
    decaf_x25519_derive_public_key(out,scalar);
}
/**
 * Compute the X25519 public key for a private scalar: clamp the scalar,
 * halve it (see comment below), do a constant-time fixed-base scalar
 * multiply on the internal Edwards curve, then isogenize the result to the
 * Montgomery u-coordinate.  Faster than running the ladder on the base
 * point because it can use the precomputed comb table.
 */
void decaf_x25519_derive_public_key (
    uint8_t out[X_PUBLIC_BYTES],
    const uint8_t scalar[X_PRIVATE_BYTES]
) {
    /* Scalar conditioning: clear the low cofactor bits, clear bits above
     * the top, and set the top bit. */
    uint8_t scalar2[X_PRIVATE_BYTES];
    memcpy(scalar2,scalar,sizeof(scalar2));
    scalar2[0] &= -(uint8_t)COFACTOR;

    scalar2[X_PRIVATE_BYTES-1] &= ~(-1u<<((X_PRIVATE_BITS+7)%8));
    scalar2[X_PRIVATE_BYTES-1] |= 1<<((X_PRIVATE_BITS+7)%8);

    scalar_t the_scalar;
    API_NS(scalar_decode_long)(the_scalar,scalar2,sizeof(scalar2));

    /* We're gonna isogenize by 2, so divide by 2.
     *
     * Why by 2, even though it's a 4-isogeny?
     *
     * The isogeny map looks like
     *   Montgomery <-2-> Jacobi <-2-> Edwards
     *
     * Since the Jacobi base point is the PREimage of the iso to
     * the Montgomery curve, and we're going
     *   Jacobi -> Edwards -> Jacobi -> Montgomery,
     * we pick up only a factor of 2 over Jacobi -> Montgomery.
     */
    API_NS(scalar_halve)(the_scalar,the_scalar);
    point_t p;
    API_NS(precomputed_scalarmul)(p,API_NS(precomputed_base),the_scalar);

    /* Isogenize to Montgomery curve: u = (y/x)^2, negated on the twist. */
    gf_invert(p->t,p->x); /* 1/x */
    gf_mul(p->z,p->t,p->y); /* y/x */
    gf_sqr(p->y,p->z); /* (y/x)^2 */
#if IMAGINE_TWIST
    gf_sub(p->y,ZERO,p->y);
#endif
    gf_serialize(out,p->y,1);

    /* Scrub secrets. */
    decaf_bzero(scalar2,sizeof(scalar2));
    API_NS(scalar_destroy)(the_scalar);
    API_NS(point_destroy)(p);
}
/**
 * @cond internal
 * Control for variable-time scalar multiply algorithms.
 * One entry per signed wNAF digit: `addend` (an odd signed value) is
 * applied at bit position `power`; power == -1 terminates the list.
 */
struct smvt_control {
  int power, addend;
};
  1247. static int recode_wnaf (
  1248. struct smvt_control *control, /* [nbits/(table_bits+1) + 3] */
  1249. const scalar_t scalar,
  1250. unsigned int table_bits
  1251. ) {
  1252. unsigned int table_size = SCALAR_BITS/(table_bits+1) + 3;
  1253. int position = table_size - 1; /* at the end */
  1254. /* place the end marker */
  1255. control[position].power = -1;
  1256. control[position].addend = 0;
  1257. position--;
  1258. /* PERF: Could negate scalar if it's large. But then would need more cases
  1259. * in the actual code that uses it, all for an expected reduction of like 1/5 op.
  1260. * Probably not worth it.
  1261. */
  1262. uint64_t current = scalar->limb[0] & 0xFFFF;
  1263. uint32_t mask = (1<<(table_bits+1))-1;
  1264. unsigned int w;
  1265. const unsigned int B_OVER_16 = sizeof(scalar->limb[0]) / 2;
  1266. for (w = 1; w<(SCALAR_BITS-1)/16+3; w++) {
  1267. if (w < (SCALAR_BITS-1)/16+1) {
  1268. /* Refill the 16 high bits of current */
  1269. current += (uint32_t)((scalar->limb[w/B_OVER_16]>>(16*(w%B_OVER_16)))<<16);
  1270. }
  1271. while (current & 0xFFFF) {
  1272. assert(position >= 0);
  1273. uint32_t pos = __builtin_ctz((uint32_t)current), odd = (uint32_t)current >> pos;
  1274. int32_t delta = odd & mask;
  1275. if (odd & 1<<(table_bits+1)) delta -= (1<<(table_bits+1));
  1276. current -= delta << pos;
  1277. control[position].power = pos + 16*(w-1);
  1278. control[position].addend = delta;
  1279. position--;
  1280. }
  1281. current >>= 16;
  1282. }
  1283. assert(current==0);
  1284. position++;
  1285. unsigned int n = table_size - position;
  1286. unsigned int i;
  1287. for (i=0; i<n; i++) {
  1288. control[i] = control[i+position];
  1289. }
  1290. return n-1;
  1291. }
/** Build the odd-multiple table for wNAF: output[i] = (2i+1) * working,
 * for i in [0, 2^tbits), in projective-niels form.  Variable-time. */
static void
prepare_wnaf_table(
    pniels_t *output,
    const point_t working,
    unsigned int tbits
) {
    point_t tmp;
    int i;
    pt_to_pniels(output[0], working);

    if (tbits == 0) return;

    /* twop = 2P; tmp walks the odd multiples 3P, 5P, ... */
    API_NS(point_double)(tmp,working);
    pniels_t twop;
    pt_to_pniels(twop, tmp);

    add_pniels_to_pt(tmp, output[0],0);
    pt_to_pniels(output[1], tmp);

    for (i=2; i < 1<<tbits; i++) {
        add_pniels_to_pt(tmp, twop,0);
        pt_to_pniels(output[i], tmp);
    }

    API_NS(point_destroy)(tmp);
    decaf_bzero(twop,sizeof(twop));
}
/* Fixed wNAF table for the base point, stored elsewhere as raw field
 * elements and reinterpreted here as niels points. */
extern const gf API_NS(precomputed_wnaf_as_fe)[];
static const niels_t *API_NS(wnaf_base) = (const niels_t *)API_NS(precomputed_wnaf_as_fe);
/* Byte size of that table (hidden-visibility export). */
const size_t API_NS(sizeof_precomputed_wnafs) __attribute((visibility("hidden")))
    = sizeof(niels_t)<<DECAF_WNAF_FIXED_TABLE_BITS;
/** Generate the fixed wNAF table for `base`: compute the odd multiples
 * with prepare_wnaf_table, then batch-normalize them to affine niels form
 * (one shared inversion) into `out`. */
void API_NS(precompute_wnafs) (
    niels_t out[1<<DECAF_WNAF_FIXED_TABLE_BITS],
    const point_t base
) __attribute__ ((visibility ("hidden")));

void API_NS(precompute_wnafs) (
    niels_t out[1<<DECAF_WNAF_FIXED_TABLE_BITS],
    const point_t base
) {
    pniels_t tmp[1<<DECAF_WNAF_FIXED_TABLE_BITS];
    gf zs[1<<DECAF_WNAF_FIXED_TABLE_BITS], zis[1<<DECAF_WNAF_FIXED_TABLE_BITS];
    int i;
    prepare_wnaf_table(tmp,base,DECAF_WNAF_FIXED_TABLE_BITS);
    /* Split each projective-niels entry into its niels part and its z. */
    for (i=0; i<1<<DECAF_WNAF_FIXED_TABLE_BITS; i++) {
        memcpy(out[i], tmp[i]->n, sizeof(niels_t));
        gf_copy(zs[i], tmp[i]->z);
    }
    batch_normalize_niels(out, (const gf *)zs, zis, 1<<DECAF_WNAF_FIXED_TABLE_BITS);

    decaf_bzero(tmp,sizeof(tmp));
    decaf_bzero(zs,sizeof(zs));
    decaf_bzero(zis,sizeof(zis));
}
  1339. void API_NS(base_double_scalarmul_non_secret) (
  1340. point_t combo,
  1341. const scalar_t scalar1,
  1342. const point_t base2,
  1343. const scalar_t scalar2
  1344. ) {
  1345. const int table_bits_var = DECAF_WNAF_VAR_TABLE_BITS,
  1346. table_bits_pre = DECAF_WNAF_FIXED_TABLE_BITS;
  1347. struct smvt_control control_var[SCALAR_BITS/(table_bits_var+1)+3];
  1348. struct smvt_control control_pre[SCALAR_BITS/(table_bits_pre+1)+3];
  1349. int ncb_pre = recode_wnaf(control_pre, scalar1, table_bits_pre);
  1350. int ncb_var = recode_wnaf(control_var, scalar2, table_bits_var);
  1351. pniels_t precmp_var[1<<table_bits_var];
  1352. prepare_wnaf_table(precmp_var, base2, table_bits_var);
  1353. int contp=0, contv=0, i = control_var[0].power;
  1354. if (i < 0) {
  1355. API_NS(point_copy)(combo, API_NS(point_identity));
  1356. return;
  1357. } else if (i > control_pre[0].power) {
  1358. pniels_to_pt(combo, precmp_var[control_var[0].addend >> 1]);
  1359. contv++;
  1360. } else if (i == control_pre[0].power && i >=0 ) {
  1361. pniels_to_pt(combo, precmp_var[control_var[0].addend >> 1]);
  1362. add_niels_to_pt(combo, API_NS(wnaf_base)[control_pre[0].addend >> 1], i);
  1363. contv++; contp++;
  1364. } else {
  1365. i = control_pre[0].power;
  1366. niels_to_pt(combo, API_NS(wnaf_base)[control_pre[0].addend >> 1]);
  1367. contp++;
  1368. }
  1369. for (i--; i >= 0; i--) {
  1370. int cv = (i==control_var[contv].power), cp = (i==control_pre[contp].power);
  1371. point_double_internal(combo,combo,i && !(cv||cp));
  1372. if (cv) {
  1373. assert(control_var[contv].addend);
  1374. if (control_var[contv].addend > 0) {
  1375. add_pniels_to_pt(combo, precmp_var[control_var[contv].addend >> 1], i&&!cp);
  1376. } else {
  1377. sub_pniels_from_pt(combo, precmp_var[(-control_var[contv].addend) >> 1], i&&!cp);
  1378. }
  1379. contv++;
  1380. }
  1381. if (cp) {
  1382. assert(control_pre[contp].addend);
  1383. if (control_pre[contp].addend > 0) {
  1384. add_niels_to_pt(combo, API_NS(wnaf_base)[control_pre[contp].addend >> 1], i);
  1385. } else {
  1386. sub_niels_from_pt(combo, API_NS(wnaf_base)[(-control_pre[contp].addend) >> 1], i);
  1387. }
  1388. contp++;
  1389. }
  1390. }
  1391. /* This function is non-secret, but whatever this is cheap. */
  1392. decaf_bzero(control_var,sizeof(control_var));
  1393. decaf_bzero(control_pre,sizeof(control_pre));
  1394. decaf_bzero(precmp_var,sizeof(precmp_var));
  1395. assert(contv == ncb_var); (void)ncb_var;
  1396. assert(contp == ncb_pre); (void)ncb_pre;
  1397. }
/** Securely erase a point. */
void API_NS(point_destroy) (
    point_t point
) {
    decaf_bzero(point, sizeof(point_t));
}
/** Securely erase a precomputed table; its full size is only known via the
 * exported sizeof_precomputed_s constant. */
void API_NS(precomputed_destroy) (
    precomputed_s *pre
) {
    decaf_bzero(pre, API_NS(sizeof_precomputed_s));
}