You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 
 
 

1564 lines
45 KiB

  1. /** @brief Decaf high-level functions. */
  2. #define _XOPEN_SOURCE 600 /* for posix_memalign */
  3. #include "word.h"
  4. #include "field.h"
  5. #include <decaf.h>
  6. #include <decaf/ed$(gf_bits).h>
  7. /* Template stuff */
  8. #define API_NS(_id) $(c_ns)_##_id
  9. #define SCALAR_BITS $(C_NS)_SCALAR_BITS
  10. #define SCALAR_SER_BYTES $(C_NS)_SCALAR_BYTES
  11. #define SCALAR_LIMBS $(C_NS)_SCALAR_LIMBS
  12. #define scalar_t API_NS(scalar_t)
  13. #define point_t API_NS(point_t)
  14. #define precomputed_s API_NS(precomputed_s)
  15. #define IMAGINE_TWIST $(imagine_twist)
  16. #define COFACTOR $(cofactor)
  17. /* Comb config: number of combs, n, t, s. */
  18. #define COMBS_N $(combs.n)
  19. #define COMBS_T $(combs.t)
  20. #define COMBS_S $(combs.s)
  21. #define DECAF_WINDOW_BITS $(window_bits)
  22. #define DECAF_WNAF_FIXED_TABLE_BITS $(wnaf.fixed)
  23. #define DECAF_WNAF_VAR_TABLE_BITS $(wnaf.var)
  24. #define EDDSA_USE_SIGMA_ISOGENY $(eddsa_sigma_iso)
  25. static const int EDWARDS_D = $(d);
  26. static const scalar_t point_scalarmul_adjustment = {{{
  27. $(ser((2**(scalar_bits-1+window_bits - ((scalar_bits-1)%window_bits)) - 1) % q,64,"SC_LIMB"))
  28. }}}, precomputed_scalarmul_adjustment = {{{
  29. $(ser((2**(combs.n*combs.t*combs.s) - 1) % q,64,"SC_LIMB"))
  30. }}};
  31. const uint8_t decaf_x$(gf_shortname)_base_point[DECAF_X$(gf_shortname)_PUBLIC_BYTES] = { $(ser(mont_base,8)) };
  32. #if COFACTOR==8 || EDDSA_USE_SIGMA_ISOGENY
  33. static const gf SQRT_ONE_MINUS_D = {FIELD_LITERAL(
  34. $(ser(msqrt(1-d,modulus),gf_lit_limb_bits) if cofactor == 8 else "/* NONE */")
  35. )};
  36. #endif
  37. /* End of template stuff */
  38. /* Sanity */
  39. #if (COFACTOR == 8) && !IMAGINE_TWIST
  40. /* FUTURE MAGIC: Curve41417 doesn't have these properties. */
  41. #error "Currently require IMAGINE_TWIST (and thus p=5 mod 8) for cofactor 8"
  42. #endif
  43. #if IMAGINE_TWIST && (P_MOD_8 != 5)
  44. #error "Cannot use IMAGINE_TWIST except for p == 5 mod 8"
  45. #endif
  46. #if (COFACTOR != 8) && (COFACTOR != 4)
  47. #error "COFACTOR must be 4 or 8"
  48. #endif
  49. #if IMAGINE_TWIST
  50. extern const gf SQRT_MINUS_ONE;
  51. #endif
#define WBITS DECAF_WORD_BITS /* NB this may be different from ARCH_WORD_BITS */
extern const point_t API_NS(point_base);
/* Projective Niels coordinates.
 * niels: (a,b,c) = (Y-X, Y+X, 2*TWISTED_D*T) -- see pt_to_pniels().
 * pniels additionally carries z (= 2Z there), making the form projective. */
typedef struct { gf a, b, c; } niels_s, niels_t[1];
typedef struct { niels_t n; gf z; } __attribute__((aligned(sizeof(big_register_t))))
pniels_s, pniels_t[1];
/* Precomputed base: COMBS_N combs of 2^(COMBS_T-1) Niels entries each. */
struct precomputed_s { niels_t table [COMBS_N<<(COMBS_T-1)]; };
extern const gf API_NS(precomputed_base_as_fe)[];
const precomputed_s *API_NS(precomputed_base) =
    (const precomputed_s *) &API_NS(precomputed_base_as_fe);
/* Size and alignment of precomputed_s, exported for external allocation. */
const size_t API_NS(sizeof_precomputed_s) = sizeof(precomputed_s);
const size_t API_NS(alignof_precomputed_s) = sizeof(big_register_t);
  65. /** Inverse. */
  66. static void
  67. gf_invert(gf y, const gf x) {
  68. gf t1, t2;
  69. gf_sqr(t1, x); // o^2
  70. mask_t ret = gf_isr(t2, t1); // +-1/sqrt(o^2) = +-1/o
  71. (void)ret; assert(ret);
  72. gf_sqr(t1, t2);
  73. gf_mul(t2, t1, x); // not direct to y in case of alias.
  74. gf_copy(y, t2);
  75. }
  76. /** Return high bit of x = low bit of 2x mod p */
  77. static mask_t gf_lobit(const gf x) {
  78. gf y;
  79. gf_copy(y,x);
  80. gf_strong_reduce(y);
  81. return -(y->limb[0]&1);
  82. }
/** identity = (0,1) */
const point_t API_NS(point_identity) = {{{{{0}}},{{{1}}},{{{1}}},{{{0}}}}};

/**
 * Map a point on the internal (isogenous/twisted) curve to the field
 * elements s and -t/s used by the decaf encoding, choosing the canonical
 * representative.  The toggle_* masks flip the respective sign/rotation
 * choices (used by other encodings built on the same routine).
 * NOTE(review): exact meaning of the toggles depends on callers outside
 * this view -- point_encode() passes 0,0,0 for the canonical choice.
 */
void API_NS(deisogenize) (
    gf_s *__restrict__ s,
    gf_s *__restrict__ minus_t_over_s,
    const point_t p,
    mask_t toggle_hibit_s,
    mask_t toggle_hibit_t_over_s,
    mask_t toggle_rotation
);
void API_NS(deisogenize) (
    gf_s *__restrict__ s,
    gf_s *__restrict__ minus_t_over_s,
    const point_t p,
    mask_t toggle_hibit_s,
    mask_t toggle_hibit_t_over_s,
    mask_t toggle_rotation
) {
#if COFACTOR == 4 && !IMAGINE_TWIST
    (void) toggle_rotation; /* rotation only exists for the cofactor-8 case below */
    gf b, d;
    gf_s *c = s, *a = minus_t_over_s; /* compute in place in the output buffers */
    gf_mulw(a, p->y, 1-EDWARDS_D);
    gf_mul(c, a, p->t); /* -dYT, with EDWARDS_D = d-1 */
    gf_mul(a, p->x, p->z);
    gf_sub(d, c, a); /* aXZ-dYT with a=-1 */
    gf_add(a, p->z, p->y);
    gf_sub(b, p->z, p->y);
    gf_mul(c, b, a);
    gf_mulw(b, c, -EDWARDS_D); /* (a-d)(Z+Y)(Z-Y) */
    mask_t ok = gf_isr (a,b); /* r in the paper */
    (void)ok; assert(ok | gf_eq(b,ZERO)); /* b==0 only for the identity */
    gf_mulw (b, a, -EDWARDS_D); /* u in the paper */
    gf_mul(c,a,d); /* r(aZX-dYT) */
    gf_mul(a,b,p->z); /* uZ */
    gf_add(a,a,a); /* 2uZ */
    mask_t tg = toggle_hibit_t_over_s ^ ~gf_hibit(minus_t_over_s);
    gf_cond_neg(minus_t_over_s, tg); /* t/s <-? -t/s */
    gf_cond_neg(c, tg); /* u <- -u if negative. */
    gf_add(d,c,p->y);
    gf_mul(s,b,d);
    gf_cond_neg(s, toggle_hibit_s ^ gf_hibit(s)); /* canonical sign of s */
#else
    /* More complicated because of rotation */
    /* MAGIC This code is wrong for certain non-Curve25519 curves;
     * check if it's because of Cofactor==8 or IMAGINE_ROTATION */
    gf c, d;
    gf_s *b = s, *a = minus_t_over_s;
#if IMAGINE_TWIST
    gf x, t;
    gf_div_qnr(x,p->x); /* untwist: divide X and T by the QNR i */
    gf_div_qnr(t,p->t);
    gf_add ( a, p->z, x );
    gf_sub ( b, p->z, x );
    gf_mul ( c, a, b ); /* "zx" = Z^2 - aX^2 = Z^2 - X^2 */
#else
    const gf_s *x = p->x, *t = p->t;
    /* Won't hit the gf_cond_sel below because COFACTOR==8 requires IMAGINE_TWIST for now. */
    gf_sqr ( a, p->z );
    gf_sqr ( b, p->x );
    gf_add ( c, a, b ); /* "zx" = Z^2 - aX^2 = Z^2 + X^2 */
#endif
    gf_mul ( a, p->z, t ); /* "tz" = T*Z */
    gf_sqr ( b, a );
    gf_mul ( d, b, c ); /* (TZ)^2 * (Z^2-aX^2) */
    mask_t ok = gf_isr(b, d);
    (void)ok; assert(ok | gf_eq(d,ZERO)); /* d==0 only for 2-torsion inputs */
    gf_mul ( d, b, a ); /* "osx" = 1 / sqrt(z^2-ax^2) */
    gf_mul ( a, b, c );
    gf_mul ( b, a, d ); /* 1/tz */
    mask_t rotate;
#if (COFACTOR == 8)
    gf e;
    gf_sqr(e, p->z);
    gf_mul(a, e, b); /* z^2 / tz = z/t = 1/xy */
    rotate = gf_hibit(a) ^ toggle_rotation; /* pick 4-torsion rotation by sign of 1/xy */
    /* Curve25519: cond select between zx * 1/tz or sqrt(1-d); y=-x */
    gf_mul ( a, b, c );
    gf_cond_sel ( a, a, SQRT_ONE_MINUS_D, rotate );
    gf_cond_sel ( x, p->y, x, rotate );
#else
    (void)toggle_rotation;
    rotate = 0;
#endif
    gf_mul ( c, a, d ); // new "osx"
    gf_mul ( a, c, p->z );
    gf_add ( minus_t_over_s, a, a ); // 2 * "osx" * Z
    gf_mul ( d, b, p->z );
    mask_t tg = toggle_hibit_t_over_s ^~ gf_hibit(minus_t_over_s);
    gf_cond_neg ( minus_t_over_s, tg );
    gf_cond_neg ( c, rotate ^ tg );
    gf_add ( d, d, c );
    gf_mul ( s, d, x ); /* here "x" = y unless rotate */
    gf_cond_neg ( s, toggle_hibit_s ^ gf_hibit(s) ); /* canonical sign of s */
#endif
}
  179. void API_NS(point_encode)( unsigned char ser[SER_BYTES], const point_t p ) {
  180. gf s, mtos;
  181. API_NS(deisogenize)(s,mtos,p,0,0,0);
  182. gf_serialize(ser,s,0);
  183. }
/**
 * Deserialize a decaf point from its canonical encoding.
 * Rejects non-canonical field encodings, non-square inputs, and (unless
 * allow_identity) the identity; all checks are accumulated in the mask
 * `succ` so the work is constant-time regardless of validity.
 * Returns DECAF_SUCCESS iff ser was a valid encoding.
 */
decaf_error_t API_NS(point_decode) (
    point_t p,
    const unsigned char ser[SER_BYTES],
    decaf_bool_t allow_identity
) {
    gf s, a, b, c, d, e, f;
    mask_t succ = gf_deserialize(s, ser, 0);
    mask_t zero = gf_eq(s, ZERO);
    succ &= bool_to_mask(allow_identity) | ~zero;
    gf_sqr ( a, s );
#if IMAGINE_TWIST
    gf_sub ( f, ONE, a ); /* f = 1-as^2 = 1-s^2*/
#else
    gf_add ( f, ONE, a ); /* f = 1-as^2 = 1+s^2 */
#endif
    succ &= ~ gf_eq( f, ZERO );
    gf_sqr ( b, f );
    gf_mulw ( c, a, 4*IMAGINE_TWIST-4*EDWARDS_D );
    gf_add ( c, c, b ); /* t^2 */
    gf_mul ( d, f, s ); /* s(1-as^2) for denoms */
    gf_sqr ( e, d );
    gf_mul ( b, c, e );
    succ &= gf_isr(e,b) | gf_eq(b,ZERO); /* e = 1/(t s (1-as^2)); fails if t^2 not square */
    gf_mul ( b, e, d ); /* 1/t */
    gf_mul ( d, e, c ); /* d = t / (s(1-as^2)) */
    gf_mul ( e, d, f ); /* t/s */
    mask_t negtos = gf_hibit(e);
    gf_cond_neg(b, negtos); /* force the canonical sign of t/s */
    gf_cond_neg(d, negtos);
#if IMAGINE_TWIST
    gf_add ( p->z, ONE, a); /* Z = 1+as^2 = 1-s^2 */
#else
    gf_sub ( p->z, ONE, a); /* Z = 1+as^2 = 1-s^2 */
#endif
#if COFACTOR == 8
    gf_mul ( a, p->z, d); /* t(1+s^2) / s(1-s^2) = 2/xy */
    succ &= ~gf_lobit(a); /* = ~gf_hibit(a/2), since gf_hibit(x) = gf_lobit(2x) */
#endif
    gf_mul ( a, f, b ); /* y = (1-s^2) / t */
    gf_mul ( p->y, p->z, a ); /* Y = yZ */
#if IMAGINE_TWIST
    gf_add ( b, s, s );
    gf_mul(p->x, b, SQRT_MINUS_ONE); /* Curve25519 */
#else
    gf_add ( p->x, s, s );
#endif
    gf_mul ( p->t, p->x, a ); /* T = 2s (1-as^2)/t */
    /* If s was zero, `zero` is all-ones: subtracting it bumps Y from 0 to 1,
     * yielding the identity (0,1) instead of an invalid all-zero point. */
    p->y->limb[0] -= zero;
    assert(API_NS(point_valid)(p) | ~succ);
    return decaf_succeed_if(mask_to_bool(succ));
}
/* Effective twisted-curve constant: the internal curve has a = -1 when
 * IMAGINE_TWIST, else it is the d-1 isogenous form. */
#if IMAGINE_TWIST
#define TWISTED_D (-(EDWARDS_D))
#else
#define TWISTED_D ((EDWARDS_D)-1)
#endif
/* EFF_D = |TWISTED_D|; NEG_D records its sign so gf_mulw gets a
 * nonnegative multiplier. */
#if TWISTED_D < 0
#define EFF_D (-(TWISTED_D))
#define NEG_D 1
#else
#define EFF_D TWISTED_D
#define NEG_D 0
#endif

/**
 * p = q - r in extended twisted Edwards coordinates (Hisil et al. style).
 * Safe for p aliasing q and/or r.  The "/* N+e *" annotations track the
 * maximum unreduced headroom of each intermediate for the _nr (no-reduce)
 * field ops.
 */
void API_NS(point_sub) (
    point_t p,
    const point_t q,
    const point_t r
) {
    gf a, b, c, d;
    gf_sub_nr ( b, q->y, q->x ); /* 3+e */
    gf_sub_nr ( d, r->y, r->x ); /* 3+e */
    gf_add_nr ( c, r->y, r->x ); /* 2+e */
    gf_mul ( a, c, b );
    gf_add_nr ( b, q->y, q->x ); /* 2+e */
    gf_mul ( p->y, d, b );
    gf_mul ( b, r->t, q->t );
    gf_mulw ( p->x, b, 2*EFF_D );
    gf_add_nr ( b, a, p->y ); /* 2+e */
    gf_sub_nr ( c, p->y, a ); /* 3+e */
    gf_mul ( a, q->z, r->z );
    gf_add_nr ( a, a, a ); /* 2+e */
    if (GF_HEADROOM <= 3) gf_weak_reduce(a); /* or 1+e */
#if NEG_D
    gf_sub_nr ( p->y, a, p->x ); /* 4+e or 3+e */
    gf_add_nr ( a, a, p->x ); /* 3+e or 2+e */
#else
    gf_add_nr ( p->y, a, p->x ); /* 3+e or 2+e */
    gf_sub_nr ( a, a, p->x ); /* 4+e or 3+e */
#endif
    gf_mul ( p->z, a, p->y );
    gf_mul ( p->x, p->y, c );
    gf_mul ( p->y, a, b );
    gf_mul ( p->t, b, c );
}
/**
 * p = q + r in extended twisted Edwards coordinates.  Mirror image of
 * point_sub(): the d-term sign (NEG_D) swaps which of the add/sub pair
 * feeds Z vs X.  Safe for p aliasing q and/or r.
 */
void API_NS(point_add) (
    point_t p,
    const point_t q,
    const point_t r
) {
    gf a, b, c, d;
    gf_sub_nr ( b, q->y, q->x ); /* 3+e */
    gf_sub_nr ( c, r->y, r->x ); /* 3+e */
    gf_add_nr ( d, r->y, r->x ); /* 2+e */
    gf_mul ( a, c, b );
    gf_add_nr ( b, q->y, q->x ); /* 2+e */
    gf_mul ( p->y, d, b );
    gf_mul ( b, r->t, q->t );
    gf_mulw ( p->x, b, 2*EFF_D );
    gf_add_nr ( b, a, p->y ); /* 2+e */
    gf_sub_nr ( c, p->y, a ); /* 3+e */
    gf_mul ( a, q->z, r->z );
    gf_add_nr ( a, a, a ); /* 2+e */
    if (GF_HEADROOM <= 3) gf_weak_reduce(a); /* or 1+e */
#if NEG_D
    gf_add_nr ( p->y, a, p->x ); /* 3+e or 2+e */
    gf_sub_nr ( a, a, p->x ); /* 4+e or 3+e */
#else
    gf_sub_nr ( p->y, a, p->x ); /* 4+e or 3+e */
    gf_add_nr ( a, a, p->x ); /* 3+e or 2+e */
#endif
    gf_mul ( p->z, a, p->y );
    gf_mul ( p->x, p->y, c );
    gf_mul ( p->y, a, b );
    gf_mul ( p->t, b, c );
}
/**
 * p = 2q.  If before_double is nonzero, p->t is left uncomputed: the
 * caller promises the next operation is another doubling, which reads
 * only x, y, z (see the body -- q->t is never read here).
 */
static NOINLINE void
point_double_internal (
    point_t p,
    const point_t q,
    int before_double
) {
    gf a, b, c, d;
    gf_sqr ( c, q->x );
    gf_sqr ( a, q->y );
    gf_add_nr ( d, c, a ); /* 2+e */
    gf_add_nr ( p->t, q->y, q->x ); /* 2+e */
    gf_sqr ( b, p->t );
    gf_subx_nr ( b, b, d, 3 ); /* 4+e */
    gf_sub_nr ( p->t, a, c ); /* 3+e */
    gf_sqr ( p->x, q->z );
    gf_add_nr ( p->z, p->x, p->x ); /* 2+e */
    gf_subx_nr ( a, p->z, p->t, 4 ); /* 6+e */
    if (GF_HEADROOM == 5) gf_weak_reduce(a); /* or 1+e */
    gf_mul ( p->x, a, b );
    gf_mul ( p->z, p->t, a );
    gf_mul ( p->y, p->t, d );
    if (!before_double) gf_mul ( p->t, b, d ); /* skip T when the next op is a double */
}
/** Public doubling: p = 2q, with the T coordinate always computed. */
void API_NS(point_double)(point_t p, const point_t q) {
    point_double_internal(p,q,0);
}
  335. void API_NS(point_negate) (
  336. point_t nega,
  337. const point_t a
  338. ) {
  339. gf_sub(nega->x, ZERO, a->x);
  340. gf_copy(nega->y, a->y);
  341. gf_copy(nega->z, a->z);
  342. gf_sub(nega->t, ZERO, a->t);
  343. }
/* Operations on [p]niels */
/** Conditionally negate a Niels point in constant time.  Negation swaps
 * a (= Y-X) with b (= Y+X) and negates c (the 2dT component); neg must
 * be an all-zeros or all-ones mask. */
static INLINE void
cond_neg_niels (
    niels_t n,
    mask_t neg
) {
    gf_cond_swap(n->a, n->b, neg);
    gf_cond_neg(n->c, neg);
}
/** Convert an extended point to projective Niels form:
 * (a, b, c, z) = (Y-X, Y+X, 2*TWISTED_D*T, 2Z). */
static NOINLINE void pt_to_pniels (
    pniels_t b,
    const point_t a
) {
    gf_sub ( b->n->a, a->y, a->x );
    gf_add ( b->n->b, a->x, a->y );
    gf_mulw ( b->n->c, a->t, 2*TWISTED_D );
    gf_add ( b->z, a->z, a->z );
}
  362. static NOINLINE void pniels_to_pt (
  363. point_t e,
  364. const pniels_t d
  365. ) {
  366. gf eu;
  367. gf_add ( eu, d->n->b, d->n->a );
  368. gf_sub ( e->y, d->n->b, d->n->a );
  369. gf_mul ( e->t, e->y, eu);
  370. gf_mul ( e->x, d->z, e->y );
  371. gf_mul ( e->y, d->z, eu );
  372. gf_sqr ( e->z, d->z );
  373. }
/** Expand an affine Niels point (Z implicitly 1) to extended coordinates.
 * NOTE(review): b+a and b-a recover Y and X here because table entries
 * were normalized by the stored z = 2Z (batch_normalize_niels), which
 * absorbs the factor of 2. */
static NOINLINE void
niels_to_pt (
    point_t e,
    const niels_t n
) {
    gf_add ( e->y, n->b, n->a );
    gf_sub ( e->x, n->b, n->a );
    gf_mul ( e->t, e->y, e->x );
    gf_copy ( e->z, ONE );
}
/**
 * Mixed addition d += e, where e is an (affine) Niels point.
 * If before_double is set, d->t is left uncomputed (the next doubling
 * never reads it).  Headroom annotations as in point_add().
 */
static NOINLINE void
add_niels_to_pt (
    point_t d,
    const niels_t e,
    int before_double
) {
    gf a, b, c;
    gf_sub_nr ( b, d->y, d->x ); /* 3+e */
    gf_mul ( a, e->a, b );       /* (Y-X)(y'-x') */
    gf_add_nr ( b, d->x, d->y ); /* 2+e */
    gf_mul ( d->y, e->b, b );    /* (Y+X)(y'+x') */
    gf_mul ( d->x, e->c, d->t ); /* 2d'T*t' */
    gf_add_nr ( c, a, d->y ); /* 2+e */
    gf_sub_nr ( b, d->y, a ); /* 3+e */
    gf_sub_nr ( d->y, d->z, d->x ); /* 3+e */
    gf_add_nr ( a, d->x, d->z ); /* 2+e */
    gf_mul ( d->z, a, d->y );
    gf_mul ( d->x, d->y, b );
    gf_mul ( d->y, a, c );
    if (!before_double) gf_mul ( d->t, b, c );
}
/**
 * Mixed subtraction d -= e.  Same structure as add_niels_to_pt() with
 * e->a/e->b exchanged and the Z±X pair swapped, which negates e.
 */
static NOINLINE void
sub_niels_from_pt (
    point_t d,
    const niels_t e,
    int before_double
) {
    gf a, b, c;
    gf_sub_nr ( b, d->y, d->x ); /* 3+e */
    gf_mul ( a, e->b, b );
    gf_add_nr ( b, d->x, d->y ); /* 2+e */
    gf_mul ( d->y, e->a, b );
    gf_mul ( d->x, e->c, d->t );
    gf_add_nr ( c, a, d->y ); /* 2+e */
    gf_sub_nr ( b, d->y, a ); /* 3+e */
    gf_add_nr ( d->y, d->z, d->x ); /* 2+e */
    gf_sub_nr ( a, d->z, d->x ); /* 3+e */
    gf_mul ( d->z, a, d->y );
    gf_mul ( d->x, d->y, b );
    gf_mul ( d->y, a, c );
    if (!before_double) gf_mul ( d->t, b, c );
}
  426. static void
  427. add_pniels_to_pt (
  428. point_t p,
  429. const pniels_t pn,
  430. int before_double
  431. ) {
  432. gf L0;
  433. gf_mul ( L0, p->z, pn->z );
  434. gf_copy ( p->z, L0 );
  435. add_niels_to_pt( p, pn->n, before_double );
  436. }
  437. static void
  438. sub_pniels_from_pt (
  439. point_t p,
  440. const pniels_t pn,
  441. int before_double
  442. ) {
  443. gf L0;
  444. gf_mul ( L0, p->z, pn->z );
  445. gf_copy ( p->z, L0 );
  446. sub_niels_from_pt( p, pn->n, before_double );
  447. }
/**
 * Fill multiples[] with the odd multiples 1*b, 3*b, ..., (2*ntable-1)*b
 * in pniels form, for the signed fixed-window ladders below.
 */
static NOINLINE void
prepare_fixed_window(
    pniels_t *multiples,
    const point_t b,
    int ntable
) {
    point_t tmp;
    pniels_t pn;
    int i;
    point_double_internal(tmp, b, 0);
    pt_to_pniels(pn, tmp);          /* pn = 2b, the stride between odd multiples */
    pt_to_pniels(multiples[0], b);
    API_NS(point_copy)(tmp, b);
    for (i=1; i<ntable; i++) {
        add_pniels_to_pt(tmp, pn, 0);
        pt_to_pniels(multiples[i], tmp);
    }
    /* Scrub temporaries: they may be derived from secret data. */
    decaf_bzero(pn,sizeof(pn));
    decaf_bzero(tmp,sizeof(tmp));
}
/**
 * Constant-time scalar multiplication a = scalar * b, using a signed
 * fixed-window ladder over a table of odd multiples of b.  The
 * adjustment+halve turns the scalar into the signed-digit form the
 * ladder consumes (matching point_scalarmul_adjustment above).
 */
void API_NS(point_scalarmul) (
    point_t a,
    const point_t b,
    const scalar_t scalar
) {
    const int WINDOW = DECAF_WINDOW_BITS,
        WINDOW_MASK = (1<<WINDOW)-1,
        WINDOW_T_MASK = WINDOW_MASK >> 1,
        NTABLE = 1<<(WINDOW-1);
    scalar_t scalar1x;
    API_NS(scalar_add)(scalar1x, scalar, point_scalarmul_adjustment);
    API_NS(scalar_halve)(scalar1x,scalar1x);
    /* Set up a precomputed table with odd multiples of b. */
    pniels_t pn, multiples[NTABLE];
    point_t tmp;
    prepare_fixed_window(multiples, b, NTABLE);
    /* Initialize. */
    int i,j,first=1;
    i = SCALAR_BITS - ((SCALAR_BITS-1) % WINDOW) - 1;
    for (; i>=0; i-=WINDOW) {
        /* Fetch another block of bits (may straddle a limb boundary) */
        word_t bits = scalar1x->limb[i/WBITS] >> (i%WBITS);
        if (i%WBITS >= WBITS-WINDOW && i/WBITS<SCALAR_LIMBS-1) {
            bits ^= scalar1x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
        }
        bits &= WINDOW_MASK;
        /* Top window bit is the sign: inv is all-ones to negate the digit. */
        mask_t inv = (bits>>(WINDOW-1))-1;
        bits ^= inv;
        /* Add in from table.  Compute t only on last iteration. */
        constant_time_lookup(pn, multiples, sizeof(pn), NTABLE, bits & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv);
        if (first) {
            pniels_to_pt(tmp, pn);
            first = 0;
        } else {
            /* Using Hisil et al's lookahead method instead of extensible here
             * for no particular reason.  Double WINDOW times, but only compute t on
             * the last one.
             */
            for (j=0; j<WINDOW-1; j++)
                point_double_internal(tmp, tmp, -1);
            point_double_internal(tmp, tmp, 0);
            add_pniels_to_pt(tmp, pn, i ? -1 : 0);
        }
    }
    /* Write out the answer */
    API_NS(point_copy)(a,tmp);
    /* Scrub secret-dependent temporaries. */
    decaf_bzero(scalar1x,sizeof(scalar1x));
    decaf_bzero(pn,sizeof(pn));
    decaf_bzero(multiples,sizeof(multiples));
    decaf_bzero(tmp,sizeof(tmp));
}
/**
 * Constant-time double-base scalar multiplication:
 * a = scalarb * b + scalarc * c.  Same signed fixed-window ladder as
 * point_scalarmul(), sharing the doubling chain between both bases.
 */
void API_NS(point_double_scalarmul) (
    point_t a,
    const point_t b,
    const scalar_t scalarb,
    const point_t c,
    const scalar_t scalarc
) {
    const int WINDOW = DECAF_WINDOW_BITS,
        WINDOW_MASK = (1<<WINDOW)-1,
        WINDOW_T_MASK = WINDOW_MASK >> 1,
        NTABLE = 1<<(WINDOW-1);
    scalar_t scalar1x, scalar2x;
    API_NS(scalar_add)(scalar1x, scalarb, point_scalarmul_adjustment);
    API_NS(scalar_halve)(scalar1x,scalar1x);
    API_NS(scalar_add)(scalar2x, scalarc, point_scalarmul_adjustment);
    API_NS(scalar_halve)(scalar2x,scalar2x);
    /* Set up a precomputed table with odd multiples of b. */
    pniels_t pn, multiples1[NTABLE], multiples2[NTABLE];
    point_t tmp;
    prepare_fixed_window(multiples1, b, NTABLE);
    prepare_fixed_window(multiples2, c, NTABLE);
    /* Initialize. */
    int i,j,first=1;
    i = SCALAR_BITS - ((SCALAR_BITS-1) % WINDOW) - 1;
    for (; i>=0; i-=WINDOW) {
        /* Fetch another block of bits from each scalar */
        word_t bits1 = scalar1x->limb[i/WBITS] >> (i%WBITS),
               bits2 = scalar2x->limb[i/WBITS] >> (i%WBITS);
        if (i%WBITS >= WBITS-WINDOW && i/WBITS<SCALAR_LIMBS-1) {
            bits1 ^= scalar1x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
            bits2 ^= scalar2x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
        }
        bits1 &= WINDOW_MASK;
        bits2 &= WINDOW_MASK;
        /* Signed-digit recoding: top window bit selects negation. */
        mask_t inv1 = (bits1>>(WINDOW-1))-1;
        mask_t inv2 = (bits2>>(WINDOW-1))-1;
        bits1 ^= inv1;
        bits2 ^= inv2;
        /* Add in from table.  Compute t only on last iteration. */
        constant_time_lookup(pn, multiples1, sizeof(pn), NTABLE, bits1 & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv1);
        if (first) {
            pniels_to_pt(tmp, pn);
            first = 0;
        } else {
            /* Using Hisil et al's lookahead method instead of extensible here
             * for no particular reason.  Double WINDOW times, but only compute t on
             * the last one.
             */
            for (j=0; j<WINDOW-1; j++)
                point_double_internal(tmp, tmp, -1);
            point_double_internal(tmp, tmp, 0);
            add_pniels_to_pt(tmp, pn, 0);
        }
        constant_time_lookup(pn, multiples2, sizeof(pn), NTABLE, bits2 & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv2);
        add_pniels_to_pt(tmp, pn, i?-1:0);
    }
    /* Write out the answer */
    API_NS(point_copy)(a,tmp);
    /* Scrub secret-dependent temporaries. */
    decaf_bzero(scalar1x,sizeof(scalar1x));
    decaf_bzero(scalar2x,sizeof(scalar2x));
    decaf_bzero(pn,sizeof(pn));
    decaf_bzero(multiples1,sizeof(multiples1));
    decaf_bzero(multiples2,sizeof(multiples2));
    decaf_bzero(tmp,sizeof(tmp));
}
/**
 * Constant-time dual scalar multiplication of one base:
 * a1 = scalar1 * b and a2 = scalar2 * b, computed together so the
 * doubling chain on b is shared.  Digits are accumulated into per-digit
 * bucket tables (multiples1/2) and combined at the end by the standard
 * bucket-summation (sum of suffix sums).
 */
void API_NS(point_dual_scalarmul) (
    point_t a1,
    point_t a2,
    const point_t b,
    const scalar_t scalar1,
    const scalar_t scalar2
) {
    const int WINDOW = DECAF_WINDOW_BITS,
        WINDOW_MASK = (1<<WINDOW)-1,
        WINDOW_T_MASK = WINDOW_MASK >> 1,
        NTABLE = 1<<(WINDOW-1);
    scalar_t scalar1x, scalar2x;
    API_NS(scalar_add)(scalar1x, scalar1, point_scalarmul_adjustment);
    API_NS(scalar_halve)(scalar1x,scalar1x);
    API_NS(scalar_add)(scalar2x, scalar2, point_scalarmul_adjustment);
    API_NS(scalar_halve)(scalar2x,scalar2x);
    /* Set up a precomputed table with odd multiples of b. */
    point_t multiples1[NTABLE], multiples2[NTABLE], working, tmp;
    pniels_t pn;
    API_NS(point_copy)(working, b);
    /* Initialize buckets to the identity. */
    int i,j;
    for (i=0; i<NTABLE; i++) {
        API_NS(point_copy)(multiples1[i], API_NS(point_identity));
        API_NS(point_copy)(multiples2[i], API_NS(point_identity));
    }
    for (i=0; i<SCALAR_BITS; i+=WINDOW) {
        if (i) {
            for (j=0; j<WINDOW-1; j++)
                point_double_internal(working, working, -1);
            point_double_internal(working, working, 0);
        }
        /* Fetch another block of bits from each scalar */
        word_t bits1 = scalar1x->limb[i/WBITS] >> (i%WBITS),
               bits2 = scalar2x->limb[i/WBITS] >> (i%WBITS);
        if (i%WBITS >= WBITS-WINDOW && i/WBITS<SCALAR_LIMBS-1) {
            bits1 ^= scalar1x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
            bits2 ^= scalar2x->limb[i/WBITS+1] << (WBITS - (i%WBITS));
        }
        bits1 &= WINDOW_MASK;
        bits2 &= WINDOW_MASK;
        /* Signed-digit recoding: top window bit selects negation. */
        mask_t inv1 = (bits1>>(WINDOW-1))-1;
        mask_t inv2 = (bits2>>(WINDOW-1))-1;
        bits1 ^= inv1;
        bits2 ^= inv2;
        pt_to_pniels(pn, working);
        constant_time_lookup(tmp, multiples1, sizeof(tmp), NTABLE, bits1 & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv1);
        /* add_pniels_to_pt(multiples1[bits1 & WINDOW_T_MASK], pn, 0); */
        add_pniels_to_pt(tmp, pn, 0);
        constant_time_insert(multiples1, tmp, sizeof(tmp), NTABLE, bits1 & WINDOW_T_MASK);
        constant_time_lookup(tmp, multiples2, sizeof(tmp), NTABLE, bits2 & WINDOW_T_MASK);
        cond_neg_niels(pn->n, inv1^inv2); /* undo inv1's negation, then apply inv2 */
        /* add_pniels_to_pt(multiples2[bits2 & WINDOW_T_MASK], pn, 0); */
        add_pniels_to_pt(tmp, pn, 0);
        constant_time_insert(multiples2, tmp, sizeof(tmp), NTABLE, bits2 & WINDOW_T_MASK);
    }
    /* Combine the buckets: answer = sum over k of (2k+1) * bucket[k]. */
    if (NTABLE > 1) {
        API_NS(point_copy)(working, multiples1[NTABLE-1]);
        API_NS(point_copy)(tmp    , multiples2[NTABLE-1]);
        for (i=NTABLE-1; i>1; i--) {
            API_NS(point_add)(multiples1[i-1], multiples1[i-1], multiples1[i]);
            API_NS(point_add)(multiples2[i-1], multiples2[i-1], multiples2[i]);
            API_NS(point_add)(working, working, multiples1[i-1]);
            API_NS(point_add)(tmp, tmp, multiples2[i-1]);
        }
        API_NS(point_add)(multiples1[0], multiples1[0], multiples1[1]);
        API_NS(point_add)(multiples2[0], multiples2[0], multiples2[1]);
        point_double_internal(working, working, 0);
        point_double_internal(tmp, tmp, 0);
        API_NS(point_add)(a1, working, multiples1[0]);
        API_NS(point_add)(a2, tmp, multiples2[0]);
    } else {
        API_NS(point_copy)(a1, multiples1[0]);
        API_NS(point_copy)(a2, multiples2[0]);
    }
    /* Scrub secret-dependent temporaries. */
    decaf_bzero(scalar1x,sizeof(scalar1x));
    decaf_bzero(scalar2x,sizeof(scalar2x));
    decaf_bzero(pn,sizeof(pn));
    decaf_bzero(multiples1,sizeof(multiples1));
    decaf_bzero(multiples2,sizeof(multiples2));
    decaf_bzero(tmp,sizeof(tmp));
    decaf_bzero(working,sizeof(working));
}
/** Decaf equality: true iff p and q encode the same decaf point, i.e.
 * are equal up to the cofactor component. */
decaf_bool_t API_NS(point_eq) ( const point_t p, const point_t q ) {
    /* equality mod 2-torsion compares x/y */
    gf a, b;
    gf_mul ( a, p->y, q->x );
    gf_mul ( b, q->y, p->x );
    mask_t succ = gf_eq(a,b); /* Y1*X2 == Y2*X1, i.e. X/Y matches */
#if (COFACTOR == 8) && IMAGINE_TWIST
    /* Also accept the 4-torsion rotation (x,y) <-> (iy,ix). */
    gf_mul ( a, p->y, q->y );
    gf_mul ( b, q->x, p->x );
#if !(IMAGINE_TWIST)
    /* NOTE(review): unreachable under the enclosing "&& IMAGINE_TWIST"
     * guard; apparently retained for future non-twisted cofactor-8 curves. */
    gf_sub ( a, ZERO, a );
#else
    /* Interesting note: the 4tor would normally be rotation.
     * But because of the *i twist, it's actually
     * (x,y) <-> (iy,ix)
     */
    /* No code, just a comment. */
#endif
    succ |= gf_eq(a,b);
#endif
    return mask_to_bool(succ);
}
/** Check that p is a well-formed point: the extended-coordinate
 * invariant XY == ZT, the (twisted) curve equation, and Z != 0. */
decaf_bool_t API_NS(point_valid) (
    const point_t p
) {
    gf a,b,c;
    gf_mul(a,p->x,p->y);
    gf_mul(b,p->z,p->t);
    mask_t out = gf_eq(a,b);   /* XY == ZT */
    gf_sqr(a,p->x);
    gf_sqr(b,p->y);
    gf_sub(a,b,a);             /* Y^2 - X^2 */
    gf_sqr(b,p->t);
    gf_mulw(c,b,TWISTED_D);
    gf_sqr(b,p->z);
    gf_add(b,b,c);             /* Z^2 + d'*T^2 */
    out &= gf_eq(a,b);         /* curve equation in twisted form */
    out &= ~gf_eq(p->z,ZERO);
    return mask_to_bool(out);
}
/** Debug helper: apply a torsion component, producing a different
 * coordinate representative that should still encode/compare as the
 * same decaf point.  Safe when q aliases p (tmp buffers p->x first). */
void API_NS(point_debugging_torque) (
    point_t q,
    const point_t p
) {
#if COFACTOR == 8
    gf tmp;
    gf_mul(tmp,p->x,SQRT_MINUS_ONE);
    gf_mul(q->x,p->y,SQRT_MINUS_ONE); /* (x,y) -> (iy,ix): the 4-torsion rotation */
    gf_copy(q->y,tmp);
    gf_copy(q->z,p->z);
    gf_sub(q->t,ZERO,p->t);
#else
    /* Cofactor 4: negate X and Y (the 2-torsion). */
    gf_sub(q->x,ZERO,p->x);
    gf_sub(q->y,ZERO,p->y);
    gf_copy(q->z,p->z);
    gf_copy(q->t,p->t);
#endif
}
/** Debug helper: multiply all four coordinates by a field element
 * deserialized from factor[] -- a projective rescaling, so q represents
 * the same point as p.  A zero factor is replaced by ONE so the result
 * stays valid.  tmp makes the routine safe when q aliases p. */
void API_NS(point_debugging_pscale) (
    point_t q,
    const point_t p,
    const uint8_t factor[SER_BYTES]
) {
    gf gfac,tmp;
    /* NB this means you'll never pscale by negative numbers for p521 */
    ignore_result(gf_deserialize(gfac,factor,0));
    gf_cond_sel(gfac,gfac,ONE,gf_eq(gfac,ZERO));
    gf_mul(tmp,p->x,gfac);
    gf_copy(q->x,tmp);
    gf_mul(tmp,p->y,gfac);
    gf_copy(q->y,tmp);
    gf_mul(tmp,p->z,gfac);
    gf_copy(q->z,tmp);
    gf_mul(tmp,p->t,gfac);
    gf_copy(q->t,tmp);
}
/** Batch field inversion (Montgomery's trick): out[i] = 1/in[i] for all
 * i, at the cost of one gf_invert plus ~3n multiplications.  Requires
 * n > 1 (asserted) and all in[i] invertible.  out must not alias in. */
static void gf_batch_invert (
    gf *__restrict__ out,
    const gf *in,
    unsigned int n
) {
    gf t1;
    assert(n>1);
    /* Forward pass: out[i+1] = in[0]*...*in[i] (prefix products). */
    gf_copy(out[1], in[0]);
    int i;
    for (i=1; i<(int) (n-1); i++) {
        gf_mul(out[i+1], out[i], in[i]);
    }
    gf_mul(out[0], out[n-1], in[n-1]); /* product of all inputs */
    gf_invert(out[0], out[0]);         /* the single real inversion */
    /* Backward pass: peel off one inverse per element. */
    for (i=n-1; i>0; i--) {
        gf_mul(t1, out[i], out[0]);
        gf_copy(out[i], t1);
        gf_mul(t1, out[0], in[i]);
        gf_copy(out[0], t1);
    }
}
/** Normalize a table of Niels points: scale each entry by 1/zs[i] and
 * strong-reduce so the stored values are fully canonical.  zis is
 * caller-provided scratch for the batch inversion. */
static void batch_normalize_niels (
    niels_t *table,
    const gf *zs,
    gf *__restrict__ zis,
    int n
) {
    int i;
    gf product;
    gf_batch_invert(zis, zs, n);
    for (i=0; i<n; i++) {
        gf_mul(product, table[i]->a, zis[i]);
        gf_strong_reduce(product);
        gf_copy(table[i]->a, product);
        gf_mul(product, table[i]->b, zis[i]);
        gf_strong_reduce(product);
        gf_copy(table[i]->b, product);
        gf_mul(product, table[i]->c, zis[i]);
        gf_strong_reduce(product);
        gf_copy(table[i]->c, product);
    }
    decaf_bzero(product,sizeof(product)); /* may be derived from secret data */
}
/**
 * Build a comb precomputation table for `base`, for use with
 * precomputed_scalarmul().  Layout: COMBS_N combs, each holding the
 * 2^(t-1) signed combinations of t comb teeth spaced s bits apart;
 * the combinations are visited in Gray-code order so each differs from
 * the previous by one add/sub.  All entries are z-normalized at the end.
 */
void API_NS(precompute) (
    precomputed_s *table,
    const point_t base
) {
    const unsigned int n = COMBS_N, t = COMBS_T, s = COMBS_S;
    assert(n*t*s >= SCALAR_BITS); /* combs must cover every scalar bit */
    point_t working, start, doubles[t-1];
    API_NS(point_copy)(working, base);
    pniels_t pn_tmp;
    gf zs[n<<(t-1)], zis[n<<(t-1)];
    unsigned int i,j,k;
    /* Compute n tables */
    for (i=0; i<n; i++) {
        /* Doubling phase: start = sum of this comb's teeth; doubles[j]
         * remembers each intermediate tooth for the Gray-code walk. */
        for (j=0; j<t; j++) {
            if (j) API_NS(point_add)(start, start, working);
            else API_NS(point_copy)(start, working);
            if (j==t-1 && i==n-1) break;
            point_double_internal(working, working,0);
            if (j<t-1) API_NS(point_copy)(doubles[j], working);
            for (k=0; k<s-1; k++)
                point_double_internal(working, working, k<s-2);
        }
        /* Gray-code phase: enumerate all sign patterns of the teeth. */
        for (j=0;; j++) {
            int gray = j ^ (j>>1);
            int idx = (((i+1)<<(t-1))-1) ^ gray;
            pt_to_pniels(pn_tmp, start);
            memcpy(table->table[idx], pn_tmp->n, sizeof(pn_tmp->n));
            gf_copy(zs[idx], pn_tmp->z);
            if (j >= (1u<<(t-1)) - 1) break;
            int delta = (j+1) ^ ((j+1)>>1) ^ gray; /* the single tooth that flips */
            for (k=0; delta>1; k++)
                delta >>=1;
            if (gray & (1<<k)) {
                API_NS(point_add)(start, start, doubles[k]);
            } else {
                API_NS(point_sub)(start, start, doubles[k]);
            }
        }
    }
    batch_normalize_niels(table->table,(const gf *)zs,zis,n<<(t-1));
    /* Scrub temporaries: they may be derived from secret data. */
    decaf_bzero(zs,sizeof(zs));
    decaf_bzero(zis,sizeof(zis));
    decaf_bzero(pn_tmp,sizeof(pn_tmp));
    decaf_bzero(working,sizeof(working));
    decaf_bzero(start,sizeof(start));
    decaf_bzero(doubles,sizeof(doubles));
}
/** Fetch table[idx] into ni in constant time (index-independent memory
 * access pattern, via constant_time_lookup). */
static INLINE void
constant_time_lookup_niels (
    niels_s *__restrict__ ni,
    const niels_t *table,
    int nelts,
    int idx
) {
    constant_time_lookup(ni, table, sizeof(niels_s), nelts, idx);
}
/**
 * Constant-time scalar multiplication by the precomputed (comb) table:
 * out = scalar * (table's base).  The adjustment+halve puts the scalar
 * into the signed-comb form (matching precomputed_scalarmul_adjustment).
 */
void API_NS(precomputed_scalarmul) (
    point_t out,
    const precomputed_s *table,
    const scalar_t scalar
) {
    int i;
    unsigned j,k;
    const unsigned int n = COMBS_N, t = COMBS_T, s = COMBS_S;
    scalar_t scalar1x;
    API_NS(scalar_add)(scalar1x, scalar, precomputed_scalarmul_adjustment);
    API_NS(scalar_halve)(scalar1x,scalar1x);
    niels_t ni;
    for (i=s-1; i>=0; i--) {
        if (i != (int)s-1) point_double_internal(out,out,0);
        for (j=0; j<n; j++) {
            /* Gather the t comb teeth for comb j at bit offset i. */
            int tab = 0;
            for (k=0; k<t; k++) {
                unsigned int bit = i + s*(k + j*t);
                if (bit < SCALAR_BITS) {
                    tab |= (scalar1x->limb[bit/WBITS] >> (bit%WBITS) & 1) << k;
                }
            }
            /* Top tooth is the sign; fold it into a conditional negation. */
            mask_t invert = (tab>>(t-1))-1;
            tab ^= invert;
            tab &= (1<<(t-1)) - 1;
            constant_time_lookup_niels(ni, &table->table[j<<(t-1)], 1<<(t-1), tab);
            cond_neg_niels(ni, invert);
            if ((i!=(int)s-1)||j) {
                add_niels_to_pt(out, ni, j==n-1 && i); /* skip T except before output */
            } else {
                niels_to_pt(out, ni); /* first term initializes out */
            }
        }
    }
    /* Scrub secret-dependent temporaries. */
    decaf_bzero(ni,sizeof(ni));
    decaf_bzero(scalar1x,sizeof(scalar1x));
}
/** Constant-time select: out = pick_b ? b : a. */
void API_NS(point_cond_sel) (
    point_t out,
    const point_t a,
    const point_t b,
    decaf_bool_t pick_b
) {
    constant_time_select(out,a,b,sizeof(point_t),bool_to_mask(pick_b),0);
}
/* FUTURE: restore Curve25519 Montgomery ladder? */
/**
 * Decode base, multiply by scalar, re-encode into scaled.
 * If short_circuit, return immediately on a bad base (timing reveals
 * validity); otherwise substitute the generator and run the full
 * multiplication anyway, still returning the decode failure at the end.
 */
decaf_error_t API_NS(direct_scalarmul) (
    uint8_t scaled[SER_BYTES],
    const uint8_t base[SER_BYTES],
    const scalar_t scalar,
    decaf_bool_t allow_identity,
    decaf_bool_t short_circuit
) {
    point_t basep;
    decaf_error_t succ = API_NS(point_decode)(basep, base, allow_identity);
    if (short_circuit && succ != DECAF_SUCCESS) return succ;
    /* On failure (when not short-circuiting), fall back to the base point
     * so the scalarmul below runs on valid data. */
    API_NS(point_cond_sel)(basep, API_NS(point_base), basep, succ);
    API_NS(point_scalarmul)(basep, basep, scalar);
    API_NS(point_encode)(scaled, basep);
    API_NS(point_destroy)(basep);
    return succ;
}
/**
 * Multiply a point by the cofactor and serialize it in EdDSA wire format:
 * little-endian y coordinate, with the sign of x in the top bit of the
 * last byte.
 *
 * The decaf point lives on the (possibly twisted) internal curve; depending
 * on the template configuration it is moved to the EdDSA curve by a
 * doubling, a sigma isogeny, or a plain 4-isogeny before encoding.
 */
void API_NS(point_mul_by_cofactor_and_encode_like_eddsa) (
    uint8_t enc[DECAF_EDDSA_$(gf_shortname)_PUBLIC_BYTES],
    const point_t p
) {

    /* The point is now on the twisted curve.  Move it to untwisted. */
    gf x, y, z, t;
    point_t q;
#if COFACTOR == 8
    /* Cofactor 8: absorb one factor of 2 here before the isogeny. */
    API_NS(point_double)(q,p);
#else
    API_NS(point_copy)(q,p);
#endif

#if EDDSA_USE_SIGMA_ISOGENY
    {
        /* Use 4-isogeny like ed25519:
         *   2*x*y*sqrt(d/a-1)/(ax^2 + y^2 - 2)
         *   (y^2 - ax^2)/(y^2 + ax^2)
         * with a = -1, d = -EDWARDS_D:
         *   -2xysqrt(EDWARDS_D-1)/(2z^2-y^2+x^2)
         *   (y^2+x^2)/(y^2-x^2)
         */
        gf u;
        gf_sqr ( x, q->x ); // x^2
        gf_sqr ( t, q->y ); // y^2
        gf_add( u, x, t ); // x^2 + y^2
        gf_add( z, q->y, q->x );
        gf_sqr ( y, z);
        gf_sub ( y, u, y ); // -2xy
        gf_sub ( z, t, x ); // y^2 - x^2
        gf_sqr ( x, q->z );
        gf_add ( t, x, x);
        gf_sub ( t, t, z); // 2z^2 - y^2 + x^2
        gf_mul ( x, y, z ); // 2xy(y^2-x^2)
        gf_mul ( y, u, t ); // (x^2+y^2)(2z^2-y^2+x^2)
        gf_mul ( u, z, t );
        gf_copy( z, u );
        gf_mul ( u, x, SQRT_ONE_MINUS_D );
        gf_copy( x, u );
        decaf_bzero(u,sizeof(u));
    }
#elif IMAGINE_TWIST
    {
        /* Two more doublings absorb the rest of the cofactor; the imaginary
         * twist is undone by multiplying x by the QNR. */
        API_NS(point_double)(q,q);
        API_NS(point_double)(q,q);
        gf_mul_qnr(x, q->x);
        gf_copy(y, q->y);
        gf_copy(z, q->z);
    }
#else
    {
        /* 4-isogeny: 2xy/(y^2+x^2), (y^2-x^2)/(2z^2-y^2+x^2) */
        gf u;
        gf_sqr ( x, q->x );
        gf_sqr ( t, q->y );
        gf_add( u, x, t );
        gf_add( z, q->y, q->x );
        gf_sqr ( y, z);
        gf_sub ( y, u, y );
        gf_sub ( z, t, x );
        gf_sqr ( x, q->z );
        gf_add ( t, x, x);
        gf_sub ( t, t, z);
        gf_mul ( x, t, y );
        gf_mul ( y, z, u );
        gf_mul ( z, u, t );
        decaf_bzero(u,sizeof(u));
    }
#endif
    /* Affinize: after this, t = affine x (sign source), x = affine y. */
    gf_invert(z,z);
    gf_mul(t,x,z);
    gf_mul(x,y,z);

    /* Encode: little-endian y with sign(x) in the top bit of the last byte. */
    enc[DECAF_EDDSA_$(gf_shortname)_PRIVATE_BYTES-1] = 0;
    gf_serialize(enc, x, 1);
    enc[DECAF_EDDSA_$(gf_shortname)_PRIVATE_BYTES-1] |= 0x80 & gf_lobit(t);

    /* Scrub temporaries. */
    decaf_bzero(x,sizeof(x));
    decaf_bzero(y,sizeof(y));
    decaf_bzero(z,sizeof(z));
    decaf_bzero(t,sizeof(t));
    API_NS(point_destroy)(q);
}
/**
 * Decode an EdDSA-format public key (little-endian y, sign-of-x in the top
 * bit) into an internal point, without multiplying by the cofactor.
 *
 * @return DECAF_SUCCESS if the encoding is canonical and y yields a valid
 *         curve point; DECAF_FAILURE otherwise (p is then not a valid point).
 */
decaf_error_t API_NS(point_decode_like_eddsa_and_ignore_cofactor) (
    point_t p,
    const uint8_t enc[DECAF_EDDSA_$(gf_shortname)_PUBLIC_BYTES]
) {
    uint8_t enc2[DECAF_EDDSA_$(gf_shortname)_PUBLIC_BYTES];
    memcpy(enc2,enc,sizeof(enc2));

    /* Extract and clear the sign-of-x bit (top bit of the last byte). */
    mask_t low = ~word_is_zero(enc2[DECAF_EDDSA_$(gf_shortname)_PRIVATE_BYTES-1] & 0x80);
    enc2[DECAF_EDDSA_$(gf_shortname)_PRIVATE_BYTES-1] &= ~0x80;

    mask_t succ = DECAF_TRUE;
#if $(gf_bits % 8) == 0
    /* Field fills the last byte exactly: remaining bits must be zero. */
    succ = word_is_zero(enc2[DECAF_EDDSA_$(gf_shortname)_PRIVATE_BYTES-1]);
#endif

    succ &= gf_deserialize(p->y, enc2, 1);

    /* Recover x = +/- sqrt((1-y^2)/(1-dy^2)) (or the sigma-isogeny variant),
     * choosing the sign requested by the encoding. */
    gf_sqr(p->x,p->y);
    gf_sub(p->z,ONE,p->x); /* num = 1-y^2 */
#if EDDSA_USE_SIGMA_ISOGENY
    gf_mulw(p->t,p->z,EDWARDS_D); /* d-dy^2 */
    gf_mulw(p->x,p->z,EDWARDS_D-1); /* num = (1-y^2)(d-1) */
    gf_copy(p->z,p->x);
#else
    gf_mulw(p->t,p->x,EDWARDS_D); /* dy^2 */
#endif
    gf_sub(p->t,ONE,p->t); /* denom = 1-dy^2 or 1-d + dy^2 */

    gf_mul(p->x,p->z,p->t);
    succ &= gf_isr(p->t,p->x); /* 1/sqrt(num * denom); fails if not square */

    gf_mul(p->x,p->t,p->z); /* sqrt(num / denom) */
    gf_cond_neg(p->x,~gf_lobit(p->x)^low); /* force the requested sign */
    gf_copy(p->z,ONE);

#if EDDSA_USE_SIGMA_ISOGENY
    {
        /* Use 4-isogeny like ed25519:
         *   2*x*y/sqrt(1-d/a)/(ax^2 + y^2 - 2)
         *   (y^2 - ax^2)/(y^2 + ax^2)
         * (MAGIC: above formula may be off by a factor of -a
         * or something somewhere; check it for other a)
         *
         * with a = -1, d = -EDWARDS_D:
         *   -2xy/sqrt(1-EDWARDS_D)/(2z^2-y^2+x^2)
         *   (y^2+x^2)/(y^2-x^2)
         */
        gf a, b, c, d;
        gf_sqr ( c, p->x );
        gf_sqr ( a, p->y );
        gf_add ( d, c, a ); // x^2 + y^2
        gf_add ( p->t, p->y, p->x );
        gf_sqr ( b, p->t );
        gf_sub ( b, b, d ); // 2xy
        gf_sub ( p->t, a, c ); // y^2 - x^2
        gf_sqr ( p->x, p->z );
        gf_add ( p->z, p->x, p->x );
        gf_sub ( a, p->z, p->t ); // 2z^2 - y^2 + x^2
        gf_mul ( c, a, SQRT_ONE_MINUS_D );
        gf_mul ( p->x, b, p->t); // (2xy)(y^2-x^2)
        gf_mul ( p->z, p->t, c ); // (y^2-x^2)sd(2z^2 - y^2 + x^2)
        gf_mul ( p->y, d, c ); // (y^2+x^2)sd(2z^2 - y^2 + x^2)
        gf_mul ( p->t, d, b );
        decaf_bzero(a,sizeof(a));
        decaf_bzero(b,sizeof(b));
        decaf_bzero(c,sizeof(c));
        decaf_bzero(d,sizeof(d));
    }
#elif IMAGINE_TWIST
    {
        /* Apply the imaginary twist to x, then complete extended coords. */
        gf_mul(p->t,p->x,SQRT_MINUS_ONE);
        gf_copy(p->x,p->t);
        gf_mul(p->t,p->x,p->y);
    }
#else
    {
        /* 4-isogeny 2xy/(y^2-ax^2), (y^2+ax^2)/(2-y^2-ax^2) */
        gf a, b, c, d;
        gf_sqr ( c, p->x );
        gf_sqr ( a, p->y );
        gf_add ( d, c, a );
        gf_add ( p->t, p->y, p->x );
        gf_sqr ( b, p->t );
        gf_sub ( b, b, d );
        gf_sub ( p->t, a, c );
        gf_sqr ( p->x, p->z );
        gf_add ( p->z, p->x, p->x );
        gf_sub ( a, p->z, d );
        gf_mul ( p->x, a, b );
        gf_mul ( p->z, p->t, a );
        gf_mul ( p->y, p->t, d );
        gf_mul ( p->t, b, d );
        decaf_bzero(a,sizeof(a));
        decaf_bzero(b,sizeof(b));
        decaf_bzero(c,sizeof(c));
        decaf_bzero(d,sizeof(d));
    }
#endif

    decaf_bzero(enc2,sizeof(enc2));
    assert(API_NS(point_valid)(p) || ~succ);

    return decaf_succeed_if(succ);
}
/**
 * X-only Diffie-Hellman (RFC 7748 style): out = x-coordinate of scalar*base,
 * computed with a constant-time Montgomery ladder.  The scalar is clamped
 * on the fly (cofactor bits cleared, top bit forced).
 *
 * @return DECAF_FAILURE if the result is zero (e.g. a small-order base
 *         point), DECAF_SUCCESS otherwise.
 */
decaf_error_t decaf_x$(gf_shortname) (
    uint8_t out[X_PUBLIC_BYTES],
    const uint8_t base[X_PUBLIC_BYTES],
    const uint8_t scalar[X_PRIVATE_BYTES]
) {
    gf x1, x2, z2, x3, z3, t1, t2;
    /* Out-of-range u-coordinates are accepted (reduced mod p). */
    ignore_result(gf_deserialize(x1,base,1));
    /* Ladder state: (x2:z2) = identity, (x3:z3) = base point. */
    gf_copy(x2,ONE);
    gf_copy(z2,ZERO);
    gf_copy(x3,x1);
    gf_copy(z3,ONE);

    int t;
    mask_t swap = 0;

    for (t = X_PRIVATE_BITS-1; t>=0; t--) {
        uint8_t sb = scalar[t/8];

        /* Scalar conditioning: clear cofactor bits in the low byte and force
         * the top bit set (sb = -1 == 0xFF; only bit t is consumed here). */
        if (t/8==0) sb &= -(uint8_t)COFACTOR;
        else if (t == X_PRIVATE_BITS-1) sb = -1;

        mask_t k_t = (sb>>(t%8)) & 1;
        k_t = -k_t; /* set to all 0s or all 1s */

        /* Lazy conditional swap driven by consecutive key bits. */
        swap ^= k_t;
        gf_cond_swap(x2,x3,swap);
        gf_cond_swap(z2,z3,swap);
        swap = k_t;

        /* One combined ladder step (double + differential add).
         * The trailing comments track carry/reduction slack. */
        gf_add_nr(t1,x2,z2); /* A = x2 + z2 */ /* 2+e */
        gf_sub_nr(t2,x2,z2); /* B = x2 - z2 */ /* 3+e */
        gf_sub_nr(z2,x3,z3); /* D = x3 - z3 */ /* 3+e */
        gf_mul(x2,t1,z2); /* DA */
        gf_add_nr(z2,z3,x3); /* C = x3 + z3 */ /* 2+e */
        gf_mul(x3,t2,z2); /* CB */
        gf_sub_nr(z3,x2,x3); /* DA-CB */ /* 3+e */
        gf_sqr(z2,z3); /* (DA-CB)^2 */
        gf_mul(z3,x1,z2); /* z3 = x1(DA-CB)^2 */
        gf_add_nr(z2,x2,x3); /* (DA+CB) */ /* 2+e */
        gf_sqr(x3,z2); /* x3 = (DA+CB)^2 */
        gf_sqr(z2,t1); /* AA = A^2 */
        gf_sqr(t1,t2); /* BB = B^2 */
        gf_mul(x2,z2,t1); /* x2 = AA*BB */
        gf_sub_nr(t2,z2,t1); /* E = AA-BB */ /* 3+e */
        gf_mulw(t1,t2,-EDWARDS_D); /* E*-d = a24*E */
        gf_add_nr(t1,t1,z2); /* AA + a24*E */ /* 2+e */
        gf_mul(z2,t2,t1); /* z2 = E(AA+a24*E) */
    }

    /* Finish: undo any pending swap, then affinize and serialize. */
    gf_cond_swap(x2,x3,swap);
    gf_cond_swap(z2,z3,swap);
    gf_invert(z2,z2);
    gf_mul(x1,x2,z2);
    gf_serialize(out,x1,1);
    mask_t nz = ~gf_eq(x1,ZERO);

    /* Scrub the ladder state. */
    decaf_bzero(x1,sizeof(x1));
    decaf_bzero(x2,sizeof(x2));
    decaf_bzero(z2,sizeof(z2));
    decaf_bzero(x3,sizeof(x3));
    decaf_bzero(z3,sizeof(z3));
    decaf_bzero(t1,sizeof(t1));
    decaf_bzero(t2,sizeof(t2));

    return decaf_succeed_if(mask_to_bool(nz));
}
/**
 * Compute an X-DH public key from a private scalar.
 * Thin alias of decaf_x$(gf_shortname)_derive_public_key (presumably kept
 * for API backward compatibility — verify against the public header).
 */
void decaf_x$(gf_shortname)_generate_key (
    uint8_t out[X_PUBLIC_BYTES],
    const uint8_t scalar[X_PRIVATE_BYTES]
) {
    decaf_x$(gf_shortname)_derive_public_key(out,scalar);
}
/**
 * Derive the X-DH public key for a private scalar:
 * out = Montgomery x-coordinate of (clamped scalar) * base point.
 *
 * Uses the fast precomputed Edwards comb multiply instead of a ladder, then
 * maps the result to the Montgomery curve.
 */
void decaf_x$(gf_shortname)_derive_public_key (
    uint8_t out[X_PUBLIC_BYTES],
    const uint8_t scalar[X_PRIVATE_BYTES]
) {
    /* Scalar conditioning: clear cofactor bits, force the top bit set
     * (same clamping the ladder applies on the fly). */
    uint8_t scalar2[X_PRIVATE_BYTES];
    memcpy(scalar2,scalar,sizeof(scalar2));
    scalar2[0] &= -(uint8_t)COFACTOR;

    scalar2[X_PRIVATE_BYTES-1] &= ~(-1u<<((X_PRIVATE_BITS+7)%8));
    scalar2[X_PRIVATE_BYTES-1] |= 1<<((X_PRIVATE_BITS+7)%8);

    scalar_t the_scalar;
    API_NS(scalar_decode_long)(the_scalar,scalar2,sizeof(scalar2));

    /* We're gonna isogenize by 2, so divide by 2.
     *
     * Why by 2, even though it's a 4-isogeny?
     *
     * The isogeny map looks like
     * Montgomery <-2-> Jacobi <-2-> Edwards
     *
     * Since the Jacobi base point is the PREimage of the iso to
     * the Montgomery curve, and we're going
     * Jacobi -> Edwards -> Jacobi -> Montgomery,
     * we pick up only a factor of 2 over Jacobi -> Montgomery.
     */
    API_NS(scalar_halve)(the_scalar,the_scalar);
    point_t p;
    API_NS(precomputed_scalarmul)(p,API_NS(precomputed_base),the_scalar);

    /* Isogenize to Montgomery curve: u = (y/x)^2, negated under the twist. */
    gf_invert(p->t,p->x); /* 1/x */
    gf_mul(p->z,p->t,p->y); /* y/x */
    gf_sqr(p->y,p->z); /* (y/x)^2 */
#if IMAGINE_TWIST
    gf_sub(p->y,ZERO,p->y);
#endif
    gf_serialize(out,p->y,1);

    /* Scrub secrets. */
    decaf_bzero(scalar2,sizeof(scalar2));
    API_NS(scalar_destroy)(the_scalar);
    API_NS(point_destroy)(p);
}
/**
 * @cond internal
 * Control for variable-time scalar multiply algorithms.
 * One entry per nonzero wNAF digit, terminated by power == -1.
 */
struct smvt_control {
    int power, addend; /* power: bit position (-1 = end marker); addend: signed odd window digit */
};
  1198. static int recode_wnaf (
  1199. struct smvt_control *control, /* [nbits/(table_bits+1) + 3] */
  1200. const scalar_t scalar,
  1201. unsigned int table_bits
  1202. ) {
  1203. unsigned int table_size = SCALAR_BITS/(table_bits+1) + 3;
  1204. int position = table_size - 1; /* at the end */
  1205. /* place the end marker */
  1206. control[position].power = -1;
  1207. control[position].addend = 0;
  1208. position--;
  1209. /* PERF: Could negate scalar if it's large. But then would need more cases
  1210. * in the actual code that uses it, all for an expected reduction of like 1/5 op.
  1211. * Probably not worth it.
  1212. */
  1213. uint64_t current = scalar->limb[0] & 0xFFFF;
  1214. uint32_t mask = (1<<(table_bits+1))-1;
  1215. unsigned int w;
  1216. const unsigned int B_OVER_16 = sizeof(scalar->limb[0]) / 2;
  1217. for (w = 1; w<(SCALAR_BITS-1)/16+3; w++) {
  1218. if (w < (SCALAR_BITS-1)/16+1) {
  1219. /* Refill the 16 high bits of current */
  1220. current += (uint32_t)((scalar->limb[w/B_OVER_16]>>(16*(w%B_OVER_16)))<<16);
  1221. }
  1222. while (current & 0xFFFF) {
  1223. assert(position >= 0);
  1224. uint32_t pos = __builtin_ctz((uint32_t)current), odd = (uint32_t)current >> pos;
  1225. int32_t delta = odd & mask;
  1226. if (odd & 1<<(table_bits+1)) delta -= (1<<(table_bits+1));
  1227. current -= delta << pos;
  1228. control[position].power = pos + 16*(w-1);
  1229. control[position].addend = delta;
  1230. position--;
  1231. }
  1232. current >>= 16;
  1233. }
  1234. assert(current==0);
  1235. position++;
  1236. unsigned int n = table_size - position;
  1237. unsigned int i;
  1238. for (i=0; i<n; i++) {
  1239. control[i] = control[i+position];
  1240. }
  1241. return n-1;
  1242. }
  1243. static void
  1244. prepare_wnaf_table(
  1245. pniels_t *output,
  1246. const point_t working,
  1247. unsigned int tbits
  1248. ) {
  1249. point_t tmp;
  1250. int i;
  1251. pt_to_pniels(output[0], working);
  1252. if (tbits == 0) return;
  1253. API_NS(point_double)(tmp,working);
  1254. pniels_t twop;
  1255. pt_to_pniels(twop, tmp);
  1256. add_pniels_to_pt(tmp, output[0],0);
  1257. pt_to_pniels(output[1], tmp);
  1258. for (i=2; i < 1<<tbits; i++) {
  1259. add_pniels_to_pt(tmp, twop,0);
  1260. pt_to_pniels(output[i], tmp);
  1261. }
  1262. API_NS(point_destroy)(tmp);
  1263. decaf_bzero(twop,sizeof(twop));
  1264. }
/* The fixed-base wNAF table is stored as raw field elements and viewed
 * through a niels_t pointer. */
extern const gf API_NS(precomputed_wnaf_as_fe)[];
static const niels_t *API_NS(wnaf_base) = (const niels_t *)API_NS(precomputed_wnaf_as_fe);
/* Byte size of the fixed wNAF table (exported for the table generator). */
const size_t API_NS(sizeof_precomputed_wnafs) __attribute((visibility("hidden")))
    = sizeof(niels_t)<<DECAF_WNAF_FIXED_TABLE_BITS;

void API_NS(precompute_wnafs) (
    niels_t out[1<<DECAF_WNAF_FIXED_TABLE_BITS],
    const point_t base
) __attribute__ ((visibility ("hidden")));
  1273. void API_NS(precompute_wnafs) (
  1274. niels_t out[1<<DECAF_WNAF_FIXED_TABLE_BITS],
  1275. const point_t base
  1276. ) {
  1277. pniels_t tmp[1<<DECAF_WNAF_FIXED_TABLE_BITS];
  1278. gf zs[1<<DECAF_WNAF_FIXED_TABLE_BITS], zis[1<<DECAF_WNAF_FIXED_TABLE_BITS];
  1279. int i;
  1280. prepare_wnaf_table(tmp,base,DECAF_WNAF_FIXED_TABLE_BITS);
  1281. for (i=0; i<1<<DECAF_WNAF_FIXED_TABLE_BITS; i++) {
  1282. memcpy(out[i], tmp[i]->n, sizeof(niels_t));
  1283. gf_copy(zs[i], tmp[i]->z);
  1284. }
  1285. batch_normalize_niels(out, (const gf *)zs, zis, 1<<DECAF_WNAF_FIXED_TABLE_BITS);
  1286. decaf_bzero(tmp,sizeof(tmp));
  1287. decaf_bzero(zs,sizeof(zs));
  1288. decaf_bzero(zis,sizeof(zis));
  1289. }
/**
 * Variable-time double scalar multiplication:
 * combo = scalar1 * (precomputed base) + scalar2 * base2.
 *
 * NOT constant time — for non-secret scalars only (e.g. signature
 * verification).  Both scalars are wNAF-recoded; base2 gets an on-the-fly
 * odd-multiples table while the base point uses the precomputed wnaf_base.
 */
void API_NS(base_double_scalarmul_non_secret) (
    point_t combo,
    const scalar_t scalar1,
    const point_t base2,
    const scalar_t scalar2
) {
    const int table_bits_var = DECAF_WNAF_VAR_TABLE_BITS,
        table_bits_pre = DECAF_WNAF_FIXED_TABLE_BITS;
    struct smvt_control control_var[SCALAR_BITS/(table_bits_var+1)+3];
    struct smvt_control control_pre[SCALAR_BITS/(table_bits_pre+1)+3];

    int ncb_pre = recode_wnaf(control_pre, scalar1, table_bits_pre);
    int ncb_var = recode_wnaf(control_var, scalar2, table_bits_var);

    pniels_t precmp_var[1<<table_bits_var];
    prepare_wnaf_table(precmp_var, base2, table_bits_var);

    int contp=0, contv=0, i = control_var[0].power;

    /* Initialize the accumulator from whichever control stream has the
     * highest-order digit (identity when both scalars are zero). */
    if (i < 0) {
        /* NOTE(review): this early return skips the bzero cleanup below;
         * harmless here since the inputs are non-secret. */
        API_NS(point_copy)(combo, API_NS(point_identity));
        return;
    } else if (i > control_pre[0].power) {
        pniels_to_pt(combo, precmp_var[control_var[0].addend >> 1]);
        contv++;
    } else if (i == control_pre[0].power && i >=0 ) {
        pniels_to_pt(combo, precmp_var[control_var[0].addend >> 1]);
        add_niels_to_pt(combo, API_NS(wnaf_base)[control_pre[0].addend >> 1], i);
        contv++; contp++;
    } else {
        i = control_pre[0].power;
        niels_to_pt(combo, API_NS(wnaf_base)[control_pre[0].addend >> 1]);
        contp++;
    }

    /* Shared double-and-add ladder over the remaining bit positions.
     * Digits are stored halved (addend >> 1) since they are always odd. */
    for (i--; i >= 0; i--) {
        int cv = (i==control_var[contv].power), cp = (i==control_pre[contp].power);
        point_double_internal(combo,combo,i && !(cv||cp));
        if (cv) {
            assert(control_var[contv].addend);

            if (control_var[contv].addend > 0) {
                add_pniels_to_pt(combo, precmp_var[control_var[contv].addend >> 1], i&&!cp);
            } else {
                sub_pniels_from_pt(combo, precmp_var[(-control_var[contv].addend) >> 1], i&&!cp);
            }
            contv++;
        }

        if (cp) {
            assert(control_pre[contp].addend);

            if (control_pre[contp].addend > 0) {
                add_niels_to_pt(combo, API_NS(wnaf_base)[control_pre[contp].addend >> 1], i);
            } else {
                sub_niels_from_pt(combo, API_NS(wnaf_base)[(-control_pre[contp].addend) >> 1], i);
            }
            contp++;
        }
    }

    /* This function is non-secret, but whatever this is cheap. */
    decaf_bzero(control_var,sizeof(control_var));
    decaf_bzero(control_pre,sizeof(control_pre));
    decaf_bzero(precmp_var,sizeof(precmp_var));

    assert(contv == ncb_var); (void)ncb_var;
    assert(contp == ncb_pre); (void)ncb_pre;
}
/** Securely erase a point from memory. */
void API_NS(point_destroy) (
    point_t point
) {
    decaf_bzero(point, sizeof(point_t));
}
/** Securely erase a precomputed table from memory. */
void API_NS(precomputed_destroy) (
    precomputed_s *pre
) {
    decaf_bzero(pre, API_NS(sizeof_precomputed_s));
}