You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 
 

970 lines
28 KiB

  1. /**
  2. * @cond internal
  3. * @file ec_point.c
  4. * @copyright
  5. * Copyright (c) 2014 Cryptography Research, Inc. \n
  6. * Released under the MIT License. See LICENSE.txt for license information.
  7. * @author Mike Hamburg
  8. * @warning This file was automatically generated.
  9. * Then it was edited by hand. Good luck, have fun.
  10. */
  11. #include "ec_point.h"
  12. #include "magic.h"
/**
 * d += e, mixed addition of a cached Niels-form point into a twisted
 * extensible point, in place.
 *
 * The Niels fields are used as: e->a multiplies (y-x), e->b multiplies
 * (x+y), e->c multiplies u*t (see the convert_* routines for how a/b/c
 * are produced).  The *_nr ops are unreduced adds/subs; the exact
 * statement order controls carry growth, so do not reorder.
 */
void
add_tw_niels_to_tw_extensible (
    struct tw_extensible_t* d,
    const struct tw_niels_t* e
) {
    ANALYZE_THIS_ROUTINE_CAREFULLY;
    field_a_t L0, L1;
    field_sub ( L1, d->y, d->x );      /* L1 = y - x */
    field_mul ( L0, e->a, L1 );        /* L0 = a*(y-x) */
    field_add_nr ( L1, d->x, d->y );   /* L1 = x + y (unreduced) */
    field_mul ( d->y, e->b, L1 );      /* y  = b*(x+y) */
    field_mul ( L1, d->u, d->t );
    field_mul ( d->x, e->c, L1 );      /* x  = c*u*t */
    field_add_nr ( d->u, L0, d->y );   /* u' = a*(y-x) + b*(x+y) */
    field_sub_nr ( d->t, d->y, L0 );   /* t' = b*(x+y) - a*(y-x) */
    field_sub_nr ( d->y, d->z, d->x );
    field_add_nr ( L0, d->x, d->z );
    field_mul ( d->z, L0, d->y );
    field_mul ( d->x, d->y, d->t );
    field_mul ( d->y, L0, d->u );
}
/**
 * d -= e, mixed subtraction of a cached Niels-form point from a twisted
 * extensible point, in place.
 *
 * Mirror image of add_tw_niels_to_tw_extensible: the roles of e->a and
 * e->b are swapped and the final z recombination uses (z-x) instead of
 * (z+x) in the opposite slot.  Unreduced (*_nr) ops — statement order
 * controls carry growth, do not reorder.
 */
void
sub_tw_niels_from_tw_extensible (
    struct tw_extensible_t* d,
    const struct tw_niels_t* e
) {
    ANALYZE_THIS_ROUTINE_CAREFULLY;
    field_a_t L0, L1;
    field_sub_nr ( L1, d->y, d->x );   /* L1 = y - x (unreduced) */
    field_mul ( L0, e->b, L1 );        /* L0 = b*(y-x) */
    field_add_nr ( L1, d->x, d->y );   /* L1 = x + y */
    field_mul ( d->y, e->a, L1 );      /* y  = a*(x+y) */
    field_mul ( L1, d->u, d->t );
    field_mul ( d->x, e->c, L1 );      /* x  = c*u*t */
    field_add_nr ( d->u, L0, d->y );
    field_sub_nr ( d->t, d->y, L0 );
    field_add_nr ( d->y, d->x, d->z );
    field_sub_nr ( L0, d->z, d->x );
    field_mul ( d->z, L0, d->y );
    field_mul ( d->x, d->y, d->t );
    field_mul ( d->y, L0, d->u );
}
  55. void
  56. add_tw_pniels_to_tw_extensible (
  57. struct tw_extensible_t* e,
  58. const struct tw_pniels_t* a
  59. ) {
  60. field_a_t L0;
  61. field_mul ( L0, e->z, a->z );
  62. field_copy ( e->z, L0 );
  63. add_tw_niels_to_tw_extensible( e, &a->n );
  64. }
  65. void
  66. sub_tw_pniels_from_tw_extensible (
  67. struct tw_extensible_t* e,
  68. const struct tw_pniels_t* a
  69. ) {
  70. field_a_t L0;
  71. field_mul ( L0, e->z, a->z );
  72. field_copy ( e->z, L0 );
  73. sub_tw_niels_from_tw_extensible( e, &a->n );
  74. }
/**
 * Double the twisted extensible point a in place.
 *
 * Built on unreduced (*_nr) adds/subs; the field_bias and
 * IF32(field_weak_reduce) calls compensate for negative / oversized
 * limbs on the 32- vs 64-bit limb schedules.  Do not reorder or remove
 * the bias/reduce lines without re-analyzing carry bounds.
 */
void
double_tw_extensible (
    struct tw_extensible_t* a
) {
    ANALYZE_THIS_ROUTINE_CAREFULLY;
    field_a_t L0, L1, L2;
    field_sqr ( L2, a->x );            /* L2 = x^2 */
    field_sqr ( L0, a->y );            /* L0 = y^2 */
    field_add_nr ( a->u, L2, L0 );     /* u  = x^2 + y^2 */
    field_add_nr ( a->t, a->y, a->x );
    field_sqr ( L1, a->t );            /* (x+y)^2 */
    field_sub_nr ( a->t, L1, a->u );   /* t  = 2xy */
    field_bias ( a->t, 3 );
    IF32( field_weak_reduce( a->t ) );
    field_sub_nr ( L1, L0, L2 );       /* L1 = y^2 - x^2 */
    field_sqr ( a->x, a->z );          /* z^2 */
    field_bias ( a->x, 2-is32 /*is32 ? 1 : 2*/ );
    field_add_nr ( a->z, a->x, a->x ); /* 2z^2 */
    field_sub_nr ( L0, a->z, L1 );
    IF32( field_weak_reduce( L0 ) );
    field_mul ( a->z, L1, L0 );
    field_mul ( a->x, L0, a->t );
    field_mul ( a->y, L1, a->u );
}
/**
 * Double the (untwisted) extensible point a in place.
 *
 * Same shape as double_tw_extensible but with the roles of the
 * temporaries shifted (here a->u ends up holding y^2 - x^2).  Unreduced
 * arithmetic with explicit bias/weak-reduce compensation — do not
 * reorder.
 */
void
double_extensible (
    struct extensible_t* a
) {
    ANALYZE_THIS_ROUTINE_CAREFULLY;
    field_a_t L0, L1, L2;
    field_sqr ( L2, a->x );            /* L2 = x^2 */
    field_sqr ( L0, a->y );            /* L0 = y^2 */
    field_add_nr ( L1, L2, L0 );       /* L1 = x^2 + y^2 */
    field_add_nr ( a->t, a->y, a->x );
    field_sqr ( a->u, a->t );          /* (x+y)^2 */
    field_sub_nr ( a->t, a->u, L1 );   /* t = 2xy */
    field_bias ( a->t, 3 );
    IF32( field_weak_reduce( a->t ) );
    field_sub_nr ( a->u, L0, L2 );     /* u = y^2 - x^2 */
    field_sqr ( a->x, a->z );
    field_bias ( a->x, 2 );
    field_add_nr ( a->z, a->x, a->x ); /* 2z^2 */
    field_sub_nr ( L0, a->z, L1 );
    IF32( field_weak_reduce( L0 ) );
    field_mul ( a->z, L1, L0 );
    field_mul ( a->x, L0, a->t );
    field_mul ( a->y, L1, a->u );
}
/**
 * b = twist-and-double of a: maps the untwisted point a to the twisted
 * curve while doubling (presumably via the 2-isogeny between the
 * curves — TODO confirm direction against the library docs).
 *
 * b's own fields double as scratch, so the statement order (which reads
 * b->x/b->z before overwriting them) is significant.
 */
void
twist_and_double (
    struct tw_extensible_t* b,
    const struct extensible_t* a
) {
    field_a_t L0;
    field_sqr ( b->x, a->x );          /* x^2 */
    field_sqr ( b->z, a->y );          /* y^2 */
    field_add ( b->u, b->x, b->z );    /* u = x^2 + y^2 */
    field_add ( b->t, a->y, a->x );
    field_sqr ( L0, b->t );            /* (x+y)^2 */
    field_sub ( b->t, L0, b->u );      /* t = 2xy */
    field_sub ( L0, b->z, b->x );      /* y^2 - x^2 */
    field_sqr ( b->x, a->z );
    field_add ( b->z, b->x, b->x );    /* 2z^2 */
    field_sub ( b->y, b->z, b->u );
    field_mul ( b->z, L0, b->y );
    field_mul ( b->x, b->y, b->t );
    field_mul ( b->y, L0, b->u );
}
/**
 * b = untwist-and-double of a: inverse-direction counterpart of
 * twist_and_double, mapping a twisted point back to the untwisted curve
 * while doubling.  Same scratch-in-output pattern; order significant.
 */
void
untwist_and_double (
    struct extensible_t* b,
    const struct tw_extensible_t* a
) {
    field_a_t L0;
    field_sqr ( b->x, a->x );          /* x^2 */
    field_sqr ( b->z, a->y );          /* y^2 */
    field_add ( L0, b->x, b->z );      /* x^2 + y^2 */
    field_add ( b->t, a->y, a->x );
    field_sqr ( b->u, b->t );          /* (x+y)^2 */
    field_sub ( b->t, b->u, L0 );      /* t = 2xy */
    field_sub ( b->u, b->z, b->x );    /* u = y^2 - x^2 */
    field_sqr ( b->x, a->z );
    field_add ( b->z, b->x, b->x );    /* 2z^2 */
    field_sub ( b->y, b->z, b->u );
    field_mul ( b->z, L0, b->y );
    field_mul ( b->x, b->y, b->t );
    field_mul ( b->y, L0, b->u );
}
  163. void
  164. convert_tw_affine_to_tw_pniels (
  165. struct tw_pniels_t* b,
  166. const struct tw_affine_t* a
  167. ) {
  168. field_sub ( b->n.a, a->y, a->x );
  169. field_add ( b->n.b, a->x, a->y );
  170. field_mul ( b->z, a->y, a->x );
  171. field_mulw_scc_wr ( b->n.c, b->z, 2*EDWARDS_D-2 );
  172. field_set_ui( b->z, 2 );
  173. }
  174. void
  175. convert_tw_affine_to_tw_extensible (
  176. struct tw_extensible_t* b,
  177. const struct tw_affine_t* a
  178. ) {
  179. field_copy ( b->x, a->x );
  180. field_copy ( b->y, a->y );
  181. field_set_ui( b->z, 1 );
  182. field_copy ( b->t, a->x );
  183. field_copy ( b->u, a->y );
  184. }
  185. void
  186. convert_affine_to_extensible (
  187. struct extensible_t* b,
  188. const struct affine_t* a
  189. ) {
  190. field_copy ( b->x, a->x );
  191. field_copy ( b->y, a->y );
  192. field_set_ui( b->z, 1 );
  193. field_copy ( b->t, a->x );
  194. field_copy ( b->u, a->y );
  195. }
  196. void
  197. convert_tw_extensible_to_tw_pniels (
  198. struct tw_pniels_t* b,
  199. const struct tw_extensible_t* a
  200. ) {
  201. field_sub ( b->n.a, a->y, a->x );
  202. field_add ( b->n.b, a->x, a->y );
  203. field_mul ( b->z, a->u, a->t );
  204. field_mulw_scc_wr ( b->n.c, b->z, 2*EDWARDS_D-2 );
  205. field_add ( b->z, a->z, a->z );
  206. }
  207. void
  208. convert_tw_pniels_to_tw_extensible (
  209. struct tw_extensible_t* e,
  210. const struct tw_pniels_t* d
  211. ) {
  212. field_add ( e->u, d->n.b, d->n.a );
  213. field_sub ( e->t, d->n.b, d->n.a );
  214. field_mul ( e->x, d->z, e->t );
  215. field_mul ( e->y, d->z, e->u );
  216. field_sqr ( e->z, d->z );
  217. }
  218. void
  219. convert_tw_niels_to_tw_extensible (
  220. struct tw_extensible_t* e,
  221. const struct tw_niels_t* d
  222. ) {
  223. field_add ( e->y, d->b, d->a );
  224. field_sub ( e->x, d->b, d->a );
  225. field_set_ui( e->z, 1 );
  226. field_copy ( e->t, e->x );
  227. field_copy ( e->u, e->y );
  228. }
  229. void
  230. deserialize_montgomery_decaf (
  231. struct montgomery_aux_t* a,
  232. const field_a_t s
  233. ) {
  234. field_copy ( a->s0, s );
  235. field_copy ( a->xa, s );
  236. field_set_ui ( a->za, 1 );
  237. field_set_ui ( a->xd, 1 );
  238. field_set_ui ( a->zd, 0 );
  239. }
/**
 * One combined double-and-add step of the Montgomery ladder started by
 * deserialize_montgomery_decaf.  The inline comments name the classic
 * xDBLADD intermediates (A = xd+zd, B = xd-zd, C = xa+za, D = xa-za,
 * DA, CB).  Every field of a is used as scratch; the order is
 * load-bearing.
 */
void
montgomery_aux_step (
    struct montgomery_aux_t* a
) {
    field_add ( a->xs, a->xa, a->za ); // xs = C
    field_sub ( a->zs, a->xa, a->za ); // zs = D
    field_add ( a->xa, a->xd, a->zd ); // xa = A
    field_sub ( a->za, a->xd, a->zd ); // za = B
    field_mul ( a->xd, a->xa, a->zs ); // xd = DA
    field_mul ( a->zd, a->xs, a->za ); // zd = CB
    field_add ( a->xs, a->xd, a->zd ); // xs = DA+CB
    field_sub ( a->zd, a->xd, a->zd ); // zd = DA-CB
    field_mul ( a->zs, a->zd, a->s0 ); // zs = (DA-CB)*s0
    field_sqr ( a->zd, a->xa );        // zd = AA
    field_sqr ( a->xa, a->za );        // xa = BB
    field_sub ( a->za, a->zd, a->xa ); // za = E
    field_mul ( a->xd, a->xa, a->zd ); // xd final
    field_mulw_scc_wr ( a->zd, a->xa, 1-EDWARDS_D ); // zd = (1-d)*E
    field_add ( a->xa, a->za, a->zd ); // BB + (1-d)*E
    field_mul ( a->zd, a->xa, a->za ); // zd final
    field_sqr ( a->xa, a->xs );        // (DA+CB)^2
    field_sqr ( a->za, a->zs );        // (DA-CB)^2*s0^2
}
/**
 * One ladder step for the montgomery_t state set up by
 * deserialize_montgomery.  Uses unreduced ops plus explicit bias /
 * 32-bit weak-reduce compensation near the end — do not reorder.
 */
void
montgomery_step (
    struct montgomery_t* a
) {
    ANALYZE_THIS_ROUTINE_CAREFULLY;
    field_a_t L0, L1;
    field_add_nr ( L0, a->zd, a->xd ); /* L0 = xd + zd */
    field_sub ( L1, a->xd, a->zd );    /* L1 = xd - zd */
    field_sub ( a->zd, a->xa, a->za ); /* xa - za */
    field_mul ( a->xd, L0, a->zd );
    field_add_nr ( a->zd, a->za, a->xa );
    field_mul ( a->za, L1, a->zd );
    field_add_nr ( a->xa, a->za, a->xd );
    field_sqr ( a->zd, a->xa );
    field_mul ( a->xa, a->z0, a->zd ); /* scale by the fixed z0 */
    field_sub ( a->zd, a->xd, a->za );
    field_sqr ( a->za, a->zd );
    field_sqr ( a->xd, L0 );
    field_sqr ( L0, L1 );
    field_mulw_scc ( a->zd, a->xd, 1-EDWARDS_D ); /* FIXME PERF MULW */
    field_sub ( L1, a->xd, L0 );
    field_mul ( a->xd, L0, a->zd );
    field_sub_nr ( L0, a->zd, L1 );
    field_bias ( L0, 4 - 2*is32 /*is32 ? 2 : 4*/ );
    IF32( field_weak_reduce( L0 ) );
    field_mul ( a->zd, L0, L1 );
}
  290. void
  291. deserialize_montgomery (
  292. struct montgomery_t* a,
  293. const field_a_t sbz
  294. ) {
  295. field_sqr ( a->z0, sbz );
  296. field_set_ui( a->xd, 1 );
  297. field_set_ui( a->zd, 0 );
  298. field_set_ui( a->xa, 1 );
  299. field_copy ( a->za, a->z0 );
  300. }
/**
 * Recover the serialized field element b from a finished Montgomery
 * ladder, given the original serialized input sbz.
 *
 * Returns a mask: nonzero (all-ones) when the result corresponds to the
 * identity-ish special cases (the final square-root check L0 == 1
 * fails to distinguish, or sbz == 0).  The L5/L6 masks patch up the
 * zd == 0 (point at infinity) case in constant time; L4 selects whether
 * the computed value is written to b at all.  All paths run in constant
 * time — no early exits.
 */
mask_t
serialize_montgomery (
    field_a_t b,
    const struct montgomery_t* a,
    const field_a_t sbz
) {
    mask_t L4, L5, L6;
    field_a_t L0, L1, L2, L3;
    field_mul ( L3, a->z0, a->zd );
    field_sub ( L1, L3, a->xd );
    field_mul ( L3, a->za, L1 );
    field_mul ( L2, a->z0, a->xd );
    field_sub ( L1, L2, a->zd );
    field_mul ( L0, a->xa, L1 );
    field_add ( L2, L0, L3 );
    field_sub ( L1, L3, L0 );
    field_mul ( L3, L1, L2 );
    field_copy ( L2, a->z0 );
    field_addw ( L2, 1 );
    field_sqr ( L0, L2 );
    field_mulw_scc_wr ( L1, L0, EDWARDS_D-1 );
    field_add ( L2, a->z0, a->z0 );
    field_add ( L0, L2, L2 );
    field_add ( L2, L0, L1 );          /* 4*z0 + (d-1)*(z0+1)^2 */
    field_mul ( L0, a->xd, L2 );
    L5 = field_is_zero( a->zd );       /* infinity mask */
    L6 = - L5;
    constant_time_mask ( L1, L0, sizeof(L1), L5 );
    field_add ( L2, L1, a->zd );       /* zd, or L0 when zd == 0 */
    L4 = ~ L5;
    field_mul ( L1, sbz, L3 );
    field_addw ( L1, L6 );             /* nudge by the infinity mask */
    field_mul ( L3, L2, L1 );
    field_mul ( L1, L3, L2 );
    field_mul ( L2, L3, a->xd );
    field_mul ( L3, L1, L2 );
    field_isr ( L0, L3 );              /* inverse square root */
    field_mul ( L2, L1, L0 );
    field_sqr ( L1, L0 );
    field_mul ( L0, L3, L1 );          /* should normalize to 1 on success */
    constant_time_mask ( b, L2, sizeof(L1), L4 );
    field_subw( L0, 1 );
    L5 = field_is_zero( L0 );          /* sqrt consistency check */
    L4 = field_is_zero( sbz );         /* zero-input special case */
    return L5 | L4;
}
/**
 * Serialize the extensible point a into the field element b.
 * Uses a single inverse-square-root (field_isr) over a product of the
 * needed quantities instead of separate inversions (batched-inversion
 * trick).  The final two lines recompute L0 as a consistency value but
 * only b is the output.
 */
void
serialize_extensible (
    field_a_t b,
    const struct extensible_t* a
) {
    field_a_t L0, L1, L2;
    field_sub ( L0, a->y, a->z );
    field_add ( b, a->z, a->y );
    field_mul ( L1, a->z, a->x );
    field_mul ( L2, L0, L1 );          /* (y-z)*z*x */
    field_mul ( L1, L2, L0 );
    field_mul ( L0, L2, b );
    field_mul ( L2, L1, L0 );
    field_isr ( L0, L2 );              /* 1/sqrt of the combined product */
    field_mul ( b, L1, L0 );
    field_sqr ( L1, L0 );
    field_mul ( L0, L2, L1 );
}
/**
 * Canonicalize a for Decaf encoding: negate it if its low bit is set
 * (so the encoded value is "even"), then strong-reduce so the byte
 * representation is unique.  Order matters: the low-bit test must see
 * the pre-reduction value that field_low_bit expects.
 */
void
decaf_make_even (
    field_a_t a
) {
    field_cond_neg ( a, field_low_bit(a) );
    field_strong_reduce ( a );
}
/**
 * Decaf-serialize the extensible point a into b, ending with
 * decaf_make_even for a canonical sign.  One field_isr does all the
 * inversion work; the original per-line comments are kept.
 */
void
decaf_serialize_extensible (
    field_a_t b,
    const struct extensible_t* a
) {
    field_a_t L0, L1, L2, L3;
    field_mulw_scc ( L2, a->y, EDWARDS_D ); // L2 = d*y
    field_mul ( L3, L2, a->t );     // L3 = d*y*t_
    field_mul ( L2, L3, a->u );     // L2 = d*y*t
    field_mul ( L0, a->x, a->z );   // L0 = x*z
    field_sub ( L3, L2, L0 );       // L3 = d*y*t - x*z
    field_add ( L0, a->y, a->z );   // L0 = y+z
    field_sub ( L1, a->y, a->z );   // L1 = y-z
    field_mul ( L2, L1, L0 );       // L2 = y^2-z^2
    field_isr ( L2, L2 );           // L2 = 1/sqrt(y^2-z^2)
    field_sqr ( L1, L2 );           // L1 = 1/(y^2-z^2)
    field_mul ( L0, L1, L3 );       // L0 = (d*y*t - z*x)/(y^2-z^2) = 1/x
    field_mul ( L1, L2, sqrt_d_minus_1 ); // L1 = sy
    field_add ( L3, L1, L1 );       // L3 = 2*sy
    field_neg ( L3, L3 );           // L3 = -2*sy
    field_mul ( L2, L3, a->z );     // L2 = -2*sy*z
    field_cond_neg ( L1, field_low_bit(L2) ); // cond-neg sy for canonical sign
    field_mul ( L2, L1, a->y );     // L2 = 2*sy*y
    field_add ( b, L0, L2 );
    decaf_make_even ( b );
}
/**
 * Decaf-serialize the twisted extensible point a into b; twisted-curve
 * counterpart of decaf_serialize_extensible, using (1-d) and
 * sqrt_minus_d in place of d and sqrt_d_minus_1.
 */
void
decaf_serialize_tw_extensible (
    field_a_t b,
    const struct tw_extensible_t* a
) {
    field_a_t L0, L1, L2, L3;
    field_mulw_scc ( L2, a->y, 1-EDWARDS_D ); // L2 = (1-d)*y
    field_mul ( L3, L2, a->t );     // L3 = (1-d)*y*t_
    field_mul ( L2, L3, a->u );     // L2 = (1-d)*y*t
    field_mul ( L0, a->x, a->z );   // L0 = x*z
    field_sub ( L3, L2, L0 );       // L3 = (1-d)*y*t - x*z
    field_add ( L0, a->z, a->y );   // L0 = z+y
    field_sub ( L1, a->z, a->y );   // L1 = z-y
    field_mul ( L2, L1, L0 );       // L2 = z^2-y^2
    field_isr ( L2, L2 );           // L2 = 1/sqrt(z^2-y^2)
    field_sqr ( L1, L2 );           // L1 = 1/(z^2-y^2)
    // NOTE(review): original comment claimed a (y^2-z^2) denominator here,
    // but L1 above is 1/(z^2-y^2) — verify the intended sign convention.
    field_mul ( L0, L1, L3 );       // L0 = ((1-d)*y*t - z*x)/(z^2-y^2) = 1/x (up to sign)
    field_mul ( L1, L2, sqrt_minus_d ); // L1 = sy
    field_add ( L3, L1, L1 );       // L3 = 2*sy
    field_neg ( L3, L3 );           // L3 = -2*sy
    field_mul ( L2, L3, a->z );     // L2 = -2*sy*z
    field_cond_neg ( L1, field_low_bit(L2) ); // cond-neg sy
    field_mul ( L2, L1, a->y );     // L2 = 2*sy*y
    field_add ( b, L0, L2 );
    decaf_make_even ( b );
}
/**
 * Decaf-decode the field element s into the affine point a.
 *
 * Returns a mask: all-ones on success.  The succ mask accumulates every
 * validity condition in constant time: s must be even (canonical sign),
 * s == 0 is the identity and is accepted only when allow_identity is
 * set, and the inverse-square-root consistency check must pass.
 * field_make_nonzero keeps the arithmetic well-defined in the zero
 * corner case; the final field_addw(a->y, -zero) patches y for the
 * identity.
 */
mask_t
decaf_deserialize_affine (
    struct affine_t *a,
    const field_a_t s,
    mask_t allow_identity
) {
    field_a_t L0, L1, L2, L3, L4, L5;
    mask_t succ, zero;
    zero = field_is_zero(s);
    succ = allow_identity | ~zero;
    succ &= ~field_low_bit(s);         /* reject non-canonical (odd) s */
    field_sqr ( L0, s );               /* s^2 */
    field_copy ( L1, L0 );
    field_addw ( L1, 1 );              /* 1 + s^2 */
    field_make_nonzero ( L1 );
    field_sqr ( L2, L1 );              /* (1+s^2)^2 */
    field_mulw_scc_wr ( L3, L0, -4*EDWARDS_D );
    field_add ( L3, L3, L2 );          /* (1+s^2)^2 - 4d*s^2 */
    field_mul ( L4, L3, L2 );
    field_mul ( L2, L4, L0 );
    field_isr ( L4, L2 );              /* inverse square root */
    field_sqr ( L5, L4 );
    field_mul ( L0, L5, L2 );
    field_addw( L0, 1 );
    succ &= ~field_is_zero( L0 );      /* isr consistency check */
    field_mul ( L2, L3, L1 );
    field_mul ( L3, L2, L4 );
    field_cond_neg ( L4, field_low_bit(L3) ); /* choose canonical root */
    field_mul ( L3, L4, s );
    field_sqr ( L4, L3 );
    field_mul ( L0, L2, L4 );
    field_add ( L0, L0, L0 );
    field_mul ( a->x, L0, s );
    field_mul ( L2, L1, L3 );
    field_neg ( L1, L1 );
    field_addw ( L1, 2 );              /* 1 - s^2 */
    field_mul ( a->y, L1, L2 );
    field_addw ( a->y, -zero );        /* force y for the identity case */
    return succ;
}
/**
 * Decaf-decode the field element s into the twisted affine point a.
 * Mirror of decaf_deserialize_affine with the twisted-curve constants:
 * L1 = 1 - s^2 instead of 1 + s^2, and the 4-4d scalar in place of -4d.
 * Returns an all-ones mask on success; same constant-time validity
 * accumulation as the untwisted decoder.
 */
mask_t
decaf_deserialize_tw_affine (
    struct tw_affine_t *a,
    const field_a_t s,
    mask_t allow_identity
) {
    field_a_t L0, L1, L2, L3, L4, L5;
    mask_t succ, zero;
    zero = field_is_zero(s);
    succ = allow_identity | ~zero;
    succ &= ~field_low_bit(s);         /* reject non-canonical (odd) s */
    field_sqr ( L0, s );               /* s^2 */
    field_neg ( L1, L0 );
    field_addw ( L1, 1 );              /* 1 - s^2 */
    field_make_nonzero ( L1 );
    field_sqr ( L2, L1 );
    field_mulw_scc_wr ( L3, L0, 4-4*EDWARDS_D );
    field_add ( L3, L3, L2 );
    field_mul ( L4, L3, L2 );
    field_mul ( L2, L4, L0 );
    field_isr ( L4, L2 );              /* inverse square root */
    field_sqr ( L5, L4 );
    field_mul ( L0, L5, L2 );
    field_addw( L0, 1 );
    succ &= ~field_is_zero( L0 );      /* isr consistency check */
    field_mul ( L2, L3, L1 );
    field_mul ( L3, L2, L4 );
    field_cond_neg ( L4, field_low_bit(L3) ); /* canonical root */
    field_mul ( L3, L4, s );
    field_sqr ( L4, L3 );
    field_mul ( L0, L2, L4 );
    field_add ( L0, L0, L0 );
    field_mul ( a->x, L0, s );
    field_mul ( L2, L1, L3 );
    field_neg ( L1, L1 );
    field_addw ( L1, 2 );              /* s^2 + 1 ... derived from 1-s^2 negated +2 */
    field_mul ( a->y, L1, L2 );
    field_addw ( a->y, -zero );        /* identity patch-up */
    return succ;
}
/**
 * Fused pipeline: map the twisted point a back to the untwisted curve,
 * double it, and serialize the result into b — without materializing
 * the intermediate point.  Single field_isr for all inversions.
 */
void
untwist_and_double_and_serialize (
    field_a_t b,
    const struct tw_extensible_t* a
) {
    field_a_t L0, L1, L2, L3;
    field_mul ( L3, a->y, a->x );      /* x*y */
    field_add ( b, a->y, a->x );
    field_sqr ( L1, b );               /* (x+y)^2 */
    field_add ( L2, L3, L3 );          /* 2xy */
    field_sub ( b, L1, L2 );           /* x^2 + y^2 */
    field_sqr ( L2, a->z );
    field_sqr ( L1, L2 );              /* z^4 */
    field_add ( b, b, b );
    field_mulw_scc ( L2, b, EDWARDS_D-1 );
    field_mulw_scc ( b, L2, EDWARDS_D-1 );
    field_mul ( L0, L2, L1 );
    field_mul ( L2, b, L0 );
    field_isr ( L0, L2 );              /* batched inverse sqrt */
    field_mul ( L1, b, L0 );
    field_sqr ( b, L0 );
    field_mul ( L0, L2, b );
    field_mul ( b, L1, L3 );
}
/**
 * b = twist of a, valid for even points.  Normalizes the result back to
 * z = 1 via an inverse-square-root, and patches y by
 * -field_is_zero(b->z) to handle the degenerate case in constant time.
 * b's fields double as scratch throughout; order matters.
 */
void
twist_even (
    struct tw_extensible_t* b,
    const struct extensible_t* a
) {
    field_sqr ( b->y, a->z );          /* z^2 */
    field_sqr ( b->z, a->x );          /* x^2 */
    field_sub ( b->u, b->y, b->z );    /* z^2 - x^2 */
    field_sub ( b->z, a->z, a->x );
    field_mul ( b->y, b->z, a->y );
    field_sub ( b->z, a->z, a->y );
    field_mul ( b->x, b->z, b->y );
    field_mul ( b->t, b->x, b->u );
    field_mul ( b->y, b->x, b->t );
    field_isr ( b->t, b->y );          /* inverse sqrt of the product */
    field_mul ( b->u, b->x, b->t );
    field_sqr ( b->x, b->t );
    field_mul ( b->t, b->y, b->x );
    field_mul ( b->x, a->x, b->u );    /* normalized x */
    field_mul ( b->y, a->y, b->u );    /* normalized y */
    field_addw ( b->y, -field_is_zero( b->z ) ); /* degenerate-case patch */
    field_set_ui( b->z, 1 );
    field_copy ( b->t, b->x );
    field_copy ( b->u, b->y );
}
/**
 * Test-only twist of a into b (as the name says: not for production
 * paths).  Handles the degenerate a->y == 0 / zero-denominator cases
 * with constant-time field_addw / field_is_zero patches at the end.
 * Heavy scratch reuse of b's fields — order matters.
 */
void
test_only_twist (
    struct tw_extensible_t* b,
    const struct extensible_t* a
) {
    field_a_t L0, L1;
    field_sqr ( b->u, a->z );          /* z^2 */
    field_sqr ( b->y, a->x );          /* x^2 */
    field_sub ( b->z, b->u, b->y );    /* z^2 - x^2 */
    field_add ( b->y, b->z, b->z );
    field_add ( b->u, b->y, b->y );    /* 4(z^2 - x^2) */
    field_sub ( b->y, a->z, a->x );
    field_mul ( b->x, b->y, a->y );
    field_sub ( b->z, a->z, a->y );
    field_mul ( b->t, b->z, b->x );
    field_mul ( L1, b->t, b->u );
    field_mul ( b->x, b->t, L1 );
    field_isr ( L0, b->x );            /* inverse sqrt */
    field_mul ( b->u, b->t, L0 );
    field_sqr ( L1, L0 );
    field_mul ( b->t, b->x, L1 );
    field_add ( L1, a->y, a->x );
    field_sub ( L0, a->x, a->y );
    field_mul ( b->x, b->t, L0 );
    field_add ( L0, b->x, L1 );
    field_sub ( b->t, L1, b->x );
    field_mul ( b->x, L0, b->u );
    field_addw ( b->x, -field_is_zero( b->y ) ); /* degenerate patch */
    field_mul ( b->y, b->t, b->u );
    field_addw ( b->y, -field_is_zero( b->z ) ); /* degenerate patch */
    field_set_ui( b->z, 1+field_is_zero( a->y ) );
    field_copy ( b->t, b->x );
    field_copy ( b->u, b->y );
}
  587. mask_t
  588. is_even_pt (
  589. const struct extensible_t* a
  590. ) {
  591. field_a_t L0, L1, L2;
  592. field_sqr ( L2, a->z );
  593. field_sqr ( L1, a->x );
  594. field_sub ( L0, L2, L1 );
  595. return field_is_square ( L0 );
  596. }
  597. mask_t
  598. is_even_tw (
  599. const struct tw_extensible_t* a
  600. ) {
  601. field_a_t L0, L1, L2;
  602. field_sqr ( L2, a->z );
  603. field_sqr ( L1, a->x );
  604. field_add ( L0, L1, L2 );
  605. return field_is_square ( L0 );
  606. }
/**
 * Decode the serialized field element sz into the affine point a
 * (non-Decaf, legacy encoding).  Returns a mask: all-ones when the
 * final square-root consistency value L0 normalizes to 1.
 * a->x / a->y are used as scratch before their final assignment.
 */
mask_t
deserialize_affine (
    struct affine_t* a,
    const field_a_t sz
) {
    field_a_t L0, L1, L2, L3;
    field_sqr ( L1, sz );              /* sz^2 */
    field_copy ( L3, L1 );
    field_addw ( L3, 1 );              /* sz^2 + 1 */
    field_sqr ( L2, L3 );
    field_mulw_scc ( a->x, L2, EDWARDS_D-1 ); /* PERF MULW */
    field_add ( L3, L1, L1 ); /* FIXME: i adjusted the bias here, was it right? */
    field_add ( a->y, L3, L3 );        /* 4*sz^2 */
    field_add ( L3, a->y, a->x );
    field_copy ( a->y, L1 );
    field_neg ( a->x, a->y );
    field_addw ( a->x, 1 );            /* 1 - sz^2 */
    field_mul ( a->y, a->x, L3 );
    field_sqr ( L2, a->x );
    field_mul ( L0, L2, a->y );
    field_mul ( a->y, a->x, L0 );
    field_isr ( L3, a->y );            /* inverse square root */
    field_mul ( a->y, L2, L3 );
    field_sqr ( L2, L3 );
    field_mul ( L3, L0, L2 );
    field_mul ( L0, a->x, L3 );
    field_add ( L2, a->y, a->y );
    field_mul ( a->x, sz, L2 );        /* final x */
    field_addw ( L1, 1 );
    field_mul ( a->y, L1, L3 );        /* final y */
    field_subw( L0, 1 );
    return field_is_zero( L0 );        /* success iff the check value was 1 */
}
/**
 * Decode sz and simultaneously map onto the twisted curve
 * ("approx": callers are expected to tolerate the sign/cofactor
 * ambiguity — TODO confirm against the calling code).  Returns a mask:
 * all-ones when the square-root consistency value a->t normalized to 1.
 * On return z = 1 and t/u mirror x/y.  a's fields serve as scratch
 * throughout; order matters.
 */
mask_t
deserialize_and_twist_approx (
    struct tw_extensible_t* a,
    const field_a_t sz
) {
    field_a_t L0, L1;
    field_sqr ( a->z, sz );            /* sz^2 */
    field_copy ( a->y, a->z );
    field_addw ( a->y, 1 );            /* sz^2 + 1 */
    field_sqr ( L0, a->y );
    field_mulw_scc ( a->x, L0, EDWARDS_D-1 );
    field_add ( a->y, a->z, a->z );
    field_add ( a->u, a->y, a->y );    /* 4*sz^2 */
    field_add ( a->y, a->u, a->x );
    field_sqr ( a->x, a->z );
    field_neg ( a->u, a->x );
    field_addw ( a->u, 1 );            /* 1 - sz^4 */
    field_mul ( a->x, sqrt_d_minus_1, a->u );
    field_mul ( L0, a->x, a->y );
    field_mul ( a->t, L0, a->y );
    field_mul ( a->u, a->x, a->t );
    field_mul ( a->t, a->u, L0 );
    field_mul ( a->y, a->x, a->t );
    field_isr ( L0, a->y );            /* inverse square root */
    field_mul ( a->y, a->u, L0 );
    field_sqr ( L1, L0 );
    field_mul ( a->u, a->t, L1 );
    field_mul ( a->t, a->x, a->u );
    field_add ( a->x, sz, sz );
    field_mul ( L0, a->u, a->x );
    field_copy ( a->x, a->z );
    field_neg ( L1, a->x );
    field_addw ( L1, 1 );              /* 1 - sz^2 */
    field_mul ( a->x, L1, L0 );
    field_mul ( L0, a->u, a->y );
    field_addw ( a->z, 1 );
    field_mul ( a->y, a->z, L0 );
    field_subw( a->t, 1 );
    mask_t ret = field_is_zero( a->t ); /* consistency check */
    field_set_ui( a->z, 1 );
    field_copy ( a->t, a->x );
    field_copy ( a->u, a->y );
    return ret;
}
  684. void
  685. set_identity_extensible (
  686. struct extensible_t* a
  687. ) {
  688. field_set_ui( a->x, 0 );
  689. field_set_ui( a->y, 1 );
  690. field_set_ui( a->z, 1 );
  691. field_set_ui( a->t, 0 );
  692. field_set_ui( a->u, 0 );
  693. }
  694. void
  695. set_identity_tw_extensible (
  696. struct tw_extensible_t* a
  697. ) {
  698. field_set_ui( a->x, 0 );
  699. field_set_ui( a->y, 1 );
  700. field_set_ui( a->z, 1 );
  701. field_set_ui( a->t, 0 );
  702. field_set_ui( a->u, 0 );
  703. }
  704. void
  705. set_identity_affine (
  706. struct affine_t* a
  707. ) {
  708. field_set_ui( a->x, 0 );
  709. field_set_ui( a->y, 1 );
  710. }
  711. mask_t
  712. decaf_eq_extensible (
  713. const struct extensible_t* a,
  714. const struct extensible_t* b
  715. ) {
  716. field_a_t L0, L1, L2;
  717. field_mul ( L2, b->y, a->x );
  718. field_mul ( L1, a->y, b->x );
  719. field_sub ( L0, L2, L1 );
  720. field_bias ( L0, 2 );
  721. return field_is_zero ( L0 );
  722. }
  723. mask_t
  724. decaf_eq_tw_extensible (
  725. const struct tw_extensible_t* a,
  726. const struct tw_extensible_t* b
  727. ) {
  728. field_a_t L0, L1, L2;
  729. field_mul ( L2, b->y, a->x );
  730. field_mul ( L1, a->y, b->x );
  731. field_sub ( L0, L2, L1 );
  732. field_bias ( L0, 2 );
  733. return field_is_zero ( L0 );
  734. }
  735. mask_t
  736. eq_affine (
  737. const struct affine_t* a,
  738. const struct affine_t* b
  739. ) {
  740. mask_t L1, L2;
  741. field_a_t L0;
  742. field_sub ( L0, a->x, b->x );
  743. L2 = field_is_zero( L0 );
  744. field_sub ( L0, a->y, b->y );
  745. L1 = field_is_zero( L0 );
  746. return L2 & L1;
  747. }
  748. mask_t
  749. eq_extensible (
  750. const struct extensible_t* a,
  751. const struct extensible_t* b
  752. ) {
  753. mask_t L3, L4;
  754. field_a_t L0, L1, L2;
  755. field_mul ( L2, b->z, a->x );
  756. field_mul ( L1, a->z, b->x );
  757. field_sub ( L0, L2, L1 );
  758. L4 = field_is_zero( L0 );
  759. field_mul ( L2, b->z, a->y );
  760. field_mul ( L1, a->z, b->y );
  761. field_sub ( L0, L2, L1 );
  762. L3 = field_is_zero( L0 );
  763. return L4 & L3;
  764. }
  765. mask_t
  766. eq_tw_extensible (
  767. const struct tw_extensible_t* a,
  768. const struct tw_extensible_t* b
  769. ) {
  770. mask_t L3, L4;
  771. field_a_t L0, L1, L2;
  772. field_mul ( L2, b->z, a->x );
  773. field_mul ( L1, a->z, b->x );
  774. field_sub ( L0, L2, L1 );
  775. L4 = field_is_zero( L0 );
  776. field_mul ( L2, b->z, a->y );
  777. field_mul ( L1, a->z, b->y );
  778. field_sub ( L0, L2, L1 );
  779. L3 = field_is_zero( L0 );
  780. return L4 & L3;
  781. }
/**
 * Elligator-style injection: hash-to-curve map taking the field element
 * r to an affine point a.  ("2s" presumably names the specific variant;
 * confirm against the library documentation.)  Constant time; the
 * degenerate L8 == 0 case is patched at the end with
 * field_addw(a->y, -field_is_zero(L8)).  The historical FIXME below is
 * preserved verbatim — do not silently "fix" the sign choice without
 * doing the analysis it asks for.
 */
void
elligator_2s_inject (
    struct affine_t* a,
    const field_a_t r
) {
    field_a_t L2, L3, L4, L5, L6, L7, L8;
    field_sqr ( a->x, r );             /* r^2 */
    field_sqr ( L3, a->x );            /* r^4 */
    field_copy ( a->y, L3 );
    field_neg ( L4, a->y );
    field_addw ( L4, 1 );              /* 1 - r^4 */
    field_sqr ( L2, L4 );
    field_mulw ( L7, L2, (EDWARDS_D-1)*(EDWARDS_D-1) );
    field_mulw ( L8, L3, 4*(EDWARDS_D+1)*(EDWARDS_D+1) );
    field_add ( a->y, L8, L7 );
    field_mulw ( L8, L2, 4*(EDWARDS_D)*(EDWARDS_D-1) );
    field_sub ( L7, a->y, L8 );
    field_mulw_scc ( L6, a->y, -2-2*EDWARDS_D );
    field_mul ( L5, L7, L6 );
    /* FIXME Stability problem (API stability, not crash) / possible bug.
     * change to: p448_mul ( L5, L7, L4 ); ?
     * This isn't a deep change: it's for sign adjustment.
     * Need to check which one leads to the correct sign, probably by writig
     * the invert routine.
     *
     * Also, the tool doesn't produce the optimal route to this.
     * Let incoming L6 = a, L7 = e, L4 = b.
     *
     * Could compute be, (be)^2, (be)^3, a b^3 e^3, a b^3 e^4. = 4M+S
     * instead of 6M.
     */
    field_mul ( L8, L5, L4 );
    field_mul ( L4, L5, L6 );
    field_mul ( L5, L7, L8 );
    field_mul ( L8, L5, L4 );
    field_mul ( L4, L7, L8 );
    field_isr ( L6, L4 );              /* inverse square root */
    field_mul ( L4, L5, L6 );
    field_sqr ( L5, L6 );
    field_mul ( L6, L8, L5 );
    field_mul ( L8, L7, L6 );
    field_mul ( L7, L8, L6 );
    field_copy ( L6, a->x );
    field_addw ( a->x, 1 );
    field_mul ( L5, a->x, L8 );
    field_addw ( L5, 1 );
    field_sub ( a->x, L6, L5 );
    field_mul ( L5, L4, a->x );
    field_mulw_scc_wr ( a->x, L5, -2-2*EDWARDS_D );
    field_add ( L4, L3, L3 );
    field_add ( L3, L4, L2 );
    field_subw( L3, 2 );
    field_mul ( L2, L3, L8 );
    field_mulw ( L3, L2, 2*(EDWARDS_D+1)*(EDWARDS_D-1) );
    field_add ( L2, L3, a->y );
    field_mul ( a->y, L7, L2 );
    field_addw ( a->y, -field_is_zero( L8 ) ); /* degenerate-case patch */
}
  840. mask_t
  841. validate_affine (
  842. const struct affine_t* a
  843. ) {
  844. field_a_t L0, L1, L2, L3;
  845. field_sqr ( L0, a->y );
  846. field_sqr ( L1, a->x );
  847. field_add ( L3, L1, L0 );
  848. field_mulw_scc ( L2, L1, EDWARDS_D );
  849. field_mul ( L1, L0, L2 );
  850. field_addw ( L1, 1 );
  851. field_sub ( L0, L3, L1 );
  852. return field_is_zero( L0 );
  853. }
/**
 * Validate the internal consistency of a twisted extensible point.
 * Returns a mask: all-ones iff both invariants below hold AND z != 0.
 */
mask_t
validate_tw_extensible (
    const struct tw_extensible_t* ext
) {
    mask_t L4, L5;
    field_a_t L0, L1, L2, L3;
    /*
     * Check invariant:
     * 0 = -x*y + z*t*u
     */
    field_mul ( L1, ext->t, ext->u );
    field_mul ( L2, ext->z, L1 );      /* z*t*u */
    field_mul ( L0, ext->x, ext->y );
    field_neg ( L1, L0 );
    field_add ( L0, L1, L2 );
    L5 = field_is_zero( L0 );
    /*
     * Check invariant:
     * 0 = d*t^2*u^2 + x^2 - y^2 + z^2 - t^2*u^2
     */
    field_sqr ( L2, ext->y );
    field_neg ( L1, L2 );
    field_sqr ( L0, ext->x );
    field_add ( L2, L0, L1 );          /* x^2 - y^2 */
    field_sqr ( L3, ext->u );
    field_sqr ( L0, ext->t );
    field_mul ( L1, L0, L3 );          /* t^2*u^2 */
    field_mulw_scc ( L3, L1, EDWARDS_D );
    field_add ( L0, L3, L2 );
    field_neg ( L3, L1 );
    field_add ( L2, L3, L0 );
    field_sqr ( L1, ext->z );
    field_add ( L0, L1, L2 );
    L4 = field_is_zero( L0 );
    /* Valid iff both invariants hold and the point is not at z == 0. */
    return L5 & L4 &~ field_is_zero(ext->z);
}
/**
 * Validate the internal consistency of an (untwisted) extensible point.
 * Returns a mask: all-ones iff both invariants below hold AND z != 0.
 */
mask_t
validate_extensible (
    const struct extensible_t* ext
) {
    mask_t L4, L5;
    field_a_t L0, L1, L2, L3;
    /*
     * Check invariant:
     * 0 = d*t^2*u^2 - x^2 - y^2 + z^2
     */
    field_sqr ( L2, ext->y );
    field_neg ( L1, L2 );
    field_sqr ( L0, ext->z );
    field_add ( L2, L0, L1 );          /* z^2 - y^2 */
    field_sqr ( L3, ext->u );
    field_sqr ( L0, ext->t );
    field_mul ( L1, L0, L3 );          /* t^2*u^2 */
    field_mulw_scc ( L0, L1, EDWARDS_D );
    field_add ( L1, L0, L2 );
    field_sqr ( L0, ext->x );
    field_neg ( L2, L0 );
    field_add ( L0, L2, L1 );
    L5 = field_is_zero( L0 );
    /*
     * Check invariant:
     * 0 = -x*y + z*t*u
     */
    field_mul ( L1, ext->t, ext->u );
    field_mul ( L2, ext->z, L1 );      /* z*t*u */
    field_mul ( L0, ext->x, ext->y );
    field_neg ( L1, L0 );
    field_add ( L0, L1, L2 );
    L4 = field_is_zero( L0 );
    /* Valid iff both invariants hold and the point is not at z == 0. */
    return L5 & L4 &~ field_is_zero(ext->z);
}