From c3adda842592cd91d7d18b1a90a6defd628b7415 Mon Sep 17 00:00:00 2001
From: Brian Warner
Date: Fri, 10 Feb 2012 16:40:16 -0800
Subject: [PATCH 01/10] import new 'ref10' ed25519 code verbatim from
 SUPERCOP-20120210

Still needs to be adapted to build outside of SUPERCOP.
---
 src/ed25519-supercop-ref10/Makefile          |   41 +
 src/ed25519-supercop-ref10/api.h             |    3 +
 src/ed25519-supercop-ref10/base.h            | 1344 +++++++++++++++++
 src/ed25519-supercop-ref10/base.py           |   65 +
 src/ed25519-supercop-ref10/base2.h           |   40 +
 src/ed25519-supercop-ref10/base2.py          |   60 +
 src/ed25519-supercop-ref10/d.h               |    1 +
 src/ed25519-supercop-ref10/d.py              |   28 +
 src/ed25519-supercop-ref10/d2.h              |    1 +
 src/ed25519-supercop-ref10/d2.py             |   28 +
 src/ed25519-supercop-ref10/fe.h              |   56 +
 src/ed25519-supercop-ref10/fe_0.c            |   19 +
 src/ed25519-supercop-ref10/fe_1.c            |   19 +
 src/ed25519-supercop-ref10/fe_add.c          |   57 +
 src/ed25519-supercop-ref10/fe_cmov.c         |   63 +
 src/ed25519-supercop-ref10/fe_copy.c         |   29 +
 src/ed25519-supercop-ref10/fe_frombytes.c    |   73 +
 src/ed25519-supercop-ref10/fe_invert.c       |   14 +
 src/ed25519-supercop-ref10/fe_isnegative.c   |   16 +
 src/ed25519-supercop-ref10/fe_isnonzero.c    |   19 +
 src/ed25519-supercop-ref10/fe_mul.c          |  253 ++++
 src/ed25519-supercop-ref10/fe_neg.c          |   45 +
 src/ed25519-supercop-ref10/fe_pow22523.c     |   13 +
 src/ed25519-supercop-ref10/fe_sq.c           |  149 ++
 src/ed25519-supercop-ref10/fe_sq2.c          |  160 ++
 src/ed25519-supercop-ref10/fe_sub.c          |   57 +
 src/ed25519-supercop-ref10/fe_tobytes.c      |  119 ++
 src/ed25519-supercop-ref10/ge.h              |   95 ++
 src/ed25519-supercop-ref10/ge_add.c          |   11 +
 src/ed25519-supercop-ref10/ge_add.h          |   97 ++
 src/ed25519-supercop-ref10/ge_add.q          |   49 +
 .../ge_double_scalarmult.c                   |   96 ++
 src/ed25519-supercop-ref10/ge_frombytes.c    |   50 +
 src/ed25519-supercop-ref10/ge_madd.c         |   11 +
 src/ed25519-supercop-ref10/ge_madd.h         |   88 ++
 src/ed25519-supercop-ref10/ge_madd.q         |   46 +
 src/ed25519-supercop-ref10/ge_msub.c         |   11 +
 src/ed25519-supercop-ref10/ge_msub.h         |   88 ++
 src/ed25519-supercop-ref10/ge_msub.q         |   46 +
 src/ed25519-supercop-ref10/ge_p1p1_to_p2.c   |   12 +
 src/ed25519-supercop-ref10/ge_p1p1_to_p3.c   |   13 +
 src/ed25519-supercop-ref10/ge_p2_0.c         |    8 +
 src/ed25519-supercop-ref10/ge_p2_dbl.c       |   11 +
 src/ed25519-supercop-ref10/ge_p2_dbl.h       |   73 +
 src/ed25519-supercop-ref10/ge_p2_dbl.q       |   41 +
 src/ed25519-supercop-ref10/ge_p3_0.c         |    9 +
 src/ed25519-supercop-ref10/ge_p3_dbl.c       |   12 +
 src/ed25519-supercop-ref10/ge_p3_to_cached.c |   17 +
 src/ed25519-supercop-ref10/ge_p3_to_p2.c     |   12 +
 src/ed25519-supercop-ref10/ge_p3_tobytes.c   |   14 +
 src/ed25519-supercop-ref10/ge_precomp_0.c    |    8 +
 .../ge_scalarmult_base.c                     |  105 ++
 src/ed25519-supercop-ref10/ge_sub.c          |   11 +
 src/ed25519-supercop-ref10/ge_sub.h          |   97 ++
 src/ed25519-supercop-ref10/ge_sub.q          |   49 +
 src/ed25519-supercop-ref10/ge_tobytes.c      |   14 +
 src/ed25519-supercop-ref10/keypair.c         |   22 +
 src/ed25519-supercop-ref10/open.c            |   40 +
 src/ed25519-supercop-ref10/pow22523.h        |  160 ++
 src/ed25519-supercop-ref10/pow22523.q        |   61 +
 src/ed25519-supercop-ref10/pow225521.h       |  160 ++
 src/ed25519-supercop-ref10/pow225521.q       |   61 +
 src/ed25519-supercop-ref10/q2h.sh            |    4 +
 src/ed25519-supercop-ref10/sc.h              |   15 +
 src/ed25519-supercop-ref10/sc_muladd.c       |  368 +++++
 src/ed25519-supercop-ref10/sc_reduce.c       |  275 ++++
 src/ed25519-supercop-ref10/sign.c            |   38 +
 src/ed25519-supercop-ref10/sqrtm1.h          |    1 +
 src/ed25519-supercop-ref10/sqrtm1.py         |   28 +
 69 files changed, 5199 insertions(+)
 create mode 100644 src/ed25519-supercop-ref10/Makefile
 create mode 100644 src/ed25519-supercop-ref10/api.h
 create mode 100644 src/ed25519-supercop-ref10/base.h
 create mode 100644 src/ed25519-supercop-ref10/base.py
 create mode 100644 src/ed25519-supercop-ref10/base2.h
 create mode 100644 src/ed25519-supercop-ref10/base2.py
 create mode 100644 src/ed25519-supercop-ref10/d.h
 create mode 100644 src/ed25519-supercop-ref10/d.py
 create mode 100644 src/ed25519-supercop-ref10/d2.h
 create mode 100644 src/ed25519-supercop-ref10/d2.py
 create mode 100644 src/ed25519-supercop-ref10/fe.h
 create mode 100644 src/ed25519-supercop-ref10/fe_0.c
 create mode 100644 src/ed25519-supercop-ref10/fe_1.c
 create mode 100644 src/ed25519-supercop-ref10/fe_add.c
 create mode 100644 src/ed25519-supercop-ref10/fe_cmov.c
 create mode 100644 src/ed25519-supercop-ref10/fe_copy.c
 create mode 100644 src/ed25519-supercop-ref10/fe_frombytes.c
 create mode 100644 src/ed25519-supercop-ref10/fe_invert.c
 create mode 100644 src/ed25519-supercop-ref10/fe_isnegative.c
 create mode 100644 src/ed25519-supercop-ref10/fe_isnonzero.c
 create mode 100644 src/ed25519-supercop-ref10/fe_mul.c
 create mode 100644 src/ed25519-supercop-ref10/fe_neg.c
 create mode 100644 src/ed25519-supercop-ref10/fe_pow22523.c
 create mode 100644 src/ed25519-supercop-ref10/fe_sq.c
 create mode 100644 src/ed25519-supercop-ref10/fe_sq2.c
 create mode 100644 src/ed25519-supercop-ref10/fe_sub.c
 create mode 100644 src/ed25519-supercop-ref10/fe_tobytes.c
 create mode 100644 src/ed25519-supercop-ref10/ge.h
 create mode 100644 src/ed25519-supercop-ref10/ge_add.c
 create mode 100644 src/ed25519-supercop-ref10/ge_add.h
 create mode 100644 src/ed25519-supercop-ref10/ge_add.q
 create mode 100644 src/ed25519-supercop-ref10/ge_double_scalarmult.c
 create mode 100644 src/ed25519-supercop-ref10/ge_frombytes.c
 create mode 100644 src/ed25519-supercop-ref10/ge_madd.c
 create mode 100644 src/ed25519-supercop-ref10/ge_madd.h
 create mode 100644 src/ed25519-supercop-ref10/ge_madd.q
 create mode 100644 src/ed25519-supercop-ref10/ge_msub.c
 create mode 100644 src/ed25519-supercop-ref10/ge_msub.h
 create mode 100644 src/ed25519-supercop-ref10/ge_msub.q
 create mode 100644 src/ed25519-supercop-ref10/ge_p1p1_to_p2.c
 create mode 100644 src/ed25519-supercop-ref10/ge_p1p1_to_p3.c
 create mode 100644 src/ed25519-supercop-ref10/ge_p2_0.c
 create mode 100644 src/ed25519-supercop-ref10/ge_p2_dbl.c
 create mode 100644 src/ed25519-supercop-ref10/ge_p2_dbl.h
 create mode 100644 src/ed25519-supercop-ref10/ge_p2_dbl.q
 create mode 100644 src/ed25519-supercop-ref10/ge_p3_0.c
 create mode 100644 src/ed25519-supercop-ref10/ge_p3_dbl.c
 create mode 100644 src/ed25519-supercop-ref10/ge_p3_to_cached.c
 create mode 100644 src/ed25519-supercop-ref10/ge_p3_to_p2.c
 create mode 100644 src/ed25519-supercop-ref10/ge_p3_tobytes.c
 create mode 100644 src/ed25519-supercop-ref10/ge_precomp_0.c
 create mode 100644 src/ed25519-supercop-ref10/ge_scalarmult_base.c
 create mode 100644 src/ed25519-supercop-ref10/ge_sub.c
 create mode 100644 src/ed25519-supercop-ref10/ge_sub.h
 create mode 100644 src/ed25519-supercop-ref10/ge_sub.q
 create mode 100644 src/ed25519-supercop-ref10/ge_tobytes.c
 create mode 100644 src/ed25519-supercop-ref10/keypair.c
 create mode 100644 src/ed25519-supercop-ref10/open.c
 create mode 100644 src/ed25519-supercop-ref10/pow22523.h
 create mode 100644 src/ed25519-supercop-ref10/pow22523.q
 create mode 100644 src/ed25519-supercop-ref10/pow225521.h
 create mode 100644 src/ed25519-supercop-ref10/pow225521.q
 create mode 100755 src/ed25519-supercop-ref10/q2h.sh
 create mode 100644 src/ed25519-supercop-ref10/sc.h
 create mode 100644 src/ed25519-supercop-ref10/sc_muladd.c
 create mode 100644 src/ed25519-supercop-ref10/sc_reduce.c
 create mode 100644 src/ed25519-supercop-ref10/sign.c
 create mode 100644 src/ed25519-supercop-ref10/sqrtm1.h
 create mode 100644 src/ed25519-supercop-ref10/sqrtm1.py

diff --git a/src/ed25519-supercop-ref10/Makefile b/src/ed25519-supercop-ref10/Makefile
new file mode 100644
index 0000000..9b0ba7a
--- /dev/null
+++ b/src/ed25519-supercop-ref10/Makefile
@@ -0,0 +1,41 @@
+all: d.h d2.h sqrtm1.h base.h base2.h \
+ge_add.h ge_sub.h \
+ge_madd.h ge_msub.h \
+ge_p2_dbl.h \
+pow225521.h pow22523.h
+
+d.h: d.py
+	python d.py > d.h
+
+d2.h: d2.py
+	python d2.py > d2.h
+
+sqrtm1.h: sqrtm1.py
+	python sqrtm1.py > sqrtm1.h
+
+base.h: base.py
+	python base.py > base.h
+
+base2.h: base2.py
+	python base2.py > base2.h
+
+ge_add.h: ge_add.q q2h.sh
+	./q2h.sh < ge_add.q > ge_add.h
+
+ge_sub.h: ge_sub.q q2h.sh
+	./q2h.sh < ge_sub.q > ge_sub.h
+
+ge_madd.h: ge_madd.q q2h.sh
+	./q2h.sh < ge_madd.q > ge_madd.h
+
+ge_msub.h: ge_msub.q q2h.sh
+	./q2h.sh < ge_msub.q > ge_msub.h
+
+ge_p2_dbl.h: ge_p2_dbl.q q2h.sh
+	./q2h.sh < ge_p2_dbl.q > ge_p2_dbl.h
+
+pow22523.h: pow22523.q q2h.sh
+	./q2h.sh < pow22523.q > pow22523.h
+
+pow225521.h: pow225521.q q2h.sh
+	./q2h.sh < pow225521.q > pow225521.h
diff --git a/src/ed25519-supercop-ref10/api.h b/src/ed25519-supercop-ref10/api.h
new file mode 100644
index 0000000..352240c
--- /dev/null
+++ b/src/ed25519-supercop-ref10/api.h
@@ -0,0 +1,3 @@
+#define CRYPTO_SECRETKEYBYTES 64
+#define CRYPTO_PUBLICKEYBYTES 32
+#define CRYPTO_BYTES 64
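
For reference, api.h pins the sizes used by the standard SUPERCOP crypto_sign
interface: 64-byte secret keys, 32-byte public keys, and signatures that add
CRYPTO_BYTES (64) bytes to a message. A minimal sketch of a caller follows; it
assumes the conventional SUPERCOP prototypes (which keypair.c, sign.c, and
open.c in this import are expected to provide), and the helper name
sign_then_verify is hypothetical:

    #include "api.h"  /* CRYPTO_SECRETKEYBYTES, CRYPTO_PUBLICKEYBYTES, CRYPTO_BYTES */

    /* Conventional SUPERCOP signature API (keypair.c, sign.c, open.c). */
    int crypto_sign_keypair(unsigned char *pk, unsigned char *sk);
    int crypto_sign(unsigned char *sm, unsigned long long *smlen,
                    const unsigned char *m, unsigned long long mlen,
                    const unsigned char *sk);
    int crypto_sign_open(unsigned char *m, unsigned long long *mlen,
                         const unsigned char *sm, unsigned long long smlen,
                         const unsigned char *pk);

    int sign_then_verify(void)
    {
        unsigned char pk[CRYPTO_PUBLICKEYBYTES];
        unsigned char sk[CRYPTO_SECRETKEYBYTES];
        unsigned char m[] = "hello";                /* 6 bytes, incl. NUL */
        unsigned char sm[sizeof m + CRYPTO_BYTES];  /* signed message */
        unsigned char m2[sizeof m + CRYPTO_BYTES];
        unsigned long long smlen, m2len;

        if (crypto_sign_keypair(pk, sk) != 0) return -1;
        if (crypto_sign(sm, &smlen, m, sizeof m, sk) != 0) return -1;
        /* Returns 0 (and recovers the message into m2) only if the
           signature verifies under pk. */
        return crypto_sign_open(m2, &m2len, sm, smlen, pk);
    }
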
diff --git a/src/ed25519-supercop-ref10/base.h b/src/ed25519-supercop-ref10/base.h
new file mode 100644
index 0000000..573bd8a
--- /dev/null
+++ b/src/ed25519-supercop-ref10/base.h
@@ -0,0 +1,1344 @@
+{
+ {
+  { 25967493,-14356035,29566456,3660896,-12694345,4014787,27544626,-11754271,-6079156,2047605 },
+  { -12545711,934262,-2722910,3049990,-727428,9406986,12720692,5043384,19500929,-15469378 },
+  { -8738181,4489570,9688441,-14785194,10184609,-12363380,29287919,11864899,-24514362,-4438546 },
+ },
+ {
+  { -12815894,-12976347,-21581243,11784320,-25355658,-2750717,-11717903,-3814571,-358445,-10211303 },
+  { -21703237,6903825,27185491,6451973,-29577724,-9554005,-15616551,11189268,-26829678,-5319081 },
+  { 26966642,11152617,32442495,15396054,14353839,-12752335,-3128826,-9541118,-15472047,-4166697 },
+ },
+ {
+  { 15636291,-9688557,24204773,-7912398,616977,-16685262,27787600,-14772189,28944400,-1550024 },
+  { 16568933,4717097,-11556148,-1102322,15682896,-11807043,16354577,-11775962,7689662,11199574 },
+  { 30464156,-5976125,-11779434,-15670865,23220365,15915852,7512774,10017326,-17749093,-9920357 },
+ },
+ {
+  { -17036878,13921892,10945806,-6033431,27105052,-16084379,-28926210,15006023,3284568,-6276540 },
+  { 23599295,-8306047,-11193664,-7687416,13236774,10506355,7464579,9656445,13059162,10374397 },
+  { 7798556,16710257,3033922,2874086,28997861,2835604,32406664,-3839045,-641708,-101325 },
+ },
+ {
+  { 10861363,11473154,27284546,1981175,-30064349,12577861,32867885,14515107,-15438304,10819380 },
+  { 4708026,6336745,20377586,9066809,-11272109,6594696,-25653668,12483688,-12668491,5581306 },
+  { 19563160,16186464,-29386857,4097519,10237984,-4348115,28542350,13850243,-23678021,-15815942 },
+ },
+ {
+  { -15371964,-12862754,32573250,4720197,-26436522,5875511,-19188627,-15224819,-9818940,-12085777 },
+  { -8549212,109983,15149363,2178705,22900618,4543417,3044240,-15689887,1762328,14866737 },
+  { -18199695,-15951423,-10473290,1707278,-17185920,3916101,-28236412,3959421,27914454,4383652 },
+ },
+ {
+  { 5153746,9909285,1723747,-2777874,30523605,5516873,19480852,5230134,-23952439,-15175766 },
+  { -30269007,-3463509,7665486,10083793,28475525,1649722,20654025,16520125,30598449,7715701 },
+  { 28881845,14381568,9657904,3680757,-20181635,7843316,-31400660,1370708,29794553,-1409300 },
+ },
+ {
+  { 14499471,-2729599,-33191113,-4254652,28494862,14271267,30290735,10876454,-33154098,2381726 },
+  { -7195431,-2655363,-14730155,462251,-27724326,3941372,-6236617,3696005,-32300832,15351955 },
+  { 27431194,8222322,16448760,-3907995,-18707002,11938355,-32961401,-2970515,29551813,10109425 },
+ },
+},
+{
+ {
+  { -13657040,-13155431,-31283750,11777098,21447386,6519384,-2378284,-1627556,10092783,-4764171 },
+  { 27939166,14210322,4677035,16277044,-22964462,-12398139,-32508754,12005538,-17810127,12803510 },
+  { 17228999,-15661624,-1233527,300140,-1224870,-11714777,30364213,-9038194,18016357,4397660 },
+ },
+ {
+  { -10958843,-7690207,4776341,-14954238,27850028,-15602212,-26619106,14544525,-17477504,982639 },
+  { 29253598,15796703,-2863982,-9908884,10057023,3163536,7332899,-4120128,-21047696,9934963 },
+  { 5793303,16271923,-24131614,-10116404,29188560,1206517,-14747930,4559895,-30123922,-10897950 },
+ },
+ {
+  { -27643952,-11493006,16282657,-11036493,28414021,-15012264,24191034,4541697,-13338309,5500568 },
+  { 12650548,-1497113,9052871,11355358,-17680037,-8400164,-17430592,12264343,10874051,13524335 },
+  { 25556948,-3045990,714651,2510400,23394682,-10415330,33119038,5080568,-22528059,5376628 },
+ },
+ {
+  { -26088264,-4011052,-17013699,-3537628,-6726793,1920897,-22321305,-9447443,4535768,1569007 },
+  { -2255422,14606630,-21692440,-8039818,28430649,8775819,-30494562,3044290,31848280,12543772 },
+  { -22028579,2943893,-31857513,6777306,13784462,-4292203,-27377195,-2062731,7718482,14474653 },
+ },
+ {
+  { 2385315,2454213,-22631320,46603,-4437935,-15680415,656965,-7236665,24316168,-5253567 },
+  { 13741529,10911568,-33233417,-8603737,-20177830,-1033297,33040651,-13424532,-20729456,8321686 },
+  { 21060490,-2212744,15712757,-4336099,1639040,10656336,23845965,-11874838,-9984458,608372 },
+ },
+ {
+  { -13672732,-15087586,-10889693,-7557059,-6036909,11305547,1123968,-6780577,27229399,23887 },
+  { -23244140,-294205,-11744728,14712571,-29465699,-2029617,12797024,-6440308,-1633405,16678954 },
+  { -29500620,4770662,-16054387,14001338,7830047,9564805,-1508144,-4795045,-17169265,4904953 },
+ },
+ {
+  { 24059557,14617003,19037157,-15039908,19766093,-14906429,5169211,16191880,2128236,-4326833 },
+  { -16981152,4124966,-8540610,-10653797,30336522,-14105247,-29806336,916033,-6882542,-2986532 },
+  { -22630907,12419372,-7134229,-7473371,-16478904,16739175,285431,2763829,15736322,4143876 },
+ },
+ {
+  { 2379352,11839345,-4110402,-5988665,11274298,794957,212801,-14594663,23527084,-16458268 },
+  { 33431127,-11130478,-17838966,-15626900,8909499,8376530,-32625340,4087881,-15188911,-14416214 },
+  { 1767683,7197987,-13205226,-2022635,-13091350,448826,5799055,4357868,-4774191,-16323038 },
+ },
+},
+{
+ {
+  { 6721966,13833823,-23523388,-1551314,26354293,-11863321,23365147,-3949732,7390890,2759800 },
+  { 4409041,2052381,23373853,10530217,7676779,-12885954,21302353,-4264057,1244380,-12919645 },
+  { -4421239,7169619,4982368,-2957590,30256825,-2777540,14086413,9208236,15886429,16489664 },
+ },
+ {
+  { 1996075,10375649,14346367,13311202,-6874135,-16438411,-13693198,398369,-30606455,-712933 },
+  { -25307465,9795880,-2777414,14878809,-33531835,14780363,13348553,12076947,-30836462,5113182 },
+  {
-17770784,11797796,31950843,13929123,-25888302,12288344,-30341101,-7336386,13847711,5387222 }, + }, + { + { -18582163,-3416217,17824843,-2340966,22744343,-10442611,8763061,3617786,-19600662,10370991 }, + { 20246567,-14369378,22358229,-543712,18507283,-10413996,14554437,-8746092,32232924,16763880 }, + { 9648505,10094563,26416693,14745928,-30374318,-6472621,11094161,15689506,3140038,-16510092 }, + }, + { + { -16160072,5472695,31895588,4744994,8823515,10365685,-27224800,9448613,-28774454,366295 }, + { 19153450,11523972,-11096490,-6503142,-24647631,5420647,28344573,8041113,719605,11671788 }, + { 8678025,2694440,-6808014,2517372,4964326,11152271,-15432916,-15266516,27000813,-10195553 }, + }, + { + { -15157904,7134312,8639287,-2814877,-7235688,10421742,564065,5336097,6750977,-14521026 }, + { 11836410,-3979488,26297894,16080799,23455045,15735944,1695823,-8819122,8169720,16220347 }, + { -18115838,8653647,17578566,-6092619,-8025777,-16012763,-11144307,-2627664,-5990708,-14166033 }, + }, + { + { -23308498,-10968312,15213228,-10081214,-30853605,-11050004,27884329,2847284,2655861,1738395 }, + { -27537433,-14253021,-25336301,-8002780,-9370762,8129821,21651608,-3239336,-19087449,-11005278 }, + { 1533110,3437855,23735889,459276,29970501,11335377,26030092,5821408,10478196,8544890 }, + }, + { + { 32173121,-16129311,24896207,3921497,22579056,-3410854,19270449,12217473,17789017,-3395995 }, + { -30552961,-2228401,-15578829,-10147201,13243889,517024,15479401,-3853233,30460520,1052596 }, + { -11614875,13323618,32618793,8175907,-15230173,12596687,27491595,-4612359,3179268,-9478891 }, + }, + { + { 31947069,-14366651,-4640583,-15339921,-15125977,-6039709,-14756777,-16411740,19072640,-9511060 }, + { 11685058,11822410,3158003,-13952594,33402194,-4165066,5977896,-5215017,473099,5040608 }, + { -20290863,8198642,-27410132,11602123,1290375,-2799760,28326862,1721092,-19558642,-3131606 }, + }, +}, +{ + { + { 7881532,10687937,7578723,7738378,-18951012,-2553952,21820786,8076149,-27868496,11538389 }, + { -19935666,3899861,18283497,-6801568,-15728660,-11249211,8754525,7446702,-5676054,5797016 }, + { -11295600,-3793569,-15782110,-7964573,12708869,-8456199,2014099,-9050574,-2369172,-5877341 }, + }, + { + { -22472376,-11568741,-27682020,1146375,18956691,16640559,1192730,-3714199,15123619,10811505 }, + { 14352098,-3419715,-18942044,10822655,32750596,4699007,-70363,15776356,-28886779,-11974553 }, + { -28241164,-8072475,-4978962,-5315317,29416931,1847569,-20654173,-16484855,4714547,-9600655 }, + }, + { + { 15200332,8368572,19679101,15970074,-31872674,1959451,24611599,-4543832,-11745876,12340220 }, + { 12876937,-10480056,33134381,6590940,-6307776,14872440,9613953,8241152,15370987,9608631 }, + { -4143277,-12014408,8446281,-391603,4407738,13629032,-7724868,15866074,-28210621,-8814099 }, + }, + { + { 26660628,-15677655,8393734,358047,-7401291,992988,-23904233,858697,20571223,8420556 }, + { 14620715,13067227,-15447274,8264467,14106269,15080814,33531827,12516406,-21574435,-12476749 }, + { 236881,10476226,57258,-14677024,6472998,2466984,17258519,7256740,8791136,15069930 }, + }, + { + { 1276410,-9371918,22949635,-16322807,-23493039,-5702186,14711875,4874229,-30663140,-2331391 }, + { 5855666,4990204,-13711848,7294284,-7804282,1924647,-1423175,-7912378,-33069337,9234253 }, + { 20590503,-9018988,31529744,-7352666,-2706834,10650548,31559055,-11609587,18979186,13396066 }, + }, + { + { 24474287,4968103,22267082,4407354,24063882,-8325180,-18816887,13594782,33514650,7021958 }, + { 
-11566906,-6565505,-21365085,15928892,-26158305,4315421,-25948728,-3916677,-21480480,12868082 }, + { -28635013,13504661,19988037,-2132761,21078225,6443208,-21446107,2244500,-12455797,-8089383 }, + }, + { + { -30595528,13793479,-5852820,319136,-25723172,-6263899,33086546,8957937,-15233648,5540521 }, + { -11630176,-11503902,-8119500,-7643073,2620056,1022908,-23710744,-1568984,-16128528,-14962807 }, + { 23152971,775386,27395463,14006635,-9701118,4649512,1689819,892185,-11513277,-15205948 }, + }, + { + { 9770129,9586738,26496094,4324120,1556511,-3550024,27453819,4763127,-19179614,5867134 }, + { -32765025,1927590,31726409,-4753295,23962434,-16019500,27846559,5931263,-29749703,-16108455 }, + { 27461885,-2977536,22380810,1815854,-23033753,-3031938,7283490,-15148073,-19526700,7734629 }, + }, +}, +{ + { + { -8010264,-9590817,-11120403,6196038,29344158,-13430885,7585295,-3176626,18549497,15302069 }, + { -32658337,-6171222,-7672793,-11051681,6258878,13504381,10458790,-6418461,-8872242,8424746 }, + { 24687205,8613276,-30667046,-3233545,1863892,-1830544,19206234,7134917,-11284482,-828919 }, + }, + { + { 11334899,-9218022,8025293,12707519,17523892,-10476071,10243738,-14685461,-5066034,16498837 }, + { 8911542,6887158,-9584260,-6958590,11145641,-9543680,17303925,-14124238,6536641,10543906 }, + { -28946384,15479763,-17466835,568876,-1497683,11223454,-2669190,-16625574,-27235709,8876771 }, + }, + { + { -25742899,-12566864,-15649966,-846607,-33026686,-796288,-33481822,15824474,-604426,-9039817 }, + { 10330056,70051,7957388,-9002667,9764902,15609756,27698697,-4890037,1657394,3084098 }, + { 10477963,-7470260,12119566,-13250805,29016247,-5365589,31280319,14396151,-30233575,15272409 }, + }, + { + { -12288309,3169463,28813183,16658753,25116432,-5630466,-25173957,-12636138,-25014757,1950504 }, + { -26180358,9489187,11053416,-14746161,-31053720,5825630,-8384306,-8767532,15341279,8373727 }, + { 28685821,7759505,-14378516,-12002860,-31971820,4079242,298136,-10232602,-2878207,15190420 }, + }, + { + { -32932876,13806336,-14337485,-15794431,-24004620,10940928,8669718,2742393,-26033313,-6875003 }, + { -1580388,-11729417,-25979658,-11445023,-17411874,-10912854,9291594,-16247779,-12154742,6048605 }, + { -30305315,14843444,1539301,11864366,20201677,1900163,13934231,5128323,11213262,9168384 }, + }, + { + { -26280513,11007847,19408960,-940758,-18592965,-4328580,-5088060,-11105150,20470157,-16398701 }, + { -23136053,9282192,14855179,-15390078,-7362815,-14408560,-22783952,14461608,14042978,5230683 }, + { 29969567,-2741594,-16711867,-8552442,9175486,-2468974,21556951,3506042,-5933891,-12449708 }, + }, + { + { -3144746,8744661,19704003,4581278,-20430686,6830683,-21284170,8971513,-28539189,15326563 }, + { -19464629,10110288,-17262528,-3503892,-23500387,1355669,-15523050,15300988,-20514118,9168260 }, + { -5353335,4488613,-23803248,16314347,7780487,-15638939,-28948358,9601605,33087103,-9011387 }, + }, + { + { -19443170,-15512900,-20797467,-12445323,-29824447,10229461,-27444329,-15000531,-5996870,15664672 }, + { 23294591,-16632613,-22650781,-8470978,27844204,11461195,13099750,-2460356,18151676,13417686 }, + { -24722913,-4176517,-31150679,5988919,-26858785,6685065,1661597,-12551441,15271676,-15452665 }, + }, +}, +{ + { + { 11433042,-13228665,8239631,-5279517,-1985436,-725718,-18698764,2167544,-6921301,-13440182 }, + { -31436171,15575146,30436815,12192228,-22463353,9395379,-9917708,-8638997,12215110,12028277 }, + { 14098400,6555944,23007258,5757252,-15427832,-12950502,30123440,4617780,-16900089,-655628 }, + }, + { + { 
-4026201,-15240835,11893168,13718664,-14809462,1847385,-15819999,10154009,23973261,-12684474 }, + { -26531820,-3695990,-1908898,2534301,-31870557,-16550355,18341390,-11419951,32013174,-10103539 }, + { -25479301,10876443,-11771086,-14625140,-12369567,1838104,21911214,6354752,4425632,-837822 }, + }, + { + { -10433389,-14612966,22229858,-3091047,-13191166,776729,-17415375,-12020462,4725005,14044970 }, + { 19268650,-7304421,1555349,8692754,-21474059,-9910664,6347390,-1411784,-19522291,-16109756 }, + { -24864089,12986008,-10898878,-5558584,-11312371,-148526,19541418,8180106,9282262,10282508 }, + }, + { + { -26205082,4428547,-8661196,-13194263,4098402,-14165257,15522535,8372215,5542595,-10702683 }, + { -10562541,14895633,26814552,-16673850,-17480754,-2489360,-2781891,6993761,-18093885,10114655 }, + { -20107055,-929418,31422704,10427861,-7110749,6150669,-29091755,-11529146,25953725,-106158 }, + }, + { + { -4234397,-8039292,-9119125,3046000,2101609,-12607294,19390020,6094296,-3315279,12831125 }, + { -15998678,7578152,5310217,14408357,-33548620,-224739,31575954,6326196,7381791,-2421839 }, + { -20902779,3296811,24736065,-16328389,18374254,7318640,6295303,8082724,-15362489,12339664 }, + }, + { + { 27724736,2291157,6088201,-14184798,1792727,5857634,13848414,15768922,25091167,14856294 }, + { -18866652,8331043,24373479,8541013,-701998,-9269457,12927300,-12695493,-22182473,-9012899 }, + { -11423429,-5421590,11632845,3405020,30536730,-11674039,-27260765,13866390,30146206,9142070 }, + }, + { + { 3924129,-15307516,-13817122,-10054960,12291820,-668366,-27702774,9326384,-8237858,4171294 }, + { -15921940,16037937,6713787,16606682,-21612135,2790944,26396185,3731949,345228,-5462949 }, + { -21327538,13448259,25284571,1143661,20614966,-8849387,2031539,-12391231,-16253183,-13582083 }, + }, + { + { 31016211,-16722429,26371392,-14451233,-5027349,14854137,17477601,3842657,28012650,-16405420 }, + { -5075835,9368966,-8562079,-4600902,-15249953,6970560,-9189873,16292057,-8867157,3507940 }, + { 29439664,3537914,23333589,6997794,-17555561,-11018068,-15209202,-15051267,-9164929,6580396 }, + }, +}, +{ + { + { -12185861,-7679788,16438269,10826160,-8696817,-6235611,17860444,-9273846,-2095802,9304567 }, + { 20714564,-4336911,29088195,7406487,11426967,-5095705,14792667,-14608617,5289421,-477127 }, + { -16665533,-10650790,-6160345,-13305760,9192020,-1802462,17271490,12349094,26939669,-3752294 }, + }, + { + { -12889898,9373458,31595848,16374215,21471720,13221525,-27283495,-12348559,-3698806,117887 }, + { 22263325,-6560050,3984570,-11174646,-15114008,-566785,28311253,5358056,-23319780,541964 }, + { 16259219,3261970,2309254,-15534474,-16885711,-4581916,24134070,-16705829,-13337066,-13552195 }, + }, + { + { 9378160,-13140186,-22845982,-12745264,28198281,-7244098,-2399684,-717351,690426,14876244 }, + { 24977353,-314384,-8223969,-13465086,28432343,-1176353,-13068804,-12297348,-22380984,6618999 }, + { -1538174,11685646,12944378,13682314,-24389511,-14413193,8044829,-13817328,32239829,-5652762 }, + }, + { + { -18603066,4762990,-926250,8885304,-28412480,-3187315,9781647,-10350059,32779359,5095274 }, + { -33008130,-5214506,-32264887,-3685216,9460461,-9327423,-24601656,14506724,21639561,-2630236 }, + { -16400943,-13112215,25239338,15531969,3987758,-4499318,-1289502,-6863535,17874574,558605 }, + }, + { + { -13600129,10240081,9171883,16131053,-20869254,9599700,33499487,5080151,2085892,5119761 }, + { -22205145,-2519528,-16381601,414691,-25019550,2170430,30634760,-8363614,-31999993,-5759884 }, + { 
-6845704,15791202,8550074,-1312654,29928809,-12092256,27534430,-7192145,-22351378,12961482 }, + }, + { + { -24492060,-9570771,10368194,11582341,-23397293,-2245287,16533930,8206996,-30194652,-5159638 }, + { -11121496,-3382234,2307366,6362031,-135455,8868177,-16835630,7031275,7589640,8945490 }, + { -32152748,8917967,6661220,-11677616,-1192060,-15793393,7251489,-11182180,24099109,-14456170 }, + }, + { + { 5019558,-7907470,4244127,-14714356,-26933272,6453165,-19118182,-13289025,-6231896,-10280736 }, + { 10853594,10721687,26480089,5861829,-22995819,1972175,-1866647,-10557898,-3363451,-6441124 }, + { -17002408,5906790,221599,-6563147,7828208,-13248918,24362661,-2008168,-13866408,7421392 }, + }, + { + { 8139927,-6546497,32257646,-5890546,30375719,1886181,-21175108,15441252,28826358,-4123029 }, + { 6267086,9695052,7709135,-16603597,-32869068,-1886135,14795160,-7840124,13746021,-1742048 }, + { 28584902,7787108,-6732942,-15050729,22846041,-7571236,-3181936,-363524,4771362,-8419958 }, + }, +}, +{ + { + { 24949256,6376279,-27466481,-8174608,-18646154,-9930606,33543569,-12141695,3569627,11342593 }, + { 26514989,4740088,27912651,3697550,19331575,-11472339,6809886,4608608,7325975,-14801071 }, + { -11618399,-14554430,-24321212,7655128,-1369274,5214312,-27400540,10258390,-17646694,-8186692 }, + }, + { + { 11431204,15823007,26570245,14329124,18029990,4796082,-31446179,15580664,9280358,-3973687 }, + { -160783,-10326257,-22855316,-4304997,-20861367,-13621002,-32810901,-11181622,-15545091,4387441 }, + { -20799378,12194512,3937617,-5805892,-27154820,9340370,-24513992,8548137,20617071,-7482001 }, + }, + { + { -938825,-3930586,-8714311,16124718,24603125,-6225393,-13775352,-11875822,24345683,10325460 }, + { -19855277,-1568885,-22202708,8714034,14007766,6928528,16318175,-1010689,4766743,3552007 }, + { -21751364,-16730916,1351763,-803421,-4009670,3950935,3217514,14481909,10988822,-3994762 }, + }, + { + { 15564307,-14311570,3101243,5684148,30446780,-8051356,12677127,-6505343,-8295852,13296005 }, + { -9442290,6624296,-30298964,-11913677,-4670981,-2057379,31521204,9614054,-30000824,12074674 }, + { 4771191,-135239,14290749,-13089852,27992298,14998318,-1413936,-1556716,29832613,-16391035 }, + }, + { + { 7064884,-7541174,-19161962,-5067537,-18891269,-2912736,25825242,5293297,-27122660,13101590 }, + { -2298563,2439670,-7466610,1719965,-27267541,-16328445,32512469,-5317593,-30356070,-4190957 }, + { -30006540,10162316,-33180176,3981723,-16482138,-13070044,14413974,9515896,19568978,9628812 }, + }, + { + { 33053803,199357,15894591,1583059,27380243,-4580435,-17838894,-6106839,-6291786,3437740 }, + { -18978877,3884493,19469877,12726490,15913552,13614290,-22961733,70104,7463304,4176122 }, + { -27124001,10659917,11482427,-16070381,12771467,-6635117,-32719404,-5322751,24216882,5944158 }, + }, + { + { 8894125,7450974,-2664149,-9765752,-28080517,-12389115,19345746,14680796,11632993,5847885 }, + { 26942781,-2315317,9129564,-4906607,26024105,11769399,-11518837,6367194,-9727230,4782140 }, + { 19916461,-4828410,-22910704,-11414391,25606324,-5972441,33253853,8220911,6358847,-1873857 }, + }, + { + { 801428,-2081702,16569428,11065167,29875704,96627,7908388,-4480480,-13538503,1387155 }, + { 19646058,5720633,-11416706,12814209,11607948,12749789,14147075,15156355,-21866831,11835260 }, + { 19299512,1155910,28703737,14890794,2925026,7269399,26121523,15467869,-26560550,5052483 }, + }, +}, +{ + { + { -3017432,10058206,1980837,3964243,22160966,12322533,-6431123,-12618185,12228557,-7003677 }, + { 
32944382,14922211,-22844894,5188528,21913450,-8719943,4001465,13238564,-6114803,8653815 }, + { 22865569,-4652735,27603668,-12545395,14348958,8234005,24808405,5719875,28483275,2841751 }, + }, + { + { -16420968,-1113305,-327719,-12107856,21886282,-15552774,-1887966,-315658,19932058,-12739203 }, + { -11656086,10087521,-8864888,-5536143,-19278573,-3055912,3999228,13239134,-4777469,-13910208 }, + { 1382174,-11694719,17266790,9194690,-13324356,9720081,20403944,11284705,-14013818,3093230 }, + }, + { + { 16650921,-11037932,-1064178,1570629,-8329746,7352753,-302424,16271225,-24049421,-6691850 }, + { -21911077,-5927941,-4611316,-5560156,-31744103,-10785293,24123614,15193618,-21652117,-16739389 }, + { -9935934,-4289447,-25279823,4372842,2087473,10399484,31870908,14690798,17361620,11864968 }, + }, + { + { -11307610,6210372,13206574,5806320,-29017692,-13967200,-12331205,-7486601,-25578460,-16240689 }, + { 14668462,-12270235,26039039,15305210,25515617,4542480,10453892,6577524,9145645,-6443880 }, + { 5974874,3053895,-9433049,-10385191,-31865124,3225009,-7972642,3936128,-5652273,-3050304 }, + }, + { + { 30625386,-4729400,-25555961,-12792866,-20484575,7695099,17097188,-16303496,-27999779,1803632 }, + { -3553091,9865099,-5228566,4272701,-5673832,-16689700,14911344,12196514,-21405489,7047412 }, + { 20093277,9920966,-11138194,-5343857,13161587,12044805,-32856851,4124601,-32343828,-10257566 }, + }, + { + { -20788824,14084654,-13531713,7842147,19119038,-13822605,4752377,-8714640,-21679658,2288038 }, + { -26819236,-3283715,29965059,3039786,-14473765,2540457,29457502,14625692,-24819617,12570232 }, + { -1063558,-11551823,16920318,12494842,1278292,-5869109,-21159943,-3498680,-11974704,4724943 }, + }, + { + { 17960970,-11775534,-4140968,-9702530,-8876562,-1410617,-12907383,-8659932,-29576300,1903856 }, + { 23134274,-14279132,-10681997,-1611936,20684485,15770816,-12989750,3190296,26955097,14109738 }, + { 15308788,5320727,-30113809,-14318877,22902008,7767164,29425325,-11277562,31960942,11934971 }, + }, + { + { -27395711,8435796,4109644,12222639,-24627868,14818669,20638173,4875028,10491392,1379718 }, + { -13159415,9197841,3875503,-8936108,-1383712,-5879801,33518459,16176658,21432314,12180697 }, + { -11787308,11500838,13787581,-13832590,-22430679,10140205,1465425,12689540,-10301319,-13872883 }, + }, +}, +{ + { + { 5414091,-15386041,-21007664,9643570,12834970,1186149,-2622916,-1342231,26128231,6032912 }, + { -26337395,-13766162,32496025,-13653919,17847801,-12669156,3604025,8316894,-25875034,-10437358 }, + { 3296484,6223048,24680646,-12246460,-23052020,5903205,-8862297,-4639164,12376617,3188849 }, + }, + { + { 29190488,-14659046,27549113,-1183516,3520066,-10697301,32049515,-7309113,-16109234,-9852307 }, + { -14744486,-9309156,735818,-598978,-20407687,-5057904,25246078,-15795669,18640741,-960977 }, + { -6928835,-16430795,10361374,5642961,4910474,12345252,-31638386,-494430,10530747,1053335 }, + }, + { + { -29265967,-14186805,-13538216,-12117373,-19457059,-10655384,-31462369,-2948985,24018831,15026644 }, + { -22592535,-3145277,-2289276,5953843,-13440189,9425631,25310643,13003497,-2314791,-15145616 }, + { -27419985,-603321,-8043984,-1669117,-26092265,13987819,-27297622,187899,-23166419,-2531735 }, + }, + { + { -21744398,-13810475,1844840,5021428,-10434399,-15911473,9716667,16266922,-5070217,726099 }, + { 29370922,-6053998,7334071,-15342259,9385287,2247707,-13661962,-4839461,30007388,-15823341 }, + { -936379,16086691,23751945,-543318,-1167538,-5189036,9137109,730663,9835848,4555336 }, + }, + { + { 
-23376435,1410446,-22253753,-12899614,30867635,15826977,17693930,544696,-11985298,12422646 }, + { 31117226,-12215734,-13502838,6561947,-9876867,-12757670,-5118685,-4096706,29120153,13924425 }, + { -17400879,-14233209,19675799,-2734756,-11006962,-5858820,-9383939,-11317700,7240931,-237388 }, + }, + { + { -31361739,-11346780,-15007447,-5856218,-22453340,-12152771,1222336,4389483,3293637,-15551743 }, + { -16684801,-14444245,11038544,11054958,-13801175,-3338533,-24319580,7733547,12796905,-6335822 }, + { -8759414,-10817836,-25418864,10783769,-30615557,-9746811,-28253339,3647836,3222231,-11160462 }, + }, + { + { 18606113,1693100,-25448386,-15170272,4112353,10045021,23603893,-2048234,-7550776,2484985 }, + { 9255317,-3131197,-12156162,-1004256,13098013,-9214866,16377220,-2102812,-19802075,-3034702 }, + { -22729289,7496160,-5742199,11329249,19991973,-3347502,-31718148,9936966,-30097688,-10618797 }, + }, + { + { 21878590,-5001297,4338336,13643897,-3036865,13160960,19708896,5415497,-7360503,-4109293 }, + { 27736861,10103576,12500508,8502413,-3413016,-9633558,10436918,-1550276,-23659143,-8132100 }, + { 19492550,-12104365,-29681976,-852630,-3208171,12403437,30066266,8367329,13243957,8709688 }, + }, +}, +{ + { + { 12015105,2801261,28198131,10151021,24818120,-4743133,-11194191,-5645734,5150968,7274186 }, + { 2831366,-12492146,1478975,6122054,23825128,-12733586,31097299,6083058,31021603,-9793610 }, + { -2529932,-2229646,445613,10720828,-13849527,-11505937,-23507731,16354465,15067285,-14147707 }, + }, + { + { 7840942,14037873,-33364863,15934016,-728213,-3642706,21403988,1057586,-19379462,-12403220 }, + { 915865,-16469274,15608285,-8789130,-24357026,6060030,-17371319,8410997,-7220461,16527025 }, + { 32922597,-556987,20336074,-16184568,10903705,-5384487,16957574,52992,23834301,6588044 }, + }, + { + { 32752030,11232950,3381995,-8714866,22652988,-10744103,17159699,16689107,-20314580,-1305992 }, + { -4689649,9166776,-25710296,-10847306,11576752,12733943,7924251,-2752281,1976123,-7249027 }, + { 21251222,16309901,-2983015,-6783122,30810597,12967303,156041,-3371252,12331345,-8237197 }, + }, + { + { 8651614,-4477032,-16085636,-4996994,13002507,2950805,29054427,-5106970,10008136,-4667901 }, + { 31486080,15114593,-14261250,12951354,14369431,-7387845,16347321,-13662089,8684155,-10532952 }, + { 19443825,11385320,24468943,-9659068,-23919258,2187569,-26263207,-6086921,31316348,14219878 }, + }, + { + { -28594490,1193785,32245219,11392485,31092169,15722801,27146014,6992409,29126555,9207390 }, + { 32382935,1110093,18477781,11028262,-27411763,-7548111,-4980517,10843782,-7957600,-14435730 }, + { 2814918,7836403,27519878,-7868156,-20894015,-11553689,-21494559,8550130,28346258,1994730 }, + }, + { + { -19578299,8085545,-14000519,-3948622,2785838,-16231307,-19516951,7174894,22628102,8115180 }, + { -30405132,955511,-11133838,-15078069,-32447087,-13278079,-25651578,3317160,-9943017,930272 }, + { -15303681,-6833769,28856490,1357446,23421993,1057177,24091212,-1388970,-22765376,-10650715 }, + }, + { + { -22751231,-5303997,-12907607,-12768866,-15811511,-7797053,-14839018,-16554220,-1867018,8398970 }, + { -31969310,2106403,-4736360,1362501,12813763,16200670,22981545,-6291273,18009408,-15772772 }, + { -17220923,-9545221,-27784654,14166835,29815394,7444469,29551787,-3727419,19288549,1325865 }, + }, + { + { 15100157,-15835752,-23923978,-1005098,-26450192,15509408,12376730,-3479146,33166107,-8042750 }, + { 20909231,13023121,-9209752,16251778,-5778415,-8094914,12412151,10018715,2213263,-13878373 }, + { 
32529814,-11074689,30361439,-16689753,-9135940,1513226,22922121,6382134,-5766928,8371348 }, + }, +}, +{ + { + { 9923462,11271500,12616794,3544722,-29998368,-1721626,12891687,-8193132,-26442943,10486144 }, + { -22597207,-7012665,8587003,-8257861,4084309,-12970062,361726,2610596,-23921530,-11455195 }, + { 5408411,-1136691,-4969122,10561668,24145918,14240566,31319731,-4235541,19985175,-3436086 }, + }, + { + { -13994457,16616821,14549246,3341099,32155958,13648976,-17577068,8849297,65030,8370684 }, + { -8320926,-12049626,31204563,5839400,-20627288,-1057277,-19442942,6922164,12743482,-9800518 }, + { -2361371,12678785,28815050,4759974,-23893047,4884717,23783145,11038569,18800704,255233 }, + }, + { + { -5269658,-1773886,13957886,7990715,23132995,728773,13393847,9066957,19258688,-14753793 }, + { -2936654,-10827535,-10432089,14516793,-3640786,4372541,-31934921,2209390,-1524053,2055794 }, + { 580882,16705327,5468415,-2683018,-30926419,-14696000,-7203346,-8994389,-30021019,7394435 }, + }, + { + { 23838809,1822728,-15738443,15242727,8318092,-3733104,-21672180,-3492205,-4821741,14799921 }, + { 13345610,9759151,3371034,-16137791,16353039,8577942,31129804,13496856,-9056018,7402518 }, + { 2286874,-4435931,-20042458,-2008336,-13696227,5038122,11006906,-15760352,8205061,1607563 }, + }, + { + { 14414086,-8002132,3331830,-3208217,22249151,-5594188,18364661,-2906958,30019587,-9029278 }, + { -27688051,1585953,-10775053,931069,-29120221,-11002319,-14410829,12029093,9944378,8024 }, + { 4368715,-3709630,29874200,-15022983,-20230386,-11410704,-16114594,-999085,-8142388,5640030 }, + }, + { + { 10299610,13746483,11661824,16234854,7630238,5998374,9809887,-16694564,15219798,-14327783 }, + { 27425505,-5719081,3055006,10660664,23458024,595578,-15398605,-1173195,-18342183,9742717 }, + { 6744077,2427284,26042789,2720740,-847906,1118974,32324614,7406442,12420155,1994844 }, + }, + { + { 14012521,-5024720,-18384453,-9578469,-26485342,-3936439,-13033478,-10909803,24319929,-6446333 }, + { 16412690,-4507367,10772641,15929391,-17068788,-4658621,10555945,-10484049,-30102368,-4739048 }, + { 22397382,-7767684,-9293161,-12792868,17166287,-9755136,-27333065,6199366,21880021,-12250760 }, + }, + { + { -4283307,5368523,-31117018,8163389,-30323063,3209128,16557151,8890729,8840445,4957760 }, + { -15447727,709327,-6919446,-10870178,-29777922,6522332,-21720181,12130072,-14796503,5005757 }, + { -2114751,-14308128,23019042,15765735,-25269683,6002752,10183197,-13239326,-16395286,-2176112 }, + }, +}, +{ + { + { -19025756,1632005,13466291,-7995100,-23640451,16573537,-32013908,-3057104,22208662,2000468 }, + { 3065073,-1412761,-25598674,-361432,-17683065,-5703415,-8164212,11248527,-3691214,-7414184 }, + { 10379208,-6045554,8877319,1473647,-29291284,-12507580,16690915,2553332,-3132688,16400289 }, + }, + { + { 15716668,1254266,-18472690,7446274,-8448918,6344164,-22097271,-7285580,26894937,9132066 }, + { 24158887,12938817,11085297,-8177598,-28063478,-4457083,-30576463,64452,-6817084,-2692882 }, + { 13488534,7794716,22236231,5989356,25426474,-12578208,2350710,-3418511,-4688006,2364226 }, + }, + { + { 16335052,9132434,25640582,6678888,1725628,8517937,-11807024,-11697457,15445875,-7798101 }, + { 29004207,-7867081,28661402,-640412,-12794003,-7943086,31863255,-4135540,-278050,-15759279 }, + { -6122061,-14866665,-28614905,14569919,-10857999,-3591829,10343412,-6976290,-29828287,-10815811 }, + }, + { + { 27081650,3463984,14099042,-4517604,1616303,-6205604,29542636,15372179,17293797,960709 }, + { 
20263915,11434237,-5765435,11236810,13505955,-10857102,-16111345,6493122,-19384511,7639714 }, + { -2830798,-14839232,25403038,-8215196,-8317012,-16173699,18006287,-16043750,29994677,-15808121 }, + }, + { + { 9769828,5202651,-24157398,-13631392,-28051003,-11561624,-24613141,-13860782,-31184575,709464 }, + { 12286395,13076066,-21775189,-1176622,-25003198,4057652,-32018128,-8890874,16102007,13205847 }, + { 13733362,5599946,10557076,3195751,-5557991,8536970,-25540170,8525972,10151379,10394400 }, + }, + { + { 4024660,-16137551,22436262,12276534,-9099015,-2686099,19698229,11743039,-33302334,8934414 }, + { -15879800,-4525240,-8580747,-2934061,14634845,-698278,-9449077,3137094,-11536886,11721158 }, + { 17555939,-5013938,8268606,2331751,-22738815,9761013,9319229,8835153,-9205489,-1280045 }, + }, + { + { -461409,-7830014,20614118,16688288,-7514766,-4807119,22300304,505429,6108462,-6183415 }, + { -5070281,12367917,-30663534,3234473,32617080,-8422642,29880583,-13483331,-26898490,-7867459 }, + { -31975283,5726539,26934134,10237677,-3173717,-605053,24199304,3795095,7592688,-14992079 }, + }, + { + { 21594432,-14964228,17466408,-4077222,32537084,2739898,6407723,12018833,-28256052,4298412 }, + { -20650503,-11961496,-27236275,570498,3767144,-1717540,13891942,-1569194,13717174,10805743 }, + { -14676630,-15644296,15287174,11927123,24177847,-8175568,-796431,14860609,-26938930,-5863836 }, + }, +}, +{ + { + { 12962541,5311799,-10060768,11658280,18855286,-7954201,13286263,-12808704,-4381056,9882022 }, + { 18512079,11319350,-20123124,15090309,18818594,5271736,-22727904,3666879,-23967430,-3299429 }, + { -6789020,-3146043,16192429,13241070,15898607,-14206114,-10084880,-6661110,-2403099,5276065 }, + }, + { + { 30169808,-5317648,26306206,-11750859,27814964,7069267,7152851,3684982,1449224,13082861 }, + { 10342826,3098505,2119311,193222,25702612,12233820,23697382,15056736,-21016438,-8202000 }, + { -33150110,3261608,22745853,7948688,19370557,-15177665,-26171976,6482814,-10300080,-11060101 }, + }, + { + { 32869458,-5408545,25609743,15678670,-10687769,-15471071,26112421,2521008,-22664288,6904815 }, + { 29506923,4457497,3377935,-9796444,-30510046,12935080,1561737,3841096,-29003639,-6657642 }, + { 10340844,-6630377,-18656632,-2278430,12621151,-13339055,30878497,-11824370,-25584551,5181966 }, + }, + { + { 25940115,-12658025,17324188,-10307374,-8671468,15029094,24396252,-16450922,-2322852,-12388574 }, + { -21765684,9916823,-1300409,4079498,-1028346,11909559,1782390,12641087,20603771,-6561742 }, + { -18882287,-11673380,24849422,11501709,13161720,-4768874,1925523,11914390,4662781,7820689 }, + }, + { + { 12241050,-425982,8132691,9393934,32846760,-1599620,29749456,12172924,16136752,15264020 }, + { -10349955,-14680563,-8211979,2330220,-17662549,-14545780,10658213,6671822,19012087,3772772 }, + { 3753511,-3421066,10617074,2028709,14841030,-6721664,28718732,-15762884,20527771,12988982 }, + }, + { + { -14822485,-5797269,-3707987,12689773,-898983,-10914866,-24183046,-10564943,3299665,-12424953 }, + { -16777703,-15253301,-9642417,4978983,3308785,8755439,6943197,6461331,-25583147,8991218 }, + { -17226263,1816362,-1673288,-6086439,31783888,-8175991,-32948145,7417950,-30242287,1507265 }, + }, + { + { 29692663,6829891,-10498800,4334896,20945975,-11906496,-28887608,8209391,14606362,-10647073 }, + { -3481570,8707081,32188102,5672294,22096700,1711240,-33020695,9761487,4170404,-2085325 }, + { -11587470,14855945,-4127778,-1531857,-26649089,15084046,22186522,16002000,-14276837,-8400798 }, + }, + { + { 
-4811456,13761029,-31703877,-2483919,-3312471,7869047,-7113572,-9620092,13240845,10965870 }, + { -7742563,-8256762,-14768334,-13656260,-23232383,12387166,4498947,14147411,29514390,4302863 }, + { -13413405,-12407859,20757302,-13801832,14785143,8976368,-5061276,-2144373,17846988,-13971927 }, + }, +}, +{ + { + { -2244452,-754728,-4597030,-1066309,-6247172,1455299,-21647728,-9214789,-5222701,12650267 }, + { -9906797,-16070310,21134160,12198166,-27064575,708126,387813,13770293,-19134326,10958663 }, + { 22470984,12369526,23446014,-5441109,-21520802,-9698723,-11772496,-11574455,-25083830,4271862 }, + }, + { + { -25169565,-10053642,-19909332,15361595,-5984358,2159192,75375,-4278529,-32526221,8469673 }, + { 15854970,4148314,-8893890,7259002,11666551,13824734,-30531198,2697372,24154791,-9460943 }, + { 15446137,-15806644,29759747,14019369,30811221,-9610191,-31582008,12840104,24913809,9815020 }, + }, + { + { -4709286,-5614269,-31841498,-12288893,-14443537,10799414,-9103676,13438769,18735128,9466238 }, + { 11933045,9281483,5081055,-5183824,-2628162,-4905629,-7727821,-10896103,-22728655,16199064 }, + { 14576810,379472,-26786533,-8317236,-29426508,-10812974,-102766,1876699,30801119,2164795 }, + }, + { + { 15995086,3199873,13672555,13712240,-19378835,-4647646,-13081610,-15496269,-13492807,1268052 }, + { -10290614,-3659039,-3286592,10948818,23037027,3794475,-3470338,-12600221,-17055369,3565904 }, + { 29210088,-9419337,-5919792,-4952785,10834811,-13327726,-16512102,-10820713,-27162222,-14030531 }, + }, + { + { -13161890,15508588,16663704,-8156150,-28349942,9019123,-29183421,-3769423,2244111,-14001979 }, + { -5152875,-3800936,-9306475,-6071583,16243069,14684434,-25673088,-16180800,13491506,4641841 }, + { 10813417,643330,-19188515,-728916,30292062,-16600078,27548447,-7721242,14476989,-12767431 }, + }, + { + { 10292079,9984945,6481436,8279905,-7251514,7032743,27282937,-1644259,-27912810,12651324 }, + { -31185513,-813383,22271204,11835308,10201545,15351028,17099662,3988035,21721536,-3148940 }, + { 10202177,-6545839,-31373232,-9574638,-32150642,-8119683,-12906320,3852694,13216206,14842320 }, + }, + { + { -15815640,-10601066,-6538952,-7258995,-6984659,-6581778,-31500847,13765824,-27434397,9900184 }, + { 14465505,-13833331,-32133984,-14738873,-27443187,12990492,33046193,15796406,-7051866,-8040114 }, + { 30924417,-8279620,6359016,-12816335,16508377,9071735,-25488601,15413635,9524356,-7018878 }, + }, + { + { 12274201,-13175547,32627641,-1785326,6736625,13267305,5237659,-5109483,15663516,4035784 }, + { -2951309,8903985,17349946,601635,-16432815,-4612556,-13732739,-15889334,-22258478,4659091 }, + { -16916263,-4952973,-30393711,-15158821,20774812,15897498,5736189,15026997,-2178256,-13455585 }, + }, +}, +{ + { + { -8858980,-2219056,28571666,-10155518,-474467,-10105698,-3801496,278095,23440562,-290208 }, + { 10226241,-5928702,15139956,120818,-14867693,5218603,32937275,11551483,-16571960,-7442864 }, + { 17932739,-12437276,-24039557,10749060,11316803,7535897,22503767,5561594,-3646624,3898661 }, + }, + { + { 7749907,-969567,-16339731,-16464,-25018111,15122143,-1573531,7152530,21831162,1245233 }, + { 26958459,-14658026,4314586,8346991,-5677764,11960072,-32589295,-620035,-30402091,-16716212 }, + { -12165896,9166947,33491384,13673479,29787085,13096535,6280834,14587357,-22338025,13987525 }, + }, + { + { -24349909,7778775,21116000,15572597,-4833266,-5357778,-4300898,-5124639,-7469781,-2858068 }, + { 9681908,-6737123,-31951644,13591838,-6883821,386950,31622781,6439245,-14581012,4091397 }, + { 
-8426427,1470727,-28109679,-1596990,3978627,-5123623,-19622683,12092163,29077877,-14741988 }, + }, + { + { 5269168,-6859726,-13230211,-8020715,25932563,1763552,-5606110,-5505881,-20017847,2357889 }, + { 32264008,-15407652,-5387735,-1160093,-2091322,-3946900,23104804,-12869908,5727338,189038 }, + { 14609123,-8954470,-6000566,-16622781,-14577387,-7743898,-26745169,10942115,-25888931,-14884697 }, + }, + { + { 20513500,5557931,-15604613,7829531,26413943,-2019404,-21378968,7471781,13913677,-5137875 }, + { -25574376,11967826,29233242,12948236,-6754465,4713227,-8940970,14059180,12878652,8511905 }, + { -25656801,3393631,-2955415,-7075526,-2250709,9366908,-30223418,6812974,5568676,-3127656 }, + }, + { + { 11630004,12144454,2116339,13606037,27378885,15676917,-17408753,-13504373,-14395196,8070818 }, + { 27117696,-10007378,-31282771,-5570088,1127282,12772488,-29845906,10483306,-11552749,-1028714 }, + { 10637467,-5688064,5674781,1072708,-26343588,-6982302,-1683975,9177853,-27493162,15431203 }, + }, + { + { 20525145,10892566,-12742472,12779443,-29493034,16150075,-28240519,14943142,-15056790,-7935931 }, + { -30024462,5626926,-551567,-9981087,753598,11981191,25244767,-3239766,-3356550,9594024 }, + { -23752644,2636870,-5163910,-10103818,585134,7877383,11345683,-6492290,13352335,-10977084 }, + }, + { + { -1931799,-5407458,3304649,-12884869,17015806,-4877091,-29783850,-7752482,-13215537,-319204 }, + { 20239939,6607058,6203985,3483793,-18386976,-779229,-20723742,15077870,-22750759,14523817 }, + { 27406042,-6041657,27423596,-4497394,4996214,10002360,-28842031,-4545494,-30172742,-4805667 }, + }, +}, +{ + { + { 11374242,12660715,17861383,-12540833,10935568,1099227,-13886076,-9091740,-27727044,11358504 }, + { -12730809,10311867,1510375,10778093,-2119455,-9145702,32676003,11149336,-26123651,4985768 }, + { -19096303,341147,-6197485,-239033,15756973,-8796662,-983043,13794114,-19414307,-15621255 }, + }, + { + { 6490081,11940286,25495923,-7726360,8668373,-8751316,3367603,6970005,-1691065,-9004790 }, + { 1656497,13457317,15370807,6364910,13605745,8362338,-19174622,-5475723,-16796596,-5031438 }, + { -22273315,-13524424,-64685,-4334223,-18605636,-10921968,-20571065,-7007978,-99853,-10237333 }, + }, + { + { 17747465,10039260,19368299,-4050591,-20630635,-16041286,31992683,-15857976,-29260363,-5511971 }, + { 31932027,-4986141,-19612382,16366580,22023614,88450,11371999,-3744247,4882242,-10626905 }, + { 29796507,37186,19818052,10115756,-11829032,3352736,18551198,3272828,-5190932,-4162409 }, + }, + { + { 12501286,4044383,-8612957,-13392385,-32430052,5136599,-19230378,-3529697,330070,-3659409 }, + { 6384877,2899513,17807477,7663917,-2358888,12363165,25366522,-8573892,-271295,12071499 }, + { -8365515,-4042521,25133448,-4517355,-6211027,2265927,-32769618,1936675,-5159697,3829363 }, + }, + { + { 28425966,-5835433,-577090,-4697198,-14217555,6870930,7921550,-6567787,26333140,14267664 }, + { -11067219,11871231,27385719,-10559544,-4585914,-11189312,10004786,-8709488,-21761224,8930324 }, + { -21197785,-16396035,25654216,-1725397,12282012,11008919,1541940,4757911,-26491501,-16408940 }, + }, + { + { 13537262,-7759490,-20604840,10961927,-5922820,-13218065,-13156584,6217254,-15943699,13814990 }, + { -17422573,15157790,18705543,29619,24409717,-260476,27361681,9257833,-1956526,-1776914 }, + { -25045300,-10191966,15366585,15166509,-13105086,8423556,-29171540,12361135,-18685978,4578290 }, + }, + { + { 24579768,3711570,1342322,-11180126,-27005135,14124956,-22544529,14074919,21964432,8235257 }, + { 
-6528613,-2411497,9442966,-5925588,12025640,-1487420,-2981514,-1669206,13006806,2355433 }, + { -16304899,-13605259,-6632427,-5142349,16974359,-10911083,27202044,1719366,1141648,-12796236 }, + }, + { + { -12863944,-13219986,-8318266,-11018091,-6810145,-4843894,13475066,-3133972,32674895,13715045 }, + { 11423335,-5468059,32344216,8962751,24989809,9241752,-13265253,16086212,-28740881,-15642093 }, + { -1409668,12530728,-6368726,10847387,19531186,-14132160,-11709148,7791794,-27245943,4383347 }, + }, +}, +{ + { + { -28970898,5271447,-1266009,-9736989,-12455236,16732599,-4862407,-4906449,27193557,6245191 }, + { -15193956,5362278,-1783893,2695834,4960227,12840725,23061898,3260492,22510453,8577507 }, + { -12632451,11257346,-32692994,13548177,-721004,10879011,31168030,13952092,-29571492,-3635906 }, + }, + { + { 3877321,-9572739,32416692,5405324,-11004407,-13656635,3759769,11935320,5611860,8164018 }, + { -16275802,14667797,15906460,12155291,-22111149,-9039718,32003002,-8832289,5773085,-8422109 }, + { -23788118,-8254300,1950875,8937633,18686727,16459170,-905725,12376320,31632953,190926 }, + }, + { + { -24593607,-16138885,-8423991,13378746,14162407,6901328,-8288749,4508564,-25341555,-3627528 }, + { 8884438,-5884009,6023974,10104341,-6881569,-4941533,18722941,-14786005,-1672488,827625 }, + { -32720583,-16289296,-32503547,7101210,13354605,2659080,-1800575,-14108036,-24878478,1541286 }, + }, + { + { 2901347,-1117687,3880376,-10059388,-17620940,-3612781,-21802117,-3567481,20456845,-1885033 }, + { 27019610,12299467,-13658288,-1603234,-12861660,-4861471,-19540150,-5016058,29439641,15138866 }, + { 21536104,-6626420,-32447818,-10690208,-22408077,5175814,-5420040,-16361163,7779328,109896 }, + }, + { + { 30279744,14648750,-8044871,6425558,13639621,-743509,28698390,12180118,23177719,-554075 }, + { 26572847,3405927,-31701700,12890905,-19265668,5335866,-6493768,2378492,4439158,-13279347 }, + { -22716706,3489070,-9225266,-332753,18875722,-1140095,14819434,-12731527,-17717757,-5461437 }, + }, + { + { -5056483,16566551,15953661,3767752,-10436499,15627060,-820954,2177225,8550082,-15114165 }, + { -18473302,16596775,-381660,15663611,22860960,15585581,-27844109,-3582739,-23260460,-8428588 }, + { -32480551,15707275,-8205912,-5652081,29464558,2713815,-22725137,15860482,-21902570,1494193 }, + }, + { + { -19562091,-14087393,-25583872,-9299552,13127842,759709,21923482,16529112,8742704,12967017 }, + { -28464899,1553205,32536856,-10473729,-24691605,-406174,-8914625,-2933896,-29903758,15553883 }, + { 21877909,3230008,9881174,10539357,-4797115,2841332,11543572,14513274,19375923,-12647961 }, + }, + { + { 8832269,-14495485,13253511,5137575,5037871,4078777,24880818,-6222716,2862653,9455043 }, + { 29306751,5123106,20245049,-14149889,9592566,8447059,-2077124,-2990080,15511449,4789663 }, + { -20679756,7004547,8824831,-9434977,-4045704,-3750736,-5754762,108893,23513200,16652362 }, + }, +}, +{ + { + { -33256173,4144782,-4476029,-6579123,10770039,-7155542,-6650416,-12936300,-18319198,10212860 }, + { 2756081,8598110,7383731,-6859892,22312759,-1105012,21179801,2600940,-9988298,-12506466 }, + { -24645692,13317462,-30449259,-15653928,21365574,-10869657,11344424,864440,-2499677,-16710063 }, + }, + { + { -26432803,6148329,-17184412,-14474154,18782929,-275997,-22561534,211300,2719757,4940997 }, + { -1323882,3911313,-6948744,14759765,-30027150,7851207,21690126,8518463,26699843,5276295 }, + { -13149873,-6429067,9396249,365013,24703301,-10488939,1321586,149635,-15452774,7159369 }, + }, + { + { 
9987780,-3404759,17507962,9505530,9731535,-2165514,22356009,8312176,22477218,-8403385 }, + { 18155857,-16504990,19744716,9006923,15154154,-10538976,24256460,-4864995,-22548173,9334109 }, + { 2986088,-4911893,10776628,-3473844,10620590,-7083203,-21413845,14253545,-22587149,536906 }, + }, + { + { 4377756,8115836,24567078,15495314,11625074,13064599,7390551,10589625,10838060,-15420424 }, + { -19342404,867880,9277171,-3218459,-14431572,-1986443,19295826,-15796950,6378260,699185 }, + { 7895026,4057113,-7081772,-13077756,-17886831,-323126,-716039,15693155,-5045064,-13373962 }, + }, + { + { -7737563,-5869402,-14566319,-7406919,11385654,13201616,31730678,-10962840,-3918636,-9669325 }, + { 10188286,-15770834,-7336361,13427543,22223443,14896287,30743455,7116568,-21786507,5427593 }, + { 696102,13206899,27047647,-10632082,15285305,-9853179,10798490,-4578720,19236243,12477404 }, + }, + { + { -11229439,11243796,-17054270,-8040865,-788228,-8167967,-3897669,11180504,-23169516,7733644 }, + { 17800790,-14036179,-27000429,-11766671,23887827,3149671,23466177,-10538171,10322027,15313801 }, + { 26246234,11968874,32263343,-5468728,6830755,-13323031,-15794704,-101982,-24449242,10890804 }, + }, + { + { -31365647,10271363,-12660625,-6267268,16690207,-13062544,-14982212,16484931,25180797,-5334884 }, + { -586574,10376444,-32586414,-11286356,19801893,10997610,2276632,9482883,316878,13820577 }, + { -9882808,-4510367,-2115506,16457136,-11100081,11674996,30756178,-7515054,30696930,-3712849 }, + }, + { + { 32988917,-9603412,12499366,7910787,-10617257,-11931514,-7342816,-9985397,-32349517,7392473 }, + { -8855661,15927861,9866406,-3649411,-2396914,-16655781,-30409476,-9134995,25112947,-2926644 }, + { -2504044,-436966,25621774,-5678772,15085042,-5479877,-24884878,-13526194,5537438,-13914319 }, + }, +}, +{ + { + { -11225584,2320285,-9584280,10149187,-33444663,5808648,-14876251,-1729667,31234590,6090599 }, + { -9633316,116426,26083934,2897444,-6364437,-2688086,609721,15878753,-6970405,-9034768 }, + { -27757857,247744,-15194774,-9002551,23288161,-10011936,-23869595,6503646,20650474,1804084 }, + }, + { + { -27589786,15456424,8972517,8469608,15640622,4439847,3121995,-10329713,27842616,-202328 }, + { -15306973,2839644,22530074,10026331,4602058,5048462,28248656,5031932,-11375082,12714369 }, + { 20807691,-7270825,29286141,11421711,-27876523,-13868230,-21227475,1035546,-19733229,12796920 }, + }, + { + { 12076899,-14301286,-8785001,-11848922,-25012791,16400684,-17591495,-12899438,3480665,-15182815 }, + { -32361549,5457597,28548107,7833186,7303070,-11953545,-24363064,-15921875,-33374054,2771025 }, + { -21389266,421932,26597266,6860826,22486084,-6737172,-17137485,-4210226,-24552282,15673397 }, + }, + { + { -20184622,2338216,19788685,-9620956,-4001265,-8740893,-20271184,4733254,3727144,-12934448 }, + { 6120119,814863,-11794402,-622716,6812205,-15747771,2019594,7975683,31123697,-10958981 }, + { 30069250,-11435332,30434654,2958439,18399564,-976289,12296869,9204260,-16432438,9648165 }, + }, + { + { 32705432,-1550977,30705658,7451065,-11805606,9631813,3305266,5248604,-26008332,-11377501 }, + { 17219865,2375039,-31570947,-5575615,-19459679,9219903,294711,15298639,2662509,-16297073 }, + { -1172927,-7558695,-4366770,-4287744,-21346413,-8434326,32087529,-1222777,32247248,-14389861 }, + }, + { + { 14312628,1221556,17395390,-8700143,-4945741,-8684635,-28197744,-9637817,-16027623,-13378845 }, + { -1428825,-9678990,-9235681,6549687,-7383069,-468664,23046502,9803137,17597934,2346211 }, + { 
18510800,15337574,26171504,981392,-22241552,7827556,-23491134,-11323352,3059833,-11782870 }, + }, + { + { 10141598,6082907,17829293,-1947643,9830092,13613136,-25556636,-5544586,-33502212,3592096 }, + { 33114168,-15889352,-26525686,-13343397,33076705,8716171,1151462,1521897,-982665,-6837803 }, + { -32939165,-4255815,23947181,-324178,-33072974,-12305637,-16637686,3891704,26353178,693168 }, + }, + { + { 30374239,1595580,-16884039,13186931,4600344,406904,9585294,-400668,31375464,14369965 }, + { -14370654,-7772529,1510301,6434173,-18784789,-6262728,32732230,-13108839,17901441,16011505 }, + { 18171223,-11934626,-12500402,15197122,-11038147,-15230035,-19172240,-16046376,8764035,12309598 }, + }, +}, +{ + { + { 5975908,-5243188,-19459362,-9681747,-11541277,14015782,-23665757,1228319,17544096,-10593782 }, + { 5811932,-1715293,3442887,-2269310,-18367348,-8359541,-18044043,-15410127,-5565381,12348900 }, + { -31399660,11407555,25755363,6891399,-3256938,14872274,-24849353,8141295,-10632534,-585479 }, + }, + { + { -12675304,694026,-5076145,13300344,14015258,-14451394,-9698672,-11329050,30944593,1130208 }, + { 8247766,-6710942,-26562381,-7709309,-14401939,-14648910,4652152,2488540,23550156,-271232 }, + { 17294316,-3788438,7026748,15626851,22990044,113481,2267737,-5908146,-408818,-137719 }, + }, + { + { 16091085,-16253926,18599252,7340678,2137637,-1221657,-3364161,14550936,3260525,-7166271 }, + { -4910104,-13332887,18550887,10864893,-16459325,-7291596,-23028869,-13204905,-12748722,2701326 }, + { -8574695,16099415,4629974,-16340524,-20786213,-6005432,-10018363,9276971,11329923,1862132 }, + }, + { + { 14763076,-15903608,-30918270,3689867,3511892,10313526,-21951088,12219231,-9037963,-940300 }, + { 8894987,-3446094,6150753,3013931,301220,15693451,-31981216,-2909717,-15438168,11595570 }, + { 15214962,3537601,-26238722,-14058872,4418657,-15230761,13947276,10730794,-13489462,-4363670 }, + }, + { + { -2538306,7682793,32759013,263109,-29984731,-7955452,-22332124,-10188635,977108,699994 }, + { -12466472,4195084,-9211532,550904,-15565337,12917920,19118110,-439841,-30534533,-14337913 }, + { 31788461,-14507657,4799989,7372237,8808585,-14747943,9408237,-10051775,12493932,-5409317 }, + }, + { + { -25680606,5260744,-19235809,-6284470,-3695942,16566087,27218280,2607121,29375955,6024730 }, + { 842132,-2794693,-4763381,-8722815,26332018,-12405641,11831880,6985184,-9940361,2854096 }, + { -4847262,-7969331,2516242,-5847713,9695691,-7221186,16512645,960770,12121869,16648078 }, + }, + { + { -15218652,14667096,-13336229,2013717,30598287,-464137,-31504922,-7882064,20237806,2838411 }, + { -19288047,4453152,15298546,-16178388,22115043,-15972604,12544294,-13470457,1068881,-12499905 }, + { -9558883,-16518835,33238498,13506958,30505848,-1114596,-8486907,-2630053,12521378,4845654 }, + }, + { + { -28198521,10744108,-2958380,10199664,7759311,-13088600,3409348,-873400,-6482306,-12885870 }, + { -23561822,6230156,-20382013,10655314,-24040585,-11621172,10477734,-1240216,-3113227,13974498 }, + { 12966261,15550616,-32038948,-1615346,21025980,-629444,5642325,7188737,18895762,12629579 }, + }, +}, +{ + { + { 14741879,-14946887,22177208,-11721237,1279741,8058600,11758140,789443,32195181,3895677 }, + { 10758205,15755439,-4509950,9243698,-4879422,6879879,-2204575,-3566119,-8982069,4429647 }, + { -2453894,15725973,-20436342,-10410672,-5803908,-11040220,-7135870,-11642895,18047436,-15281743 }, + }, + { + { -25173001,-11307165,29759956,11776784,-22262383,-15820455,10993114,-12850837,-17620701,-9408468 }, + { 
21987233,700364,-24505048,14972008,-7774265,-5718395,32155026,2581431,-29958985,8773375 }, + { -25568350,454463,-13211935,16126715,25240068,8594567,20656846,12017935,-7874389,-13920155 }, + }, + { + { 6028182,6263078,-31011806,-11301710,-818919,2461772,-31841174,-5468042,-1721788,-2776725 }, + { -12278994,16624277,987579,-5922598,32908203,1248608,7719845,-4166698,28408820,6816612 }, + { -10358094,-8237829,19549651,-12169222,22082623,16147817,20613181,13982702,-10339570,5067943 }, + }, + { + { -30505967,-3821767,12074681,13582412,-19877972,2443951,-19719286,12746132,5331210,-10105944 }, + { 30528811,3601899,-1957090,4619785,-27361822,-15436388,24180793,-12570394,27679908,-1648928 }, + { 9402404,-13957065,32834043,10838634,-26580150,-13237195,26653274,-8685565,22611444,-12715406 }, + }, + { + { 22190590,1118029,22736441,15130463,-30460692,-5991321,19189625,-4648942,4854859,6622139 }, + { -8310738,-2953450,-8262579,-3388049,-10401731,-271929,13424426,-3567227,26404409,13001963 }, + { -31241838,-15415700,-2994250,8939346,11562230,-12840670,-26064365,-11621720,-15405155,11020693 }, + }, + { + { 1866042,-7949489,-7898649,-10301010,12483315,13477547,3175636,-12424163,28761762,1406734 }, + { -448555,-1777666,13018551,3194501,-9580420,-11161737,24760585,-4347088,25577411,-13378680 }, + { -24290378,4759345,-690653,-1852816,2066747,10693769,-29595790,9884936,-9368926,4745410 }, + }, + { + { -9141284,6049714,-19531061,-4341411,-31260798,9944276,-15462008,-11311852,10931924,-11931931 }, + { -16561513,14112680,-8012645,4817318,-8040464,-11414606,-22853429,10856641,-20470770,13434654 }, + { 22759489,-10073434,-16766264,-1871422,13637442,-10168091,1765144,-12654326,28445307,-5364710 }, + }, + { + { 29875063,12493613,2795536,-3786330,1710620,15181182,-10195717,-8788675,9074234,1167180 }, + { -26205683,11014233,-9842651,-2635485,-26908120,7532294,-18716888,-9535498,3843903,9367684 }, + { -10969595,-6403711,9591134,9582310,11349256,108879,16235123,8601684,-139197,4242895 }, + }, +}, +{ + { + { 22092954,-13191123,-2042793,-11968512,32186753,-11517388,-6574341,2470660,-27417366,16625501 }, + { -11057722,3042016,13770083,-9257922,584236,-544855,-7770857,2602725,-27351616,14247413 }, + { 6314175,-10264892,-32772502,15957557,-10157730,168750,-8618807,14290061,27108877,-1180880 }, + }, + { + { -8586597,-7170966,13241782,10960156,-32991015,-13794596,33547976,-11058889,-27148451,981874 }, + { 22833440,9293594,-32649448,-13618667,-9136966,14756819,-22928859,-13970780,-10479804,-16197962 }, + { -7768587,3326786,-28111797,10783824,19178761,14905060,22680049,13906969,-15933690,3797899 }, + }, + { + { 21721356,-4212746,-12206123,9310182,-3882239,-13653110,23740224,-2709232,20491983,-8042152 }, + { 9209270,-15135055,-13256557,-6167798,-731016,15289673,25947805,15286587,30997318,-6703063 }, + { 7392032,16618386,23946583,-8039892,-13265164,-1533858,-14197445,-2321576,17649998,-250080 }, + }, + { + { -9301088,-14193827,30609526,-3049543,-25175069,-1283752,-15241566,-9525724,-2233253,7662146 }, + { -17558673,1763594,-33114336,15908610,-30040870,-12174295,7335080,-8472199,-3174674,3440183 }, + { -19889700,-5977008,-24111293,-9688870,10799743,-16571957,40450,-4431835,4862400,1133 }, + }, + { + { -32856209,-7873957,-5422389,14860950,-16319031,7956142,7258061,311861,-30594991,-7379421 }, + { -3773428,-1565936,28985340,7499440,24445838,9325937,29727763,16527196,18278453,15405622 }, + { -4381906,8508652,-19898366,-3674424,-5984453,15149970,-13313598,843523,-21875062,13626197 }, + }, + { + { 
2281448,-13487055,-10915418,-2609910,1879358,16164207,-10783882,3953792,13340839,15928663 }, + { 31727126,-7179855,-18437503,-8283652,2875793,-16390330,-25269894,-7014826,-23452306,5964753 }, + { 4100420,-5959452,-17179337,6017714,-18705837,12227141,-26684835,11344144,2538215,-7570755 }, + }, + { + { -9433605,6123113,11159803,-2156608,30016280,14966241,-20474983,1485421,-629256,-15958862 }, + { -26804558,4260919,11851389,9658551,-32017107,16367492,-20205425,-13191288,11659922,-11115118 }, + { 26180396,10015009,-30844224,-8581293,5418197,9480663,2231568,-10170080,33100372,-1306171 }, + }, + { + { 15121113,-5201871,-10389905,15427821,-27509937,-15992507,21670947,4486675,-5931810,-14466380 }, + { 16166486,-9483733,-11104130,6023908,-31926798,-1364923,2340060,-16254968,-10735770,-10039824 }, + { 28042865,-3557089,-12126526,12259706,-3717498,-6945899,6766453,-8689599,18036436,5803270 }, + }, +}, +{ + { + { -817581,6763912,11803561,1585585,10958447,-2671165,23855391,4598332,-6159431,-14117438 }, + { -31031306,-14256194,17332029,-2383520,31312682,-5967183,696309,50292,-20095739,11763584 }, + { -594563,-2514283,-32234153,12643980,12650761,14811489,665117,-12613632,-19773211,-10713562 }, + }, + { + { 30464590,-11262872,-4127476,-12734478,19835327,-7105613,-24396175,2075773,-17020157,992471 }, + { 18357185,-6994433,7766382,16342475,-29324918,411174,14578841,8080033,-11574335,-10601610 }, + { 19598397,10334610,12555054,2555664,18821899,-10339780,21873263,16014234,26224780,16452269 }, + }, + { + { -30223925,5145196,5944548,16385966,3976735,2009897,-11377804,-7618186,-20533829,3698650 }, + { 14187449,3448569,-10636236,-10810935,-22663880,-3433596,7268410,-10890444,27394301,12015369 }, + { 19695761,16087646,28032085,12999827,6817792,11427614,20244189,-1312777,-13259127,-3402461 }, + }, + { + { 30860103,12735208,-1888245,-4699734,-16974906,2256940,-8166013,12298312,-8550524,-10393462 }, + { -5719826,-11245325,-1910649,15569035,26642876,-7587760,-5789354,-15118654,-4976164,12651793 }, + { -2848395,9953421,11531313,-5282879,26895123,-12697089,-13118820,-16517902,9768698,-2533218 }, + }, + { + { -24719459,1894651,-287698,-4704085,15348719,-8156530,32767513,12765450,4940095,10678226 }, + { 18860224,15980149,-18987240,-1562570,-26233012,-11071856,-7843882,13944024,-24372348,16582019 }, + { -15504260,4970268,-29893044,4175593,-20993212,-2199756,-11704054,15444560,-11003761,7989037 }, + }, + { + { 31490452,5568061,-2412803,2182383,-32336847,4531686,-32078269,6200206,-19686113,-14800171 }, + { -17308668,-15879940,-31522777,-2831,-32887382,16375549,8680158,-16371713,28550068,-6857132 }, + { -28126887,-5688091,16837845,-1820458,-6850681,12700016,-30039981,4364038,1155602,5988841 }, + }, + { + { 21890435,-13272907,-12624011,12154349,-7831873,15300496,23148983,-4470481,24618407,8283181 }, + { -33136107,-10512751,9975416,6841041,-31559793,16356536,3070187,-7025928,1466169,10740210 }, + { -1509399,-15488185,-13503385,-10655916,32799044,909394,-13938903,-5779719,-32164649,-15327040 }, + }, + { + { 3960823,-14267803,-28026090,-15918051,-19404858,13146868,15567327,951507,-3260321,-573935 }, + { 24740841,5052253,-30094131,8961361,25877428,6165135,-24368180,14397372,-7380369,-6144105 }, + { -28888365,3510803,-28103278,-1158478,-11238128,-10631454,-15441463,-14453128,-1625486,-6494814 }, + }, +}, +{ + { + { 793299,-9230478,8836302,-6235707,-27360908,-2369593,33152843,-4885251,-9906200,-621852 }, + { 5666233,525582,20782575,-8038419,-24538499,14657740,16099374,1468826,-6171428,-15186581 }, + { 
-4859255,-3779343,-2917758,-6748019,7778750,11688288,-30404353,-9871238,-1558923,-9863646 }, + }, + { + { 10896332,-7719704,824275,472601,-19460308,3009587,25248958,14783338,-30581476,-15757844 }, + { 10566929,12612572,-31944212,11118703,-12633376,12362879,21752402,8822496,24003793,14264025 }, + { 27713862,-7355973,-11008240,9227530,27050101,2504721,23886875,-13117525,13958495,-5732453 }, + }, + { + { -23481610,4867226,-27247128,3900521,29838369,-8212291,-31889399,-10041781,7340521,-15410068 }, + { 4646514,-8011124,-22766023,-11532654,23184553,8566613,31366726,-1381061,-15066784,-10375192 }, + { -17270517,12723032,-16993061,14878794,21619651,-6197576,27584817,3093888,-8843694,3849921 }, + }, + { + { -9064912,2103172,25561640,-15125738,-5239824,9582958,32477045,-9017955,5002294,-15550259 }, + { -12057553,-11177906,21115585,-13365155,8808712,-12030708,16489530,13378448,-25845716,12741426 }, + { -5946367,10645103,-30911586,15390284,-3286982,-7118677,24306472,15852464,28834118,-7646072 }, + }, + { + { -17335748,-9107057,-24531279,9434953,-8472084,-583362,-13090771,455841,20461858,5491305 }, + { 13669248,-16095482,-12481974,-10203039,-14569770,-11893198,-24995986,11293807,-28588204,-9421832 }, + { 28497928,6272777,-33022994,14470570,8906179,-1225630,18504674,-14165166,29867745,-8795943 }, + }, + { + { -16207023,13517196,-27799630,-13697798,24009064,-6373891,-6367600,-13175392,22853429,-4012011 }, + { 24191378,16712145,-13931797,15217831,14542237,1646131,18603514,-11037887,12876623,-2112447 }, + { 17902668,4518229,-411702,-2829247,26878217,5258055,-12860753,608397,16031844,3723494 }, + }, + { + { -28632773,12763728,-20446446,7577504,33001348,-13017745,17558842,-7872890,23896954,-4314245 }, + { -20005381,-12011952,31520464,605201,2543521,5991821,-2945064,7229064,-9919646,-8826859 }, + { 28816045,298879,-28165016,-15920938,19000928,-1665890,-12680833,-2949325,-18051778,-2082915 }, + }, + { + { 16000882,-344896,3493092,-11447198,-29504595,-13159789,12577740,16041268,-19715240,7847707 }, + { 10151868,10572098,27312476,7922682,14825339,4723128,-32855931,-6519018,-10020567,3852848 }, + { -11430470,15697596,-21121557,-4420647,5386314,15063598,16514493,-15932110,29330899,-15076224 }, + }, +}, +{ + { + { -25499735,-4378794,-15222908,-6901211,16615731,2051784,3303702,15490,-27548796,12314391 }, + { 15683520,-6003043,18109120,-9980648,15337968,-5997823,-16717435,15921866,16103996,-3731215 }, + { -23169824,-10781249,13588192,-1628807,-3798557,-1074929,-19273607,5402699,-29815713,-9841101 }, + }, + { + { 23190676,2384583,-32714340,3462154,-29903655,-1529132,-11266856,8911517,-25205859,2739713 }, + { 21374101,-3554250,-33524649,9874411,15377179,11831242,-33529904,6134907,4931255,11987849 }, + { -7732,-2978858,-16223486,7277597,105524,-322051,-31480539,13861388,-30076310,10117930 }, + }, + { + { -29501170,-10744872,-26163768,13051539,-25625564,5089643,-6325503,6704079,12890019,15728940 }, + { -21972360,-11771379,-951059,-4418840,14704840,2695116,903376,-10428139,12885167,8311031 }, + { -17516482,5352194,10384213,-13811658,7506451,13453191,26423267,4384730,1888765,-5435404 }, + }, + { + { -25817338,-3107312,-13494599,-3182506,30896459,-13921729,-32251644,-12707869,-19464434,-3340243 }, + { -23607977,-2665774,-526091,4651136,5765089,4618330,6092245,14845197,17151279,-9854116 }, + { -24830458,-12733720,-15165978,10367250,-29530908,-265356,22825805,-7087279,-16866484,16176525 }, + }, + { + { -23583256,6564961,20063689,3798228,-4740178,7359225,2006182,-10363426,-28746253,-10197509 }, + { 
-10626600,-4486402,-13320562,-5125317,3432136,-6393229,23632037,-1940610,32808310,1099883 }, + { 15030977,5768825,-27451236,-2887299,-6427378,-15361371,-15277896,-6809350,2051441,-15225865 }, + }, + { + { -3362323,-7239372,7517890,9824992,23555850,295369,5148398,-14154188,-22686354,16633660 }, + { 4577086,-16752288,13249841,-15304328,19958763,-14537274,18559670,-10759549,8402478,-9864273 }, + { -28406330,-1051581,-26790155,-907698,-17212414,-11030789,9453451,-14980072,17983010,9967138 }, + }, + { + { -25762494,6524722,26585488,9969270,24709298,1220360,-1677990,7806337,17507396,3651560 }, + { -10420457,-4118111,14584639,15971087,-15768321,8861010,26556809,-5574557,-18553322,-11357135 }, + { 2839101,14284142,4029895,3472686,14402957,12689363,-26642121,8459447,-5605463,-7621941 }, + }, + { + { -4839289,-3535444,9744961,2871048,25113978,3187018,-25110813,-849066,17258084,-7977739 }, + { 18164541,-10595176,-17154882,-1542417,19237078,-9745295,23357533,-15217008,26908270,12150756 }, + { -30264870,-7647865,5112249,-7036672,-1499807,-6974257,43168,-5537701,-32302074,16215819 }, + }, +}, +{ + { + { -6898905,9824394,-12304779,-4401089,-31397141,-6276835,32574489,12532905,-7503072,-8675347 }, + { -27343522,-16515468,-27151524,-10722951,946346,16291093,254968,7168080,21676107,-1943028 }, + { 21260961,-8424752,-16831886,-11920822,-23677961,3968121,-3651949,-6215466,-3556191,-7913075 }, + }, + { + { 16544754,13250366,-16804428,15546242,-4583003,12757258,-2462308,-8680336,-18907032,-9662799 }, + { -2415239,-15577728,18312303,4964443,-15272530,-12653564,26820651,16690659,25459437,-4564609 }, + { -25144690,11425020,28423002,-11020557,-6144921,-15826224,9142795,-2391602,-6432418,-1644817 }, + }, + { + { -23104652,6253476,16964147,-3768872,-25113972,-12296437,-27457225,-16344658,6335692,7249989 }, + { -30333227,13979675,7503222,-12368314,-11956721,-4621693,-30272269,2682242,25993170,-12478523 }, + { 4364628,5930691,32304656,-10044554,-8054781,15091131,22857016,-10598955,31820368,15075278 }, + }, + { + { 31879134,-8918693,17258761,90626,-8041836,-4917709,24162788,-9650886,-17970238,12833045 }, + { 19073683,14851414,-24403169,-11860168,7625278,11091125,-19619190,2074449,-9413939,14905377 }, + { 24483667,-11935567,-2518866,-11547418,-1553130,15355506,-25282080,9253129,27628530,-7555480 }, + }, + { + { 17597607,8340603,19355617,552187,26198470,-3176583,4593324,-9157582,-14110875,15297016 }, + { 510886,14337390,-31785257,16638632,6328095,2713355,-20217417,-11864220,8683221,2921426 }, + { 18606791,11874196,27155355,-5281482,-24031742,6265446,-25178240,-1278924,4674690,13890525 }, + }, + { + { 13609624,13069022,-27372361,-13055908,24360586,9592974,14977157,9835105,4389687,288396 }, + { 9922506,-519394,13613107,5883594,-18758345,-434263,-12304062,8317628,23388070,16052080 }, + { 12720016,11937594,-31970060,-5028689,26900120,8561328,-20155687,-11632979,-14754271,-10812892 }, + }, + { + { 15961858,14150409,26716931,-665832,-22794328,13603569,11829573,7467844,-28822128,929275 }, + { 11038231,-11582396,-27310482,-7316562,-10498527,-16307831,-23479533,-9371869,-21393143,2465074 }, + { 20017163,-4323226,27915242,1529148,12396362,15675764,13817261,-9658066,2463391,-4622140 }, + }, + { + { -16358878,-12663911,-12065183,4996454,-1256422,1073572,9583558,12851107,4003896,12673717 }, + { -1731589,-15155870,-3262930,16143082,19294135,13385325,14741514,-9103726,7903886,2348101 }, + { 24536016,-16515207,12715592,-3862155,1511293,10047386,-3842346,-7129159,-28377538,10048127 }, + }, +}, +{ + { + { 
-12622226,-6204820,30718825,2591312,-10617028,12192840,18873298,-7297090,-32297756,15221632 }, + { -26478122,-11103864,11546244,-1852483,9180880,7656409,-21343950,2095755,29769758,6593415 }, + { -31994208,-2907461,4176912,3264766,12538965,-868111,26312345,-6118678,30958054,8292160 }, + }, + { + { 31429822,-13959116,29173532,15632448,12174511,-2760094,32808831,3977186,26143136,-3148876 }, + { 22648901,1402143,-22799984,13746059,7936347,365344,-8668633,-1674433,-3758243,-2304625 }, + { -15491917,8012313,-2514730,-12702462,-23965846,-10254029,-1612713,-1535569,-16664475,8194478 }, + }, + { + { 27338066,-7507420,-7414224,10140405,-19026427,-6589889,27277191,8855376,28572286,3005164 }, + { 26287124,4821776,25476601,-4145903,-3764513,-15788984,-18008582,1182479,-26094821,-13079595 }, + { -7171154,3178080,23970071,6201893,-17195577,-4489192,-21876275,-13982627,32208683,-1198248 }, + }, + { + { -16657702,2817643,-10286362,14811298,6024667,13349505,-27315504,-10497842,-27672585,-11539858 }, + { 15941029,-9405932,-21367050,8062055,31876073,-238629,-15278393,-1444429,15397331,-4130193 }, + { 8934485,-13485467,-23286397,-13423241,-32446090,14047986,31170398,-1441021,-27505566,15087184 }, + }, + { + { -18357243,-2156491,24524913,-16677868,15520427,-6360776,-15502406,11461896,16788528,-5868942 }, + { -1947386,16013773,21750665,3714552,-17401782,-16055433,-3770287,-10323320,31322514,-11615635 }, + { 21426655,-5650218,-13648287,-5347537,-28812189,-4920970,-18275391,-14621414,13040862,-12112948 }, + }, + { + { 11293895,12478086,-27136401,15083750,-29307421,14748872,14555558,-13417103,1613711,4896935 }, + { -25894883,15323294,-8489791,-8057900,25967126,-13425460,2825960,-4897045,-23971776,-11267415 }, + { -15924766,-5229880,-17443532,6410664,3622847,10243618,20615400,12405433,-23753030,-8436416 }, + }, + { + { -7091295,12556208,-20191352,9025187,-17072479,4333801,4378436,2432030,23097949,-566018 }, + { 4565804,-16025654,20084412,-7842817,1724999,189254,24767264,10103221,-18512313,2424778 }, + { 366633,-11976806,8173090,-6890119,30788634,5745705,-7168678,1344109,-3642553,12412659 }, + }, + { + { -24001791,7690286,14929416,-168257,-32210835,-13412986,24162697,-15326504,-3141501,11179385 }, + { 18289522,-14724954,8056945,16430056,-21729724,7842514,-6001441,-1486897,-18684645,-11443503 }, + { 476239,6601091,-6152790,-9723375,17503545,-4863900,27672959,13403813,11052904,5219329 }, + }, +}, +{ + { + { 20678546,-8375738,-32671898,8849123,-5009758,14574752,31186971,-3973730,9014762,-8579056 }, + { -13644050,-10350239,-15962508,5075808,-1514661,-11534600,-33102500,9160280,8473550,-3256838 }, + { 24900749,14435722,17209120,-15292541,-22592275,9878983,-7689309,-16335821,-24568481,11788948 }, + }, + { + { -3118155,-11395194,-13802089,14797441,9652448,-6845904,-20037437,10410733,-24568470,-1458691 }, + { -15659161,16736706,-22467150,10215878,-9097177,7563911,11871841,-12505194,-18513325,8464118 }, + { -23400612,8348507,-14585951,-861714,-3950205,-6373419,14325289,8628612,33313881,-8370517 }, + }, + { + { -20186973,-4967935,22367356,5271547,-1097117,-4788838,-24805667,-10236854,-8940735,-5818269 }, + { -6948785,-1795212,-32625683,-16021179,32635414,-7374245,15989197,-12838188,28358192,-4253904 }, + { -23561781,-2799059,-32351682,-1661963,-9147719,10429267,-16637684,4072016,-5351664,5596589 }, + }, + { + { -28236598,-3390048,12312896,6213178,3117142,16078565,29266239,2557221,1768301,15373193 }, + { -7243358,-3246960,-4593467,-7553353,-127927,-912245,-1090902,-4504991,-24660491,3442910 }, + { 
-30210571,5124043,14181784,8197961,18964734,-11939093,22597931,7176455,-18585478,13365930 }, + }, + { + { -7877390,-1499958,8324673,4690079,6261860,890446,24538107,-8570186,-9689599,-3031667 }, + { 25008904,-10771599,-4305031,-9638010,16265036,15721635,683793,-11823784,15723479,-15163481 }, + { -9660625,12374379,-27006999,-7026148,-7724114,-12314514,11879682,5400171,519526,-1235876 }, + }, + { + { 22258397,-16332233,-7869817,14613016,-22520255,-2950923,-20353881,7315967,16648397,7605640 }, + { -8081308,-8464597,-8223311,9719710,19259459,-15348212,23994942,-5281555,-9468848,4763278 }, + { -21699244,9220969,-15730624,1084137,-25476107,-2852390,31088447,-7764523,-11356529,728112 }, + }, + { + { 26047220,-11751471,-6900323,-16521798,24092068,9158119,-4273545,-12555558,-29365436,-5498272 }, + { 17510331,-322857,5854289,8403524,17133918,-3112612,-28111007,12327945,10750447,10014012 }, + { -10312768,3936952,9156313,-8897683,16498692,-994647,-27481051,-666732,3424691,7540221 }, + }, + { + { 30322361,-6964110,11361005,-4143317,7433304,4989748,-7071422,-16317219,-9244265,15258046 }, + { 13054562,-2779497,19155474,469045,-12482797,4566042,5631406,2711395,1062915,-5136345 }, + { -19240248,-11254599,-29509029,-7499965,-5835763,13005411,-6066489,12194497,32960380,1459310 }, + }, +}, +{ + { + { 19852034,7027924,23669353,10020366,8586503,-6657907,394197,-6101885,18638003,-11174937 }, + { 31395534,15098109,26581030,8030562,-16527914,-5007134,9012486,-7584354,-6643087,-5442636 }, + { -9192165,-2347377,-1997099,4529534,25766844,607986,-13222,9677543,-32294889,-6456008 }, + }, + { + { -2444496,-149937,29348902,8186665,1873760,12489863,-30934579,-7839692,-7852844,-8138429 }, + { -15236356,-15433509,7766470,746860,26346930,-10221762,-27333451,10754588,-9431476,5203576 }, + { 31834314,14135496,-770007,5159118,20917671,-16768096,-7467973,-7337524,31809243,7347066 }, + }, + { + { -9606723,-11874240,20414459,13033986,13716524,-11691881,19797970,-12211255,15192876,-2087490 }, + { -12663563,-2181719,1168162,-3804809,26747877,-14138091,10609330,12694420,33473243,-13382104 }, + { 33184999,11180355,15832085,-11385430,-1633671,225884,15089336,-11023903,-6135662,14480053 }, + }, + { + { 31308717,-5619998,31030840,-1897099,15674547,-6582883,5496208,13685227,27595050,8737275 }, + { -20318852,-15150239,10933843,-16178022,8335352,-7546022,-31008351,-12610604,26498114,66511 }, + { 22644454,-8761729,-16671776,4884562,-3105614,-13559366,30540766,-4286747,-13327787,-7515095 }, + }, + { + { -28017847,9834845,18617207,-2681312,-3401956,-13307506,8205540,13585437,-17127465,15115439 }, + { 23711543,-672915,31206561,-8362711,6164647,-9709987,-33535882,-1426096,8236921,16492939 }, + { -23910559,-13515526,-26299483,-4503841,25005590,-7687270,19574902,10071562,6708380,-6222424 }, + }, + { + { 2101391,-4930054,19702731,2367575,-15427167,1047675,5301017,9328700,29955601,-11678310 }, + { 3096359,9271816,-21620864,-15521844,-14847996,-7592937,-25892142,-12635595,-9917575,6216608 }, + { -32615849,338663,-25195611,2510422,-29213566,-13820213,24822830,-6146567,-26767480,7525079 }, + }, + { + { -23066649,-13985623,16133487,-7896178,-3389565,778788,-910336,-2782495,-19386633,11994101 }, + { 21691500,-13624626,-641331,-14367021,3285881,-3483596,-25064666,9718258,-7477437,13381418 }, + { 18445390,-4202236,14979846,11622458,-1727110,-3582980,23111648,-6375247,28535282,15779576 }, + }, + { + { 30098053,3089662,-9234387,16662135,-21306940,11308411,-14068454,12021730,9955285,-16303356 }, + { 
9734894,-14576830,-7473633,-9138735,2060392,11313496,-18426029,9924399,20194861,13380996 }, + { -26378102,-7965207,-22167821,15789297,-18055342,-6168792,-1984914,15707771,26342023,10146099 }, + }, +}, +{ + { + { -26016874,-219943,21339191,-41388,19745256,-2878700,-29637280,2227040,21612326,-545728 }, + { -13077387,1184228,23562814,-5970442,-20351244,-6348714,25764461,12243797,-20856566,11649658 }, + { -10031494,11262626,27384172,2271902,26947504,-15997771,39944,6114064,33514190,2333242 }, + }, + { + { -21433588,-12421821,8119782,7219913,-21830522,-9016134,-6679750,-12670638,24350578,-13450001 }, + { -4116307,-11271533,-23886186,4843615,-30088339,690623,-31536088,-10406836,8317860,12352766 }, + { 18200138,-14475911,-33087759,-2696619,-23702521,-9102511,-23552096,-2287550,20712163,6719373 }, + }, + { + { 26656208,6075253,-7858556,1886072,-28344043,4262326,11117530,-3763210,26224235,-3297458 }, + { -17168938,-14854097,-3395676,-16369877,-19954045,14050420,21728352,9493610,18620611,-16428628 }, + { -13323321,13325349,11432106,5964811,18609221,6062965,-5269471,-9725556,-30701573,-16479657 }, + }, + { + { -23860538,-11233159,26961357,1640861,-32413112,-16737940,12248509,-5240639,13735342,1934062 }, + { 25089769,6742589,17081145,-13406266,21909293,-16067981,-15136294,-3765346,-21277997,5473616 }, + { 31883677,-7961101,1083432,-11572403,22828471,13290673,-7125085,12469656,29111212,-5451014 }, + }, + { + { 24244947,-15050407,-26262976,2791540,-14997599,16666678,24367466,6388839,-10295587,452383 }, + { -25640782,-3417841,5217916,16224624,19987036,-4082269,-24236251,-5915248,15766062,8407814 }, + { -20406999,13990231,15495425,16395525,5377168,15166495,-8917023,-4388953,-8067909,2276718 }, + }, + { + { 30157918,12924066,-17712050,9245753,19895028,3368142,-23827587,5096219,22740376,-7303417 }, + { 2041139,-14256350,7783687,13876377,-25946985,-13352459,24051124,13742383,-15637599,13295222 }, + { 33338237,-8505733,12532113,7977527,9106186,-1715251,-17720195,-4612972,-4451357,-14669444 }, + }, + { + { -20045281,5454097,-14346548,6447146,28862071,1883651,-2469266,-4141880,7770569,9620597 }, + { 23208068,7979712,33071466,8149229,1758231,-10834995,30945528,-1694323,-33502340,-14767970 }, + { 1439958,-16270480,-1079989,-793782,4625402,10647766,-5043801,1220118,30494170,-11440799 }, + }, + { + { -5037580,-13028295,-2970559,-3061767,15640974,-6701666,-26739026,926050,-1684339,-13333647 }, + { 13908495,-3549272,30919928,-6273825,-21521863,7989039,9021034,9078865,3353509,4033511 }, + { -29663431,-15113610,32259991,-344482,24295849,-12912123,23161163,8839127,27485041,7356032 }, + }, +}, +{ + { + { 9661027,705443,11980065,-5370154,-1628543,14661173,-6346142,2625015,28431036,-16771834 }, + { -23839233,-8311415,-25945511,7480958,-17681669,-8354183,-22545972,14150565,15970762,4099461 }, + { 29262576,16756590,26350592,-8793563,8529671,-11208050,13617293,-9937143,11465739,8317062 }, + }, + { + { -25493081,-6962928,32500200,-9419051,-23038724,-2302222,14898637,3848455,20969334,-5157516 }, + { -20384450,-14347713,-18336405,13884722,-33039454,2842114,-21610826,-3649888,11177095,14989547 }, + { -24496721,-11716016,16959896,2278463,12066309,10137771,13515641,2581286,-28487508,9930240 }, + }, + { + { -17751622,-2097826,16544300,-13009300,-15914807,-14949081,18345767,-13403753,16291481,-5314038 }, + { -33229194,2553288,32678213,9875984,8534129,6889387,-9676774,6957617,4368891,9788741 }, + { 16660756,7281060,-10830758,12911820,20108584,-8101676,-21722536,-8613148,16250552,-11111103 }, + }, + { + { 
-19765507,2390526,-16551031,14161980,1905286,6414907,4689584,10604807,-30190403,4782747 }, + { -1354539,14736941,-7367442,-13292886,7710542,-14155590,-9981571,4383045,22546403,437323 }, + { 31665577,-12180464,-16186830,1491339,-18368625,3294682,27343084,2786261,-30633590,-14097016 }, + }, + { + { -14467279,-683715,-33374107,7448552,19294360,14334329,-19690631,2355319,-19284671,-6114373 }, + { 15121312,-15796162,6377020,-6031361,-10798111,-12957845,18952177,15496498,-29380133,11754228 }, + { -2637277,-13483075,8488727,-14303896,12728761,-1622493,7141596,11724556,22761615,-10134141 }, + }, + { + { 16918416,11729663,-18083579,3022987,-31015732,-13339659,-28741185,-12227393,32851222,11717399 }, + { 11166634,7338049,-6722523,4531520,-29468672,-7302055,31474879,3483633,-1193175,-4030831 }, + { -185635,9921305,31456609,-13536438,-12013818,13348923,33142652,6546660,-19985279,-3948376 }, + }, + { + { -32460596,11266712,-11197107,-7899103,31703694,3855903,-8537131,-12833048,-30772034,-15486313 }, + { -18006477,12709068,3991746,-6479188,-21491523,-10550425,-31135347,-16049879,10928917,3011958 }, + { -6957757,-15594337,31696059,334240,29576716,14796075,-30831056,-12805180,18008031,10258577 }, + }, + { + { -22448644,15655569,7018479,-4410003,-30314266,-1201591,-1853465,1367120,25127874,6671743 }, + { 29701166,-14373934,-10878120,9279288,-17568,13127210,21382910,11042292,25838796,4642684 }, + { -20430234,14955537,-24126347,8124619,-5369288,-5990470,30468147,-13900640,18423289,4177476 }, + }, +}, diff --git a/src/ed25519-supercop-ref10/base.py b/src/ed25519-supercop-ref10/base.py new file mode 100644 index 0000000..84accc8 --- /dev/null +++ b/src/ed25519-supercop-ref10/base.py @@ -0,0 +1,65 @@ +b = 256 +q = 2**255 - 19 +l = 2**252 + 27742317777372353535851937790883648493 + +def expmod(b,e,m): + if e == 0: return 1 + t = expmod(b,e/2,m)**2 % m + if e & 1: t = (t*b) % m + return t + +def inv(x): + return expmod(x,q-2,q) + +d = -121665 * inv(121666) +I = expmod(2,(q-1)/4,q) + +def xrecover(y): + xx = (y*y-1) * inv(d*y*y+1) + x = expmod(xx,(q+3)/8,q) + if (x*x - xx) % q != 0: x = (x*I) % q + if x % 2 != 0: x = q-x + return x + +By = 4 * inv(5) +Bx = xrecover(By) +B = [Bx % q,By % q] + +def edwards(P,Q): + x1 = P[0] + y1 = P[1] + x2 = Q[0] + y2 = Q[1] + x3 = (x1*y2+x2*y1) * inv(1+d*x1*x2*y1*y2) + y3 = (y1*y2+x1*x2) * inv(1-d*x1*x2*y1*y2) + return [x3 % q,y3 % q] + +def radix255(x): + x = x % q + if x + x > q: x -= q + x = [x,0,0,0,0,0,0,0,0,0] + bits = [26,25,26,25,26,25,26,25,26,25] + for i in range(9): + carry = (x[i] + 2**(bits[i]-1)) / 2**bits[i] + x[i] -= carry * 2**bits[i] + x[i + 1] += carry + result = "" + for i in range(9): + result = result+str(x[i])+"," + result = result+str(x[9]) + return result + +Bi = B +for i in range(32): + print "{" + Bij = Bi + for j in range(8): + print " {" + print " {",radix255(Bij[1]+Bij[0]),"}," + print " {",radix255(Bij[1]-Bij[0]),"}," + print " {",radix255(2*d*Bij[0]*Bij[1]),"}," + Bij = edwards(Bij,Bi) + print " }," + print "}," + for k in range(8): + Bi = edwards(Bi,Bi) diff --git a/src/ed25519-supercop-ref10/base2.h b/src/ed25519-supercop-ref10/base2.h new file mode 100644 index 0000000..8c53844 --- /dev/null +++ b/src/ed25519-supercop-ref10/base2.h @@ -0,0 +1,40 @@ + { + { 25967493,-14356035,29566456,3660896,-12694345,4014787,27544626,-11754271,-6079156,2047605 }, + { -12545711,934262,-2722910,3049990,-727428,9406986,12720692,5043384,19500929,-15469378 }, + { -8738181,4489570,9688441,-14785194,10184609,-12363380,29287919,11864899,-24514362,-4438546 }, + }, + { + { 
15636291,-9688557,24204773,-7912398,616977,-16685262,27787600,-14772189,28944400,-1550024 }, + { 16568933,4717097,-11556148,-1102322,15682896,-11807043,16354577,-11775962,7689662,11199574 }, + { 30464156,-5976125,-11779434,-15670865,23220365,15915852,7512774,10017326,-17749093,-9920357 }, + }, + { + { 10861363,11473154,27284546,1981175,-30064349,12577861,32867885,14515107,-15438304,10819380 }, + { 4708026,6336745,20377586,9066809,-11272109,6594696,-25653668,12483688,-12668491,5581306 }, + { 19563160,16186464,-29386857,4097519,10237984,-4348115,28542350,13850243,-23678021,-15815942 }, + }, + { + { 5153746,9909285,1723747,-2777874,30523605,5516873,19480852,5230134,-23952439,-15175766 }, + { -30269007,-3463509,7665486,10083793,28475525,1649722,20654025,16520125,30598449,7715701 }, + { 28881845,14381568,9657904,3680757,-20181635,7843316,-31400660,1370708,29794553,-1409300 }, + }, + { + { -22518993,-6692182,14201702,-8745502,-23510406,8844726,18474211,-1361450,-13062696,13821877 }, + { -6455177,-7839871,3374702,-4740862,-27098617,-10571707,31655028,-7212327,18853322,-14220951 }, + { 4566830,-12963868,-28974889,-12240689,-7602672,-2830569,-8514358,-10431137,2207753,-3209784 }, + }, + { + { -25154831,-4185821,29681144,7868801,-6854661,-9423865,-12437364,-663000,-31111463,-16132436 }, + { 25576264,-2703214,7349804,-11814844,16472782,9300885,3844789,15725684,171356,6466918 }, + { 23103977,13316479,9739013,-16149481,817875,-15038942,8965339,-14088058,-30714912,16193877 }, + }, + { + { -33521811,3180713,-2394130,14003687,-16903474,-16270840,17238398,4729455,-18074513,9256800 }, + { -25182317,-4174131,32336398,5036987,-21236817,11360617,22616405,9761698,-19827198,630305 }, + { -13720693,2639453,-24237460,-7406481,9494427,-5774029,-6554551,-15960994,-2449256,-14291300 }, + }, + { + { -3151181,-5046075,9282714,6866145,-31907062,-863023,-18940575,15033784,25105118,-7894876 }, + { -24326370,15950226,-31801215,-14592823,-11662737,-5090925,1573892,-2625887,2198790,-15804619 }, + { -3099351,10324967,-2241613,7453183,-5446979,-2735503,-13812022,-16236442,-32461234,-12290683 }, + }, diff --git a/src/ed25519-supercop-ref10/base2.py b/src/ed25519-supercop-ref10/base2.py new file mode 100644 index 0000000..5e4e873 --- /dev/null +++ b/src/ed25519-supercop-ref10/base2.py @@ -0,0 +1,60 @@ +b = 256 +q = 2**255 - 19 +l = 2**252 + 27742317777372353535851937790883648493 + +def expmod(b,e,m): + if e == 0: return 1 + t = expmod(b,e/2,m)**2 % m + if e & 1: t = (t*b) % m + return t + +def inv(x): + return expmod(x,q-2,q) + +d = -121665 * inv(121666) +I = expmod(2,(q-1)/4,q) + +def xrecover(y): + xx = (y*y-1) * inv(d*y*y+1) + x = expmod(xx,(q+3)/8,q) + if (x*x - xx) % q != 0: x = (x*I) % q + if x % 2 != 0: x = q-x + return x + +By = 4 * inv(5) +Bx = xrecover(By) +B = [Bx % q,By % q] + +def edwards(P,Q): + x1 = P[0] + y1 = P[1] + x2 = Q[0] + y2 = Q[1] + x3 = (x1*y2+x2*y1) * inv(1+d*x1*x2*y1*y2) + y3 = (y1*y2+x1*x2) * inv(1-d*x1*x2*y1*y2) + return [x3 % q,y3 % q] + +def radix255(x): + x = x % q + if x + x > q: x -= q + x = [x,0,0,0,0,0,0,0,0,0] + bits = [26,25,26,25,26,25,26,25,26,25] + for i in range(9): + carry = (x[i] + 2**(bits[i]-1)) / 2**bits[i] + x[i] -= carry * 2**bits[i] + x[i + 1] += carry + result = "" + for i in range(9): + result = result+str(x[i])+"," + result = result+str(x[9]) + return result + +Bi = B + +for i in range(8): + print " {" + print " {",radix255(Bi[1]+Bi[0]),"}," + print " {",radix255(Bi[1]-Bi[0]),"}," + print " {",radix255(2*d*Bi[0]*Bi[1]),"}," + print " }," + Bi = edwards(B,edwards(B,Bi)) diff 
--git a/src/ed25519-supercop-ref10/d.h b/src/ed25519-supercop-ref10/d.h new file mode 100644 index 0000000..e25f578 --- /dev/null +++ b/src/ed25519-supercop-ref10/d.h @@ -0,0 +1 @@ +-10913610,13857413,-15372611,6949391,114729,-8787816,-6275908,-3247719,-18696448,-12055116 diff --git a/src/ed25519-supercop-ref10/d.py b/src/ed25519-supercop-ref10/d.py new file mode 100644 index 0000000..8995bb8 --- /dev/null +++ b/src/ed25519-supercop-ref10/d.py @@ -0,0 +1,28 @@ +q = 2**255 - 19 + +def expmod(b,e,m): + if e == 0: return 1 + t = expmod(b,e/2,m)**2 % m + if e & 1: t = (t*b) % m + return t + +def inv(x): + return expmod(x,q-2,q) + +def radix255(x): + x = x % q + if x + x > q: x -= q + x = [x,0,0,0,0,0,0,0,0,0] + bits = [26,25,26,25,26,25,26,25,26,25] + for i in range(9): + carry = (x[i] + 2**(bits[i]-1)) / 2**bits[i] + x[i] -= carry * 2**bits[i] + x[i + 1] += carry + result = "" + for i in range(9): + result = result+str(x[i])+"," + result = result+str(x[9]) + return result + +d = -121665 * inv(121666) +print radix255(d) diff --git a/src/ed25519-supercop-ref10/d2.h b/src/ed25519-supercop-ref10/d2.h new file mode 100644 index 0000000..01aaec7 --- /dev/null +++ b/src/ed25519-supercop-ref10/d2.h @@ -0,0 +1 @@ +-21827239,-5839606,-30745221,13898782,229458,15978800,-12551817,-6495438,29715968,9444199 diff --git a/src/ed25519-supercop-ref10/d2.py b/src/ed25519-supercop-ref10/d2.py new file mode 100644 index 0000000..7984175 --- /dev/null +++ b/src/ed25519-supercop-ref10/d2.py @@ -0,0 +1,28 @@ +q = 2**255 - 19 + +def expmod(b,e,m): + if e == 0: return 1 + t = expmod(b,e/2,m)**2 % m + if e & 1: t = (t*b) % m + return t + +def inv(x): + return expmod(x,q-2,q) + +def radix255(x): + x = x % q + if x + x > q: x -= q + x = [x,0,0,0,0,0,0,0,0,0] + bits = [26,25,26,25,26,25,26,25,26,25] + for i in range(9): + carry = (x[i] + 2**(bits[i]-1)) / 2**bits[i] + x[i] -= carry * 2**bits[i] + x[i + 1] += carry + result = "" + for i in range(9): + result = result+str(x[i])+"," + result = result+str(x[9]) + return result + +d = -121665 * inv(121666) +print radix255(d*2) diff --git a/src/ed25519-supercop-ref10/fe.h b/src/ed25519-supercop-ref10/fe.h new file mode 100644 index 0000000..60c308b --- /dev/null +++ b/src/ed25519-supercop-ref10/fe.h @@ -0,0 +1,56 @@ +#ifndef FE_H +#define FE_H + +#include "crypto_int32.h" + +typedef crypto_int32 fe[10]; + +/* +fe means field element. +Here the field is \Z/(2^255-19). +An element t, entries t[0]...t[9], represents the integer +t[0]+2^26 t[1]+2^51 t[2]+2^77 t[3]+2^102 t[4]+...+2^230 t[9]. +Bounds on each t[i] vary depending on context. 
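As a concrete check of this packing, here is a minimal Python 3 sketch (an illustration under the representation just described, not part of the imported sources; the names BITS, OFFSETS, and fe_to_int are invented here):

    # Limb i carries BITS[i] bits, alternating 26 and 25 ("radix 2^25.5").
    BITS = [26, 25, 26, 25, 26, 25, 26, 25, 26, 25]
    OFFSETS = [sum(BITS[:i]) for i in range(10)]  # [0,26,51,77,102,128,153,179,204,230]

    def fe_to_int(t):
        # Evaluates t[0] + 2^26*t[1] + 2^51*t[2] + ... + 2^230*t[9].
        return sum(ti << off for ti, off in zip(t, OFFSETS))

    q = 2**255 - 19
    assert fe_to_int([1] + [0] * 9) == 1
    # 2^255 folds back to 19, the identity the carry chains below rely on:
    assert fe_to_int([-19] + [0] * 8 + [1 << 25]) % q == 0

Note that the cumulative sums of BITS reproduce exactly the exponents 0, 26, 51, 77, 102, ..., 230 listed in the comment above.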
+*/ + +#define fe_frombytes crypto_sign_ed25519_ref10_fe_frombytes +#define fe_tobytes crypto_sign_ed25519_ref10_fe_tobytes +#define fe_copy crypto_sign_ed25519_ref10_fe_copy +#define fe_isnonzero crypto_sign_ed25519_ref10_fe_isnonzero +#define fe_isnegative crypto_sign_ed25519_ref10_fe_isnegative +#define fe_0 crypto_sign_ed25519_ref10_fe_0 +#define fe_1 crypto_sign_ed25519_ref10_fe_1 +#define fe_cswap crypto_sign_ed25519_ref10_fe_cswap +#define fe_cmov crypto_sign_ed25519_ref10_fe_cmov +#define fe_add crypto_sign_ed25519_ref10_fe_add +#define fe_sub crypto_sign_ed25519_ref10_fe_sub +#define fe_neg crypto_sign_ed25519_ref10_fe_neg +#define fe_mul crypto_sign_ed25519_ref10_fe_mul +#define fe_sq crypto_sign_ed25519_ref10_fe_sq +#define fe_sq2 crypto_sign_ed25519_ref10_fe_sq2 +#define fe_mul121666 crypto_sign_ed25519_ref10_fe_mul121666 +#define fe_invert crypto_sign_ed25519_ref10_fe_invert +#define fe_pow22523 crypto_sign_ed25519_ref10_fe_pow22523 + +extern void fe_frombytes(fe,const unsigned char *); +extern void fe_tobytes(unsigned char *,const fe); + +extern void fe_copy(fe,const fe); +extern int fe_isnonzero(const fe); +extern int fe_isnegative(const fe); +extern void fe_0(fe); +extern void fe_1(fe); +extern void fe_cswap(fe,fe,unsigned int); +extern void fe_cmov(fe,const fe,unsigned int); + +extern void fe_add(fe,const fe,const fe); +extern void fe_sub(fe,const fe,const fe); +extern void fe_neg(fe,const fe); +extern void fe_mul(fe,const fe,const fe); +extern void fe_sq(fe,const fe); +extern void fe_sq2(fe,const fe); +extern void fe_mul121666(fe,const fe); +extern void fe_invert(fe,const fe); +extern void fe_pow22523(fe,const fe); + +#endif diff --git a/src/ed25519-supercop-ref10/fe_0.c b/src/ed25519-supercop-ref10/fe_0.c new file mode 100644 index 0000000..ec879d7 --- /dev/null +++ b/src/ed25519-supercop-ref10/fe_0.c @@ -0,0 +1,19 @@ +#include "fe.h" + +/* +h = 0 +*/ + +void fe_0(fe h) +{ + h[0] = 0; + h[1] = 0; + h[2] = 0; + h[3] = 0; + h[4] = 0; + h[5] = 0; + h[6] = 0; + h[7] = 0; + h[8] = 0; + h[9] = 0; +} diff --git a/src/ed25519-supercop-ref10/fe_1.c b/src/ed25519-supercop-ref10/fe_1.c new file mode 100644 index 0000000..8cf7784 --- /dev/null +++ b/src/ed25519-supercop-ref10/fe_1.c @@ -0,0 +1,19 @@ +#include "fe.h" + +/* +h = 1 +*/ + +void fe_1(fe h) +{ + h[0] = 1; + h[1] = 0; + h[2] = 0; + h[3] = 0; + h[4] = 0; + h[5] = 0; + h[6] = 0; + h[7] = 0; + h[8] = 0; + h[9] = 0; +} diff --git a/src/ed25519-supercop-ref10/fe_add.c b/src/ed25519-supercop-ref10/fe_add.c new file mode 100644 index 0000000..e6a81da --- /dev/null +++ b/src/ed25519-supercop-ref10/fe_add.c @@ -0,0 +1,57 @@ +#include "fe.h" + +/* +h = f + g +Can overlap h with f or g. + +Preconditions: + |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. + |g| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. + +Postconditions: + |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. 
+*/ + +void fe_add(fe h,const fe f,const fe g) +{ + crypto_int32 f0 = f[0]; + crypto_int32 f1 = f[1]; + crypto_int32 f2 = f[2]; + crypto_int32 f3 = f[3]; + crypto_int32 f4 = f[4]; + crypto_int32 f5 = f[5]; + crypto_int32 f6 = f[6]; + crypto_int32 f7 = f[7]; + crypto_int32 f8 = f[8]; + crypto_int32 f9 = f[9]; + crypto_int32 g0 = g[0]; + crypto_int32 g1 = g[1]; + crypto_int32 g2 = g[2]; + crypto_int32 g3 = g[3]; + crypto_int32 g4 = g[4]; + crypto_int32 g5 = g[5]; + crypto_int32 g6 = g[6]; + crypto_int32 g7 = g[7]; + crypto_int32 g8 = g[8]; + crypto_int32 g9 = g[9]; + crypto_int32 h0 = f0 + g0; + crypto_int32 h1 = f1 + g1; + crypto_int32 h2 = f2 + g2; + crypto_int32 h3 = f3 + g3; + crypto_int32 h4 = f4 + g4; + crypto_int32 h5 = f5 + g5; + crypto_int32 h6 = f6 + g6; + crypto_int32 h7 = f7 + g7; + crypto_int32 h8 = f8 + g8; + crypto_int32 h9 = f9 + g9; + h[0] = h0; + h[1] = h1; + h[2] = h2; + h[3] = h3; + h[4] = h4; + h[5] = h5; + h[6] = h6; + h[7] = h7; + h[8] = h8; + h[9] = h9; +} diff --git a/src/ed25519-supercop-ref10/fe_cmov.c b/src/ed25519-supercop-ref10/fe_cmov.c new file mode 100644 index 0000000..8ca584f --- /dev/null +++ b/src/ed25519-supercop-ref10/fe_cmov.c @@ -0,0 +1,63 @@ +#include "fe.h" + +/* +Replace (f,g) with (g,g) if b == 1; +replace (f,g) with (f,g) if b == 0. + +Preconditions: b in {0,1}. +*/ + +void fe_cmov(fe f,const fe g,unsigned int b) +{ + crypto_int32 f0 = f[0]; + crypto_int32 f1 = f[1]; + crypto_int32 f2 = f[2]; + crypto_int32 f3 = f[3]; + crypto_int32 f4 = f[4]; + crypto_int32 f5 = f[5]; + crypto_int32 f6 = f[6]; + crypto_int32 f7 = f[7]; + crypto_int32 f8 = f[8]; + crypto_int32 f9 = f[9]; + crypto_int32 g0 = g[0]; + crypto_int32 g1 = g[1]; + crypto_int32 g2 = g[2]; + crypto_int32 g3 = g[3]; + crypto_int32 g4 = g[4]; + crypto_int32 g5 = g[5]; + crypto_int32 g6 = g[6]; + crypto_int32 g7 = g[7]; + crypto_int32 g8 = g[8]; + crypto_int32 g9 = g[9]; + crypto_int32 x0 = f0 ^ g0; + crypto_int32 x1 = f1 ^ g1; + crypto_int32 x2 = f2 ^ g2; + crypto_int32 x3 = f3 ^ g3; + crypto_int32 x4 = f4 ^ g4; + crypto_int32 x5 = f5 ^ g5; + crypto_int32 x6 = f6 ^ g6; + crypto_int32 x7 = f7 ^ g7; + crypto_int32 x8 = f8 ^ g8; + crypto_int32 x9 = f9 ^ g9; + b = -b; + x0 &= b; + x1 &= b; + x2 &= b; + x3 &= b; + x4 &= b; + x5 &= b; + x6 &= b; + x7 &= b; + x8 &= b; + x9 &= b; + f[0] = f0 ^ x0; + f[1] = f1 ^ x1; + f[2] = f2 ^ x2; + f[3] = f3 ^ x3; + f[4] = f4 ^ x4; + f[5] = f5 ^ x5; + f[6] = f6 ^ x6; + f[7] = f7 ^ x7; + f[8] = f8 ^ x8; + f[9] = f9 ^ x9; +} diff --git a/src/ed25519-supercop-ref10/fe_copy.c b/src/ed25519-supercop-ref10/fe_copy.c new file mode 100644 index 0000000..9c5bf86 --- /dev/null +++ b/src/ed25519-supercop-ref10/fe_copy.c @@ -0,0 +1,29 @@ +#include "fe.h" + +/* +h = f +*/ + +void fe_copy(fe h,const fe f) +{ + crypto_int32 f0 = f[0]; + crypto_int32 f1 = f[1]; + crypto_int32 f2 = f[2]; + crypto_int32 f3 = f[3]; + crypto_int32 f4 = f[4]; + crypto_int32 f5 = f[5]; + crypto_int32 f6 = f[6]; + crypto_int32 f7 = f[7]; + crypto_int32 f8 = f[8]; + crypto_int32 f9 = f[9]; + h[0] = f0; + h[1] = f1; + h[2] = f2; + h[3] = f3; + h[4] = f4; + h[5] = f5; + h[6] = f6; + h[7] = f7; + h[8] = f8; + h[9] = f9; +} diff --git a/src/ed25519-supercop-ref10/fe_frombytes.c b/src/ed25519-supercop-ref10/fe_frombytes.c new file mode 100644 index 0000000..5c17917 --- /dev/null +++ b/src/ed25519-supercop-ref10/fe_frombytes.c @@ -0,0 +1,73 @@ +#include "fe.h" +#include "crypto_int64.h" +#include "crypto_uint64.h" + +static crypto_uint64 load_3(const unsigned char *in) +{ + crypto_uint64 result; + result = 
(crypto_uint64) in[0]; + result |= ((crypto_uint64) in[1]) << 8; + result |= ((crypto_uint64) in[2]) << 16; + return result; +} + +static crypto_uint64 load_4(const unsigned char *in) +{ + crypto_uint64 result; + result = (crypto_uint64) in[0]; + result |= ((crypto_uint64) in[1]) << 8; + result |= ((crypto_uint64) in[2]) << 16; + result |= ((crypto_uint64) in[3]) << 24; + return result; +} + +/* +Ignores top bit of h. +*/ + +void fe_frombytes(fe h,const unsigned char *s) +{ + crypto_int64 h0 = load_4(s); + crypto_int64 h1 = load_3(s + 4) << 6; + crypto_int64 h2 = load_3(s + 7) << 5; + crypto_int64 h3 = load_3(s + 10) << 3; + crypto_int64 h4 = load_3(s + 13) << 2; + crypto_int64 h5 = load_4(s + 16); + crypto_int64 h6 = load_3(s + 20) << 7; + crypto_int64 h7 = load_3(s + 23) << 5; + crypto_int64 h8 = load_3(s + 26) << 4; + crypto_int64 h9 = (load_3(s + 29) & 8388607) << 2; + crypto_int64 carry0; + crypto_int64 carry1; + crypto_int64 carry2; + crypto_int64 carry3; + crypto_int64 carry4; + crypto_int64 carry5; + crypto_int64 carry6; + crypto_int64 carry7; + crypto_int64 carry8; + crypto_int64 carry9; + + carry9 = (h9 + (crypto_int64) (1<<24)) >> 25; h0 += carry9 * 19; h9 -= carry9 << 25; + carry1 = (h1 + (crypto_int64) (1<<24)) >> 25; h2 += carry1; h1 -= carry1 << 25; + carry3 = (h3 + (crypto_int64) (1<<24)) >> 25; h4 += carry3; h3 -= carry3 << 25; + carry5 = (h5 + (crypto_int64) (1<<24)) >> 25; h6 += carry5; h5 -= carry5 << 25; + carry7 = (h7 + (crypto_int64) (1<<24)) >> 25; h8 += carry7; h7 -= carry7 << 25; + + carry0 = (h0 + (crypto_int64) (1<<25)) >> 26; h1 += carry0; h0 -= carry0 << 26; + carry2 = (h2 + (crypto_int64) (1<<25)) >> 26; h3 += carry2; h2 -= carry2 << 26; + carry4 = (h4 + (crypto_int64) (1<<25)) >> 26; h5 += carry4; h4 -= carry4 << 26; + carry6 = (h6 + (crypto_int64) (1<<25)) >> 26; h7 += carry6; h6 -= carry6 << 26; + carry8 = (h8 + (crypto_int64) (1<<25)) >> 26; h9 += carry8; h8 -= carry8 << 26; + + h[0] = h0; + h[1] = h1; + h[2] = h2; + h[3] = h3; + h[4] = h4; + h[5] = h5; + h[6] = h6; + h[7] = h7; + h[8] = h8; + h[9] = h9; +} diff --git a/src/ed25519-supercop-ref10/fe_invert.c b/src/ed25519-supercop-ref10/fe_invert.c new file mode 100644 index 0000000..bcfdb8f --- /dev/null +++ b/src/ed25519-supercop-ref10/fe_invert.c @@ -0,0 +1,14 @@ +#include "fe.h" + +void fe_invert(fe out,const fe z) +{ + fe t0; + fe t1; + fe t2; + fe t3; + int i; + +#include "pow225521.h" + + return; +} diff --git a/src/ed25519-supercop-ref10/fe_isnegative.c b/src/ed25519-supercop-ref10/fe_isnegative.c new file mode 100644 index 0000000..3b2c8b8 --- /dev/null +++ b/src/ed25519-supercop-ref10/fe_isnegative.c @@ -0,0 +1,16 @@ +#include "fe.h" + +/* +return 1 if f is in {1,3,5,...,q-2} +return 0 if f is in {0,2,4,...,q-1} + +Preconditions: + |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +*/ + +int fe_isnegative(const fe f) +{ + unsigned char s[32]; + fe_tobytes(s,f); + return s[0] & 1; +} diff --git a/src/ed25519-supercop-ref10/fe_isnonzero.c b/src/ed25519-supercop-ref10/fe_isnonzero.c new file mode 100644 index 0000000..1f42e39 --- /dev/null +++ b/src/ed25519-supercop-ref10/fe_isnonzero.c @@ -0,0 +1,19 @@ +#include "fe.h" +#include "crypto_verify_32.h" + +/* +return 0 if f == 0 +return -1 if f != 0 + +Preconditions: + |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
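The return convention comes from crypto_verify_32, which is provided elsewhere in SUPERCOP/NaCl and compares 32 bytes in constant time. A Python 3 model of that contract (a sketch assuming the standard NaCl reference behavior; verify_32_model is a name invented here):

    def verify_32_model(x, y):
        # Fold every byte difference into one accumulator so the running
        # time does not depend on where (or whether) x and y differ.
        differentbits = 0
        for i in range(32):
            differentbits |= x[i] ^ y[i]
        # 0 when all 32 bytes match, -1 otherwise.
        return (1 & ((differentbits - 1) >> 8)) - 1

    assert verify_32_model(bytes(32), bytes(32)) == 0
    assert verify_32_model(bytes(32), b"\x01" + bytes(31)) == -1

So fe_isnonzero(f) is 0 exactly when f represents 0 mod 2^255-19, and a caller can simply test the result for truth.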
+*/ + +static const char zero[32]; + +int fe_isnonzero(const fe f) +{ + unsigned char s[32]; + fe_tobytes(s,f); + return crypto_verify_32(s,zero); +} diff --git a/src/ed25519-supercop-ref10/fe_mul.c b/src/ed25519-supercop-ref10/fe_mul.c new file mode 100644 index 0000000..26ca8b3 --- /dev/null +++ b/src/ed25519-supercop-ref10/fe_mul.c @@ -0,0 +1,253 @@ +#include "fe.h" +#include "crypto_int64.h" + +/* +h = f * g +Can overlap h with f or g. + +Preconditions: + |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. + |g| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. + +Postconditions: + |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. +*/ + +/* +Notes on implementation strategy: + +Using schoolbook multiplication. +Karatsuba would save a little in some cost models. + +Most multiplications by 2 and 19 are 32-bit precomputations; +cheaper than 64-bit postcomputations. + +There is one remaining multiplication by 19 in the carry chain; +one *19 precomputation can be merged into this, +but the resulting data flow is considerably less clean. + +There are 12 carries below. +10 of them are 2-way parallelizable and vectorizable. +Can get away with 11 carries, but then data flow is much deeper. + +With tighter constraints on inputs can squeeze carries into int32. +*/ + +void fe_mul(fe h,const fe f,const fe g) +{ + crypto_int32 f0 = f[0]; + crypto_int32 f1 = f[1]; + crypto_int32 f2 = f[2]; + crypto_int32 f3 = f[3]; + crypto_int32 f4 = f[4]; + crypto_int32 f5 = f[5]; + crypto_int32 f6 = f[6]; + crypto_int32 f7 = f[7]; + crypto_int32 f8 = f[8]; + crypto_int32 f9 = f[9]; + crypto_int32 g0 = g[0]; + crypto_int32 g1 = g[1]; + crypto_int32 g2 = g[2]; + crypto_int32 g3 = g[3]; + crypto_int32 g4 = g[4]; + crypto_int32 g5 = g[5]; + crypto_int32 g6 = g[6]; + crypto_int32 g7 = g[7]; + crypto_int32 g8 = g[8]; + crypto_int32 g9 = g[9]; + crypto_int32 g1_19 = 19 * g1; /* 1.959375*2^29 */ + crypto_int32 g2_19 = 19 * g2; /* 1.959375*2^30; still ok */ + crypto_int32 g3_19 = 19 * g3; + crypto_int32 g4_19 = 19 * g4; + crypto_int32 g5_19 = 19 * g5; + crypto_int32 g6_19 = 19 * g6; + crypto_int32 g7_19 = 19 * g7; + crypto_int32 g8_19 = 19 * g8; + crypto_int32 g9_19 = 19 * g9; + crypto_int32 f1_2 = 2 * f1; + crypto_int32 f3_2 = 2 * f3; + crypto_int32 f5_2 = 2 * f5; + crypto_int32 f7_2 = 2 * f7; + crypto_int32 f9_2 = 2 * f9; + crypto_int64 f0g0 = f0 * (crypto_int64) g0; + crypto_int64 f0g1 = f0 * (crypto_int64) g1; + crypto_int64 f0g2 = f0 * (crypto_int64) g2; + crypto_int64 f0g3 = f0 * (crypto_int64) g3; + crypto_int64 f0g4 = f0 * (crypto_int64) g4; + crypto_int64 f0g5 = f0 * (crypto_int64) g5; + crypto_int64 f0g6 = f0 * (crypto_int64) g6; + crypto_int64 f0g7 = f0 * (crypto_int64) g7; + crypto_int64 f0g8 = f0 * (crypto_int64) g8; + crypto_int64 f0g9 = f0 * (crypto_int64) g9; + crypto_int64 f1g0 = f1 * (crypto_int64) g0; + crypto_int64 f1g1_2 = f1_2 * (crypto_int64) g1; + crypto_int64 f1g2 = f1 * (crypto_int64) g2; + crypto_int64 f1g3_2 = f1_2 * (crypto_int64) g3; + crypto_int64 f1g4 = f1 * (crypto_int64) g4; + crypto_int64 f1g5_2 = f1_2 * (crypto_int64) g5; + crypto_int64 f1g6 = f1 * (crypto_int64) g6; + crypto_int64 f1g7_2 = f1_2 * (crypto_int64) g7; + crypto_int64 f1g8 = f1 * (crypto_int64) g8; + crypto_int64 f1g9_38 = f1_2 * (crypto_int64) g9_19; + crypto_int64 f2g0 = f2 * (crypto_int64) g0; + crypto_int64 f2g1 = f2 * (crypto_int64) g1; + crypto_int64 f2g2 = f2 * (crypto_int64) g2; + crypto_int64 f2g3 = f2 * (crypto_int64) g3; + crypto_int64 f2g4 = f2 * (crypto_int64) g4; + crypto_int64 f2g5 = f2 
* (crypto_int64) g5; + crypto_int64 f2g6 = f2 * (crypto_int64) g6; + crypto_int64 f2g7 = f2 * (crypto_int64) g7; + crypto_int64 f2g8_19 = f2 * (crypto_int64) g8_19; + crypto_int64 f2g9_19 = f2 * (crypto_int64) g9_19; + crypto_int64 f3g0 = f3 * (crypto_int64) g0; + crypto_int64 f3g1_2 = f3_2 * (crypto_int64) g1; + crypto_int64 f3g2 = f3 * (crypto_int64) g2; + crypto_int64 f3g3_2 = f3_2 * (crypto_int64) g3; + crypto_int64 f3g4 = f3 * (crypto_int64) g4; + crypto_int64 f3g5_2 = f3_2 * (crypto_int64) g5; + crypto_int64 f3g6 = f3 * (crypto_int64) g6; + crypto_int64 f3g7_38 = f3_2 * (crypto_int64) g7_19; + crypto_int64 f3g8_19 = f3 * (crypto_int64) g8_19; + crypto_int64 f3g9_38 = f3_2 * (crypto_int64) g9_19; + crypto_int64 f4g0 = f4 * (crypto_int64) g0; + crypto_int64 f4g1 = f4 * (crypto_int64) g1; + crypto_int64 f4g2 = f4 * (crypto_int64) g2; + crypto_int64 f4g3 = f4 * (crypto_int64) g3; + crypto_int64 f4g4 = f4 * (crypto_int64) g4; + crypto_int64 f4g5 = f4 * (crypto_int64) g5; + crypto_int64 f4g6_19 = f4 * (crypto_int64) g6_19; + crypto_int64 f4g7_19 = f4 * (crypto_int64) g7_19; + crypto_int64 f4g8_19 = f4 * (crypto_int64) g8_19; + crypto_int64 f4g9_19 = f4 * (crypto_int64) g9_19; + crypto_int64 f5g0 = f5 * (crypto_int64) g0; + crypto_int64 f5g1_2 = f5_2 * (crypto_int64) g1; + crypto_int64 f5g2 = f5 * (crypto_int64) g2; + crypto_int64 f5g3_2 = f5_2 * (crypto_int64) g3; + crypto_int64 f5g4 = f5 * (crypto_int64) g4; + crypto_int64 f5g5_38 = f5_2 * (crypto_int64) g5_19; + crypto_int64 f5g6_19 = f5 * (crypto_int64) g6_19; + crypto_int64 f5g7_38 = f5_2 * (crypto_int64) g7_19; + crypto_int64 f5g8_19 = f5 * (crypto_int64) g8_19; + crypto_int64 f5g9_38 = f5_2 * (crypto_int64) g9_19; + crypto_int64 f6g0 = f6 * (crypto_int64) g0; + crypto_int64 f6g1 = f6 * (crypto_int64) g1; + crypto_int64 f6g2 = f6 * (crypto_int64) g2; + crypto_int64 f6g3 = f6 * (crypto_int64) g3; + crypto_int64 f6g4_19 = f6 * (crypto_int64) g4_19; + crypto_int64 f6g5_19 = f6 * (crypto_int64) g5_19; + crypto_int64 f6g6_19 = f6 * (crypto_int64) g6_19; + crypto_int64 f6g7_19 = f6 * (crypto_int64) g7_19; + crypto_int64 f6g8_19 = f6 * (crypto_int64) g8_19; + crypto_int64 f6g9_19 = f6 * (crypto_int64) g9_19; + crypto_int64 f7g0 = f7 * (crypto_int64) g0; + crypto_int64 f7g1_2 = f7_2 * (crypto_int64) g1; + crypto_int64 f7g2 = f7 * (crypto_int64) g2; + crypto_int64 f7g3_38 = f7_2 * (crypto_int64) g3_19; + crypto_int64 f7g4_19 = f7 * (crypto_int64) g4_19; + crypto_int64 f7g5_38 = f7_2 * (crypto_int64) g5_19; + crypto_int64 f7g6_19 = f7 * (crypto_int64) g6_19; + crypto_int64 f7g7_38 = f7_2 * (crypto_int64) g7_19; + crypto_int64 f7g8_19 = f7 * (crypto_int64) g8_19; + crypto_int64 f7g9_38 = f7_2 * (crypto_int64) g9_19; + crypto_int64 f8g0 = f8 * (crypto_int64) g0; + crypto_int64 f8g1 = f8 * (crypto_int64) g1; + crypto_int64 f8g2_19 = f8 * (crypto_int64) g2_19; + crypto_int64 f8g3_19 = f8 * (crypto_int64) g3_19; + crypto_int64 f8g4_19 = f8 * (crypto_int64) g4_19; + crypto_int64 f8g5_19 = f8 * (crypto_int64) g5_19; + crypto_int64 f8g6_19 = f8 * (crypto_int64) g6_19; + crypto_int64 f8g7_19 = f8 * (crypto_int64) g7_19; + crypto_int64 f8g8_19 = f8 * (crypto_int64) g8_19; + crypto_int64 f8g9_19 = f8 * (crypto_int64) g9_19; + crypto_int64 f9g0 = f9 * (crypto_int64) g0; + crypto_int64 f9g1_38 = f9_2 * (crypto_int64) g1_19; + crypto_int64 f9g2_19 = f9 * (crypto_int64) g2_19; + crypto_int64 f9g3_38 = f9_2 * (crypto_int64) g3_19; + crypto_int64 f9g4_19 = f9 * (crypto_int64) g4_19; + crypto_int64 f9g5_38 = f9_2 * (crypto_int64) g5_19; + crypto_int64 f9g6_19 = 
f9 * (crypto_int64) g6_19; + crypto_int64 f9g7_38 = f9_2 * (crypto_int64) g7_19; + crypto_int64 f9g8_19 = f9 * (crypto_int64) g8_19; + crypto_int64 f9g9_38 = f9_2 * (crypto_int64) g9_19; + crypto_int64 h0 = f0g0+f1g9_38+f2g8_19+f3g7_38+f4g6_19+f5g5_38+f6g4_19+f7g3_38+f8g2_19+f9g1_38; + crypto_int64 h1 = f0g1+f1g0 +f2g9_19+f3g8_19+f4g7_19+f5g6_19+f6g5_19+f7g4_19+f8g3_19+f9g2_19; + crypto_int64 h2 = f0g2+f1g1_2 +f2g0 +f3g9_38+f4g8_19+f5g7_38+f6g6_19+f7g5_38+f8g4_19+f9g3_38; + crypto_int64 h3 = f0g3+f1g2 +f2g1 +f3g0 +f4g9_19+f5g8_19+f6g7_19+f7g6_19+f8g5_19+f9g4_19; + crypto_int64 h4 = f0g4+f1g3_2 +f2g2 +f3g1_2 +f4g0 +f5g9_38+f6g8_19+f7g7_38+f8g6_19+f9g5_38; + crypto_int64 h5 = f0g5+f1g4 +f2g3 +f3g2 +f4g1 +f5g0 +f6g9_19+f7g8_19+f8g7_19+f9g6_19; + crypto_int64 h6 = f0g6+f1g5_2 +f2g4 +f3g3_2 +f4g2 +f5g1_2 +f6g0 +f7g9_38+f8g8_19+f9g7_38; + crypto_int64 h7 = f0g7+f1g6 +f2g5 +f3g4 +f4g3 +f5g2 +f6g1 +f7g0 +f8g9_19+f9g8_19; + crypto_int64 h8 = f0g8+f1g7_2 +f2g6 +f3g5_2 +f4g4 +f5g3_2 +f6g2 +f7g1_2 +f8g0 +f9g9_38; + crypto_int64 h9 = f0g9+f1g8 +f2g7 +f3g6 +f4g5 +f5g4 +f6g3 +f7g2 +f8g1 +f9g0 ; + crypto_int64 carry0; + crypto_int64 carry1; + crypto_int64 carry2; + crypto_int64 carry3; + crypto_int64 carry4; + crypto_int64 carry5; + crypto_int64 carry6; + crypto_int64 carry7; + crypto_int64 carry8; + crypto_int64 carry9; + + /* + |h0| <= (1.65*1.65*2^52*(1+19+19+19+19)+1.65*1.65*2^50*(38+38+38+38+38)) + i.e. |h0| <= 1.4*2^60; narrower ranges for h2, h4, h6, h8 + |h1| <= (1.65*1.65*2^51*(1+1+19+19+19+19+19+19+19+19)) + i.e. |h1| <= 1.7*2^59; narrower ranges for h3, h5, h7, h9 + */ + + carry0 = (h0 + (crypto_int64) (1<<25)) >> 26; h1 += carry0; h0 -= carry0 << 26; + carry4 = (h4 + (crypto_int64) (1<<25)) >> 26; h5 += carry4; h4 -= carry4 << 26; + /* |h0| <= 2^25 */ + /* |h4| <= 2^25 */ + /* |h1| <= 1.71*2^59 */ + /* |h5| <= 1.71*2^59 */ + + carry1 = (h1 + (crypto_int64) (1<<24)) >> 25; h2 += carry1; h1 -= carry1 << 25; + carry5 = (h5 + (crypto_int64) (1<<24)) >> 25; h6 += carry5; h5 -= carry5 << 25; + /* |h1| <= 2^24; from now on fits into int32 */ + /* |h5| <= 2^24; from now on fits into int32 */ + /* |h2| <= 1.41*2^60 */ + /* |h6| <= 1.41*2^60 */ + + carry2 = (h2 + (crypto_int64) (1<<25)) >> 26; h3 += carry2; h2 -= carry2 << 26; + carry6 = (h6 + (crypto_int64) (1<<25)) >> 26; h7 += carry6; h6 -= carry6 << 26; + /* |h2| <= 2^25; from now on fits into int32 unchanged */ + /* |h6| <= 2^25; from now on fits into int32 unchanged */ + /* |h3| <= 1.71*2^59 */ + /* |h7| <= 1.71*2^59 */ + + carry3 = (h3 + (crypto_int64) (1<<24)) >> 25; h4 += carry3; h3 -= carry3 << 25; + carry7 = (h7 + (crypto_int64) (1<<24)) >> 25; h8 += carry7; h7 -= carry7 << 25; + /* |h3| <= 2^24; from now on fits into int32 unchanged */ + /* |h7| <= 2^24; from now on fits into int32 unchanged */ + /* |h4| <= 1.72*2^34 */ + /* |h8| <= 1.41*2^60 */ + + carry4 = (h4 + (crypto_int64) (1<<25)) >> 26; h5 += carry4; h4 -= carry4 << 26; + carry8 = (h8 + (crypto_int64) (1<<25)) >> 26; h9 += carry8; h8 -= carry8 << 26; + /* |h4| <= 2^25; from now on fits into int32 unchanged */ + /* |h8| <= 2^25; from now on fits into int32 unchanged */ + /* |h5| <= 1.01*2^24 */ + /* |h9| <= 1.71*2^59 */ + + carry9 = (h9 + (crypto_int64) (1<<24)) >> 25; h0 += carry9 * 19; h9 -= carry9 << 25; + /* |h9| <= 2^24; from now on fits into int32 unchanged */ + /* |h0| <= 1.1*2^39 */ + + carry0 = (h0 + (crypto_int64) (1<<25)) >> 26; h1 += carry0; h0 -= carry0 << 26; + /* |h0| <= 2^25; from now on fits into int32 unchanged */ + /* |h1| <= 1.01*2^24 */ + + h[0] = h0; + h[1] = h1; 
+ h[2] = h2; + h[3] = h3; + h[4] = h4; + h[5] = h5; + h[6] = h6; + h[7] = h7; + h[8] = h8; + h[9] = h9; +} diff --git a/src/ed25519-supercop-ref10/fe_neg.c b/src/ed25519-supercop-ref10/fe_neg.c new file mode 100644 index 0000000..2078ce5 --- /dev/null +++ b/src/ed25519-supercop-ref10/fe_neg.c @@ -0,0 +1,45 @@ +#include "fe.h" + +/* +h = -f + +Preconditions: + |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. + +Postconditions: + |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +*/ + +void fe_neg(fe h,const fe f) +{ + crypto_int32 f0 = f[0]; + crypto_int32 f1 = f[1]; + crypto_int32 f2 = f[2]; + crypto_int32 f3 = f[3]; + crypto_int32 f4 = f[4]; + crypto_int32 f5 = f[5]; + crypto_int32 f6 = f[6]; + crypto_int32 f7 = f[7]; + crypto_int32 f8 = f[8]; + crypto_int32 f9 = f[9]; + crypto_int32 h0 = -f0; + crypto_int32 h1 = -f1; + crypto_int32 h2 = -f2; + crypto_int32 h3 = -f3; + crypto_int32 h4 = -f4; + crypto_int32 h5 = -f5; + crypto_int32 h6 = -f6; + crypto_int32 h7 = -f7; + crypto_int32 h8 = -f8; + crypto_int32 h9 = -f9; + h[0] = h0; + h[1] = h1; + h[2] = h2; + h[3] = h3; + h[4] = h4; + h[5] = h5; + h[6] = h6; + h[7] = h7; + h[8] = h8; + h[9] = h9; +} diff --git a/src/ed25519-supercop-ref10/fe_pow22523.c b/src/ed25519-supercop-ref10/fe_pow22523.c new file mode 100644 index 0000000..56675a5 --- /dev/null +++ b/src/ed25519-supercop-ref10/fe_pow22523.c @@ -0,0 +1,13 @@ +#include "fe.h" + +void fe_pow22523(fe out,const fe z) +{ + fe t0; + fe t1; + fe t2; + int i; + +#include "pow22523.h" + + return; +} diff --git a/src/ed25519-supercop-ref10/fe_sq.c b/src/ed25519-supercop-ref10/fe_sq.c new file mode 100644 index 0000000..8dd1198 --- /dev/null +++ b/src/ed25519-supercop-ref10/fe_sq.c @@ -0,0 +1,149 @@ +#include "fe.h" +#include "crypto_int64.h" + +/* +h = f * f +Can overlap h with f. + +Preconditions: + |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. + +Postconditions: + |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. +*/ + +/* +See fe_mul.c for discussion of implementation strategy. 
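The _2, _19, and _38 suffixes in fe_mul and fe_sq fall out of the representation: limbs i and j sit at bit offsets OFFSETS[i] and OFFSETS[j], their product lands at the offset sum, and that differs from the offset of limb (i+j) mod 10 by a factor of 1 or 2, times 19 whenever the product wraps past 2^255. A Python 3 check of that bookkeeping (illustration only; OFFSETS as in the fe.h sketch above):

    q = 2**255 - 19
    OFFSETS = [0, 26, 51, 77, 102, 128, 153, 179, 204, 230]
    for i in range(10):
        for j in range(10):
            w = OFFSETS[i] + OFFSETS[j]          # where f_i * g_j lands
            wrap = 1 if i + j >= 10 else 0       # past 2^255: fold with *19
            k = (i + j) % 10                     # destination limb
            coeff = 19**wrap << (w - OFFSETS[k] - 255 * wrap)
            assert coeff in (1, 2, 19, 38)
            # coefficient is exact modulo q:
            assert (1 << w) % q == (coeff << OFFSETS[k]) % q

For example, i = j = 5 gives coeff = 38, which is why h0 accumulates f5g5_38 in fe_mul and f5f5_38 in fe_sq.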
+*/ + +void fe_sq(fe h,const fe f) +{ + crypto_int32 f0 = f[0]; + crypto_int32 f1 = f[1]; + crypto_int32 f2 = f[2]; + crypto_int32 f3 = f[3]; + crypto_int32 f4 = f[4]; + crypto_int32 f5 = f[5]; + crypto_int32 f6 = f[6]; + crypto_int32 f7 = f[7]; + crypto_int32 f8 = f[8]; + crypto_int32 f9 = f[9]; + crypto_int32 f0_2 = 2 * f0; + crypto_int32 f1_2 = 2 * f1; + crypto_int32 f2_2 = 2 * f2; + crypto_int32 f3_2 = 2 * f3; + crypto_int32 f4_2 = 2 * f4; + crypto_int32 f5_2 = 2 * f5; + crypto_int32 f6_2 = 2 * f6; + crypto_int32 f7_2 = 2 * f7; + crypto_int32 f5_38 = 38 * f5; /* 1.959375*2^30 */ + crypto_int32 f6_19 = 19 * f6; /* 1.959375*2^30 */ + crypto_int32 f7_38 = 38 * f7; /* 1.959375*2^30 */ + crypto_int32 f8_19 = 19 * f8; /* 1.959375*2^30 */ + crypto_int32 f9_38 = 38 * f9; /* 1.959375*2^30 */ + crypto_int64 f0f0 = f0 * (crypto_int64) f0; + crypto_int64 f0f1_2 = f0_2 * (crypto_int64) f1; + crypto_int64 f0f2_2 = f0_2 * (crypto_int64) f2; + crypto_int64 f0f3_2 = f0_2 * (crypto_int64) f3; + crypto_int64 f0f4_2 = f0_2 * (crypto_int64) f4; + crypto_int64 f0f5_2 = f0_2 * (crypto_int64) f5; + crypto_int64 f0f6_2 = f0_2 * (crypto_int64) f6; + crypto_int64 f0f7_2 = f0_2 * (crypto_int64) f7; + crypto_int64 f0f8_2 = f0_2 * (crypto_int64) f8; + crypto_int64 f0f9_2 = f0_2 * (crypto_int64) f9; + crypto_int64 f1f1_2 = f1_2 * (crypto_int64) f1; + crypto_int64 f1f2_2 = f1_2 * (crypto_int64) f2; + crypto_int64 f1f3_4 = f1_2 * (crypto_int64) f3_2; + crypto_int64 f1f4_2 = f1_2 * (crypto_int64) f4; + crypto_int64 f1f5_4 = f1_2 * (crypto_int64) f5_2; + crypto_int64 f1f6_2 = f1_2 * (crypto_int64) f6; + crypto_int64 f1f7_4 = f1_2 * (crypto_int64) f7_2; + crypto_int64 f1f8_2 = f1_2 * (crypto_int64) f8; + crypto_int64 f1f9_76 = f1_2 * (crypto_int64) f9_38; + crypto_int64 f2f2 = f2 * (crypto_int64) f2; + crypto_int64 f2f3_2 = f2_2 * (crypto_int64) f3; + crypto_int64 f2f4_2 = f2_2 * (crypto_int64) f4; + crypto_int64 f2f5_2 = f2_2 * (crypto_int64) f5; + crypto_int64 f2f6_2 = f2_2 * (crypto_int64) f6; + crypto_int64 f2f7_2 = f2_2 * (crypto_int64) f7; + crypto_int64 f2f8_38 = f2_2 * (crypto_int64) f8_19; + crypto_int64 f2f9_38 = f2 * (crypto_int64) f9_38; + crypto_int64 f3f3_2 = f3_2 * (crypto_int64) f3; + crypto_int64 f3f4_2 = f3_2 * (crypto_int64) f4; + crypto_int64 f3f5_4 = f3_2 * (crypto_int64) f5_2; + crypto_int64 f3f6_2 = f3_2 * (crypto_int64) f6; + crypto_int64 f3f7_76 = f3_2 * (crypto_int64) f7_38; + crypto_int64 f3f8_38 = f3_2 * (crypto_int64) f8_19; + crypto_int64 f3f9_76 = f3_2 * (crypto_int64) f9_38; + crypto_int64 f4f4 = f4 * (crypto_int64) f4; + crypto_int64 f4f5_2 = f4_2 * (crypto_int64) f5; + crypto_int64 f4f6_38 = f4_2 * (crypto_int64) f6_19; + crypto_int64 f4f7_38 = f4 * (crypto_int64) f7_38; + crypto_int64 f4f8_38 = f4_2 * (crypto_int64) f8_19; + crypto_int64 f4f9_38 = f4 * (crypto_int64) f9_38; + crypto_int64 f5f5_38 = f5 * (crypto_int64) f5_38; + crypto_int64 f5f6_38 = f5_2 * (crypto_int64) f6_19; + crypto_int64 f5f7_76 = f5_2 * (crypto_int64) f7_38; + crypto_int64 f5f8_38 = f5_2 * (crypto_int64) f8_19; + crypto_int64 f5f9_76 = f5_2 * (crypto_int64) f9_38; + crypto_int64 f6f6_19 = f6 * (crypto_int64) f6_19; + crypto_int64 f6f7_38 = f6 * (crypto_int64) f7_38; + crypto_int64 f6f8_38 = f6_2 * (crypto_int64) f8_19; + crypto_int64 f6f9_38 = f6 * (crypto_int64) f9_38; + crypto_int64 f7f7_38 = f7 * (crypto_int64) f7_38; + crypto_int64 f7f8_38 = f7_2 * (crypto_int64) f8_19; + crypto_int64 f7f9_76 = f7_2 * (crypto_int64) f9_38; + crypto_int64 f8f8_19 = f8 * (crypto_int64) f8_19; + crypto_int64 f8f9_38 = f8 * 
(crypto_int64) f9_38; + crypto_int64 f9f9_38 = f9 * (crypto_int64) f9_38; + crypto_int64 h0 = f0f0 +f1f9_76+f2f8_38+f3f7_76+f4f6_38+f5f5_38; + crypto_int64 h1 = f0f1_2+f2f9_38+f3f8_38+f4f7_38+f5f6_38; + crypto_int64 h2 = f0f2_2+f1f1_2 +f3f9_76+f4f8_38+f5f7_76+f6f6_19; + crypto_int64 h3 = f0f3_2+f1f2_2 +f4f9_38+f5f8_38+f6f7_38; + crypto_int64 h4 = f0f4_2+f1f3_4 +f2f2 +f5f9_76+f6f8_38+f7f7_38; + crypto_int64 h5 = f0f5_2+f1f4_2 +f2f3_2 +f6f9_38+f7f8_38; + crypto_int64 h6 = f0f6_2+f1f5_4 +f2f4_2 +f3f3_2 +f7f9_76+f8f8_19; + crypto_int64 h7 = f0f7_2+f1f6_2 +f2f5_2 +f3f4_2 +f8f9_38; + crypto_int64 h8 = f0f8_2+f1f7_4 +f2f6_2 +f3f5_4 +f4f4 +f9f9_38; + crypto_int64 h9 = f0f9_2+f1f8_2 +f2f7_2 +f3f6_2 +f4f5_2; + crypto_int64 carry0; + crypto_int64 carry1; + crypto_int64 carry2; + crypto_int64 carry3; + crypto_int64 carry4; + crypto_int64 carry5; + crypto_int64 carry6; + crypto_int64 carry7; + crypto_int64 carry8; + crypto_int64 carry9; + + carry0 = (h0 + (crypto_int64) (1<<25)) >> 26; h1 += carry0; h0 -= carry0 << 26; + carry4 = (h4 + (crypto_int64) (1<<25)) >> 26; h5 += carry4; h4 -= carry4 << 26; + + carry1 = (h1 + (crypto_int64) (1<<24)) >> 25; h2 += carry1; h1 -= carry1 << 25; + carry5 = (h5 + (crypto_int64) (1<<24)) >> 25; h6 += carry5; h5 -= carry5 << 25; + + carry2 = (h2 + (crypto_int64) (1<<25)) >> 26; h3 += carry2; h2 -= carry2 << 26; + carry6 = (h6 + (crypto_int64) (1<<25)) >> 26; h7 += carry6; h6 -= carry6 << 26; + + carry3 = (h3 + (crypto_int64) (1<<24)) >> 25; h4 += carry3; h3 -= carry3 << 25; + carry7 = (h7 + (crypto_int64) (1<<24)) >> 25; h8 += carry7; h7 -= carry7 << 25; + + carry4 = (h4 + (crypto_int64) (1<<25)) >> 26; h5 += carry4; h4 -= carry4 << 26; + carry8 = (h8 + (crypto_int64) (1<<25)) >> 26; h9 += carry8; h8 -= carry8 << 26; + + carry9 = (h9 + (crypto_int64) (1<<24)) >> 25; h0 += carry9 * 19; h9 -= carry9 << 25; + + carry0 = (h0 + (crypto_int64) (1<<25)) >> 26; h1 += carry0; h0 -= carry0 << 26; + + h[0] = h0; + h[1] = h1; + h[2] = h2; + h[3] = h3; + h[4] = h4; + h[5] = h5; + h[6] = h6; + h[7] = h7; + h[8] = h8; + h[9] = h9; +} diff --git a/src/ed25519-supercop-ref10/fe_sq2.c b/src/ed25519-supercop-ref10/fe_sq2.c new file mode 100644 index 0000000..026ed3a --- /dev/null +++ b/src/ed25519-supercop-ref10/fe_sq2.c @@ -0,0 +1,160 @@ +#include "fe.h" +#include "crypto_int64.h" + +/* +h = 2 * f * f +Can overlap h with f. + +Preconditions: + |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. + +Postconditions: + |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. +*/ + +/* +See fe_mul.c for discussion of implementation strategy. 
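+
+This is fe_sq with one extra doubling pass: after the 64-bit sums are
+formed, each h_i is doubled (h0 += h0, etc.) before the carry chain,
+producing 2*f^2 for essentially the cost of f^2; ge_p2_dbl.h consumes
+it as B = 2*Z1^2.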
+*/ + +void fe_sq2(fe h,const fe f) +{ + crypto_int32 f0 = f[0]; + crypto_int32 f1 = f[1]; + crypto_int32 f2 = f[2]; + crypto_int32 f3 = f[3]; + crypto_int32 f4 = f[4]; + crypto_int32 f5 = f[5]; + crypto_int32 f6 = f[6]; + crypto_int32 f7 = f[7]; + crypto_int32 f8 = f[8]; + crypto_int32 f9 = f[9]; + crypto_int32 f0_2 = 2 * f0; + crypto_int32 f1_2 = 2 * f1; + crypto_int32 f2_2 = 2 * f2; + crypto_int32 f3_2 = 2 * f3; + crypto_int32 f4_2 = 2 * f4; + crypto_int32 f5_2 = 2 * f5; + crypto_int32 f6_2 = 2 * f6; + crypto_int32 f7_2 = 2 * f7; + crypto_int32 f5_38 = 38 * f5; /* 1.959375*2^30 */ + crypto_int32 f6_19 = 19 * f6; /* 1.959375*2^30 */ + crypto_int32 f7_38 = 38 * f7; /* 1.959375*2^30 */ + crypto_int32 f8_19 = 19 * f8; /* 1.959375*2^30 */ + crypto_int32 f9_38 = 38 * f9; /* 1.959375*2^30 */ + crypto_int64 f0f0 = f0 * (crypto_int64) f0; + crypto_int64 f0f1_2 = f0_2 * (crypto_int64) f1; + crypto_int64 f0f2_2 = f0_2 * (crypto_int64) f2; + crypto_int64 f0f3_2 = f0_2 * (crypto_int64) f3; + crypto_int64 f0f4_2 = f0_2 * (crypto_int64) f4; + crypto_int64 f0f5_2 = f0_2 * (crypto_int64) f5; + crypto_int64 f0f6_2 = f0_2 * (crypto_int64) f6; + crypto_int64 f0f7_2 = f0_2 * (crypto_int64) f7; + crypto_int64 f0f8_2 = f0_2 * (crypto_int64) f8; + crypto_int64 f0f9_2 = f0_2 * (crypto_int64) f9; + crypto_int64 f1f1_2 = f1_2 * (crypto_int64) f1; + crypto_int64 f1f2_2 = f1_2 * (crypto_int64) f2; + crypto_int64 f1f3_4 = f1_2 * (crypto_int64) f3_2; + crypto_int64 f1f4_2 = f1_2 * (crypto_int64) f4; + crypto_int64 f1f5_4 = f1_2 * (crypto_int64) f5_2; + crypto_int64 f1f6_2 = f1_2 * (crypto_int64) f6; + crypto_int64 f1f7_4 = f1_2 * (crypto_int64) f7_2; + crypto_int64 f1f8_2 = f1_2 * (crypto_int64) f8; + crypto_int64 f1f9_76 = f1_2 * (crypto_int64) f9_38; + crypto_int64 f2f2 = f2 * (crypto_int64) f2; + crypto_int64 f2f3_2 = f2_2 * (crypto_int64) f3; + crypto_int64 f2f4_2 = f2_2 * (crypto_int64) f4; + crypto_int64 f2f5_2 = f2_2 * (crypto_int64) f5; + crypto_int64 f2f6_2 = f2_2 * (crypto_int64) f6; + crypto_int64 f2f7_2 = f2_2 * (crypto_int64) f7; + crypto_int64 f2f8_38 = f2_2 * (crypto_int64) f8_19; + crypto_int64 f2f9_38 = f2 * (crypto_int64) f9_38; + crypto_int64 f3f3_2 = f3_2 * (crypto_int64) f3; + crypto_int64 f3f4_2 = f3_2 * (crypto_int64) f4; + crypto_int64 f3f5_4 = f3_2 * (crypto_int64) f5_2; + crypto_int64 f3f6_2 = f3_2 * (crypto_int64) f6; + crypto_int64 f3f7_76 = f3_2 * (crypto_int64) f7_38; + crypto_int64 f3f8_38 = f3_2 * (crypto_int64) f8_19; + crypto_int64 f3f9_76 = f3_2 * (crypto_int64) f9_38; + crypto_int64 f4f4 = f4 * (crypto_int64) f4; + crypto_int64 f4f5_2 = f4_2 * (crypto_int64) f5; + crypto_int64 f4f6_38 = f4_2 * (crypto_int64) f6_19; + crypto_int64 f4f7_38 = f4 * (crypto_int64) f7_38; + crypto_int64 f4f8_38 = f4_2 * (crypto_int64) f8_19; + crypto_int64 f4f9_38 = f4 * (crypto_int64) f9_38; + crypto_int64 f5f5_38 = f5 * (crypto_int64) f5_38; + crypto_int64 f5f6_38 = f5_2 * (crypto_int64) f6_19; + crypto_int64 f5f7_76 = f5_2 * (crypto_int64) f7_38; + crypto_int64 f5f8_38 = f5_2 * (crypto_int64) f8_19; + crypto_int64 f5f9_76 = f5_2 * (crypto_int64) f9_38; + crypto_int64 f6f6_19 = f6 * (crypto_int64) f6_19; + crypto_int64 f6f7_38 = f6 * (crypto_int64) f7_38; + crypto_int64 f6f8_38 = f6_2 * (crypto_int64) f8_19; + crypto_int64 f6f9_38 = f6 * (crypto_int64) f9_38; + crypto_int64 f7f7_38 = f7 * (crypto_int64) f7_38; + crypto_int64 f7f8_38 = f7_2 * (crypto_int64) f8_19; + crypto_int64 f7f9_76 = f7_2 * (crypto_int64) f9_38; + crypto_int64 f8f8_19 = f8 * (crypto_int64) f8_19; + crypto_int64 f8f9_38 = f8 * 
(crypto_int64) f9_38; + crypto_int64 f9f9_38 = f9 * (crypto_int64) f9_38; + crypto_int64 h0 = f0f0 +f1f9_76+f2f8_38+f3f7_76+f4f6_38+f5f5_38; + crypto_int64 h1 = f0f1_2+f2f9_38+f3f8_38+f4f7_38+f5f6_38; + crypto_int64 h2 = f0f2_2+f1f1_2 +f3f9_76+f4f8_38+f5f7_76+f6f6_19; + crypto_int64 h3 = f0f3_2+f1f2_2 +f4f9_38+f5f8_38+f6f7_38; + crypto_int64 h4 = f0f4_2+f1f3_4 +f2f2 +f5f9_76+f6f8_38+f7f7_38; + crypto_int64 h5 = f0f5_2+f1f4_2 +f2f3_2 +f6f9_38+f7f8_38; + crypto_int64 h6 = f0f6_2+f1f5_4 +f2f4_2 +f3f3_2 +f7f9_76+f8f8_19; + crypto_int64 h7 = f0f7_2+f1f6_2 +f2f5_2 +f3f4_2 +f8f9_38; + crypto_int64 h8 = f0f8_2+f1f7_4 +f2f6_2 +f3f5_4 +f4f4 +f9f9_38; + crypto_int64 h9 = f0f9_2+f1f8_2 +f2f7_2 +f3f6_2 +f4f5_2; + crypto_int64 carry0; + crypto_int64 carry1; + crypto_int64 carry2; + crypto_int64 carry3; + crypto_int64 carry4; + crypto_int64 carry5; + crypto_int64 carry6; + crypto_int64 carry7; + crypto_int64 carry8; + crypto_int64 carry9; + + h0 += h0; + h1 += h1; + h2 += h2; + h3 += h3; + h4 += h4; + h5 += h5; + h6 += h6; + h7 += h7; + h8 += h8; + h9 += h9; + + carry0 = (h0 + (crypto_int64) (1<<25)) >> 26; h1 += carry0; h0 -= carry0 << 26; + carry4 = (h4 + (crypto_int64) (1<<25)) >> 26; h5 += carry4; h4 -= carry4 << 26; + + carry1 = (h1 + (crypto_int64) (1<<24)) >> 25; h2 += carry1; h1 -= carry1 << 25; + carry5 = (h5 + (crypto_int64) (1<<24)) >> 25; h6 += carry5; h5 -= carry5 << 25; + + carry2 = (h2 + (crypto_int64) (1<<25)) >> 26; h3 += carry2; h2 -= carry2 << 26; + carry6 = (h6 + (crypto_int64) (1<<25)) >> 26; h7 += carry6; h6 -= carry6 << 26; + + carry3 = (h3 + (crypto_int64) (1<<24)) >> 25; h4 += carry3; h3 -= carry3 << 25; + carry7 = (h7 + (crypto_int64) (1<<24)) >> 25; h8 += carry7; h7 -= carry7 << 25; + + carry4 = (h4 + (crypto_int64) (1<<25)) >> 26; h5 += carry4; h4 -= carry4 << 26; + carry8 = (h8 + (crypto_int64) (1<<25)) >> 26; h9 += carry8; h8 -= carry8 << 26; + + carry9 = (h9 + (crypto_int64) (1<<24)) >> 25; h0 += carry9 * 19; h9 -= carry9 << 25; + + carry0 = (h0 + (crypto_int64) (1<<25)) >> 26; h1 += carry0; h0 -= carry0 << 26; + + h[0] = h0; + h[1] = h1; + h[2] = h2; + h[3] = h3; + h[4] = h4; + h[5] = h5; + h[6] = h6; + h[7] = h7; + h[8] = h8; + h[9] = h9; +} diff --git a/src/ed25519-supercop-ref10/fe_sub.c b/src/ed25519-supercop-ref10/fe_sub.c new file mode 100644 index 0000000..6e26b7d --- /dev/null +++ b/src/ed25519-supercop-ref10/fe_sub.c @@ -0,0 +1,57 @@ +#include "fe.h" + +/* +h = f - g +Can overlap h with f or g. + +Preconditions: + |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. + |g| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. + +Postconditions: + |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. 
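+
+No carry chain is needed here: limbwise, |f_i - g_i| <= |f_i| + |g_i|
+<= 2.2*2^25 = 1.1*2^26 (and likewise 1.1*2^25 for the odd limbs),
+which still fits the 1.65*2^26 input bound of a following fe_mul or
+fe_sq.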
+*/
+
+void fe_sub(fe h,const fe f,const fe g)
+{
+  crypto_int32 f0 = f[0];
+  crypto_int32 f1 = f[1];
+  crypto_int32 f2 = f[2];
+  crypto_int32 f3 = f[3];
+  crypto_int32 f4 = f[4];
+  crypto_int32 f5 = f[5];
+  crypto_int32 f6 = f[6];
+  crypto_int32 f7 = f[7];
+  crypto_int32 f8 = f[8];
+  crypto_int32 f9 = f[9];
+  crypto_int32 g0 = g[0];
+  crypto_int32 g1 = g[1];
+  crypto_int32 g2 = g[2];
+  crypto_int32 g3 = g[3];
+  crypto_int32 g4 = g[4];
+  crypto_int32 g5 = g[5];
+  crypto_int32 g6 = g[6];
+  crypto_int32 g7 = g[7];
+  crypto_int32 g8 = g[8];
+  crypto_int32 g9 = g[9];
+  crypto_int32 h0 = f0 - g0;
+  crypto_int32 h1 = f1 - g1;
+  crypto_int32 h2 = f2 - g2;
+  crypto_int32 h3 = f3 - g3;
+  crypto_int32 h4 = f4 - g4;
+  crypto_int32 h5 = f5 - g5;
+  crypto_int32 h6 = f6 - g6;
+  crypto_int32 h7 = f7 - g7;
+  crypto_int32 h8 = f8 - g8;
+  crypto_int32 h9 = f9 - g9;
+  h[0] = h0;
+  h[1] = h1;
+  h[2] = h2;
+  h[3] = h3;
+  h[4] = h4;
+  h[5] = h5;
+  h[6] = h6;
+  h[7] = h7;
+  h[8] = h8;
+  h[9] = h9;
+}
diff --git a/src/ed25519-supercop-ref10/fe_tobytes.c b/src/ed25519-supercop-ref10/fe_tobytes.c
new file mode 100644
index 0000000..0a63baf
--- /dev/null
+++ b/src/ed25519-supercop-ref10/fe_tobytes.c
@@ -0,0 +1,119 @@
+#include "fe.h"
+
+/*
+Preconditions:
+  |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+
+Write p=2^255-19; q=floor(h/p).
+Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
+
+Proof:
+  Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
+  Also have |h-2^230 h9|<2^231 so |19 2^(-255)(h-2^230 h9)|<1/4.
+
+  Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
+  Then 0<y<1.
+
+  Write r=h-pq.
+  Have 0<=r<=p-1=2^255-20.
+  Thus 0<=r+19(2^(-255))r<2^255.
+
+  Write x=r+19(2^(-255))r+y.
+  Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
+
+  Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
+  so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
+*/
+
+void fe_tobytes(unsigned char *s,const fe h)
+{
+  crypto_int32 h0 = h[0];
+  crypto_int32 h1 = h[1];
+  crypto_int32 h2 = h[2];
+  crypto_int32 h3 = h[3];
+  crypto_int32 h4 = h[4];
+  crypto_int32 h5 = h[5];
+  crypto_int32 h6 = h[6];
+  crypto_int32 h7 = h[7];
+  crypto_int32 h8 = h[8];
+  crypto_int32 h9 = h[9];
+  crypto_int32 q;
+  crypto_int32 carry0;
+  crypto_int32 carry1;
+  crypto_int32 carry2;
+  crypto_int32 carry3;
+  crypto_int32 carry4;
+  crypto_int32 carry5;
+  crypto_int32 carry6;
+  crypto_int32 carry7;
+  crypto_int32 carry8;
+  crypto_int32 carry9;
+
+  q = (19 * h9 + (((crypto_int32) 1) << 24)) >> 25;
+  q = (h0 + q) >> 26;
+  q = (h1 + q) >> 25;
+  q = (h2 + q) >> 26;
+  q = (h3 + q) >> 25;
+  q = (h4 + q) >> 26;
+  q = (h5 + q) >> 25;
+  q = (h6 + q) >> 26;
+  q = (h7 + q) >> 25;
+  q = (h8 + q) >> 26;
+  q = (h9 + q) >> 25;
+
+  /* Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. */
+  h0 += 19 * q;
+  /* Goal: Output h-2^255 q, which is between 0 and 2^255-20. */
+
+  carry0 = h0 >> 26; h1 += carry0; h0 -= carry0 << 26;
+  carry1 = h1 >> 25; h2 += carry1; h1 -= carry1 << 25;
+  carry2 = h2 >> 26; h3 += carry2; h2 -= carry2 << 26;
+  carry3 = h3 >> 25; h4 += carry3; h3 -= carry3 << 25;
+  carry4 = h4 >> 26; h5 += carry4; h4 -= carry4 << 26;
+  carry5 = h5 >> 25; h6 += carry5; h5 -= carry5 << 25;
+  carry6 = h6 >> 26; h7 += carry6; h6 -= carry6 << 26;
+  carry7 = h7 >> 25; h8 += carry7; h7 -= carry7 << 25;
+  carry8 = h8 >> 26; h9 += carry8; h8 -= carry8 << 26;
+  carry9 = h9 >> 25;               h9 -= carry9 << 25;
+                  /* h10 = carry9 */
+
+  /*
+  Goal: Output h0+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
+  Have h0+...+2^230 h9 between 0 and 2^255-1;
+  evidently 2^255 h10-2^255 q = 0.
+  Goal: Output h0+...+2^230 h9.
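+
+  The ten limbs now carry 26,25,26,25,...,25 fully reduced bits; the
+  stores below pack them little-endian, splicing adjacent limbs at the
+  byte seams, e.g. s[3] = (h0 >> 24) | (h1 << 2) combines the top two
+  bits of h0 with the low six bits of h1.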
+ */ + + s[0] = h0 >> 0; + s[1] = h0 >> 8; + s[2] = h0 >> 16; + s[3] = (h0 >> 24) | (h1 << 2); + s[4] = h1 >> 6; + s[5] = h1 >> 14; + s[6] = (h1 >> 22) | (h2 << 3); + s[7] = h2 >> 5; + s[8] = h2 >> 13; + s[9] = (h2 >> 21) | (h3 << 5); + s[10] = h3 >> 3; + s[11] = h3 >> 11; + s[12] = (h3 >> 19) | (h4 << 6); + s[13] = h4 >> 2; + s[14] = h4 >> 10; + s[15] = h4 >> 18; + s[16] = h5 >> 0; + s[17] = h5 >> 8; + s[18] = h5 >> 16; + s[19] = (h5 >> 24) | (h6 << 1); + s[20] = h6 >> 7; + s[21] = h6 >> 15; + s[22] = (h6 >> 23) | (h7 << 3); + s[23] = h7 >> 5; + s[24] = h7 >> 13; + s[25] = (h7 >> 21) | (h8 << 4); + s[26] = h8 >> 4; + s[27] = h8 >> 12; + s[28] = (h8 >> 20) | (h9 << 6); + s[29] = h9 >> 2; + s[30] = h9 >> 10; + s[31] = h9 >> 18; +} diff --git a/src/ed25519-supercop-ref10/ge.h b/src/ed25519-supercop-ref10/ge.h new file mode 100644 index 0000000..55e95f9 --- /dev/null +++ b/src/ed25519-supercop-ref10/ge.h @@ -0,0 +1,95 @@ +#ifndef GE_H +#define GE_H + +/* +ge means group element. + +Here the group is the set of pairs (x,y) of field elements (see fe.h) +satisfying -x^2 + y^2 = 1 + d x^2y^2 +where d = -121665/121666. + +Representations: + ge_p2 (projective): (X:Y:Z) satisfying x=X/Z, y=Y/Z + ge_p3 (extended): (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT + ge_p1p1 (completed): ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T + ge_precomp (Duif): (y+x,y-x,2dxy) +*/ + +#include "fe.h" + +typedef struct { + fe X; + fe Y; + fe Z; +} ge_p2; + +typedef struct { + fe X; + fe Y; + fe Z; + fe T; +} ge_p3; + +typedef struct { + fe X; + fe Y; + fe Z; + fe T; +} ge_p1p1; + +typedef struct { + fe yplusx; + fe yminusx; + fe xy2d; +} ge_precomp; + +typedef struct { + fe YplusX; + fe YminusX; + fe Z; + fe T2d; +} ge_cached; + +#define ge_frombytes_negate_vartime crypto_sign_ed25519_ref10_ge_frombytes_negate_vartime +#define ge_tobytes crypto_sign_ed25519_ref10_ge_tobytes +#define ge_p3_tobytes crypto_sign_ed25519_ref10_ge_p3_tobytes + +#define ge_p2_0 crypto_sign_ed25519_ref10_ge_p2_0 +#define ge_p3_0 crypto_sign_ed25519_ref10_ge_p3_0 +#define ge_precomp_0 crypto_sign_ed25519_ref10_ge_precomp_0 +#define ge_p3_to_p2 crypto_sign_ed25519_ref10_ge_p3_to_p2 +#define ge_p3_to_cached crypto_sign_ed25519_ref10_ge_p3_to_cached +#define ge_p1p1_to_p2 crypto_sign_ed25519_ref10_ge_p1p1_to_p2 +#define ge_p1p1_to_p3 crypto_sign_ed25519_ref10_ge_p1p1_to_p3 +#define ge_p2_dbl crypto_sign_ed25519_ref10_ge_p2_dbl +#define ge_p3_dbl crypto_sign_ed25519_ref10_ge_p3_dbl + +#define ge_madd crypto_sign_ed25519_ref10_ge_madd +#define ge_msub crypto_sign_ed25519_ref10_ge_msub +#define ge_add crypto_sign_ed25519_ref10_ge_add +#define ge_sub crypto_sign_ed25519_ref10_ge_sub +#define ge_scalarmult_base crypto_sign_ed25519_ref10_ge_scalarmult_base +#define ge_double_scalarmult_vartime crypto_sign_ed25519_ref10_ge_double_scalarmult_vartime + +extern void ge_tobytes(unsigned char *,const ge_p2 *); +extern void ge_p3_tobytes(unsigned char *,const ge_p3 *); +extern int ge_frombytes_negate_vartime(ge_p3 *,const unsigned char *); + +extern void ge_p2_0(ge_p2 *); +extern void ge_p3_0(ge_p3 *); +extern void ge_precomp_0(ge_precomp *); +extern void ge_p3_to_p2(ge_p2 *,const ge_p3 *); +extern void ge_p3_to_cached(ge_cached *,const ge_p3 *); +extern void ge_p1p1_to_p2(ge_p2 *,const ge_p1p1 *); +extern void ge_p1p1_to_p3(ge_p3 *,const ge_p1p1 *); +extern void ge_p2_dbl(ge_p1p1 *,const ge_p2 *); +extern void ge_p3_dbl(ge_p1p1 *,const ge_p3 *); + +extern void ge_madd(ge_p1p1 *,const ge_p3 *,const ge_precomp *); +extern void ge_msub(ge_p1p1 *,const ge_p3 *,const 
ge_precomp *);
+extern void ge_add(ge_p1p1 *,const ge_p3 *,const ge_cached *);
+extern void ge_sub(ge_p1p1 *,const ge_p3 *,const ge_cached *);
+extern void ge_scalarmult_base(ge_p3 *,const unsigned char *);
+extern void ge_double_scalarmult_vartime(ge_p2 *,const unsigned char *,const ge_p3 *,const unsigned char *);
+
+#endif
diff --git a/src/ed25519-supercop-ref10/ge_add.c b/src/ed25519-supercop-ref10/ge_add.c
new file mode 100644
index 0000000..da7ff5d
--- /dev/null
+++ b/src/ed25519-supercop-ref10/ge_add.c
@@ -0,0 +1,11 @@
+#include "ge.h"
+
+/*
+r = p + q
+*/
+
+void ge_add(ge_p1p1 *r,const ge_p3 *p,const ge_cached *q)
+{
+  fe t0;
+#include "ge_add.h"
+}
diff --git a/src/ed25519-supercop-ref10/ge_add.h b/src/ed25519-supercop-ref10/ge_add.h
new file mode 100644
index 0000000..7481f8f
--- /dev/null
+++ b/src/ed25519-supercop-ref10/ge_add.h
@@ -0,0 +1,97 @@
+
+/* qhasm: enter ge_add */
+
+/* qhasm: fe X1 */
+
+/* qhasm: fe Y1 */
+
+/* qhasm: fe Z1 */
+
+/* qhasm: fe Z2 */
+
+/* qhasm: fe T1 */
+
+/* qhasm: fe ZZ */
+
+/* qhasm: fe YpX2 */
+
+/* qhasm: fe YmX2 */
+
+/* qhasm: fe T2d2 */
+
+/* qhasm: fe X3 */
+
+/* qhasm: fe Y3 */
+
+/* qhasm: fe Z3 */
+
+/* qhasm: fe T3 */
+
+/* qhasm: fe YpX1 */
+
+/* qhasm: fe YmX1 */
+
+/* qhasm: fe A */
+
+/* qhasm: fe B */
+
+/* qhasm: fe C */
+
+/* qhasm: fe D */
+
+/* qhasm: YpX1 = Y1+X1 */
+/* asm 1: fe_add(>YpX1=fe#1,<Y1=fe#12,<X1=fe#11); */
+/* asm 2: fe_add(>YpX1=r->X,<Y1=p->Y,<X1=p->X); */
+fe_add(r->X,p->Y,p->X);
+
+/* qhasm: YmX1 = Y1-X1 */
+/* asm 1: fe_sub(>YmX1=fe#2,<Y1=fe#12,<X1=fe#11); */
+/* asm 2: fe_sub(>YmX1=r->Y,<Y1=p->Y,<X1=p->X); */
+fe_sub(r->Y,p->Y,p->X);
+
+/* qhasm: A = YpX1*YpX2 */
+/* asm 1: fe_mul(>A=fe#3,<YpX1=fe#1,<YpX2=fe#15); */
+/* asm 2: fe_mul(>A=r->Z,<YpX1=r->X,<YpX2=q->YplusX); */
+fe_mul(r->Z,r->X,q->YplusX);
+
+/* qhasm: B = YmX1*YmX2 */
+/* asm 1: fe_mul(>B=fe#2,<YmX1=fe#2,<YmX2=fe#16); */
+/* asm 2: fe_mul(>B=r->Y,<YmX1=r->Y,<YmX2=q->YminusX); */
+fe_mul(r->Y,r->Y,q->YminusX);
+
+/* qhasm: C = T2d2*T1 */
+/* asm 1: fe_mul(>C=fe#4,<T2d2=fe#18,<T1=fe#14); */
+/* asm 2: fe_mul(>C=r->T,<T2d2=q->T2d,<T1=p->T); */
+fe_mul(r->T,q->T2d,p->T);
+
+/* qhasm: ZZ = Z1*Z2 */
+/* asm 1: fe_mul(>ZZ=fe#1,<Z1=fe#13,<Z2=fe#17); */
+/* asm 2: fe_mul(>ZZ=r->X,<Z1=p->Z,<Z2=q->Z); */
+fe_mul(r->X,p->Z,q->Z);
+
+/* qhasm: D = 2*ZZ */
+/* asm 1: fe_add(>D=fe#5,<ZZ=fe#1,<ZZ=fe#1); */
+/* asm 2: fe_add(>D=t0,<ZZ=r->X,<ZZ=r->X); */
+fe_add(t0,r->X,r->X);
+
+/* qhasm: X3 = A-B */
+/* asm 1: fe_sub(>X3=fe#1,<A=fe#3,<B=fe#2); */
+/* asm 2: fe_sub(>X3=r->X,<A=r->Z,<B=r->Y); */
+fe_sub(r->X,r->Z,r->Y);
+
+/* qhasm: Y3 = A+B */
+/* asm 1: fe_add(>Y3=fe#2,<A=fe#3,<B=fe#2); */
+/* asm 2: fe_add(>Y3=r->Y,<A=r->Z,<B=r->Y); */
+fe_add(r->Y,r->Z,r->Y);
+
+/* qhasm: Z3 = D+C */
+/* asm 1: fe_add(>Z3=fe#3,<D=fe#5,<C=fe#4); */
+/* asm 2: fe_add(>Z3=r->Z,<D=t0,<C=r->T); */
+fe_add(r->Z,t0,r->T);
+
+/* qhasm: T3 = D-C */
+/* asm 1: fe_sub(>T3=fe#4,<D=fe#5,<C=fe#4); */
+/* asm 2: fe_sub(>T3=r->T,<D=t0,<C=r->T); */
+fe_sub(r->T,t0,r->T);
+
+/* qhasm: return */
diff --git a/src/ed25519-supercop-ref10/ge_add.q b/src/ed25519-supercop-ref10/ge_add.q
new file mode 100644
index 0000000..a6572ab
--- /dev/null
+++ b/src/ed25519-supercop-ref10/ge_add.q
@@ -0,0 +1,49 @@
+:name:fe:r->X:r->Y:r->Z:r->T:t0:t1:t2:t3:t4:t5:p->X:p->Y:p->Z:p->T:q->YplusX:q->YminusX:q->Z:q->T2d:
+fe r:var/r=fe:
+
+enter f:enter/f:>X1=fe#11:>Y1=fe#12:>Z1=fe#13:>T1=fe#14:>YpX2=fe#15:>YmX2=fe#16:>Z2=fe#17:>T2d2=fe#18:
+return:nofallthrough:<r=fe:
+
+h=fe:asm/fe_add(>h,<f,<g);:
+h=fe:asm/fe_sub(>h,<f,<g);:
+h=fe:asm/fe_mul(>h,<f,<g);:
+h=fe:asm/fe_sq(>h,<f);:
+h=fe:asm/fe_add(>h,<f,<f);:
+
+:
+
+fe X1
+fe Y1
+fe Z1
+fe Z2
+fe T1
+fe ZZ
+fe YpX2
+fe YmX2
+fe T2d2
+fe X3
+fe Y3
+fe Z3
+fe T3
+fe YpX1
+fe YmX1
+fe A
+fe B
+fe C
+fe D
+
+enter ge_add
+
+YpX1 = Y1+X1
+YmX1 = Y1-X1
+A = YpX1*YpX2
+B = YmX1*YmX2
+C = T2d2*T1
+ZZ = Z1*Z2
+D = 2*ZZ
+X3 = A-B
+Y3 = A+B
+Z3 = D+C
+T3 = D-C
+
+return
diff --git a/src/ed25519-supercop-ref10/ge_double_scalarmult.c b/src/ed25519-supercop-ref10/ge_double_scalarmult.c
new file mode 100644
--- /dev/null
+++ b/src/ed25519-supercop-ref10/ge_double_scalarmult.c
@@ -0,0 +1,96 @@
+#include "ge.h"
+
+static void slide(signed char *r,const unsigned char *a)
+{
+  int i;
+  int b;
+  int k;
+
+  for (i = 0;i < 256;++i)
+    r[i] = 1 & (a[i >> 3] >> (i & 7));
+B is the Ed25519 base point (x,4/5) with x positive. +*/ + +void ge_double_scalarmult_vartime(ge_p2 *r,const unsigned char *a,const ge_p3 *A,const unsigned char *b) +{ + signed char aslide[256]; + signed char bslide[256]; + ge_cached Ai[8]; /* A,3A,5A,7A,9A,11A,13A,15A */ + ge_p1p1 t; + ge_p3 u; + ge_p3 A2; + int i; + + slide(aslide,a); + slide(bslide,b); + + ge_p3_to_cached(&Ai[0],A); + ge_p3_dbl(&t,A); ge_p1p1_to_p3(&A2,&t); + ge_add(&t,&A2,&Ai[0]); ge_p1p1_to_p3(&u,&t); ge_p3_to_cached(&Ai[1],&u); + ge_add(&t,&A2,&Ai[1]); ge_p1p1_to_p3(&u,&t); ge_p3_to_cached(&Ai[2],&u); + ge_add(&t,&A2,&Ai[2]); ge_p1p1_to_p3(&u,&t); ge_p3_to_cached(&Ai[3],&u); + ge_add(&t,&A2,&Ai[3]); ge_p1p1_to_p3(&u,&t); ge_p3_to_cached(&Ai[4],&u); + ge_add(&t,&A2,&Ai[4]); ge_p1p1_to_p3(&u,&t); ge_p3_to_cached(&Ai[5],&u); + ge_add(&t,&A2,&Ai[5]); ge_p1p1_to_p3(&u,&t); ge_p3_to_cached(&Ai[6],&u); + ge_add(&t,&A2,&Ai[6]); ge_p1p1_to_p3(&u,&t); ge_p3_to_cached(&Ai[7],&u); + + ge_p2_0(r); + + for (i = 255;i >= 0;--i) { + if (aslide[i] || bslide[i]) break; + } + + for (;i >= 0;--i) { + ge_p2_dbl(&t,r); + + if (aslide[i] > 0) { + ge_p1p1_to_p3(&u,&t); + ge_add(&t,&u,&Ai[aslide[i]/2]); + } else if (aslide[i] < 0) { + ge_p1p1_to_p3(&u,&t); + ge_sub(&t,&u,&Ai[(-aslide[i])/2]); + } + + if (bslide[i] > 0) { + ge_p1p1_to_p3(&u,&t); + ge_madd(&t,&u,&Bi[bslide[i]/2]); + } else if (bslide[i] < 0) { + ge_p1p1_to_p3(&u,&t); + ge_msub(&t,&u,&Bi[(-bslide[i])/2]); + } + + ge_p1p1_to_p2(r,&t); + } +} diff --git a/src/ed25519-supercop-ref10/ge_frombytes.c b/src/ed25519-supercop-ref10/ge_frombytes.c new file mode 100644 index 0000000..1a059ee --- /dev/null +++ b/src/ed25519-supercop-ref10/ge_frombytes.c @@ -0,0 +1,50 @@ +#include "ge.h" + +static const fe d = { +#include "d.h" +} ; + +static const fe sqrtm1 = { +#include "sqrtm1.h" +} ; + +int ge_frombytes_negate_vartime(ge_p3 *h,const unsigned char *s) +{ + fe u; + fe v; + fe v3; + fe vxx; + fe check; + + fe_frombytes(h->Y,s); + fe_1(h->Z); + fe_sq(u,h->Y); + fe_mul(v,u,d); + fe_sub(u,u,h->Z); /* u = y^2-1 */ + fe_add(v,v,h->Z); /* v = dy^2+1 */ + + fe_sq(v3,v); + fe_mul(v3,v3,v); /* v3 = v^3 */ + fe_sq(h->X,v3); + fe_mul(h->X,h->X,v); + fe_mul(h->X,h->X,u); /* x = uv^7 */ + + fe_pow22523(h->X,h->X); /* x = (uv^7)^((q-5)/8) */ + fe_mul(h->X,h->X,v3); + fe_mul(h->X,h->X,u); /* x = uv^3(uv^7)^((q-5)/8) */ + + fe_sq(vxx,h->X); + fe_mul(vxx,vxx,v); + fe_sub(check,vxx,u); /* vx^2-u */ + if (fe_isnonzero(check)) { + fe_add(check,vxx,u); /* vx^2+u */ + if (fe_isnonzero(check)) return -1; + fe_mul(h->X,h->X,sqrtm1); + } + + if (fe_isnegative(h->X) == (s[31] >> 7)) + fe_neg(h->X,h->X); + + fe_mul(h->T,h->X,h->Y); + return 0; +} diff --git a/src/ed25519-supercop-ref10/ge_madd.c b/src/ed25519-supercop-ref10/ge_madd.c new file mode 100644 index 0000000..6225717 --- /dev/null +++ b/src/ed25519-supercop-ref10/ge_madd.c @@ -0,0 +1,11 @@ +#include "ge.h" + +/* +r = p + q +*/ + +void ge_madd(ge_p1p1 *r,const ge_p3 *p,const ge_precomp *q) +{ + fe t0; +#include "ge_madd.h" +} diff --git a/src/ed25519-supercop-ref10/ge_madd.h b/src/ed25519-supercop-ref10/ge_madd.h new file mode 100644 index 0000000..ecae849 --- /dev/null +++ b/src/ed25519-supercop-ref10/ge_madd.h @@ -0,0 +1,88 @@ + +/* qhasm: enter ge_madd */ + +/* qhasm: fe X1 */ + +/* qhasm: fe Y1 */ + +/* qhasm: fe Z1 */ + +/* qhasm: fe T1 */ + +/* qhasm: fe ypx2 */ + +/* qhasm: fe ymx2 */ + +/* qhasm: fe xy2d2 */ + +/* qhasm: fe X3 */ + +/* qhasm: fe Y3 */ + +/* qhasm: fe Z3 */ + +/* qhasm: fe T3 */ + +/* qhasm: fe YpX1 */ + +/* qhasm: fe YmX1 */ + +/* 
qhasm: fe A */ + +/* qhasm: fe B */ + +/* qhasm: fe C */ + +/* qhasm: fe D */ + +/* qhasm: YpX1 = Y1+X1 */ +/* asm 1: fe_add(>YpX1=fe#1,YpX1=r->X,Y,X); */ +fe_add(r->X,p->Y,p->X); + +/* qhasm: YmX1 = Y1-X1 */ +/* asm 1: fe_sub(>YmX1=fe#2,YmX1=r->Y,Y,X); */ +fe_sub(r->Y,p->Y,p->X); + +/* qhasm: A = YpX1*ypx2 */ +/* asm 1: fe_mul(>A=fe#3,A=r->Z,X,yplusx); */ +fe_mul(r->Z,r->X,q->yplusx); + +/* qhasm: B = YmX1*ymx2 */ +/* asm 1: fe_mul(>B=fe#2,B=r->Y,Y,yminusx); */ +fe_mul(r->Y,r->Y,q->yminusx); + +/* qhasm: C = xy2d2*T1 */ +/* asm 1: fe_mul(>C=fe#4,C=r->T,xy2d,T); */ +fe_mul(r->T,q->xy2d,p->T); + +/* qhasm: D = 2*Z1 */ +/* asm 1: fe_add(>D=fe#5,D=t0,Z,Z); */ +fe_add(t0,p->Z,p->Z); + +/* qhasm: X3 = A-B */ +/* asm 1: fe_sub(>X3=fe#1,X3=r->X,Z,Y); */ +fe_sub(r->X,r->Z,r->Y); + +/* qhasm: Y3 = A+B */ +/* asm 1: fe_add(>Y3=fe#2,Y3=r->Y,Z,Y); */ +fe_add(r->Y,r->Z,r->Y); + +/* qhasm: Z3 = D+C */ +/* asm 1: fe_add(>Z3=fe#3,Z3=r->Z,T); */ +fe_add(r->Z,t0,r->T); + +/* qhasm: T3 = D-C */ +/* asm 1: fe_sub(>T3=fe#4,T3=r->T,T); */ +fe_sub(r->T,t0,r->T); + +/* qhasm: return */ diff --git a/src/ed25519-supercop-ref10/ge_madd.q b/src/ed25519-supercop-ref10/ge_madd.q new file mode 100644 index 0000000..aa3db45 --- /dev/null +++ b/src/ed25519-supercop-ref10/ge_madd.q @@ -0,0 +1,46 @@ +:name:fe:r->X:r->Y:r->Z:r->T:t0:t1:t2:t3:t4:t5:p->X:p->Y:p->Z:p->T:q->yplusx:q->yminusx:q->xy2d: +fe r:var/r=fe: + +enter f:enter/f:>X1=fe#11:>Y1=fe#12:>Z1=fe#13:>T1=fe#14:>ypx2=fe#15:>ymx2=fe#16:>xy2d2=fe#17: +return:nofallthrough:h=fe:asm/fe_add(>h,h=fe:asm/fe_sub(>h,h=fe:asm/fe_mul(>h,h=fe:asm/fe_sq(>h,h=fe:asm/fe_add(>h,YpX1=fe#1,YpX1=r->X,Y,X); */ +fe_add(r->X,p->Y,p->X); + +/* qhasm: YmX1 = Y1-X1 */ +/* asm 1: fe_sub(>YmX1=fe#2,YmX1=r->Y,Y,X); */ +fe_sub(r->Y,p->Y,p->X); + +/* qhasm: A = YpX1*ymx2 */ +/* asm 1: fe_mul(>A=fe#3,A=r->Z,X,yminusx); */ +fe_mul(r->Z,r->X,q->yminusx); + +/* qhasm: B = YmX1*ypx2 */ +/* asm 1: fe_mul(>B=fe#2,B=r->Y,Y,yplusx); */ +fe_mul(r->Y,r->Y,q->yplusx); + +/* qhasm: C = xy2d2*T1 */ +/* asm 1: fe_mul(>C=fe#4,C=r->T,xy2d,T); */ +fe_mul(r->T,q->xy2d,p->T); + +/* qhasm: D = 2*Z1 */ +/* asm 1: fe_add(>D=fe#5,D=t0,Z,Z); */ +fe_add(t0,p->Z,p->Z); + +/* qhasm: X3 = A-B */ +/* asm 1: fe_sub(>X3=fe#1,X3=r->X,Z,Y); */ +fe_sub(r->X,r->Z,r->Y); + +/* qhasm: Y3 = A+B */ +/* asm 1: fe_add(>Y3=fe#2,Y3=r->Y,Z,Y); */ +fe_add(r->Y,r->Z,r->Y); + +/* qhasm: Z3 = D-C */ +/* asm 1: fe_sub(>Z3=fe#3,Z3=r->Z,T); */ +fe_sub(r->Z,t0,r->T); + +/* qhasm: T3 = D+C */ +/* asm 1: fe_add(>T3=fe#4,T3=r->T,T); */ +fe_add(r->T,t0,r->T); + +/* qhasm: return */ diff --git a/src/ed25519-supercop-ref10/ge_msub.q b/src/ed25519-supercop-ref10/ge_msub.q new file mode 100644 index 0000000..e3cadd8 --- /dev/null +++ b/src/ed25519-supercop-ref10/ge_msub.q @@ -0,0 +1,46 @@ +:name:fe:r->X:r->Y:r->Z:r->T:t0:t1:t2:t3:t4:t5:p->X:p->Y:p->Z:p->T:q->yplusx:q->yminusx:q->xy2d: +fe r:var/r=fe: + +enter f:enter/f:>X1=fe#11:>Y1=fe#12:>Z1=fe#13:>T1=fe#14:>ypx2=fe#15:>ymx2=fe#16:>xy2d2=fe#17: +return:nofallthrough:h=fe:asm/fe_add(>h,h=fe:asm/fe_sub(>h,h=fe:asm/fe_mul(>h,h=fe:asm/fe_sq(>h,h=fe:asm/fe_add(>h,X,p->X,p->T); + fe_mul(r->Y,p->Y,p->Z); + fe_mul(r->Z,p->Z,p->T); +} diff --git a/src/ed25519-supercop-ref10/ge_p1p1_to_p3.c b/src/ed25519-supercop-ref10/ge_p1p1_to_p3.c new file mode 100644 index 0000000..2f57b10 --- /dev/null +++ b/src/ed25519-supercop-ref10/ge_p1p1_to_p3.c @@ -0,0 +1,13 @@ +#include "ge.h" + +/* +r = p +*/ + +extern void ge_p1p1_to_p3(ge_p3 *r,const ge_p1p1 *p) +{ + fe_mul(r->X,p->X,p->T); + fe_mul(r->Y,p->Y,p->Z); + 
fe_mul(r->Z,p->Z,p->T); + fe_mul(r->T,p->X,p->Y); +} diff --git a/src/ed25519-supercop-ref10/ge_p2_0.c b/src/ed25519-supercop-ref10/ge_p2_0.c new file mode 100644 index 0000000..6191d1e --- /dev/null +++ b/src/ed25519-supercop-ref10/ge_p2_0.c @@ -0,0 +1,8 @@ +#include "ge.h" + +void ge_p2_0(ge_p2 *h) +{ + fe_0(h->X); + fe_1(h->Y); + fe_1(h->Z); +} diff --git a/src/ed25519-supercop-ref10/ge_p2_dbl.c b/src/ed25519-supercop-ref10/ge_p2_dbl.c new file mode 100644 index 0000000..2e332b5 --- /dev/null +++ b/src/ed25519-supercop-ref10/ge_p2_dbl.c @@ -0,0 +1,11 @@ +#include "ge.h" + +/* +r = 2 * p +*/ + +void ge_p2_dbl(ge_p1p1 *r,const ge_p2 *p) +{ + fe t0; +#include "ge_p2_dbl.h" +} diff --git a/src/ed25519-supercop-ref10/ge_p2_dbl.h b/src/ed25519-supercop-ref10/ge_p2_dbl.h new file mode 100644 index 0000000..128efed --- /dev/null +++ b/src/ed25519-supercop-ref10/ge_p2_dbl.h @@ -0,0 +1,73 @@ + +/* qhasm: enter ge_p2_dbl */ + +/* qhasm: fe X1 */ + +/* qhasm: fe Y1 */ + +/* qhasm: fe Z1 */ + +/* qhasm: fe A */ + +/* qhasm: fe AA */ + +/* qhasm: fe XX */ + +/* qhasm: fe YY */ + +/* qhasm: fe B */ + +/* qhasm: fe X3 */ + +/* qhasm: fe Y3 */ + +/* qhasm: fe Z3 */ + +/* qhasm: fe T3 */ + +/* qhasm: XX=X1^2 */ +/* asm 1: fe_sq(>XX=fe#1,XX=r->X,X); */ +fe_sq(r->X,p->X); + +/* qhasm: YY=Y1^2 */ +/* asm 1: fe_sq(>YY=fe#3,YY=r->Z,Y); */ +fe_sq(r->Z,p->Y); + +/* qhasm: B=2*Z1^2 */ +/* asm 1: fe_sq2(>B=fe#4,B=r->T,Z); */ +fe_sq2(r->T,p->Z); + +/* qhasm: A=X1+Y1 */ +/* asm 1: fe_add(>A=fe#2,A=r->Y,X,Y); */ +fe_add(r->Y,p->X,p->Y); + +/* qhasm: AA=A^2 */ +/* asm 1: fe_sq(>AA=fe#5,AA=t0,Y); */ +fe_sq(t0,r->Y); + +/* qhasm: Y3=YY+XX */ +/* asm 1: fe_add(>Y3=fe#2,Y3=r->Y,Z,X); */ +fe_add(r->Y,r->Z,r->X); + +/* qhasm: Z3=YY-XX */ +/* asm 1: fe_sub(>Z3=fe#3,Z3=r->Z,Z,X); */ +fe_sub(r->Z,r->Z,r->X); + +/* qhasm: X3=AA-Y3 */ +/* asm 1: fe_sub(>X3=fe#1,X3=r->X,Y); */ +fe_sub(r->X,t0,r->Y); + +/* qhasm: T3=B-Z3 */ +/* asm 1: fe_sub(>T3=fe#4,T3=r->T,T,Z); */ +fe_sub(r->T,r->T,r->Z); + +/* qhasm: return */ diff --git a/src/ed25519-supercop-ref10/ge_p2_dbl.q b/src/ed25519-supercop-ref10/ge_p2_dbl.q new file mode 100644 index 0000000..170d42f --- /dev/null +++ b/src/ed25519-supercop-ref10/ge_p2_dbl.q @@ -0,0 +1,41 @@ +:name:fe:r->X:r->Y:r->Z:r->T:t0:t1:t2:t3:t4:t5:p->X:p->Y:p->Z: +fe r:var/r=fe: + +enter f:enter/f:>X1=fe#11:>Y1=fe#12:>Z1=fe#13: +return:nofallthrough:h=fe:asm/fe_add(>h,h=fe:asm/fe_sub(>h,h=fe:asm/fe_mul(>h,h=fe:asm/fe_sq(>h,h=fe:asm/fe_sq2(>h,h=fe:asm/fe_add(>h,X); + fe_1(h->Y); + fe_1(h->Z); + fe_0(h->T); +} diff --git a/src/ed25519-supercop-ref10/ge_p3_dbl.c b/src/ed25519-supercop-ref10/ge_p3_dbl.c new file mode 100644 index 0000000..0d8a059 --- /dev/null +++ b/src/ed25519-supercop-ref10/ge_p3_dbl.c @@ -0,0 +1,12 @@ +#include "ge.h" + +/* +r = 2 * p +*/ + +void ge_p3_dbl(ge_p1p1 *r,const ge_p3 *p) +{ + ge_p2 q; + ge_p3_to_p2(&q,p); + ge_p2_dbl(r,&q); +} diff --git a/src/ed25519-supercop-ref10/ge_p3_to_cached.c b/src/ed25519-supercop-ref10/ge_p3_to_cached.c new file mode 100644 index 0000000..bde6422 --- /dev/null +++ b/src/ed25519-supercop-ref10/ge_p3_to_cached.c @@ -0,0 +1,17 @@ +#include "ge.h" + +/* +r = p +*/ + +static const fe d2 = { +#include "d2.h" +} ; + +extern void ge_p3_to_cached(ge_cached *r,const ge_p3 *p) +{ + fe_add(r->YplusX,p->Y,p->X); + fe_sub(r->YminusX,p->Y,p->X); + fe_copy(r->Z,p->Z); + fe_mul(r->T2d,p->T,d2); +} diff --git a/src/ed25519-supercop-ref10/ge_p3_to_p2.c b/src/ed25519-supercop-ref10/ge_p3_to_p2.c new file mode 100644 index 0000000..e532a9e --- /dev/null +++ 
b/src/ed25519-supercop-ref10/ge_p3_to_p2.c @@ -0,0 +1,12 @@ +#include "ge.h" + +/* +r = p +*/ + +extern void ge_p3_to_p2(ge_p2 *r,const ge_p3 *p) +{ + fe_copy(r->X,p->X); + fe_copy(r->Y,p->Y); + fe_copy(r->Z,p->Z); +} diff --git a/src/ed25519-supercop-ref10/ge_p3_tobytes.c b/src/ed25519-supercop-ref10/ge_p3_tobytes.c new file mode 100644 index 0000000..21cb2fc --- /dev/null +++ b/src/ed25519-supercop-ref10/ge_p3_tobytes.c @@ -0,0 +1,14 @@ +#include "ge.h" + +void ge_p3_tobytes(unsigned char *s,const ge_p3 *h) +{ + fe recip; + fe x; + fe y; + + fe_invert(recip,h->Z); + fe_mul(x,h->X,recip); + fe_mul(y,h->Y,recip); + fe_tobytes(s,y); + s[31] ^= fe_isnegative(x) << 7; +} diff --git a/src/ed25519-supercop-ref10/ge_precomp_0.c b/src/ed25519-supercop-ref10/ge_precomp_0.c new file mode 100644 index 0000000..2e21886 --- /dev/null +++ b/src/ed25519-supercop-ref10/ge_precomp_0.c @@ -0,0 +1,8 @@ +#include "ge.h" + +void ge_precomp_0(ge_precomp *h) +{ + fe_1(h->yplusx); + fe_1(h->yminusx); + fe_0(h->xy2d); +} diff --git a/src/ed25519-supercop-ref10/ge_scalarmult_base.c b/src/ed25519-supercop-ref10/ge_scalarmult_base.c new file mode 100644 index 0000000..421e4fa --- /dev/null +++ b/src/ed25519-supercop-ref10/ge_scalarmult_base.c @@ -0,0 +1,105 @@ +#include "ge.h" +#include "crypto_uint32.h" + +static unsigned char equal(signed char b,signed char c) +{ + unsigned char ub = b; + unsigned char uc = c; + unsigned char x = ub ^ uc; /* 0: yes; 1..255: no */ + crypto_uint32 y = x; /* 0: yes; 1..255: no */ + y -= 1; /* 4294967295: yes; 0..254: no */ + y >>= 31; /* 1: yes; 0: no */ + return y; +} + +static unsigned char negative(signed char b) +{ + unsigned long long x = b; /* 18446744073709551361..18446744073709551615: yes; 0..255: no */ + x >>= 63; /* 1: yes; 0: no */ + return x; +} + +static void cmov(ge_precomp *t,ge_precomp *u,unsigned char b) +{ + fe_cmov(t->yplusx,u->yplusx,b); + fe_cmov(t->yminusx,u->yminusx,b); + fe_cmov(t->xy2d,u->xy2d,b); +} + +/* base[i][j] = (j+1)*256^i*B */ +static ge_precomp base[32][8] = { +#include "base.h" +} ; + +static void select(ge_precomp *t,int pos,signed char b) +{ + ge_precomp minust; + unsigned char bnegative = negative(b); + unsigned char babs = b - (((-bnegative) & b) << 1); + + ge_precomp_0(t); + cmov(t,&base[pos][0],equal(babs,1)); + cmov(t,&base[pos][1],equal(babs,2)); + cmov(t,&base[pos][2],equal(babs,3)); + cmov(t,&base[pos][3],equal(babs,4)); + cmov(t,&base[pos][4],equal(babs,5)); + cmov(t,&base[pos][5],equal(babs,6)); + cmov(t,&base[pos][6],equal(babs,7)); + cmov(t,&base[pos][7],equal(babs,8)); + fe_copy(minust.yplusx,t->yminusx); + fe_copy(minust.yminusx,t->yplusx); + fe_neg(minust.xy2d,t->xy2d); + cmov(t,&minust,bnegative); +} + +/* +h = a * B +where a = a[0]+256*a[1]+...+256^31 a[31] +B is the Ed25519 base point (x,4/5) with x positive. 
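+
+The code below first rewrites a in signed radix 16,
+  a = e[0] + 16*e[1] + ... + 16^63*e[63], each e[i] in [-8,8],
+then adds the odd-indexed digits' multiples, multiplies the
+accumulator by 16 via four doublings, and adds the even-indexed
+digits' multiples; every table lookup base[i/2][...] goes through
+equal/negative/cmov above, so the access pattern is independent of
+the secret a.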
+ +Preconditions: + a[31] <= 127 +*/ + +void ge_scalarmult_base(ge_p3 *h,const unsigned char *a) +{ + signed char e[64]; + signed char carry; + ge_p1p1 r; + ge_p2 s; + ge_precomp t; + int i; + + for (i = 0;i < 32;++i) { + e[2 * i + 0] = (a[i] >> 0) & 15; + e[2 * i + 1] = (a[i] >> 4) & 15; + } + /* each e[i] is between 0 and 15 */ + /* e[63] is between 0 and 7 */ + + carry = 0; + for (i = 0;i < 63;++i) { + e[i] += carry; + carry = e[i] + 8; + carry >>= 4; + e[i] -= carry << 4; + } + e[63] += carry; + /* each e[i] is between -8 and 8 */ + + ge_p3_0(h); + for (i = 1;i < 64;i += 2) { + select(&t,i / 2,e[i]); + ge_madd(&r,h,&t); ge_p1p1_to_p3(h,&r); + } + + ge_p3_dbl(&r,h); ge_p1p1_to_p2(&s,&r); + ge_p2_dbl(&r,&s); ge_p1p1_to_p2(&s,&r); + ge_p2_dbl(&r,&s); ge_p1p1_to_p2(&s,&r); + ge_p2_dbl(&r,&s); ge_p1p1_to_p3(h,&r); + + for (i = 0;i < 64;i += 2) { + select(&t,i / 2,e[i]); + ge_madd(&r,h,&t); ge_p1p1_to_p3(h,&r); + } +} diff --git a/src/ed25519-supercop-ref10/ge_sub.c b/src/ed25519-supercop-ref10/ge_sub.c new file mode 100644 index 0000000..69f3d54 --- /dev/null +++ b/src/ed25519-supercop-ref10/ge_sub.c @@ -0,0 +1,11 @@ +#include "ge.h" + +/* +r = p - q +*/ + +void ge_sub(ge_p1p1 *r,const ge_p3 *p,const ge_cached *q) +{ + fe t0; +#include "ge_sub.h" +} diff --git a/src/ed25519-supercop-ref10/ge_sub.h b/src/ed25519-supercop-ref10/ge_sub.h new file mode 100644 index 0000000..b4ef1f5 --- /dev/null +++ b/src/ed25519-supercop-ref10/ge_sub.h @@ -0,0 +1,97 @@ + +/* qhasm: enter ge_sub */ + +/* qhasm: fe X1 */ + +/* qhasm: fe Y1 */ + +/* qhasm: fe Z1 */ + +/* qhasm: fe Z2 */ + +/* qhasm: fe T1 */ + +/* qhasm: fe ZZ */ + +/* qhasm: fe YpX2 */ + +/* qhasm: fe YmX2 */ + +/* qhasm: fe T2d2 */ + +/* qhasm: fe X3 */ + +/* qhasm: fe Y3 */ + +/* qhasm: fe Z3 */ + +/* qhasm: fe T3 */ + +/* qhasm: fe YpX1 */ + +/* qhasm: fe YmX1 */ + +/* qhasm: fe A */ + +/* qhasm: fe B */ + +/* qhasm: fe C */ + +/* qhasm: fe D */ + +/* qhasm: YpX1 = Y1+X1 */ +/* asm 1: fe_add(>YpX1=fe#1,YpX1=r->X,Y,X); */ +fe_add(r->X,p->Y,p->X); + +/* qhasm: YmX1 = Y1-X1 */ +/* asm 1: fe_sub(>YmX1=fe#2,YmX1=r->Y,Y,X); */ +fe_sub(r->Y,p->Y,p->X); + +/* qhasm: A = YpX1*YmX2 */ +/* asm 1: fe_mul(>A=fe#3,A=r->Z,X,YminusX); */ +fe_mul(r->Z,r->X,q->YminusX); + +/* qhasm: B = YmX1*YpX2 */ +/* asm 1: fe_mul(>B=fe#2,B=r->Y,Y,YplusX); */ +fe_mul(r->Y,r->Y,q->YplusX); + +/* qhasm: C = T2d2*T1 */ +/* asm 1: fe_mul(>C=fe#4,C=r->T,T2d,T); */ +fe_mul(r->T,q->T2d,p->T); + +/* qhasm: ZZ = Z1*Z2 */ +/* asm 1: fe_mul(>ZZ=fe#1,ZZ=r->X,Z,Z); */ +fe_mul(r->X,p->Z,q->Z); + +/* qhasm: D = 2*ZZ */ +/* asm 1: fe_add(>D=fe#5,D=t0,X,X); */ +fe_add(t0,r->X,r->X); + +/* qhasm: X3 = A-B */ +/* asm 1: fe_sub(>X3=fe#1,X3=r->X,Z,Y); */ +fe_sub(r->X,r->Z,r->Y); + +/* qhasm: Y3 = A+B */ +/* asm 1: fe_add(>Y3=fe#2,Y3=r->Y,Z,Y); */ +fe_add(r->Y,r->Z,r->Y); + +/* qhasm: Z3 = D-C */ +/* asm 1: fe_sub(>Z3=fe#3,Z3=r->Z,T); */ +fe_sub(r->Z,t0,r->T); + +/* qhasm: T3 = D+C */ +/* asm 1: fe_add(>T3=fe#4,T3=r->T,T); */ +fe_add(r->T,t0,r->T); + +/* qhasm: return */ diff --git a/src/ed25519-supercop-ref10/ge_sub.q b/src/ed25519-supercop-ref10/ge_sub.q new file mode 100644 index 0000000..2779a4a --- /dev/null +++ b/src/ed25519-supercop-ref10/ge_sub.q @@ -0,0 +1,49 @@ +:name:fe:r->X:r->Y:r->Z:r->T:t0:t1:t2:t3:t4:t5:p->X:p->Y:p->Z:p->T:q->YplusX:q->YminusX:q->Z:q->T2d: +fe r:var/r=fe: + +enter f:enter/f:>X1=fe#11:>Y1=fe#12:>Z1=fe#13:>T1=fe#14:>YpX2=fe#15:>YmX2=fe#16:>Z2=fe#17:>T2d2=fe#18: 
+return:nofallthrough:h=fe:asm/fe_add(>h,h=fe:asm/fe_sub(>h,h=fe:asm/fe_mul(>h,h=fe:asm/fe_sq(>h,h=fe:asm/fe_add(>h,Z); + fe_mul(x,h->X,recip); + fe_mul(y,h->Y,recip); + fe_tobytes(s,y); + s[31] ^= fe_isnegative(x) << 7; +} diff --git a/src/ed25519-supercop-ref10/keypair.c b/src/ed25519-supercop-ref10/keypair.c new file mode 100644 index 0000000..c7009e9 --- /dev/null +++ b/src/ed25519-supercop-ref10/keypair.c @@ -0,0 +1,22 @@ +#include "crypto_sign.h" +#include "crypto_hash_sha512.h" +#include "ge.h" + +int crypto_sign_keypair(unsigned char *pk,unsigned char *sk) +{ + unsigned char h[64]; + ge_p3 A; + int i; + + randombytes(sk,32); + crypto_hash_sha512(h,sk,32); + h[0] &= 248; + h[31] &= 63; + h[31] |= 64; + + ge_scalarmult_base(&A,h); + ge_p3_tobytes(pk,&A); + + for (i = 0;i < 32;++i) sk[32 + i] = pk[i]; + return 0; +} diff --git a/src/ed25519-supercop-ref10/open.c b/src/ed25519-supercop-ref10/open.c new file mode 100644 index 0000000..60738f0 --- /dev/null +++ b/src/ed25519-supercop-ref10/open.c @@ -0,0 +1,40 @@ +#include "crypto_sign.h" +#include "crypto_hash_sha512.h" +#include "crypto_verify_32.h" +#include "ge.h" +#include "sc.h" + +int crypto_sign_open( + unsigned char *m,unsigned long long *mlen, + const unsigned char *sm,unsigned long long smlen, + const unsigned char *pk +) +{ + unsigned char h[64]; + unsigned char checkr[32]; + ge_p3 A; + ge_p2 R; + unsigned long long i; + + *mlen = -1; + if (smlen < 64) return -1; + if (sm[63] & 224) return -1; + if (ge_frombytes_negate_vartime(&A,pk) != 0) return -1; + + for (i = 0;i < smlen;++i) m[i] = sm[i]; + for (i = 0;i < 32;++i) m[32 + i] = pk[i]; + crypto_hash_sha512(h,m,smlen); + sc_reduce(h); + + ge_double_scalarmult_vartime(&R,h,&A,sm + 32); + ge_tobytes(checkr,&R); + if (crypto_verify_32(checkr,sm) != 0) { + for (i = 0;i < smlen;++i) m[i] = 0; + return -1; + } + + for (i = 0;i < smlen - 64;++i) m[i] = sm[64 + i]; + for (i = smlen - 64;i < smlen;++i) m[i] = 0; + *mlen = smlen - 64; + return 0; +} diff --git a/src/ed25519-supercop-ref10/pow22523.h b/src/ed25519-supercop-ref10/pow22523.h new file mode 100644 index 0000000..60ffe0d --- /dev/null +++ b/src/ed25519-supercop-ref10/pow22523.h @@ -0,0 +1,160 @@ + +/* qhasm: fe z1 */ + +/* qhasm: fe z2 */ + +/* qhasm: fe z8 */ + +/* qhasm: fe z9 */ + +/* qhasm: fe z11 */ + +/* qhasm: fe z22 */ + +/* qhasm: fe z_5_0 */ + +/* qhasm: fe z_10_5 */ + +/* qhasm: fe z_10_0 */ + +/* qhasm: fe z_20_10 */ + +/* qhasm: fe z_20_0 */ + +/* qhasm: fe z_40_20 */ + +/* qhasm: fe z_40_0 */ + +/* qhasm: fe z_50_10 */ + +/* qhasm: fe z_50_0 */ + +/* qhasm: fe z_100_50 */ + +/* qhasm: fe z_100_0 */ + +/* qhasm: fe z_200_100 */ + +/* qhasm: fe z_200_0 */ + +/* qhasm: fe z_250_50 */ + +/* qhasm: fe z_250_0 */ + +/* qhasm: fe z_252_2 */ + +/* qhasm: fe z_252_3 */ + +/* qhasm: enter pow22523 */ + +/* qhasm: z2 = z1^2^1 */ +/* asm 1: fe_sq(>z2=fe#1,z2=fe#1,>z2=fe#1); */ +/* asm 2: fe_sq(>z2=t0,z2=t0,>z2=t0); */ +fe_sq(t0,z); for (i = 1;i < 1;++i) fe_sq(t0,t0); + +/* qhasm: z8 = z2^2^2 */ +/* asm 1: fe_sq(>z8=fe#2,z8=fe#2,>z8=fe#2); */ +/* asm 2: fe_sq(>z8=t1,z8=t1,>z8=t1); */ +fe_sq(t1,t0); for (i = 1;i < 2;++i) fe_sq(t1,t1); + +/* qhasm: z9 = z1*z8 */ +/* asm 1: fe_mul(>z9=fe#2,z9=t1,z11=fe#1,z11=t0,z22=fe#1,z22=fe#1,>z22=fe#1); */ +/* asm 2: fe_sq(>z22=t0,z22=t0,>z22=t0); */ +fe_sq(t0,t0); for (i = 1;i < 1;++i) fe_sq(t0,t0); + +/* qhasm: z_5_0 = z9*z22 */ +/* asm 1: fe_mul(>z_5_0=fe#1,z_5_0=t0,z_10_5=fe#2,z_10_5=fe#2,>z_10_5=fe#2); */ +/* asm 2: fe_sq(>z_10_5=t1,z_10_5=t1,>z_10_5=t1); */ +fe_sq(t1,t0); for (i = 
1;i < 5;++i) fe_sq(t1,t1); + +/* qhasm: z_10_0 = z_10_5*z_5_0 */ +/* asm 1: fe_mul(>z_10_0=fe#1,z_10_0=t0,z_20_10=fe#2,z_20_10=fe#2,>z_20_10=fe#2); */ +/* asm 2: fe_sq(>z_20_10=t1,z_20_10=t1,>z_20_10=t1); */ +fe_sq(t1,t0); for (i = 1;i < 10;++i) fe_sq(t1,t1); + +/* qhasm: z_20_0 = z_20_10*z_10_0 */ +/* asm 1: fe_mul(>z_20_0=fe#2,z_20_0=t1,z_40_20=fe#3,z_40_20=fe#3,>z_40_20=fe#3); */ +/* asm 2: fe_sq(>z_40_20=t2,z_40_20=t2,>z_40_20=t2); */ +fe_sq(t2,t1); for (i = 1;i < 20;++i) fe_sq(t2,t2); + +/* qhasm: z_40_0 = z_40_20*z_20_0 */ +/* asm 1: fe_mul(>z_40_0=fe#2,z_40_0=t1,z_50_10=fe#2,z_50_10=fe#2,>z_50_10=fe#2); */ +/* asm 2: fe_sq(>z_50_10=t1,z_50_10=t1,>z_50_10=t1); */ +fe_sq(t1,t1); for (i = 1;i < 10;++i) fe_sq(t1,t1); + +/* qhasm: z_50_0 = z_50_10*z_10_0 */ +/* asm 1: fe_mul(>z_50_0=fe#1,z_50_0=t0,z_100_50=fe#2,z_100_50=fe#2,>z_100_50=fe#2); */ +/* asm 2: fe_sq(>z_100_50=t1,z_100_50=t1,>z_100_50=t1); */ +fe_sq(t1,t0); for (i = 1;i < 50;++i) fe_sq(t1,t1); + +/* qhasm: z_100_0 = z_100_50*z_50_0 */ +/* asm 1: fe_mul(>z_100_0=fe#2,z_100_0=t1,z_200_100=fe#3,z_200_100=fe#3,>z_200_100=fe#3); */ +/* asm 2: fe_sq(>z_200_100=t2,z_200_100=t2,>z_200_100=t2); */ +fe_sq(t2,t1); for (i = 1;i < 100;++i) fe_sq(t2,t2); + +/* qhasm: z_200_0 = z_200_100*z_100_0 */ +/* asm 1: fe_mul(>z_200_0=fe#2,z_200_0=t1,z_250_50=fe#2,z_250_50=fe#2,>z_250_50=fe#2); */ +/* asm 2: fe_sq(>z_250_50=t1,z_250_50=t1,>z_250_50=t1); */ +fe_sq(t1,t1); for (i = 1;i < 50;++i) fe_sq(t1,t1); + +/* qhasm: z_250_0 = z_250_50*z_50_0 */ +/* asm 1: fe_mul(>z_250_0=fe#1,z_250_0=t0,z_252_2=fe#1,z_252_2=fe#1,>z_252_2=fe#1); */ +/* asm 2: fe_sq(>z_252_2=t0,z_252_2=t0,>z_252_2=t0); */ +fe_sq(t0,t0); for (i = 1;i < 2;++i) fe_sq(t0,t0); + +/* qhasm: z_252_3 = z_252_2*z1 */ +/* asm 1: fe_mul(>z_252_3=fe#12,z_252_3=out,z1=fe#11: +return:nofallthrough:h=fe:asm/fe_mul(>h,h=fe:#k:asm/fe_sq(>h,h,>h);: + +: + +fe z1 +fe z2 +fe z8 +fe z9 +fe z11 +fe z22 +fe z_5_0 +fe z_10_5 +fe z_10_0 +fe z_20_10 +fe z_20_0 +fe z_40_20 +fe z_40_0 +fe z_50_10 +fe z_50_0 +fe z_100_50 +fe z_100_0 +fe z_200_100 +fe z_200_0 +fe z_250_50 +fe z_250_0 +fe z_252_2 +fe z_252_3 + +enter pow22523 + +z2 = z1^2^1 +z8 = z2^2^2 +z9 = z1*z8 +z11 = z2*z9 +z22 = z11^2^1 +z_5_0 = z9*z22 +z_10_5 = z_5_0^2^5 +z_10_0 = z_10_5*z_5_0 +z_20_10 = z_10_0^2^10 +z_20_0 = z_20_10*z_10_0 +z_40_20 = z_20_0^2^20 +z_40_0 = z_40_20*z_20_0 +z_50_10 = z_40_0^2^10 +z_50_0 = z_50_10*z_10_0 +z_100_50 = z_50_0^2^50 +z_100_0 = z_100_50*z_50_0 +z_200_100 = z_100_0^2^100 +z_200_0 = z_200_100*z_100_0 +z_250_50 = z_200_0^2^50 +z_250_0 = z_250_50*z_50_0 +z_252_2 = z_250_0^2^2 +z_252_3 = z_252_2*z1 + +return diff --git a/src/ed25519-supercop-ref10/pow225521.h b/src/ed25519-supercop-ref10/pow225521.h new file mode 100644 index 0000000..109df77 --- /dev/null +++ b/src/ed25519-supercop-ref10/pow225521.h @@ -0,0 +1,160 @@ + +/* qhasm: fe z1 */ + +/* qhasm: fe z2 */ + +/* qhasm: fe z8 */ + +/* qhasm: fe z9 */ + +/* qhasm: fe z11 */ + +/* qhasm: fe z22 */ + +/* qhasm: fe z_5_0 */ + +/* qhasm: fe z_10_5 */ + +/* qhasm: fe z_10_0 */ + +/* qhasm: fe z_20_10 */ + +/* qhasm: fe z_20_0 */ + +/* qhasm: fe z_40_20 */ + +/* qhasm: fe z_40_0 */ + +/* qhasm: fe z_50_10 */ + +/* qhasm: fe z_50_0 */ + +/* qhasm: fe z_100_50 */ + +/* qhasm: fe z_100_0 */ + +/* qhasm: fe z_200_100 */ + +/* qhasm: fe z_200_0 */ + +/* qhasm: fe z_250_50 */ + +/* qhasm: fe z_250_0 */ + +/* qhasm: fe z_255_5 */ + +/* qhasm: fe z_255_21 */ + +/* qhasm: enter pow225521 */ + +/* qhasm: z2 = z1^2^1 */ +/* asm 1: fe_sq(>z2=fe#1,z2=fe#1,>z2=fe#1); */ +/* asm 2: 
fe_sq(>z2=t0,z2=t0,>z2=t0); */ +fe_sq(t0,z); for (i = 1;i < 1;++i) fe_sq(t0,t0); + +/* qhasm: z8 = z2^2^2 */ +/* asm 1: fe_sq(>z8=fe#2,z8=fe#2,>z8=fe#2); */ +/* asm 2: fe_sq(>z8=t1,z8=t1,>z8=t1); */ +fe_sq(t1,t0); for (i = 1;i < 2;++i) fe_sq(t1,t1); + +/* qhasm: z9 = z1*z8 */ +/* asm 1: fe_mul(>z9=fe#2,z9=t1,z11=fe#1,z11=t0,z22=fe#3,z22=fe#3,>z22=fe#3); */ +/* asm 2: fe_sq(>z22=t2,z22=t2,>z22=t2); */ +fe_sq(t2,t0); for (i = 1;i < 1;++i) fe_sq(t2,t2); + +/* qhasm: z_5_0 = z9*z22 */ +/* asm 1: fe_mul(>z_5_0=fe#2,z_5_0=t1,z_10_5=fe#3,z_10_5=fe#3,>z_10_5=fe#3); */ +/* asm 2: fe_sq(>z_10_5=t2,z_10_5=t2,>z_10_5=t2); */ +fe_sq(t2,t1); for (i = 1;i < 5;++i) fe_sq(t2,t2); + +/* qhasm: z_10_0 = z_10_5*z_5_0 */ +/* asm 1: fe_mul(>z_10_0=fe#2,z_10_0=t1,z_20_10=fe#3,z_20_10=fe#3,>z_20_10=fe#3); */ +/* asm 2: fe_sq(>z_20_10=t2,z_20_10=t2,>z_20_10=t2); */ +fe_sq(t2,t1); for (i = 1;i < 10;++i) fe_sq(t2,t2); + +/* qhasm: z_20_0 = z_20_10*z_10_0 */ +/* asm 1: fe_mul(>z_20_0=fe#3,z_20_0=t2,z_40_20=fe#4,z_40_20=fe#4,>z_40_20=fe#4); */ +/* asm 2: fe_sq(>z_40_20=t3,z_40_20=t3,>z_40_20=t3); */ +fe_sq(t3,t2); for (i = 1;i < 20;++i) fe_sq(t3,t3); + +/* qhasm: z_40_0 = z_40_20*z_20_0 */ +/* asm 1: fe_mul(>z_40_0=fe#3,z_40_0=t2,z_50_10=fe#3,z_50_10=fe#3,>z_50_10=fe#3); */ +/* asm 2: fe_sq(>z_50_10=t2,z_50_10=t2,>z_50_10=t2); */ +fe_sq(t2,t2); for (i = 1;i < 10;++i) fe_sq(t2,t2); + +/* qhasm: z_50_0 = z_50_10*z_10_0 */ +/* asm 1: fe_mul(>z_50_0=fe#2,z_50_0=t1,z_100_50=fe#3,z_100_50=fe#3,>z_100_50=fe#3); */ +/* asm 2: fe_sq(>z_100_50=t2,z_100_50=t2,>z_100_50=t2); */ +fe_sq(t2,t1); for (i = 1;i < 50;++i) fe_sq(t2,t2); + +/* qhasm: z_100_0 = z_100_50*z_50_0 */ +/* asm 1: fe_mul(>z_100_0=fe#3,z_100_0=t2,z_200_100=fe#4,z_200_100=fe#4,>z_200_100=fe#4); */ +/* asm 2: fe_sq(>z_200_100=t3,z_200_100=t3,>z_200_100=t3); */ +fe_sq(t3,t2); for (i = 1;i < 100;++i) fe_sq(t3,t3); + +/* qhasm: z_200_0 = z_200_100*z_100_0 */ +/* asm 1: fe_mul(>z_200_0=fe#3,z_200_0=t2,z_250_50=fe#3,z_250_50=fe#3,>z_250_50=fe#3); */ +/* asm 2: fe_sq(>z_250_50=t2,z_250_50=t2,>z_250_50=t2); */ +fe_sq(t2,t2); for (i = 1;i < 50;++i) fe_sq(t2,t2); + +/* qhasm: z_250_0 = z_250_50*z_50_0 */ +/* asm 1: fe_mul(>z_250_0=fe#2,z_250_0=t1,z_255_5=fe#2,z_255_5=fe#2,>z_255_5=fe#2); */ +/* asm 2: fe_sq(>z_255_5=t1,z_255_5=t1,>z_255_5=t1); */ +fe_sq(t1,t1); for (i = 1;i < 5;++i) fe_sq(t1,t1); + +/* qhasm: z_255_21 = z_255_5*z11 */ +/* asm 1: fe_mul(>z_255_21=fe#12,z_255_21=out,z1=fe#11: +return:nofallthrough:h=fe:asm/fe_mul(>h,h=fe:#k:asm/fe_sq(>h,h,>h);: + +: + +fe z1 +fe z2 +fe z8 +fe z9 +fe z11 +fe z22 +fe z_5_0 +fe z_10_5 +fe z_10_0 +fe z_20_10 +fe z_20_0 +fe z_40_20 +fe z_40_0 +fe z_50_10 +fe z_50_0 +fe z_100_50 +fe z_100_0 +fe z_200_100 +fe z_200_0 +fe z_250_50 +fe z_250_0 +fe z_255_5 +fe z_255_21 + +enter pow225521 + +z2 = z1^2^1 +z8 = z2^2^2 +z9 = z1*z8 +z11 = z2*z9 +z22 = z11^2^1 +z_5_0 = z9*z22 +z_10_5 = z_5_0^2^5 +z_10_0 = z_10_5*z_5_0 +z_20_10 = z_10_0^2^10 +z_20_0 = z_20_10*z_10_0 +z_40_20 = z_20_0^2^20 +z_40_0 = z_40_20*z_20_0 +z_50_10 = z_40_0^2^10 +z_50_0 = z_50_10*z_10_0 +z_100_50 = z_50_0^2^50 +z_100_0 = z_100_50*z_50_0 +z_200_100 = z_100_0^2^100 +z_200_0 = z_200_100*z_100_0 +z_250_50 = z_200_0^2^50 +z_250_0 = z_250_50*z_50_0 +z_255_5 = z_250_0^2^5 +z_255_21 = z_255_5*z11 + +return diff --git a/src/ed25519-supercop-ref10/q2h.sh b/src/ed25519-supercop-ref10/q2h.sh new file mode 100755 index 0000000..47ec511 --- /dev/null +++ b/src/ed25519-supercop-ref10/q2h.sh @@ -0,0 +1,4 @@ +#!/bin/sh +sed 's/^#.*//' \ +| qhasm-generic \ +| sed 's_//\(.*\)$_/*\1 */_' 
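The two files that follow implement arithmetic modulo the group order
l = 2^252 + 27742317777372353535851937790883648493. A usage sketch
(mirroring sign.c later in this patch; nonce and hram are 64-byte
SHA-512 outputs, az the clamped secret scalar):

    unsigned char S[32];
    sc_reduce(nonce);            /* nonce mod l, reduced in place */
    sc_reduce(hram);             /* hram  mod l, reduced in place */
    sc_muladd(S,hram,az,nonce);  /* S = (hram*az + nonce) mod l   */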
diff --git a/src/ed25519-supercop-ref10/sc.h b/src/ed25519-supercop-ref10/sc.h new file mode 100644 index 0000000..d32ed2e --- /dev/null +++ b/src/ed25519-supercop-ref10/sc.h @@ -0,0 +1,15 @@ +#ifndef SC_H +#define SC_H + +/* +The set of scalars is \Z/l +where l = 2^252 + 27742317777372353535851937790883648493. +*/ + +#define sc_reduce crypto_sign_ed25519_ref10_sc_reduce +#define sc_muladd crypto_sign_ed25519_ref10_sc_muladd + +extern void sc_reduce(unsigned char *); +extern void sc_muladd(unsigned char *,const unsigned char *,const unsigned char *,const unsigned char *); + +#endif diff --git a/src/ed25519-supercop-ref10/sc_muladd.c b/src/ed25519-supercop-ref10/sc_muladd.c new file mode 100644 index 0000000..6f1e9d0 --- /dev/null +++ b/src/ed25519-supercop-ref10/sc_muladd.c @@ -0,0 +1,368 @@ +#include "sc.h" +#include "crypto_int64.h" +#include "crypto_uint32.h" +#include "crypto_uint64.h" + +static crypto_uint64 load_3(const unsigned char *in) +{ + crypto_uint64 result; + result = (crypto_uint64) in[0]; + result |= ((crypto_uint64) in[1]) << 8; + result |= ((crypto_uint64) in[2]) << 16; + return result; +} + +static crypto_uint64 load_4(const unsigned char *in) +{ + crypto_uint64 result; + result = (crypto_uint64) in[0]; + result |= ((crypto_uint64) in[1]) << 8; + result |= ((crypto_uint64) in[2]) << 16; + result |= ((crypto_uint64) in[3]) << 24; + return result; +} + +/* +Input: + a[0]+256*a[1]+...+256^31*a[31] = a + b[0]+256*b[1]+...+256^31*b[31] = b + c[0]+256*c[1]+...+256^31*c[31] = c + +Output: + s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l + where l = 2^252 + 27742317777372353535851937790883648493. +*/ + +void sc_muladd(unsigned char *s,const unsigned char *a,const unsigned char *b,const unsigned char *c) +{ + crypto_int64 a0 = 2097151 & load_3(a); + crypto_int64 a1 = 2097151 & (load_4(a + 2) >> 5); + crypto_int64 a2 = 2097151 & (load_3(a + 5) >> 2); + crypto_int64 a3 = 2097151 & (load_4(a + 7) >> 7); + crypto_int64 a4 = 2097151 & (load_4(a + 10) >> 4); + crypto_int64 a5 = 2097151 & (load_3(a + 13) >> 1); + crypto_int64 a6 = 2097151 & (load_4(a + 15) >> 6); + crypto_int64 a7 = 2097151 & (load_3(a + 18) >> 3); + crypto_int64 a8 = 2097151 & load_3(a + 21); + crypto_int64 a9 = 2097151 & (load_4(a + 23) >> 5); + crypto_int64 a10 = 2097151 & (load_3(a + 26) >> 2); + crypto_int64 a11 = (load_4(a + 28) >> 7); + crypto_int64 b0 = 2097151 & load_3(b); + crypto_int64 b1 = 2097151 & (load_4(b + 2) >> 5); + crypto_int64 b2 = 2097151 & (load_3(b + 5) >> 2); + crypto_int64 b3 = 2097151 & (load_4(b + 7) >> 7); + crypto_int64 b4 = 2097151 & (load_4(b + 10) >> 4); + crypto_int64 b5 = 2097151 & (load_3(b + 13) >> 1); + crypto_int64 b6 = 2097151 & (load_4(b + 15) >> 6); + crypto_int64 b7 = 2097151 & (load_3(b + 18) >> 3); + crypto_int64 b8 = 2097151 & load_3(b + 21); + crypto_int64 b9 = 2097151 & (load_4(b + 23) >> 5); + crypto_int64 b10 = 2097151 & (load_3(b + 26) >> 2); + crypto_int64 b11 = (load_4(b + 28) >> 7); + crypto_int64 c0 = 2097151 & load_3(c); + crypto_int64 c1 = 2097151 & (load_4(c + 2) >> 5); + crypto_int64 c2 = 2097151 & (load_3(c + 5) >> 2); + crypto_int64 c3 = 2097151 & (load_4(c + 7) >> 7); + crypto_int64 c4 = 2097151 & (load_4(c + 10) >> 4); + crypto_int64 c5 = 2097151 & (load_3(c + 13) >> 1); + crypto_int64 c6 = 2097151 & (load_4(c + 15) >> 6); + crypto_int64 c7 = 2097151 & (load_3(c + 18) >> 3); + crypto_int64 c8 = 2097151 & load_3(c + 21); + crypto_int64 c9 = 2097151 & (load_4(c + 23) >> 5); + crypto_int64 c10 = 2097151 & (load_3(c + 26) >> 2); + crypto_int64 c11 = (load_4(c + 
28) >> 7); + crypto_int64 s0; + crypto_int64 s1; + crypto_int64 s2; + crypto_int64 s3; + crypto_int64 s4; + crypto_int64 s5; + crypto_int64 s6; + crypto_int64 s7; + crypto_int64 s8; + crypto_int64 s9; + crypto_int64 s10; + crypto_int64 s11; + crypto_int64 s12; + crypto_int64 s13; + crypto_int64 s14; + crypto_int64 s15; + crypto_int64 s16; + crypto_int64 s17; + crypto_int64 s18; + crypto_int64 s19; + crypto_int64 s20; + crypto_int64 s21; + crypto_int64 s22; + crypto_int64 s23; + crypto_int64 carry0; + crypto_int64 carry1; + crypto_int64 carry2; + crypto_int64 carry3; + crypto_int64 carry4; + crypto_int64 carry5; + crypto_int64 carry6; + crypto_int64 carry7; + crypto_int64 carry8; + crypto_int64 carry9; + crypto_int64 carry10; + crypto_int64 carry11; + crypto_int64 carry12; + crypto_int64 carry13; + crypto_int64 carry14; + crypto_int64 carry15; + crypto_int64 carry16; + crypto_int64 carry17; + crypto_int64 carry18; + crypto_int64 carry19; + crypto_int64 carry20; + crypto_int64 carry21; + crypto_int64 carry22; + + s0 = c0 + a0*b0; + s1 = c1 + a0*b1 + a1*b0; + s2 = c2 + a0*b2 + a1*b1 + a2*b0; + s3 = c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0; + s4 = c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0; + s5 = c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0; + s6 = c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0; + s7 = c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0; + s8 = c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0; + s9 = c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0; + s10 = c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0; + s11 = c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0; + s12 = a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1; + s13 = a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2; + s14 = a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3; + s15 = a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4; + s16 = a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5; + s17 = a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6; + s18 = a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7; + s19 = a8*b11 + a9*b10 + a10*b9 + a11*b8; + s20 = a9*b11 + a10*b10 + a11*b9; + s21 = a10*b11 + a11*b10; + s22 = a11*b11; + s23 = 0; + + carry0 = (s0 + (1<<20)) >> 21; s1 += carry0; s0 -= carry0 << 21; + carry2 = (s2 + (1<<20)) >> 21; s3 += carry2; s2 -= carry2 << 21; + carry4 = (s4 + (1<<20)) >> 21; s5 += carry4; s4 -= carry4 << 21; + carry6 = (s6 + (1<<20)) >> 21; s7 += carry6; s6 -= carry6 << 21; + carry8 = (s8 + (1<<20)) >> 21; s9 += carry8; s8 -= carry8 << 21; + carry10 = (s10 + (1<<20)) >> 21; s11 += carry10; s10 -= carry10 << 21; + carry12 = (s12 + (1<<20)) >> 21; s13 += carry12; s12 -= carry12 << 21; + carry14 = (s14 + (1<<20)) >> 21; s15 += carry14; s14 -= carry14 << 21; + carry16 = (s16 + (1<<20)) >> 21; s17 += carry16; s16 -= carry16 << 21; + carry18 = (s18 + (1<<20)) >> 21; s19 += carry18; s18 -= carry18 << 21; + carry20 = (s20 + (1<<20)) >> 21; s21 += carry20; s20 -= carry20 << 21; + carry22 = (s22 + (1<<20)) >> 21; s23 += carry22; s22 -= carry22 << 21; + + carry1 = (s1 + (1<<20)) >> 21; s2 += carry1; s1 -= carry1 << 21; + carry3 = (s3 + (1<<20)) >> 21; s4 += carry3; s3 -= carry3 << 21; + carry5 = (s5 + (1<<20)) >> 21; s6 += carry5; s5 -= carry5 << 21; + carry7 = (s7 + (1<<20)) >> 21; s8 += carry7; s7 -= 
carry7 << 21; + carry9 = (s9 + (1<<20)) >> 21; s10 += carry9; s9 -= carry9 << 21; + carry11 = (s11 + (1<<20)) >> 21; s12 += carry11; s11 -= carry11 << 21; + carry13 = (s13 + (1<<20)) >> 21; s14 += carry13; s13 -= carry13 << 21; + carry15 = (s15 + (1<<20)) >> 21; s16 += carry15; s15 -= carry15 << 21; + carry17 = (s17 + (1<<20)) >> 21; s18 += carry17; s17 -= carry17 << 21; + carry19 = (s19 + (1<<20)) >> 21; s20 += carry19; s19 -= carry19 << 21; + carry21 = (s21 + (1<<20)) >> 21; s22 += carry21; s21 -= carry21 << 21; + + s11 += s23 * 666643; + s12 += s23 * 470296; + s13 += s23 * 654183; + s14 -= s23 * 997805; + s15 += s23 * 136657; + s16 -= s23 * 683901; + s23 = 0; + + s10 += s22 * 666643; + s11 += s22 * 470296; + s12 += s22 * 654183; + s13 -= s22 * 997805; + s14 += s22 * 136657; + s15 -= s22 * 683901; + s22 = 0; + + s9 += s21 * 666643; + s10 += s21 * 470296; + s11 += s21 * 654183; + s12 -= s21 * 997805; + s13 += s21 * 136657; + s14 -= s21 * 683901; + s21 = 0; + + s8 += s20 * 666643; + s9 += s20 * 470296; + s10 += s20 * 654183; + s11 -= s20 * 997805; + s12 += s20 * 136657; + s13 -= s20 * 683901; + s20 = 0; + + s7 += s19 * 666643; + s8 += s19 * 470296; + s9 += s19 * 654183; + s10 -= s19 * 997805; + s11 += s19 * 136657; + s12 -= s19 * 683901; + s19 = 0; + + s6 += s18 * 666643; + s7 += s18 * 470296; + s8 += s18 * 654183; + s9 -= s18 * 997805; + s10 += s18 * 136657; + s11 -= s18 * 683901; + s18 = 0; + + carry6 = (s6 + (1<<20)) >> 21; s7 += carry6; s6 -= carry6 << 21; + carry8 = (s8 + (1<<20)) >> 21; s9 += carry8; s8 -= carry8 << 21; + carry10 = (s10 + (1<<20)) >> 21; s11 += carry10; s10 -= carry10 << 21; + carry12 = (s12 + (1<<20)) >> 21; s13 += carry12; s12 -= carry12 << 21; + carry14 = (s14 + (1<<20)) >> 21; s15 += carry14; s14 -= carry14 << 21; + carry16 = (s16 + (1<<20)) >> 21; s17 += carry16; s16 -= carry16 << 21; + + carry7 = (s7 + (1<<20)) >> 21; s8 += carry7; s7 -= carry7 << 21; + carry9 = (s9 + (1<<20)) >> 21; s10 += carry9; s9 -= carry9 << 21; + carry11 = (s11 + (1<<20)) >> 21; s12 += carry11; s11 -= carry11 << 21; + carry13 = (s13 + (1<<20)) >> 21; s14 += carry13; s13 -= carry13 << 21; + carry15 = (s15 + (1<<20)) >> 21; s16 += carry15; s15 -= carry15 << 21; + + s5 += s17 * 666643; + s6 += s17 * 470296; + s7 += s17 * 654183; + s8 -= s17 * 997805; + s9 += s17 * 136657; + s10 -= s17 * 683901; + s17 = 0; + + s4 += s16 * 666643; + s5 += s16 * 470296; + s6 += s16 * 654183; + s7 -= s16 * 997805; + s8 += s16 * 136657; + s9 -= s16 * 683901; + s16 = 0; + + s3 += s15 * 666643; + s4 += s15 * 470296; + s5 += s15 * 654183; + s6 -= s15 * 997805; + s7 += s15 * 136657; + s8 -= s15 * 683901; + s15 = 0; + + s2 += s14 * 666643; + s3 += s14 * 470296; + s4 += s14 * 654183; + s5 -= s14 * 997805; + s6 += s14 * 136657; + s7 -= s14 * 683901; + s14 = 0; + + s1 += s13 * 666643; + s2 += s13 * 470296; + s3 += s13 * 654183; + s4 -= s13 * 997805; + s5 += s13 * 136657; + s6 -= s13 * 683901; + s13 = 0; + + s0 += s12 * 666643; + s1 += s12 * 470296; + s2 += s12 * 654183; + s3 -= s12 * 997805; + s4 += s12 * 136657; + s5 -= s12 * 683901; + s12 = 0; + + carry0 = (s0 + (1<<20)) >> 21; s1 += carry0; s0 -= carry0 << 21; + carry2 = (s2 + (1<<20)) >> 21; s3 += carry2; s2 -= carry2 << 21; + carry4 = (s4 + (1<<20)) >> 21; s5 += carry4; s4 -= carry4 << 21; + carry6 = (s6 + (1<<20)) >> 21; s7 += carry6; s6 -= carry6 << 21; + carry8 = (s8 + (1<<20)) >> 21; s9 += carry8; s8 -= carry8 << 21; + carry10 = (s10 + (1<<20)) >> 21; s11 += carry10; s10 -= carry10 << 21; + + carry1 = (s1 + (1<<20)) >> 21; s2 += carry1; s1 -= carry1 << 21; + 
carry3 = (s3 + (1<<20)) >> 21; s4 += carry3; s3 -= carry3 << 21; + carry5 = (s5 + (1<<20)) >> 21; s6 += carry5; s5 -= carry5 << 21; + carry7 = (s7 + (1<<20)) >> 21; s8 += carry7; s7 -= carry7 << 21; + carry9 = (s9 + (1<<20)) >> 21; s10 += carry9; s9 -= carry9 << 21; + carry11 = (s11 + (1<<20)) >> 21; s12 += carry11; s11 -= carry11 << 21; + + s0 += s12 * 666643; + s1 += s12 * 470296; + s2 += s12 * 654183; + s3 -= s12 * 997805; + s4 += s12 * 136657; + s5 -= s12 * 683901; + s12 = 0; + + carry0 = s0 >> 21; s1 += carry0; s0 -= carry0 << 21; + carry1 = s1 >> 21; s2 += carry1; s1 -= carry1 << 21; + carry2 = s2 >> 21; s3 += carry2; s2 -= carry2 << 21; + carry3 = s3 >> 21; s4 += carry3; s3 -= carry3 << 21; + carry4 = s4 >> 21; s5 += carry4; s4 -= carry4 << 21; + carry5 = s5 >> 21; s6 += carry5; s5 -= carry5 << 21; + carry6 = s6 >> 21; s7 += carry6; s6 -= carry6 << 21; + carry7 = s7 >> 21; s8 += carry7; s7 -= carry7 << 21; + carry8 = s8 >> 21; s9 += carry8; s8 -= carry8 << 21; + carry9 = s9 >> 21; s10 += carry9; s9 -= carry9 << 21; + carry10 = s10 >> 21; s11 += carry10; s10 -= carry10 << 21; + carry11 = s11 >> 21; s12 += carry11; s11 -= carry11 << 21; + + s0 += s12 * 666643; + s1 += s12 * 470296; + s2 += s12 * 654183; + s3 -= s12 * 997805; + s4 += s12 * 136657; + s5 -= s12 * 683901; + s12 = 0; + + carry0 = s0 >> 21; s1 += carry0; s0 -= carry0 << 21; + carry1 = s1 >> 21; s2 += carry1; s1 -= carry1 << 21; + carry2 = s2 >> 21; s3 += carry2; s2 -= carry2 << 21; + carry3 = s3 >> 21; s4 += carry3; s3 -= carry3 << 21; + carry4 = s4 >> 21; s5 += carry4; s4 -= carry4 << 21; + carry5 = s5 >> 21; s6 += carry5; s5 -= carry5 << 21; + carry6 = s6 >> 21; s7 += carry6; s6 -= carry6 << 21; + carry7 = s7 >> 21; s8 += carry7; s7 -= carry7 << 21; + carry8 = s8 >> 21; s9 += carry8; s8 -= carry8 << 21; + carry9 = s9 >> 21; s10 += carry9; s9 -= carry9 << 21; + carry10 = s10 >> 21; s11 += carry10; s10 -= carry10 << 21; + + s[0] = s0 >> 0; + s[1] = s0 >> 8; + s[2] = (s0 >> 16) | (s1 << 5); + s[3] = s1 >> 3; + s[4] = s1 >> 11; + s[5] = (s1 >> 19) | (s2 << 2); + s[6] = s2 >> 6; + s[7] = (s2 >> 14) | (s3 << 7); + s[8] = s3 >> 1; + s[9] = s3 >> 9; + s[10] = (s3 >> 17) | (s4 << 4); + s[11] = s4 >> 4; + s[12] = s4 >> 12; + s[13] = (s4 >> 20) | (s5 << 1); + s[14] = s5 >> 7; + s[15] = (s5 >> 15) | (s6 << 6); + s[16] = s6 >> 2; + s[17] = s6 >> 10; + s[18] = (s6 >> 18) | (s7 << 3); + s[19] = s7 >> 5; + s[20] = s7 >> 13; + s[21] = s8 >> 0; + s[22] = s8 >> 8; + s[23] = (s8 >> 16) | (s9 << 5); + s[24] = s9 >> 3; + s[25] = s9 >> 11; + s[26] = (s9 >> 19) | (s10 << 2); + s[27] = s10 >> 6; + s[28] = (s10 >> 14) | (s11 << 7); + s[29] = s11 >> 1; + s[30] = s11 >> 9; + s[31] = s11 >> 17; +} diff --git a/src/ed25519-supercop-ref10/sc_reduce.c b/src/ed25519-supercop-ref10/sc_reduce.c new file mode 100644 index 0000000..d01f5a5 --- /dev/null +++ b/src/ed25519-supercop-ref10/sc_reduce.c @@ -0,0 +1,275 @@ +#include "sc.h" +#include "crypto_int64.h" +#include "crypto_uint32.h" +#include "crypto_uint64.h" + +static crypto_uint64 load_3(const unsigned char *in) +{ + crypto_uint64 result; + result = (crypto_uint64) in[0]; + result |= ((crypto_uint64) in[1]) << 8; + result |= ((crypto_uint64) in[2]) << 16; + return result; +} + +static crypto_uint64 load_4(const unsigned char *in) +{ + crypto_uint64 result; + result = (crypto_uint64) in[0]; + result |= ((crypto_uint64) in[1]) << 8; + result |= ((crypto_uint64) in[2]) << 16; + result |= ((crypto_uint64) in[3]) << 24; + return result; +} + +/* +Input: + s[0]+256*s[1]+...+256^63*s[63] = s + +Output: + 
s[0]+256*s[1]+...+256^31*s[31] = s mod l + where l = 2^252 + 27742317777372353535851937790883648493. + Overwrites s in place. +*/ + +void sc_reduce(unsigned char *s) +{ + crypto_int64 s0 = 2097151 & load_3(s); + crypto_int64 s1 = 2097151 & (load_4(s + 2) >> 5); + crypto_int64 s2 = 2097151 & (load_3(s + 5) >> 2); + crypto_int64 s3 = 2097151 & (load_4(s + 7) >> 7); + crypto_int64 s4 = 2097151 & (load_4(s + 10) >> 4); + crypto_int64 s5 = 2097151 & (load_3(s + 13) >> 1); + crypto_int64 s6 = 2097151 & (load_4(s + 15) >> 6); + crypto_int64 s7 = 2097151 & (load_3(s + 18) >> 3); + crypto_int64 s8 = 2097151 & load_3(s + 21); + crypto_int64 s9 = 2097151 & (load_4(s + 23) >> 5); + crypto_int64 s10 = 2097151 & (load_3(s + 26) >> 2); + crypto_int64 s11 = 2097151 & (load_4(s + 28) >> 7); + crypto_int64 s12 = 2097151 & (load_4(s + 31) >> 4); + crypto_int64 s13 = 2097151 & (load_3(s + 34) >> 1); + crypto_int64 s14 = 2097151 & (load_4(s + 36) >> 6); + crypto_int64 s15 = 2097151 & (load_3(s + 39) >> 3); + crypto_int64 s16 = 2097151 & load_3(s + 42); + crypto_int64 s17 = 2097151 & (load_4(s + 44) >> 5); + crypto_int64 s18 = 2097151 & (load_3(s + 47) >> 2); + crypto_int64 s19 = 2097151 & (load_4(s + 49) >> 7); + crypto_int64 s20 = 2097151 & (load_4(s + 52) >> 4); + crypto_int64 s21 = 2097151 & (load_3(s + 55) >> 1); + crypto_int64 s22 = 2097151 & (load_4(s + 57) >> 6); + crypto_int64 s23 = (load_4(s + 60) >> 3); + crypto_int64 carry0; + crypto_int64 carry1; + crypto_int64 carry2; + crypto_int64 carry3; + crypto_int64 carry4; + crypto_int64 carry5; + crypto_int64 carry6; + crypto_int64 carry7; + crypto_int64 carry8; + crypto_int64 carry9; + crypto_int64 carry10; + crypto_int64 carry11; + crypto_int64 carry12; + crypto_int64 carry13; + crypto_int64 carry14; + crypto_int64 carry15; + crypto_int64 carry16; + + s11 += s23 * 666643; + s12 += s23 * 470296; + s13 += s23 * 654183; + s14 -= s23 * 997805; + s15 += s23 * 136657; + s16 -= s23 * 683901; + s23 = 0; + + s10 += s22 * 666643; + s11 += s22 * 470296; + s12 += s22 * 654183; + s13 -= s22 * 997805; + s14 += s22 * 136657; + s15 -= s22 * 683901; + s22 = 0; + + s9 += s21 * 666643; + s10 += s21 * 470296; + s11 += s21 * 654183; + s12 -= s21 * 997805; + s13 += s21 * 136657; + s14 -= s21 * 683901; + s21 = 0; + + s8 += s20 * 666643; + s9 += s20 * 470296; + s10 += s20 * 654183; + s11 -= s20 * 997805; + s12 += s20 * 136657; + s13 -= s20 * 683901; + s20 = 0; + + s7 += s19 * 666643; + s8 += s19 * 470296; + s9 += s19 * 654183; + s10 -= s19 * 997805; + s11 += s19 * 136657; + s12 -= s19 * 683901; + s19 = 0; + + s6 += s18 * 666643; + s7 += s18 * 470296; + s8 += s18 * 654183; + s9 -= s18 * 997805; + s10 += s18 * 136657; + s11 -= s18 * 683901; + s18 = 0; + + carry6 = (s6 + (1<<20)) >> 21; s7 += carry6; s6 -= carry6 << 21; + carry8 = (s8 + (1<<20)) >> 21; s9 += carry8; s8 -= carry8 << 21; + carry10 = (s10 + (1<<20)) >> 21; s11 += carry10; s10 -= carry10 << 21; + carry12 = (s12 + (1<<20)) >> 21; s13 += carry12; s12 -= carry12 << 21; + carry14 = (s14 + (1<<20)) >> 21; s15 += carry14; s14 -= carry14 << 21; + carry16 = (s16 + (1<<20)) >> 21; s17 += carry16; s16 -= carry16 << 21; + + carry7 = (s7 + (1<<20)) >> 21; s8 += carry7; s7 -= carry7 << 21; + carry9 = (s9 + (1<<20)) >> 21; s10 += carry9; s9 -= carry9 << 21; + carry11 = (s11 + (1<<20)) >> 21; s12 += carry11; s11 -= carry11 << 21; + carry13 = (s13 + (1<<20)) >> 21; s14 += carry13; s13 -= carry13 << 21; + carry15 = (s15 + (1<<20)) >> 21; s16 += carry15; s15 -= carry15 << 21; + + s5 += s17 * 666643; + s6 += s17 * 470296; + s7 += s17 * 
654183; + s8 -= s17 * 997805; + s9 += s17 * 136657; + s10 -= s17 * 683901; + s17 = 0; + + s4 += s16 * 666643; + s5 += s16 * 470296; + s6 += s16 * 654183; + s7 -= s16 * 997805; + s8 += s16 * 136657; + s9 -= s16 * 683901; + s16 = 0; + + s3 += s15 * 666643; + s4 += s15 * 470296; + s5 += s15 * 654183; + s6 -= s15 * 997805; + s7 += s15 * 136657; + s8 -= s15 * 683901; + s15 = 0; + + s2 += s14 * 666643; + s3 += s14 * 470296; + s4 += s14 * 654183; + s5 -= s14 * 997805; + s6 += s14 * 136657; + s7 -= s14 * 683901; + s14 = 0; + + s1 += s13 * 666643; + s2 += s13 * 470296; + s3 += s13 * 654183; + s4 -= s13 * 997805; + s5 += s13 * 136657; + s6 -= s13 * 683901; + s13 = 0; + + s0 += s12 * 666643; + s1 += s12 * 470296; + s2 += s12 * 654183; + s3 -= s12 * 997805; + s4 += s12 * 136657; + s5 -= s12 * 683901; + s12 = 0; + + carry0 = (s0 + (1<<20)) >> 21; s1 += carry0; s0 -= carry0 << 21; + carry2 = (s2 + (1<<20)) >> 21; s3 += carry2; s2 -= carry2 << 21; + carry4 = (s4 + (1<<20)) >> 21; s5 += carry4; s4 -= carry4 << 21; + carry6 = (s6 + (1<<20)) >> 21; s7 += carry6; s6 -= carry6 << 21; + carry8 = (s8 + (1<<20)) >> 21; s9 += carry8; s8 -= carry8 << 21; + carry10 = (s10 + (1<<20)) >> 21; s11 += carry10; s10 -= carry10 << 21; + + carry1 = (s1 + (1<<20)) >> 21; s2 += carry1; s1 -= carry1 << 21; + carry3 = (s3 + (1<<20)) >> 21; s4 += carry3; s3 -= carry3 << 21; + carry5 = (s5 + (1<<20)) >> 21; s6 += carry5; s5 -= carry5 << 21; + carry7 = (s7 + (1<<20)) >> 21; s8 += carry7; s7 -= carry7 << 21; + carry9 = (s9 + (1<<20)) >> 21; s10 += carry9; s9 -= carry9 << 21; + carry11 = (s11 + (1<<20)) >> 21; s12 += carry11; s11 -= carry11 << 21; + + s0 += s12 * 666643; + s1 += s12 * 470296; + s2 += s12 * 654183; + s3 -= s12 * 997805; + s4 += s12 * 136657; + s5 -= s12 * 683901; + s12 = 0; + + carry0 = s0 >> 21; s1 += carry0; s0 -= carry0 << 21; + carry1 = s1 >> 21; s2 += carry1; s1 -= carry1 << 21; + carry2 = s2 >> 21; s3 += carry2; s2 -= carry2 << 21; + carry3 = s3 >> 21; s4 += carry3; s3 -= carry3 << 21; + carry4 = s4 >> 21; s5 += carry4; s4 -= carry4 << 21; + carry5 = s5 >> 21; s6 += carry5; s5 -= carry5 << 21; + carry6 = s6 >> 21; s7 += carry6; s6 -= carry6 << 21; + carry7 = s7 >> 21; s8 += carry7; s7 -= carry7 << 21; + carry8 = s8 >> 21; s9 += carry8; s8 -= carry8 << 21; + carry9 = s9 >> 21; s10 += carry9; s9 -= carry9 << 21; + carry10 = s10 >> 21; s11 += carry10; s10 -= carry10 << 21; + carry11 = s11 >> 21; s12 += carry11; s11 -= carry11 << 21; + + s0 += s12 * 666643; + s1 += s12 * 470296; + s2 += s12 * 654183; + s3 -= s12 * 997805; + s4 += s12 * 136657; + s5 -= s12 * 683901; + s12 = 0; + + carry0 = s0 >> 21; s1 += carry0; s0 -= carry0 << 21; + carry1 = s1 >> 21; s2 += carry1; s1 -= carry1 << 21; + carry2 = s2 >> 21; s3 += carry2; s2 -= carry2 << 21; + carry3 = s3 >> 21; s4 += carry3; s3 -= carry3 << 21; + carry4 = s4 >> 21; s5 += carry4; s4 -= carry4 << 21; + carry5 = s5 >> 21; s6 += carry5; s5 -= carry5 << 21; + carry6 = s6 >> 21; s7 += carry6; s6 -= carry6 << 21; + carry7 = s7 >> 21; s8 += carry7; s7 -= carry7 << 21; + carry8 = s8 >> 21; s9 += carry8; s8 -= carry8 << 21; + carry9 = s9 >> 21; s10 += carry9; s9 -= carry9 << 21; + carry10 = s10 >> 21; s11 += carry10; s10 -= carry10 << 21; + + s[0] = s0 >> 0; + s[1] = s0 >> 8; + s[2] = (s0 >> 16) | (s1 << 5); + s[3] = s1 >> 3; + s[4] = s1 >> 11; + s[5] = (s1 >> 19) | (s2 << 2); + s[6] = s2 >> 6; + s[7] = (s2 >> 14) | (s3 << 7); + s[8] = s3 >> 1; + s[9] = s3 >> 9; + s[10] = (s3 >> 17) | (s4 << 4); + s[11] = s4 >> 4; + s[12] = s4 >> 12; + s[13] = (s4 >> 20) | (s5 << 1); + s[14] 
= s5 >> 7; + s[15] = (s5 >> 15) | (s6 << 6); + s[16] = s6 >> 2; + s[17] = s6 >> 10; + s[18] = (s6 >> 18) | (s7 << 3); + s[19] = s7 >> 5; + s[20] = s7 >> 13; + s[21] = s8 >> 0; + s[22] = s8 >> 8; + s[23] = (s8 >> 16) | (s9 << 5); + s[24] = s9 >> 3; + s[25] = s9 >> 11; + s[26] = (s9 >> 19) | (s10 << 2); + s[27] = s10 >> 6; + s[28] = (s10 >> 14) | (s11 << 7); + s[29] = s11 >> 1; + s[30] = s11 >> 9; + s[31] = s11 >> 17; +} diff --git a/src/ed25519-supercop-ref10/sign.c b/src/ed25519-supercop-ref10/sign.c new file mode 100644 index 0000000..4ed729e --- /dev/null +++ b/src/ed25519-supercop-ref10/sign.c @@ -0,0 +1,38 @@ +#include "crypto_sign.h" +#include "crypto_hash_sha512.h" +#include "ge.h" +#include "sc.h" + +int crypto_sign( + unsigned char *sm,unsigned long long *smlen, + const unsigned char *m,unsigned long long mlen, + const unsigned char *sk +) +{ + unsigned char az[64]; + unsigned char r[64]; + unsigned char hram[64]; + ge_p3 R; + unsigned long long i; + + crypto_hash_sha512(az,sk,32); + az[0] &= 248; + az[31] &= 63; + az[31] |= 64; + + *smlen = mlen + 64; + for (i = 0;i < mlen;++i) sm[64 + i] = m[i]; + for (i = 0;i < 32;++i) sm[32 + i] = az[32 + i]; + crypto_hash_sha512(r,sm + 32,mlen + 32); + for (i = 0;i < 32;++i) sm[32 + i] = sk[32 + i]; + + sc_reduce(r); + ge_scalarmult_base(&R,r); + ge_p3_tobytes(sm,&R); + + crypto_hash_sha512(hram,sm,mlen + 64); + sc_reduce(hram); + sc_muladd(sm + 32,hram,az,r); + + return 0; +} diff --git a/src/ed25519-supercop-ref10/sqrtm1.h b/src/ed25519-supercop-ref10/sqrtm1.h new file mode 100644 index 0000000..d8caa23 --- /dev/null +++ b/src/ed25519-supercop-ref10/sqrtm1.h @@ -0,0 +1 @@ +-32595792,-7943725,9377950,3500415,12389472,-272473,-25146209,-2005654,326686,11406482 diff --git a/src/ed25519-supercop-ref10/sqrtm1.py b/src/ed25519-supercop-ref10/sqrtm1.py new file mode 100644 index 0000000..9a47fbc --- /dev/null +++ b/src/ed25519-supercop-ref10/sqrtm1.py @@ -0,0 +1,28 @@ +q = 2**255 - 19 + +def expmod(b,e,m): + if e == 0: return 1 + t = expmod(b,e/2,m)**2 % m + if e & 1: t = (t*b) % m + return t + +def inv(x): + return expmod(x,q-2,q) + +def radix255(x): + x = x % q + if x + x > q: x -= q + x = [x,0,0,0,0,0,0,0,0,0] + bits = [26,25,26,25,26,25,26,25,26,25] + for i in range(9): + carry = (x[i] + 2**(bits[i]-1)) / 2**bits[i] + x[i] -= carry * 2**bits[i] + x[i + 1] += carry + result = "" + for i in range(9): + result = result+str(x[i])+"," + result = result+str(x[9]) + return result + +I = expmod(2,(q-1)/4,q) +print radix255(I) From 270bca6758f94d653c64794c542dd7a0199f2d45 Mon Sep 17 00:00:00 2001 From: Brian Warner Date: Fri, 10 Feb 2012 16:45:20 -0800 Subject: [PATCH 02/10] add some support code that ref10 will need to compile headers that SUPERCOP generates, a sha512 implementation, and a constant-time verify.c --- .../crypto_hash_sha512.h | 1 + src/ed25519-supercop-ref10/crypto_int32.h | 6 + src/ed25519-supercop-ref10/crypto_int64.h | 6 + src/ed25519-supercop-ref10/crypto_sign.h | 13 + src/ed25519-supercop-ref10/crypto_uint32.h | 6 + src/ed25519-supercop-ref10/crypto_uint64.h | 6 + src/ed25519-supercop-ref10/crypto_verify_32.h | 7 + src/ed25519-supercop-ref10/sha512-blocks.c | 239 ++++++++++++++++++ src/ed25519-supercop-ref10/sha512-hash.c | 72 ++++++ src/ed25519-supercop-ref10/sha512.h | 4 + src/ed25519-supercop-ref10/verify.c | 40 +++ 11 files changed, 400 insertions(+) create mode 100644 src/ed25519-supercop-ref10/crypto_hash_sha512.h create mode 100644 src/ed25519-supercop-ref10/crypto_int32.h create mode 100644 
src/ed25519-supercop-ref10/crypto_int64.h create mode 100644 src/ed25519-supercop-ref10/crypto_sign.h create mode 100644 src/ed25519-supercop-ref10/crypto_uint32.h create mode 100644 src/ed25519-supercop-ref10/crypto_uint64.h create mode 100644 src/ed25519-supercop-ref10/crypto_verify_32.h create mode 100644 src/ed25519-supercop-ref10/sha512-blocks.c create mode 100644 src/ed25519-supercop-ref10/sha512-hash.c create mode 100644 src/ed25519-supercop-ref10/sha512.h create mode 100644 src/ed25519-supercop-ref10/verify.c diff --git a/src/ed25519-supercop-ref10/crypto_hash_sha512.h b/src/ed25519-supercop-ref10/crypto_hash_sha512.h new file mode 100644 index 0000000..6c60661 --- /dev/null +++ b/src/ed25519-supercop-ref10/crypto_hash_sha512.h @@ -0,0 +1 @@ +#include "sha512.h" diff --git a/src/ed25519-supercop-ref10/crypto_int32.h b/src/ed25519-supercop-ref10/crypto_int32.h new file mode 100644 index 0000000..cae135e --- /dev/null +++ b/src/ed25519-supercop-ref10/crypto_int32.h @@ -0,0 +1,6 @@ +#ifndef crypto_int32_h +#define crypto_int32_h + +typedef int crypto_int32; + +#endif diff --git a/src/ed25519-supercop-ref10/crypto_int64.h b/src/ed25519-supercop-ref10/crypto_int64.h new file mode 100644 index 0000000..fc92417 --- /dev/null +++ b/src/ed25519-supercop-ref10/crypto_int64.h @@ -0,0 +1,6 @@ +#ifndef crypto_int64_h +#define crypto_int64_h + +typedef long long crypto_int64; + +#endif diff --git a/src/ed25519-supercop-ref10/crypto_sign.h b/src/ed25519-supercop-ref10/crypto_sign.h new file mode 100644 index 0000000..afed208 --- /dev/null +++ b/src/ed25519-supercop-ref10/crypto_sign.h @@ -0,0 +1,13 @@ +#ifndef crypto_sign_edwards25519sha512batch_H +#define crypto_sign_edwards25519sha512batch_H + +#define SECRETKEYBYTES 64 +#define PUBLICKEYBYTES 32 +#define SIGNATUREBYTES 64 + +extern int crypto_sign(unsigned char *,unsigned long long *,const unsigned char *,unsigned long long,const unsigned char *); +extern int crypto_sign_open(unsigned char *,unsigned long long *,const unsigned char *,unsigned long long,const unsigned char *); +extern int crypto_sign_keypair(unsigned char *,unsigned char *); +extern int crypto_sign_publickey(unsigned char *pk, unsigned char *sk, unsigned char *seed); + +#endif diff --git a/src/ed25519-supercop-ref10/crypto_uint32.h b/src/ed25519-supercop-ref10/crypto_uint32.h new file mode 100644 index 0000000..21020d7 --- /dev/null +++ b/src/ed25519-supercop-ref10/crypto_uint32.h @@ -0,0 +1,6 @@ +#ifndef crypto_uint32_h +#define crypto_uint32_h + +typedef unsigned int crypto_uint32; + +#endif diff --git a/src/ed25519-supercop-ref10/crypto_uint64.h b/src/ed25519-supercop-ref10/crypto_uint64.h new file mode 100644 index 0000000..5aa0070 --- /dev/null +++ b/src/ed25519-supercop-ref10/crypto_uint64.h @@ -0,0 +1,6 @@ +#ifndef crypto_uint64_h +#define crypto_uint64_h + +typedef unsigned long long crypto_uint64; + +#endif diff --git a/src/ed25519-supercop-ref10/crypto_verify_32.h b/src/ed25519-supercop-ref10/crypto_verify_32.h new file mode 100644 index 0000000..ad265c7 --- /dev/null +++ b/src/ed25519-supercop-ref10/crypto_verify_32.h @@ -0,0 +1,7 @@ +#ifndef crypto_verify_32_H +#define crypto_verify_32_H + +#define crypto_verify_32_ref_BYTES 32 +extern int crypto_verify_32(const unsigned char *,const unsigned char *); + +#endif diff --git a/src/ed25519-supercop-ref10/sha512-blocks.c b/src/ed25519-supercop-ref10/sha512-blocks.c new file mode 100644 index 0000000..c8dbf0d --- /dev/null +++ b/src/ed25519-supercop-ref10/sha512-blocks.c @@ -0,0 +1,239 @@ +//#include "crypto_hashblocks.h" 
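/* crypto_hashblocks below is the SHA-512 compression function:
   statebytes holds the eight 64-bit chaining words as big-endian
   bytes, each full 128-byte block of `in` is compressed into that
   state, and the updated state is stored back. The return value is
   the count of unprocessed trailing bytes (inlen mod 128); padding
   the final partial block is the caller's responsibility (see
   sha512-hash.c). */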
+ +typedef unsigned long long uint64; + +static uint64 load_bigendian(const unsigned char *x) +{ + return + (uint64) (x[7]) \ + | (((uint64) (x[6])) << 8) \ + | (((uint64) (x[5])) << 16) \ + | (((uint64) (x[4])) << 24) \ + | (((uint64) (x[3])) << 32) \ + | (((uint64) (x[2])) << 40) \ + | (((uint64) (x[1])) << 48) \ + | (((uint64) (x[0])) << 56) + ; +} + +static void store_bigendian(unsigned char *x,uint64 u) +{ + x[7] = u; u >>= 8; + x[6] = u; u >>= 8; + x[5] = u; u >>= 8; + x[4] = u; u >>= 8; + x[3] = u; u >>= 8; + x[2] = u; u >>= 8; + x[1] = u; u >>= 8; + x[0] = u; +} + +#define SHR(x,c) ((x) >> (c)) +#define ROTR(x,c) (((x) >> (c)) | ((x) << (64 - (c)))) + +#define Ch(x,y,z) ((x & y) ^ (~x & z)) +#define Maj(x,y,z) ((x & y) ^ (x & z) ^ (y & z)) +#define Sigma0(x) (ROTR(x,28) ^ ROTR(x,34) ^ ROTR(x,39)) +#define Sigma1(x) (ROTR(x,14) ^ ROTR(x,18) ^ ROTR(x,41)) +#define sigma0(x) (ROTR(x, 1) ^ ROTR(x, 8) ^ SHR(x,7)) +#define sigma1(x) (ROTR(x,19) ^ ROTR(x,61) ^ SHR(x,6)) + +#define M(w0,w14,w9,w1) w0 = sigma1(w14) + w9 + sigma0(w1) + w0; + +#define EXPAND \ + M(w0 ,w14,w9 ,w1 ) \ + M(w1 ,w15,w10,w2 ) \ + M(w2 ,w0 ,w11,w3 ) \ + M(w3 ,w1 ,w12,w4 ) \ + M(w4 ,w2 ,w13,w5 ) \ + M(w5 ,w3 ,w14,w6 ) \ + M(w6 ,w4 ,w15,w7 ) \ + M(w7 ,w5 ,w0 ,w8 ) \ + M(w8 ,w6 ,w1 ,w9 ) \ + M(w9 ,w7 ,w2 ,w10) \ + M(w10,w8 ,w3 ,w11) \ + M(w11,w9 ,w4 ,w12) \ + M(w12,w10,w5 ,w13) \ + M(w13,w11,w6 ,w14) \ + M(w14,w12,w7 ,w15) \ + M(w15,w13,w8 ,w0 ) + +#define F(w,k) \ + T1 = h + Sigma1(e) + Ch(e,f,g) + k + w; \ + T2 = Sigma0(a) + Maj(a,b,c); \ + h = g; \ + g = f; \ + f = e; \ + e = d + T1; \ + d = c; \ + c = b; \ + b = a; \ + a = T1 + T2; + +int crypto_hashblocks(unsigned char *statebytes,const unsigned char *in,unsigned long long inlen) +{ + uint64 state[8]; + uint64 a; + uint64 b; + uint64 c; + uint64 d; + uint64 e; + uint64 f; + uint64 g; + uint64 h; + uint64 T1; + uint64 T2; + + a = load_bigendian(statebytes + 0); state[0] = a; + b = load_bigendian(statebytes + 8); state[1] = b; + c = load_bigendian(statebytes + 16); state[2] = c; + d = load_bigendian(statebytes + 24); state[3] = d; + e = load_bigendian(statebytes + 32); state[4] = e; + f = load_bigendian(statebytes + 40); state[5] = f; + g = load_bigendian(statebytes + 48); state[6] = g; + h = load_bigendian(statebytes + 56); state[7] = h; + + while (inlen >= 128) { + uint64 w0 = load_bigendian(in + 0); + uint64 w1 = load_bigendian(in + 8); + uint64 w2 = load_bigendian(in + 16); + uint64 w3 = load_bigendian(in + 24); + uint64 w4 = load_bigendian(in + 32); + uint64 w5 = load_bigendian(in + 40); + uint64 w6 = load_bigendian(in + 48); + uint64 w7 = load_bigendian(in + 56); + uint64 w8 = load_bigendian(in + 64); + uint64 w9 = load_bigendian(in + 72); + uint64 w10 = load_bigendian(in + 80); + uint64 w11 = load_bigendian(in + 88); + uint64 w12 = load_bigendian(in + 96); + uint64 w13 = load_bigendian(in + 104); + uint64 w14 = load_bigendian(in + 112); + uint64 w15 = load_bigendian(in + 120); + + F(w0 ,0x428a2f98d728ae22ULL) + F(w1 ,0x7137449123ef65cdULL) + F(w2 ,0xb5c0fbcfec4d3b2fULL) + F(w3 ,0xe9b5dba58189dbbcULL) + F(w4 ,0x3956c25bf348b538ULL) + F(w5 ,0x59f111f1b605d019ULL) + F(w6 ,0x923f82a4af194f9bULL) + F(w7 ,0xab1c5ed5da6d8118ULL) + F(w8 ,0xd807aa98a3030242ULL) + F(w9 ,0x12835b0145706fbeULL) + F(w10,0x243185be4ee4b28cULL) + F(w11,0x550c7dc3d5ffb4e2ULL) + F(w12,0x72be5d74f27b896fULL) + F(w13,0x80deb1fe3b1696b1ULL) + F(w14,0x9bdc06a725c71235ULL) + F(w15,0xc19bf174cf692694ULL) + + EXPAND + + F(w0 ,0xe49b69c19ef14ad2ULL) + F(w1 ,0xefbe4786384f25e3ULL) + F(w2 
,0x0fc19dc68b8cd5b5ULL) + F(w3 ,0x240ca1cc77ac9c65ULL) + F(w4 ,0x2de92c6f592b0275ULL) + F(w5 ,0x4a7484aa6ea6e483ULL) + F(w6 ,0x5cb0a9dcbd41fbd4ULL) + F(w7 ,0x76f988da831153b5ULL) + F(w8 ,0x983e5152ee66dfabULL) + F(w9 ,0xa831c66d2db43210ULL) + F(w10,0xb00327c898fb213fULL) + F(w11,0xbf597fc7beef0ee4ULL) + F(w12,0xc6e00bf33da88fc2ULL) + F(w13,0xd5a79147930aa725ULL) + F(w14,0x06ca6351e003826fULL) + F(w15,0x142929670a0e6e70ULL) + + EXPAND + + F(w0 ,0x27b70a8546d22ffcULL) + F(w1 ,0x2e1b21385c26c926ULL) + F(w2 ,0x4d2c6dfc5ac42aedULL) + F(w3 ,0x53380d139d95b3dfULL) + F(w4 ,0x650a73548baf63deULL) + F(w5 ,0x766a0abb3c77b2a8ULL) + F(w6 ,0x81c2c92e47edaee6ULL) + F(w7 ,0x92722c851482353bULL) + F(w8 ,0xa2bfe8a14cf10364ULL) + F(w9 ,0xa81a664bbc423001ULL) + F(w10,0xc24b8b70d0f89791ULL) + F(w11,0xc76c51a30654be30ULL) + F(w12,0xd192e819d6ef5218ULL) + F(w13,0xd69906245565a910ULL) + F(w14,0xf40e35855771202aULL) + F(w15,0x106aa07032bbd1b8ULL) + + EXPAND + + F(w0 ,0x19a4c116b8d2d0c8ULL) + F(w1 ,0x1e376c085141ab53ULL) + F(w2 ,0x2748774cdf8eeb99ULL) + F(w3 ,0x34b0bcb5e19b48a8ULL) + F(w4 ,0x391c0cb3c5c95a63ULL) + F(w5 ,0x4ed8aa4ae3418acbULL) + F(w6 ,0x5b9cca4f7763e373ULL) + F(w7 ,0x682e6ff3d6b2b8a3ULL) + F(w8 ,0x748f82ee5defb2fcULL) + F(w9 ,0x78a5636f43172f60ULL) + F(w10,0x84c87814a1f0ab72ULL) + F(w11,0x8cc702081a6439ecULL) + F(w12,0x90befffa23631e28ULL) + F(w13,0xa4506cebde82bde9ULL) + F(w14,0xbef9a3f7b2c67915ULL) + F(w15,0xc67178f2e372532bULL) + + EXPAND + + F(w0 ,0xca273eceea26619cULL) + F(w1 ,0xd186b8c721c0c207ULL) + F(w2 ,0xeada7dd6cde0eb1eULL) + F(w3 ,0xf57d4f7fee6ed178ULL) + F(w4 ,0x06f067aa72176fbaULL) + F(w5 ,0x0a637dc5a2c898a6ULL) + F(w6 ,0x113f9804bef90daeULL) + F(w7 ,0x1b710b35131c471bULL) + F(w8 ,0x28db77f523047d84ULL) + F(w9 ,0x32caab7b40c72493ULL) + F(w10,0x3c9ebe0a15c9bebcULL) + F(w11,0x431d67c49c100d4cULL) + F(w12,0x4cc5d4becb3e42b6ULL) + F(w13,0x597f299cfc657e2aULL) + F(w14,0x5fcb6fab3ad6faecULL) + F(w15,0x6c44198c4a475817ULL) + + a += state[0]; + b += state[1]; + c += state[2]; + d += state[3]; + e += state[4]; + f += state[5]; + g += state[6]; + h += state[7]; + + state[0] = a; + state[1] = b; + state[2] = c; + state[3] = d; + state[4] = e; + state[5] = f; + state[6] = g; + state[7] = h; + + in += 128; + inlen -= 128; + } + + store_bigendian(statebytes + 0,state[0]); + store_bigendian(statebytes + 8,state[1]); + store_bigendian(statebytes + 16,state[2]); + store_bigendian(statebytes + 24,state[3]); + store_bigendian(statebytes + 32,state[4]); + store_bigendian(statebytes + 40,state[5]); + store_bigendian(statebytes + 48,state[6]); + store_bigendian(statebytes + 56,state[7]); + + return inlen; +} diff --git a/src/ed25519-supercop-ref10/sha512-hash.c b/src/ed25519-supercop-ref10/sha512-hash.c new file mode 100644 index 0000000..f2f2925 --- /dev/null +++ b/src/ed25519-supercop-ref10/sha512-hash.c @@ -0,0 +1,72 @@ +/* +20080913 +D. J. Bernstein +Public domain. 
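Note on the padding below: crypto_hash_sha512 compresses all full
128-byte blocks, copies the trailing inlen & 127 message bytes into
`padded`, appends the 0x80 terminator, zero-fills, and stores the
message length in *bits* (hence the final `bytes << 3`) big-endian
at the end of the last block. If fewer than 112 message bytes
remain, the length field fits and a single 128-byte block is
hashed; otherwise two blocks are needed. This is standard
FIPS 180-4 SHA-512 padding, written out by hand.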
+*/ + +#include "sha512.h" + +extern int crypto_hashblocks(unsigned char *statebytes,const unsigned char *in,unsigned long long inlen); + +#define blocks crypto_hashblocks + +static const unsigned char iv[64] = { + 0x6a,0x09,0xe6,0x67,0xf3,0xbc,0xc9,0x08, + 0xbb,0x67,0xae,0x85,0x84,0xca,0xa7,0x3b, + 0x3c,0x6e,0xf3,0x72,0xfe,0x94,0xf8,0x2b, + 0xa5,0x4f,0xf5,0x3a,0x5f,0x1d,0x36,0xf1, + 0x51,0x0e,0x52,0x7f,0xad,0xe6,0x82,0xd1, + 0x9b,0x05,0x68,0x8c,0x2b,0x3e,0x6c,0x1f, + 0x1f,0x83,0xd9,0xab,0xfb,0x41,0xbd,0x6b, + 0x5b,0xe0,0xcd,0x19,0x13,0x7e,0x21,0x79 +} ; + +typedef unsigned long long uint64; + +int crypto_hash_sha512(unsigned char *out,const unsigned char *in,unsigned long long inlen) +{ + unsigned char h[64]; + unsigned char padded[256]; + int i; + unsigned long long bytes = inlen; + + for (i = 0;i < 64;++i) h[i] = iv[i]; + + blocks(h,in,inlen); + in += inlen; + inlen &= 127; + in -= inlen; + + for (i = 0;i < inlen;++i) padded[i] = in[i]; + padded[inlen] = 0x80; + + if (inlen < 112) { + for (i = inlen + 1;i < 119;++i) padded[i] = 0; + padded[119] = bytes >> 61; + padded[120] = bytes >> 53; + padded[121] = bytes >> 45; + padded[122] = bytes >> 37; + padded[123] = bytes >> 29; + padded[124] = bytes >> 21; + padded[125] = bytes >> 13; + padded[126] = bytes >> 5; + padded[127] = bytes << 3; + blocks(h,padded,128); + } else { + for (i = inlen + 1;i < 247;++i) padded[i] = 0; + padded[247] = bytes >> 61; + padded[248] = bytes >> 53; + padded[249] = bytes >> 45; + padded[250] = bytes >> 37; + padded[251] = bytes >> 29; + padded[252] = bytes >> 21; + padded[253] = bytes >> 13; + padded[254] = bytes >> 5; + padded[255] = bytes << 3; + blocks(h,padded,256); + } + + for (i = 0;i < 64;++i) out[i] = h[i]; + + return 0; +} diff --git a/src/ed25519-supercop-ref10/sha512.h b/src/ed25519-supercop-ref10/sha512.h new file mode 100644 index 0000000..37376b1 --- /dev/null +++ b/src/ed25519-supercop-ref10/sha512.h @@ -0,0 +1,4 @@ +extern int crypto_hashblocks(unsigned char *statebytes,const unsigned char *in,unsigned long long inlen); +extern int crypto_hash_sha512(unsigned char *out,const unsigned char *in,unsigned long long inlen); + +#define crypto_hash_sha512_BYTES 64 diff --git a/src/ed25519-supercop-ref10/verify.c b/src/ed25519-supercop-ref10/verify.c new file mode 100644 index 0000000..a04186b --- /dev/null +++ b/src/ed25519-supercop-ref10/verify.c @@ -0,0 +1,40 @@ +#include "crypto_verify_32.h" + +int crypto_verify_32(const unsigned char *x,const unsigned char *y) +{ + unsigned int differentbits = 0; +#define F(i) differentbits |= x[i] ^ y[i]; + F(0) + F(1) + F(2) + F(3) + F(4) + F(5) + F(6) + F(7) + F(8) + F(9) + F(10) + F(11) + F(12) + F(13) + F(14) + F(15) + F(16) + F(17) + F(18) + F(19) + F(20) + F(21) + F(22) + F(23) + F(24) + F(25) + F(26) + F(27) + F(28) + F(29) + F(30) + F(31) + return (1 & ((differentbits - 1) >> 8)) - 1; +} From 3fbd6732b8d455a8be2ee489356dfc70f5e8aebd Mon Sep 17 00:00:00 2001 From: Brian Warner Date: Fri, 10 Feb 2012 16:46:04 -0800 Subject: [PATCH 03/10] modify fe_isnonzero.c to fix compiler warning (signedness) --- src/ed25519-supercop-ref10/fe_isnonzero.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ed25519-supercop-ref10/fe_isnonzero.c b/src/ed25519-supercop-ref10/fe_isnonzero.c index 1f42e39..4756800 100644 --- a/src/ed25519-supercop-ref10/fe_isnonzero.c +++ b/src/ed25519-supercop-ref10/fe_isnonzero.c @@ -9,7 +9,7 @@ return 0 if f != 0 |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. 
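(The one-line change below exists because fe_isnonzero serializes f
with fe_tobytes into an unsigned char buffer and compares it against
`zero` using crypto_verify_32, whose parameters are
const unsigned char *. With zero[] declared as plain char the call
mixed pointer signedness, which is what the compiler warned about;
declaring it unsigned char matches the prototype and changes no
behavior.)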
*/ -static const char zero[32]; +static const unsigned char zero[32]; int fe_isnonzero(const fe f) { From a8cae0ed99ad83a8a3c30c38f8de8cbdde4678e3 Mon Sep 17 00:00:00 2001 From: Brian Warner Date: Fri, 10 Feb 2012 16:50:18 -0800 Subject: [PATCH 04/10] new publickey() function, to avoid using randombytes.h We let the python code create a random key with os.urandom(), which is portable to windows and can be tested deterministically, rather than using the upstream keypair() that calls randombytes() and reads directly from /dev/urandom. --- src/ed25519-supercop-ref10/publickey.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 src/ed25519-supercop-ref10/publickey.c diff --git a/src/ed25519-supercop-ref10/publickey.c b/src/ed25519-supercop-ref10/publickey.c new file mode 100644 index 0000000..21ef491 --- /dev/null +++ b/src/ed25519-supercop-ref10/publickey.c @@ -0,0 +1,26 @@ +#include "crypto_sign.h" +#include "crypto_hash_sha512.h" +#include "ge.h" + +int crypto_sign_publickey( + unsigned char *pk, // write 32 bytes into this + unsigned char *sk, // write 64 bytes into this (seed+pubkey) + unsigned char *seed // 32 bytes input + ) +{ + unsigned char h[64]; + ge_p3 A; + int i; + + crypto_hash_sha512(h,seed,32); + h[0] &= 248; + h[31] &= 63; + h[31] |= 64; + + ge_scalarmult_base(&A,h); + ge_p3_tobytes(pk,&A); + + for (i = 0;i < 32;++i) sk[i] = seed[i]; + for (i = 0;i < 32;++i) sk[32 + i] = pk[i]; + return 0; +} From db347c6ec73d54d0bbb7001d6e5e35b00d2d7f8d Mon Sep 17 00:00:00 2001 From: Brian Warner Date: Fri, 10 Feb 2012 16:51:24 -0800 Subject: [PATCH 05/10] setup.py: switch to ref10 Also run the benchmark suite for 10x longer because this code is 20x faster. --- setup.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/setup.py b/setup.py index b7e3634..e4cd7db 100644 --- a/setup.py +++ b/setup.py @@ -22,12 +22,12 @@ """ sources = ["src/ed25519-glue/ed25519module.c"] -sources.extend(["src/ed25519-supercop-ref/"+s - for s in os.listdir("src/ed25519-supercop-ref") - if s.endswith(".c") and s!="test.c"]) +sources.extend(["src/ed25519-supercop-ref10/"+s + for s in os.listdir("src/ed25519-supercop-ref10") + if s.endswith(".c") and s!="test.c" and s!="keypair.c"]) m = Extension("ed25519._ed25519", - include_dirs=["src/ed25519-supercop-ref"], sources=sources) + include_dirs=["src/ed25519-supercop-ref10"], sources=sources) commands = versioneer.get_cmdclass().copy() @@ -70,20 +70,20 @@ class Speed(Test): def run(self): self.setup_path() from timeit import main - #t = timeit(setup="import ed25519", stmt="ed25519.create_keypair()", number=1000) + #t = timeit(setup="import ed25519", stmt="ed25519.create_keypair()", number=10000) print " keypair generation:", - main(["-n", "1000", + main(["-n", "10000", "-s", "import ed25519", "ed25519.create_keypair()"]) print " signing:", - main(["-n", "1000", + main(["-n", "10000", "-s", "import ed25519; sk,vk=ed25519.create_keypair(); msg='hello world'", "sk.sign(msg)"]) print " verifying:", - main(["-n", "1000", + main(["-n", "10000", "-s", "import ed25519; sk,vk=ed25519.create_keypair(); msg='hello world'; sig=sk.sign(msg)", "vk.verify(sig,msg)"]) commands["speed"] = Speed From 0f735b6609da4a2c1d69f607d3236c2d50226fd2 Mon Sep 17 00:00:00 2001 From: warner Date: Fri, 10 Feb 2012 17:06:47 -0800 Subject: [PATCH 06/10] copy amd64 implementations from SUPERCOP-20120210 --- src/ed25519-supercop-amd64-51-30k/api.h | 4 + .../architectures | 1 + src/ed25519-supercop-amd64-51-30k/batch.c | 94 + 
src/ed25519-supercop-amd64-51-30k/choose_t.s | 1824 +++++ src/ed25519-supercop-amd64-51-30k/consts.s | 47 + src/ed25519-supercop-amd64-51-30k/fe25519.h | 65 + .../fe25519_add.c | 10 + .../fe25519_freeze.s | 434 ++ .../fe25519_getparity.c | 8 + .../fe25519_invert.c | 59 + .../fe25519_iseq.c | 15 + .../fe25519_iszero.c | 13 + .../fe25519_mul.s | 946 +++ .../fe25519_neg.c | 8 + .../fe25519_nsquare.s | 763 ++ .../fe25519_pack.c | 49 + .../fe25519_pow2523.c | 54 + .../fe25519_setint.c | 10 + .../fe25519_square.s | 749 ++ .../fe25519_sub.c | 34 + .../fe25519_unpack.c | 46 + src/ed25519-supercop-amd64-51-30k/ge25519.h | 106 + .../ge25519_add.c | 8 + .../ge25519_add_p1p1.s | 4716 +++++++++++++ .../ge25519_base.c | 7 + .../ge25519_base_niels_smalltables.data | 768 ++ .../ge25519_base_slide_multiples.data | 96 + .../ge25519_dbl_p1p1.s | 3155 +++++++++ .../ge25519_double.c | 8 + .../ge25519_double_scalarmult.c | 97 + .../ge25519_isneutral.c | 9 + .../ge25519_multi_scalarmult.c | 102 + .../ge25519_nielsadd2.s | 6152 +++++++++++++++++ .../ge25519_nielsadd_p1p1.s | 3161 +++++++++ .../ge25519_p1p1_to_p2.s | 2442 +++++++ .../ge25519_p1p1_to_p3.s | 3202 +++++++++ .../ge25519_p1p1_to_pniels.s | 4110 +++++++++++ .../ge25519_pack.c | 13 + .../ge25519_pnielsadd_p1p1.s | 3791 ++++++++++ .../ge25519_scalarmult_base.c | 50 + .../ge25519_unpackneg.c | 60 + .../heap_rootreplaced.s | 476 ++ .../heap_rootreplaced_1limb.s | 416 ++ .../heap_rootreplaced_2limbs.s | 436 ++ .../heap_rootreplaced_3limbs.s | 456 ++ src/ed25519-supercop-amd64-51-30k/hram.c | 13 + src/ed25519-supercop-amd64-51-30k/hram.h | 8 + .../implementors | 5 + .../index_heap.c | 58 + .../index_heap.h | 31 + src/ed25519-supercop-amd64-51-30k/keypair.c | 30 + src/ed25519-supercop-amd64-51-30k/open.c | 45 + src/ed25519-supercop-amd64-51-30k/sc25519.h | 69 + .../sc25519_add.s | 232 + .../sc25519_barrett.s | 1188 ++++ .../sc25519_from32bytes.c | 55 + .../sc25519_from64bytes.c | 7 + .../sc25519_from_shortsc.c | 9 + .../sc25519_iszero.c | 10 + .../sc25519_lt.s | 131 + .../sc25519_mul.c | 12 + .../sc25519_mul_shortsc.c | 9 + .../sc25519_slide.c | 49 + .../sc25519_sub_nored.s | 142 + .../sc25519_to32bytes.c | 8 + .../sc25519_window4.c | 27 + src/ed25519-supercop-amd64-51-30k/sign.c | 56 + src/ed25519-supercop-amd64-51-30k/ull4_mul.s | 716 ++ src/ed25519-supercop-amd64-64-24k/api.h | 4 + .../architectures | 1 + src/ed25519-supercop-amd64-64-24k/batch.c | 94 + src/ed25519-supercop-amd64-64-24k/choose_t.s | 1565 +++++ src/ed25519-supercop-amd64-64-24k/consts.s | 39 + src/ed25519-supercop-amd64-64-24k/fe25519.h | 64 + .../fe25519_add.s | 189 + .../fe25519_freeze.s | 322 + .../fe25519_getparity.c | 8 + .../fe25519_invert.c | 60 + .../fe25519_iseq.c | 14 + .../fe25519_iszero.c | 12 + .../fe25519_mul.s | 843 +++ .../fe25519_neg.c | 8 + .../fe25519_pack.c | 13 + .../fe25519_pow2523.c | 55 + .../fe25519_setint.c | 9 + .../fe25519_square.s | 617 ++ .../fe25519_sub.s | 189 + .../fe25519_unpack.c | 11 + src/ed25519-supercop-amd64-64-24k/ge25519.h | 95 + .../ge25519_add.c | 8 + .../ge25519_add_p1p1.s | 4452 ++++++++++++ .../ge25519_base.c | 7 + .../ge25519_base_niels_smalltables.data | 768 ++ .../ge25519_base_slide_multiples.data | 96 + .../ge25519_dbl_p1p1.s | 2891 ++++++++ .../ge25519_double.c | 8 + .../ge25519_double_scalarmult.c | 102 + .../ge25519_isneutral.c | 9 + .../ge25519_multi_scalarmult.c | 102 + .../ge25519_nielsadd2.s | 5649 +++++++++++++++ .../ge25519_nielsadd_p1p1.s | 3010 ++++++++ .../ge25519_p1p1_to_p2.s | 2174 ++++++ .../ge25519_p1p1_to_p3.s | 2844 
++++++++ .../ge25519_pack.c | 13 + .../ge25519_pnielsadd_p1p1.s | 3580 ++++++++++ .../ge25519_scalarmult_base.c | 50 + .../ge25519_unpackneg.c | 60 + .../heap_rootreplaced.s | 476 ++ .../heap_rootreplaced_1limb.s | 416 ++ .../heap_rootreplaced_2limbs.s | 436 ++ .../heap_rootreplaced_3limbs.s | 456 ++ src/ed25519-supercop-amd64-64-24k/hram.c | 13 + src/ed25519-supercop-amd64-64-24k/hram.h | 8 + .../implementors | 5 + .../index_heap.c | 58 + .../index_heap.h | 31 + src/ed25519-supercop-amd64-64-24k/keypair.c | 30 + src/ed25519-supercop-amd64-64-24k/open.c | 45 + src/ed25519-supercop-amd64-64-24k/sc25519.h | 66 + .../sc25519_add.s | 232 + .../sc25519_barrett.s | 1188 ++++ .../sc25519_from32bytes.c | 55 + .../sc25519_from64bytes.c | 7 + .../sc25519_from_shortsc.c | 9 + .../sc25519_iszero.c | 10 + .../sc25519_lt.s | 131 + .../sc25519_mul.c | 12 + .../sc25519_mul_shortsc.c | 9 + .../sc25519_slide.c | 49 + .../sc25519_sub_nored.s | 142 + .../sc25519_to32bytes.c | 8 + .../sc25519_window4.c | 27 + src/ed25519-supercop-amd64-64-24k/sign.c | 56 + src/ed25519-supercop-amd64-64-24k/ull4_mul.s | 716 ++ 134 files changed, 76706 insertions(+) create mode 100644 src/ed25519-supercop-amd64-51-30k/api.h create mode 100644 src/ed25519-supercop-amd64-51-30k/architectures create mode 100644 src/ed25519-supercop-amd64-51-30k/batch.c create mode 100644 src/ed25519-supercop-amd64-51-30k/choose_t.s create mode 100644 src/ed25519-supercop-amd64-51-30k/consts.s create mode 100644 src/ed25519-supercop-amd64-51-30k/fe25519.h create mode 100644 src/ed25519-supercop-amd64-51-30k/fe25519_add.c create mode 100644 src/ed25519-supercop-amd64-51-30k/fe25519_freeze.s create mode 100644 src/ed25519-supercop-amd64-51-30k/fe25519_getparity.c create mode 100644 src/ed25519-supercop-amd64-51-30k/fe25519_invert.c create mode 100644 src/ed25519-supercop-amd64-51-30k/fe25519_iseq.c create mode 100644 src/ed25519-supercop-amd64-51-30k/fe25519_iszero.c create mode 100644 src/ed25519-supercop-amd64-51-30k/fe25519_mul.s create mode 100644 src/ed25519-supercop-amd64-51-30k/fe25519_neg.c create mode 100644 src/ed25519-supercop-amd64-51-30k/fe25519_nsquare.s create mode 100644 src/ed25519-supercop-amd64-51-30k/fe25519_pack.c create mode 100644 src/ed25519-supercop-amd64-51-30k/fe25519_pow2523.c create mode 100644 src/ed25519-supercop-amd64-51-30k/fe25519_setint.c create mode 100644 src/ed25519-supercop-amd64-51-30k/fe25519_square.s create mode 100644 src/ed25519-supercop-amd64-51-30k/fe25519_sub.c create mode 100644 src/ed25519-supercop-amd64-51-30k/fe25519_unpack.c create mode 100644 src/ed25519-supercop-amd64-51-30k/ge25519.h create mode 100644 src/ed25519-supercop-amd64-51-30k/ge25519_add.c create mode 100644 src/ed25519-supercop-amd64-51-30k/ge25519_add_p1p1.s create mode 100644 src/ed25519-supercop-amd64-51-30k/ge25519_base.c create mode 100644 src/ed25519-supercop-amd64-51-30k/ge25519_base_niels_smalltables.data create mode 100644 src/ed25519-supercop-amd64-51-30k/ge25519_base_slide_multiples.data create mode 100644 src/ed25519-supercop-amd64-51-30k/ge25519_dbl_p1p1.s create mode 100644 src/ed25519-supercop-amd64-51-30k/ge25519_double.c create mode 100644 src/ed25519-supercop-amd64-51-30k/ge25519_double_scalarmult.c create mode 100644 src/ed25519-supercop-amd64-51-30k/ge25519_isneutral.c create mode 100644 src/ed25519-supercop-amd64-51-30k/ge25519_multi_scalarmult.c create mode 100644 src/ed25519-supercop-amd64-51-30k/ge25519_nielsadd2.s create mode 100644 src/ed25519-supercop-amd64-51-30k/ge25519_nielsadd_p1p1.s create mode 100644 
src/ed25519-supercop-amd64-51-30k/ge25519_p1p1_to_p2.s create mode 100644 src/ed25519-supercop-amd64-51-30k/ge25519_p1p1_to_p3.s create mode 100644 src/ed25519-supercop-amd64-51-30k/ge25519_p1p1_to_pniels.s create mode 100644 src/ed25519-supercop-amd64-51-30k/ge25519_pack.c create mode 100644 src/ed25519-supercop-amd64-51-30k/ge25519_pnielsadd_p1p1.s create mode 100644 src/ed25519-supercop-amd64-51-30k/ge25519_scalarmult_base.c create mode 100644 src/ed25519-supercop-amd64-51-30k/ge25519_unpackneg.c create mode 100644 src/ed25519-supercop-amd64-51-30k/heap_rootreplaced.s create mode 100644 src/ed25519-supercop-amd64-51-30k/heap_rootreplaced_1limb.s create mode 100644 src/ed25519-supercop-amd64-51-30k/heap_rootreplaced_2limbs.s create mode 100644 src/ed25519-supercop-amd64-51-30k/heap_rootreplaced_3limbs.s create mode 100644 src/ed25519-supercop-amd64-51-30k/hram.c create mode 100644 src/ed25519-supercop-amd64-51-30k/hram.h create mode 100644 src/ed25519-supercop-amd64-51-30k/implementors create mode 100644 src/ed25519-supercop-amd64-51-30k/index_heap.c create mode 100644 src/ed25519-supercop-amd64-51-30k/index_heap.h create mode 100644 src/ed25519-supercop-amd64-51-30k/keypair.c create mode 100644 src/ed25519-supercop-amd64-51-30k/open.c create mode 100644 src/ed25519-supercop-amd64-51-30k/sc25519.h create mode 100644 src/ed25519-supercop-amd64-51-30k/sc25519_add.s create mode 100644 src/ed25519-supercop-amd64-51-30k/sc25519_barrett.s create mode 100644 src/ed25519-supercop-amd64-51-30k/sc25519_from32bytes.c create mode 100644 src/ed25519-supercop-amd64-51-30k/sc25519_from64bytes.c create mode 100644 src/ed25519-supercop-amd64-51-30k/sc25519_from_shortsc.c create mode 100644 src/ed25519-supercop-amd64-51-30k/sc25519_iszero.c create mode 100644 src/ed25519-supercop-amd64-51-30k/sc25519_lt.s create mode 100644 src/ed25519-supercop-amd64-51-30k/sc25519_mul.c create mode 100644 src/ed25519-supercop-amd64-51-30k/sc25519_mul_shortsc.c create mode 100644 src/ed25519-supercop-amd64-51-30k/sc25519_slide.c create mode 100644 src/ed25519-supercop-amd64-51-30k/sc25519_sub_nored.s create mode 100644 src/ed25519-supercop-amd64-51-30k/sc25519_to32bytes.c create mode 100644 src/ed25519-supercop-amd64-51-30k/sc25519_window4.c create mode 100644 src/ed25519-supercop-amd64-51-30k/sign.c create mode 100644 src/ed25519-supercop-amd64-51-30k/ull4_mul.s create mode 100644 src/ed25519-supercop-amd64-64-24k/api.h create mode 100644 src/ed25519-supercop-amd64-64-24k/architectures create mode 100644 src/ed25519-supercop-amd64-64-24k/batch.c create mode 100644 src/ed25519-supercop-amd64-64-24k/choose_t.s create mode 100644 src/ed25519-supercop-amd64-64-24k/consts.s create mode 100644 src/ed25519-supercop-amd64-64-24k/fe25519.h create mode 100644 src/ed25519-supercop-amd64-64-24k/fe25519_add.s create mode 100644 src/ed25519-supercop-amd64-64-24k/fe25519_freeze.s create mode 100644 src/ed25519-supercop-amd64-64-24k/fe25519_getparity.c create mode 100644 src/ed25519-supercop-amd64-64-24k/fe25519_invert.c create mode 100644 src/ed25519-supercop-amd64-64-24k/fe25519_iseq.c create mode 100644 src/ed25519-supercop-amd64-64-24k/fe25519_iszero.c create mode 100644 src/ed25519-supercop-amd64-64-24k/fe25519_mul.s create mode 100644 src/ed25519-supercop-amd64-64-24k/fe25519_neg.c create mode 100644 src/ed25519-supercop-amd64-64-24k/fe25519_pack.c create mode 100644 src/ed25519-supercop-amd64-64-24k/fe25519_pow2523.c create mode 100644 src/ed25519-supercop-amd64-64-24k/fe25519_setint.c create mode 100644 
src/ed25519-supercop-amd64-64-24k/fe25519_square.s create mode 100644 src/ed25519-supercop-amd64-64-24k/fe25519_sub.s create mode 100644 src/ed25519-supercop-amd64-64-24k/fe25519_unpack.c create mode 100644 src/ed25519-supercop-amd64-64-24k/ge25519.h create mode 100644 src/ed25519-supercop-amd64-64-24k/ge25519_add.c create mode 100644 src/ed25519-supercop-amd64-64-24k/ge25519_add_p1p1.s create mode 100644 src/ed25519-supercop-amd64-64-24k/ge25519_base.c create mode 100644 src/ed25519-supercop-amd64-64-24k/ge25519_base_niels_smalltables.data create mode 100644 src/ed25519-supercop-amd64-64-24k/ge25519_base_slide_multiples.data create mode 100644 src/ed25519-supercop-amd64-64-24k/ge25519_dbl_p1p1.s create mode 100644 src/ed25519-supercop-amd64-64-24k/ge25519_double.c create mode 100644 src/ed25519-supercop-amd64-64-24k/ge25519_double_scalarmult.c create mode 100644 src/ed25519-supercop-amd64-64-24k/ge25519_isneutral.c create mode 100644 src/ed25519-supercop-amd64-64-24k/ge25519_multi_scalarmult.c create mode 100644 src/ed25519-supercop-amd64-64-24k/ge25519_nielsadd2.s create mode 100644 src/ed25519-supercop-amd64-64-24k/ge25519_nielsadd_p1p1.s create mode 100644 src/ed25519-supercop-amd64-64-24k/ge25519_p1p1_to_p2.s create mode 100644 src/ed25519-supercop-amd64-64-24k/ge25519_p1p1_to_p3.s create mode 100644 src/ed25519-supercop-amd64-64-24k/ge25519_pack.c create mode 100644 src/ed25519-supercop-amd64-64-24k/ge25519_pnielsadd_p1p1.s create mode 100644 src/ed25519-supercop-amd64-64-24k/ge25519_scalarmult_base.c create mode 100644 src/ed25519-supercop-amd64-64-24k/ge25519_unpackneg.c create mode 100644 src/ed25519-supercop-amd64-64-24k/heap_rootreplaced.s create mode 100644 src/ed25519-supercop-amd64-64-24k/heap_rootreplaced_1limb.s create mode 100644 src/ed25519-supercop-amd64-64-24k/heap_rootreplaced_2limbs.s create mode 100644 src/ed25519-supercop-amd64-64-24k/heap_rootreplaced_3limbs.s create mode 100644 src/ed25519-supercop-amd64-64-24k/hram.c create mode 100644 src/ed25519-supercop-amd64-64-24k/hram.h create mode 100644 src/ed25519-supercop-amd64-64-24k/implementors create mode 100644 src/ed25519-supercop-amd64-64-24k/index_heap.c create mode 100644 src/ed25519-supercop-amd64-64-24k/index_heap.h create mode 100644 src/ed25519-supercop-amd64-64-24k/keypair.c create mode 100644 src/ed25519-supercop-amd64-64-24k/open.c create mode 100644 src/ed25519-supercop-amd64-64-24k/sc25519.h create mode 100644 src/ed25519-supercop-amd64-64-24k/sc25519_add.s create mode 100644 src/ed25519-supercop-amd64-64-24k/sc25519_barrett.s create mode 100644 src/ed25519-supercop-amd64-64-24k/sc25519_from32bytes.c create mode 100644 src/ed25519-supercop-amd64-64-24k/sc25519_from64bytes.c create mode 100644 src/ed25519-supercop-amd64-64-24k/sc25519_from_shortsc.c create mode 100644 src/ed25519-supercop-amd64-64-24k/sc25519_iszero.c create mode 100644 src/ed25519-supercop-amd64-64-24k/sc25519_lt.s create mode 100644 src/ed25519-supercop-amd64-64-24k/sc25519_mul.c create mode 100644 src/ed25519-supercop-amd64-64-24k/sc25519_mul_shortsc.c create mode 100644 src/ed25519-supercop-amd64-64-24k/sc25519_slide.c create mode 100644 src/ed25519-supercop-amd64-64-24k/sc25519_sub_nored.s create mode 100644 src/ed25519-supercop-amd64-64-24k/sc25519_to32bytes.c create mode 100644 src/ed25519-supercop-amd64-64-24k/sc25519_window4.c create mode 100644 src/ed25519-supercop-amd64-64-24k/sign.c create mode 100644 src/ed25519-supercop-amd64-64-24k/ull4_mul.s diff --git a/src/ed25519-supercop-amd64-51-30k/api.h 
b/src/ed25519-supercop-amd64-51-30k/api.h new file mode 100644 index 0000000..1d0c988 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/api.h @@ -0,0 +1,4 @@ +#define CRYPTO_SECRETKEYBYTES 64 +#define CRYPTO_PUBLICKEYBYTES 32 +#define CRYPTO_BYTES 64 + diff --git a/src/ed25519-supercop-amd64-51-30k/architectures b/src/ed25519-supercop-amd64-51-30k/architectures new file mode 100644 index 0000000..21d5bd8 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/architectures @@ -0,0 +1 @@ +amd64 diff --git a/src/ed25519-supercop-amd64-51-30k/batch.c b/src/ed25519-supercop-amd64-51-30k/batch.c new file mode 100644 index 0000000..955392e --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/batch.c @@ -0,0 +1,94 @@ +#include "crypto_sign.h" + +#include "crypto_verify_32.h" +#include "crypto_hash_sha512.h" +#include "randombytes.h" + +#include "ge25519.h" +#include "hram.h" + +#define MAXBATCH 64 + +int crypto_sign_open_batch( + unsigned char* const m[],unsigned long long mlen[], + unsigned char* const sm[],const unsigned long long smlen[], + unsigned char* const pk[], + unsigned long long num + ) +{ + int ret = 0; + unsigned long long i, j; + shortsc25519 r[MAXBATCH]; + sc25519 scalars[2*MAXBATCH+1]; + ge25519 points[2*MAXBATCH+1]; + unsigned char hram[crypto_hash_sha512_BYTES]; + unsigned long long batchsize; + + for (i = 0;i < num;++i) mlen[i] = -1; + + while (num >= 3) { + batchsize = num; + if (batchsize > MAXBATCH) batchsize = MAXBATCH; + + for (i = 0;i < batchsize;++i) + if (smlen[i] < 64) goto fallback; + + randombytes((unsigned char*)r,sizeof(shortsc25519) * batchsize); + + /* Computing scalars[0] = ((r1s1 + r2s2 + ...)) */ + for(i=0;icaller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: tp_stack = tp +# asm 1: movq tp_stack=stack64#8 +# asm 2: movq tp_stack=56(%rsp) +movq %rdi,56(%rsp) + +# qhasm: pos *= 960 +# asm 1: imulq $960,pos=int64#1 +# asm 2: imulq $960,pos=%rdi +imulq $960,%rsi,%rdi + +# qhasm: mask = b +# asm 1: mov mask=int64#2 +# asm 2: mov mask=%rsi +mov %rdx,%rsi + +# qhasm: (int64) mask >>= 7 +# asm 1: sar $7,u=int64#5 +# asm 2: mov u=%r8 +mov %rdx,%r8 + +# qhasm: u += mask +# asm 1: add tysubx0=int64#2 +# asm 2: mov $1,>tysubx0=%rsi +mov $1,%rsi + +# qhasm: tysubx1 = 0 +# asm 1: mov $0,>tysubx1=int64#6 +# asm 2: mov $0,>tysubx1=%r9 +mov $0,%r9 + +# qhasm: tysubx2 = 0 +# asm 1: mov $0,>tysubx2=int64#7 +# asm 2: mov $0,>tysubx2=%rax +mov $0,%rax + +# qhasm: tysubx3 = 0 +# asm 1: mov $0,>tysubx3=int64#8 +# asm 2: mov $0,>tysubx3=%r10 +mov $0,%r10 + +# qhasm: tysubx4 = 0 +# asm 1: mov $0,>tysubx4=int64#9 +# asm 2: mov $0,>tysubx4=%r11 +mov $0,%r11 + +# qhasm: txaddy0 = 1 +# asm 1: mov $1,>txaddy0=int64#10 +# asm 2: mov $1,>txaddy0=%r12 +mov $1,%r12 + +# 
qhasm: txaddy1 = 0 +# asm 1: mov $0,>txaddy1=int64#11 +# asm 2: mov $0,>txaddy1=%r13 +mov $0,%r13 + +# qhasm: txaddy2 = 0 +# asm 1: mov $0,>txaddy2=int64#12 +# asm 2: mov $0,>txaddy2=%r14 +mov $0,%r14 + +# qhasm: txaddy3 = 0 +# asm 1: mov $0,>txaddy3=int64#13 +# asm 2: mov $0,>txaddy3=%r15 +mov $0,%r15 + +# qhasm: txaddy4 = 0 +# asm 1: mov $0,>txaddy4=int64#14 +# asm 2: mov $0,>txaddy4=%rbx +mov $0,%rbx + +# qhasm: =? u - 1 +# asm 1: cmp $1,t=int64#15 +# asm 2: movq 0(t=%rbp +movq 0(%rcx,%rdi),%rbp + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 8(t=%rbp +movq 8(%rcx,%rdi),%rbp + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 16(t=%rbp +movq 16(%rcx,%rdi),%rbp + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 24(t=%rbp +movq 24(%rcx,%rdi),%rbp + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 32(t=%rbp +movq 32(%rcx,%rdi),%rbp + +# qhasm: tysubx4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 40(t=%rbp +movq 40(%rcx,%rdi),%rbp + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 48(t=%rbp +movq 48(%rcx,%rdi),%rbp + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 56(t=%rbp +movq 56(%rcx,%rdi),%rbp + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 64(t=%rbp +movq 64(%rcx,%rdi),%rbp + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 72(t=%rbp +movq 72(%rcx,%rdi),%rbp + +# qhasm: txaddy4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 120(t=%rbp +movq 120(%rcx,%rdi),%rbp + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 128(t=%rbp +movq 128(%rcx,%rdi),%rbp + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 136(t=%rbp +movq 136(%rcx,%rdi),%rbp + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 144(t=%rbp +movq 144(%rcx,%rdi),%rbp + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 152(t=%rbp +movq 152(%rcx,%rdi),%rbp + +# qhasm: tysubx4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 160(t=%rbp +movq 160(%rcx,%rdi),%rbp + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 168(t=%rbp +movq 168(%rcx,%rdi),%rbp + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 176(t=%rbp +movq 176(%rcx,%rdi),%rbp + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 184(t=%rbp +movq 184(%rcx,%rdi),%rbp + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 192(t=%rbp +movq 192(%rcx,%rdi),%rbp + +# qhasm: txaddy4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 240(t=%rbp +movq 240(%rcx,%rdi),%rbp + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 248(t=%rbp +movq 248(%rcx,%rdi),%rbp + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 256(t=%rbp +movq 256(%rcx,%rdi),%rbp + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 264(t=%rbp +movq 264(%rcx,%rdi),%rbp + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 272(t=%rbp +movq 272(%rcx,%rdi),%rbp + +# qhasm: tysubx4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 280(t=%rbp +movq 280(%rcx,%rdi),%rbp + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 288(t=%rbp +movq 288(%rcx,%rdi),%rbp + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 296(t=%rbp +movq 296(%rcx,%rdi),%rbp + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 304(t=%rbp +movq 304(%rcx,%rdi),%rbp + +# qhasm: txaddy3 = t if = +# asm 1: cmove 
t=int64#15 +# asm 2: movq 312(t=%rbp +movq 312(%rcx,%rdi),%rbp + +# qhasm: txaddy4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 360(t=%rbp +movq 360(%rcx,%rdi),%rbp + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 368(t=%rbp +movq 368(%rcx,%rdi),%rbp + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 376(t=%rbp +movq 376(%rcx,%rdi),%rbp + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 384(t=%rbp +movq 384(%rcx,%rdi),%rbp + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 392(t=%rbp +movq 392(%rcx,%rdi),%rbp + +# qhasm: tysubx4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 400(t=%rbp +movq 400(%rcx,%rdi),%rbp + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 408(t=%rbp +movq 408(%rcx,%rdi),%rbp + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 416(t=%rbp +movq 416(%rcx,%rdi),%rbp + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 424(t=%rbp +movq 424(%rcx,%rdi),%rbp + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 432(t=%rbp +movq 432(%rcx,%rdi),%rbp + +# qhasm: txaddy4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 480(t=%rbp +movq 480(%rcx,%rdi),%rbp + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 488(t=%rbp +movq 488(%rcx,%rdi),%rbp + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 496(t=%rbp +movq 496(%rcx,%rdi),%rbp + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 504(t=%rbp +movq 504(%rcx,%rdi),%rbp + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 512(t=%rbp +movq 512(%rcx,%rdi),%rbp + +# qhasm: tysubx4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 520(t=%rbp +movq 520(%rcx,%rdi),%rbp + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 528(t=%rbp +movq 528(%rcx,%rdi),%rbp + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 536(t=%rbp +movq 536(%rcx,%rdi),%rbp + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 544(t=%rbp +movq 544(%rcx,%rdi),%rbp + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 552(t=%rbp +movq 552(%rcx,%rdi),%rbp + +# qhasm: txaddy4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 600(t=%rbp +movq 600(%rcx,%rdi),%rbp + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 608(t=%rbp +movq 608(%rcx,%rdi),%rbp + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 616(t=%rbp +movq 616(%rcx,%rdi),%rbp + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 624(t=%rbp +movq 624(%rcx,%rdi),%rbp + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 632(t=%rbp +movq 632(%rcx,%rdi),%rbp + +# qhasm: tysubx4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 640(t=%rbp +movq 640(%rcx,%rdi),%rbp + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 648(t=%rbp +movq 648(%rcx,%rdi),%rbp + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 656(t=%rbp +movq 656(%rcx,%rdi),%rbp + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 664(t=%rbp +movq 664(%rcx,%rdi),%rbp + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 672(t=%rbp +movq 672(%rcx,%rdi),%rbp + +# qhasm: txaddy4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 720(t=%rbp +movq 720(%rcx,%rdi),%rbp + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 728(t=%rbp +movq 728(%rcx,%rdi),%rbp + +# qhasm: tysubx1 = t if = +# asm 
1: cmove t=int64#15 +# asm 2: movq 736(t=%rbp +movq 736(%rcx,%rdi),%rbp + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 744(t=%rbp +movq 744(%rcx,%rdi),%rbp + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 752(t=%rbp +movq 752(%rcx,%rdi),%rbp + +# qhasm: tysubx4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 760(t=%rbp +movq 760(%rcx,%rdi),%rbp + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 768(t=%rbp +movq 768(%rcx,%rdi),%rbp + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 776(t=%rbp +movq 776(%rcx,%rdi),%rbp + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 784(t=%rbp +movq 784(%rcx,%rdi),%rbp + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 792(t=%rbp +movq 792(%rcx,%rdi),%rbp + +# qhasm: txaddy4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 840(t=%rbp +movq 840(%rcx,%rdi),%rbp + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 848(t=%rbp +movq 848(%rcx,%rdi),%rbp + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 856(t=%rbp +movq 856(%rcx,%rdi),%rbp + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 864(t=%rbp +movq 864(%rcx,%rdi),%rbp + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 872(t=%rbp +movq 872(%rcx,%rdi),%rbp + +# qhasm: tysubx4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 880(t=%rbp +movq 880(%rcx,%rdi),%rbp + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 888(t=%rbp +movq 888(%rcx,%rdi),%rbp + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 896(t=%rbp +movq 896(%rcx,%rdi),%rbp + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 904(t=%rbp +movq 904(%rcx,%rdi),%rbp + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#15 +# asm 2: movq 912(t=%rbp +movq 912(%rcx,%rdi),%rbp + +# qhasm: txaddy4 = t if = +# asm 1: cmove t=int64#15 +# asm 2: mov t=%rbp +mov %rsi,%rbp + +# qhasm: tysubx0 = txaddy0 if signed< +# asm 1: cmovl t=int64#15 +# asm 2: mov t=%rbp +mov %r9,%rbp + +# qhasm: tysubx1 = txaddy1 if signed< +# asm 1: cmovl t=int64#15 +# asm 2: mov t=%rbp +mov %rax,%rbp + +# qhasm: tysubx2 = txaddy2 if signed< +# asm 1: cmovl t=int64#15 +# asm 2: mov t=%rbp +mov %r10,%rbp + +# qhasm: tysubx3 = txaddy3 if signed< +# asm 1: cmovl t=int64#15 +# asm 2: mov t=%rbp +mov %r11,%rbp + +# qhasm: tysubx4 = txaddy4 if signed< +# asm 1: cmovl tp=int64#15 +# asm 2: movq tp=%rbp +movq 56(%rsp),%rbp + +# qhasm: *(uint64 *)(tp + 0) = tysubx0 +# asm 1: movq tt2d0=int64#2 +# asm 2: mov $0,>tt2d0=%rsi +mov $0,%rsi + +# qhasm: tt2d1 = 0 +# asm 1: mov $0,>tt2d1=int64#6 +# asm 2: mov $0,>tt2d1=%r9 +mov $0,%r9 + +# qhasm: tt2d2 = 0 +# asm 1: mov $0,>tt2d2=int64#7 +# asm 2: mov $0,>tt2d2=%rax +mov $0,%rax + +# qhasm: tt2d3 = 0 +# asm 1: mov $0,>tt2d3=int64#8 +# asm 2: mov $0,>tt2d3=%r10 +mov $0,%r10 + +# qhasm: tt2d4 = 0 +# asm 1: mov $0,>tt2d4=int64#9 +# asm 2: mov $0,>tt2d4=%r11 +mov $0,%r11 + +# qhasm: =? 
u - 1 +# asm 1: cmp $1,t=int64#10 +# asm 2: movq 80(t=%r12 +movq 80(%rcx,%rdi),%r12 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 88(t=%r12 +movq 88(%rcx,%rdi),%r12 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 96(t=%r12 +movq 96(%rcx,%rdi),%r12 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 104(t=%r12 +movq 104(%rcx,%rdi),%r12 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 112(t=%r12 +movq 112(%rcx,%rdi),%r12 + +# qhasm: tt2d4 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 200(t=%r12 +movq 200(%rcx,%rdi),%r12 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 208(t=%r12 +movq 208(%rcx,%rdi),%r12 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 216(t=%r12 +movq 216(%rcx,%rdi),%r12 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 224(t=%r12 +movq 224(%rcx,%rdi),%r12 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 232(t=%r12 +movq 232(%rcx,%rdi),%r12 + +# qhasm: tt2d4 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 320(t=%r12 +movq 320(%rcx,%rdi),%r12 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 328(t=%r12 +movq 328(%rcx,%rdi),%r12 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 336(t=%r12 +movq 336(%rcx,%rdi),%r12 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 344(t=%r12 +movq 344(%rcx,%rdi),%r12 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 352(t=%r12 +movq 352(%rcx,%rdi),%r12 + +# qhasm: tt2d4 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 440(t=%r12 +movq 440(%rcx,%rdi),%r12 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 448(t=%r12 +movq 448(%rcx,%rdi),%r12 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 456(t=%r12 +movq 456(%rcx,%rdi),%r12 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 464(t=%r12 +movq 464(%rcx,%rdi),%r12 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 472(t=%r12 +movq 472(%rcx,%rdi),%r12 + +# qhasm: tt2d4 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 560(t=%r12 +movq 560(%rcx,%rdi),%r12 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 568(t=%r12 +movq 568(%rcx,%rdi),%r12 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 576(t=%r12 +movq 576(%rcx,%rdi),%r12 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 584(t=%r12 +movq 584(%rcx,%rdi),%r12 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 592(t=%r12 +movq 592(%rcx,%rdi),%r12 + +# qhasm: tt2d4 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 680(t=%r12 +movq 680(%rcx,%rdi),%r12 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 688(t=%r12 +movq 688(%rcx,%rdi),%r12 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 696(t=%r12 +movq 696(%rcx,%rdi),%r12 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 704(t=%r12 +movq 704(%rcx,%rdi),%r12 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 712(t=%r12 +movq 712(%rcx,%rdi),%r12 + +# qhasm: tt2d4 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 800(t=%r12 +movq 800(%rcx,%rdi),%r12 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 808(t=%r12 +movq 808(%rcx,%rdi),%r12 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 816(t=%r12 +movq 816(%rcx,%rdi),%r12 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 824(t=%r12 
+movq 824(%rcx,%rdi),%r12 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#10 +# asm 2: movq 832(t=%r12 +movq 832(%rcx,%rdi),%r12 + +# qhasm: tt2d4 = t if = +# asm 1: cmove t=int64#5 +# asm 2: movq 920(t=%r8 +movq 920(%rcx,%rdi),%r8 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#5 +# asm 2: movq 928(t=%r8 +movq 928(%rcx,%rdi),%r8 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#5 +# asm 2: movq 936(t=%r8 +movq 936(%rcx,%rdi),%r8 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#5 +# asm 2: movq 944(t=%r8 +movq 944(%rcx,%rdi),%r8 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#1 +# asm 2: movq 952(t=%rdi +movq 952(%rcx,%rdi),%rdi + +# qhasm: tt2d4 = t if = +# asm 1: cmove tt0=int64#1 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,>tt0=%rdi +movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdi + +# qhasm: tt1 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234 +# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt1=int64#4 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt1=%rcx +movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx + +# qhasm: tt2 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234 +# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt2=int64#5 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt2=%r8 +movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8 + +# qhasm: tt3 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234 +# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt3=int64#10 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt3=%r12 +movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r12 + +# qhasm: tt4 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234 +# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt4=int64#11 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>tt4=%r13 +movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r13 + +# qhasm: tt0 -= tt2d0 +# asm 1: sub caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-51-30k/consts.s b/src/ed25519-supercop-amd64-51-30k/consts.s new file mode 100644 index 0000000..838982b --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/consts.s @@ -0,0 +1,47 @@ +.data + +.globl crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51 +.globl crypto_sign_ed25519_amd64_51_30k_batch_121666_213 +.globl crypto_sign_ed25519_amd64_51_30k_batch_2P0 +.globl crypto_sign_ed25519_amd64_51_30k_batch_2P1234 +.globl crypto_sign_ed25519_amd64_51_30k_batch_4P0 +.globl crypto_sign_ed25519_amd64_51_30k_batch_4P1234 +.globl crypto_sign_ed25519_amd64_51_30k_batch_MU0 +.globl crypto_sign_ed25519_amd64_51_30k_batch_MU1 +.globl crypto_sign_ed25519_amd64_51_30k_batch_MU2 +.globl 
crypto_sign_ed25519_amd64_51_30k_batch_MU3 +.globl crypto_sign_ed25519_amd64_51_30k_batch_MU4 +.globl crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 +.globl crypto_sign_ed25519_amd64_51_30k_batch_ORDER1 +.globl crypto_sign_ed25519_amd64_51_30k_batch_ORDER2 +.globl crypto_sign_ed25519_amd64_51_30k_batch_ORDER3 +.globl crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 +.globl crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 +.globl crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 +.globl crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 +.globl crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 +.globl crypto_sign_ed25519_amd64_51_30k_batch__38 + +.p2align 4 + +crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51: .quad 0x0007FFFFFFFFFFFF +crypto_sign_ed25519_amd64_51_30k_batch_121666_213: .quad 996687872 +crypto_sign_ed25519_amd64_51_30k_batch_2P0: .quad 0xFFFFFFFFFFFDA +crypto_sign_ed25519_amd64_51_30k_batch_2P1234: .quad 0xFFFFFFFFFFFFE +crypto_sign_ed25519_amd64_51_30k_batch_4P0: .quad 0x1FFFFFFFFFFFB4 +crypto_sign_ed25519_amd64_51_30k_batch_4P1234: .quad 0x1FFFFFFFFFFFFC +crypto_sign_ed25519_amd64_51_30k_batch_MU0: .quad 0xED9CE5A30A2C131B +crypto_sign_ed25519_amd64_51_30k_batch_MU1: .quad 0x2106215D086329A7 +crypto_sign_ed25519_amd64_51_30k_batch_MU2: .quad 0xFFFFFFFFFFFFFFEB +crypto_sign_ed25519_amd64_51_30k_batch_MU3: .quad 0xFFFFFFFFFFFFFFFF +crypto_sign_ed25519_amd64_51_30k_batch_MU4: .quad 0x000000000000000F +crypto_sign_ed25519_amd64_51_30k_batch_ORDER0: .quad 0x5812631A5CF5D3ED +crypto_sign_ed25519_amd64_51_30k_batch_ORDER1: .quad 0x14DEF9DEA2F79CD6 +crypto_sign_ed25519_amd64_51_30k_batch_ORDER2: .quad 0x0000000000000000 +crypto_sign_ed25519_amd64_51_30k_batch_ORDER3: .quad 0x1000000000000000 +crypto_sign_ed25519_amd64_51_30k_batch_EC2D0: .quad 1859910466990425 +crypto_sign_ed25519_amd64_51_30k_batch_EC2D1: .quad 932731440258426 +crypto_sign_ed25519_amd64_51_30k_batch_EC2D2: .quad 1072319116312658 +crypto_sign_ed25519_amd64_51_30k_batch_EC2D3: .quad 1815898335770999 +crypto_sign_ed25519_amd64_51_30k_batch_EC2D4: .quad 633789495995903 +crypto_sign_ed25519_amd64_51_30k_batch__38: .quad 38 diff --git a/src/ed25519-supercop-amd64-51-30k/fe25519.h b/src/ed25519-supercop-amd64-51-30k/fe25519.h new file mode 100644 index 0000000..dfad55e --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/fe25519.h @@ -0,0 +1,65 @@ +#ifndef FE25519_H +#define FE25519_H + +#define fe25519 crypto_sign_ed25519_amd64_51_30k_batch_fe25519 +#define fe25519_freeze crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze +#define fe25519_unpack crypto_sign_ed25519_amd64_51_30k_batch_fe25519_unpack +#define fe25519_pack crypto_sign_ed25519_amd64_51_30k_batch_fe25519_pack +#define fe25519_iszero_vartime crypto_sign_ed25519_amd64_51_30k_batch_fe25519_iszero_vartime +#define fe25519_iseq_vartime crypto_sign_ed25519_amd64_51_30k_batch_fe25519_iseq_vartime +#define fe25519_cmov crypto_sign_ed25519_amd64_51_30k_batch_fe25519_cmov +#define fe25519_setint crypto_sign_ed25519_amd64_51_30k_batch_fe25519_setint +#define fe25519_neg crypto_sign_ed25519_amd64_51_30k_batch_fe25519_neg +#define fe25519_getparity crypto_sign_ed25519_amd64_51_30k_batch_fe25519_getparity +#define fe25519_add crypto_sign_ed25519_amd64_51_30k_batch_fe25519_add +#define fe25519_sub crypto_sign_ed25519_amd64_51_30k_batch_fe25519_sub +#define fe25519_mul crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul +#define fe25519_mul121666 crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul121666 +#define fe25519_square crypto_sign_ed25519_amd64_51_30k_batch_fe25519_square +#define 
fe25519_nsquare crypto_sign_ed25519_amd64_51_30k_batch_fe25519_nsquare +#define fe25519_invert crypto_sign_ed25519_amd64_51_30k_batch_fe25519_invert +#define fe25519_pow2523 crypto_sign_ed25519_amd64_51_30k_batch_fe25519_pow2523 + +typedef struct +{ + unsigned long long v[5]; +} +fe25519; + +void fe25519_freeze(fe25519 *r); + +void fe25519_unpack(fe25519 *r, const unsigned char x[32]); + +void fe25519_pack(unsigned char r[32], const fe25519 *x); + +void fe25519_cmov(fe25519 *r, const fe25519 *x, unsigned char b); + +void fe25519_cswap(fe25519 *r, fe25519 *x, unsigned char b); + +void fe25519_setint(fe25519 *r, unsigned int v); + +void fe25519_neg(fe25519 *r, const fe25519 *x); + +unsigned char fe25519_getparity(const fe25519 *x); + +int fe25519_iszero_vartime(const fe25519 *x); + +int fe25519_iseq_vartime(const fe25519 *x, const fe25519 *y); + +void fe25519_add(fe25519 *r, const fe25519 *x, const fe25519 *y); + +void fe25519_sub(fe25519 *r, const fe25519 *x, const fe25519 *y); + +void fe25519_mul(fe25519 *r, const fe25519 *x, const fe25519 *y); + +void fe25519_mul121666(fe25519 *r, const fe25519 *x); + +void fe25519_square(fe25519 *r, const fe25519 *x); + +void fe25519_nsquare(fe25519 *r, unsigned long long n); + +void fe25519_invert(fe25519 *r, const fe25519 *x); + +void fe25519_pow2523(fe25519 *r, const fe25519 *x); + +#endif diff --git a/src/ed25519-supercop-amd64-51-30k/fe25519_add.c b/src/ed25519-supercop-amd64-51-30k/fe25519_add.c new file mode 100644 index 0000000..7b39ece --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/fe25519_add.c @@ -0,0 +1,10 @@ +#include "fe25519.h" + +void fe25519_add(fe25519 *r, const fe25519 *x, const fe25519 *y) +{ + r->v[0] = x->v[0] + y->v[0]; + r->v[1] = x->v[1] + y->v[1]; + r->v[2] = x->v[2] + y->v[2]; + r->v[3] = x->v[3] + y->v[3]; + r->v[4] = x->v[4] + y->v[4]; +} diff --git a/src/ed25519-supercop-amd64-51-30k/fe25519_freeze.s b/src/ed25519-supercop-amd64-51-30k/fe25519_freeze.s new file mode 100644 index 0000000..5cd0b1d --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/fe25519_freeze.s @@ -0,0 +1,434 @@ + +# qhasm: int64 rp + +# qhasm: input rp + +# qhasm: int64 r0 + +# qhasm: int64 r1 + +# qhasm: int64 r2 + +# qhasm: int64 r3 + +# qhasm: int64 r4 + +# qhasm: int64 t + +# qhasm: int64 loop + +# qhasm: int64 two51minus1 + +# qhasm: int64 two51minus19 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze +.globl crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze +_crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze: +crypto_sign_ed25519_amd64_51_30k_batch_fe25519_freeze: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: 
movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: r0 = *(uint64 *) (rp + 0) +# asm 1: movq 0(r0=int64#2 +# asm 2: movq 0(r0=%rsi +movq 0(%rdi),%rsi + +# qhasm: r1 = *(uint64 *) (rp + 8) +# asm 1: movq 8(r1=int64#3 +# asm 2: movq 8(r1=%rdx +movq 8(%rdi),%rdx + +# qhasm: r2 = *(uint64 *) (rp + 16) +# asm 1: movq 16(r2=int64#4 +# asm 2: movq 16(r2=%rcx +movq 16(%rdi),%rcx + +# qhasm: r3 = *(uint64 *) (rp + 24) +# asm 1: movq 24(r3=int64#5 +# asm 2: movq 24(r3=%r8 +movq 24(%rdi),%r8 + +# qhasm: r4 = *(uint64 *) (rp + 32) +# asm 1: movq 32(r4=int64#6 +# asm 2: movq 32(r4=%r9 +movq 32(%rdi),%r9 + +# qhasm: two51minus1 = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51 +# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>two51minus1=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>two51minus1=%rax +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rax + +# qhasm: two51minus19 = two51minus1 +# asm 1: mov two51minus19=int64#8 +# asm 2: mov two51minus19=%r10 +mov %rax,%r10 + +# qhasm: two51minus19 -= 18 +# asm 1: sub $18,loop=int64#9 +# asm 2: mov $3,>loop=%r11 +mov $3,%r11 + +# qhasm: reduceloop: +._reduceloop: + +# qhasm: t = r0 +# asm 1: mov t=int64#10 +# asm 2: mov t=%r12 +mov %rsi,%r12 + +# qhasm: (uint64) t >>= 51 +# asm 1: shr $51,t=int64#10 +# asm 2: mov t=%r12 +mov %rdx,%r12 + +# qhasm: (uint64) t >>= 51 +# asm 1: shr $51,t=int64#10 +# asm 2: mov t=%r12 +mov %rcx,%r12 + +# qhasm: (uint64) t >>= 51 +# asm 1: shr $51,t=int64#10 +# asm 2: mov t=%r12 +mov %r8,%r12 + +# qhasm: (uint64) t >>= 51 +# asm 1: shr $51,t=int64#10 +# asm 2: mov t=%r12 +mov %r9,%r12 + +# qhasm: (uint64) t >>= 51 +# asm 1: shr $51,t=int64#10 +# asm 2: imulq $19,t=%r12 +imulq $19,%r12,%r12 + +# qhasm: r0 += t +# asm 1: add ? 
loop -= 1 +# asm 1: sub $1, +ja ._reduceloop + +# qhasm: t = 1 +# asm 1: mov $1,>t=int64#10 +# asm 2: mov $1,>t=%r12 +mov $1,%r12 + +# qhasm: signedcaller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-51-30k/fe25519_getparity.c b/src/ed25519-supercop-amd64-51-30k/fe25519_getparity.c new file mode 100644 index 0000000..a003ec8 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/fe25519_getparity.c @@ -0,0 +1,8 @@ +#include "fe25519.h" + +unsigned char fe25519_getparity(const fe25519 *x) +{ + fe25519 t = *x; + fe25519_freeze(&t); + return (unsigned char)t.v[0] & 1; +} diff --git a/src/ed25519-supercop-amd64-51-30k/fe25519_invert.c b/src/ed25519-supercop-amd64-51-30k/fe25519_invert.c new file mode 100644 index 0000000..1e518b2 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/fe25519_invert.c @@ -0,0 +1,59 @@ +#include "fe25519.h" + +void fe25519_invert(fe25519 *r, const fe25519 *x) +{ + fe25519 z2; + fe25519 z9; + fe25519 z11; + fe25519 z2_5_0; + fe25519 z2_10_0; + fe25519 z2_20_0; + fe25519 z2_50_0; + fe25519 z2_100_0; + fe25519 t; + + /* 2 */ fe25519_square(&z2,x); + /* 4 */ fe25519_square(&t,&z2); + /* 8 */ fe25519_square(&t,&t); + /* 9 */ fe25519_mul(&z9,&t,x); + /* 11 */ fe25519_mul(&z11,&z9,&z2); + /* 22 */ fe25519_square(&t,&z11); + /* 2^5 - 2^0 = 31 */ fe25519_mul(&z2_5_0,&t,&z9); + + /* 2^6 - 2^1 */ fe25519_square(&t,&z2_5_0); + /* 2^10 - 2^5 */ fe25519_nsquare(&t,4); + /* 2^10 - 2^0 */ fe25519_mul(&z2_10_0,&t,&z2_5_0); + + /* 2^11 - 2^1 */ fe25519_square(&t,&z2_10_0); + /* 2^20 - 2^10 */ fe25519_nsquare(&t,9); + /* 2^20 - 2^0 */ fe25519_mul(&z2_20_0,&t,&z2_10_0); + + /* 2^21 - 2^1 */ fe25519_square(&t,&z2_20_0); + /* 2^40 - 2^20 */ fe25519_nsquare(&t,19); + /* 2^40 - 2^0 */ fe25519_mul(&t,&t,&z2_20_0); + + /* 2^41 - 2^1 */ fe25519_square(&t,&t); + /* 2^50 - 2^10 */ fe25519_nsquare(&t,9); + /* 2^50 - 2^0 */ fe25519_mul(&z2_50_0,&t,&z2_10_0); + + /* 2^51 - 2^1 */ fe25519_square(&t,&z2_50_0); + /* 2^100 - 2^50 */ fe25519_nsquare(&t,49); + /* 2^100 - 2^0 */ fe25519_mul(&z2_100_0,&t,&z2_50_0); + + /* 2^101 - 2^1 */ fe25519_square(&t,&z2_100_0); + /* 2^200 - 2^100 */ fe25519_nsquare(&t,99); + /* 2^200 - 2^0 */ fe25519_mul(&t,&t,&z2_100_0); + + /* 2^201 - 2^1 */ fe25519_square(&t,&t); + /* 2^250 - 2^50 */ fe25519_nsquare(&t,49); + /* 2^250 - 2^0 */ fe25519_mul(&t,&t,&z2_50_0); + + /* 2^251 - 2^1 */ fe25519_square(&t,&t); + /* 2^252 - 2^2 */ fe25519_square(&t,&t); + /* 2^253 - 2^3 */ fe25519_square(&t,&t); + + /* 2^254 - 2^4 */ fe25519_square(&t,&t); + + /* 2^255 - 2^5 */ fe25519_square(&t,&t); + /* 2^255 - 21 */ fe25519_mul(r,&t,&z11); +} diff --git a/src/ed25519-supercop-amd64-51-30k/fe25519_iseq.c b/src/ed25519-supercop-amd64-51-30k/fe25519_iseq.c new file mode 100644 index 
0000000..b4ef201 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/fe25519_iseq.c @@ -0,0 +1,15 @@ +#include "fe25519.h" + +int fe25519_iseq_vartime(const fe25519 *x, const fe25519 *y) +{ + fe25519 t1 = *x; + fe25519 t2 = *y; + fe25519_freeze(&t1); + fe25519_freeze(&t2); + if(t1.v[0] != t2.v[0]) return 0; + if(t1.v[1] != t2.v[1]) return 0; + if(t1.v[2] != t2.v[2]) return 0; + if(t1.v[3] != t2.v[3]) return 0; + if(t1.v[4] != t2.v[4]) return 0; + return 1; +} diff --git a/src/ed25519-supercop-amd64-51-30k/fe25519_iszero.c b/src/ed25519-supercop-amd64-51-30k/fe25519_iszero.c new file mode 100644 index 0000000..c182001 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/fe25519_iszero.c @@ -0,0 +1,13 @@ +#include "fe25519.h" + +int fe25519_iszero_vartime(const fe25519 *x) +{ + fe25519 t = *x; + fe25519_freeze(&t); + if (t.v[0]) return 0; + if (t.v[1]) return 0; + if (t.v[2]) return 0; + if (t.v[3]) return 0; + if (t.v[4]) return 0; + return 1; +} diff --git a/src/ed25519-supercop-amd64-51-30k/fe25519_mul.s b/src/ed25519-supercop-amd64-51-30k/fe25519_mul.s new file mode 100644 index 0000000..9d6c537 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/fe25519_mul.s @@ -0,0 +1,946 @@ + +# qhasm: int64 rp + +# qhasm: int64 xp + +# qhasm: int64 yp + +# qhasm: input rp + +# qhasm: input xp + +# qhasm: input yp + +# qhasm: int64 r0 + +# qhasm: int64 r1 + +# qhasm: int64 r2 + +# qhasm: int64 r3 + +# qhasm: int64 r4 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: int64 c4 + +# qhasm: int64 c5 + +# qhasm: int64 c6 + +# qhasm: int64 c7 + +# qhasm: caller c1 + +# qhasm: caller c2 + +# qhasm: caller c3 + +# qhasm: caller c4 + +# qhasm: caller c5 + +# qhasm: caller c6 + +# qhasm: caller c7 + +# qhasm: stack64 c1_stack + +# qhasm: stack64 c2_stack + +# qhasm: stack64 c3_stack + +# qhasm: stack64 c4_stack + +# qhasm: stack64 c5_stack + +# qhasm: stack64 c6_stack + +# qhasm: stack64 c7_stack + +# qhasm: stack64 x119_stack + +# qhasm: stack64 x219_stack + +# qhasm: stack64 x319_stack + +# qhasm: stack64 x419_stack + +# qhasm: stack64 rp_stack + +# qhasm: int64 mulr01 + +# qhasm: int64 mulr11 + +# qhasm: int64 mulr21 + +# qhasm: int64 mulr31 + +# qhasm: int64 mulr41 + +# qhasm: int64 mulrax + +# qhasm: int64 mulrdx + +# qhasm: int64 mult + +# qhasm: int64 mulredmask + +# qhasm: stack64 mulx219_stack + +# qhasm: stack64 mulx319_stack + +# qhasm: stack64 mulx419_stack + +# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul +.globl crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul +_crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul: +crypto_sign_ed25519_amd64_51_30k_batch_fe25519_mul: +mov %rsp,%r11 +and $31,%r11 +add $96,%r11 +sub %r11,%rsp + +# qhasm: c1_stack = c1 +# asm 1: movq c1_stack=stack64#1 +# asm 2: movq c1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: c2_stack = c2 +# asm 1: movq c2_stack=stack64#2 +# asm 2: movq c2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: c3_stack = c3 +# asm 1: movq c3_stack=stack64#3 +# asm 2: movq c3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: c4_stack = c4 +# asm 1: movq c4_stack=stack64#4 +# asm 2: movq c4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: c5_stack = c5 +# asm 1: movq c5_stack=stack64#5 +# asm 2: movq c5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: c6_stack = c6 +# asm 1: movq c6_stack=stack64#6 +# asm 2: movq c6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: c7_stack = c7 +# asm 1: movq c7_stack=stack64#7 +# asm 2: movq 
c7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: rp_stack = rp +# asm 1: movq rp_stack=stack64#8 +# asm 2: movq rp_stack=56(%rsp) +movq %rdi,56(%rsp) + +# qhasm: yp = yp +# asm 1: mov yp=int64#4 +# asm 2: mov yp=%rcx +mov %rdx,%rcx + +# qhasm: mulrax = *(uint64 *)(xp + 24) +# asm 1: movq 24(mulrax=int64#3 +# asm 2: movq 24(mulrax=%rdx +movq 24(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#9 +# asm 2: movq mulx319_stack=64(%rsp) +movq %rax,64(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 16) +# asm 1: mulq 16(r0=int64#5 +# asm 2: mov r0=%r8 +mov %rax,%r8 + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#6 +# asm 2: mov mulr01=%r9 +mov %rdx,%r9 + +# qhasm: mulrax = *(uint64 *)(xp + 32) +# asm 1: movq 32(mulrax=int64#3 +# asm 2: movq 32(mulrax=%rdx +movq 32(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#10 +# asm 2: movq mulx419_stack=72(%rsp) +movq %rax,72(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 8) +# asm 1: mulq 8(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 0) +# asm 1: mulq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 8) +# asm 1: mulq 8(r1=int64#8 +# asm 2: mov r1=%r10 +mov %rax,%r10 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#9 +# asm 2: mov mulr11=%r11 +mov %rdx,%r11 + +# qhasm: mulrax = *(uint64 *)(xp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 16) +# asm 1: mulq 16(r2=int64#10 +# asm 2: mov r2=%r12 +mov %rax,%r12 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#11 +# asm 2: mov mulr21=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(xp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 24) +# asm 1: mulq 24(r3=int64#12 +# asm 2: mov r3=%r14 +mov %rax,%r14 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#13 +# asm 2: mov mulr31=%r15 +mov %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(xp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 32) +# asm 1: mulq 32(r4=int64#14 +# asm 2: mov r4=%rbx +mov %rax,%rbx + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#15 +# asm 2: mov mulr41=%rbp +mov %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(xp + 8) +# asm 1: movq 8(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 0) +# asm 1: mulq 0(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 8) +# asm 1: mulq 8(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 16) +# asm 1: mulq 16(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 24) +# asm 1: mulq 24(mulrax=int64#3 +# asm 2: movq 8(mulrax=%rdx +movq 8(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq 
$19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 32) +# asm 1: mulq 32(mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 0) +# asm 1: mulq 0(mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 8) +# asm 1: mulq 8(mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 16) +# asm 1: mulq 16(mulrax=int64#3 +# asm 2: movq 16(mulrax=%rdx +movq 16(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 24) +# asm 1: mulq 24(mulrax=int64#3 +# asm 2: movq 16(mulrax=%rdx +movq 16(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 32) +# asm 1: mulq 32(mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 0) +# asm 1: mulq 0(mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 8) +# asm 1: mulq 8(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 24) +# asm 1: mulq 24(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 32) +# asm 1: mulq 32(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 0) +# asm 1: mulq 0(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 16) +# asm 1: mulq 16(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 24) +# asm 1: mulq 24(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(yp + 32) +# asm 1: mulq 32(mulredmask=int64#2 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi + +# qhasm: mulr01 = (mulr01.r0) << 13 +# asm 1: shld $13,mulr41=int64#3 +# asm 2: imulq $19,mulr41=%rdx +imulq $19,%rbp,%rdx + +# qhasm: r0 += mulr41 +# asm 1: add mult=int64#3 +# asm 2: mov mult=%rdx +mov %r8,%rdx + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,r1=int64#4 +# asm 2: mov r1=%rcx +mov %rdx,%rcx + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,r2=int64#6 +# asm 2: mov r2=%r9 +mov %rdx,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,r3=int64#7 +# asm 2: mov r3=%rax +mov %rdx,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,r4=int64#8 +# asm 2: mov r4=%r10 +mov %rdx,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#3 +# asm 2: imulq $19,mult=%rdx +imulq $19,%rdx,%rdx + +# qhasm: r0 += mult +# asm 1: add c1=int64#9 +# asm 2: movq c1=%r11 +movq 0(%rsp),%r11 + +# qhasm: c2 =c2_stack +# asm 1: movq c2=int64#10 +# asm 2: movq c2=%r12 +movq 8(%rsp),%r12 + +# qhasm: c3 =c3_stack +# asm 1: movq c3=int64#11 +# asm 2: movq c3=%r13 +movq 16(%rsp),%r13 + +# qhasm: c4 =c4_stack +# asm 1: movq c4=int64#12 +# asm 2: movq 
c4=%r14 +movq 24(%rsp),%r14 + +# qhasm: c5 =c5_stack +# asm 1: movq c5=int64#13 +# asm 2: movq c5=%r15 +movq 32(%rsp),%r15 + +# qhasm: c6 =c6_stack +# asm 1: movq c6=int64#14 +# asm 2: movq c6=%rbx +movq 40(%rsp),%rbx + +# qhasm: c7 =c7_stack +# asm 1: movq c7=int64#15 +# asm 2: movq c7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-51-30k/fe25519_neg.c b/src/ed25519-supercop-amd64-51-30k/fe25519_neg.c new file mode 100644 index 0000000..235b209 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/fe25519_neg.c @@ -0,0 +1,8 @@ +#include "fe25519.h" + +void fe25519_neg(fe25519 *r, const fe25519 *x) +{ + fe25519 t; + fe25519_setint(&t,0); + fe25519_sub(r,&t,x); +} diff --git a/src/ed25519-supercop-amd64-51-30k/fe25519_nsquare.s b/src/ed25519-supercop-amd64-51-30k/fe25519_nsquare.s new file mode 100644 index 0000000..cd749bc --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/fe25519_nsquare.s @@ -0,0 +1,763 @@ + +# qhasm: int64 rp + +# qhasm: int64 n + +# qhasm: input rp + +# qhasm: input n + +# qhasm: int64 r0 + +# qhasm: int64 r1 + +# qhasm: int64 r2 + +# qhasm: int64 r3 + +# qhasm: int64 r4 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: int64 c4 + +# qhasm: int64 c5 + +# qhasm: int64 c6 + +# qhasm: int64 c7 + +# qhasm: caller c1 + +# qhasm: caller c2 + +# qhasm: caller c3 + +# qhasm: caller c4 + +# qhasm: caller c5 + +# qhasm: caller c6 + +# qhasm: caller c7 + +# qhasm: stack64 c1_stack + +# qhasm: stack64 c2_stack + +# qhasm: stack64 c3_stack + +# qhasm: stack64 c4_stack + +# qhasm: stack64 c5_stack + +# qhasm: stack64 c6_stack + +# qhasm: stack64 c7_stack + +# qhasm: stack64 x119_stack + +# qhasm: stack64 x219_stack + +# qhasm: stack64 x319_stack + +# qhasm: stack64 x419_stack + +# qhasm: int64 squarer01 + +# qhasm: int64 squarer11 + +# qhasm: int64 squarer21 + +# qhasm: int64 squarer31 + +# qhasm: int64 squarer41 + +# qhasm: int64 squarerax + +# qhasm: int64 squarerdx + +# qhasm: int64 squaret + +# qhasm: int64 squareredmask + +# qhasm: stack64 n_stack + +# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_fe25519_nsquare +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_51_30k_batch_fe25519_nsquare +.globl crypto_sign_ed25519_amd64_51_30k_batch_fe25519_nsquare +_crypto_sign_ed25519_amd64_51_30k_batch_fe25519_nsquare: +crypto_sign_ed25519_amd64_51_30k_batch_fe25519_nsquare: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: c1_stack = c1 +# asm 1: movq c1_stack=stack64#1 +# asm 2: movq c1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: c2_stack = c2 +# asm 1: movq c2_stack=stack64#2 +# asm 2: movq c2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: c3_stack = c3 +# asm 1: movq c3_stack=stack64#3 +# asm 2: movq c3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: c4_stack = c4 +# asm 1: movq c4_stack=stack64#4 +# asm 2: movq c4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: c5_stack = c5 +# asm 1: movq c5_stack=stack64#5 +# asm 2: movq c5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: c6_stack = c6 +# asm 1: movq c6_stack=stack64#6 +# asm 2: movq c6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: c7_stack = c7 +# asm 1: movq c7_stack=stack64#7 +# asm 2: movq c7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: loop: +._loop: + +# qhasm: squarerax = *(uint64 *)(rp + 0) +# asm 1: movq 0(squarerax=int64#7 +# asm 2: movq 0(squarerax=%rax +movq 0(%rdi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 0) +# asm 1: mulq 0(r0=int64#4 +# asm 
2: mov r0=%rcx +mov %rax,%rcx + +# qhasm: squarer01 = squarerdx +# asm 1: mov squarer01=int64#5 +# asm 2: mov squarer01=%r8 +mov %rdx,%r8 + +# qhasm: squarerax = *(uint64 *)(rp + 0) +# asm 1: movq 0(squarerax=int64#7 +# asm 2: movq 0(squarerax=%rax +movq 0(%rdi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,r1=int64#6 +# asm 2: mov r1=%r9 +mov %rax,%r9 + +# qhasm: squarer11 = squarerdx +# asm 1: mov squarer11=int64#8 +# asm 2: mov squarer11=%r10 +mov %rdx,%r10 + +# qhasm: squarerax = *(uint64 *)(rp + 0) +# asm 1: movq 0(squarerax=int64#7 +# asm 2: movq 0(squarerax=%rax +movq 0(%rdi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,r2=int64#9 +# asm 2: mov r2=%r11 +mov %rax,%r11 + +# qhasm: squarer21 = squarerdx +# asm 1: mov squarer21=int64#10 +# asm 2: mov squarer21=%r12 +mov %rdx,%r12 + +# qhasm: squarerax = *(uint64 *)(rp + 0) +# asm 1: movq 0(squarerax=int64#7 +# asm 2: movq 0(squarerax=%rax +movq 0(%rdi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,r3=int64#11 +# asm 2: mov r3=%r13 +mov %rax,%r13 + +# qhasm: squarer31 = squarerdx +# asm 1: mov squarer31=int64#12 +# asm 2: mov squarer31=%r14 +mov %rdx,%r14 + +# qhasm: squarerax = *(uint64 *)(rp + 0) +# asm 1: movq 0(squarerax=int64#7 +# asm 2: movq 0(squarerax=%rax +movq 0(%rdi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,r4=int64#13 +# asm 2: mov r4=%r15 +mov %rax,%r15 + +# qhasm: squarer41 = squarerdx +# asm 1: mov squarer41=int64#14 +# asm 2: mov squarer41=%rbx +mov %rdx,%rbx + +# qhasm: squarerax = *(uint64 *)(rp + 8) +# asm 1: movq 8(squarerax=int64#7 +# asm 2: movq 8(squarerax=%rax +movq 8(%rdi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 8) +# asm 1: mulq 8(squarerax=int64#7 +# asm 2: movq 8(squarerax=%rax +movq 8(%rdi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,squarerax=int64#7 +# asm 2: movq 8(squarerax=%rax +movq 8(%rdi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,squarerax=int64#3 +# asm 2: movq 8(squarerax=%rdx +movq 8(%rdi),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 32) +# asm 1: mulq 32(squarerax=int64#7 +# asm 2: movq 16(squarerax=%rax +movq 16(%rdi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 16) +# asm 1: mulq 16(squarerax=int64#3 +# asm 2: movq 16(squarerax=%rdx +movq 16(%rdi),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 24) +# asm 1: mulq 24(squarerax=int64#3 +# asm 2: movq 16(squarerax=%rdx +movq 16(%rdi),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 32) +# asm 1: mulq 32(squarerax=int64#3 +# asm 2: movq 24(squarerax=%rdx +movq 24(%rdi),%rdx + +# qhasm: squarerax *= 19 +# asm 1: imulq $19,squarerax=int64#7 +# asm 2: imulq $19,squarerax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 24) +# asm 1: mulq 24(squarerax=int64#3 +# asm 2: movq 24(squarerax=%rdx +movq 24(%rdi),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 32) +# asm 1: mulq 32(squarerax=int64#3 +# asm 
2: movq 32(squarerax=%rdx +movq 32(%rdi),%rdx + +# qhasm: squarerax *= 19 +# asm 1: imulq $19,squarerax=int64#7 +# asm 2: imulq $19,squarerax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(rp + 32) +# asm 1: mulq 32(squareredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: squarer01 = (squarer01.r0) << 13 +# asm 1: shld $13,squarer41=int64#5 +# asm 2: imulq $19,squarer41=%r8 +imulq $19,%rbx,%r8 + +# qhasm: r0 += squarer41 +# asm 1: add squaret=int64#5 +# asm 2: mov squaret=%r8 +mov %rcx,%r8 + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,r1=int64#6 +# asm 2: mov r1=%r9 +mov %r8,%r9 + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,r2=int64#7 +# asm 2: mov r2=%rax +mov %r8,%rax + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,r3=int64#8 +# asm 2: mov r3=%r10 +mov %r8,%r10 + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,r4=int64#9 +# asm 2: mov r4=%r11 +mov %r8,%r11 + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,squaret=int64#5 +# asm 2: imulq $19,squaret=%r8 +imulq $19,%r8,%r8 + +# qhasm: r0 += squaret +# asm 1: add ? n -= 1 +# asm 1: sub $1, +jg ._loop + +# qhasm: c1 =c1_stack +# asm 1: movq c1=int64#9 +# asm 2: movq c1=%r11 +movq 0(%rsp),%r11 + +# qhasm: c2 =c2_stack +# asm 1: movq c2=int64#10 +# asm 2: movq c2=%r12 +movq 8(%rsp),%r12 + +# qhasm: c3 =c3_stack +# asm 1: movq c3=int64#11 +# asm 2: movq c3=%r13 +movq 16(%rsp),%r13 + +# qhasm: c4 =c4_stack +# asm 1: movq c4=int64#12 +# asm 2: movq c4=%r14 +movq 24(%rsp),%r14 + +# qhasm: c5 =c5_stack +# asm 1: movq c5=int64#13 +# asm 2: movq c5=%r15 +movq 32(%rsp),%r15 + +# qhasm: c6 =c6_stack +# asm 1: movq c6=int64#14 +# asm 2: movq c6=%rbx +movq 40(%rsp),%rbx + +# qhasm: c7 =c7_stack +# asm 1: movq c7=int64#15 +# asm 2: movq c7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-51-30k/fe25519_pack.c b/src/ed25519-supercop-amd64-51-30k/fe25519_pack.c new file mode 100644 index 0000000..03ad05f --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/fe25519_pack.c @@ -0,0 +1,49 @@ +#include "fe25519.h" + +/* Assumes input x being reduced below 2^255 */ +void fe25519_pack(unsigned char r[32], const fe25519 *x) +{ + fe25519 t; + t = *x; + fe25519_freeze(&t); + r[0] = (unsigned char) ( t.v[0] & 0xff); + r[1] = (unsigned char) ((t.v[0] >> 8) & 0xff); + r[2] = (unsigned char) ((t.v[0] >> 16) & 0xff); + r[3] = (unsigned char) ((t.v[0] >> 24) & 0xff); + r[4] = (unsigned char) ((t.v[0] >> 32) & 0xff); + r[5] = (unsigned char) ((t.v[0] >> 40) & 0xff); + r[6] = (unsigned char) ((t.v[0] >> 48)); + + r[6] ^= (unsigned char) ((t.v[1] << 3) & 0xf8); + r[7] = (unsigned char) ((t.v[1] >> 5) & 0xff); + r[8] = (unsigned char) ((t.v[1] >> 13) & 0xff); + r[9] = (unsigned char) ((t.v[1] >> 21) & 0xff); + r[10] = (unsigned char) ((t.v[1] >> 29) & 0xff); + r[11] = (unsigned char) ((t.v[1] >> 37) & 0xff); + r[12] = (unsigned char) ((t.v[1] >> 45)); + + r[12] ^= (unsigned char) ((t.v[2] << 6) & 0xc0); + r[13] = (unsigned char) ((t.v[2] >> 2) & 0xff); + r[14] = (unsigned char) ((t.v[2] >> 10) & 0xff); + r[15] = (unsigned char) ((t.v[2] >> 18) & 0xff); + r[16] = (unsigned char) ((t.v[2] >> 26) & 0xff); + r[17] = (unsigned char) ((t.v[2] >> 34) & 0xff); + r[18] = (unsigned char) ((t.v[2] >> 42) & 0xff); + r[19] = (unsigned char) ((t.v[2] >> 50)); + + r[19] ^= (unsigned char) ((t.v[3] << 1) & 0xfe); + 
r[20] = (unsigned char) ((t.v[3] >> 7) & 0xff); + r[21] = (unsigned char) ((t.v[3] >> 15) & 0xff); + r[22] = (unsigned char) ((t.v[3] >> 23) & 0xff); + r[23] = (unsigned char) ((t.v[3] >> 31) & 0xff); + r[24] = (unsigned char) ((t.v[3] >> 39) & 0xff); + r[25] = (unsigned char) ((t.v[3] >> 47)); + + r[25] ^= (unsigned char) ((t.v[4] << 4) & 0xf0); + r[26] = (unsigned char) ((t.v[4] >> 4) & 0xff); + r[27] = (unsigned char) ((t.v[4] >> 12) & 0xff); + r[28] = (unsigned char) ((t.v[4] >> 20) & 0xff); + r[29] = (unsigned char) ((t.v[4] >> 28) & 0xff); + r[30] = (unsigned char) ((t.v[4] >> 36) & 0xff); + r[31] = (unsigned char) ((t.v[4] >> 44)); +} diff --git a/src/ed25519-supercop-amd64-51-30k/fe25519_pow2523.c b/src/ed25519-supercop-amd64-51-30k/fe25519_pow2523.c new file mode 100644 index 0000000..3d43392 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/fe25519_pow2523.c @@ -0,0 +1,54 @@ +#include "fe25519.h" + +void fe25519_pow2523(fe25519 *r, const fe25519 *x) +{ + fe25519 z2; + fe25519 z9; + fe25519 z11; + fe25519 z2_5_0; + fe25519 z2_10_0; + fe25519 z2_20_0; + fe25519 z2_50_0; + fe25519 z2_100_0; + fe25519 t; + + /* 2 */ fe25519_square(&z2,x); + /* 4 */ fe25519_square(&t,&z2); + /* 8 */ fe25519_square(&t,&t); + /* 9 */ fe25519_mul(&z9,&t,x); + /* 11 */ fe25519_mul(&z11,&z9,&z2); + /* 22 */ fe25519_square(&t,&z11); + /* 2^5 - 2^0 = 31 */ fe25519_mul(&z2_5_0,&t,&z9); + + /* 2^6 - 2^1 */ fe25519_square(&t,&z2_5_0); + /* 2^10 - 2^5 */ fe25519_nsquare(&t,4); + /* 2^10 - 2^0 */ fe25519_mul(&z2_10_0,&t,&z2_5_0); + + /* 2^11 - 2^1 */ fe25519_square(&t,&z2_10_0); + /* 2^20 - 2^10 */ fe25519_nsquare(&t,9); + /* 2^20 - 2^0 */ fe25519_mul(&z2_20_0,&t,&z2_10_0); + + /* 2^21 - 2^1 */ fe25519_square(&t,&z2_20_0); + /* 2^40 - 2^20 */ fe25519_nsquare(&t,19); + /* 2^40 - 2^0 */ fe25519_mul(&t,&t,&z2_20_0); + + /* 2^41 - 2^1 */ fe25519_square(&t,&t); + /* 2^50 - 2^10 */ fe25519_nsquare(&t,9); + /* 2^50 - 2^0 */ fe25519_mul(&z2_50_0,&t,&z2_10_0); + + /* 2^51 - 2^1 */ fe25519_square(&t,&z2_50_0); + /* 2^100 - 2^50 */ fe25519_nsquare(&t,49); + /* 2^100 - 2^0 */ fe25519_mul(&z2_100_0,&t,&z2_50_0); + + /* 2^101 - 2^1 */ fe25519_square(&t,&z2_100_0); + /* 2^200 - 2^100 */ fe25519_nsquare(&t,99); + /* 2^200 - 2^0 */ fe25519_mul(&t,&t,&z2_100_0); + + /* 2^201 - 2^1 */ fe25519_square(&t,&t); + /* 2^250 - 2^50 */ fe25519_nsquare(&t,49); + /* 2^250 - 2^0 */ fe25519_mul(&t,&t,&z2_50_0); + + /* 2^251 - 2^1 */ fe25519_square(&t,&t); + /* 2^252 - 2^2 */ fe25519_square(&t,&t); + /* 2^252 - 3 */ fe25519_mul(r,&t,x); +} diff --git a/src/ed25519-supercop-amd64-51-30k/fe25519_setint.c b/src/ed25519-supercop-amd64-51-30k/fe25519_setint.c new file mode 100644 index 0000000..2073867 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/fe25519_setint.c @@ -0,0 +1,10 @@ +#include "fe25519.h" + +void fe25519_setint(fe25519 *r, unsigned int v) +{ + r->v[0] = v; + r->v[1] = 0; + r->v[2] = 0; + r->v[3] = 0; + r->v[4] = 0; +} diff --git a/src/ed25519-supercop-amd64-51-30k/fe25519_square.s b/src/ed25519-supercop-amd64-51-30k/fe25519_square.s new file mode 100644 index 0000000..971c197 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/fe25519_square.s @@ -0,0 +1,749 @@ + +# qhasm: int64 rp + +# qhasm: int64 xp + +# qhasm: input rp + +# qhasm: input xp + +# qhasm: int64 r0 + +# qhasm: int64 r1 + +# qhasm: int64 r2 + +# qhasm: int64 r3 + +# qhasm: int64 r4 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: int64 c4 + +# qhasm: int64 c5 + +# qhasm: int64 c6 + +# qhasm: int64 c7 + +# qhasm: caller c1 + +# 
qhasm: caller c2 + +# qhasm: caller c3 + +# qhasm: caller c4 + +# qhasm: caller c5 + +# qhasm: caller c6 + +# qhasm: caller c7 + +# qhasm: stack64 c1_stack + +# qhasm: stack64 c2_stack + +# qhasm: stack64 c3_stack + +# qhasm: stack64 c4_stack + +# qhasm: stack64 c5_stack + +# qhasm: stack64 c6_stack + +# qhasm: stack64 c7_stack + +# qhasm: stack64 x119_stack + +# qhasm: stack64 x219_stack + +# qhasm: stack64 x319_stack + +# qhasm: stack64 x419_stack + +# qhasm: int64 squarer01 + +# qhasm: int64 squarer11 + +# qhasm: int64 squarer21 + +# qhasm: int64 squarer31 + +# qhasm: int64 squarer41 + +# qhasm: int64 squarerax + +# qhasm: int64 squarerdx + +# qhasm: int64 squaret + +# qhasm: int64 squareredmask + +# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_fe25519_square +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_51_30k_batch_fe25519_square +.globl crypto_sign_ed25519_amd64_51_30k_batch_fe25519_square +_crypto_sign_ed25519_amd64_51_30k_batch_fe25519_square: +crypto_sign_ed25519_amd64_51_30k_batch_fe25519_square: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: c1_stack = c1 +# asm 1: movq c1_stack=stack64#1 +# asm 2: movq c1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: c2_stack = c2 +# asm 1: movq c2_stack=stack64#2 +# asm 2: movq c2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: c3_stack = c3 +# asm 1: movq c3_stack=stack64#3 +# asm 2: movq c3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: c4_stack = c4 +# asm 1: movq c4_stack=stack64#4 +# asm 2: movq c4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: c5_stack = c5 +# asm 1: movq c5_stack=stack64#5 +# asm 2: movq c5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: c6_stack = c6 +# asm 1: movq c6_stack=stack64#6 +# asm 2: movq c6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: c7_stack = c7 +# asm 1: movq c7_stack=stack64#7 +# asm 2: movq c7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: squarerax = *(uint64 *)(xp + 0) +# asm 1: movq 0(squarerax=int64#7 +# asm 2: movq 0(squarerax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 0) +# asm 1: mulq 0(r0=int64#4 +# asm 2: mov r0=%rcx +mov %rax,%rcx + +# qhasm: squarer01 = squarerdx +# asm 1: mov squarer01=int64#5 +# asm 2: mov squarer01=%r8 +mov %rdx,%r8 + +# qhasm: squarerax = *(uint64 *)(xp + 0) +# asm 1: movq 0(squarerax=int64#7 +# asm 2: movq 0(squarerax=%rax +movq 0(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,r1=int64#6 +# asm 2: mov r1=%r9 +mov %rax,%r9 + +# qhasm: squarer11 = squarerdx +# asm 1: mov squarer11=int64#8 +# asm 2: mov squarer11=%r10 +mov %rdx,%r10 + +# qhasm: squarerax = *(uint64 *)(xp + 0) +# asm 1: movq 0(squarerax=int64#7 +# asm 2: movq 0(squarerax=%rax +movq 0(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,r2=int64#9 +# asm 2: mov r2=%r11 +mov %rax,%r11 + +# qhasm: squarer21 = squarerdx +# asm 1: mov squarer21=int64#10 +# asm 2: mov squarer21=%r12 +mov %rdx,%r12 + +# qhasm: squarerax = *(uint64 *)(xp + 0) +# asm 1: movq 0(squarerax=int64#7 +# asm 2: movq 0(squarerax=%rax +movq 0(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,r3=int64#11 +# asm 2: mov r3=%r13 +mov %rax,%r13 + +# qhasm: squarer31 = squarerdx +# asm 1: mov squarer31=int64#12 +# asm 2: mov squarer31=%r14 +mov %rdx,%r14 + +# qhasm: squarerax = *(uint64 *)(xp + 0) +# asm 1: movq 0(squarerax=int64#7 +# asm 2: movq 0(squarerax=%rax +movq 0(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,r4=int64#13 +# asm 2: mov r4=%r15 +mov %rax,%r15 + +# qhasm: squarer41 = squarerdx +# asm 1: mov squarer41=int64#14 +# 
asm 2: mov squarer41=%rbx +mov %rdx,%rbx + +# qhasm: squarerax = *(uint64 *)(xp + 8) +# asm 1: movq 8(squarerax=int64#7 +# asm 2: movq 8(squarerax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 8) +# asm 1: mulq 8(squarerax=int64#7 +# asm 2: movq 8(squarerax=%rax +movq 8(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,squarerax=int64#7 +# asm 2: movq 8(squarerax=%rax +movq 8(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,squarerax=int64#3 +# asm 2: movq 8(squarerax=%rdx +movq 8(%rsi),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 32) +# asm 1: mulq 32(squarerax=int64#7 +# asm 2: movq 16(squarerax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 16) +# asm 1: mulq 16(squarerax=int64#3 +# asm 2: movq 16(squarerax=%rdx +movq 16(%rsi),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 24) +# asm 1: mulq 24(squarerax=int64#3 +# asm 2: movq 16(squarerax=%rdx +movq 16(%rsi),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 32) +# asm 1: mulq 32(squarerax=int64#3 +# asm 2: movq 24(squarerax=%rdx +movq 24(%rsi),%rdx + +# qhasm: squarerax *= 19 +# asm 1: imulq $19,squarerax=int64#7 +# asm 2: imulq $19,squarerax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 24) +# asm 1: mulq 24(squarerax=int64#3 +# asm 2: movq 24(squarerax=%rdx +movq 24(%rsi),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 32) +# asm 1: mulq 32(squarerax=int64#3 +# asm 2: movq 32(squarerax=%rdx +movq 32(%rsi),%rdx + +# qhasm: squarerax *= 19 +# asm 1: imulq $19,squarerax=int64#7 +# asm 2: imulq $19,squarerax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 32) +# asm 1: mulq 32(squareredmask=int64#2 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rsi +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi + +# qhasm: squarer01 = (squarer01.r0) << 13 +# asm 1: shld $13,squarer41=int64#3 +# asm 2: imulq $19,squarer41=%rdx +imulq $19,%rbx,%rdx + +# qhasm: r0 += squarer41 +# asm 1: add squaret=int64#3 +# asm 2: mov squaret=%rdx +mov %rcx,%rdx + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,r1=int64#5 +# asm 2: mov r1=%r8 +mov %rdx,%r8 + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,r2=int64#6 +# asm 2: mov r2=%r9 +mov %rdx,%r9 + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,r3=int64#7 +# asm 2: mov r3=%rax +mov %rdx,%rax + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,r4=int64#8 +# asm 2: mov r4=%r10 +mov %rdx,%r10 + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,squaret=int64#3 +# asm 2: imulq $19,squaret=%rdx +imulq $19,%rdx,%rdx + +# qhasm: r0 += squaret +# asm 1: add c1=int64#9 +# asm 2: movq c1=%r11 +movq 0(%rsp),%r11 + +# qhasm: c2 =c2_stack +# asm 1: movq c2=int64#10 +# asm 2: movq c2=%r12 +movq 8(%rsp),%r12 + +# qhasm: c3 =c3_stack +# asm 1: movq c3=int64#11 
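
The shld/mask/imul-by-19 sequence above is the standard radix-2^51 reduction: REDMASK51 is 2^51 - 1, cross products are doubled (hence the factor 38 = 2*19), and anything that carries past 2^255 folds back into the low limb scaled by 19, since 2^255 is congruent to 19 mod 2^255 - 19. A compact C sketch of the whole squaring follows, assuming a compiler with unsigned __int128 (GCC/Clang) and inputs with limbs below 2^52; the names are illustrative, not the package API.

#include <stdint.h>

typedef unsigned __int128 u128;

/* Schoolbook 5x51-bit squaring mod 2^255 - 19, the strategy behind
   fe25519_square.s: cross terms are doubled, and limb products that
   land at or beyond 2^255 are pre-scaled by 19 (so 38 = 2*19). */
static void fe51_square_sketch(uint64_t r[5], const uint64_t x[5])
{
    const uint64_t mask = 0x0007FFFFFFFFFFFFULL;      /* REDMASK51 */
    u128 t0 = (u128)x[0]*x[0]   + (u128)38*x[1]*x[4] + (u128)38*x[2]*x[3];
    u128 t1 = (u128)2*x[0]*x[1] + (u128)38*x[2]*x[4] + (u128)19*x[3]*x[3];
    u128 t2 = (u128)2*x[0]*x[2] + (u128)x[1]*x[1]    + (u128)38*x[3]*x[4];
    u128 t3 = (u128)2*x[0]*x[3] + (u128)2*x[1]*x[2]  + (u128)19*x[4]*x[4];
    u128 t4 = (u128)2*x[0]*x[4] + (u128)2*x[1]*x[3]  + (u128)x[2]*x[2];

    /* carry chain: push each 51-bit overflow up one limb; the top
       overflow re-enters limb 0 multiplied by 19 (2^255 = 19 mod p) */
    uint64_t c;
    t1 += (uint64_t)(t0 >> 51);  r[0] = (uint64_t)t0 & mask;
    t2 += (uint64_t)(t1 >> 51);  r[1] = (uint64_t)t1 & mask;
    t3 += (uint64_t)(t2 >> 51);  r[2] = (uint64_t)t2 & mask;
    t4 += (uint64_t)(t3 >> 51);  r[3] = (uint64_t)t3 & mask;
    c   = (uint64_t)(t4 >> 51);  r[4] = (uint64_t)t4 & mask;
    r[0] += 19 * c;
    c = r[0] >> 51;  r[0] &= mask;  r[1] += c;   /* final small carry */
}

Like the assembly, this leaves limbs lazily reduced (at most slightly above 2^51); fe25519_nsquare applies exactly such a squaring n times in place, which is what the addition chains in fe25519_invert.c and fe25519_pow2523.c iterate.
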
+# asm 2: movq c3=%r13 +movq 16(%rsp),%r13 + +# qhasm: c4 =c4_stack +# asm 1: movq c4=int64#12 +# asm 2: movq c4=%r14 +movq 24(%rsp),%r14 + +# qhasm: c5 =c5_stack +# asm 1: movq c5=int64#13 +# asm 2: movq c5=%r15 +movq 32(%rsp),%r15 + +# qhasm: c6 =c6_stack +# asm 1: movq c6=int64#14 +# asm 2: movq c6=%rbx +movq 40(%rsp),%rbx + +# qhasm: c7 =c7_stack +# asm 1: movq c7=int64#15 +# asm 2: movq c7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-51-30k/fe25519_sub.c b/src/ed25519-supercop-amd64-51-30k/fe25519_sub.c new file mode 100644 index 0000000..f9eb538 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/fe25519_sub.c @@ -0,0 +1,34 @@ +#include "fe25519.h" + +void fe25519_sub(fe25519 *r, const fe25519 *x, const fe25519 *y) +{ + fe25519 yt = *y; + /* Not required for reduced input */ + + unsigned long long t; + t = yt.v[0] >> 51; + yt.v[0] &= 2251799813685247; + yt.v[1] += t; + + t = yt.v[1] >> 51; + yt.v[1] &= 2251799813685247; + yt.v[2] += t; + + t = yt.v[2] >> 51; + yt.v[2] &= 2251799813685247; + yt.v[3] += t; + + t = yt.v[3] >> 51; + yt.v[3] &= 2251799813685247; + yt.v[4] += t; + + t = yt.v[4] >> 51; + yt.v[4] &= 2251799813685247; + yt.v[0] += 19*t; + + r->v[0] = x->v[0] + 0xFFFFFFFFFFFDA - yt.v[0]; + r->v[1] = x->v[1] + 0xFFFFFFFFFFFFE - yt.v[1]; + r->v[2] = x->v[2] + 0xFFFFFFFFFFFFE - yt.v[2]; + r->v[3] = x->v[3] + 0xFFFFFFFFFFFFE - yt.v[3]; + r->v[4] = x->v[4] + 0xFFFFFFFFFFFFE - yt.v[4]; +} diff --git a/src/ed25519-supercop-amd64-51-30k/fe25519_unpack.c b/src/ed25519-supercop-amd64-51-30k/fe25519_unpack.c new file mode 100644 index 0000000..5eea0ab --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/fe25519_unpack.c @@ -0,0 +1,46 @@ +#include "fe25519.h" + +void fe25519_unpack(fe25519 *r, const unsigned char x[32]) +{ + r->v[0] = x[0]; + r->v[0] += (unsigned long long)x[1] << 8; + r->v[0] += (unsigned long long)x[2] << 16; + r->v[0] += (unsigned long long)x[3] << 24; + r->v[0] += (unsigned long long)x[4] << 32; + r->v[0] += (unsigned long long)x[5] << 40; + r->v[0] += ((unsigned long long)x[6] & 7) << 48; + + r->v[1] = x[6] >> 3; + r->v[1] += (unsigned long long)x[7] << 5; + r->v[1] += (unsigned long long)x[8] << 13; + r->v[1] += (unsigned long long)x[9] << 21; + r->v[1] += (unsigned long long)x[10] << 29; + r->v[1] += (unsigned long long)x[11] << 37; + r->v[1] += ((unsigned long long)x[12] & 63) << 45; + + r->v[2] = x[12] >> 6; + r->v[2] += (unsigned long long)x[13] << 2; + r->v[2] += (unsigned long long)x[14] << 10; + r->v[2] += (unsigned long long)x[15] << 18; + r->v[2] += (unsigned long long)x[16] << 26; + r->v[2] += (unsigned long long)x[17] << 34; + r->v[2] += (unsigned long long)x[18] << 42; + r->v[2] += ((unsigned long long)x[19] & 1) << 50; + + r->v[3] = x[19] >> 1; + r->v[3] += (unsigned long long)x[20] << 7; + r->v[3] += (unsigned long long)x[21] << 15; + r->v[3] += (unsigned long long)x[22] << 23; + r->v[3] += (unsigned long long)x[23] << 31; + r->v[3] += (unsigned long long)x[24] << 39; + r->v[3] += ((unsigned long long)x[25] & 15) << 47; + + r->v[4] = x[25] >> 4; + r->v[4] += (unsigned long long)x[26] << 4; + r->v[4] += (unsigned long long)x[27] << 12; + r->v[4] += (unsigned long long)x[28] << 20; + r->v[4] += (unsigned long long)x[29] << 28; + r->v[4] += (unsigned long long)x[30] << 36; + r->v[4] += ((unsigned long long)x[31] & 127) << 44; +} + diff --git a/src/ed25519-supercop-amd64-51-30k/ge25519.h b/src/ed25519-supercop-amd64-51-30k/ge25519.h new file mode 100644 index 
0000000..02fd73a --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/ge25519.h @@ -0,0 +1,106 @@ +#ifndef GE25519_H +#define GE25519_H + +/* + * Arithmetic on the twisted Edwards curve -x^2 + y^2 = 1 + dx^2y^2 + * with d = -(121665/121666) = + * 37095705934669439343138083508754565189542113879843219016388785533085940283555 + * Base point: + * (15112221349535400772501151409588531511454012693041857206046113283949847762202,46316835694926478169428394003475163141307993866256225615783033603165251855960); + */ + +#include "fe25519.h" +#include "sc25519.h" + +#define ge25519 crypto_sign_ed25519_amd64_51_30k_batch_ge25519 +#define ge25519_base crypto_sign_ed25519_amd64_51_30k_batch_ge25519_base +#define ge25519_unpackneg_vartime crypto_sign_ed25519_amd64_51_30k_batch_unpackneg_vartime +#define ge25519_pack crypto_sign_ed25519_amd64_51_30k_batch_pack +#define ge25519_isneutral_vartime crypto_sign_ed25519_amd64_51_30k_batch_isneutral_vartime +#define ge25519_add crypto_sign_ed25519_amd64_51_30k_batch_ge25519_add +#define ge25519_double crypto_sign_ed25519_amd64_51_30k_batch_ge25519_double +#define ge25519_double_scalarmult_vartime crypto_sign_ed25519_amd64_51_30k_batch_double_scalarmult_vartime +#define ge25519_multi_scalarmult_vartime crypto_sign_ed25519_amd64_51_30k_batch_ge25519_multi_scalarmult_vartime +#define ge25519_scalarmult_base crypto_sign_ed25519_amd64_51_30k_batch_scalarmult_base +#define ge25519_p1p1_to_p2 crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p2 +#define ge25519_p1p1_to_p3 crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3 +#define ge25519_p1p1_to_pniels crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_pniels +#define ge25519_add_p1p1 crypto_sign_ed25519_amd64_51_30k_batch_ge25519_add_p1p1 +#define ge25519_dbl_p1p1 crypto_sign_ed25519_amd64_51_30k_batch_ge25519_dbl_p1p1 +#define choose_t crypto_sign_ed25519_amd64_51_30k_batch_choose_t +#define choose_t_smultq crypto_sign_ed25519_amd64_51_30k_batch_choose_t_smultq +#define ge25519_nielsadd2 crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd2 +#define ge25519_nielsadd_p1p1 crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd_p1p1 +#define ge25519_pnielsadd_p1p1 crypto_sign_ed25519_amd64_51_30k_batch_ge25519_pnielsadd_p1p1 + +#define ge25519_p3 ge25519 + +typedef struct +{ + fe25519 x; + fe25519 y; + fe25519 z; + fe25519 t; +} ge25519; + +typedef struct +{ + fe25519 x; + fe25519 z; + fe25519 y; + fe25519 t; +} ge25519_p1p1; + +typedef struct +{ + fe25519 x; + fe25519 y; + fe25519 z; +} ge25519_p2; + +typedef struct +{ + fe25519 ysubx; + fe25519 xaddy; + fe25519 t2d; +} ge25519_niels; + +typedef struct +{ + fe25519 ysubx; + fe25519 xaddy; + fe25519 z; + fe25519 t2d; +} ge25519_pniels; + +extern void ge25519_p1p1_to_p2(ge25519_p2 *r, const ge25519_p1p1 *p); +extern void ge25519_p1p1_to_p3(ge25519_p3 *r, const ge25519_p1p1 *p); +extern void ge25519_p1p1_to_pniels(ge25519_pniels *r, const ge25519_p1p1 *p); +extern void ge25519_add_p1p1(ge25519_p1p1 *r, const ge25519_p3 *p, const ge25519_p3 *q); +extern void ge25519_dbl_p1p1(ge25519_p1p1 *r, const ge25519_p2 *p); +extern void choose_t(ge25519_niels *t, unsigned long long pos, signed long long b, const ge25519_niels *base_multiples); +extern void choose_t_smultq(ge25519_pniels *t, signed long long b, const ge25519_pniels *pre); +extern void ge25519_nielsadd2(ge25519_p3 *r, const ge25519_niels *q); +extern void ge25519_nielsadd_p1p1(ge25519_p1p1 *r, const ge25519_p3 *p, const ge25519_niels *q); +extern void ge25519_pnielsadd_p1p1(ge25519_p1p1 *r, const 
ge25519_p3 *p, const ge25519_pniels *q); + +extern const ge25519 ge25519_base; + +extern int ge25519_unpackneg_vartime(ge25519 *r, const unsigned char p[32]); + +extern void ge25519_pack(unsigned char r[32], const ge25519 *p); + +extern int ge25519_isneutral_vartime(const ge25519 *p); + +extern void ge25519_add(ge25519 *r, const ge25519 *p, const ge25519 *q); + +extern void ge25519_double(ge25519 *r, const ge25519 *p); + +/* computes [s1]p1 + [s2]ge25519_base */ +extern void ge25519_double_scalarmult_vartime(ge25519 *r, const ge25519 *p1, const sc25519 *s1, const sc25519 *s2); + +extern void ge25519_multi_scalarmult_vartime(ge25519 *r, ge25519 *p, sc25519 *s, const unsigned long long npoints); + +extern void ge25519_scalarmult_base(ge25519 *r, const sc25519 *s); + +#endif diff --git a/src/ed25519-supercop-amd64-51-30k/ge25519_add.c b/src/ed25519-supercop-amd64-51-30k/ge25519_add.c new file mode 100644 index 0000000..c4d1c68 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/ge25519_add.c @@ -0,0 +1,8 @@ +#include "ge25519.h" + +void ge25519_add(ge25519_p3 *r, const ge25519_p3 *p, const ge25519_p3 *q) +{ + ge25519_p1p1 grp1p1; + ge25519_add_p1p1(&grp1p1, p, q); + ge25519_p1p1_to_p3(r, &grp1p1); +} diff --git a/src/ed25519-supercop-amd64-51-30k/ge25519_add_p1p1.s b/src/ed25519-supercop-amd64-51-30k/ge25519_add_p1p1.s new file mode 100644 index 0000000..5a2a834 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/ge25519_add_p1p1.s @@ -0,0 +1,4716 @@ + +# qhasm: int64 rp + +# qhasm: int64 pp + +# qhasm: int64 qp + +# qhasm: input rp + +# qhasm: input pp + +# qhasm: input qp + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 a0 + +# qhasm: int64 a1 + +# qhasm: int64 a2 + +# qhasm: int64 a3 + +# qhasm: int64 a4 + +# qhasm: stack64 a0_stack + +# qhasm: stack64 a1_stack + +# qhasm: stack64 a2_stack + +# qhasm: stack64 a3_stack + +# qhasm: stack64 a4_stack + +# qhasm: int64 b0 + +# qhasm: int64 b1 + +# qhasm: int64 b2 + +# qhasm: int64 b3 + +# qhasm: int64 b4 + +# qhasm: stack64 b0_stack + +# qhasm: stack64 b1_stack + +# qhasm: stack64 b2_stack + +# qhasm: stack64 b3_stack + +# qhasm: stack64 b4_stack + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: int64 c4 + +# qhasm: stack64 c0_stack + +# qhasm: stack64 c1_stack + +# qhasm: stack64 c2_stack + +# qhasm: stack64 c3_stack + +# qhasm: stack64 c4_stack + +# qhasm: int64 d0 + +# qhasm: int64 d1 + +# qhasm: int64 d2 + +# qhasm: int64 d3 + +# qhasm: int64 d4 + +# qhasm: stack64 d0_stack + +# qhasm: stack64 d1_stack + +# qhasm: stack64 d2_stack + +# qhasm: stack64 d3_stack + +# qhasm: stack64 d4_stack + +# qhasm: int64 t10 + +# qhasm: int64 t11 + +# qhasm: int64 t12 + +# qhasm: int64 t13 + +# qhasm: int64 t14 + +# qhasm: stack64 t10_stack + +# qhasm: stack64 t11_stack + +# qhasm: stack64 t12_stack + +# qhasm: stack64 t13_stack + +# qhasm: stack64 t14_stack + +# qhasm: int64 t20 + +# qhasm: int64 t21 + +# qhasm: int64 t22 + +# qhasm: int64 
diff --git a/src/ed25519-supercop-amd64-51-30k/ge25519_add_p1p1.s b/src/ed25519-supercop-amd64-51-30k/ge25519_add_p1p1.s
new file mode 100644
index 0000000..5a2a834
--- /dev/null
+++ b/src/ed25519-supercop-amd64-51-30k/ge25519_add_p1p1.s
@@ -0,0 +1,4716 @@
+
+# qhasm: int64 rp
+
+# qhasm: int64 pp
+
+# qhasm: int64 qp
+
+# qhasm: input rp
+
+# qhasm: input pp
+
+# qhasm: input qp
+
+# qhasm: int64 caller1
+
+# qhasm: int64 caller2
+
+# qhasm: int64 caller3
+
+# qhasm: int64 caller4
+
+# qhasm: int64 caller5
+
+# qhasm: int64 caller6
+
+# qhasm: int64 caller7
+
+# qhasm: caller caller1
+
+# qhasm: caller caller2
+
+# qhasm: caller caller3
+
+# qhasm: caller caller4
+
+# qhasm: caller caller5
+
+# qhasm: caller caller6
+
+# qhasm: caller caller7
+
+# qhasm: stack64 caller1_stack
+
+# qhasm: stack64 caller2_stack
+
+# qhasm: stack64 caller3_stack
+
+# qhasm: stack64 caller4_stack
+
+# qhasm: stack64 caller5_stack
+
+# qhasm: stack64 caller6_stack
+
+# qhasm: stack64 caller7_stack
+
+# qhasm: int64 a0
+
+# qhasm: int64 a1
+
+# qhasm: int64 a2
+
+# qhasm: int64 a3
+
+# qhasm: int64 a4
+
+# qhasm: stack64 a0_stack
+
+# qhasm: stack64 a1_stack
+
+# qhasm: stack64 a2_stack
+
+# qhasm: stack64 a3_stack
+
+# qhasm: stack64 a4_stack
+
+# qhasm: int64 b0
+
+# qhasm: int64 b1
+
+# qhasm: int64 b2
+
+# qhasm: int64 b3
+
+# qhasm: int64 b4
+
+# qhasm: stack64 b0_stack
+
+# qhasm: stack64 b1_stack
+
+# qhasm: stack64 b2_stack
+
+# qhasm: stack64 b3_stack
+
+# qhasm: stack64 b4_stack
+
+# qhasm: int64 c0
+
+# qhasm: int64 c1
+
+# qhasm: int64 c2
+
+# qhasm: int64 c3
+
+# qhasm: int64 c4
+
+# qhasm: stack64 c0_stack
+
+# qhasm: stack64 c1_stack
+
+# qhasm: stack64 c2_stack
+
+# qhasm: stack64 c3_stack
+
+# qhasm: stack64 c4_stack
+
+# qhasm: int64 d0
+
+# qhasm: int64 d1
+
+# qhasm: int64 d2
+
+# qhasm: int64 d3
+
+# qhasm: int64 d4
+
+# qhasm: stack64 d0_stack
+
+# qhasm: stack64 d1_stack
+
+# qhasm: stack64 d2_stack
+
+# qhasm: stack64 d3_stack
+
+# qhasm: stack64 d4_stack
+
+# qhasm: int64 t10
+
+# qhasm: int64 t11
+
+# qhasm: int64 t12
+
+# qhasm: int64 t13
+
+# qhasm: int64 t14
+
+# qhasm: stack64 t10_stack
+
+# qhasm: stack64 t11_stack
+
+# qhasm: stack64 t12_stack
+
+# qhasm: stack64 t13_stack
+
+# qhasm: stack64 t14_stack
+
+# qhasm: int64 t20
+
+# qhasm: int64 t21
+
+# qhasm: int64 t22
+
+# qhasm: int64 t23
+
+# qhasm: int64 t24
+
+# qhasm: stack64 t20_stack
+
+# qhasm: stack64 t21_stack
+
+# qhasm: stack64 t22_stack
+
+# qhasm: stack64 t23_stack
+
+# qhasm: stack64 t24_stack
+
+# qhasm: int64 rx0
+
+# qhasm: int64 rx1
+
+# qhasm: int64 rx2
+
+# qhasm: int64 rx3
+
+# qhasm: int64 rx4
+
+# qhasm: int64 ry0
+
+# qhasm: int64 ry1
+
+# qhasm: int64 ry2
+
+# qhasm: int64 ry3
+
+# qhasm: int64 ry4
+
+# qhasm: int64 rz0
+
+# qhasm: int64 rz1
+
+# qhasm: int64 rz2
+
+# qhasm: int64 rz3
+
+# qhasm: int64 rz4
+
+# qhasm: int64 rt0
+
+# qhasm: int64 rt1
+
+# qhasm: int64 rt2
+
+# qhasm: int64 rt3
+
+# qhasm: int64 rt4
+
+# qhasm: int64 x0
+
+# qhasm: int64 x1
+
+# qhasm: int64 x2
+
+# qhasm: int64 x3
+
+# qhasm: int64 x4
+
+# qhasm: int64 mulr01
+
+# qhasm: int64 mulr11
+
+# qhasm: int64 mulr21
+
+# qhasm: int64 mulr31
+
+# qhasm: int64 mulr41
+
+# qhasm: int64 mulrax
+
+# qhasm: int64 mulrdx
+
+# qhasm: int64 mult
+
+# qhasm: int64 mulredmask
+
+# qhasm: stack64 mulx219_stack
+
+# qhasm: stack64 mulx319_stack
+
+# qhasm: stack64 mulx419_stack
+
+# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ge25519_add_p1p1
+.text
+.p2align 5
+.globl _crypto_sign_ed25519_amd64_51_30k_batch_ge25519_add_p1p1
+.globl crypto_sign_ed25519_amd64_51_30k_batch_ge25519_add_p1p1
+_crypto_sign_ed25519_amd64_51_30k_batch_ge25519_add_p1p1:
+crypto_sign_ed25519_amd64_51_30k_batch_ge25519_add_p1p1:
+mov %rsp,%r11
+and $31,%r11
+add $256,%r11
+sub %r11,%rsp
+
+# qhasm: caller1_stack = caller1
+# asm 1: movq caller1_stack=stack64#1
+# asm 2: movq caller1_stack=0(%rsp)
+movq %r11,0(%rsp)
+
+# qhasm: caller2_stack = caller2
+# asm 1: movq caller2_stack=stack64#2
+# asm 2: movq caller2_stack=8(%rsp)
+movq %r12,8(%rsp)
+
+# qhasm: caller3_stack = caller3
+# asm 1: movq caller3_stack=stack64#3
+# asm 2: movq caller3_stack=16(%rsp)
+movq %r13,16(%rsp)
+
+# qhasm: caller4_stack = caller4
+# asm 1: movq caller4_stack=stack64#4
+# asm 2: movq caller4_stack=24(%rsp)
+movq %r14,24(%rsp)
+
+# qhasm: caller5_stack = caller5
+# asm 1: movq caller5_stack=stack64#5
+# asm 2: movq caller5_stack=32(%rsp)
+movq %r15,32(%rsp)
+
+# qhasm: caller6_stack = caller6
+# asm 1: movq caller6_stack=stack64#6
+# asm 2: movq caller6_stack=40(%rsp)
+movq %rbx,40(%rsp)
+
+# qhasm: caller7_stack = caller7
+# asm 1: movq caller7_stack=stack64#7
+# asm 2: movq caller7_stack=48(%rsp)
+movq %rbp,48(%rsp)
+
+# qhasm: qp = qp
+# asm 1: mov qp=int64#4
+# asm 2: mov qp=%rcx
+mov %rdx,%rcx
+
+# qhasm: a0 = *(uint64 *)(pp + 40)
+# asm 1: movq 40(a0=int64#3
+# asm 2: movq 40(a0=%rdx
+movq 40(%rsi),%rdx
+
+# qhasm: a1 = *(uint64 *)(pp + 48)
+# asm 1: movq 48(a1=int64#5
+# asm 2: movq 48(a1=%r8
+movq 48(%rsi),%r8
+
+# qhasm: a2 = *(uint64 *)(pp + 56)
+# asm 1: movq 56(a2=int64#6
+# asm 2: movq 56(a2=%r9
+movq 56(%rsi),%r9
+
+# qhasm: a3 = *(uint64 *)(pp + 64)
+# asm 1: movq 64(a3=int64#7
+# asm 2: movq 64(a3=%rax
+movq 64(%rsi),%rax
+
+# qhasm: a4 = *(uint64 *)(pp + 72)
+# asm 1: movq 72(a4=int64#8
+# asm 2: movq 72(a4=%r10
+movq 72(%rsi),%r10
+
+# qhasm: b0 = a0
+# asm 1: mov b0=int64#9
+# asm 2: mov b0=%r11
+mov %rdx,%r11
+
+# qhasm: a0 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0
+# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,x0=int64#10
+# asm 2: movq 0(x0=%r12
+movq 0(%rsi),%r12
+
+# qhasm: b0 += x0
+# asm 1: add b1=int64#10
+# asm 2: mov b1=%r12
+mov %r8,%r12
+
+# qhasm: a1 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
+# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,x1=int64#11
+# asm 2: movq 8(x1=%r13
+movq
8(%rsi),%r13 + +# qhasm: b1 += x1 +# asm 1: add b2=int64#11 +# asm 2: mov b2=%r13 +mov %r9,%r13 + +# qhasm: a2 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234 +# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,x2=int64#12 +# asm 2: movq 16(x2=%r14 +movq 16(%rsi),%r14 + +# qhasm: b2 += x2 +# asm 1: add b3=int64#12 +# asm 2: mov b3=%r14 +mov %rax,%r14 + +# qhasm: a3 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234 +# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,x3=int64#13 +# asm 2: movq 24(x3=%r15 +movq 24(%rsi),%r15 + +# qhasm: b3 += x3 +# asm 1: add b4=int64#13 +# asm 2: mov b4=%r15 +mov %r10,%r15 + +# qhasm: a4 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234 +# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,x4=int64#14 +# asm 2: movq 32(x4=%rbx +movq 32(%rsi),%rbx + +# qhasm: b4 += x4 +# asm 1: add a0_stack=stack64#8 +# asm 2: movq a0_stack=56(%rsp) +movq %rdx,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq a1_stack=stack64#9 +# asm 2: movq a1_stack=64(%rsp) +movq %r8,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq a2_stack=stack64#10 +# asm 2: movq a2_stack=72(%rsp) +movq %r9,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq a3_stack=stack64#11 +# asm 2: movq a3_stack=80(%rsp) +movq %rax,80(%rsp) + +# qhasm: a4_stack = a4 +# asm 1: movq a4_stack=stack64#12 +# asm 2: movq a4_stack=88(%rsp) +movq %r10,88(%rsp) + +# qhasm: b0_stack = b0 +# asm 1: movq b0_stack=stack64#13 +# asm 2: movq b0_stack=96(%rsp) +movq %r11,96(%rsp) + +# qhasm: b1_stack = b1 +# asm 1: movq b1_stack=stack64#14 +# asm 2: movq b1_stack=104(%rsp) +movq %r12,104(%rsp) + +# qhasm: b2_stack = b2 +# asm 1: movq b2_stack=stack64#15 +# asm 2: movq b2_stack=112(%rsp) +movq %r13,112(%rsp) + +# qhasm: b3_stack = b3 +# asm 1: movq b3_stack=stack64#16 +# asm 2: movq b3_stack=120(%rsp) +movq %r14,120(%rsp) + +# qhasm: b4_stack = b4 +# asm 1: movq b4_stack=stack64#17 +# asm 2: movq b4_stack=128(%rsp) +movq %r15,128(%rsp) + +# qhasm: t10 = *(uint64 *)(qp + 40) +# asm 1: movq 40(t10=int64#3 +# asm 2: movq 40(t10=%rdx +movq 40(%rcx),%rdx + +# qhasm: t11 = *(uint64 *)(qp + 48) +# asm 1: movq 48(t11=int64#5 +# asm 2: movq 48(t11=%r8 +movq 48(%rcx),%r8 + +# qhasm: t12 = *(uint64 *)(qp + 56) +# asm 1: movq 56(t12=int64#6 +# asm 2: movq 56(t12=%r9 +movq 56(%rcx),%r9 + +# qhasm: t13 = *(uint64 *)(qp + 64) +# asm 1: movq 64(t13=int64#7 +# asm 2: movq 64(t13=%rax +movq 64(%rcx),%rax + +# qhasm: t14 = *(uint64 *)(qp + 72) +# asm 1: movq 72(t14=int64#8 +# asm 2: movq 72(t14=%r10 +movq 72(%rcx),%r10 + +# qhasm: t20 = t10 +# asm 1: mov t20=int64#9 +# asm 2: mov t20=%r11 +mov %rdx,%r11 + +# qhasm: t10 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0 +# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,rx0=int64#10 +# asm 2: movq 0(rx0=%r12 +movq 0(%rcx),%r12 + +# qhasm: t20 += rx0 +# asm 1: add t21=int64#10 +# asm 2: mov t21=%r12 +mov %r8,%r12 + +# qhasm: t11 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234 +# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,rx1=int64#11 +# asm 2: movq 8(rx1=%r13 +movq 8(%rcx),%r13 + +# qhasm: t21 += rx1 +# asm 1: add t22=int64#11 +# asm 2: mov t22=%r13 +mov %r9,%r13 + +# qhasm: t12 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234 +# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,rx2=int64#12 +# asm 2: movq 16(rx2=%r14 +movq 16(%rcx),%r14 + +# qhasm: t22 += rx2 +# asm 1: add t23=int64#12 +# asm 2: mov t23=%r14 +mov %rax,%r14 + +# qhasm: t13 += *(uint64 *) 
&crypto_sign_ed25519_amd64_51_30k_batch_2P1234 +# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,rx3=int64#13 +# asm 2: movq 24(rx3=%r15 +movq 24(%rcx),%r15 + +# qhasm: t23 += rx3 +# asm 1: add t24=int64#13 +# asm 2: mov t24=%r15 +mov %r10,%r15 + +# qhasm: t14 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234 +# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P1234,rx4=int64#14 +# asm 2: movq 32(rx4=%rbx +movq 32(%rcx),%rbx + +# qhasm: t24 += rx4 +# asm 1: add t10_stack=stack64#18 +# asm 2: movq t10_stack=136(%rsp) +movq %rdx,136(%rsp) + +# qhasm: t11_stack = t11 +# asm 1: movq t11_stack=stack64#19 +# asm 2: movq t11_stack=144(%rsp) +movq %r8,144(%rsp) + +# qhasm: t12_stack = t12 +# asm 1: movq t12_stack=stack64#20 +# asm 2: movq t12_stack=152(%rsp) +movq %r9,152(%rsp) + +# qhasm: t13_stack = t13 +# asm 1: movq t13_stack=stack64#21 +# asm 2: movq t13_stack=160(%rsp) +movq %rax,160(%rsp) + +# qhasm: t14_stack = t14 +# asm 1: movq t14_stack=stack64#22 +# asm 2: movq t14_stack=168(%rsp) +movq %r10,168(%rsp) + +# qhasm: t20_stack = t20 +# asm 1: movq t20_stack=stack64#23 +# asm 2: movq t20_stack=176(%rsp) +movq %r11,176(%rsp) + +# qhasm: t21_stack = t21 +# asm 1: movq t21_stack=stack64#24 +# asm 2: movq t21_stack=184(%rsp) +movq %r12,184(%rsp) + +# qhasm: t22_stack = t22 +# asm 1: movq t22_stack=stack64#25 +# asm 2: movq t22_stack=192(%rsp) +movq %r13,192(%rsp) + +# qhasm: t23_stack = t23 +# asm 1: movq t23_stack=stack64#26 +# asm 2: movq t23_stack=200(%rsp) +movq %r14,200(%rsp) + +# qhasm: t24_stack = t24 +# asm 1: movq t24_stack=stack64#27 +# asm 2: movq t24_stack=208(%rsp) +movq %r15,208(%rsp) + +# qhasm: mulrax = a3_stack +# asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 80(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#28 +# asm 2: movq mulx319_stack=216(%rsp) +movq %rax,216(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * t12_stack +# asm 1: mulq a0=int64#5 +# asm 2: mov a0=%r8 +mov %rax,%r8 + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#6 +# asm 2: mov mulr01=%r9 +mov %rdx,%r9 + +# qhasm: mulrax = a4_stack +# asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 88(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#29 +# asm 2: movq mulx419_stack=224(%rsp) +movq %rax,224(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * t11_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t10_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t11_stack +# asm 1: mulq a1=int64#8 +# asm 2: mov a1=%r10 +mov %rax,%r10 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#9 +# asm 2: mov mulr11=%r11 +mov %rdx,%r11 + +# qhasm: mulrax = a0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t12_stack +# asm 1: mulq a2=int64#10 +# asm 2: mov a2=%r12 +mov %rax,%r12 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#11 +# asm 2: mov mulr21=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = a0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t13_stack +# asm 
1: mulq a3=int64#12 +# asm 2: mov a3=%r14 +mov %rax,%r14 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#13 +# asm 2: mov mulr31=%r15 +mov %rdx,%r15 + +# qhasm: mulrax = a0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t14_stack +# asm 1: mulq a4=int64#14 +# asm 2: mov a4=%rbx +mov %rax,%rbx + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#15 +# asm 2: mov mulr41=%rbp +mov %rdx,%rbp + +# qhasm: mulrax = a1_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t10_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t11_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t12_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t13_stack +# asm 1: mulq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 64(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t14_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t10_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t11_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t12_stack +# asm 1: mulq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 72(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t13_stack +# asm 1: mulq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 72(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t14_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t10_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t11_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 216(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t13_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 216(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t14_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 88(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t10_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 224(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t12_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 224(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t13_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 224(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t14_stack +# asm 1: mulq mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.a0) << 13 +# asm 1: shld $13,mulr41=int64#6 +# asm 2: imulq $19,mulr41=%r9 +imulq 
$19,%rbp,%r9 + +# qhasm: a0 += mulr41 +# asm 1: add mult=int64#6 +# asm 2: mov mult=%r9 +mov %r8,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,a1=int64#7 +# asm 2: mov a1=%rax +mov %r9,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,a2=int64#8 +# asm 2: mov a2=%r10 +mov %r9,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,a3=int64#9 +# asm 2: mov a3=%r11 +mov %r9,%r11 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,a4=int64#10 +# asm 2: mov a4=%r12 +mov %r9,%r12 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#6 +# asm 2: imulq $19,mult=%r9 +imulq $19,%r9,%r9 + +# qhasm: a0 += mult +# asm 1: add a0_stack=stack64#8 +# asm 2: movq a0_stack=56(%rsp) +movq %r8,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq a1_stack=stack64#9 +# asm 2: movq a1_stack=64(%rsp) +movq %rax,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq a2_stack=stack64#10 +# asm 2: movq a2_stack=72(%rsp) +movq %r10,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq a3_stack=stack64#11 +# asm 2: movq a3_stack=80(%rsp) +movq %r11,80(%rsp) + +# qhasm: a4_stack = a4 +# asm 1: movq a4_stack=stack64#12 +# asm 2: movq a4_stack=88(%rsp) +movq %r12,88(%rsp) + +# qhasm: mulrax = b3_stack +# asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 120(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#18 +# asm 2: movq mulx319_stack=136(%rsp) +movq %rax,136(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * t22_stack +# asm 1: mulq rx0=int64#5 +# asm 2: mov rx0=%r8 +mov %rax,%r8 + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#6 +# asm 2: mov mulr01=%r9 +mov %rdx,%r9 + +# qhasm: mulrax = b4_stack +# asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 128(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#19 +# asm 2: movq mulx419_stack=144(%rsp) +movq %rax,144(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * t21_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t20_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t21_stack +# asm 1: mulq rx1=int64#8 +# asm 2: mov rx1=%r10 +mov %rax,%r10 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#9 +# asm 2: mov mulr11=%r11 +mov %rdx,%r11 + +# qhasm: mulrax = b0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t22_stack +# asm 1: mulq rx2=int64#10 +# asm 2: mov rx2=%r12 +mov %rax,%r12 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#11 +# asm 2: mov mulr21=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = b0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t23_stack +# asm 1: mulq rx3=int64#12 +# asm 2: mov rx3=%r14 +mov %rax,%r14 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#13 +# asm 2: mov mulr31=%r15 +mov %rdx,%r15 + +# qhasm: mulrax = b0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t24_stack +# asm 1: mulq rx4=int64#14 +# asm 2: mov rx4=%rbx +mov %rax,%rbx + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#15 +# asm 2: mov 
mulr41=%rbp +mov %rdx,%rbp + +# qhasm: mulrax = b1_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t20_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t21_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t22_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t23_stack +# asm 1: mulq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 104(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t24_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t20_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t21_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t22_stack +# asm 1: mulq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 112(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t23_stack +# asm 1: mulq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 112(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t24_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t20_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t21_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t23_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t24_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 128(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t20_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t22_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t23_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * t24_stack +# asm 1: mulq mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.rx0) << 13 +# asm 1: shld $13,mulr41=int64#6 +# asm 2: imulq $19,mulr41=%r9 +imulq $19,%rbp,%r9 + +# qhasm: rx0 += mulr41 +# asm 1: add mult=int64#6 +# asm 2: mov mult=%r9 +mov %r8,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rx1=int64#7 +# asm 2: mov rx1=%rax +mov %r9,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rx2=int64#8 +# asm 2: mov rx2=%r10 +mov %r9,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rx3=int64#9 +# asm 2: mov rx3=%r11 +mov %r9,%r11 + +# qhasm: (uint64) mult >>= 51 +# asm 
1: shr $51,rx4=int64#10 +# asm 2: mov rx4=%r12 +mov %r9,%r12 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#6 +# asm 2: imulq $19,mult=%r9 +imulq $19,%r9,%r9 + +# qhasm: rx0 += mult +# asm 1: add ry0=int64#3 +# asm 2: mov ry0=%rdx +mov %r8,%rdx + +# qhasm: ry1 = rx1 +# asm 1: mov ry1=int64#6 +# asm 2: mov ry1=%r9 +mov %rax,%r9 + +# qhasm: ry2 = rx2 +# asm 1: mov ry2=int64#11 +# asm 2: mov ry2=%r13 +mov %r10,%r13 + +# qhasm: ry3 = rx3 +# asm 1: mov ry3=int64#12 +# asm 2: mov ry3=%r14 +mov %r11,%r14 + +# qhasm: ry4 = rx4 +# asm 1: mov ry4=int64#13 +# asm 2: mov ry4=%r15 +mov %r12,%r15 + +# qhasm: rx0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0 +# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,mulrax=int64#3 +# asm 2: movq 144(mulrax=%rdx +movq 144(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#8 +# asm 2: movq mulx319_stack=56(%rsp) +movq %rax,56(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 136) +# asm 1: mulq 136(c0=int64#5 +# asm 2: mov c0=%r8 +mov %rax,%r8 + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#6 +# asm 2: mov mulr01=%r9 +mov %rdx,%r9 + +# qhasm: mulrax = *(uint64 *)(pp + 152) +# asm 1: movq 152(mulrax=int64#3 +# asm 2: movq 152(mulrax=%rdx +movq 152(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#9 +# asm 2: movq mulx419_stack=64(%rsp) +movq %rax,64(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 128) +# asm 1: mulq 128(c1=int64#8 +# asm 2: mov c1=%r10 +mov %rax,%r10 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#9 +# asm 2: mov mulr11=%r11 +mov %rdx,%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 136) +# asm 1: mulq 136(c2=int64#10 +# asm 2: mov c2=%r12 +mov %rax,%r12 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#11 +# asm 2: mov mulr21=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 144) +# asm 1: mulq 144(c3=int64#12 +# asm 2: mov c3=%r14 +mov %rax,%r14 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#13 +# asm 2: mov mulr31=%r15 +mov %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 152) +# asm 1: mulq 152(c4=int64#14 +# asm 2: mov c4=%rbx +mov %rax,%rbx + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#15 +# asm 2: mov mulr41=%rbp +mov %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(pp + 128) +# asm 1: movq 128(mulrax=int64#7 +# asm 2: movq 128(mulrax=%rax +movq 128(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 128(mulrax=%rax 
+movq 128(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 128(mulrax=%rax +movq 128(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 136) +# asm 1: mulq 136(mulrax=int64#7 +# asm 2: movq 128(mulrax=%rax +movq 128(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 144) +# asm 1: mulq 144(mulrax=int64#3 +# asm 2: movq 128(mulrax=%rdx +movq 128(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 136(mulrax=%rax +movq 136(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 136(mulrax=%rax +movq 136(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 136(mulrax=%rax +movq 136(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 136) +# asm 1: mulq 136(mulrax=int64#3 +# asm 2: movq 136(mulrax=%rdx +movq 136(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 144) +# asm 1: mulq 144(mulrax=int64#3 +# asm 2: movq 136(mulrax=%rdx +movq 136(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 144(mulrax=%rax +movq 144(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 144(mulrax=%rax +movq 144(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 144) +# asm 1: mulq 144(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 152(mulrax=%rax +movq 152(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 136) +# asm 1: mulq 136(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 144) +# asm 1: mulq 144(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 152) +# asm 1: mulq 152(mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.c0) << 13 +# asm 1: shld $13,mulr41=int64#6 +# asm 2: imulq $19,mulr41=%r9 +imulq $19,%rbp,%r9 + +# qhasm: c0 += mulr41 +# asm 1: add mult=int64#6 +# asm 2: mov mult=%r9 +mov %r8,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,c1=int64#7 +# asm 2: mov c1=%rax +mov %r9,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,c2=int64#8 +# asm 2: mov c2=%r10 +mov %r9,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,c3=int64#9 +# asm 2: mov c3=%r11 
+mov %r9,%r11 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,c4=int64#10 +# asm 2: mov c4=%r12 +mov %r9,%r12 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#6 +# asm 2: imulq $19,mult=%r9 +imulq $19,%r9,%r9 + +# qhasm: c0 += mult +# asm 1: add c0_stack=stack64#8 +# asm 2: movq c0_stack=56(%rsp) +movq %r8,56(%rsp) + +# qhasm: c1_stack = c1 +# asm 1: movq c1_stack=stack64#9 +# asm 2: movq c1_stack=64(%rsp) +movq %rax,64(%rsp) + +# qhasm: c2_stack = c2 +# asm 1: movq c2_stack=stack64#10 +# asm 2: movq c2_stack=72(%rsp) +movq %r10,72(%rsp) + +# qhasm: c3_stack = c3 +# asm 1: movq c3_stack=stack64#11 +# asm 2: movq c3_stack=80(%rsp) +movq %r11,80(%rsp) + +# qhasm: c4_stack = c4 +# asm 1: movq c4_stack=stack64#12 +# asm 2: movq c4_stack=88(%rsp) +movq %r12,88(%rsp) + +# qhasm: mulrax = c3_stack +# asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 80(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#13 +# asm 2: movq mulx319_stack=96(%rsp) +movq %rax,96(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 + +# qhasm: c0 = mulrax +# asm 1: mov c0=int64#5 +# asm 2: mov c0=%r8 +mov %rax,%r8 + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#6 +# asm 2: mov mulr01=%r9 +mov %rdx,%r9 + +# qhasm: mulrax = c4_stack +# asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 88(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#14 +# asm 2: movq mulx419_stack=104(%rsp) +movq %rax,104(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 + +# qhasm: carry? c0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 + +# qhasm: carry? 
c0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 + +# qhasm: c1 = mulrax +# asm 1: mov c1=int64#8 +# asm 2: mov c1=%r10 +mov %rax,%r10 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#9 +# asm 2: mov mulr11=%r11 +mov %rdx,%r11 + +# qhasm: mulrax = c0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 + +# qhasm: c2 = mulrax +# asm 1: mov c2=int64#10 +# asm 2: mov c2=%r12 +mov %rax,%r12 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#11 +# asm 2: mov mulr21=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = c0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 + +# qhasm: c3 = mulrax +# asm 1: mov c3=int64#12 +# asm 2: mov c3=%r14 +mov %rax,%r14 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#13 +# asm 2: mov mulr31=%r15 +mov %rdx,%r15 + +# qhasm: mulrax = c0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 + +# qhasm: c4 = mulrax +# asm 1: mov c4=int64#14 +# asm 2: mov c4=%rbx +mov %rax,%rbx + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#15 +# asm 2: mov mulr41=%rbp +mov %rdx,%rbp + +# qhasm: mulrax = c1_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 + +# qhasm: carry? c1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 + +# qhasm: carry? c2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 + +# qhasm: carry? c3 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 + +# qhasm: carry? c4 += mulrax +# asm 1: add mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 64(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 + +# qhasm: carry? c0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 + +# qhasm: carry? 
c2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 + +# qhasm: carry? c3 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 + +# qhasm: carry? c4 += mulrax +# asm 1: add mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 72(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 + +# qhasm: carry? c0 += mulrax +# asm 1: add mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 72(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 + +# qhasm: carry? c1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 + +# qhasm: carry? c3 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 + +# qhasm: carry? c4 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 + +# qhasm: carry? c1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 + +# qhasm: carry? c2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 88(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 + +# qhasm: carry? c4 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 + +# qhasm: carry? c1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 + +# qhasm: carry? c2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 + +# qhasm: carry? 
c3 += mulrax +# asm 1: add mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.c0) << 13 +# asm 1: shld $13,mulr41=int64#6 +# asm 2: imulq $19,mulr41=%r9 +imulq $19,%rbp,%r9 + +# qhasm: c0 += mulr41 +# asm 1: add mult=int64#6 +# asm 2: mov mult=%r9 +mov %r8,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,c1=int64#7 +# asm 2: mov c1=%rax +mov %r9,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,c2=int64#8 +# asm 2: mov c2=%r10 +mov %r9,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,c3=int64#9 +# asm 2: mov c3=%r11 +mov %r9,%r11 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,c4=int64#10 +# asm 2: mov c4=%r12 +mov %r9,%r12 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#6 +# asm 2: imulq $19,mult=%r9 +imulq $19,%r9,%r9 + +# qhasm: c0 += mult +# asm 1: add c0_stack=stack64#8 +# asm 2: movq c0_stack=56(%rsp) +movq %r8,56(%rsp) + +# qhasm: c1_stack = c1 +# asm 1: movq c1_stack=stack64#9 +# asm 2: movq c1_stack=64(%rsp) +movq %rax,64(%rsp) + +# qhasm: c2_stack = c2 +# asm 1: movq c2_stack=stack64#10 +# asm 2: movq c2_stack=72(%rsp) +movq %r10,72(%rsp) + +# qhasm: c3_stack = c3 +# asm 1: movq c3_stack=stack64#11 +# asm 2: movq c3_stack=80(%rsp) +movq %r11,80(%rsp) + +# qhasm: c4_stack = c4 +# asm 1: movq c4_stack=stack64#12 +# asm 2: movq c4_stack=88(%rsp) +movq %r12,88(%rsp) + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: movq 104(mulrax=int64#3 +# asm 2: movq 104(mulrax=%rdx +movq 104(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#13 +# asm 2: movq mulx319_stack=96(%rsp) +movq %rax,96(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96) +# asm 1: mulq 96(rt0=int64#5 +# asm 2: mov rt0=%r8 +mov %rax,%r8 + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#6 +# asm 2: mov mulr01=%r9 +mov %rdx,%r9 + +# qhasm: mulrax = *(uint64 *)(pp + 112) +# asm 1: movq 112(mulrax=int64#3 +# asm 2: movq 112(mulrax=%rdx +movq 112(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#14 +# asm 2: movq mulx419_stack=104(%rsp) +movq %rax,104(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88) +# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88) +# asm 1: mulq 88(rt1=int64#8 +# asm 2: mov rt1=%r10 +mov %rax,%r10 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#9 +# asm 2: mov mulr11=%r11 +mov %rdx,%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 80) +# asm 1: movq 80(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96) +# asm 1: mulq 96(rt2=int64#10 +# asm 2: mov rt2=%r12 +mov %rax,%r12 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#11 +# asm 2: mov mulr21=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(pp + 80) +# asm 1: movq 80(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104) +# asm 1: mulq 
104(rt3=int64#12 +# asm 2: mov rt3=%r14 +mov %rax,%r14 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#13 +# asm 2: mov mulr31=%r15 +mov %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(pp + 80) +# asm 1: movq 80(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112) +# asm 1: mulq 112(rt4=int64#14 +# asm 2: mov rt4=%rbx +mov %rax,%rbx + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#15 +# asm 2: mov mulr41=%rbp +mov %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(pp + 88) +# asm 1: movq 88(mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88) +# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96) +# asm 1: mulq 96(mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104) +# asm 1: mulq 104(mulrax=int64#3 +# asm 2: movq 88(mulrax=%rdx +movq 88(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112) +# asm 1: mulq 112(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88) +# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96) +# asm 1: mulq 96(mulrax=int64#3 +# asm 2: movq 96(mulrax=%rdx +movq 96(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104) +# asm 1: mulq 104(mulrax=int64#3 +# asm 2: movq 96(mulrax=%rdx +movq 96(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112) +# asm 1: mulq 112(mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88) +# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104) +# asm 1: mulq 104(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112) +# asm 1: mulq 112(mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96) +# asm 1: mulq 96(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104) +# asm 1: mulq 104(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 
104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112) +# asm 1: mulq 112(mulredmask=int64#2 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi + +# qhasm: mulr01 = (mulr01.rt0) << 13 +# asm 1: shld $13,mulr41=int64#3 +# asm 2: imulq $19,mulr41=%rdx +imulq $19,%rbp,%rdx + +# qhasm: rt0 += mulr41 +# asm 1: add mult=int64#3 +# asm 2: mov mult=%rdx +mov %r8,%rdx + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rt1=int64#4 +# asm 2: mov rt1=%rcx +mov %rdx,%rcx + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rt2=int64#6 +# asm 2: mov rt2=%r9 +mov %rdx,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rt3=int64#7 +# asm 2: mov rt3=%rax +mov %rdx,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rt4=int64#8 +# asm 2: mov rt4=%r10 +mov %rdx,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#3 +# asm 2: imulq $19,mult=%rdx +imulq $19,%rdx,%rdx + +# qhasm: rt0 += mult +# asm 1: add rz0=int64#2 +# asm 2: mov rz0=%rsi +mov %r8,%rsi + +# qhasm: rz1 = rt1 +# asm 1: mov rz1=int64#3 +# asm 2: mov rz1=%rdx +mov %rcx,%rdx + +# qhasm: rz2 = rt2 +# asm 1: mov rz2=int64#9 +# asm 2: mov rz2=%r11 +mov %r9,%r11 + +# qhasm: rz3 = rt3 +# asm 1: mov rz3=int64#10 +# asm 2: mov rz3=%r12 +mov %rax,%r12 + +# qhasm: rz4 = rt4 +# asm 1: mov rz4=int64#11 +# asm 2: mov rz4=%r13 +mov %r10,%r13 + +# qhasm: rt0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0 +# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-51-30k/ge25519_base.c b/src/ed25519-supercop-amd64-51-30k/ge25519_base.c new file mode 100644 index 0000000..bbdf2fe --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/ge25519_base.c @@ -0,0 +1,7 @@ +#include "ge25519.h" + +/* Base point in P^3 coordinates (with Z=1) */ +const ge25519 ge25519_base = {{{0x00062d608f25d51a, 0x000412a4b4f6592a, 0x00075b7171a4b31d, 0x0001ff60527118fe, 0x000216936d3cd6e5}}, + {{0x0006666666666658, 0x0004cccccccccccc, 0x0001999999999999, 0x0003333333333333, 0x0006666666666666}}, + {{0x0000000000000001, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}}, + {{0x00068AB3A5B7DDA3, 0x00000EEA2A5EADBB, 0x0002AF8DF483C27E, 0x000332B375274732, 0x00067875F0FD78B7}}}; diff --git a/src/ed25519-supercop-amd64-51-30k/ge25519_base_niels_smalltables.data b/src/ed25519-supercop-amd64-51-30k/ge25519_base_niels_smalltables.data new file mode 100644 index 0000000..6f04bdf --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/ge25519_base_niels_smalltables.data @@ -0,0 +1,768 @@ +{{{0x00003905d740913e, 0x0000ba2817d673a2, 0x00023e2827f4e67c, 0x000133d2e0c21a34, 
0x00044fd2f9298f81}}, + {{0x000493c6f58c3b85, 0x0000df7181c325f7, 0x0000f50b0b3e4cb7, 0x0005329385a44c32, 0x00007cf9d3a33d4b}}, + {{0x000515674b6fbb59, 0x00001dd454bd5b77, 0x00055f1be90784fc, 0x00066566ea4e8e64, 0x0004f0ebe1faf16e}}}, +{{{0x0001a56042b4d5a8, 0x000189cc159ed153, 0x0005b8deaa3cae04, 0x0002aaf04f11b5d8, 0x0006bb595a669c92}}, + {{0x0004e7fc933c71d7, 0x0002cf41feb6b244, 0x0007581c0a7d1a76, 0x0007172d534d32f0, 0x000590c063fa87d2}}, + {{0x00047eaadad36802, 0x0002707dc900adc6, 0x00001da09aebcd66, 0x0000dc1de55f0873, 0x00049314f0a165ed}}}, +{{{0x00011fe8a4fcd265, 0x0007bcb8374faacc, 0x00052f5af4ef4d4f, 0x0005314098f98d10, 0x0002ab91587555bd}}, + {{0x0005b0a84cee9730, 0x00061d10c97155e4, 0x0004059cc8096a10, 0x00047a608da8014f, 0x0007a164e1b9a80f}}, + {{0x000211f4f1674834, 0x0002fec5bf12b37e, 0x0005c8a93ae139ac, 0x000510ebef3783ad, 0x000549a04b963bb2}}}, +{{{0x0006050a056818bf, 0x00062acc1f5532bf, 0x00028141ccc9fa25, 0x00024d61f471e683, 0x00027933f4c7445a}}, + {{0x000351b98efc099f, 0x00068fbfa4a7050e, 0x00042a49959d971b, 0x000393e51a469efd, 0x000680e910321e58}}, + {{0x000645ece51426b0, 0x0007adb741f297e3, 0x0003e14b038caf18, 0x00060c7214ba6ac6, 0x00044f079b1b0e64}}}, +{{{0x000182c3a447d6ba, 0x00022964e536eff2, 0x000192821f540053, 0x0002f9f19e788e5c, 0x000154a7e73eb1b5}}, + {{0x0002bc4408a5bb33, 0x000078ebdda05442, 0x0002ffb112354123, 0x000375ee8df5862d, 0x0002945ccf146e20}}, + {{0x0002a179e7d003b3, 0x0001400249afd906, 0x0001b63fcd7dca74, 0x00054c3160ea5087, 0x00068b8ac5938b27}}}, +{{{0x000006b67b7d8ca4, 0x000084fa44e72933, 0x0001154ee55d6f8a, 0x0004425d842e7390, 0x00038b64c41ae417}}, + {{0x0004eeeb77157131, 0x0001201915f10741, 0x0001669cda6c9c56, 0x00045ec032db346d, 0x00051e57bb6a2cc3}}, + {{0x0005ad91689de3a4, 0x00051f1f7226f1f3, 0x00073ee6205d7c90, 0x00004f82855a994f, 0x0007865dfa21354c}}}, +{{{0x00072c9aaa3221b1, 0x000267774474f74d, 0x000064b0e9b28085, 0x0003f04ef53b27c9, 0x0001d6edd5d2e531}}, + {{0x00025cd0944ea3bf, 0x00075673b81a4d63, 0x000150b925d1c0d4, 0x00013f38d9294114, 0x000461bea69283c9}}, + {{0x00028aacab0fda36, 0x000287a6a939042f, 0x0006add5a294a319, 0x00061b9be82dc589, 0x000233cef623a2cb}}}, +{{{0x00075dedf39234d9, 0x00001c36ab1f3c54, 0x0000f08fee58f5da, 0x0000e19613a0d637, 0x0003a9024a1320e0}}, + {{0x0007596604dd3e8f, 0x0006fc510e058b36, 0x0003670c8db2cc0d, 0x000297d899ce332f, 0x0000915e76061bce}}, + {{0x0005e835a834a37e, 0x00034d130afd5bef, 0x00059ecc9a2f8673, 0x0002e11608c29b38, 0x000589eb3d9dbefd}}}, +{{{0x0003635449aa515e, 0x0003e178d0475dab, 0x00050b4712a19712, 0x0002dcc2860ff4ad, 0x00030d76d6f03d31}}, + {{0x0004dd0e632f9c1d, 0x0002ced12622a5d9, 0x00018de9614742da, 0x00079ca96fdbb5d4, 0x0006dd37d49a00ee}}, + {{0x000444172106e4c7, 0x00001251afed2d88, 0x000534fc9bed4f5a, 0x0005d85a39cf5234, 0x00010c697112e864}}}, +{{{0x0003c4277dbe5fde, 0x0005a335afd44c92, 0x0000c1164099753e, 0x00070487006fe423, 0x00025e61cabed66f}}, + {{0x00062aa08358c805, 0x00046f440848e194, 0x000447b771a8f52b, 0x000377ba3269d31d, 0x00003bf9baf55080}}, + {{0x0003e128cc586604, 0x0005968b2e8fc7e2, 0x000049a3d5bd61cf, 0x000116505b1ef6e6, 0x000566d78634586e}}}, +{{{0x0007a49f9cc10834, 0x0002b513788a22c6, 0x0005ff4b6ef2395b, 0x0002ec8e5af607bf, 0x00033975bca5ecc3}}, + {{0x00054285c65a2fd0, 0x00055e62ccf87420, 0x00046bb961b19044, 0x0001153405712039, 0x00014fba5f34793b}}, + {{0x000746166985f7d4, 0x00009939000ae79a, 0x0005844c7964f97a, 0x00013617e1f95b3d, 0x00014829cea83fc5}}}, +{{{0x00037b8497dd95c2, 0x00061549d6b4ffe8, 0x000217a22db1d138, 0x0000b9cf062eb09e, 0x0002fd9c71e5f758}}, + {{0x00070b2f4e71ecb8, 
0x000728148efc643c, 0x0000753e03995b76, 0x0005bf5fb2ab6767, 0x00005fc3bc4535d7}},
+ {{0x0000b3ae52afdedd, 0x00019da76619e497, 0x0006fa0654d2558e, 0x00078219d25e41d4, 0x000373767475c651}}},
+{{{0x000299fd40d1add9, 0x0005f2de9a04e5f7, 0x0007c0eebacc1c59, 0x0004cca1b1f8290a, 0x0001fbea56c3b18f}},
+ {{0x000095cb14246590, 0x000002d82aa6ac68, 0x000442f183bc4851, 0x0006464f1c0a0644, 0x0006bf5905730907}},
+ {{0x000778f1e1415b8a, 0x0006f75874efc1f4, 0x00028a694019027f, 0x00052b37a96bdc4d, 0x00002521cf67a635}}},
+{{{0x0007ee0b0a9d5294, 0x000381fbeb4cca27, 0x0007841f3a3e639d, 0x000676ea30c3445f, 0x0003fa00a7e71382}},
+ {{0x00046720772f5ee4, 0x000632c0f359d622, 0x0002b2092ba3e252, 0x000662257c112680, 0x000001753d9f7cd6}},
+ {{0x0001232d963ddb34, 0x00035692e70b078d, 0x000247ca14777a1f, 0x0006db556be8fcd0, 0x00012b5fe2fa048e}}},
+{{{0x0000fbc496fce34d, 0x000575be6b7dae3e, 0x0004a31585cee609, 0x000037e9023930ff, 0x000749b76f96fb12}},
+ {{0x00037c26ad6f1e92, 0x00046a0971227be5, 0x0004722f0d2d9b4c, 0x0003dc46204ee03a, 0x0006f7e93c20796c}},
+ {{0x0002f604aea6ae05, 0x000637dc939323eb, 0x0003fdad9b048d47, 0x0000a8b0d4045af7, 0x0000fcec10f01e02}}},
+{{{0x000558a649fe1e44, 0x00044635aeefcc89, 0x0001ff434887f2ba, 0x0000f981220e2d44, 0x0004901aa7183c51}},
+ {{0x0002d29dc4244e45, 0x0006927b1bc147be, 0x0000308534ac0839, 0x0004853664033f41, 0x000413779166feab}},
+ {{0x0001b7548c1af8f0, 0x0007848c53368116, 0x00001b64e7383de9, 0x000109fbb0587c8f, 0x00041bb887b726d1}}},
+{{{0x00007d44744346be, 0x000282b6a564a81d, 0x0004ed80f875236b, 0x0006fbbe1d450c50, 0x0004eb728c12fcdb}},
+ {{0x00034c597c6691ae, 0x0007a150b6990fc4, 0x00052beb9d922274, 0x00070eed7164861a, 0x0000a871e070c6a9}},
+ {{0x0001b5994bbc8989, 0x00074b7ba84c0660, 0x00075678f1cdaeb8, 0x00023206b0d6f10c, 0x0003ee7300f2685d}}},
+{{{0x000255e49e7dd6b7, 0x00038c2163d59eba, 0x0003861f2a005845, 0x0002e11e4ccbaec9, 0x0001381576297912}},
+ {{0x00027947841e7518, 0x00032c7388dae87f, 0x000414add3971be9, 0x00001850832f0ef1, 0x0007d47c6a2cfb89}},
+ {{0x0002d0148ef0d6e0, 0x0003522a8de787fb, 0x0002ee055e74f9d2, 0x00064038f6310813, 0x000148cf58d34c9e}}},
+{{{0x000492f67934f027, 0x0007ded0815528d4, 0x00058461511a6612, 0x0005ea2e50de1544, 0x0003ff2fa1ebd5db}},
+ {{0x00072f7d9ae4756d, 0x0007711e690ffc4a, 0x000582a2355b0d16, 0x0000dccfe885b6b4, 0x000278febad4eaea}},
+ {{0x0002681f8c933966, 0x0003840521931635, 0x000674f14a308652, 0x0003bd9c88a94890, 0x0004104dd02fe9c6}}},
+{{{0x0002bf5e1124422a, 0x000673146756ae56, 0x00014ad99a87e830, 0x0001eaca65b080fd, 0x0002c863b00afaf5}},
+ {{0x00014e06db096ab8, 0x0001219c89e6b024, 0x000278abd486a2db, 0x000240b292609520, 0x0000165b5a48efca}},
+ {{0x0000a474a0846a76, 0x000099a5ef981e32, 0x0002a8ae3c4bbfe6, 0x00045c34af14832c, 0x000591b67d9bffec}}},
+{{{0x00070d1c80b49bfa, 0x0003d57e7d914625, 0x0003c0722165e545, 0x0005e5b93819e04f, 0x0003de02ec7ca8f7}},
+ {{0x0001b3719f18b55d, 0x000754318c83d337, 0x00027c17b7919797, 0x000145b084089b61, 0x000489b4f8670301}},
+ {{0x0002102d3aeb92ef, 0x00068c22d50c3a46, 0x00042ea89385894e, 0x00075f9ebf55f38c, 0x00049f5fbba496cb}}},
+{{{0x00049a108a5bcfd4, 0x0006178c8e7d6612, 0x0001f03473710375, 0x00073a49614a6098, 0x0005604a86dcbfa6}},
+ {{0x0005628c1e9c572e, 0x000598b108e822ab, 0x00055d8fae29361a, 0x0000adc8d1a97b28, 0x00006a1a6c288675}},
+ {{0x0000d1d47c1764b6, 0x00001c08316a2e51, 0x0002b3db45c95045, 0x0001634f818d300c, 0x00020989e89fe274}}},
+{{{0x000777fd3a2dcc7f, 0x000594a9fb124932, 0x00001f8e80ca15f0, 0x000714d13cec3269, 0x0000403ed1d0ca67}},
+ {{0x0004278b85eaec2e, 0x0000ef59657be2ce, 0x00072fd169588770, 0x0002e9b205260b30, 0x000730b9950f7059}},
+ {{0x00032d35874ec552, 0x0001f3048df1b929, 0x000300d73b179b23, 0x0006e67be5a37d0b, 0x0005bd7454308303}}},
+{{{0x0002d19528b24cc2, 0x0004ac66b8302ff3, 0x000701c8d9fdad51, 0x0006c1b35c5b3727, 0x000133a78007380a}},
+ {{0x0004932115e7792a, 0x000457b9bbb930b8, 0x00068f5d8b193226, 0x0004164e8f1ed456, 0x0005bb7db123067f}},
+ {{0x0001f467c6ca62be, 0x0002c4232a5dc12c, 0x0007551dc013b087, 0x0000690c11b03bcd, 0x000740dca6d58f0e}}},
+{{{0x0000ee0752cfce4e, 0x000660dd8116fbe9, 0x00055167130fffeb, 0x0001c682b885955c, 0x000161d25fa963ea}},
+ {{0x00028c570478433c, 0x0001d8502873a463, 0x0007641e7eded49c, 0x0001ecedd54cf571, 0x0002c03f5256c2b0}},
+ {{0x000718757b53a47d, 0x000619e18b0f2f21, 0x0005fbdfe4c1ec04, 0x0005d798c81ebb92, 0x000699468bdbd96b}}},
+{{{0x00072f46f4dafecf, 0x0002948ffadef7a3, 0x00011ecdfdf3bc04, 0x0003c2e98ffeed25, 0x000525219a473905}},
+ {{0x00053de66aa91948, 0x000045f81a599b1b, 0x0003f7a8bd214193, 0x00071d4da412331a, 0x000293e1c4e6c4a2}},
+ {{0x0006134b925112e1, 0x0006bb942bb406ed, 0x000070c445c0dde2, 0x000411d822c4d7a3, 0x0005b605c447f032}}},
+{{{0x0005805920c47c89, 0x0001924771f9972c, 0x00038bbddf9fc040, 0x0001f7000092b281, 0x00024a76dcea8aeb}},
+ {{0x0001fec6f0e7f04c, 0x0003cebc692c477d, 0x000077986a19a95e, 0x0006eaaaa1778b0f, 0x0002f12fef4cc5ab}},
+ {{0x000522b2dfc0c740, 0x0007e8193480e148, 0x00033fd9a04341b9, 0x0003c863678a20bc, 0x0005e607b2518a43}}},
+{{{0x00031d8f6cdf1818, 0x0001f86c4b144b16, 0x00039875b8d73e9d, 0x0002fbf0d9ffa7b3, 0x0005067acab6ccdd}},
+ {{0x0004431ca596cf14, 0x000015da7c801405, 0x00003c9b6f8f10b5, 0x0000346922934017, 0x000201f33139e457}},
+ {{0x00027f6b08039d51, 0x0004802f8000dfaa, 0x00009692a062c525, 0x0001baea91075817, 0x000397cba8862460}}},
+{{{0x00013093f05959b2, 0x0001bd352f2ec618, 0x000075789b88ea86, 0x00061d1117ea48b9, 0x0002339d320766e6}},
+ {{0x0005c3fbc81379e7, 0x00041bbc255e2f02, 0x0006a3f756998650, 0x0001297fd4e07c42, 0x000771b4022c1e1c}},
+ {{0x0005d986513a2fa7, 0x00063f3a99e11b0f, 0x00028a0ecfd6b26d, 0x00053b6835e18d8f, 0x000331a189219971}}},
+{{{0x00066f45fb4f80c6, 0x0003cc38eeb9fea2, 0x000107647270db1f, 0x000710f1ea740dc8, 0x00031167c6b83bdf}},
+ {{0x00012f3a9d7572af, 0x00010d00e953c4ca, 0x000603df116f2f8a, 0x00033dc276e0e088, 0x0001ac9619ff649a}},
+ {{0x00033842524b1068, 0x00077dd39d30fe45, 0x000189432141a0d0, 0x000088fe4eb8c225, 0x000612436341f08b}}},
+{{{0x000541db874e898d, 0x00062d80fb841b33, 0x00003e6ef027fa97, 0x0007a03c9e9633e8, 0x00046ebe2309e5ef}},
+ {{0x000349e31a2d2638, 0x0000137a7fa6b16c, 0x000681ae92777edc, 0x000222bfc5f8dc51, 0x0001522aa3178d90}},
+ {{0x00002f5369614938, 0x000356e5ada20587, 0x00011bc89f6bf902, 0x000036746419c8db, 0x00045fe70f505243}}},
+{{{0x000075a6960c0b8c, 0x0006dde1c5e41b49, 0x00042e3f516da341, 0x00016a03fda8e79e, 0x000428d1623a0e39}},
+ {{0x00024920c8951491, 0x000107ec61944c5e, 0x00072752e017c01f, 0x000122b7dda2e97a, 0x00016619f6db57a2}},
+ {{0x00074a4401a308fd, 0x00006ed4b9558109, 0x000746f1f6a08867, 0x0004636f5c6f2321, 0x0001d81592d60bd3}}},
+{{{0x00068756a60dac5f, 0x00055d757b8aec26, 0x0003383df45f80bd, 0x0006783f8c9f96a6, 0x00020234a7789ecd}},
+ {{0x0005b69f7b85c5e8, 0x00017a2d175650ec, 0x0004cc3e6dbfc19e, 0x00073e1d3873be0e, 0x0003a5f6d51b0af8}},
+ {{0x00020db67178b252, 0x00073aa3da2c0eda, 0x00079045c01c70d3, 0x0001b37b15251059, 0x0007cd682353cffe}}},
+{{{0x0001a45bd887fab6, 0x00065748076dc17c, 0x0005b98000aa11a8, 0x0004a1ecc9080974, 0x0002838c8863bdc0}},
+ {{0x0005cd6068acf4f3, 0x0003079afc7a74cc, 0x00058097650b64b4, 0x00047fabac9c4e99, 0x0003ef0253b2b2cd}},
+ {{0x0003b0cf4a465030, 0x000022b8aef57a2d, 0x0002ad0677e925ad, 0x0004094167d7457a, 0x00021dcb8a606a82}}},
+{{{0x000004468c9d9fc8, 0x0005da8554796b8c, 0x0003b8be70950025, 0x0006d5892da6a609, 0x0000bc3d08194a31}},
+ {{0x000500fabe7731ba, 0x0007cc53c3113351, 0x0007cf65fe080d81, 0x0003c5d966011ba1, 0x0005d840dbf6c6f6}},
+ {{0x0006380d309fe18b, 0x0004d73c2cb8ee0d, 0x0006b882adbac0b6, 0x00036eabdddd4cbe, 0x0003a4276232ac19}}},
+{{{0x0002432c8a7084fa, 0x00047bf73ca8a968, 0x0001639176262867, 0x0005e8df4f8010ce, 0x0001ff177cea16de}},
+ {{0x0000c172db447ecb, 0x0003f8c505b7a77f, 0x0006a857f97f3f10, 0x0004fcc0567fe03a, 0x0000770c9e824e1a}},
+ {{0x0001d99a45b5b5fd, 0x000523674f2499ec, 0x0000f8fa26182613, 0x00058f7398048c98, 0x00039f264fd41500}}},
+{{{0x00053417dbe7e29c, 0x00054573827394f5, 0x000565eea6f650dd, 0x00042050748dc749, 0x0001712d73468889}},
+ {{0x00034aabfe097be1, 0x00043bfc03253a33, 0x00029bc7fe91b7f3, 0x0000a761e4844a16, 0x00065c621272c35f}},
+ {{0x000389f8ce3193dd, 0x0002d424b8177ce5, 0x000073fa0d3440cd, 0x000139020cd49e97, 0x00022f9800ab19ce}}},
+{{{0x0002368a3e9ef8cb, 0x000454aa08e2ac0b, 0x000490923f8fa700, 0x000372aa9ea4582f, 0x00013f416cd64762}},
+ {{0x00029fdd9a6efdac, 0x0007c694a9282840, 0x0006f7cdeee44b3a, 0x00055a3207b25cc3, 0x0004171a4d38598c}},
+ {{0x000758aa99c94c8c, 0x0005f6001700ff44, 0x0007694e488c01bd, 0x0000d5fde948eed6, 0x000508214fa574bd}}},
+{{{0x000269153ed6fe4b, 0x00072a23aef89840, 0x000052be5299699c, 0x0003a5e5ef132316, 0x00022f960ec6faba}},
+ {{0x000215bb53d003d6, 0x0001179e792ca8c3, 0x0001a0e96ac840a2, 0x00022393e2bb3ab6, 0x0003a7758a4c86cb}},
+ {{0x000111f693ae5076, 0x0003e3bfaa94ca90, 0x000445799476b887, 0x00024a0912464879, 0x0005d9fd15f8de7f}}},
+{{{0x000408d36d63727f, 0x0005faf8f6a66062, 0x0002bb892da8de6b, 0x000769d4f0c7e2e6, 0x000332f35914f8fb}},
+ {{0x00044d2aeed7521e, 0x00050865d2c2a7e4, 0x0002705b5238ea40, 0x00046c70b25d3b97, 0x0003bc187fa47eb9}},
+ {{0x00070115ea86c20c, 0x00016d88da24ada8, 0x0001980622662adf, 0x000501ebbc195a9d, 0x000450d81ce906fb}}},
+{{{0x0003b6a1a6205275, 0x0002e82791d06dcf, 0x00023d72caa93c87, 0x0005f0b7ab68aaf4, 0x0002de25d4ba6345}},
+ {{0x0004d8961cae743f, 0x0006bdc38c7dba0e, 0x0007d3b4a7e1b463, 0x0000844bdee2adf3, 0x0004cbad279663ab}},
+ {{0x00019024a0d71fcd, 0x00015f65115f101a, 0x0004e99067149708, 0x000119d8d1cba5af, 0x0007d7fbcefe2007}}},
+{{{0x00071e6a266b2801, 0x00009aae73e2df5d, 0x00040dd8b219b1a3, 0x000546fb4517de0d, 0x0005975435e87b75}},
+ {{0x00045dc5f3c29094, 0x0003455220b579af, 0x000070c1631e068a, 0x00026bc0630e9b21, 0x0004f9cd196dcd8d}},
+ {{0x000297d86a7b3768, 0x0004835a2f4c6332, 0x000070305f434160, 0x000183dd014e56ae, 0x0007ccdd084387a0}}},
+{{{0x0006422c6d260417, 0x000212904817bb94, 0x0005a319deb854f5, 0x0007a9d4e060da7d, 0x000428bd0ed61d0c}},
+ {{0x000484186760cc93, 0x0007435665533361, 0x00002f686336b801, 0x0005225446f64331, 0x0003593ca848190c}},
+ {{0x0003189a5e849aa7, 0x0006acbb1f59b242, 0x0007f6ef4753630c, 0x0001f346292a2da9, 0x00027398308da2d6}}},
+{{{0x00038d28435ed413, 0x0004064f19992858, 0x0007680fbef543cd, 0x0001aadd83d58d3c, 0x000269597aebe8c3}},
+ {{0x00010e4c0a702453, 0x0004daafa37bd734, 0x00049f6bdc3e8961, 0x0001feffdcecdae6, 0x000572c2945492c3}},
+ {{0x0007c745d6cd30be, 0x00027c7755df78ef, 0x0001776833937fa3, 0x0005405116441855, 0x0007f985498c05bc}}},
+{{{0x0001ce889f0be117, 0x00036f6a94510709, 0x0007f248720016b4, 0x0001821ed1e1cf91, 0x00076c2ec470a31f}},
+ {{0x000615520fbf6363, 0x0000b9e9bf74da6a, 0x0004fe8308201169, 0x000173f76127de43, 0x00030f2653cd69b1}},
+ {{0x0000c938aac10c85, 0x00041b64ed797141, 0x0001beb1c1185e6d, 0x0001ed5490600f07, 0x0002f1273f159647}}},
+{{{0x0001fc7c8ae01e11, 0x0002094d5573e8e7, 0x0005ca3cbbf549d2, 0x0004f920ecc54143, 0x0005d9e572ad85b6}},
+ {{0x00008bd755a70bc0, 0x00049e3a885ce609, 0x00016585881b5ad6, 0x0003c27568d34f5e, 0x00038ac1997edc5f}},
+ {{0x0006b517a751b13b, 0x0000cfd370b180cc, 0x0005377925d1f41a, 0x00034e56566008a2, 0x00022dfcd9cbfe9e}}},
+{{{0x0003d2e0c30d0cd9, 0x0003f597686671bb, 0x0000aa587eb63999, 0x0000e3c7b592c619, 0x0006b2916c05448c}},
+ {{0x000459b4103be0a1, 0x00059a4b3f2d2add, 0x0007d734c8bb8eeb, 0x0002393cbe594a09, 0x0000fe9877824cde}},
+ {{0x000334d10aba913b, 0x000045cdb581cfdb, 0x0005e3e0553a8f36, 0x00050bb3041effb2, 0x0004c303f307ff00}}},
+{{{0x00023bd617b28c85, 0x0006e72ee77d5a61, 0x0001a972ff174dde, 0x0003e2636373c60f, 0x0000d61b8f78b2ab}},
+ {{0x000403580dd94500, 0x00048df77d92653f, 0x00038a9fe3b349ea, 0x0000ea89850aafe1, 0x000416b151ab706a}},
+ {{0x0000d7efe9c136b0, 0x0001ab1c89640ad5, 0x00055f82aef41f97, 0x00046957f317ed0d, 0x000191a2af74277e}}},
+{{{0x0006f74bc53c1431, 0x0001c40e5dbbd9c2, 0x0006c8fb9cae5c97, 0x0004845c5ce1b7da, 0x0007e2e0e450b5cc}},
+ {{0x00062b434f460efb, 0x000294c6c0fad3fc, 0x00068368937b4c0f, 0x0005c9f82910875b, 0x000237e7dbe00545}},
+ {{0x000575ed6701b430, 0x0004d3e17fa20026, 0x000791fc888c4253, 0x0002f1ba99078ac1, 0x00071afa699b1115}}},
+{{{0x00066f9b3953b61d, 0x000555f4283cccb9, 0x0007dd67fb1960e7, 0x00014707a1affed4, 0x000021142e9c2b1c}},
+ {{0x00023c1c473b50d6, 0x0003e7671de21d48, 0x000326fa5547a1e8, 0x00050e4dc25fafd9, 0x00000731fbc78f89}},
+ {{0x0000c71848f81880, 0x00044bd9d8233c86, 0x0006e8578efe5830, 0x0004045b6d7041b5, 0x0004c4d6f3347e15}}},
+{{{0x0007eccfc17d1fc9, 0x0004ca280782831e, 0x0007b8337db1d7d6, 0x0005116def3895fb, 0x000193fddaaa7e47}},
+ {{0x0004ddfc988f1970, 0x0004f6173ea365e1, 0x000645daf9ae4588, 0x0007d43763db623b, 0x00038bf9500a88f9}},
+ {{0x0002c93c37e8876f, 0x0003431a28c583fa, 0x00049049da8bd879, 0x0004b4a8407ac11c, 0x0006a6fb99ebf0d4}}},
+{{{0x0006c1bb560855eb, 0x00071f127e13ad48, 0x0005c6b304905aec, 0x0003756b8e889bc7, 0x00075f76914a3189}},
+ {{0x000122b5b6e423c6, 0x00021e50dff1ddd6, 0x00073d76324e75c0, 0x000588485495418e, 0x000136fda9f42c5e}},
+ {{0x0004dfb1a305bdd1, 0x0003b3ff05811f29, 0x0006ed62283cd92e, 0x00065d1543ec52e1, 0x000022183510be8d}}},
+{{{0x000766385ead2d14, 0x0000194f8b06095e, 0x00008478f6823b62, 0x0006018689d37308, 0x0006a071ce17b806}},
+ {{0x0002710143307a7f, 0x0003d88fb48bf3ab, 0x000249eb4ec18f7a, 0x000136115dff295f, 0x0001387c441fd404}},
+ {{0x0003c3d187978af8, 0x0007afe1c88276ba, 0x00051df281c8ad68, 0x00064906bda4245d, 0x0003171b26aaf1ed}}},
+{{{0x0007319097564ca8, 0x0001844ebc233525, 0x00021d4543fdeee1, 0x0001ad27aaff1bd2, 0x000221fd4873cf08}},
+ {{0x0005b7d8b28a47d1, 0x0002c2ee149e34c1, 0x000776f5629afc53, 0x0001f4ea50fc49a9, 0x0006c514a6334424}},
+ {{0x0002204f3a156341, 0x000537414065a464, 0x00043c0c3bedcf83, 0x0005557e706ea620, 0x00048daa596fb924}}},
+{{{0x00028e665ca59cc7, 0x000165c715940dd9, 0x0000785f3aa11c95, 0x00057b98d7e38469, 0x000676dd6fccad84}},
+ {{0x00061d5dc84c9793, 0x00047de83040c29e, 0x000189deb26507e7, 0x0004d4e6fadc479a, 0x00058c837fa0e8a7}},
+ {{0x0001688596fc9058, 0x00066f6ad403619f, 0x0004d759a87772ef, 0x0007856e6173bea4, 0x0001c4f73f2c6a57}}},
+{{{0x00024fbd305fa0bb, 0x00040a98cc75a1cf, 0x00078ce1220a7533, 0x0006217a10e1c197, 0x000795ac80d1bf64}},
+ {{0x0006706efc7c3484, 0x0006987839ec366d, 0x0000731f95cf7f26, 0x0003ae758ebce4bc, 0x00070459adb7daf6}},
+ {{0x0001db4991b42bb3, 0x000469605b994372, 0x000631e3715c9a58, 0x0007e9cfefcf728f, 0x0005fe162848ce21}}},
+{{{0x0001214fe194961a, 0x0000e1ae39a9e9cb, 0x000543c8b526f9f7, 0x000119498067e91d, 0x0004789d446fc917}},
+ {{0x0001852d5d7cb208, 0x00060d0fbe5ce50f, 0x0005a1e246e37b75, 0x00051aee05ffd590, 0x0002b44c043677da}},
+ {{0x000487ab074eb78e, 0x0001d33b5e8ce343, 0x00013e419feb1b46, 0x0002721f565de6a4, 0x00060c52eef2bb9a}}},
+{{{0x000589bc3bfd8bf1, 0x0006f93e6aa3416b, 0x0004c0a3d6c1ae48, 0x00055587260b586a, 0x00010bc9c312ccfc}},
+ {{0x0003c5c27cae6d11, 0x00036a9491956e05, 0x000124bac9131da6, 0x0003b6f7de202b5d, 0x00070d77248d9b66}},
+ {{0x0002e84b3ec2a05b, 0x00069da2f03c1551, 0x00023a174661a67b, 0x000209bca289f238, 0x00063755bd3a976f}}},
+{{{0x0007a03e2ad10853, 0x000213dcc6ad36ab, 0x0001a6e240d5bdd6, 0x0007c24ffcf8fedf, 0x0000d8cc1c48bc16}},
+ {{0x0007101897f1acb7, 0x0003d82cb77b07b8, 0x000684083d7769f5, 0x00052b28472dce07, 0x0002763751737c52}},
+ {{0x000402d36eb419a9, 0x0007cef68c14a052, 0x0000f1255bc2d139, 0x000373e7d431186a, 0x00070c2dd8a7ad16}}},
+{{{0x000194509f6fec0e, 0x000528d8ca31acac, 0x0007826d73b8b9fa, 0x00024acb99e0f9b3, 0x0002e0fac6363948}},
+ {{0x0004967db8ed7e13, 0x00015aeed02f523a, 0x0006149591d094bc, 0x000672f204c17006, 0x00032b8613816a53}},
+ {{0x0007f7bee448cd64, 0x0004e10f10da0f3c, 0x0003936cb9ab20e9, 0x0007a0fc4fea6cd0, 0x0004179215c735a4}}},
+{{{0x000094e7d7dced2a, 0x000068fa738e118e, 0x00041b640a5fee2b, 0x0006bb709df019d4, 0x000700344a30cd99}},
+ {{0x000633b9286bcd34, 0x0006cab3badb9c95, 0x00074e387edfbdfa, 0x00014313c58a0fd9, 0x00031fa85662241c}},
+ {{0x00026c422e3622f4, 0x0000f3066a05b5f0, 0x0004e2448f0480a6, 0x000244cde0dbf095, 0x00024bb2312a9952}}},
+{{{0x0000ed1732de67c3, 0x000308c369291635, 0x00033ef348f2d250, 0x000004475ea1a1bb, 0x0000fee3e871e188}},
+ {{0x00000c2af5f85c6b, 0x0000609f4cf2883f, 0x0006e86eb5a1ca13, 0x00068b44a2efccd1, 0x0000d1d2af9ffeb5}},
+ {{0x00028aa132621edf, 0x00042b244caf353b, 0x00066b064cc2e08a, 0x0006bb20020cbdd3, 0x00016acd79718531}}},
+{{{0x000772af2d9b1d3d, 0x0006d486448b4e5b, 0x0002ce58dd8d18a8, 0x0001849f67503c8b, 0x000123e0ef6b9302}},
+ {{0x0001c6c57887b6ad, 0x0005abf21fd7592b, 0x00050bd41253867a, 0x0003800b71273151, 0x000164ed34b18161}},
+ {{0x0006d94c192fe69a, 0x0005475222a2690f, 0x000693789d86b8b3, 0x0001f5c3bdfb69dc, 0x00078da0fc61073f}}},
+{{{0x00015d28e52bc66a, 0x00030e1e0351cb7e, 0x00030a2f74b11f8c, 0x00039d120cd7de03, 0x0002d25deeb256b1}},
+ {{0x000780f1680c3a94, 0x0002a35d3cfcd453, 0x000005e5cdc7ddf8, 0x0006ee888078ac24, 0x000054aa4b316b38}},
+ {{0x0000468d19267cb8, 0x00038cdca9b5fbf9, 0x0001bbb05c2ca1e2, 0x0003b015758e9533, 0x000134610a6ab7da}}},
+{{{0x00038ec78df6b0fe, 0x00013caebea36a22, 0x0005ebc6e54e5f6a, 0x00032804903d0eb8, 0x0002102fdba2b20d}},
+ {{0x000265e777d1f515, 0x0000f1f54c1e39a5, 0x0002f01b95522646, 0x0004fdd8db9dde6d, 0x000654878cba97cc}},
+ {{0x0006e405055ce6a1, 0x0005024a35a532d3, 0x0001f69054daf29d, 0x00015d1d0d7a8bd5, 0x0000ad725db29ecb}}},
+{{{0x000267b1834e2457, 0x0006ae19c378bb88, 0x0007457b5ed9d512, 0x0003280d783d05fb, 0x0004aefcffb71a03}},
+ {{0x0007bc0c9b056f85, 0x00051cfebffaffd8, 0x00044abbe94df549, 0x0007ecbbd7e33121, 0x0004f675f5302399}},
+ {{0x000536360415171e, 0x0002313309077865, 0x000251444334afbc, 0x0002b0c3853756e8, 0x0000bccbb72a2a86}}},
+{{{0x0006962feab1a9c8, 0x0006aca28fb9a30b, 0x00056db7ca1b9f98, 0x00039f58497018dd, 0x0004024f0ab59d6b}},
+ {{0x00055e4c50fe1296, 0x00005fdd13efc30d, 0x0001c0c6c380e5ee, 0x0003e11de3fb62a8, 0x0006678fd69108f3}},
+ {{0x0006fa31636863c2, 0x00010ae5a67e42b0, 0x00027abbf01fda31, 0x000380a7b9e64fbc, 0x0002d42e2108ead4}}},
+{{{0x0005131594dfd29b, 0x0003a627e98d52fe, 0x0001154041855661, 0x00019175d09f8384, 0x000676b2608b8d2d}},
+ {{0x00017b0d0f537593, 0x00016263c0c9842e, 0x0004ab827e4539a4, 0x0006370ddb43d73a, 0x000420bf3a79b423}},
+ {{0x0000ba651c5b2b47, 0x0005862363701027, 0x0000c4d6c219c6db, 0x0000f03dff8658de, 0x000745d2ffa9c0cf}}},
+{{{0x00025a1e2bc9c8bd, 0x000104c8f3b037ea, 0x000405576fa96c98, 0x0002e86a88e3876f, 0x0001ae23ceb960cf}},
+ {{0x0006df5721d34e6a, 0x0004f32f767a0c06, 0x0001d5abeac76e20, 0x00041ce9e104e1e4, 0x00006e15be54c1dc}},
+ {{0x00025d871932994a, 0x0006b9d63b560b6e, 0x0002df2814c8d472, 0x0000fbbee20aa4ed, 0x00058ded861278ec}}},
+{{{0x00073793f266c55c, 0x0000b988a9c93b02, 0x00009b0ea32325db, 0x00037cae71c17c5e, 0x0002ff39de85485f}},
+ {{0x00035ba8b6c2c9a8, 0x0001dea58b3185bf, 0x0004b455cd23bbbe, 0x0005ec19c04883f8, 0x00008ba696b531d5}},
+ {{0x00053eeec3efc57a, 0x0002fa9fe9022efd, 0x000699c72c138154, 0x00072a751ebd1ff8, 0x000120633b4947cf}}},
+{{{0x0004987891610042, 0x00079d9d7f5d0172, 0x0003c293013b9ec4, 0x0000c2b85f39caca, 0x00035d30a99b4d59}},
+ {{0x000531474912100a, 0x0005afcdf7c0d057, 0x0007a9e71b788ded, 0x0005ef708f3b0c88, 0x00007433be3cb393}},
+ {{0x000144c05ce997f4, 0x0004960b8a347fef, 0x0001da11f15d74f7, 0x00054fac19c0fead, 0x0002d873ede7af6d}}},
+{{{0x0002316443373409, 0x0005de95503b22af, 0x000699201beae2df, 0x0003db5849ff737a, 0x0002e773654707fa}},
+ {{0x000202e14e5df981, 0x0002ea02bc3eb54c, 0x00038875b2883564, 0x0001298c513ae9dd, 0x0000543618a01600}},
+ {{0x0002bdf4974c23c1, 0x0004b3b9c8d261bd, 0x00026ae8b2a9bc28, 0x0003068210165c51, 0x0004b1443362d079}}},
+{{{0x0004b7c7b66e1f7a, 0x0004bea185efd998, 0x0004fabc711055f8, 0x0001fb9f7836fe38, 0x000582f446752da6}},
+ {{0x000454e91c529ccb, 0x00024c98c6bf72cf, 0x0000486594c3d89a, 0x0007ae13a3d7fa3c, 0x00017038418eaf66}},
+ {{0x00017bd320324ce4, 0x00051489117898c6, 0x0001684d92a0410b, 0x0006e4d90f78c5a7, 0x0000c2a1c4bcda28}}},
+{{{0x0005c7d06f1f0447, 0x0007db70f80b3a49, 0x0006cb4a3ec89a78, 0x00043be8ad81397d, 0x0007c558bd1c6f64}},
+ {{0x0004814869bd6945, 0x0007b7c391a45db8, 0x00057316ac35b641, 0x000641e31de9096a, 0x0005a6a9b30a314d}},
+ {{0x00041524d396463d, 0x0001586b449e1a1d, 0x0002f17e904aed8a, 0x0007e1d2861d3c8e, 0x0000404a5ca0afba}}},
+{{{0x000740070aa743d6, 0x00016b64cbdd1183, 0x00023f4b7b32eb43, 0x000319aba58235b3, 0x00046395bfdcadd9}},
+ {{0x00049e1b2a416fd1, 0x00051c6a0b316c57, 0x000575a59ed71bdc, 0x00074c021a1fec1e, 0x00039527516e7f8e}},
+ {{0x0007db2d1a5d9a9c, 0x00079a200b85422f, 0x000355bfaa71dd16, 0x00000b77ea5f78aa, 0x00076579a29e822d}}},
+{{{0x00068e7e49c02a17, 0x00045795346fe8b6, 0x000089306c8f3546, 0x0006d89f6b2f88f6, 0x00043a384dc9e05b}},
+ {{0x0004b51352b434f2, 0x0001327bd01c2667, 0x000434d73b60c8a1, 0x0003e0daa89443ba, 0x00002c514bb2a277}},
+ {{0x0003d5da8bf1b645, 0x0007ded6a96a6d09, 0x0006c3494fee2f4d, 0x00002c989c8b6bd4, 0x0001160920961548}}},
+{{{0x0005166929dacfaa, 0x000190826b31f689, 0x0004f55567694a7d, 0x000705f4f7b1e522, 0x000351e125bc5698}},
+ {{0x00005616369b4dcd, 0x0004ecab86ac6f47, 0x0003c60085d700b2, 0x0000213ee10dfcea, 0x0002f637d7491e6e}},
+ {{0x00049b461af67bbe, 0x00075915712c3a96, 0x00069a67ef580c0d, 0x00054d38ef70cffc, 0x0007f182d06e7ce2}}},
+{{{0x00048e64ab0168ec, 0x0002a2bdb8a86f4f, 0x0007343b6b2d6929, 0x0001d804aa8ce9a3, 0x00067d4ac8c343e9}},
+ {{0x00054b728e217522, 0x00069a90971b0128, 0x00051a40f2a963a3, 0x00010be9ac12a6bf, 0x00044acc043241c5}},
+ {{0x00056bbb4f7a5777, 0x00029230627c238f, 0x0005ad1a122cd7fb, 0x0000dea56e50e364, 0x000556d1c8312ad7}}},
+{{{0x000740e30c8d3982, 0x0007c2b47f4682fd, 0x0005cd91b8c7dc1c, 0x00077fa790f9e583, 0x000746c6c6d1d824}},
+ {{0x00006756b11be821, 0x000462147e7bb03e, 0x00026519743ebfe0, 0x000782fc59682ab5, 0x000097abe38cc8c7}},
+ {{0x0001c9877ea52da4, 0x0002b37b83a86189, 0x000733af49310da5, 0x00025e81161c04fb, 0x000577e14a34bee8}}},
+{{{0x000268ac61a73b0a, 0x000206f234bebe1c, 0x0005b403a7cbebe8, 0x0007a160f09f4135, 0x00060fa7ee96fd78}},
+ {{0x0006cebebd4dd72b, 0x000340c1e442329f, 0x00032347ffd1a93f, 0x00014a89252cbbe0, 0x000705304b8fb009}},
+ {{0x00051d354d296ec6, 0x0007cbf5a63b16c7, 0x0002f50bb3cf0c14, 0x0001feb385cac65a, 0x00021398e0ca1635}}},
+{{{0x0005058a382b33f3, 0x000175a91816913e, 0x0004f6cdb96b8ae8, 0x00017347c9da81d2, 0x0005aa3ed9d95a23}},
+ {{0x0000aaf9b4b75601, 0x00026b91b5ae44f3, 0x0006de808d7ab1c8, 0x0006a769675530b0, 0x0001bbfb284e98f7}},
+ {{0x000777e9c7d96561, 0x00028e58f006ccac, 0x000541bbbb2cac49, 0x0003e63282994cec, 0x0004a07e14e5e895}}},
+{{{0x000412cb980df999, 0x0005e78dd8ee29dc, 0x000171dff68c575d, 0x0002015dd2f6ef49, 0x0003f0bac391d313}},
+ {{0x000358cdc477a49b, 0x0003cc88fe02e481, 0x000721aab7f4e36b, 0x0000408cc9469953, 0x00050af7aed84afa}},
+ {{0x0007de0115f65be5, 0x0004242c21364dc9, 0x0006b75b64a66098, 0x0000033c0102c085, 0x0001921a316baebd}}},
+{{{0x00022f7edfb870fc, 0x000569eed677b128, 0x00030937dcb0a5af, 0x000758039c78ea1b, 0x0006458df41e273a}},
+ {{0x0002ad9ad9f3c18b, 0x0005ec1638339aeb, 0x0005703b6559a83b, 0x0003fa9f4d05d612, 0x0007b049deca062c}},
+ {{0x0003e37a35444483, 0x000661fdb7d27b99, 0x000317761dd621e4, 0x0007323c30026189, 0x0006093dccbc2950}}},
+{{{0x00039a8585e0706d, 0x0003167ce72663fe, 0x00063d14ecdb4297, 0x0004be21dcf970b8, 0x00057d1ea084827a}},
+ {{0x0006eebe6084034b, 0x0006cf01f70a8d7b, 0x0000b41a54c6670a, 0x0006c84b99bb55db, 0x0006e3180c98b647}},
+ {{0x0002b6e7a128b071, 0x0005b27511755dcf, 0x00008584c2930565, 0x00068c7bda6f4159, 0x000363e999ddd97b}}},
+{{{0x000043c135ee1fc4, 0x0002a11c9919f2d5, 0x0006334cc25dbacd, 0x000295da17b400da, 0x00048ee9b78693a0}},
+ {{0x000048dce24baec6, 0x0002b75795ec05e3, 0x0003bfa4c5da6dc9, 0x0001aac8659e371e, 0x000231f979bc6f9b}},
+ {{0x0001de4bcc2af3c6, 0x00061fc411a3eb86, 0x00053ed19ac12ec0, 0x000209dbc6b804e0, 0x000079bfa9b08792}}},
+{{{0x00003a51da300df4, 0x000467b52b561c72, 0x0004d5920210e590, 0x0000ca769e789685, 0x000038c77f684817}},
+ {{0x0001ed80a2d54245, 0x00070efec72a5e79, 0x00042151d42a822d, 0x0001b5ebb6d631e8, 0x0001ef4fb1594706}},
+ {{0x00065ee65b167bec, 0x000052da19b850a9, 0x0000408665656429, 0x0007ab39596f9a4c, 0x000575ee92a4a0bf}}},
+{{{0x000080908a182fcf, 0x0000532913b7ba98, 0x0003dccf78c385c3, 0x00068002dd5eaba9, 0x00043d4e7112cd3f}},
+ {{0x0006bc450aa4d801, 0x0004f4a6773b0ba8, 0x0006241b0b0ebc48, 0x00040d9c4f1d9315, 0x000200a1e7e382f5}},
+ {{0x0005b967eaf93ac5, 0x000360acca580a31, 0x0001c65fd5c6f262, 0x00071c7f15c2ecab, 0x000050eca52651e4}}},
+{{{0x00031ade453f0c9c, 0x0003dfee07737868, 0x000611ecf7a7d411, 0x0002637e6cbd64f6, 0x0004b0ee6c21c58f}},
+ {{0x0004397660e668ea, 0x0007c2a75692f2f5, 0x0003b29e7e6c66ef, 0x00072ba658bcda9a, 0x0006151c09fa131a}},
+ {{0x00055c0dfdf05d96, 0x000405569dcf475e, 0x00005c5c277498bb, 0x00018588d95dc389, 0x0001fef24fa800f0}}},
+{{{0x000653fb1aa73196, 0x000607faec8306fa, 0x0004e85ec83e5254, 0x00009f56900584fd, 0x000544d49292fc86}},
+ {{0x0002aff530976b86, 0x0000d85a48c0845a, 0x000796eb963642e0, 0x00060bee50c4b626, 0x00028005fe6c8340}},
+ {{0x0007ba9f34528688, 0x000284a20fb42d5d, 0x0003652cd9706ffe, 0x0006fd7baddde6b3, 0x00072e472930f316}}},
+{{{0x0005208c9781084f, 0x00016468a1dc24d2, 0x0007bf780ac540a8, 0x0001a67eced75301, 0x0005a9d2e8c2733a}},
+ {{0x0003f635d32a7627, 0x0000cbecacde00fe, 0x0003411141eaa936, 0x00021c1e42f3cb94, 0x0001fee7f000fe06}},
+ {{0x000305da03dbf7e5, 0x0001228699b7aeca, 0x00012a23b2936bc9, 0x0002a1bda56ae6e9, 0x00000f94051ee040}}},
+{{{0x00056b23c3d330b2, 0x00037608e360d1a6, 0x00010ae0f3c8722e, 0x000086d9b618b637, 0x00007d79c7e8beab}},
+ {{0x000793bb07af9753, 0x0001e7b6ecd4fafd, 0x00002c7b1560fb43, 0x0002296734cc5fb7, 0x00047b7ffd25dd40}},
+ {{0x0003fb9cbc08dd12, 0x00075c3dd85370ff, 0x00047f06fe2819ac, 0x0005db06ab9215ed, 0x0001c3520a35ea64}}},
+{{{0x000253a6bccba34a, 0x000427070433701a, 0x00020b8e58f9870e, 0x000337c861db00cc, 0x0001c3d05775d0ee}},
+ {{0x00006f40216bc059, 0x0003a2579b0fd9b5, 0x00071c26407eec8c, 0x00072ada4ab54f0b, 0x00038750c3b66d12}},
+ {{0x0006f1409422e51a, 0x0007856bbece2d25, 0x00013380a72f031c, 0x00043e1080a7f3ba, 0x0000621e2c7d3304}}},
+{{{0x000060cc8259838d, 0x000038d3f35b95f3, 0x00056078c243a923, 0x0002de3293241bb2, 0x0000007d6097bd3a}},
+ {{0x00061796b0dbf0f3, 0x00073c2f9c32d6f5, 0x0006aa8ed1537ebe, 0x00074e92c91838f4, 0x0005d8e589ca1002}},
+ {{0x00071d950842a94b, 0x00046b11e5c7d817, 0x0005478bbecb4f0d, 0x0007c3054b0a1c5d, 0x0001583d7783c1cb}}},
+{{{0x0006a2ef5da27ae1, 0x00028aace02e9d9d, 0x00002459e965f0e8, 0x0007b864d3150933, 0x000252a5f2e81ed8}},
+ {{0x00034704cc9d28c7, 0x0003dee598b1f200, 0x00016e1c98746d9e, 0x0004050b7095afdf, 0x0004958064e83c55}},
+ {{0x000094265066e80d, 0x0000a60f918d61a5, 0x0000444bf7f30fde, 0x0001c40da9ed3c06, 0x000079c170bd843b}}},
+{{{0x0006ece464fa6fff, 0x0003cc40bca460a0, 0x0006e3a90afb8d0c, 0x0005801abca11228, 0x0006dec05e34ac9f}},
+ {{0x0006cd50c0d5d056, 0x0005b7606ae779ba, 0x00070fbd226bdda1, 0x0005661e53391ff9, 0x0006768c0d7317b8}},
+ {{0x000625e5f155c1b3, 0x0004f32f6f723296, 0x0005ac980105efce, 0x00017a61165eee36, 0x00051445e14ddcd5}}},
+{{{0x00002b4b3b144951, 0x0005688977966aea, 0x00018e176e399ffd, 0x0002e45c5eb4938b, 0x00013186f31e3929}},
+ {{0x000147ab2bbea455, 0x0001f240f2253126, 0x0000c3de9e314e89, 0x00021ea5a4fca45f, 0x00012e990086e4fd}},
+ {{0x000496b37fdfbb2e, 0x0003c2439d5f3e21, 0x00016e60fe7e6a4d, 0x0004d7ef889b621d, 0x00077b2e3f05d3e9}}},
+{{{0x0007a9c59c2ec4de, 0x0007e9f09e79652d, 0x0006a3e422f22d86, 0x0002ae8e3b836c8b, 0x00063b795fc7ad32}},
+ {{0x0000639c12ddb0a4, 0x0006180490cd7ab3, 0x0003f3918297467c, 0x00074568be1781ac, 0x00007a195152e095}},
+ {{0x00068f02389e5fc8, 0x000059f1bc877506, 0x000504990e410cec, 0x00009bd7d0feaee2, 0x0003e8fe83d032f0}}},
+{{{0x000315b90570a294, 0x00060ce108a925f1, 0x0006eff61253c909, 0x000003ef0e2d70b0, 0x00075ba3b797fac4}},
+ {{0x00004c8de8efd13c, 0x0001c67c06e6210e, 0x000183378f7f146a, 0x00064352ceaed289, 0x00022d60899a6258}},
+ {{0x0001dbc070cdd196, 0x00016d8fb1534c47, 0x000500498183fa2a, 0x00072f59c423de75, 0x0000904d07b87779}}},
+{{{0x00061fd4ddba919c, 0x0007d8e991b55699, 0x00061b31473cc76c, 0x0007039631e631d6, 0x00043e2143fbc1dd}},
+ {{0x00022d6648f940b9, 0x000197a5a1873e86, 0x000207e4c41a54bc, 0x0005360b3b4bd6d0, 0x0006240aacebaf72}},
+ {{0x0004749c5ba295a0, 0x00037946fa4b5f06, 0x000724c5ab5a51f1, 0x00065633789dd3f3, 0x00056bdaf238db40}}},
+{{{0x0002b9e3f53533eb, 0x0002add727a806c5, 0x00056955c8ce15a3, 0x00018c4f070a290e, 0x0001d24a86d83741}},
+ {{0x0000d36cc19d3bb2, 0x0006ec4470d72262, 0x0006853d7018a9ae, 0x0003aa3e4dc2c8eb, 0x00003aa31507e1e5}},
+ {{0x00047648ffd4ce1f, 0x00060a9591839e9d, 0x000424d5f38117ab, 0x00042cc46912c10e, 0x00043b261dc9aeb4}}},
+{{{0x00031e1988bb79bb, 0x0007b82f46b3bcab, 0x0000f7a8ce827b41, 0x0005e15816177130, 0x000326055cf5b276}},
+ {{0x00013d8b6c951364, 0x0004c0017e8f632a, 0x00053e559e53f9c4, 0x0004b20146886eea, 0x00002b4d5e242940}},
+ {{0x000155cb28d18df2, 0x0000c30d9ca11694, 0x0002090e27ab3119, 0x000208624e7a49b6, 0x00027a6c809ae5d3}}},
+{{{0x0006ebcd1f0db188, 0x00074ceb4b7d1174, 0x0007d56168df4f5c, 0x0000bf79176fd18a, 0x0002cb67174ff60a}},
+ {{0x0004270ac43d6954, 0x0002ed4cd95659a5, 0x00075c0db37528f9, 0x0002ccbcfd2c9234, 0x000221503603d8c2}},
+ {{0x0006cdf9390be1d0, 0x00008e519c7e2b3d, 0x000253c3d2a50881, 0x00021b41448e333d, 0x0007b1df4b73890f}}},
+{{{0x0002f2e0b3b2a224, 0x0000c56aa22c1c92, 0x0005fdec39f1b278, 0x0004c90af5c7f106, 0x00061fcef2658fc5}},
+ {{0x0006221807f8f58c, 0x0003fa92813a8be5, 0x0006da98c38d5572, 0x00001ed95554468f, 0x00068698245d352e}},
+ {{0x00015d852a18187a, 0x000270dbb59afb76, 0x0007db120bcf92ab, 0x0000e7a25d714087, 0x00046cf4c473daf0}}},
+{{{0x000525ed9ec4e5f9, 0x000022d20660684c, 0x0007972b70397b68, 0x0007a03958d3f965, 0x00029387bcd14eb5}},
+ {{0x00046ea7f1498140, 0x00070725690a8427, 0x0000a73ae9f079fb, 0x0002dd924461c62b, 0x0001065aae50d8cc}},
+ {{0x00044525df200d57, 0x0002d7f94ce94385, 0x00060d00c170ecb7, 0x00038b0503f3d8f0, 0x00069a198e64f1ce}}},
+{{{0x0002b2e0d91a78bc, 0x0003990a12ccf20c, 0x000141c2e11f2622, 0x0000dfcefaa53320, 0x0007369e6a92493a}},
+ {{0x00014434dcc5caed, 0x0002c7909f667c20, 0x00061a839d1fb576, 0x0004f23800cabb76, 0x00025b2697bd267f}},
+ {{0x00073ffb13986864, 0x0003282bb8f713ac, 0x00049ced78f297ef, 0x0006697027661def, 0x0001420683db54e4}}},
+{{{0x0000bd1e249dd197, 0x00000bcb1820568f, 0x0002eab1718830d4, 0x000396fd816997e6, 0x00060b63bebf508a}},
+ {{0x0006bb6fc1cc5ad0, 0x000532c8d591669d, 0x0001af794da86c33, 0x0000e0e9d86d24d3, 0x00031e83b4161d08}},
+ {{0x0000c7129e062b4f, 0x0001e526415b12fd, 0x000461a0fd27923d, 0x00018badf670a5b7, 0x00055cf1eb62d550}}},
+{{{0x0001101065c23d58, 0x0005aa1290338b0f, 0x0003157e9e2e7421, 0x0000ea712017d489, 0x000669a656457089}},
+ {{0x0006b5e37df58c52, 0x0003bcf33986c60e, 0x00044fb8835ceae7, 0x000099dec18e71a4, 0x0001a56fbaa62ba0}},
+ {{0x00066b505c9dc9ec, 0x000774ef86e35287, 0x0004d1d944c0955e, 0x00052e4c39d72b20, 0x00013c4836799c58}}},
+{{{0x00025d465ab3e1b9, 0x0000f8fe27ec2847, 0x0002d6e6dbf04f06, 0x0003038cfc1b3276, 0x00066f80c93a637b}},
+ {{0x0004fb6a5d8bd080, 0x00058ae34908589b, 0x0003954d977baf13, 0x000413ea597441dc, 0x00050bdc87dc8e5b}},
+ {{0x000537836edfe111, 0x0002be02357b2c0d, 0x0006dcee58c8d4f8, 0x0002d732581d6192, 0x0001dd56444725fd}}},
+{{{0x00047ff83362127d, 0x00008e39af82b1f4, 0x000488322ef27dab, 0x0001973738a2a1a4, 0x0000e645912219f7}},
+ {{0x0007e60008bac89a, 0x00023d5c387c1852, 0x00079e5df1f533a8, 0x0002e6f9f1c5f0cf, 0x0003a3a450f63a30}},
+ {{0x00072f31d8394627, 0x00007bd294a200f1, 0x000665be00e274c6, 0x00043de8f1b6368b, 0x000318c8d9393a9a}}},
+{{{0x00045d032afffe19, 0x00012fe49b6cde4e, 0x00021663bc327cf1, 0x00018a5e4c69f1dd, 0x000224c7c679a1d5}},
+ {{0x00069e29ab1dd398, 0x00030685b3c76bac, 0x000565cf37f24859, 0x00057b2ac28efef9, 0x000509a41c325950}},
+ {{0x00006edca6f925e9, 0x00068c8363e677b8, 0x00060cfa25e4fbcf, 0x0001c4c17609404e, 0x00005bff02328a11}}},
+{{{0x0002137023cae00b, 0x00015a3599eb26c6, 0x0000687221512b3c, 0x000253cb3a0824e9, 0x000780b8cc3fa2a4}},
+ {{0x0001a0dd0dc512e4, 0x00010894bf5fcd10, 0x00052949013f9c37, 0x0001f50fba4735c7, 0x000576277cdee01a}},
+ {{0x00038abc234f305f, 0x0007a280bbc103de, 0x000398a836695dfe, 0x0003d0af41528a1a, 0x0005ff418726271b}}},
+{{{0x0006080c1789db9d, 0x0004be7cef1ea731, 0x0002f40d769d8080, 0x00035f7d4c44a603, 0x000106a03dc25a96}},
+ {{0x000347e813b69540, 0x00076864c21c3cbb, 0x0001e049dbcd74a8, 0x0005b4d60f93749c, 0x00029d4db8ca0a0c}},
+ {{0x00050aaf333353d0, 0x0004b59a613cbb35, 0x000223dfc0e19a76, 0x00077d1e2bb2c564, 0x0004ab38a51052cb}}},
+{{{0x00042b256768d593, 0x0002e88459427b4f, 0x00002b3876630701, 0x00034878d405eae5, 0x00029cdd1adc088a}},
+ {{0x0007d1ef5fddc09c, 0x0007beeaebb9dad9, 0x000058d30ba0acfb, 0x0005cd92eab5ae90, 0x0003041c6bb04ed2}},
+ {{0x0002f2f9d956e148, 0x0006b3e6ad65c1fe, 0x0005b00972b79e5d, 0x00053d8d234c5daf, 0x000104bbd6814049}}},
+{{{0x0000fd3168f1ed67, 0x0001bb0de7784a3e, 0x00034bcb78b20477, 0x0000a4a26e2e2182, 0x0005be8cc57092a7}},
+ {{0x00059a5fd67ff163, 0x0003a998ead0352b, 0x000083c95fa4af9a, 0x0006fadbfc01266f, 0x000204f2a20fb072}},
+ {{0x00043b3d30ebb079, 0x000357aca5c61902, 0x0005b570c5d62455, 0x00030fb29e1e18c7, 0x0002570fb17c2791}}},
+{{{0x0002367f2cb61575, 0x0006c39ac04d87df, 0x0006d4958bd7e5bd, 0x000566f4638a1532, 0x0003dcb65ea53030}},
+ {{0x0006a9550bb8245a, 0x000511f20a1a2325, 0x00029324d7239bee, 0x0003343cc37516c4, 0x000241c5f91de018}},
+ {{0x0000172940de6caa, 0x0006045b2e67451b, 0x00056c07463efcb3, 0x0000728b6bfe6e91, 0x00008420edd5fcdf}}},
+{{{0x000720ab8362fa4a, 0x00029c4347cdd9bf, 0x0000e798ad5f8463, 0x0004fef18bcb0bfe, 0x0000d9a53efbc176}},
+ {{0x0000c34e04f410ce, 0x000344edc0d0a06b, 0x0006e45486d84d6d, 0x00044e2ecb3863f5, 0x00004d654f321db8}},
+ {{0x0005c116ddbdb5d5, 0x0006d1b4bba5abcf, 0x0004d28a48a5537a, 0x00056b8e5b040b99, 0x0004a7a4f2618991}}},
+{{{0x000718025fb15f95, 0x00068d6b8371fe94, 0x0003804448f7d97c, 0x00042466fe784280, 0x00011b50c4cddd31}},
+ {{0x0003b291af372a4b, 0x00060e3028fe4498, 0x0002267bca4f6a09, 0x000719eec242b243, 0x0004a96314223e0e}},
+ {{0x0000274408a4ffd6, 0x0007d382aedb34dd, 0x00040acfc9ce385d, 0x000628bb99a45b1e, 0x0004f4bce4dce6bc}}},
+{{{0x0007ce5ae2242584, 0x0002d25eb153d4e3, 0x0003a8f3d09ba9c9, 0x0000f3690d04eb8e, 0x00073fcdd14b71c0}},
+ {{0x0002616ec49d0b6f, 0x0001f95d8462e61c, 0x0001ad3e9b9159c6, 0x00079ba475a04df9, 0x0003042cee561595}},
+ {{0x00067079449bac41, 0x0005b79c4621484f, 0x00061069f2156b8d, 0x0000eb26573b10af, 0x000389e740c9a9ce}}},
+{{{0x0004b3ae34dcb9ce, 0x00047c691a15ac9f, 0x000318e06e5d400c, 0x0003c422d9f83eb1, 0x00061545379465a6}},
+ {{0x000578f6570eac28, 0x000644f2339c3937, 0x00066e47b7956c2c, 0x00034832fe1f55d0, 0x00025c425e5d6263}},
+ {{0x000606a6f1d7de6e, 0x0004f1c0c46107e7, 0x000229b1dcfbe5d8, 0x0003acc60a7b1327, 0x0006539a08915484}}},
+{{{0x00021f74c3d2f773, 0x000024b88d08bd3a, 0x0006e678cf054151, 0x00043631272e747c, 0x00011c5e4aac5cd1}},
+ {{0x0004dbd414bb4a19, 0x0007930849f1dbb8, 0x000329c5a466caf0, 0x0006c824544feb9b, 0x0000f65320ef019b}},
+ {{0x0006d1b1cafde0c6, 0x000462c76a303a90, 0x0003ca4e693cff9b, 0x0003952cd45786fd, 0x0004cabc7bdec330}}},
+{{{0x00069624089c0a2e, 0x0000075fc8e70473, 0x00013e84ab1d2313, 0x0002c10bedf6953b, 0x000639b93f0321c8}},
+ {{0x0007788f3f78d289, 0x0005942809b3f811, 0x0005973277f8c29c, 0x000010f93bc5fe67, 0x0007ee498165acb2}},
+ {{0x000508e39111a1c3, 0x000290120e912f7a, 0x0001cbf464acae43, 0x00015373e9576157, 0x0000edf493c85b60}}},
+{{{0x00048158599b5a68, 0x0001fd75bc41d5d9, 0x0002d9fc1fa95d3c, 0x0007da27f20eba11, 0x000403b92e3019d4}},
+ {{0x0007c4d284764113, 0x0007fefebf06acec, 0x00039afb7a824100, 0x0001b48e47e7fd65, 0x00004c00c54d1dfa}},
+ {{0x00022f818b465cf8, 0x000342901dff09b8, 0x00031f595dc683cd, 0x00037a57745fd682, 0x000355bb12ab2617}}},
+{{{0x000664cc7493bbf4, 0x00033d94761874e3, 0x0000179e1796f613, 0x0001890535e2867d, 0x0000f9b8132182ec}},
+ {{0x0001dac75a8c7318, 0x0003b679d5423460, 0x0006b8fcb7b6400e, 0x0006c73783be5f9d, 0x0007518eaf8e052a}},
+ {{0x000059c41b7f6c32, 0x00079e8706531491, 0x0006c747643cb582, 0x0002e20c0ad494e4, 0x00047c3871bbb175}}},
+{{{0x0004539771ec4f48, 0x0007b9318badca28, 0x00070f19afe016c5, 0x0004ee7bb1608d23, 0x00000b89b8576469}},
+ {{0x00065d50c85066b0, 0x0006167453361f7c, 0x00006ba3818bb312, 0x0006aff29baa7522, 0x00008fea02ce8d48}},
+ {{0x0005dd7668deead0, 0x0004096d0ba47049, 0x0006275997219114, 0x00029bda8a67e6ae, 0x000473829a74f75d}}},
+{{{0x0002da754679c418, 0x0003164c31be105a, 0x00011fac2b98ef5f, 0x00035a1aaf779256, 0x0002078684c4833c}},
+ {{0x0001533aad3902c9, 0x0001dde06b11e47b, 0x000784bed1930b77, 0x0001c80a92b9c867, 0x0006c668b4d44e4d}},
+ {{0x0000cf217a78820c, 0x00065024e7d2e769, 0x00023bb5efdda82a, 0x00019fd4b632d3c6, 0x0007411a6054f8a4}}},
+{{{0x00059d32b99dc86d, 0x0006ac075e22a9ac, 0x00030b9220113371, 0x00027fd9a638966e, 0x0007c136574fb813}},
+ {{0x0002e53d18b175b4, 0x00033e7254204af3, 0x0003bcd7d5a1c4c5, 0x0004c7c22af65d0f, 0x0001ec9a872458c3}},
+ {{0x0006a4d400a2509b, 0x000041791056971c, 0x000655d5866e075c, 0x0002302bf3e64df8, 0x0003add88a5c7cd6}}},
+{{{0x00015770b635dcf2, 0x00059ecd83f79571, 0x0002db461c0b7fbd, 0x00073a42a981345f, 0x000249929fccc879}},
+ {{0x000298d459393046, 0x00030bfecb3d90b8, 0x0003d9b8ea3df8d6, 0x0003900e96511579, 0x00061ba1131a406a}},
+ {{0x0000a0f116959029, 0x0005974fd7b1347a, 0x0001e0cc1c08edad, 0x000673bdf8ad1f13, 0x0005620310cbbd8e}}},
+{{{0x000193434934d643, 0x0000d4a2445eaa51, 0x0007d0708ae76fe0, 0x00039847b6c3c7e1, 0x00037676a2a4d9d9}},
+ {{0x0006b5f477e285d6, 0x0004ed91ec326cc8, 0x0006d6537503a3fd, 0x000626d3763988d5, 0x0007ec846f3658ce}},
+ {{0x00068f3f1da22ec7, 0x0006ed8039a2736b, 0x0002627ee04c3c75, 0x0006ea90a647e7d1, 0x0006daaf723399b9}}},
+{{{0x00027562eb3dbe47, 0x000291d7b4170be7, 0x0005d1ca67dfa8e1, 0x0002a88061f298a2, 0x0001304e9e71627d}},
+ {{0x000304bfacad8ea2, 0x000502917d108b07, 0x000043176ca6dd0f, 0x0005d5158f2c1d84, 0x0002b5449e58eb3b}},
+ {{0x000014d26adc9cfe, 0x0007f1691ba16f13, 0x0005e71828f06eac, 0x000349ed07f0fffc, 0x0004468de2d7c2dd}}},
+{{{0x0003355e9419469e, 0x0001847bb8ea8a37, 0x0001fe6588cf9b71, 0x0006b1c9d2db6b22, 0x0006cce7c6ffb44b}},
+ {{0x0002d8c6f86307ce, 0x0006286ba1850973, 0x0005e9dcb08444d4, 0x0001a96a543362b2, 0x0005da6427e63247}},
+ {{0x0004c688deac22ca, 0x0006f775c3ff0352, 0x000565603ee419bb, 0x0006544456c61c46, 0x00058f29abfe79f2}}},
+{{{0x0006cfab8de73e68, 0x0003e6efced4bd21, 0x0000056609500dbe, 0x00071b7824ad85df, 0x000577629c4a7f41}},
+ {{0x000264bf710ecdf6, 0x000708c58527896b, 0x00042ceae6c53394, 0x0004381b21e82b6a, 0x0006af93724185b4}},
+ {{0x0000024509c6a888, 0x0002696ab12e6644, 0x0000cca27f4b80d8, 0x0000c7c1f11b119e, 0x000701f25bb0caec}}},
+{{{0x0000b0f8e4616ced, 0x0001d3c4b50fb875, 0x0002f29673dc0198, 0x0005f4b0f1830ffa, 0x0002e0c92bfbdc40}},
+ {{0x0000f6d97cbec113, 0x0004ce97fb7c93a3, 0x000139835a11281b, 0x000728907ada9156, 0x000720a5bc050955}},
+ {{0x000709439b805a35, 0x0006ec48557f8187, 0x00008a4d1ba13a2c, 0x000076348a0bf9ae, 0x0000e9b9cbb144ef}}},
+{{{0x0002d48ffb5720ad, 0x00057b7f21a1df77, 0x0005550effba0645, 0x0005ec6a4098a931, 0x000221104eb3f337}},
+ {{0x00069bd55db1beee, 0x0006e14e47f731bd, 0x0001a35e47270eac, 0x00066f225478df8e, 0x000366d44191cfd3}},
+ {{0x00041743f2bc8c14, 0x000796b0ad8773c7, 0x00029fee5cbb689b, 0x000122665c178734, 0x0004167a4e6bc593}}},
+{{{0x00039d2876f62700, 0x000001cecd1d6c87, 0x0007f01a11747675, 0x0002350da5a18190, 0x0007938bb7e22552}},
+ {{0x00062665f8ce8fee, 0x00029d101ac59857, 0x0004d93bbba59ffc, 0x00017b7897373f17, 0x00034b33370cb7ed}},
+ {{0x000591ee8681d6cc, 0x00039db0b4ea79b8, 0x000202220f380842, 0x0002f276ba42e0ac, 0x0001176fc6e2dfe6}}},
+{{{0x00076cd05b9c619b, 0x00069654b0901695, 0x0007a53710b77f27, 0x00079a1ea7d28175, 0x00008fc3a4c677d5}},
+ {{0x0000e28949770eb8, 0x0005559e88147b72, 0x00035e1e6e63ef30, 0x00035b109aa7ff6f, 0x0001f6a3e54f2690}},
+ {{0x0004c199d30734ea, 0x0006c622cb9acc14, 0x0005660a55030216, 0x000068f1199f11fb, 0x0004f2fad0116b90}}},
+{{{0x0006b24194ae4e54, 0x0002230afded8897, 0x00023412617d5071, 0x0003d5d30f35969b, 0x000445484a4972ef}},
+ {{0x0004d91db73bb638, 0x00055f82538112c5, 0x0006d85a279815de, 0x000740b7b0cd9cf9, 0x0003451995f2944e}},
+ {{0x0002fcd09fea7d7c, 0x000296126b9ed22a, 0x0004a171012a05b2, 0x0001db92c74d5523, 0x00010b89ca604289}}},
+{{{0x000147499718289c, 0x0000a48a67e4c7ab, 0x00030fbc544bafe3, 0x0000c701315fe58a, 0x00020b878d577b75}},
+ {{0x000141be5a45f06e, 0x0005adb38becaea7, 0x0003fd46db41f2bb, 0x0006d488bbb5ce39, 0x00017d2d1d9ef0d4}},
+ {{0x0002af18073f3e6a, 0x00033aea420d24fe, 0x000298008bf4ff94, 0x0003539171db961e, 0x00072214f63cc65c}}},
+{{{0x00037f405307a693, 0x0002e5e66cf2b69c, 0x0005d84266ae9c53, 0x0005e4eb7de853b9, 0x0005fdf48c58171c}},
+ {{0x0005b7b9f43b29c9, 0x000149ea31eea3b3, 0x0004be7713581609, 0x0002d87960395e98, 0x0001f24ac855a154}},
+ {{0x000608328e9505aa, 0x00022182841dc49a, 0x0003ec96891d2307, 0x0002f363fff22e03, 0x00000ba739e2ae39}}},
+{{{0x000698de5c8790d6, 0x000268b8545beb25, 0x0006d2648b96fedf, 0x00047988ad1db07c, 0x00003283a3e67ad7}},
+ {{0x000426f5ea88bb26, 0x00033092e77f75c8, 0x0001a53940d819e7, 0x0001132e4f818613, 0x00072297de7d518d}},
+ {{0x00041dc7be0cb939, 0x0001b16c66100904, 0x0000a24c20cbc66d, 0x0004a2e9efe48681, 0x00005e1296846271}}},
+{{{0x0002eeb32d9c495a, 0x00079e25772f9750, 0x0006d747833bbf23, 0x0006cdd816d5d749, 0x00039c00c9c13698}},
+ {{0x0007bbc8242c4550, 0x00059a06103b35b7, 0x0007237e4af32033, 0x000726421ab3537a, 0x00078cf25d38258c}},
+ {{0x00066b8e31489d68, 0x000573857e10e2b5, 0x00013be816aa1472, 0x00041964d3ad4bf8, 0x000006b52076b3ff}}},
+{{{0x0000cfe19d95781c, 0x000312cc621c453c, 0x000145ace6da077c, 0x0000912bef9ce9b8, 0x0004d57e3443bc76}},
+ {{0x00037e16b9ce082d, 0x0001882f57853eb9, 0x0007d29eacd01fc5, 0x0002e76a59b5e715, 0x0007de2e9561a9f7}},
+ {{0x0000d4f4b6a55ecb, 0x0007ebb0bb733bce, 0x0007ba6a05200549, 0x0004f6ede4e22069, 0x0006b2a90af1a602}}},
+{{{0x0003f4fc9ae61e97, 0x0003bc07ebfa2d24, 0x0003b744b55cd4a0, 0x00072553b25721f3, 0x0005fd8f4e9d12d3}},
+ {{0x0003f3245bb2d80a, 0x0000e5f720f36efd, 0x0003b9cccf60c06d, 0x000084e323f37926, 0x000465812c8276c2}},
+ {{0x0003beb22a1062d9, 0x0006a7063b82c9a8, 0x0000a5a35dc197ed, 0x0003c80c06a53def, 0x00005b32c2b1cb16}}},
+{{{0x00005eccd24da8fd, 0x000580bbfdf07918, 0x0007e73586873c6a, 0x00074ceddf77f93e, 0x0003b5556a37b471}},
+ {{0x0004a42c7ad58195, 0x0005c8667e799eff, 0x00002e5e74c850a1, 0x0003f0db614e869a, 0x00031771a4856730}},
+ {{0x0000c524e14dd482, 0x000283457496c656, 0x0000ad6bcfb6cd45, 0x000375d1e8b02414, 0x0004fc079d27a733}}},
+{{{0x000138b089bf2f7f, 0x0004a05bfd34ea39, 0x000203914c925ef5, 0x0007497fffe04e3c, 0x000124567cecaf98}},
+ {{0x00048b440c86c50d, 0x000139929cca3b86, 0x0000f8f2e44cdf2f, 0x00068432117ba6b2, 0x000241170c2bae3c}},
+ {{0x0001ab860ac473b4, 0x0005c0227c86a7ff, 0x00071b12bfc24477, 0x000006a573a83075, 0x0003f8612966c870}}},
+{{{0x00020cc9782a0dde, 0x00065d4e3070aab3, 0x0007bc8e31547736, 0x00009ebfb1432d98, 0x000504aa77679736}},
+ {{0x0000fcfa36048d13, 0x00066e7133bbb383, 0x00064b42a8a45676, 0x0004ea6e4f9a85cf, 0x00026f57eee878a1}},
+ {{0x00032cd55687efb1, 0x0004448f5e2f6195, 0x000568919d460345, 0x000034c2e0ad1a27, 0x0004041943d9dba3}}},
+{{{0x0000eeba43ebcc96, 0x000384dd5395f878, 0x0001df331a35d272, 0x000207ecfd4af70e, 0x0001420a1d976843}},
+ {{0x00017743a26caadd, 0x00048c9156f9c964, 0x0007ef278d1e9ad0, 0x00000ce58ea7bd01, 0x00012d931429800d}},
+ {{0x00067799d337594f, 0x00001647548f6018, 0x00057fce5578f145, 0x000009220c142a71, 0x0001b4f92314359a}}},
+{{{0x0004109d89150951, 0x000225bd2d2d47cb, 0x00057cc080e73bea, 0x0006d71075721fcb, 0x000239b572a7f132}},
+ {{0x00073030a49866b1, 0x0002442be90b2679, 0x00077bd3d8947dcf, 0x0001fb55c1552028, 0x0005ff191d56f9a2}},
+ {{0x0006d433ac2d9068, 0x00072bf930a47033, 0x00064facf4a20ead, 0x000365f7a2b9402a, 0x000020c526a758f3}}},
+{{{0x000034f89ed8dbbc, 0x00073b8f948d8ef3, 0x000786c1d323caab, 0x00043bd4a9266e51, 0x00002aacc4615313}},
+ {{0x0001ef59f042cc89, 0x0003b1c24976dd26, 0x00031d665cb16272, 0x00028656e470c557, 0x000452cfe0a5602c}},
+ {{0x0000f7a0647877df, 0x0004e1cc0f93f0d4, 0x0007ec4726ef1190, 0x0003bdd58bf512f8, 0x0004cfb7d7b304b8}}},
+{{{0x00043d6cb89b75fe, 0x0003338d5b900e56, 0x00038d327d531a53, 0x0001b25c61d51b9f, 0x00014b4622b39075}},
+ {{0x000699c29789ef12, 0x00063beae321bc50, 0x000325c340adbb35, 0x000562e1a1e42bf6, 0x0005b1d4cbc434d3}},
+ {{0x00032615cc0a9f26, 0x00057711b99cb6df, 0x0005a69c14e93c38, 0x0006e88980a4c599, 0x0002f98f71258592}}},
+{{{0x0004a74cb50f9e56, 0x000531d1c2640192, 0x0000c03d9d6c7fd2, 0x00057ccd156610c1, 0x0003a6ae249d806a}},
+ {{0x0002ae444f54a701, 0x000615397afbc5c2, 0x00060d7783f3f8fb, 0x0002aa675fc486ba, 0x0001d8062e9e7614}},
+ {{0x0002da85a9907c5a, 0x0006b23721ec4caf, 0x0004d2d3a4683aa2, 0x0007f9c6870efdef, 0x000298b8ce8aef25}}},
+{{{0x00027953eff70cb2, 0x00054f22ae0ec552, 0x00029f3da92e2724, 0x000242ca0c22bd18, 0x00034b8a8404d5ce}},
+ {{0x000272ea0a2165de, 0x00068179ef3ed06f, 0x0004e2b9c0feac1e, 0x0003ee290b1b63bb, 0x0006ba6271803a7d}},
+ {{0x0006ecb583693335, 0x0003ec76bfdfb84d, 0x0002c895cf56a04f, 0x0006355149d54d52, 0x00071d62bdd465e1}}},
+{{{0x0003cc28d378df80, 0x00072141f4968ca6, 0x000407696bdb6d0d, 0x0005d271b22ffcfb, 0x00074d5f317f3172}},
+ {{0x0005b5dab1f75ef5, 0x0001e2d60cbeb9a5, 0x000527c2175dfe57, 0x00059e8a2b8ff51f, 0x0001c333621262b2}},
+ {{0x0007e55467d9ca81, 0x0006a5653186f50d, 0x0006b188ece62df1, 0x0004c66d36844971, 0x0004aebcc4547e9d}}},
+{{{0x0000071b276d01c9, 0x0000b0d8918e025e, 0x00075beea79ee2eb, 0x0003c92984094db8, 0x0005d88fbf95a3db}},
+ {{0x00008d9e7354b610, 0x00026b750b6dc168, 0x000162881e01acc9, 0x0007966df31d01a5, 0x000173bd9ddc9a1d}},
+ {{0x00000f1efe5872df, 0x0005da872318256a, 0x00059ceb81635960, 0x00018cf37693c764, 0x00006e1cd13b19ea}}},
+{{{0x0000ad516f166f23, 0x000263f56d57c81a, 0x00013422384638ca, 0x0001331ff1af0a50, 0x0003080603526e16}},
+ {{0x0003af629e5b0353, 0x000204f1a088e8e5, 0x00010efc9ceea82e, 0x000589863c2fa34b, 0x0007f3a6a1a8d837}},
+ {{0x000644395d3d800b, 0x0002b9203dbedefc, 0x0004b18ce656a355, 0x00003f3466bc182c, 0x00030d0fded2e513}}},
+{{{0x00014d1af21233b3, 0x0001de1989b39c0b, 0x00052669dc6f6f9e, 0x00043434b28c3fc7, 0x0000a9214202c099}},
+ {{0x0004971e68b84750, 0x00052ccc9779f396, 0x0003e904ae8255c8, 0x0004ecae46f39339, 0x0004615084351c58}},
+ {{0x000019c0aeb9a02e, 0x0001a2c06995d792, 0x000664cbb1571c44, 0x0006ff0736fa80b2, 0x0003bca0d2895ca5}}},
+{{{0x000031bc3c5d62a4, 0x0007d9fe0f4c081e, 0x00043ed51467f22c, 0x0001e6cc0c1ed109, 0x0005631deddae8f1}},
+ {{0x00008eb69ecc01bf, 0x0005b4c8912df38d, 0x0005ea7f8bc2f20e, 0x000120e516caafaf, 0x0004ea8b4038df28}},
+ {{0x0005460af1cad202, 0x0000b4919dd0655d, 0x0007c4697d18c14c, 0x000231c890bba2a4, 0x00024ce0930542ca}}},
+{{{0x000090f5fd06c106, 0x0006abb1021e43fd, 0x000232bcfad711a0, 0x0003a5c13c047f37, 0x00041d4e3c28a06d}},
+ {{0x0007a155fdf30b85, 0x0001c6c6e5d487f9, 0x00024be1134bdc5a, 0x0001405970326f32, 0x000549928a7324f4}},
+ {{0x000632a763ee1a2e, 0x0006fa4bffbd5e4d, 0x0005fd35a6ba4792, 0x0007b55e1de99de8, 0x000491b66dec0dcf}}},
+{{{0x0005b13dc7ea32a7, 0x00018fc2db73131e, 0x0007e3651f8f57e3, 0x00025656055fa965, 0x00008f338d0c85ee}},
+ {{0x00004a8ed0da64a1, 0x0005ecfc45096ebe, 0x0005edee93b488b2, 0x0005b3c11a51bc8f, 0x0004cf6b8b0b7018}},
+ {{0x0003a821991a73bd, 0x00003be6418f5870, 0x0001ddc18eac9ef0, 0x00054ce09e998dc2, 0x000530d4a82eb078}}},
+{{{0x00043630e1f94825, 0x0004d1956a6b4009, 0x000213fe2df8b5e0, 0x00005ce3a41191e6, 0x00065ea753f10177}},
+ {{0x000173456c9abf9e, 0x0007892015100dad, 0x00033ee14095fecb, 0x0006ad95d67a0964, 0x0000db3e7e00cbfb}},
+ {{0x0006fc3ee2096363, 0x0007ec36b96d67ac, 0x000510ec6a0758b1, 0x0000ed87df022109, 0x00002a4ec1921e1a}}},
+{{{0x0006259a3b24b8a2, 0x000188b5f4170b9c, 0x000681c0dee15deb, 0x0004dfe665f37445, 0x0003d143c5112780}},
+ {{0x00006162f1cf795f, 0x000324ddcafe5eb9, 0x000018d5e0463218, 0x0007e78b9092428e, 0x00036d12b5dec067}},
+ {{0x0005279179154557, 0x00039f8f0741424d, 0x00045e6eb357923d, 0x00042c9b5edb746f, 0x0002ef517885ba82}}},
+{{{0x0007974e8c58aedc, 0x0007757e083488c6, 0x000601c62ae7bc8b, 0x00045370c2ecab74, 0x0002f1b78fab143a}},
+ {{0x0006bffb305b2f51, 0x0005b112b2d712dd, 0x00035774974fe4e2, 0x00004af87a96e3a3, 0x00057968290bb3a0}},
+ {{0x0002b8430a20e101, 0x0001a49e1d88fee3, 0x00038bbb47ce4d96, 0x0001f0e7ba84d437, 0x0007dc43e35dc2aa}}},
+{{{0x00066665887dd9c3, 0x000629760a6ab0b2, 0x000481e6c7243e6c, 0x000097e37046fc77, 0x0007ef72016758cc}},
+ {{0x00002a5c273e9718, 0x00032bc9dfb28b4f, 0x00048df4f8d5db1a, 0x00054c87976c028f, 0x000044fb81d82d50}},
+ {{0x000718c5a907e3d9, 0x0003b9c98c6b383b, 0x000006ed255eccdc, 0x0006976538229a59, 0x0007f79823f9c30d}}},
+{{{0x0004d239a3b513e8, 0x00029723f51b1066, 0x000642f4cf04d9c3, 0x0004da095aa09b7a, 0x0000a4e0373d784d}},
+ {{0x00041ff068f587ba, 0x0001c00a191bcd53, 0x0007b56f9c209e25, 0x0003781e5fccaabe, 0x00064a9b0431c06d}},
+ {{0x0003d6a15b7d2919, 0x00041aa75046a5d6, 0x000691751ec2d3da, 0x00023638ab6721c4, 0x000071a7d0ace183}}},
+{{{0x00072daac887ba0b, 0x0000b7f4ac5dda60, 0x0003bdda2c0498a4, 0x00074e67aa180160, 0x0002c3bcc7146ea7}},
+ {{0x0004355220e14431, 0x0000e1362a283981, 0x0002757cd8359654, 0x0002e9cd7ab10d90, 0x0007c69bcf761775}},
+ {{0x0000d7eb04e8295f, 0x0004a5ea1e6fa0fe, 0x00045e635c436c60, 0x00028ef4a8d4d18b, 0x0006f5a9a7322aca}}},
+{{{0x0001000c2f41c6c5, 0x0000219fdf737174, 0x000314727f127de7, 0x0007e5277d23b81e, 0x000494e21a2e147a}},
+ {{0x0001d4eba3d944be, 0x0000100f15f3dce5, 0x00061a700e367825, 0x0005922292ab3d23, 0x00002ab9680ee8d3}},
+ {{0x00048a85dde50d9a, 0x0001c1f734493df4, 0x00047bdb64866889, 0x00059a7d048f8eec, 0x0006b5d76cbea46b}}},
+{{{0x0007556cec0cd994, 0x0005eb9a03b7510a, 0x00050ad1dd91cb71, 0x0001aa5780b48a47, 0x0000ae333f685277}},
+ {{0x000141171e782522, 0x0006806d26da7c1f, 0x0003f31d1bc79ab9, 0x00009f20459f5168, 0x00016fb869c03dd3}},
+ {{0x0006199733b60962, 0x00069b157c266511, 0x00064740f893f1ca, 0x00003aa408fbf684, 0x0003f81e38b8f70d}}},
+{{{0x00010fcc7ed9affe, 0x0004248cb0e96ff2, 0x0004311c115172e2, 0x0004c9d41cbf6925, 0x00050510fc104f50}},
+ {{0x00037f355f17c824, 0x00007ae85334815b, 0x0007e3abddd2e48f, 0x00061eeabe1f45e5, 0x0000ad3e2d34cded}},
+ {{0x00040fc5336e249d, 0x0003386639fb2de1, 0x0007bbf871d17b78, 0x00075f796b7e8004, 0x000127c158bf0fa1}}},
+{{{0x00017c422e9879a2, 0x00028a5946c8fec3, 0x00053ab32e912b77, 0x0007b44da09fe0a5, 0x000354ef87d07ef4}},
+ {{0x00028fc4ae51b974, 0x00026e89bfd2dbd4, 0x0004e122a07665cf, 0x0007cab1203405c3, 0x0004ed82479d167d}},
+ {{0x0003b52260c5d975, 0x00079d6836171fdc, 0x0007d994f140d4bb, 0x0001b6c404561854, 0x000302d92d205392}}},
+{{{0x0003c1a2bca4283d, 0x00023430c7bb2f02, 0x0001a3ea1bb58bc2, 0x0007265763de5c61, 0x00010e5d3b76f1ca}},
+ {{0x00046fb6e4e0f177, 0x00053497ad5265b7, 0x0001ebdba01386fc, 0x0000302f0cb36a3c, 0x0000edc5f5eb426d}},
+ {{0x0003bfd653da8e67, 0x000584953ec82a8a, 0x00055e288fa7707b, 0x0005395fc3931d81, 0x00045b46c51361cb}}},
+{{{0x00002abf314f7fa1, 0x000391d19e8a1528, 0x0006a2fa13895fc7, 0x00009d8eddeaa591, 0x0002177bfa36dcb7}},
+ {{0x00054ddd8a7fe3e4, 0x0002cecc41c619d3, 0x00043a6562ac4d91, 0x0004efa5aca7bdd9, 0x0005c1c0aef32122}},
+ {{0x00001bbcfa79db8f, 0x0003d84beb3666e1, 0x00020c921d812204, 0x0002dd843d3b32ce, 0x0004ae619387d8ab}}},
+{{{0x0003f6aa5344a32e, 0x00069683680f11bb, 0x00004c3581f623aa, 0x000701af5875cba5, 0x0001a00d91b17bf3}},
+ {{0x00017e44985bfb83, 0x00054e32c626cc22, 0x000096412ff38118, 0x0006b241d61a246a, 0x00075685abe5ba43}},
+ {{0x00060933eb61f2b2, 0x0005193fe92a4dd2, 0x0003d995a550f43e, 0x0003556fb93a883d, 0x000135529b623b0e}}},
+{{{0x0000dbd7add1d518, 0x000119f823e2231e, 0x000451d66e5e7de2, 0x000500c39970f838, 0x00079b5b81a65ca3}},
+ {{0x000716bce22e83fe, 0x00033d0130b83eb8, 0x0000952abad0afac, 0x000309f64ed31b8a, 0x0005972ea051590a}},
+ {{0x0004ac20dc8f7811, 0x00029589a9f501fa, 0x0004d810d26a6b4a, 0x0005ede00d96b259, 0x0004f7e9c95905f3}}},
+{{{0x00074bbc5781302e, 0x00073135bb81ec4c, 0x0007ef671b61483c, 0x0007264614ccd729, 0x00031993ad92e638}},
+ {{0x0000443d355299fe, 0x00039b7d7d5aee39, 0x000692519a2f34ec, 0x0006e4404924cf78, 0x0001942eec4a144a}},
+ {{0x00045319ae234992, 0x0002219d47d24fb5, 0x0004f04488b06cf6, 0x00053aaa9e724a12, 0x0002a0a65314ef9c}}},
+{{{0x0007937ff7f927c2, 0x0000c2fa14c6a5b6, 0x000556bddb6dd07c, 0x0006f6acc179d108, 0x0004cf6e218647c2}},
+ {{0x00061acd3c1c793a, 0x00058b46b78779e6, 0x0003369aacbe7af2, 0x000509b0743074d4, 0x000055dc39b6dea1}},
+ {{0x0001227cc28d5bb6, 0x00078ee9bff57623, 0x00028cb2241f893a, 0x00025b541e3c6772, 0x000121a307710aa2}}},
+{{{0x00035d5e9f034a97, 0x000126069785bc9b, 0x0005474ec7854ff0, 0x000296a302a348ca, 0x000333fc76c7a40e}},
+ {{0x0001713ec77483c9, 0x0006f70572d5facb, 0x00025ef34e22ff81, 0x00054d944f141188, 0x000527bb94a6ced3}},
+ {{0x0005992a995b482e, 0x00078dc707002ac7, 0x0005936394d01741, 0x0004fba4281aef17, 0x0006b89069b20a7a}}},
+{{{0x0002a0416270220d, 0x00075f248b69d025, 0x0001cbbc16656a27, 0x0005b9ffd6e26728, 0x00023bc2103aa73e}},
+ {{0x0002fa8cb5c7db77, 0x000718e6982aa810, 0x00039e95f81a1a1b, 0x0005e794f3646cfb, 0x0000473d308a7639}},
+ {{0x0006792603589e05, 0x000248db9892595d, 0x000006a53cad2d08, 0x00020d0150f7ba73, 0x000102f73bfde043}}},
+{{{0x0000b9ab7f5745c6, 0x0005caf0f8d21d63, 0x0007debea408ea2b, 0x00009edb93896d16, 0x00036597d25ea5c0}},
+ {{0x0004dae0b5511c9a, 0x0005257fffe0d456, 0x00054108d1eb2180, 0x000096cc0f9baefa, 0x0003f6bd725da4ea}},
+ {{0x00058d7b106058ac, 0x0003cdf8d20bee69, 0x00000a4cb765015e, 0x00036832337c7cc9, 0x0007b7ecc19da60d}}},
+{{{0x0002373c695c690d, 0x0004c0c8520dcf18, 0x000384af4b7494b9, 0x0004ab4a8ea22225, 0x0004235ad7601743}},
+ {{0x00064a51a77cfa9b, 0x00029cf470ca0db5, 0x0004b60b6e0898d9, 0x00055d04ddffe6c7, 0x00003bedc661bf5c}},
+ {{0x0000cb0d078975f5, 0x000292313e530c4b, 0x00038dbb9124a509, 0x000350d0655a11f1, 0x0000e7ce2b0cdf06}}},
+{{{0x0004643ac48c85a3, 0x0006878c2735b892, 0x0003a53523f4d877, 0x0003a504ed8bee9d, 0x000666e0a5d8fb46}},
+ {{0x0006fedfd94b70f9, 0x0002383f9745bfd4, 0x0004beae27c4c301, 0x00075aa4416a3f3f, 0x000615256138aece}},
+ {{0x0003f64e4870cb0d, 0x00061548b16d6557, 0x0007a261773596f3, 0x0007724d5f275d3a, 0x0007f0bc810d514d}}},
+{{{0x00006ba426f4136f, 0x0003cafc0606b720, 0x000518f0a2359cda, 0x0005fae5e46feca7, 0x0000d1f8dbcf8eed}},
+ {{0x00049dad737213a0, 0x000745dee5d31075, 0x0007b1a55e7fdbe2, 0x0005ba988f176ea1, 0x0001d3a907ddec5a}},
+ {{0x000693313ed081dc, 0x0005b0a366901742, 0x00040c872ca4ca7e, 0x0006f18094009e01, 0x00000011b44a31bf}}},
+{{{0x0007a06c3fc66c0c, 0x0001c9bac1ba47fb, 0x00023935c575038e, 0x0003f0bd71c59c13, 0x0003ac48d916e835}},
+ {{0x00061f696a0aa75c, 0x00038b0a57ad42ca, 0x0001e59ab706fdc9, 0x00001308d46ebfcd, 0x00063d988a2d2851}},
+ {{0x00020753afbd232e, 0x00071fbb1ed06002, 0x00039cae47a4af3a, 0x0000337c0b34d9c2, 0x00033fad52b2368a}}},
+{{{0x000649c6c5e41e16, 0x00060667eee6aa80, 0x0004179d182be190, 0x000653d9567e6979, 0x00016c0f429a256d}},
+ {{0x0004c8d0c422cfe8, 0x000760b4275971a5, 0x0003da95bc1cad3d, 0x0000f151ff5b7376, 0x0003cc355ccb90a7}},
+ {{0x00069443903e9131, 0x00016f4ac6f9dd36, 0x0002ea4912e29253, 0x0002b4643e68d25d, 0x000631eaf426bae7}}},
+{{{0x00010410da66fe9f, 0x00024d82dcb4d67d, 0x0003e6fe0e17752d, 0x0004dade1ecbb08f, 0x0005599648b1ea91}},
+ {{0x000175b9a3700de8, 0x00077c5f00aa48fb, 0x0003917785ca0317, 0x00005aa9b2c79399, 0x000431f2c7f665f8}},
+ {{0x00026344858f7b19, 0x0005f43d4a295ac0, 0x000242a75c52acd4, 0x0005934480220d10, 0x0007b04715f91253}}},
+{{{0x0005bd28acf6ae43, 0x00016fab8f56907d, 0x0007acb11218d5f2, 0x00041fe02023b4db, 0x00059b37bf5c2f65}},
+ {{0x0006c280c4e6bac6, 0x0003ada3b361766e, 0x00042fe5125c3b4f, 0x000111d84d4aac22, 0x00048d0acfa57cde}},
+ {{0x000726e47dabe671, 0x0002ec45e746f6c1, 0x0006580e53c74686, 0x0005eda104673f74, 0x00016234191336d3}}},
+{{{0x000499def6267ff6, 0x00076e858108773c, 0x000693cac5ddcb29, 0x00000311d00a9ff4, 0x0002cdfdfecd5d05}},
+ {{0x00019cd61ff38640, 0x000060c6c4b41ba9, 0x00075cf70ca7366f, 0x000118a8f16c011e, 0x0004a25707a203b9}},
+ {{0x0007668a53f6ed6a, 0x000303ba2e142556, 0x0003880584c10909, 0x0004fe20000a261d, 0x0005721896d248e4}}},
+{{{0x00065517fd181bae, 0x0003e5772c76816d, 0x000019189640898a, 0x0001ed2a84de7499, 0x000578edd74f63c1}},
+ {{0x00055091a1d0da4e, 0x0004f6bfc7c1050b, 0x00064e4ecd2ea9be, 0x00007eb1f28bbe70, 0x00003c935afc4b03}},
+ {{0x000276c6492b0c3d, 0x00009bfc40bf932e, 0x000588e8f11f330b, 0x0003d16e694dc26e, 0x0003ec2ab590288c}}},
+{{{0x0000d27be4d87bb9, 0x00056c27235db434, 0x00072e6e0ea62d37, 0x0005674cd06ee839, 0x0002dd5c25a200fc}},
+ {{0x00013a09ae32d1cb, 0x0003e81eb85ab4e4, 0x00007aaca43cae1f, 0x00062f05d7526374, 0x0000e1bf66c6adba}},
+ {{0x0003d5e9792c887e, 0x000319724dabbc55, 0x0002b97c78680800, 0x0007afdfdd34e6dd, 0x000730548b35ae88}}},
+{{{0x000551a3cba8b8ee, 0x0003b6422be2d886, 0x000630e1419689bc, 0x0004653b07a7a955, 0x0003043443b411db}},
+ {{0x0003094ba1d6e334, 0x0006e126a7e3300b, 0x000089c0aefcfbc5, 0x0002eea11f836583, 0x000585a2277d8784}},
+ {{0x00025f8233d48962, 0x0006bd8f04aff431, 0x0004f907fd9a6312, 0x00040fd3c737d29b, 0x0007656278950ef9}}},
+{{{0x0003cf59d51fc8c0, 0x0007a0a0d6de4718, 0x00055c3a3e6fb74b, 0x000353135f884fd5, 0x0003f4160a8c1b84}},
+ {{0x000073a3ea86cf9d, 0x0006e0e2abfb9c2e, 0x00060e2a38ea33ee, 0x00030b2429f3fe18, 0x00028bbf484b613f}},
+ {{0x00012f5c6f136c7c, 0x0000fedba237de4c, 0x000779bccebfab44, 0x0003aea93f4d6909, 0x0001e79cb358188f}}},
+{{{0x000436c3eef7e3f1, 0x0007ffd3c21f0026, 0x0003e77bf20a2da9, 0x000418bffc8472de, 0x00065d7951b3a3b3}},
+ {{0x000153d8f5e08181, 0x00008533bbdb2efd, 0x0001149796129431, 0x00017a6e36168643, 0x000478ab52d39d1f}},
+ {{0x0006a4d39252d159, 0x000790e35900ecd4, 0x00030725bf977786, 0x00010a5c1635a053, 0x00016d87a411a212}}},
+{{{0x00057e5a42066215, 0x0001a18b44983677, 0x0003e652de1e6f8f, 0x0006532be02ed8eb, 0x00028f87c8165f38}},
+ {{0x0004d5e2d54e0583, 0x0002e5d7b33f5f74, 0x0003a5de3f887ebf, 0x0006ef24bd6139b7, 0x0001f990b577a5a6}},
+ {{0x00044ead1be8f7d6, 0x0005759d4f31f466, 0x0000378149f47943, 0x00069f3be32b4f29, 0x00045882fe1534d6}}},
+{{{0x0001345d757983d6, 0x000222f54234cccd, 0x0001784a3d8adbb4, 0x00036ebeee8c2bcc, 0x000688fe5b8f626f}},
+ {{0x00049929943c6fe4, 0x0004347072545b15, 0x0003226bced7e7c5, 0x00003a134ced89df, 0x0007dcf843ce405f}},
+ {{0x0000d6484a4732c0, 0x0007b94ac6532d92, 0x0005771b8754850f, 0x00048dd9df1461c8, 0x0006739687e73271}}},
+{{{0x00002014385675a6, 0x0006155fb53d1def, 0x00037ea32e89927c, 0x000059a668f5a82e, 0x00046115aba1d4dc}},
+ {{0x0005cc9dc80c1ac0, 0x000683671486d4cd, 0x00076f5f1a5e8173, 0x0006d5d3f5f9df4a, 0x0007da0b8f68d7e7}},
+ {{0x00071953c3b5da76, 0x0006642233d37a81, 0x0002c9658076b1bd, 0x0005a581e63010ff, 0x0005a5f887e83674}}},
+{{{0x000301cf70a13d11, 0x0002a6a1ba1891ec, 0x0002f291fb3f3ae0, 0x00021a7b814bea52, 0x0003669b656e44d1}},
+ {{0x000628d3a0a643b9, 0x00001cd8640c93d2, 0x0000b7b0cad70f2c, 0x0003864da98144be, 0x00043e37ae2d5d1c}},
+ {{0x00063f06eda6e133, 0x000233342758070f, 0x000098e0459cc075, 0x0004df5ead6c7c1b, 0x0006a21e6cd4fd5e}}},
+{{{0x0006170a3046e65f, 0x0005401a46a49e38, 0x00020add5561c4a8, 0x0007abb4edde9e46, 0x000586bf9f1a195f}},
+ {{0x000129126699b2e3, 0x0000ee11a2603de8, 0x00060ac2f5c74c21, 0x00059b192a196808, 0x00045371b07001e8}},
+ {{0x0003088d5ef8790b, 0x00038c2126fcb4db, 0x000685bae149e3c3, 0x0000bcd601a4e930, 0x0000eafb03790e52}}},
+{{{0x000555c13748042f, 0x0004d041754232c0, 0x000521b430866907, 0x0003308e40fb9c39, 0x000309acc675a02c}},
+ {{0x0000805e0f75ae1d, 0x000464cc59860a28, 0x000248e5b7b00bef, 0x0005d99675ef8f75, 0x00044ae3344c5435}},
+ {{0x000289b9bba543ee, 0x0003ab592e28539e, 0x00064d82abcdd83a, 0x0003c78ec172e327, 0x00062d5221b7f946}}},
+{{{0x0004299c18d0936d, 0x0005914183418a49, 0x00052a18c721aed5, 0x0002b151ba82976d, 0x0005c0efde4bc754}},
+ {{0x0005d4263af77a3c, 0x00023fdd2289aeb0, 0x0007dc64f77eb9ec, 0x00001bd28338402c, 0x00014f29a5383922}},
+ {{0x00017edc25b2d7f5, 0x00037336a6081bee, 0x0007b5318887e5c3, 0x00049f6d491a5be1, 0x0005e72365c7bee0}}},
+{{{0x0003fc074571217f, 0x0003a0d29b2b6aeb, 0x00006478ccdde59d, 0x00055e4d051bddfa, 0x00077f1104c47b4e}},
+ {{0x000339062f08b33e, 0x0004bbf3e657cfb2, 0x00067af7f56e5967, 0x0004dbd67f9ed68f, 0x00070b20555cb734}},
+ {{0x000113c555112c4c, 0x0007535103f9b7ca, 0x000140ed1d9a2108, 0x00002522333bc2af, 0x0000e34398f4a064}}},
+{{{0x000522d93ecebde8, 0x000024f045e0f6cf, 0x00016db63426cfa1, 0x0001b93a1fd30fd8, 0x0005e5405368a362}},
+ {{0x00030b093e4b1928, 0x0001ce7e7ec80312, 0x0004e575bdf78f84, 0x00061f7a190bed39, 0x0006f8aded6ca379}},
+ {{0x0000123dfdb7b29a, 0x0004344356523c68, 0x00079a527921ee5f, 0x00074bfccb3e817e, 0x000780de72ec8d3d}}},
+{{{0x00028545089ae7bc, 0x0001e38fe9a0c15c, 0x00012046e0e2377b, 0x0006721c560aa885, 0x0000eb28bf671928}},
+ {{0x0007eaf300f42772, 0x0005455188354ce3, 0x0004dcca4a3dcbac, 0x0003d314d0bfebcb, 0x0001defc6ad32b58}},
+ {{0x0003be1aef5195a7, 0x0006f22f62bdb5eb, 0x00039768b8523049, 0x00043394c8fbfdbd, 0x000467d201bf8dd2}}},
+{{{0x0006919a74ef4fad, 0x00059ed4611452bf, 0x000691ec04ea09ef, 0x0003cbcb2700e984, 0x00071c43c4f5ba3c}},
+ {{0x0006f4bd567ae7a9, 0x00065ac89317b783, 0x00007d3b20fd8932, 0x000000f208326916, 0x0002ef9c5a5ba384}},
+ {{0x00056df6fa9e74cd, 0x00079c95e4cf56df, 0x0007be643bc609e2, 0x000149c12ad9e878, 0x0005a758ca390c5f}}},
+{{{0x00072710d9462495, 0x00025aafaa007456, 0x0002d21f28eaa31b, 0x00017671ea005fd0, 0x0002dbae244b3eb7}},
+ {{0x0000918b1d61dc94, 0x0000d350260cd19c, 0x0007a2ab4e37b4d9, 0x00021fea735414d7, 0x0000a738027f639d}},
+ {{0x00074a2f57ffe1cc, 0x0001bc3073087301, 0x0007ec57f4019c34, 0x00034e082e1fa524, 0x0002698ca635126a}}},
+{{{0x0005318832b0ba78, 0x0006f24b9ff17cec, 0x0000a47f30e060c7, 0x00058384540dc8d0, 0x0001fb43dcc49cae}},
+ {{0x0005702f5e3dd90e, 0x00031c9a4a70c5c7, 0x000136a5aa78fc24, 0x0001992f3b9f7b01, 0x0003c004b0c4afa3}},
+ {{0x000146ac06f4b82b, 0x0004b500d89e7355, 0x0003351e1c728a12, 0x00010b9f69932fe3, 0x0006b43fd01cd1fd}}},
+{{{0x00075d4b4697c544, 0x00011be1fff7f8f4, 0x000119e16857f7e1, 0x00038a14345cf5d5, 0x0005a68d7105b52f}},
+ {{0x000742583e760ef3, 0x00073dc1573216b8, 0x0004ae48fdd7714a, 0x0004f85f8a13e103, 0x00073420b2d6ff0d}},
+ {{0x0004f6cb9e851e06, 0x000278c4471895e5, 0x0007efcdce3d64e4, 0x00064f6d455c4b4c, 0x0003db5632fea34b}}},
+{{{0x0006ee2bf75dd9d8, 0x0006c72ceb34be8d, 0x000679c9cc345ec7, 0x0007898df96898a4, 0x00004321adf49d75}},
+ {{0x000190b1829825d5, 0x0000e7d3513225c9, 0x0001c12be3b7abae, 0x00058777781e9ca6, 0x00059197ea495df2}},
+ {{0x00016019e4e55aae, 0x00074fc5f25d209c, 0x0004566a939ded0d, 0x00066063e716e0b7, 0x00045eafdc1f4d70}}},
+{{{0x000401858045d72b, 0x000459e5e0ca2d30, 0x000488b719308bea, 0x00056f4a0d1b32b5, 0x0005a5eebc80362d}},
+ {{0x00064624cfccb1ed, 0x000257ab8072b6c1, 0x0000120725676f0a, 0x0004a018d04e8eee, 0x0003f73ceea5d56d}},
+ {{0x0007bfd10a4e8dc6, 0x0007c899366736f4, 0x00055ebbeaf95c01, 0x00046db060903f8a, 0x0002605889126621}}},
+{{{0x000704a68360ff04, 0x0003cecc3cde8b3e, 0x00021cd5470f64ff, 0x0006abc18d953989, 0x00054ad0c2e4e615}},
+ {{0x00018e3cc676e542, 0x00026079d995a990, 0x00004a7c217908b2, 0x0001dc7603e6655a, 0x0000dedfa10b2444}},
+ {{0x000367d5b82b522a, 0x0000d3f4b83d7dc7, 0x0003067f4cdbc58d, 0x00020452da697937, 0x00062ecb2baa77a9}}},
+{{{0x0005795261152b3d, 0x0007a1dbbafa3cbd, 0x0005ad31c52588d5, 0x00045f3a4164685c, 0x0002e59f919a966d}},
+ {{0x00072836afb62874, 0x0000af3c2094b240, 0x0000c285297f357a, 0x0007cc2d5680d6e3, 0x00061913d5075663}},
+ {{0x00062d361a3231da, 0x00065284004e01b8, 0x000656533be91d60, 0x0006ae016c00a89f, 0x0003ddbc2a131c05}}},
+{{{0x00040ff9ce5ec54b, 0x00057185e261b35b, 0x0003e254540e70a9, 0x0001b5814003e3f8, 0x00078968314ac04b}},
+ {{0x000257a22796bb14, 0x0006f360fb443e75, 0x000680e47220eaea, 0x0002fcf2a5f10c18, 0x0005ee7fb38d8320}},
+ {{0x0005fdcb41446a8e, 0x0005286926ff2a71, 0x0000f231e296b3f6, 0x000684a357c84693, 0x00061d0633c9bca0}}},
+{{{0x00044935ffdb2566, 0x00012f016d176c6e, 0x0004fbb00f16f5ae, 0x0003fab78d99402a, 0x0006e965fd847aed}},
+ {{0x000328bcf8fc73df, 0x0003b4de06ff95b4, 0x00030aa427ba11a5, 0x0005ee31bfda6d9c, 0x0005b23ac2df8067}},
+ {{0x0002b953ee80527b, 0x00055f5bcdb1b35a, 0x00043a0b3fa23c66, 0x00076e07388b820a, 0x00079b9bbb9dd95d}}},
+{{{0x000355406a3126c2, 0x00050d1918727d76, 0x0006e5ea0b498e0e, 0x0000a3b6063214f2, 0x0005065f158c9fd2}},
+ {{0x00017dae8e9f7374, 0x000719f76102da33, 0x0005117c2a80ca8b, 0x00041a66b65d0936, 0x0001ba811460accb}},
+ {{0x000169fb0c429954, 0x00059aedd9ecee10, 0x00039916eb851802, 0x00057917555cc538, 0x0003981f39e58a4f}}},
+{{{0x00038a7559230a93, 0x00052c1cde8ba31f, 0x0002a4f2d4745a3d, 0x00007e9d42d4a28a, 0x00038dc083705acd}},
+ {{0x0005dfa56de66fde, 0x0000058809075908, 0x0006d3d8cb854a94, 0x0005b2f4e970b1e3, 0x00030f4452edcbc1}},
+ {{0x00052782c5759740, 0x00053f3397d990ad, 0x0003a939c7e84d15, 0x000234c4227e39e0, 0x000632d9a1a593f2}}},
+{{{0x00036b15b807cba6, 0x0003f78a9e1afed7, 0x0000a59c2c608f1f, 0x00052bdd8ecb81b7, 0x0000b24f48847ed4}},
+ {{0x0001fd11ed0c84a7, 0x000021b3ed2757e1, 0x00073e1de58fc1c6, 0x0005d110c84616ab, 0x0003a5a7df28af64}},
+ {{0x0002d4be511beac7, 0x0006bda4d99e5b9b, 0x00017e6996914e01, 0x0007b1f0ce7fcf80, 0x00034fcf74475481}}},
+{{{0x0007e04c789767ca, 0x0001671b28cfb832, 0x0007e57ea2e1c537, 0x0001fbaaef444141, 0x0003d3bdc164dfa6}},
+ {{0x00031dab78cfaa98, 0x0004e3216e5e54b7, 0x000249823973b689, 0x0002584984e48885, 0x0000119a3042fb37}},
+ {{0x0002d89ce8c2177d, 0x0006cd12ba182cf4, 0x00020a8ac19a7697, 0x000539fab2cc72d9, 0x00056c088f1ede20}}},
+{{{0x00053d1110a86e17, 0x0006416eb65f466d, 0x00041ca6235fce20, 0x0005c3fc8a99bb12, 0x00009674c6b99108}},
+ {{0x00035fac24f38f02, 0x0007d75c6197ab03, 0x00033e4bc2a42fa7, 0x0001c7cd10b48145, 0x000038b7ea483590}},
+ {{0x0006f82199316ff8, 0x00005d54f1a9f3e9, 0x0003bcc5d0bd274a, 0x0005b284b8d2d5ad, 0x0006e5e31025969e}}},
+{{{0x000462f587e593fb, 0x0003d94ba7ce362d, 0x000330f9b52667b7, 0x0005d45a48e0f00a, 0x00008f5114789a8d}},
+ {{0x0004fb0e63066222, 0x000130f59747e660, 0x000041868fecd41a, 0x0003105e8c923bc6, 0x0003058ad43d1838}},
+ {{0x00040ffde57663d0, 0x00071445d4c20647, 0x0002653e68170f7c, 0x00064cdee3c55ed6, 0x00026549fa4efe3d}}},
+{{{0x00055a461e6bf9d6, 0x00078eeef4b02e83, 0x0001d34f648c16cf, 0x00007fea2aba5132, 0x0001926e1dc6401e}},
+ {{0x00068549af3f666e, 0x00009e2941d4bb68, 0x0002e8311f5dff3c, 0x0006429ef91ffbd2, 0x0003a10dfe132ce3}},
+ {{0x00074e8aea17cea0, 0x0000c743f83fbc0f, 0x0007cb03c4bf5455, 0x00068a8ba9917e98, 0x0001fa1d01d861e5}}},
+{{{0x000055947d599832, 0x000346fe2aa41990, 0x0000164c8079195b, 0x000799ccfb7bba27, 0x000773563bc6a75c}},
+ {{0x0004ac00d1df94ab, 0x0003ba2101bd271b, 0x0007578988b9c4af, 0x0000f2bf89f49f7e, 0x00073fced18ee9a0}},
+ {{0x0001e90863139cb3, 0x0004f8b407d9a0d6, 0x00058e24ca924f69, 0x0007a246bbe76456, 0x0001f426b701b864}}},
+{{{0x0001264c41911c01, 0x000702f44584bdf9, 0x00043c511fc68ede, 0x0000482c3aed35f9, 0x0004e1af5271d31b}},
+ {{0x000635c891a12552, 0x00026aebd38ede2f, 0x00066dc8faddae05, 0x00021c7d41a03786, 0x0000b76bb1b3fa7e}},
+ {{0x0000c1f97f92939b, 0x00017a88956dc117, 0x0006ee005ef99dc7, 0x0004aa9172b231cc, 0x0007b6dd61eb772a}}},
+{{{0x0005c1e850f33d92, 0x0001ec119ab9f6f5, 0x0007f16f6de663e9, 0x0007a7d6cb16dec6, 0x000703e9bceaf1d2}},
+ {{0x0000abf9ab01d2c7, 0x0003880287630ae6, 0x00032eca045beddb, 0x00057f43365f32d0, 0x00053fa9b659bff6}},
+ {{0x0004c8e994885455, 0x0004ccb5da9cad82, 0x0003596bc610e975, 0x0007a80c0ddb9f5e, 0x000398d93e5c4c61}}},
+{{{0x0003d16733e248f3, 0x0000e2b7e14be389, 0x00042c0ddaf6784a, 0x000589ea1fc67850, 0x00053b09b5ddf191}},
+ {{0x00077c60d2e7e3f2, 0x0004061051763870, 0x00067bc4e0ecd2aa, 0x0002bb941f1373b9, 0x000699c9c9002c30}},
+ {{0x0006a7235946f1cc, 0x0006b99cbb2fbe60, 0x0006d3a5d6485c62, 0x0004839466e923c0, 0x00051caf30c6fcdd}}},
+{{{0x0003a7427674e00a, 0x0006142f4f7e74c1, 0x0004cc93318c3a15, 0x0006d51bac2b1ee7, 0x0005504aa292383f}},
+ {{0x0002f99a18ac54c7, 0x000398a39661ee6f, 0x000384331e40cde3, 0x0004cd15c4de19a6, 0x00012ae29c189f8e}},
+ {{0x0006c0cb1f0d01cf, 0x000187469ef5d533, 0x00027138883747bf, 0x0002f52ae53a90e8, 0x0005fd14fe958eba}}},
+{{{0x00042ddf2845ab2c, 0x0006214ffd3276bb, 0x00000b8d181a5246, 0x000268a6d579eb20, 0x000093ff26e58647}},
+ {{0x0002fe5ebf93cb8e, 0x000226da8acbe788, 0x00010883a2fb7ea1, 0x000094707842cf44, 0x0007dd73f960725d}},
+ {{0x000524fe68059829, 0x00065b75e47cb621, 0x00015eb0a5d5cc19, 0x00005209b3929d5a, 0x0002f59bcbc86b47}}},
+{{{0x00047d429917135f, 0x0003eacfa07af070, 0x0001deab46b46e44, 0x0007a53f3ba46cdf, 0x0005458b42e2e51a}},
+ {{0x0001d560b691c301, 0x0007f5bafce3ce08, 0x0004cd561614806c, 0x0004588b6170b188, 0x0002aa55e3d01082}},
+ {{0x000192e60c07444f, 0x0005ae8843a21daa, 0x0006d721910b1538, 0x0003321a95a6417e, 0x00013e9004a8a768}}},
+{{{0x00058845832fcedb, 0x000135cd7f0c6e73, 0x00053ffbdfe8e35b, 0x00022f195e06e55b, 0x00073937e8814bce}},
+ {{0x000600c9193b877f, 0x00021c1b8a0d7765, 0x000379927fb38ea2, 0x00070d7679dbe01b, 0x0005f46040898de9}},
+ {{0x00037116297bf48d, 0x00045a9e0d069720, 0x00025af71aa744ec, 0x00041af0cb8aaba3, 0x0002cf8a4e891d5e}}},
+{{{0x0003fd8707110f67, 0x00026f8716a92db2, 0x0001cdaa1b753027, 0x000504be58b52661, 0x0002049bd6e58252}},
+ {{0x0005487e17d06ba2, 0x0003872a032d6596, 0x00065e28c09348e0, 0x00027b6bb2ce40c2, 0x0007a6f7f2891d6a}},
+ {{0x0001fd8d6a9aef49, 0x0007cb67b7216fa1, 0x00067aff53c3b982, 0x00020ea610da9628, 0x0006011aadfc5459}}},
+{{{0x0007926dcf95f83c, 0x00042e25120e2bec, 0x00063de96df1fa15, 0x0004f06b50f3f9cc, 0x0006fc5cc1b0b62f}},
+ {{0x0006d0c802cbf890, 0x000141bfed554c7b, 0x0006dbb667ef4263, 0x00058f3126857edc, 0x00069ce18b779340}},
+ {{0x00075528b29879cb, 0x00079a8fd2125a3d, 0x00027c8d4b746ab8, 0x0000f8893f02210c, 0x00015596b3ae5710}}},
+{{{0x000739d23f9179a2, 0x000632fadbb9e8c4, 0x0007c8522bfe0c48, 0x0006ed0983ef5aa9, 0x0000d2237687b5f4}},
+ {{0x000731167e5124ca, 0x00017b38e8bbe13f, 0x0003d55b942f9056, 0x00009c1495be913f, 0x0003aa4e241afb6d}},
+ {{0x000138bf2a3305f5, 0x0001f45d24d86598, 0x0005274bad2160fe, 0x0001b6041d58d12a, 0x00032fcaa6e4687a}}},
+{{{0x00056e8dc57d9af5, 0x0005b3be17be4f78, 0x0003bf928cf82f4b, 0x00052e55600a6f11, 0x0004627e9cefebd6}},
+ {{0x0007a4732787ccdf, 0x00011e427c7f0640, 0x00003659385f8c64, 0x0005f4ead9766bfb, 0x000746f6336c2600}},
+ {{0x0002f345ab6c971c, 0x000653286e63e7e9, 0x00051061b78a23ad, 0x00014999acb54501, 0x0007b4917007ed66}}},
+{{{0x0005fb5cab84b064, 0x0002513e778285b0, 0x000457383125e043, 0x0006bda3b56e223d, 0x000122ba376f844f}},
+ {{0x00041b28dd53a2dd, 0x00037be85f87ea86, 0x00074be3d2a85e41, 0x0001be87fac96ca6, 0x0001d03620fe08cd}},
+ {{0x000232cda2b4e554, 0x0000422ba30ff840, 0x000751e7667b43f5, 0x0006261755da5f3e, 0x00002c70bf52b68e}}},
+{{{0x0007ec4b5d0b2fbb, 0x000200e910595450, 0x000742057105715e, 0x0002f07022530f60, 0x00026334f0a409ef}},
+ {{0x000532bf458d72e1, 0x00040f96e796b59c, 0x00022ef79d6f9da3, 0x000501ab67beca77, 0x0006b0697e3feb43}},
+ {{0x0000f04adf62a3c0, 0x0005e0edb48bb6d9, 0x0007c34aa4fbc003, 0x0007d74e4e5cac24, 0x0001cc37f43441b2}}},
+{{{0x0007565a5cc7324f, 0x00001ca0d5244a11, 0x000116b067418713, 0x0000a57d8c55edae, 0x0006c6809c103803}},
+ {{0x000656f1c9ceaeb9, 0x0007031cacad5aec, 0x0001308cd0716c57, 0x00041c1373941942, 0x0003a346f772f196}},
+ {{0x00055112e2da6ac8, 0x0006363d0a3dba5a, 0x000319c98ba6f40c, 0x0002e84b03a36ec7, 0x00005911b9f6ef7c}}},
+{{{0x00039983f5df0ebb, 0x0001ea2589959826, 0x0006ce638703cdd6, 0x0006311678898505, 0x0006b3cecf9aa270}},
+ {{0x0001acf3512eeaef, 0x0002639839692a69, 0x000669a234830507, 0x00068b920c0603d4, 0x000555ef9d1c64b2}},
+ {{0x000770ba3b73bd08, 0x00011475f7e186d4, 0x0000251bc9892bbc, 0x00024eab9bffcc5a, 0x000675f4de133817}}},
+{{{0x000452036b1782fc, 0x00002d95b07681c5, 0x0005901cf99205b2, 0x000290686e5eecb4, 0x00013d99df70164c}},
+ {{0x0007f6d93bdab31d, 0x0001f3aca5bfd425, 0x0002fa521c1c9760, 0x00062180ce27f9cd, 0x00060f450b882cd3}},
+ {{0x00035ec321e5c0ca, 0x00013ae337f44029, 0x0004008e813f2da7, 0x000640272f8e0c3a, 0x0001c06de9e55eda}}},
+{{{0x00077ad6a33ec4e2, 0x000717c5dc11d321, 0x0004a114559823e4, 0x000306ce50a1e2b1, 0x0004cf38a1fec2db}},
+ {{0x00052b40ff6d69aa, 0x00031b8809377ffa, 0x000536625cd14c2c, 0x000516af252e17d1, 0x00078096f8e7d32b}},
+ {{0x0002aa650dfa5ce7, 0x00054916a8f19415, 0x00000dc96fe71278, 0x00055f2784e63eb8, 0x000373cad3a26091}}},
+{{{0x0004634d82c9f57c, 0x0004249268a6d652, 0x0006336d687f2ff7, 0x0004fe4f4e26d9a0, 0x0000040f3d945441}},
+ {{0x0006a8fb89ddbbad, 0x00078c35d5d97e37, 0x00066e3674ef2cb2, 0x00034347ac53dd8f, 0x00021547eda5112a}},
+ {{0x0005e939fd5986d3, 0x00012a2147019bdf, 0x0004c466e7d09cb2, 0x0006fa5b95d203dd, 0x00063550a334a254}}},
+{{{0x0007d6edb569cf37, 0x00060194a5dc2ca0, 0x0005af59745e10a6, 0x0007a8f53e004875, 0x0003eea62c7daf78}},
+ {{0x0002584572547b49, 0x00075c58811c1377, 0x0004d3c637cc171b, 0x00033d30747d34e3, 0x00039a92bafaa7d7}},
+ {{0x0004c713e693274e, 0x0006ed1b7a6eb3a4, 0x00062ace697d8e15, 0x000266b8292ab075, 0x00068436a0665c9c}}},
+{{{0x000235e8202f3f27, 0x00044c9f2eb61780, 0x000630905b1d7003, 0x0004fcc8d274ead1, 0x00017b6e7f68ab78}},
+ {{0x0006d317e820107c, 0x000090815d2ca3ca, 0x00003ff1eb1499a1, 0x00023960f050e319, 0x0005373669c91611}},
+ {{0x000014ab9a0e5257, 0x00009939567f8ba5, 0x0004b47b2a423c82, 0x000688d7e57ac42d, 0x0001cb4b5a678f87}}},
+{{{0x0004c06b394afc6c, 0x0004931b4bf636cc, 0x00072b60d0322378, 0x00025127c6818b25, 0x000330bca78de743}},
+ {{0x0004aa62a2a007e7, 0x00061e0e38f62d6e, 0x00002f888fcc4782, 0x0007562b83f21c00, 0x0002dc0fd2d82ef6}},
+ {{0x0006ff841119744e, 0x0002c560e8e49305, 0x0007254fefe5a57a, 0x00067ae2c560a7df, 0x0003c31be1b369f1}}},
+{{{0x0004864d08948aee, 0x0005d237438df61e, 0x0002b285601f7067, 0x00025dbcbae6d753, 0x000330b61134262d}},
+ {{0x0000bc93f9cb4272, 0x0003f8f9db73182d, 0x0002b235eabae1c4, 0x0002ddbf8729551a, 0x00041cec1097e7d5}},
+ {{0x000619d7a26d808a, 0x0003c3b3c2adbef2, 0x0006877c9eec7f52, 0x0003beb9ebe1b66d, 0x00026b44cd91f287}}},
+{{{0x000048478f387475, 0x00069397d9678a3e, 0x00067c8156c976f3, 0x0002eb4d5589226c, 0x0002c709e6c1c10a}},
+ {{0x0007f29362730383, 0x0007fd7951459c36, 0x0007504c512d49e7, 0x000087ed7e3bc55f, 0x0007deb10149c726}},
+ {{0x0002af6a8766ee7a, 0x00008aaa79a1d96c, 0x00042f92d59b2fb0, 0x0001752c40009c07, 0x00008e68e9ff62ce}}},
+{{{0x0005500a4bc130ad, 0x000127a17a938695, 0x00002a26fa34e36d, 0x000584d12e1ecc28, 0x0002f1f3f87eeba3}},
+ {{0x000509d50ab8f2f9, 0x0001b8ab247be5e5, 0x0005d9b2e6b2e486, 0x0004faa5479a1339, 0x0004cb13bd738f71}},
+ {{0x00048c75e515b64a, 0x00075b6952071ef0, 0x0005d46d42965406, 0x0007746106989f9f, 0x00019a1e353c0ae2}}},
+{{{0x00047560bafa05c3, 0x000418dcabcc2fa3, 0x00035991cecf8682, 0x00024371a94b8c60, 0x00041546b11c20c3}},
+ {{0x000172cdd596bdbd, 0x0000731ddf881684, 0x00010426d64f8115, 0x00071a4fd8a9a3da, 0x000736bd3990266a}},
+ {{0x00032d509334b3b4, 0x00016c102cae70aa, 0x0001720dd51bf445, 0x0005ae662faf9821, 0x000412295a2b87fa}}},
+{{{0x00019b88f57ed6e9, 0x0004cdbf1904a339, 0x00042b49cd4e4f2c, 0x00071a2e771909d9, 0x00014e153ebb52d2}},
+ {{0x00055261e293eac6, 0x00006426759b65cc, 0x00040265ae116a48, 0x0006c02304bae5bc, 0x0000760bb8d195ad}},
+ {{0x00061a17cde6818a, 0x00053dad34108827, 0x00032b32c55c55b6, 0x0002f9165f9347a3, 0x0006b34be9bc33ac}}},
+{{{0x00072f643a78c0b2, 0x0003de45c04f9e7b, 0x000706d68d30fa5c, 0x000696f63e8e2f24, 0x0002012c18f0922d}},
+ {{0x000469656571f2d3, 0x0000aa61ce6f423f, 0x0003f940d71b27a1, 0x000185f19d73d16a, 0x00001b9c7b62e6dd}},
+ {{0x000355e55ac89d29, 0x0003e8b414ec7101, 0x00039db07c520c90, 0x0006f41e9b77efe1, 0x00008af5b784e4ba}}},
+{{{0x000499dc881f2533, 0x00034ef26476c506, 0x0004d107d2741497, 0x000346c4bd6efdb3, 0x00032b79d71163a1}}, + {{0x000314d289cc2c4b, 0x00023450e2f1bc4e, 0x0000cd93392f92f4, 0x0001370c6a946b7d, 0x0006423c1d5afd98}}, + {{0x0005f8d9edfcb36a, 0x0001e6e8dcbf3990, 0x0007974f348af30a, 0x0006e6724ef19c7c, 0x000480a5efbc13e2}}}, +{{{0x0001e70b01622071, 0x0001f163b5f8a16a, 0x00056aaf341ad417, 0x0007989635d830f7, 0x00047aa27600cb7b}}, + {{0x00014ce442ce221f, 0x00018980a72516cc, 0x000072f80db86677, 0x000703331fda526e, 0x00024b31d47691c8}}, + {{0x00041eedc015f8c3, 0x0007cf8d27ef854a, 0x000289e3584693f9, 0x00004a7857b309a7, 0x000545b585d14dda}}}, +{{{0x0007275ea0d43a0f, 0x000681137dd7ccf7, 0x0001e79cbab79a38, 0x00022a214489a66a, 0x0000f62f9c332ba5}}, + {{0x0004e4d0e3b321e1, 0x0007451fe3d2ac40, 0x000666f678eea98d, 0x000038858667fead, 0x0004d22dc3e64c8d}}, + {{0x00046589d63b5f39, 0x0007eaf979ec3f96, 0x0004ebe81572b9a8, 0x00021b7f5d61694a, 0x0001c0fa01a36371}}}, +{{{0x000604b622943dff, 0x0001c899f6741a58, 0x00060219e2f232fb, 0x00035fae92a7f9cb, 0x0000fa3614f3b1ca}}, + {{0x00002b0e8c936a50, 0x0006b83b58b6cd21, 0x00037ed8d3e72680, 0x0000a037db9f2a62, 0x0004005419b1d2bc}}, + {{0x0003febdb9be82f0, 0x0005e74895921400, 0x000553ea38822706, 0x0005a17c24cfc88c, 0x0001fba218aef40a}}}, +{{{0x00049448fac8f53e, 0x00034f74c6e8356a, 0x0000ad780607dba2, 0x0007213a7eb63eb6, 0x000392e3acaa8c86}}, + {{0x000657043e7b0194, 0x0005c11b55efe9e7, 0x0007737bc6a074fb, 0x0000eae41ce355cc, 0x0006c535d13ff776}}, + {{0x000534e93e8a35af, 0x00008b10fd02c997, 0x00026ac2acb81e05, 0x00009d8c98ce3b79, 0x00025e17fe4d50ac}}}, +{{{0x00009bd71e04f676, 0x00025ac841f2a145, 0x0001a47eac823871, 0x0001a8a8c36c581a, 0x000255751442a9fb}}, + {{0x00077ff576f121a7, 0x0004e5f9b0fc722b, 0x00046f949b0d28c8, 0x0004cde65d17ef26, 0x0006bba828f89698}}, + {{0x0001bc6690fe3901, 0x000314132f5abc5a, 0x000611835132d528, 0x0005f24b8eb48a57, 0x000559d504f7f6b7}}}, +{{{0x00038378b3eb54d5, 0x0004d4aaa78f94ee, 0x0004a002e875a74d, 0x00010b851367b17c, 0x00001ab12d5807e3}}, + {{0x000091e7f6d266fd, 0x00036060ef037389, 0x00018788ec1d1286, 0x000287441c478eb0, 0x000123ea6a3354bd}}, + {{0x0005189041e32d96, 0x00005b062b090231, 0x0000c91766e7b78f, 0x0000aa0f55a138ec, 0x0004a3961e2c918a}}}, +{{{0x00043be0f8e6bba0, 0x00068fdffc614e3b, 0x0004e91dab5b3be0, 0x0003b1d4c9212ff0, 0x0002cd6bce3fb1db}}, + {{0x0007d644f3233f1e, 0x0001c69f9e02c064, 0x00036ae5e5266898, 0x00008fc1dad38b79, 0x00068aceead9bd41}}, + {{0x0004c90ef3d7c210, 0x000496f5a0818716, 0x00079cf88cc239b8, 0x0002cb9c306cf8db, 0x000595760d5b508f}}}, +{{{0x0001bfe104aa6397, 0x00011494ff996c25, 0x00064251623e5800, 0x0000d49fc5e044be, 0x000709fa43edcb29}}, + {{0x0002cbebfd022790, 0x0000b8822aec1105, 0x0004d1cfd226bccc, 0x000515b2fa4971be, 0x0002cb2c5df54515}}, + {{0x00025d8c63fd2aca, 0x0004c5cd29dffd61, 0x00032ec0eb48af05, 0x00018f9391f9b77c, 0x00070f029ecf0c81}}}, +{{{0x000307b32eed3e33, 0x0006748ab03ce8c2, 0x00057c0d9ab810bc, 0x00042c64a224e98c, 0x0000b7d5d8a6c314}}, + {{0x0002afaa5e10b0b9, 0x00061de08355254d, 0x0000eb587de3c28d, 0x0004f0bb9f7dbbd5, 0x00044eca5a2a74bd}}, + {{0x000448327b95d543, 0x0000146681e3a4ba, 0x00038714adc34e0c, 0x0004f26f0e298e30, 0x000272224512c7de}}}, +{{{0x000492af49c5342e, 0x0002365cdf5a0357, 0x00032138a7ffbb60, 0x0002a1f7d14646fe, 0x00011b5df18a44cc}}, + {{0x0003bb8a42a975fc, 0x0006f2d5b46b17ef, 0x0007b6a9223170e5, 0x000053713fe3b7e6, 0x00019735fd7f6bc2}}, + {{0x000390d042c84266, 0x0001efe32a8fdc75, 0x0006925ee7ae1238, 0x0004af9281d0e832, 0x0000fef911191df8}}} \ No newline at end of file diff --git 
a/src/ed25519-supercop-amd64-51-30k/ge25519_base_slide_multiples.data b/src/ed25519-supercop-amd64-51-30k/ge25519_base_slide_multiples.data new file mode 100644 index 0000000..b0345b6 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/ge25519_base_slide_multiples.data @@ -0,0 +1,96 @@ +{{{0x00003905d740913e, 0x0000ba2817d673a2, 0x00023e2827f4e67c, 0x000133d2e0c21a34, 0x00044fd2f9298f81}}, + {{0x000493c6f58c3b85, 0x0000df7181c325f7, 0x0000f50b0b3e4cb7, 0x0005329385a44c32, 0x00007cf9d3a33d4b}}, + {{0x00011205877aaa68, 0x000479955893d579, 0x00050d66309b67a0, 0x0002d42d0dbee5ee, 0x0006f117b689f0c6}}}, +{{{0x00011fe8a4fcd265, 0x0007bcb8374faacc, 0x00052f5af4ef4d4f, 0x0005314098f98d10, 0x0002ab91587555bd}}, + {{0x0005b0a84cee9730, 0x00061d10c97155e4, 0x0004059cc8096a10, 0x00047a608da8014f, 0x0007a164e1b9a80f}}, + {{0x0006933f0dd0d889, 0x00044386bb4c4295, 0x0003cb6d3162508c, 0x00026368b872a2c6, 0x0005a2826af12b9b}}}, +{{{0x000182c3a447d6ba, 0x00022964e536eff2, 0x000192821f540053, 0x0002f9f19e788e5c, 0x000154a7e73eb1b5}}, + {{0x0002bc4408a5bb33, 0x000078ebdda05442, 0x0002ffb112354123, 0x000375ee8df5862d, 0x0002945ccf146e20}}, + {{0x0003dbf1812a8285, 0x0000fa17ba3f9797, 0x0006f69cb49c3820, 0x00034d5a0db3858d, 0x00043aabe696b3bb}}}, +{{{0x00072c9aaa3221b1, 0x000267774474f74d, 0x000064b0e9b28085, 0x0003f04ef53b27c9, 0x0001d6edd5d2e531}}, + {{0x00025cd0944ea3bf, 0x00075673b81a4d63, 0x000150b925d1c0d4, 0x00013f38d9294114, 0x000461bea69283c9}}, + {{0x00036dc801b8b3a2, 0x0000e0a7d4935e30, 0x0001deb7cecc0d7d, 0x000053a94e20dd2c, 0x0007a9fbb1c6a0f9}}}, +{{{0x0006217e039d8064, 0x0006dea408337e6d, 0x00057ac112628206, 0x000647cb65e30473, 0x00049c05a51fadc9}}, + {{0x0006678aa6a8632f, 0x0005ea3788d8b365, 0x00021bd6d6994279, 0x0007ace75919e4e3, 0x00034b9ed338add7}}, + {{0x0004e8bf9045af1b, 0x000514e33a45e0d6, 0x0007533c5b8bfe0f, 0x000583557b7e14c9, 0x00073c172021b008}}}, +{{{0x00075b0249864348, 0x00052ee11070262b, 0x000237ae54fb5acd, 0x0003bfd1d03aaab5, 0x00018ab598029d5c}}, + {{0x000700848a802ade, 0x0001e04605c4e5f7, 0x0005c0d01b9767fb, 0x0007d7889f42388b, 0x0004275aae2546d8}}, + {{0x00032cc5fd6089e9, 0x000426505c949b05, 0x00046a18880c7ad2, 0x0004a4221888ccda, 0x0003dc65522b53df}}}, +{{{0x0007013b327fbf93, 0x0001336eeded6a0d, 0x0002b565a2bbf3af, 0x000253ce89591955, 0x0000267882d17602}}, + {{0x0000c222a2007f6d, 0x000356b79bdb77ee, 0x00041ee81efe12ce, 0x000120a9bd07097d, 0x000234fd7eec346f}}, + {{0x0000a119732ea378, 0x00063bf1ba8e2a6c, 0x00069f94cc90df9a, 0x000431d1779bfc48, 0x000497ba6fdaa097}}}, +{{{0x0003cd86468ccf0b, 0x00048553221ac081, 0x0006c9464b4e0a6e, 0x00075fba84180403, 0x00043b5cd4218d05}}, + {{0x0006cc0313cfeaa0, 0x0001a313848da499, 0x0007cb534219230a, 0x00039596dedefd60, 0x00061e22917f12de}}, + {{0x0002762f9bd0b516, 0x0001c6e7fbddcbb3, 0x00075909c3ace2bd, 0x00042101972d3ec9, 0x000511d61210ae4d}}}, +{{{0x000386484420de87, 0x0002d6b25db68102, 0x000650b4962873c0, 0x0004081cfd271394, 0x00071a7fe6fe2482}}, + {{0x000676ef950e9d81, 0x0001b81ae089f258, 0x00063c4922951883, 0x0002f1d54d9b3237, 0x0006d325924ddb85}}, + {{0x000182b8a5c8c854, 0x00073fcbe5406d8e, 0x0005de3430cff451, 0x000554b967ac8c41, 0x0004746c4b6559ee}}}, +{{{0x000546c864741147, 0x0003a1df99092690, 0x0001ca8cc9f4d6bb, 0x00036b7fc9cd3b03, 0x000219663497db5e}}, + {{0x00077b3c6dc69a2b, 0x0004edf13ec2fa6e, 0x0004e85ad77beac8, 0x0007dba2b28e7bda, 0x0005c9a51de34fe9}}, + {{0x0000f1cf79f10e67, 0x00043ccb0a2b7ea2, 0x00005089dfff776a, 0x0001dd84e1d38b88, 0x0004804503c60822}}}, +{{{0x000021d23a36d175, 0x0004fd3373c6476d, 0x00020e291eeed02a, 
0x00062f2ecf2e7210, 0x000771e098858de4}}, + {{0x00049ed02ca37fc7, 0x000474c2b5957884, 0x0005b8388e816683, 0x0004b6c454b76be4, 0x000553398a516506}}, + {{0x0002f5d278451edf, 0x000730b133997342, 0x0006965420eb6975, 0x000308a3bfa516cf, 0x0005a5ed1d68ff5a}}}, +{{{0x0005e0c558527359, 0x0003395b73afd75c, 0x000072afa4e4b970, 0x00062214329e0f6d, 0x000019b60135fefd}}, + {{0x0005122afe150e83, 0x0004afc966bb0232, 0x0001c478833c8268, 0x00017839c3fc148f, 0x00044acb897d8bf9}}, + {{0x000068145e134b83, 0x0001e4860982c3cc, 0x000068fb5f13d799, 0x0007c9283744547e, 0x000150c49fde6ad2}}}, +{{{0x0001863c9cdca868, 0x0003770e295a1709, 0x0000d85a3720fd13, 0x0005e0ff1f71ab06, 0x00078a6d7791e05f}}, + {{0x0003f29509471138, 0x000729eeb4ca31cf, 0x00069c22b575bfbc, 0x0004910857bce212, 0x0006b2b5a075bb99}}, + {{0x0007704b47a0b976, 0x0002ae82e91aab17, 0x00050bd6429806cd, 0x00068055158fd8ea, 0x000725c7ffc4ad55}}}, +{{{0x00002bf71cd098c0, 0x00049dabcc6cd230, 0x00040a6533f905b2, 0x000573efac2eb8a4, 0x0004cd54625f855f}}, + {{0x00026715d1cf99b2, 0x0002205441a69c88, 0x000448427dcd4b54, 0x0001d191e88abdc5, 0x000794cc9277cb1f}}, + {{0x0006c426c2ac5053, 0x0005a65ece4b095e, 0x0000c44086f26bb6, 0x0007429568197885, 0x0007008357b6fcc8}}}, +{{{0x00039fbb82584a34, 0x00047a568f257a03, 0x00014d88091ead91, 0x0002145b18b1ce24, 0x00013a92a3669d6d}}, + {{0x0000672738773f01, 0x000752bf799f6171, 0x0006b4a6dae33323, 0x0007b54696ead1dc, 0x00006ef7e9851ad0}}, + {{0x0003771cc0577de5, 0x0003ca06bb8b9952, 0x00000b81c5d50390, 0x00043512340780ec, 0x0003c296ddf8a2af}}}, +{{{0x00034d2ebb1f2541, 0x0000e815b723ff9d, 0x000286b416e25443, 0x0000bdfe38d1bee8, 0x0000a892c7007477}}, + {{0x000515f9d914a713, 0x00073191ff2255d5, 0x00054f5cc2a4bdef, 0x0003dd57fc118bcf, 0x0007a99d393490c7}}, + {{0x0002ed2436bda3e8, 0x00002afd00f291ea, 0x0000be7381dea321, 0x0003e952d4b2b193, 0x000286762d28302f}}}, +{{{0x00058e2bce2ef5bd, 0x00068ce8f78c6f8a, 0x0006ee26e39261b2, 0x00033d0aa50bcf9d, 0x0007686f2a3d6f17}}, + {{0x000036093ce35b25, 0x0003b64d7552e9cf, 0x00071ee0fe0b8460, 0x00069d0660c969e5, 0x00032f1da046a9d9}}, + {{0x000512a66d597c6a, 0x0000609a70a57551, 0x000026c08a3c464c, 0x0004531fc8ee39e1, 0x000561305f8a9ad2}}}, +{{{0x0002cc28e7b0c0d5, 0x00077b60eb8a6ce4, 0x0004042985c277a6, 0x000636657b46d3eb, 0x000030a1aef2c57c}}, + {{0x0004978dec92aed1, 0x000069adae7ca201, 0x00011ee923290f55, 0x00069641898d916c, 0x00000aaec53e35d4}}, + {{0x0001f773003ad2aa, 0x000005642cc10f76, 0x00003b48f82cfca6, 0x0002403c10ee4329, 0x00020be9c1c24065}}}, +{{{0x0000e44ae2025e60, 0x0005f97b9727041c, 0x0005683472c0ecec, 0x000188882eb1ce7c, 0x00069764c545067e}}, + {{0x000387d8249673a6, 0x0005bea8dc927c2a, 0x0005bd8ed5650ef0, 0x0000ef0e3fcd40e1, 0x000750ab3361f0ac}}, + {{0x00023283a2f81037, 0x000477aff97e23d1, 0x0000b8958dbcbb68, 0x0000205b97e8add6, 0x00054f96b3fb7075}}}, +{{{0x0005afc616b11ecd, 0x00039f4aec8f22ef, 0x0003b39e1625d92e, 0x0005f85bd4508873, 0x00078e6839fbe85d}}, + {{0x0005f20429669279, 0x00008fafae4941f5, 0x00015d83c4eb7688, 0x0001cf379eca4146, 0x0003d7fe9c52bb75}}, + {{0x00032df737b8856b, 0x0000608342f14e06, 0x0003967889d74175, 0x0001211907fba550, 0x00070f268f350088}}}, +{{{0x0004112070dcf355, 0x0007dcff9c22e464, 0x00054ada60e03325, 0x00025cd98eef769a, 0x000404e56c039b8c}}, + {{0x00064583b1805f47, 0x00022c1baf832cd0, 0x000132c01bd4d717, 0x0004ecf4c3a75b8f, 0x0007c0d345cfad88}}, + {{0x00071f4b8c78338a, 0x00062cfc16bc2b23, 0x00017cf51280d9aa, 0x0003bbae5e20a95a, 0x00020d754762aaec}}}, +{{{0x0004feb135b9f543, 0x00063bd192ad93ae, 0x00044e2ea612cdf7, 0x000670f4991583ab, 0x00038b8ada8790b4}}, + 
{{0x0007c36fc73bb758, 0x0004a6c797734bd1, 0x0000ef248ab3950e, 0x00063154c9a53ec8, 0x0002b8f1e46f3cee}}, + {{0x00004a9cdf51f95d, 0x0005d963fbd596b8, 0x00022d9b68ace54a, 0x0004a98e8836c599, 0x000049aeb32ceba1}}}, +{{{0x00067d3c63dcfe7e, 0x000112f0adc81aee, 0x00053df04c827165, 0x0002fe5b33b430f0, 0x00051c665e0c8d62}}, + {{0x00007d0b75fc7931, 0x00016f4ce4ba754a, 0x0005ace4c03fbe49, 0x00027e0ec12a159c, 0x000795ee17530f67}}, + {{0x00025b0a52ecbd81, 0x0005dc0695fce4a9, 0x0003b928c575047d, 0x00023bf3512686e5, 0x0006cd19bf49dc54}}}, +{{{0x0007619052179ca3, 0x0000c16593f0afd0, 0x000265c4795c7428, 0x00031c40515d5442, 0x0007520f3db40b2e}}, + {{0x0006612165afc386, 0x0001171aa36203ff, 0x0002642ea820a8aa, 0x0001f3bb7b313f10, 0x0005e01b3a7429e4}}, + {{0x00050be3d39357a1, 0x0003ab33d294a7b6, 0x0004c479ba59edb3, 0x0004c30d184d326f, 0x00071092c9ccef3c}}}, +{{{0x0000523f0364918c, 0x000687f56d638a7b, 0x00020796928ad013, 0x0005d38405a54f33, 0x0000ea15b03d0257}}, + {{0x0003d8ac74051dcf, 0x00010ab6f543d0ad, 0x0005d0f3ac0fda90, 0x0005ef1d2573e5e4, 0x0004173a5bb7137a}}, + {{0x00056e31f0f9218a, 0x0005635f88e102f8, 0x0002cbc5d969a5b8, 0x000533fbc98b347a, 0x0005fc565614a4e3}}}, +{{{0x0006570dc46d7ae5, 0x00018a9f1b91e26d, 0x000436b6183f42ab, 0x000550acaa4f8198, 0x00062711c414c454}}, + {{0x0002e1e67790988e, 0x0001e38b9ae44912, 0x000648fbb4075654, 0x00028df1d840cd72, 0x0003214c7409d466}}, + {{0x0001827406651770, 0x0004d144f286c265, 0x00017488f0ee9281, 0x00019e6cdb5c760c, 0x0005bea94073ecb8}}}, +{{{0x0005bf0912c89be4, 0x00062fadcaf38c83, 0x00025ec196b3ce2c, 0x00077655ff4f017b, 0x0003aacd5c148f61}}, + {{0x0000ce63f343d2f8, 0x0001e0a87d1e368e, 0x000045edbc019eea, 0x0006979aed28d0d1, 0x0004ad0785944f1b}}, + {{0x00063b34c3318301, 0x0000e0e62d04d0b1, 0x000676a233726701, 0x00029e9a042d9769, 0x0003aff0cb1d9028}}}, +{{{0x0005c7eb3a20405e, 0x0005fdb5aad930f8, 0x0004a757e63b8c47, 0x00028e9492972456, 0x000110e7e86f4cd2}}, + {{0x0006430bf4c53505, 0x000264c3e4507244, 0x00074c9f19a39270, 0x00073f84f799bc47, 0x0002ccf9f732bd99}}, + {{0x0000d89ed603f5e4, 0x00051e1604018af8, 0x0000b8eedc4a2218, 0x00051ba98b9384d0, 0x00005c557e0b9693}}}, +{{{0x0001ce311fc97e6f, 0x0006023f3fb5db1f, 0x0007b49775e8fc98, 0x0003ad70adbf5045, 0x0006e154c178fe98}}, + {{0x0006bbb089c20eb0, 0x0006df41fb0b9eee, 0x00051087ed87e16f, 0x000102db5c9fa731, 0x000289fef0841861}}, + {{0x00016336fed69abf, 0x0004f066b929f9ec, 0x0004e9ff9e6c5b93, 0x00018c89bc4bb2ba, 0x0006afbf642a95ca}}}, +{{{0x0000de0c62f5d2c1, 0x00049601cf734fb5, 0x0006b5c38263f0f6, 0x0004623ef5b56d06, 0x0000db4b851b9503}}, + {{0x00055070f913a8cc, 0x000765619eac2bbc, 0x0003ab5225f47459, 0x00076ced14ab5b48, 0x00012c093cedb801}}, + {{0x00047f9308b8190f, 0x000414235c621f82, 0x00031f5ff41a5a76, 0x0006736773aab96d, 0x00033aa8799c6635}}}, +{{{0x0007f51ebd085cf2, 0x00012cfa67e3f5e1, 0x0001800cf1e3d46a, 0x00054337615ff0a8, 0x000233c6f29e8e21}}, + {{0x0000f588fc156cb1, 0x000363414da4f069, 0x0007296ad9b68aea, 0x0004d3711316ae43, 0x000212cd0c1c8d58}}, + {{0x0004d5107f18c781, 0x00064a4fd3a51a5e, 0x0004f4cd0448bb37, 0x000671d38543151e, 0x0001db7778911914}}}, +{{{0x000352397c6bc26f, 0x00018a7aa0227bbe, 0x0005e68cc1ea5f8b, 0x0006fe3e3a7a1d5f, 0x00031ad97ad26e2a}}, + {{0x00014769dd701ab6, 0x00028339f1b4b667, 0x0004ab214b8ae37b, 0x00025f0aefa0b0fe, 0x0007ae2ca8a017d2}}, + {{0x000017ed0920b962, 0x000187e33b53b6fd, 0x00055829907a1463, 0x000641f248e0a792, 0x0001ed1fc53a6622}}} \ No newline at end of file diff --git a/src/ed25519-supercop-amd64-51-30k/ge25519_dbl_p1p1.s b/src/ed25519-supercop-amd64-51-30k/ge25519_dbl_p1p1.s new 
file mode 100644 index 0000000..07c7d6e --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/ge25519_dbl_p1p1.s @@ -0,0 +1,3155 @@ + +# qhasm: int64 rp + +# qhasm: int64 pp + +# qhasm: input rp + +# qhasm: input pp + +# qhasm: int64 a0 + +# qhasm: int64 a1 + +# qhasm: int64 a2 + +# qhasm: int64 a3 + +# qhasm: int64 a4 + +# qhasm: stack64 a0_stack + +# qhasm: stack64 a1_stack + +# qhasm: stack64 a2_stack + +# qhasm: stack64 a3_stack + +# qhasm: stack64 a4_stack + +# qhasm: int64 b0 + +# qhasm: int64 b1 + +# qhasm: int64 b2 + +# qhasm: int64 b3 + +# qhasm: int64 b4 + +# qhasm: stack64 b0_stack + +# qhasm: stack64 b1_stack + +# qhasm: stack64 b2_stack + +# qhasm: stack64 b3_stack + +# qhasm: stack64 b4_stack + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: int64 c4 + +# qhasm: stack64 c0_stack + +# qhasm: stack64 c1_stack + +# qhasm: stack64 c2_stack + +# qhasm: stack64 c3_stack + +# qhasm: stack64 c4_stack + +# qhasm: int64 d0 + +# qhasm: int64 d1 + +# qhasm: int64 d2 + +# qhasm: int64 d3 + +# qhasm: int64 d4 + +# qhasm: stack64 d0_stack + +# qhasm: stack64 d1_stack + +# qhasm: stack64 d2_stack + +# qhasm: stack64 d3_stack + +# qhasm: stack64 d4_stack + +# qhasm: int64 e0 + +# qhasm: int64 e1 + +# qhasm: int64 e2 + +# qhasm: int64 e3 + +# qhasm: int64 e4 + +# qhasm: stack64 e0_stack + +# qhasm: stack64 e1_stack + +# qhasm: stack64 e2_stack + +# qhasm: stack64 e3_stack + +# qhasm: stack64 e4_stack + +# qhasm: int64 rx0 + +# qhasm: int64 rx1 + +# qhasm: int64 rx2 + +# qhasm: int64 rx3 + +# qhasm: int64 rx4 + +# qhasm: stack64 rx0_stack + +# qhasm: stack64 rx1_stack + +# qhasm: stack64 rx2_stack + +# qhasm: stack64 rx3_stack + +# qhasm: stack64 rx4_stack + +# qhasm: int64 ry0 + +# qhasm: int64 ry1 + +# qhasm: int64 ry2 + +# qhasm: int64 ry3 + +# qhasm: int64 ry4 + +# qhasm: int64 rz0 + +# qhasm: int64 rz1 + +# qhasm: int64 rz2 + +# qhasm: int64 rz3 + +# qhasm: int64 rz4 + +# qhasm: int64 rt0 + +# qhasm: int64 rt1 + +# qhasm: int64 rt2 + +# qhasm: int64 rt3 + +# qhasm: int64 rt4 + +# qhasm: int64 mulr01 + +# qhasm: int64 mulr11 + +# qhasm: int64 mulr21 + +# qhasm: int64 mulr31 + +# qhasm: int64 mulr41 + +# qhasm: int64 mulrax + +# qhasm: int64 mulrdx + +# qhasm: int64 mult + +# qhasm: int64 mulredmask + +# qhasm: stack64 mulx219_stack + +# qhasm: stack64 mulx319_stack + +# qhasm: stack64 mulx419_stack + +# qhasm: int64 squarer01 + +# qhasm: int64 squarer11 + +# qhasm: int64 squarer21 + +# qhasm: int64 squarer31 + +# qhasm: int64 squarer41 + +# qhasm: int64 squarerax + +# qhasm: int64 squarerdx + +# qhasm: int64 squaret + +# qhasm: int64 squareredmask + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ge25519_dbl_p1p1 +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_51_30k_batch_ge25519_dbl_p1p1 +.globl crypto_sign_ed25519_amd64_51_30k_batch_ge25519_dbl_p1p1 +_crypto_sign_ed25519_amd64_51_30k_batch_ge25519_dbl_p1p1: 
+crypto_sign_ed25519_amd64_51_30k_batch_ge25519_dbl_p1p1: +mov %rsp,%r11 +and $31,%r11 +add $224,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: squarerax = *(uint64 *)(pp + 0) +# asm 1: movq 0(squarerax=int64#7 +# asm 2: movq 0(squarerax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 0) +# asm 1: mulq 0(a0=int64#4 +# asm 2: mov a0=%rcx +mov %rax,%rcx + +# qhasm: squarer01 = squarerdx +# asm 1: mov squarer01=int64#5 +# asm 2: mov squarer01=%r8 +mov %rdx,%r8 + +# qhasm: squarerax = *(uint64 *)(pp + 0) +# asm 1: movq 0(squarerax=int64#7 +# asm 2: movq 0(squarerax=%rax +movq 0(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,a1=int64#6 +# asm 2: mov a1=%r9 +mov %rax,%r9 + +# qhasm: squarer11 = squarerdx +# asm 1: mov squarer11=int64#8 +# asm 2: mov squarer11=%r10 +mov %rdx,%r10 + +# qhasm: squarerax = *(uint64 *)(pp + 0) +# asm 1: movq 0(squarerax=int64#7 +# asm 2: movq 0(squarerax=%rax +movq 0(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,a2=int64#9 +# asm 2: mov a2=%r11 +mov %rax,%r11 + +# qhasm: squarer21 = squarerdx +# asm 1: mov squarer21=int64#10 +# asm 2: mov squarer21=%r12 +mov %rdx,%r12 + +# qhasm: squarerax = *(uint64 *)(pp + 0) +# asm 1: movq 0(squarerax=int64#7 +# asm 2: movq 0(squarerax=%rax +movq 0(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,a3=int64#11 +# asm 2: mov a3=%r13 +mov %rax,%r13 + +# qhasm: squarer31 = squarerdx +# asm 1: mov squarer31=int64#12 +# asm 2: mov squarer31=%r14 +mov %rdx,%r14 + +# qhasm: squarerax = *(uint64 *)(pp + 0) +# asm 1: movq 0(squarerax=int64#7 +# asm 2: movq 0(squarerax=%rax +movq 0(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,a4=int64#13 +# asm 2: mov a4=%r15 +mov %rax,%r15 + +# qhasm: squarer41 = squarerdx +# asm 1: mov squarer41=int64#14 +# asm 2: mov squarer41=%rbx +mov %rdx,%rbx + +# qhasm: squarerax = *(uint64 *)(pp + 8) +# asm 1: movq 8(squarerax=int64#7 +# asm 2: movq 8(squarerax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 8) +# asm 1: mulq 8(squarerax=int64#7 +# asm 2: movq 8(squarerax=%rax +movq 8(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,squarerax=int64#7 +# asm 2: movq 8(squarerax=%rax +movq 8(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,squarerax=int64#3 +# asm 2: movq 8(squarerax=%rdx +movq 8(%rsi),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 32) +# asm 1: mulq 32(squarerax=int64#7 +# asm 2: movq 16(squarerax=%rax +movq 
16(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 16) +# asm 1: mulq 16(squarerax=int64#3 +# asm 2: movq 16(squarerax=%rdx +movq 16(%rsi),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 24) +# asm 1: mulq 24(squarerax=int64#3 +# asm 2: movq 16(squarerax=%rdx +movq 16(%rsi),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 32) +# asm 1: mulq 32(squarerax=int64#3 +# asm 2: movq 24(squarerax=%rdx +movq 24(%rsi),%rdx + +# qhasm: squarerax *= 19 +# asm 1: imulq $19,squarerax=int64#7 +# asm 2: imulq $19,squarerax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 24) +# asm 1: mulq 24(squarerax=int64#3 +# asm 2: movq 24(squarerax=%rdx +movq 24(%rsi),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 32) +# asm 1: mulq 32(squarerax=int64#3 +# asm 2: movq 32(squarerax=%rdx +movq 32(%rsi),%rdx + +# qhasm: squarerax *= 19 +# asm 1: imulq $19,squarerax=int64#7 +# asm 2: imulq $19,squarerax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 32) +# asm 1: mulq 32(squareredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: squarer01 = (squarer01.a0) << 13 +# asm 1: shld $13,squarer41=int64#5 +# asm 2: imulq $19,squarer41=%r8 +imulq $19,%rbx,%r8 + +# qhasm: a0 += squarer41 +# asm 1: add squaret=int64#5 +# asm 2: mov squaret=%r8 +mov %rcx,%r8 + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,a1=int64#6 +# asm 2: mov a1=%r9 +mov %r8,%r9 + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,a2=int64#7 +# asm 2: mov a2=%rax +mov %r8,%rax + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,a3=int64#8 +# asm 2: mov a3=%r10 +mov %r8,%r10 + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,a4=int64#9 +# asm 2: mov a4=%r11 +mov %r8,%r11 + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,squaret=int64#5 +# asm 2: imulq $19,squaret=%r8 +imulq $19,%r8,%r8 + +# qhasm: a0 += squaret +# asm 1: add a0_stack=stack64#8 +# asm 2: movq a0_stack=56(%rsp) +movq %rcx,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq a1_stack=stack64#9 +# asm 2: movq a1_stack=64(%rsp) +movq %r9,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq a2_stack=stack64#10 +# asm 2: movq a2_stack=72(%rsp) +movq %rax,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq a3_stack=stack64#11 +# asm 2: movq a3_stack=80(%rsp) +movq %r10,80(%rsp) + +# qhasm: a4_stack = a4 +# asm 1: movq a4_stack=stack64#12 +# asm 2: movq a4_stack=88(%rsp) +movq %r11,88(%rsp) + +# qhasm: squarerax = *(uint64 *)(pp + 40) +# asm 1: movq 40(squarerax=int64#7 +# asm 2: movq 40(squarerax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 40) +# asm 1: mulq 40(b0=int64#4 +# asm 2: mov b0=%rcx +mov %rax,%rcx + +# qhasm: squarer01 = squarerdx +# asm 1: mov squarer01=int64#5 +# asm 2: mov squarer01=%r8 +mov %rdx,%r8 + +# qhasm: squarerax = *(uint64 *)(pp + 40) +# asm 1: movq 40(squarerax=int64#7 +# asm 2: movq 40(squarerax=%rax +movq 
40(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,b1=int64#6 +# asm 2: mov b1=%r9 +mov %rax,%r9 + +# qhasm: squarer11 = squarerdx +# asm 1: mov squarer11=int64#8 +# asm 2: mov squarer11=%r10 +mov %rdx,%r10 + +# qhasm: squarerax = *(uint64 *)(pp + 40) +# asm 1: movq 40(squarerax=int64#7 +# asm 2: movq 40(squarerax=%rax +movq 40(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,b2=int64#9 +# asm 2: mov b2=%r11 +mov %rax,%r11 + +# qhasm: squarer21 = squarerdx +# asm 1: mov squarer21=int64#10 +# asm 2: mov squarer21=%r12 +mov %rdx,%r12 + +# qhasm: squarerax = *(uint64 *)(pp + 40) +# asm 1: movq 40(squarerax=int64#7 +# asm 2: movq 40(squarerax=%rax +movq 40(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,b3=int64#11 +# asm 2: mov b3=%r13 +mov %rax,%r13 + +# qhasm: squarer31 = squarerdx +# asm 1: mov squarer31=int64#12 +# asm 2: mov squarer31=%r14 +mov %rdx,%r14 + +# qhasm: squarerax = *(uint64 *)(pp + 40) +# asm 1: movq 40(squarerax=int64#7 +# asm 2: movq 40(squarerax=%rax +movq 40(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,b4=int64#13 +# asm 2: mov b4=%r15 +mov %rax,%r15 + +# qhasm: squarer41 = squarerdx +# asm 1: mov squarer41=int64#14 +# asm 2: mov squarer41=%rbx +mov %rdx,%rbx + +# qhasm: squarerax = *(uint64 *)(pp + 48) +# asm 1: movq 48(squarerax=int64#7 +# asm 2: movq 48(squarerax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 48) +# asm 1: mulq 48(squarerax=int64#7 +# asm 2: movq 48(squarerax=%rax +movq 48(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,squarerax=int64#7 +# asm 2: movq 48(squarerax=%rax +movq 48(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,squarerax=int64#3 +# asm 2: movq 48(squarerax=%rdx +movq 48(%rsi),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(squarerax=int64#7 +# asm 2: movq 56(squarerax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 56) +# asm 1: mulq 56(squarerax=int64#3 +# asm 2: movq 56(squarerax=%rdx +movq 56(%rsi),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(squarerax=int64#3 +# asm 2: movq 56(squarerax=%rdx +movq 56(%rsi),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(squarerax=int64#3 +# asm 2: movq 64(squarerax=%rdx +movq 64(%rsi),%rdx + +# qhasm: squarerax *= 19 +# asm 1: imulq $19,squarerax=int64#7 +# asm 2: imulq $19,squarerax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(squarerax=int64#3 +# asm 2: movq 64(squarerax=%rdx +movq 64(%rsi),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(squarerax=int64#3 +# asm 2: movq 72(squarerax=%rdx +movq 72(%rsi),%rdx + +# qhasm: squarerax *= 19 +# asm 1: imulq $19,squarerax=int64#7 +# asm 2: imulq $19,squarerax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * 
*(uint64 *)(pp + 72) +# asm 1: mulq 72(squareredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: squarer01 = (squarer01.b0) << 13 +# asm 1: shld $13,squarer41=int64#5 +# asm 2: imulq $19,squarer41=%r8 +imulq $19,%rbx,%r8 + +# qhasm: b0 += squarer41 +# asm 1: add squaret=int64#5 +# asm 2: mov squaret=%r8 +mov %rcx,%r8 + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,b1=int64#6 +# asm 2: mov b1=%r9 +mov %r8,%r9 + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,b2=int64#7 +# asm 2: mov b2=%rax +mov %r8,%rax + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,b3=int64#8 +# asm 2: mov b3=%r10 +mov %r8,%r10 + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,b4=int64#9 +# asm 2: mov b4=%r11 +mov %r8,%r11 + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,squaret=int64#5 +# asm 2: imulq $19,squaret=%r8 +imulq $19,%r8,%r8 + +# qhasm: b0 += squaret +# asm 1: add b0_stack=stack64#13 +# asm 2: movq b0_stack=96(%rsp) +movq %rcx,96(%rsp) + +# qhasm: b1_stack = b1 +# asm 1: movq b1_stack=stack64#14 +# asm 2: movq b1_stack=104(%rsp) +movq %r9,104(%rsp) + +# qhasm: b2_stack = b2 +# asm 1: movq b2_stack=stack64#15 +# asm 2: movq b2_stack=112(%rsp) +movq %rax,112(%rsp) + +# qhasm: b3_stack = b3 +# asm 1: movq b3_stack=stack64#16 +# asm 2: movq b3_stack=120(%rsp) +movq %r10,120(%rsp) + +# qhasm: b4_stack = b4 +# asm 1: movq b4_stack=stack64#17 +# asm 2: movq b4_stack=128(%rsp) +movq %r11,128(%rsp) + +# qhasm: squarerax = *(uint64 *)(pp + 80) +# asm 1: movq 80(squarerax=int64#7 +# asm 2: movq 80(squarerax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 80) +# asm 1: mulq 80(c0=int64#4 +# asm 2: mov c0=%rcx +mov %rax,%rcx + +# qhasm: squarer01 = squarerdx +# asm 1: mov squarer01=int64#5 +# asm 2: mov squarer01=%r8 +mov %rdx,%r8 + +# qhasm: squarerax = *(uint64 *)(pp + 80) +# asm 1: movq 80(squarerax=int64#7 +# asm 2: movq 80(squarerax=%rax +movq 80(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,c1=int64#6 +# asm 2: mov c1=%r9 +mov %rax,%r9 + +# qhasm: squarer11 = squarerdx +# asm 1: mov squarer11=int64#8 +# asm 2: mov squarer11=%r10 +mov %rdx,%r10 + +# qhasm: squarerax = *(uint64 *)(pp + 80) +# asm 1: movq 80(squarerax=int64#7 +# asm 2: movq 80(squarerax=%rax +movq 80(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,c2=int64#9 +# asm 2: mov c2=%r11 +mov %rax,%r11 + +# qhasm: squarer21 = squarerdx +# asm 1: mov squarer21=int64#10 +# asm 2: mov squarer21=%r12 +mov %rdx,%r12 + +# qhasm: squarerax = *(uint64 *)(pp + 80) +# asm 1: movq 80(squarerax=int64#7 +# asm 2: movq 80(squarerax=%rax +movq 80(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,c3=int64#11 +# asm 2: mov c3=%r13 +mov %rax,%r13 + +# qhasm: squarer31 = squarerdx +# asm 1: mov squarer31=int64#12 +# asm 2: mov squarer31=%r14 +mov %rdx,%r14 + +# qhasm: squarerax = *(uint64 *)(pp + 80) +# asm 1: movq 80(squarerax=int64#7 +# asm 2: movq 80(squarerax=%rax +movq 80(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,c4=int64#13 +# asm 2: mov c4=%r15 +mov %rax,%r15 + +# qhasm: squarer41 = squarerdx +# asm 1: mov squarer41=int64#14 +# asm 2: mov squarer41=%rbx +mov %rdx,%rbx + +# qhasm: squarerax = *(uint64 *)(pp + 88) +# asm 1: movq 88(squarerax=int64#7 +# asm 2: movq 88(squarerax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 88) +# asm 1: mulq 88(squarerax=int64#7 +# asm 2: movq 88(squarerax=%rax 
+movq 88(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,squarerax=int64#7 +# asm 2: movq 88(squarerax=%rax +movq 88(%rsi),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,squarerax=int64#3 +# asm 2: movq 88(squarerax=%rdx +movq 88(%rsi),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 112) +# asm 1: mulq 112(squarerax=int64#7 +# asm 2: movq 96(squarerax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 96) +# asm 1: mulq 96(squarerax=int64#3 +# asm 2: movq 96(squarerax=%rdx +movq 96(%rsi),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 104) +# asm 1: mulq 104(squarerax=int64#3 +# asm 2: movq 96(squarerax=%rdx +movq 96(%rsi),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 112) +# asm 1: mulq 112(squarerax=int64#3 +# asm 2: movq 104(squarerax=%rdx +movq 104(%rsi),%rdx + +# qhasm: squarerax *= 19 +# asm 1: imulq $19,squarerax=int64#7 +# asm 2: imulq $19,squarerax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 104) +# asm 1: mulq 104(squarerax=int64#3 +# asm 2: movq 104(squarerax=%rdx +movq 104(%rsi),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 112) +# asm 1: mulq 112(squarerax=int64#3 +# asm 2: movq 112(squarerax=%rdx +movq 112(%rsi),%rdx + +# qhasm: squarerax *= 19 +# asm 1: imulq $19,squarerax=int64#7 +# asm 2: imulq $19,squarerax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 112) +# asm 1: mulq 112(squareredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: squarer01 = (squarer01.c0) << 13 +# asm 1: shld $13,squarer41=int64#5 +# asm 2: imulq $19,squarer41=%r8 +imulq $19,%rbx,%r8 + +# qhasm: c0 += squarer41 +# asm 1: add squaret=int64#5 +# asm 2: mov squaret=%r8 +mov %rcx,%r8 + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,c1=int64#6 +# asm 2: mov c1=%r9 +mov %r8,%r9 + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,c2=int64#7 +# asm 2: mov c2=%rax +mov %r8,%rax + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,c3=int64#8 +# asm 2: mov c3=%r10 +mov %r8,%r10 + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,c4=int64#9 +# asm 2: mov c4=%r11 +mov %r8,%r11 + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,squaret=int64#5 +# asm 2: imulq $19,squaret=%r8 +imulq $19,%r8,%r8 + +# qhasm: c0 += squaret +# asm 1: add c0_stack=stack64#18 +# asm 2: movq c0_stack=136(%rsp) +movq %rcx,136(%rsp) + +# qhasm: c1_stack = c1 +# asm 1: movq c1_stack=stack64#19 +# asm 2: movq c1_stack=144(%rsp) +movq %r9,144(%rsp) + +# qhasm: c2_stack = c2 +# asm 1: movq c2_stack=stack64#20 +# asm 2: movq c2_stack=152(%rsp) +movq %rax,152(%rsp) + +# qhasm: c3_stack = c3 +# asm 1: movq c3_stack=stack64#21 +# asm 2: movq c3_stack=160(%rsp) +movq %r10,160(%rsp) + +# qhasm: c4_stack = c4 +# asm 1: movq c4_stack=stack64#22 +# asm 2: movq 
c4_stack=168(%rsp) +movq %r11,168(%rsp) + +# qhasm: d0 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0 +# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,>d0=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,>d0=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdx + +# qhasm: d1 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234 +# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d1=int64#4 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d1=%rcx +movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx + +# qhasm: d2 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234 +# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d2=int64#5 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d2=%r8 +movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8 + +# qhasm: d3 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234 +# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d3=int64#6 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d3=%r9 +movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9 + +# qhasm: d4 = *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234 +# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d4=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,>d4=%rax +movq crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax + +# qhasm: e0 = d0 +# asm 1: mov e0=int64#8 +# asm 2: mov e0=%r10 +mov %rdx,%r10 + +# qhasm: e1 = d1 +# asm 1: mov e1=int64#9 +# asm 2: mov e1=%r11 +mov %rcx,%r11 + +# qhasm: e2 = d2 +# asm 1: mov e2=int64#10 +# asm 2: mov e2=%r12 +mov %r8,%r12 + +# qhasm: e3 = d3 +# asm 1: mov e3=int64#11 +# asm 2: mov e3=%r13 +mov %r9,%r13 + +# qhasm: e4 = d4 +# asm 1: mov e4=int64#12 +# asm 2: mov e4=%r14 +mov %rax,%r14 + +# qhasm: d0 -= a0_stack +# asm 1: subq d0_stack=stack64#8 +# asm 2: movq d0_stack=56(%rsp) +movq %rdx,56(%rsp) + +# qhasm: d1_stack = d1 +# asm 1: movq d1_stack=stack64#9 +# asm 2: movq d1_stack=64(%rsp) +movq %rcx,64(%rsp) + +# qhasm: d2_stack = d2 +# asm 1: movq d2_stack=stack64#10 +# asm 2: movq d2_stack=72(%rsp) +movq %r8,72(%rsp) + +# qhasm: d3_stack = d3 +# asm 1: movq d3_stack=stack64#11 +# asm 2: movq d3_stack=80(%rsp) +movq %r9,80(%rsp) + +# qhasm: d4_stack = d4 +# asm 1: movq d4_stack=stack64#12 +# asm 2: movq d4_stack=88(%rsp) +movq %rax,88(%rsp) + +# qhasm: e0_stack = e0 +# asm 1: movq e0_stack=stack64#23 +# asm 2: movq e0_stack=176(%rsp) +movq %r10,176(%rsp) + +# qhasm: e1_stack = e1 +# asm 1: movq e1_stack=stack64#24 +# asm 2: movq e1_stack=184(%rsp) +movq %r11,184(%rsp) + +# qhasm: e2_stack = e2 +# asm 1: movq e2_stack=stack64#25 +# asm 2: movq e2_stack=192(%rsp) +movq %r12,192(%rsp) + +# qhasm: e3_stack = e3 +# asm 1: movq e3_stack=stack64#26 +# asm 2: movq e3_stack=200(%rsp) +movq %r13,200(%rsp) + +# qhasm: e4_stack = e4 +# asm 1: movq e4_stack=stack64#27 +# asm 2: movq e4_stack=208(%rsp) +movq %r14,208(%rsp) + +# qhasm: rz0 = d0 +# asm 1: mov rz0=int64#8 +# asm 2: mov rz0=%r10 +mov %rdx,%r10 + +# qhasm: rz1 = d1 +# asm 1: mov rz1=int64#9 +# asm 2: mov rz1=%r11 +mov %rcx,%r11 + +# qhasm: rz2 = d2 +# asm 1: mov rz2=int64#10 +# asm 2: mov rz2=%r12 +mov %r8,%r12 + +# qhasm: rz3 = d3 +# asm 1: mov rz3=int64#11 +# asm 2: mov rz3=%r13 +mov %r9,%r13 + +# qhasm: rz4 = d4 +# asm 1: mov rz4=int64#12 +# asm 2: mov rz4=%r14 +mov %rax,%r14 + +# qhasm: rz0 += b0_stack +# asm 1: addq rx0=int64#3 +# asm 2: movq 0(rx0=%rdx +movq 0(%rsi),%rdx + +# qhasm: rx1 = *(uint64 *)(pp + 8) +# asm 1: movq 8(rx1=int64#4 +# 
asm 2: movq 8(rx1=%rcx +movq 8(%rsi),%rcx + +# qhasm: rx2 = *(uint64 *)(pp + 16) +# asm 1: movq 16(rx2=int64#5 +# asm 2: movq 16(rx2=%r8 +movq 16(%rsi),%r8 + +# qhasm: rx3 = *(uint64 *)(pp + 24) +# asm 1: movq 24(rx3=int64#6 +# asm 2: movq 24(rx3=%r9 +movq 24(%rsi),%r9 + +# qhasm: rx4 = *(uint64 *)(pp + 32) +# asm 1: movq 32(rx4=int64#7 +# asm 2: movq 32(rx4=%rax +movq 32(%rsi),%rax + +# qhasm: rx0 += *(uint64 *)(pp + 40) +# asm 1: addq 40(rx0_stack=stack64#13 +# asm 2: movq rx0_stack=96(%rsp) +movq %rdx,96(%rsp) + +# qhasm: rx1_stack = rx1 +# asm 1: movq rx1_stack=stack64#14 +# asm 2: movq rx1_stack=104(%rsp) +movq %rcx,104(%rsp) + +# qhasm: rx2_stack = rx2 +# asm 1: movq rx2_stack=stack64#15 +# asm 2: movq rx2_stack=112(%rsp) +movq %r8,112(%rsp) + +# qhasm: rx3_stack = rx3 +# asm 1: movq rx3_stack=stack64#16 +# asm 2: movq rx3_stack=120(%rsp) +movq %r9,120(%rsp) + +# qhasm: rx4_stack = rx4 +# asm 1: movq rx4_stack=stack64#17 +# asm 2: movq rx4_stack=128(%rsp) +movq %rax,128(%rsp) + +# qhasm: squarerax = rx0_stack +# asm 1: movq squarerax=int64#7 +# asm 2: movq squarerax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx0_stack +# asm 1: mulq rx0=int64#2 +# asm 2: mov rx0=%rsi +mov %rax,%rsi + +# qhasm: squarer01 = squarerdx +# asm 1: mov squarer01=int64#4 +# asm 2: mov squarer01=%rcx +mov %rdx,%rcx + +# qhasm: squarerax = rx0_stack +# asm 1: movq squarerax=int64#7 +# asm 2: movq squarerax=%rax +movq 96(%rsp),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,rx1=int64#5 +# asm 2: mov rx1=%r8 +mov %rax,%r8 + +# qhasm: squarer11 = squarerdx +# asm 1: mov squarer11=int64#6 +# asm 2: mov squarer11=%r9 +mov %rdx,%r9 + +# qhasm: squarerax = rx0_stack +# asm 1: movq squarerax=int64#7 +# asm 2: movq squarerax=%rax +movq 96(%rsp),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,rx2=int64#8 +# asm 2: mov rx2=%r10 +mov %rax,%r10 + +# qhasm: squarer21 = squarerdx +# asm 1: mov squarer21=int64#9 +# asm 2: mov squarer21=%r11 +mov %rdx,%r11 + +# qhasm: squarerax = rx0_stack +# asm 1: movq squarerax=int64#7 +# asm 2: movq squarerax=%rax +movq 96(%rsp),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,rx3=int64#10 +# asm 2: mov rx3=%r12 +mov %rax,%r12 + +# qhasm: squarer31 = squarerdx +# asm 1: mov squarer31=int64#11 +# asm 2: mov squarer31=%r13 +mov %rdx,%r13 + +# qhasm: squarerax = rx0_stack +# asm 1: movq squarerax=int64#7 +# asm 2: movq squarerax=%rax +movq 96(%rsp),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,rx4=int64#12 +# asm 2: mov rx4=%r14 +mov %rax,%r14 + +# qhasm: squarer41 = squarerdx +# asm 1: mov squarer41=int64#13 +# asm 2: mov squarer41=%r15 +mov %rdx,%r15 + +# qhasm: squarerax = rx1_stack +# asm 1: movq squarerax=int64#7 +# asm 2: movq squarerax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx1_stack +# asm 1: mulq squarerax=int64#7 +# asm 2: movq squarerax=%rax +movq 104(%rsp),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,squarerax=int64#7 +# asm 2: movq squarerax=%rax +movq 104(%rsp),%rax + +# qhasm: squarerax <<= 1 +# asm 1: shl $1,squarerax=int64#3 +# asm 2: movq squarerax=%rdx +movq 104(%rsp),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx4_stack +# asm 1: mulq squarerax=int64#7 +# asm 2: movq squarerax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx2_stack +# asm 1: mulq squarerax=int64#3 +# asm 2: movq squarerax=%rdx +movq 
112(%rsp),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx3_stack +# asm 1: mulq squarerax=int64#3 +# asm 2: movq squarerax=%rdx +movq 112(%rsp),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx4_stack +# asm 1: mulq squarerax=int64#3 +# asm 2: movq squarerax=%rdx +movq 120(%rsp),%rdx + +# qhasm: squarerax *= 19 +# asm 1: imulq $19,squarerax=int64#7 +# asm 2: imulq $19,squarerax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx3_stack +# asm 1: mulq squarerax=int64#3 +# asm 2: movq squarerax=%rdx +movq 120(%rsp),%rdx + +# qhasm: squarerax *= 38 +# asm 1: imulq $38,squarerax=int64#7 +# asm 2: imulq $38,squarerax=%rax +imulq $38,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx4_stack +# asm 1: mulq squarerax=int64#3 +# asm 2: movq squarerax=%rdx +movq 128(%rsp),%rdx + +# qhasm: squarerax *= 19 +# asm 1: imulq $19,squarerax=int64#7 +# asm 2: imulq $19,squarerax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx4_stack +# asm 1: mulq squareredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>squareredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: squarer01 = (squarer01.rx0) << 13 +# asm 1: shld $13,squarer41=int64#4 +# asm 2: imulq $19,squarer41=%rcx +imulq $19,%r15,%rcx + +# qhasm: rx0 += squarer41 +# asm 1: add squaret=int64#4 +# asm 2: mov squaret=%rcx +mov %rsi,%rcx + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,rx1=int64#5 +# asm 2: mov rx1=%r8 +mov %rcx,%r8 + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,rx2=int64#6 +# asm 2: mov rx2=%r9 +mov %rcx,%r9 + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,rx3=int64#7 +# asm 2: mov rx3=%rax +mov %rcx,%rax + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,rx4=int64#8 +# asm 2: mov rx4=%r10 +mov %rcx,%r10 + +# qhasm: (uint64) squaret >>= 51 +# asm 1: shr $51,squaret=int64#4 +# asm 2: imulq $19,squaret=%rcx +imulq $19,%rcx,%rcx + +# qhasm: rx0 += squaret +# asm 1: add caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-51-30k/ge25519_double.c b/src/ed25519-supercop-amd64-51-30k/ge25519_double.c new file mode 100644 index 0000000..d55e2b4 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/ge25519_double.c @@ -0,0 +1,8 @@ +#include "ge25519.h" + +void ge25519_double(ge25519_p3 *r, const ge25519_p3 *p) +{ + ge25519_p1p1 grp1p1; + ge25519_dbl_p1p1(&grp1p1, (ge25519_p2 *)p); + ge25519_p1p1_to_p3(r, &grp1p1); +} 
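ge25519_dbl_p1p1 above returns the doubled point in "completed" (P1P1) form, and ge25519_p1p1_to_p3 pays a few extra field multiplications to convert it back to extended (P3) coordinates; ge25519_p1p1_to_p2 is cheaper because a P2 point carries no T coordinate, which is why the scalar-multiplication loops later in this patch end each iteration with a P2 conversion. A minimal caller-side sketch in C, assuming only the wrapper defined above (ge25519_times8 is a hypothetical name, not part of the SUPERCOP import):

#include "ge25519.h"

/* Hypothetical example, not from SUPERCOP: r = 8*p via three doublings.
   Aliasing r == p is safe because ge25519_double buffers the completed
   point in a local ge25519_p1p1 before writing r; the in-place calls
   ge25519_double(&d,&d) in ge25519_multi_scalarmult.c rely on the same
   property. */
static void ge25519_times8(ge25519_p3 *r, const ge25519_p3 *p)
{
  ge25519_double(r, p);   /* r = 2*p */
  ge25519_double(r, r);   /* r = 4*p */
  ge25519_double(r, r);   /* r = 8*p */
}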
diff --git a/src/ed25519-supercop-amd64-51-30k/ge25519_double_scalarmult.c b/src/ed25519-supercop-amd64-51-30k/ge25519_double_scalarmult.c
new file mode 100644
index 0000000..54ec027
--- /dev/null
+++ b/src/ed25519-supercop-amd64-51-30k/ge25519_double_scalarmult.c
@@ -0,0 +1,97 @@
+#include "fe25519.h"
+#include "sc25519.h"
+#include "ge25519.h"
+
+#define S1_SWINDOWSIZE 5
+#define PRE1_SIZE (1<<(S1_SWINDOWSIZE-2))
+#define S2_SWINDOWSIZE 7
+#define PRE2_SIZE (1<<(S2_SWINDOWSIZE-2))
+
+ge25519_niels pre2[PRE2_SIZE] = {
+#include "ge25519_base_slide_multiples.data"
+};
+
+static const fe25519 ec2d = {{1859910466990425, 932731440258426, 1072319116312658, 1815898335770999, 633789495995903}};
+
+static void setneutral(ge25519 *r)
+{
+  fe25519_setint(&r->x,0);
+  fe25519_setint(&r->y,1);
+  fe25519_setint(&r->z,1);
+  fe25519_setint(&r->t,0);
+}
+
+/* computes [s1]p1 + [s2]p2 */
+void ge25519_double_scalarmult_vartime(ge25519_p3 *r, const ge25519_p3 *p1, const sc25519 *s1, const sc25519 *s2)
+{
+  signed char slide1[256], slide2[256];
+  ge25519_pniels pre1[PRE1_SIZE], neg;
+  ge25519_p3 d1;
+  ge25519_p1p1 t;
+  ge25519_niels nneg;
+  fe25519 d;
+  int i;
+
+  sc25519_slide(slide1, s1, S1_SWINDOWSIZE);
+  sc25519_slide(slide2, s2, S2_SWINDOWSIZE);
+
+  /* precomputation */
+  pre1[0] = *(ge25519_pniels *)p1;
+  ge25519_dbl_p1p1(&t,(ge25519_p2 *)pre1); ge25519_p1p1_to_p3(&d1, &t);
+  /* Convert pre[0] to projective Niels representation */
+  d = pre1[0].ysubx;
+  fe25519_sub(&pre1[0].ysubx, &pre1[0].xaddy, &pre1[0].ysubx);
+  fe25519_add(&pre1[0].xaddy, &pre1[0].xaddy, &d);
+  fe25519_mul(&pre1[0].t2d, &pre1[0].t2d, &ec2d);
+
+  for(i=0;i<PRE1_SIZE-1;i++)
+  {
+    /* pre1[i+1] = (2(i+1)+1)*p1 = d1 + pre1[i], built in P3 form */
+    ge25519_pnielsadd_p1p1(&t, &d1, &pre1[i]); ge25519_p1p1_to_p3((ge25519_p3 *)&pre1[i+1], &t);
+    /* then converted to projective Niels representation, as for pre1[0] */
+    d = pre1[i+1].ysubx;
+    fe25519_sub(&pre1[i+1].ysubx, &pre1[i+1].xaddy, &pre1[i+1].ysubx);
+    fe25519_add(&pre1[i+1].xaddy, &pre1[i+1].xaddy, &d);
+    fe25519_mul(&pre1[i+1].t2d, &pre1[i+1].t2d, &ec2d);
+  }
+
+  setneutral(r);
+
+  for (i = 255;i >= 0;--i) {
+    if (slide1[i] || slide2[i]) goto firstbit;
+  }
+
+  for(;i>=0;i--)
+  {
+  firstbit:
+
+    ge25519_dbl_p1p1(&t, (ge25519_p2 *)r);
+
+    if(slide1[i]>0)
+    {
+      ge25519_p1p1_to_p3(r, &t);
+      ge25519_pnielsadd_p1p1(&t, r, &pre1[slide1[i]/2]);
+    }
+    else if(slide1[i]<0)
+    {
+      ge25519_p1p1_to_p3(r, &t);
+      neg = pre1[-slide1[i]/2];
+      d = neg.ysubx;
+      neg.ysubx = neg.xaddy;
+      neg.xaddy = d;
+      fe25519_neg(&neg.t2d, &neg.t2d);
+      ge25519_pnielsadd_p1p1(&t, r, &neg);
+    }
+
+    if(slide2[i]>0)
+    {
+      ge25519_p1p1_to_p3(r, &t);
+      ge25519_nielsadd_p1p1(&t, r, &pre2[slide2[i]/2]);
+    }
+    else if(slide2[i]<0)
+    {
+      ge25519_p1p1_to_p3(r, &t);
+      nneg = pre2[-slide2[i]/2];
+      d = nneg.ysubx;
+      nneg.ysubx = nneg.xaddy;
+      nneg.xaddy = d;
+      fe25519_neg(&nneg.t2d, &nneg.t2d);
+      ge25519_nielsadd_p1p1(&t, r, &nneg);
+    }
+
+    ge25519_p1p1_to_p2((ge25519_p2 *)r, &t);
+  }
+}
diff --git a/src/ed25519-supercop-amd64-51-30k/ge25519_isneutral.c b/src/ed25519-supercop-amd64-51-30k/ge25519_isneutral.c
new file mode 100644
index 0000000..cf566db
--- /dev/null
+++ b/src/ed25519-supercop-amd64-51-30k/ge25519_isneutral.c
@@ -0,0 +1,9 @@
+#include "fe25519.h"
+#include "ge25519.h"
+
+int ge25519_isneutral_vartime(const ge25519_p3 *p)
+{
+  if(!fe25519_iszero_vartime(&p->x)) return 0;
+  if(!fe25519_iseq_vartime(&p->y, &p->z)) return 0;
+  return 1;
+}
diff --git a/src/ed25519-supercop-amd64-51-30k/ge25519_multi_scalarmult.c b/src/ed25519-supercop-amd64-51-30k/ge25519_multi_scalarmult.c
new file mode 100644
index 0000000..afc6aea
--- /dev/null
+++ b/src/ed25519-supercop-amd64-51-30k/ge25519_multi_scalarmult.c
@@ -0,0 +1,102 @@
+#include "fe25519.h"
+#include "sc25519.h"
+#include "ge25519.h"
+#include "index_heap.h"
+
+static void setneutral(ge25519 *r)
+{
+  fe25519_setint(&r->x,0);
+  fe25519_setint(&r->y,1);
+  fe25519_setint(&r->z,1);
+  fe25519_setint(&r->t,0);
+}
+
+static void ge25519_scalarmult_vartime_2limbs(ge25519 *r,
ge25519 *p, sc25519 *s) +{ + if (s->v[1] == 0 && s->v[0] == 1) /* This will happen most of the time after Bos-Coster */ + *r = *p; + else if (s->v[1] == 0 && s->v[0] == 0) /* This won't ever happen, except for all scalars == 0 in Bos-Coster */ + setneutral(r); + else + { + ge25519 d; + unsigned long long mask = (1ULL << 63); + int i = 1; + while(!(mask & s->v[1]) && mask != 0) + mask >>= 1; + if(mask == 0) + { + mask = (1ULL << 63); + i = 0; + while(!(mask & s->v[0]) && mask != 0) + mask >>= 1; + } + d = *p; + mask >>= 1; + for(;mask != 0;mask >>= 1) + { + ge25519_double(&d,&d); + if(s->v[i] & mask) + ge25519_add(&d,&d,p); + } + if(i==1) + { + mask = (1ULL << 63); + for(;mask != 0;mask >>= 1) + { + ge25519_double(&d,&d); + if(s->v[0] & mask) + ge25519_add(&d,&d,p); + } + } + *r = d; + } +} + +/* caller's responsibility to ensure npoints >= 5 */ +void ge25519_multi_scalarmult_vartime(ge25519_p3 *r, ge25519_p3 *p, sc25519 *s, const unsigned long long npoints) +{ + unsigned long long pos[npoints]; + unsigned long long hlen=((npoints+1)/2)|1; + unsigned long long max1, max2,i; + + heap_init(pos, hlen, s); + + for(i=0;;i++) + { + heap_get2max(pos, &max1, &max2, s); + if((s[max1].v[3] == 0) || (sc25519_iszero_vartime(&s[max2]))) break; + sc25519_sub_nored(&s[max1],&s[max1],&s[max2]); + ge25519_add(&p[max2],&p[max2],&p[max1]); + heap_rootreplaced(pos, hlen, s); + } + for(;;i++) + { + heap_get2max(pos, &max1, &max2, s); + if((s[max1].v[2] == 0) || (sc25519_iszero_vartime(&s[max2]))) break; + sc25519_sub_nored(&s[max1],&s[max1],&s[max2]); + ge25519_add(&p[max2],&p[max2],&p[max1]); + heap_rootreplaced_3limbs(pos, hlen, s); + } + /* We know that (npoints-1)/2 scalars are only 128-bit scalars */ + heap_extend(pos, hlen, npoints, s); + hlen = npoints; + for(;;i++) + { + heap_get2max(pos, &max1, &max2, s); + if((s[max1].v[1] == 0) || (sc25519_iszero_vartime(&s[max2]))) break; + sc25519_sub_nored(&s[max1],&s[max1],&s[max2]); + ge25519_add(&p[max2],&p[max2],&p[max1]); + heap_rootreplaced_2limbs(pos, hlen, s); + } + for(;;i++) + { + heap_get2max(pos, &max1, &max2, s); + if(sc25519_iszero_vartime(&s[max2])) break; + sc25519_sub_nored(&s[max1],&s[max1],&s[max2]); + ge25519_add(&p[max2],&p[max2],&p[max1]); + heap_rootreplaced_1limb(pos, hlen, s); + } + + ge25519_scalarmult_vartime_2limbs(r, &p[max1], &s[max1]); +} diff --git a/src/ed25519-supercop-amd64-51-30k/ge25519_nielsadd2.s b/src/ed25519-supercop-amd64-51-30k/ge25519_nielsadd2.s new file mode 100644 index 0000000..8b94f2b --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/ge25519_nielsadd2.s @@ -0,0 +1,6152 @@ + +# qhasm: int64 rp + +# qhasm: int64 qp + +# qhasm: input rp + +# qhasm: input qp + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 a0 + +# qhasm: int64 a1 + +# qhasm: int64 a2 + +# qhasm: int64 a3 + +# qhasm: int64 a4 + +# qhasm: stack64 a0_stack + +# qhasm: stack64 a1_stack + +# qhasm: stack64 a2_stack + +# qhasm: stack64 a3_stack + +# qhasm: stack64 a4_stack + +# qhasm: 
diff --git a/src/ed25519-supercop-amd64-51-30k/ge25519_nielsadd2.s b/src/ed25519-supercop-amd64-51-30k/ge25519_nielsadd2.s
new file mode 100644
index 0000000..8b94f2b
--- /dev/null
+++ b/src/ed25519-supercop-amd64-51-30k/ge25519_nielsadd2.s
@@ -0,0 +1,6152 @@
+
+# qhasm: int64 rp
+
+# qhasm: int64 qp
+
+# qhasm: input rp
+
+# qhasm: input qp
+
+# qhasm: int64 caller1
+
+# qhasm: int64 caller2
+
+# qhasm: int64 caller3
+
+# qhasm: int64 caller4
+
+# qhasm: int64 caller5
+
+# qhasm: int64 caller6
+
+# qhasm: int64 caller7
+
+# qhasm: caller caller1
+
+# qhasm: caller caller2
+
+# qhasm: caller caller3
+
+# qhasm: caller caller4
+
+# qhasm: caller caller5
+
+# qhasm: caller caller6
+
+# qhasm: caller caller7
+
+# qhasm: stack64 caller1_stack
+
+# qhasm: stack64 caller2_stack
+
+# qhasm: stack64 caller3_stack
+
+# qhasm: stack64 caller4_stack
+
+# qhasm: stack64 caller5_stack
+
+# qhasm: stack64 caller6_stack
+
+# qhasm: stack64 caller7_stack
+
+# qhasm: int64 a0
+
+# qhasm: int64 a1
+
+# qhasm: int64 a2
+
+# qhasm: int64 a3
+
+# qhasm: int64 a4
+
+# qhasm: stack64 a0_stack
+
+# qhasm: stack64 a1_stack
+
+# qhasm: stack64 a2_stack
+
+# qhasm: stack64 a3_stack
+
+# qhasm: stack64 a4_stack
+
+# qhasm: int64 b0
+
+# qhasm: int64 b1
+
+# qhasm: int64 b2
+
+# qhasm: int64 b3
+
+# qhasm: int64 b4
+
+# qhasm: stack64 b0_stack
+
+# qhasm: stack64 b1_stack
+
+# qhasm: stack64 b2_stack
+
+# qhasm: stack64 b3_stack
+
+# qhasm: stack64 b4_stack
+
+# qhasm: int64 c0
+
+# qhasm: int64 c1
+
+# qhasm: int64 c2
+
+# qhasm: int64 c3
+
+# qhasm: int64 c4
+
+# qhasm: stack64 c0_stack
+
+# qhasm: stack64 c1_stack
+
+# qhasm: stack64 c2_stack
+
+# qhasm: stack64 c3_stack
+
+# qhasm: stack64 c4_stack
+
+# qhasm: int64 d0
+
+# qhasm: int64 d1
+
+# qhasm: int64 d2
+
+# qhasm: int64 d3
+
+# qhasm: int64 d4
+
+# qhasm: stack64 d0_stack
+
+# qhasm: stack64 d1_stack
+
+# qhasm: stack64 d2_stack
+
+# qhasm: stack64 d3_stack
+
+# qhasm: stack64 d4_stack
+
+# qhasm: int64 e0
+
+# qhasm: int64 e1
+
+# qhasm: int64 e2
+
+# qhasm: int64 e3
+
+# qhasm: int64 e4
+
+# qhasm: stack64 e0_stack
+
+# qhasm: stack64 e1_stack
+
+# qhasm: stack64 e2_stack
+
+# qhasm: stack64 e3_stack
+
+# qhasm: stack64 e4_stack
+
+# qhasm: int64 f0
+
+# qhasm: int64 f1
+
+# qhasm: int64 f2
+
+# qhasm: int64 f3
+
+# qhasm: int64 f4
+
+# qhasm: stack64 f0_stack
+
+# qhasm: stack64 f1_stack
+
+# qhasm: stack64 f2_stack
+
+# qhasm: stack64 f3_stack
+
+# qhasm: stack64 f4_stack
+
+# qhasm: int64 g0
+
+# qhasm: int64 g1
+
+# qhasm: int64 g2
+
+# qhasm: int64 g3
+
+# qhasm: int64 g4
+
+# qhasm: stack64 g0_stack
+
+# qhasm: stack64 g1_stack
+
+# qhasm: stack64 g2_stack
+
+# qhasm: stack64 g3_stack
+
+# qhasm: stack64 g4_stack
+
+# qhasm: int64 h0
+
+# qhasm: int64 h1
+
+# qhasm: int64 h2
+
+# qhasm: int64 h3
+
+# qhasm: int64 h4
+
+# qhasm: stack64 h0_stack
+
+# qhasm: stack64 h1_stack
+
+# qhasm: stack64 h2_stack
+
+# qhasm: stack64 h3_stack
+
+# qhasm: stack64 h4_stack
+
+# qhasm: int64 qt0
+
+# qhasm: int64 qt1
+
+# qhasm: int64 qt2
+
+# qhasm: int64 qt3
+
+# qhasm: int64 qt4
+
+# qhasm: stack64 qt0_stack
+
+# qhasm: stack64 qt1_stack
+
+# qhasm: stack64 qt2_stack
+
+# qhasm: stack64 qt3_stack
+
+# qhasm: stack64 qt4_stack
+
+# qhasm: int64 t10
+
+# qhasm: int64 t11
+
+# qhasm: int64 t12
+
+# qhasm: int64 t13
+
+# qhasm: int64 t14
+
+# qhasm: stack64 t10_stack
+
+# qhasm: stack64 t11_stack
+
+# qhasm: stack64 t12_stack
+
+# qhasm: stack64 t13_stack
+
+# qhasm: stack64 t14_stack
+
+# qhasm: int64 t20
+
+# qhasm: int64 t21
+
+# qhasm: int64 t22
+
+# qhasm: int64 t23
+
+# qhasm: int64 t24
+
+# qhasm: stack64 t20_stack
+
+# qhasm: stack64 t21_stack
+
+# qhasm: stack64 t22_stack
+
+# qhasm: stack64 t23_stack
+
+# qhasm: stack64 t24_stack
+
+# qhasm: int64 rx0
+
+# qhasm: int64 rx1
+
+# qhasm: int64 rx2
+
+# qhasm: int64 rx3
+
+# qhasm: int64 rx4
+
+# qhasm: int64 ry0
+
+# qhasm: int64 ry1
+
+# qhasm: int64 ry2
+
+# qhasm: int64 ry3
+
+# qhasm: int64 ry4
+
+# qhasm: int64 rz0
+
+# qhasm: int64 rz1
+
+# qhasm: int64 rz2
+
+# qhasm: int64 rz3
+
+# qhasm: int64 rz4
+
+# qhasm: int64 rt0
+
+# qhasm: int64 rt1
+
+# qhasm: int64 rt2
+
+# qhasm: int64 rt3
+
+# qhasm: int64 rt4
+
+# qhasm: int64 mulr01
+
+# qhasm: int64 mulr11
+
+# qhasm: int64 mulr21
+
+# qhasm: int64 mulr31
+
+# qhasm: int64 mulr41
+
+# qhasm: int64 mulrax
+
+# qhasm: int64 mulrdx
+
+# qhasm: int64 mult
+
+# qhasm: int64 mulredmask
+
+# qhasm: stack64 mulx219_stack
+
+# qhasm: stack64 mulx319_stack
+
+# qhasm: stack64 mulx419_stack
+
+# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd2
+.text
+.p2align 5
+.globl _crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd2
+.globl crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd2
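+# Note (annotation added for readers; not in the SUPERCOP original):
+# ge25519_nielsadd2(r, q) adds a precomputed point q, stored in Niels
+# form as (ysubx, xaddy, t2d), into r in place, following the standard
+# twisted-Edwards mixed-addition formulas
+#   A = (Y1-X1)*ysubx   B = (Y1+X1)*xaddy   C = T1*t2d   D = Z1+Z1
+#   E = B-A   F = D-C   G = D+C   H = B+A
+#   X3 = E*F  Y3 = G*H  Z3 = F*G  T3 = E*H
+# Field elements are five limbs in radix 2^51: REDMASK51 is 2^51-1,
+# the 2P0/2P1234 constants are added before limb subtractions to keep
+# limbs non-negative, and the imulq $19 steps fold overflow past 2^255
+# back in via 2^255 = 19 (mod 2^255-19).
+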
+_crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd2: +crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd2: +mov %rsp,%r11 +and $31,%r11 +add $256,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: a0 = *(uint64 *)(rp + 40) +# asm 1: movq 40(a0=int64#3 +# asm 2: movq 40(a0=%rdx +movq 40(%rdi),%rdx + +# qhasm: a1 = *(uint64 *)(rp + 48) +# asm 1: movq 48(a1=int64#4 +# asm 2: movq 48(a1=%rcx +movq 48(%rdi),%rcx + +# qhasm: a2 = *(uint64 *)(rp + 56) +# asm 1: movq 56(a2=int64#5 +# asm 2: movq 56(a2=%r8 +movq 56(%rdi),%r8 + +# qhasm: a3 = *(uint64 *)(rp + 64) +# asm 1: movq 64(a3=int64#6 +# asm 2: movq 64(a3=%r9 +movq 64(%rdi),%r9 + +# qhasm: a4 = *(uint64 *)(rp + 72) +# asm 1: movq 72(a4=int64#7 +# asm 2: movq 72(a4=%rax +movq 72(%rdi),%rax + +# qhasm: b0 = a0 +# asm 1: mov b0=int64#8 +# asm 2: mov b0=%r10 +mov %rdx,%r10 + +# qhasm: b1 = a1 +# asm 1: mov b1=int64#9 +# asm 2: mov b1=%r11 +mov %rcx,%r11 + +# qhasm: b2 = a2 +# asm 1: mov b2=int64#10 +# asm 2: mov b2=%r12 +mov %r8,%r12 + +# qhasm: b3 = a3 +# asm 1: mov b3=int64#11 +# asm 2: mov b3=%r13 +mov %r9,%r13 + +# qhasm: b4 = a4 +# asm 1: mov b4=int64#12 +# asm 2: mov b4=%r14 +mov %rax,%r14 + +# qhasm: a0 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0 +# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,a0_stack=stack64#8 +# asm 2: movq a0_stack=56(%rsp) +movq %rdx,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq a1_stack=stack64#9 +# asm 2: movq a1_stack=64(%rsp) +movq %rcx,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq a2_stack=stack64#10 +# asm 2: movq a2_stack=72(%rsp) +movq %r8,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq a3_stack=stack64#11 +# asm 2: movq a3_stack=80(%rsp) +movq %r9,80(%rsp) + +# qhasm: a4_stack = a4 +# asm 1: movq a4_stack=stack64#12 +# asm 2: movq a4_stack=88(%rsp) +movq %rax,88(%rsp) + +# qhasm: b0_stack = b0 +# asm 1: movq b0_stack=stack64#13 +# asm 2: movq b0_stack=96(%rsp) +movq %r10,96(%rsp) + +# qhasm: b1_stack = b1 +# asm 1: movq b1_stack=stack64#14 +# asm 2: movq b1_stack=104(%rsp) +movq %r11,104(%rsp) + +# qhasm: b2_stack = b2 +# asm 1: movq b2_stack=stack64#15 +# asm 2: movq b2_stack=112(%rsp) +movq %r12,112(%rsp) + +# qhasm: b3_stack = b3 +# asm 1: movq b3_stack=stack64#16 +# asm 2: movq b3_stack=120(%rsp) +movq %r13,120(%rsp) + +# qhasm: b4_stack = b4 +# asm 1: movq b4_stack=stack64#17 +# asm 2: movq b4_stack=128(%rsp) +movq %r14,128(%rsp) + +# qhasm: mulrax = a3_stack +# asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 80(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: 
mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#18 +# asm 2: movq mulx319_stack=136(%rsp) +movq %rax,136(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 16) +# asm 1: mulq 16(a0=int64#4 +# asm 2: mov a0=%rcx +mov %rax,%rcx + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#5 +# asm 2: mov mulr01=%r8 +mov %rdx,%r8 + +# qhasm: mulrax = a4_stack +# asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 88(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#19 +# asm 2: movq mulx419_stack=144(%rsp) +movq %rax,144(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 8) +# asm 1: mulq 8(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 0) +# asm 1: mulq 0(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 8) +# asm 1: mulq 8(a1=int64#6 +# asm 2: mov a1=%r9 +mov %rax,%r9 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#8 +# asm 2: mov mulr11=%r10 +mov %rdx,%r10 + +# qhasm: mulrax = a0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 16) +# asm 1: mulq 16(a2=int64#9 +# asm 2: mov a2=%r11 +mov %rax,%r11 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#10 +# asm 2: mov mulr21=%r12 +mov %rdx,%r12 + +# qhasm: mulrax = a0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 24) +# asm 1: mulq 24(a3=int64#11 +# asm 2: mov a3=%r13 +mov %rax,%r13 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#12 +# asm 2: mov mulr31=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = a0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 32) +# asm 1: mulq 32(a4=int64#13 +# asm 2: mov a4=%r15 +mov %rax,%r15 + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#14 +# asm 2: mov mulr41=%rbx +mov %rdx,%rbx + +# qhasm: mulrax = a1_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 0) +# asm 1: mulq 0(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 8) +# asm 1: mulq 8(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 16) +# asm 1: mulq 16(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 24) +# asm 1: mulq 24(mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 64(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 32) +# asm 1: mulq 32(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 0) +# asm 1: mulq 0(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 8) +# asm 1: mulq 8(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 
*)(qp + 16) +# asm 1: mulq 16(mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 72(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 24) +# asm 1: mulq 24(mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 72(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 32) +# asm 1: mulq 32(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 0) +# asm 1: mulq 0(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 8) +# asm 1: mulq 8(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 24) +# asm 1: mulq 24(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 32) +# asm 1: mulq 32(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 88(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 0) +# asm 1: mulq 0(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 16) +# asm 1: mulq 16(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 24) +# asm 1: mulq 24(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 32) +# asm 1: mulq 32(mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.a0) << 13 +# asm 1: shld $13,mulr41=int64#5 +# asm 2: imulq $19,mulr41=%r8 +imulq $19,%rbx,%r8 + +# qhasm: a0 += mulr41 +# asm 1: add mult=int64#5 +# asm 2: mov mult=%r8 +mov %rcx,%r8 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,a1=int64#6 +# asm 2: mov a1=%r9 +mov %r8,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,a2=int64#7 +# asm 2: mov a2=%rax +mov %r8,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,a3=int64#8 +# asm 2: mov a3=%r10 +mov %r8,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,a4=int64#9 +# asm 2: mov a4=%r11 +mov %r8,%r11 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#5 +# asm 2: imulq $19,mult=%r8 +imulq $19,%r8,%r8 + +# qhasm: a0 += mult +# asm 1: add a0_stack=stack64#8 +# asm 2: movq a0_stack=56(%rsp) +movq %rcx,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq a1_stack=stack64#9 +# asm 2: movq a1_stack=64(%rsp) +movq %r9,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq a2_stack=stack64#10 +# asm 2: movq a2_stack=72(%rsp) +movq %rax,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq a3_stack=stack64#11 +# asm 2: movq a3_stack=80(%rsp) +movq %r10,80(%rsp) + +# qhasm: a4_stack = a4 +# asm 1: movq a4_stack=stack64#12 +# asm 2: movq a4_stack=88(%rsp) +movq %r11,88(%rsp) + +# qhasm: mulrax = b3_stack +# asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 120(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#18 +# asm 2: movq mulx319_stack=136(%rsp) +movq %rax,136(%rsp) + +# qhasm: (uint128) 
mulrdx mulrax = mulrax * *(uint64 *)(qp + 56) +# asm 1: mulq 56(e0=int64#4 +# asm 2: mov e0=%rcx +mov %rax,%rcx + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#5 +# asm 2: mov mulr01=%r8 +mov %rdx,%r8 + +# qhasm: mulrax = b4_stack +# asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 128(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#19 +# asm 2: movq mulx419_stack=144(%rsp) +movq %rax,144(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 48) +# asm 1: mulq 48(e1=int64#6 +# asm 2: mov e1=%r9 +mov %rax,%r9 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#8 +# asm 2: mov mulr11=%r10 +mov %rdx,%r10 + +# qhasm: mulrax = b0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 56) +# asm 1: mulq 56(e2=int64#9 +# asm 2: mov e2=%r11 +mov %rax,%r11 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#10 +# asm 2: mov mulr21=%r12 +mov %rdx,%r12 + +# qhasm: mulrax = b0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 64) +# asm 1: mulq 64(e3=int64#11 +# asm 2: mov e3=%r13 +mov %rax,%r13 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#12 +# asm 2: mov mulr31=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = b0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 72) +# asm 1: mulq 72(e4=int64#13 +# asm 2: mov e4=%r15 +mov %rax,%r15 + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#14 +# asm 2: mov mulr41=%rbx +mov %rdx,%rbx + +# qhasm: mulrax = b1_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 56) +# asm 1: mulq 56(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 64) +# asm 1: mulq 64(mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 104(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 72) +# asm 1: mulq 72(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 56) +# asm 1: mulq 56(mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 112(%rsp),%rdx + +# qhasm: mulrax *= 19 +# 
asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 64) +# asm 1: mulq 64(mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 112(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 72) +# asm 1: mulq 72(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 64) +# asm 1: mulq 64(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 72) +# asm 1: mulq 72(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 128(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 56) +# asm 1: mulq 56(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 64) +# asm 1: mulq 64(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 72) +# asm 1: mulq 72(mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.e0) << 13 +# asm 1: shld $13,mulr41=int64#5 +# asm 2: imulq $19,mulr41=%r8 +imulq $19,%rbx,%r8 + +# qhasm: e0 += mulr41 +# asm 1: add mult=int64#5 +# asm 2: mov mult=%r8 +mov %rcx,%r8 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,e1=int64#6 +# asm 2: mov e1=%r9 +mov %r8,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,e2=int64#7 +# asm 2: mov e2=%rax +mov %r8,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,e3=int64#8 +# asm 2: mov e3=%r10 +mov %r8,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,e4=int64#9 +# asm 2: mov e4=%r11 +mov %r8,%r11 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#5 +# asm 2: imulq $19,mult=%r8 +imulq $19,%r8,%r8 + +# qhasm: e0 += mult +# asm 1: add h0=int64#3 +# asm 2: mov h0=%rdx +mov %rcx,%rdx + +# qhasm: h1 = e1 +# asm 1: mov h1=int64#5 +# asm 2: mov h1=%r8 +mov %r9,%r8 + +# qhasm: h2 = e2 +# asm 1: mov h2=int64#10 +# asm 2: mov h2=%r12 +mov %rax,%r12 + +# qhasm: h3 = e3 +# asm 1: mov h3=int64#11 +# asm 2: mov h3=%r13 +mov %r10,%r13 + +# qhasm: h4 = e4 +# asm 1: mov h4=int64#12 +# asm 2: mov h4=%r14 +mov %r11,%r14 + +# qhasm: e0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0 +# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,h0_stack=stack64#8 +# asm 2: movq h0_stack=56(%rsp) +movq %rdx,56(%rsp) + +# qhasm: h1_stack = h1 +# asm 1: movq h1_stack=stack64#9 +# asm 2: movq h1_stack=64(%rsp) +movq %r8,64(%rsp) + +# qhasm: h2_stack = h2 +# asm 1: movq h2_stack=stack64#10 +# asm 2: movq h2_stack=72(%rsp) +movq %r12,72(%rsp) + +# qhasm: h3_stack = h3 +# asm 1: movq h3_stack=stack64#11 +# asm 2: movq h3_stack=80(%rsp) +movq %r13,80(%rsp) + +# qhasm: h4_stack = h4 +# asm 1: movq h4_stack=stack64#12 +# asm 2: movq h4_stack=88(%rsp) +movq 
%r14,88(%rsp) + +# qhasm: e0_stack = e0 +# asm 1: movq e0_stack=stack64#13 +# asm 2: movq e0_stack=96(%rsp) +movq %rcx,96(%rsp) + +# qhasm: e1_stack = e1 +# asm 1: movq e1_stack=stack64#14 +# asm 2: movq e1_stack=104(%rsp) +movq %r9,104(%rsp) + +# qhasm: e2_stack = e2 +# asm 1: movq e2_stack=stack64#15 +# asm 2: movq e2_stack=112(%rsp) +movq %rax,112(%rsp) + +# qhasm: e3_stack = e3 +# asm 1: movq e3_stack=stack64#16 +# asm 2: movq e3_stack=120(%rsp) +movq %r10,120(%rsp) + +# qhasm: e4_stack = e4 +# asm 1: movq e4_stack=stack64#17 +# asm 2: movq e4_stack=128(%rsp) +movq %r11,128(%rsp) + +# qhasm: mulrax = *(uint64 *)(rp + 144) +# asm 1: movq 144(mulrax=int64#3 +# asm 2: movq 144(mulrax=%rdx +movq 144(%rdi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#18 +# asm 2: movq mulx319_stack=136(%rsp) +movq %rax,136(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96) +# asm 1: mulq 96(c0=int64#4 +# asm 2: mov c0=%rcx +mov %rax,%rcx + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#5 +# asm 2: mov mulr01=%r8 +mov %rdx,%r8 + +# qhasm: mulrax = *(uint64 *)(rp + 152) +# asm 1: movq 152(mulrax=int64#3 +# asm 2: movq 152(mulrax=%rdx +movq 152(%rdi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#19 +# asm 2: movq mulx419_stack=144(%rsp) +movq %rax,144(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88) +# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rdi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rdi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88) +# asm 1: mulq 88(c1=int64#6 +# asm 2: mov c1=%r9 +mov %rax,%r9 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#8 +# asm 2: mov mulr11=%r10 +mov %rdx,%r10 + +# qhasm: mulrax = *(uint64 *)(rp + 120) +# asm 1: movq 120(mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rdi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96) +# asm 1: mulq 96(c2=int64#9 +# asm 2: mov c2=%r11 +mov %rax,%r11 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#10 +# asm 2: mov mulr21=%r12 +mov %rdx,%r12 + +# qhasm: mulrax = *(uint64 *)(rp + 120) +# asm 1: movq 120(mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rdi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104) +# asm 1: mulq 104(c3=int64#11 +# asm 2: mov c3=%r13 +mov %rax,%r13 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#12 +# asm 2: mov mulr31=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(rp + 120) +# asm 1: movq 120(mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rdi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112) +# asm 1: mulq 112(c4=int64#13 +# asm 2: mov c4=%r15 +mov %rax,%r15 + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#14 +# asm 2: mov mulr41=%rbx +mov %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(rp + 128) +# asm 1: movq 128(mulrax=int64#7 +# asm 2: movq 128(mulrax=%rax +movq 128(%rdi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 128(mulrax=%rax +movq 128(%rdi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88) 
+# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq 128(mulrax=%rax +movq 128(%rdi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96) +# asm 1: mulq 96(mulrax=int64#7 +# asm 2: movq 128(mulrax=%rax +movq 128(%rdi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104) +# asm 1: mulq 104(mulrax=int64#3 +# asm 2: movq 128(mulrax=%rdx +movq 128(%rdi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112) +# asm 1: mulq 112(mulrax=int64#7 +# asm 2: movq 136(mulrax=%rax +movq 136(%rdi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 136(mulrax=%rax +movq 136(%rdi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88) +# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq 136(mulrax=%rax +movq 136(%rdi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96) +# asm 1: mulq 96(mulrax=int64#3 +# asm 2: movq 136(mulrax=%rdx +movq 136(%rdi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104) +# asm 1: mulq 104(mulrax=int64#3 +# asm 2: movq 136(mulrax=%rdx +movq 136(%rdi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112) +# asm 1: mulq 112(mulrax=int64#7 +# asm 2: movq 144(mulrax=%rax +movq 144(%rdi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 144(mulrax=%rax +movq 144(%rdi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88) +# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104) +# asm 1: mulq 104(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112) +# asm 1: mulq 112(mulrax=int64#7 +# asm 2: movq 152(mulrax=%rax +movq 152(%rdi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96) +# asm 1: mulq 96(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104) +# asm 1: mulq 104(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112) +# asm 1: mulq 112(mulredmask=int64#2 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi + +# qhasm: mulr01 = (mulr01.c0) << 13 +# asm 1: shld $13,mulr41=int64#3 +# asm 2: imulq $19,mulr41=%rdx +imulq $19,%rbx,%rdx + +# qhasm: c0 += mulr41 +# asm 1: add mult=int64#3 +# asm 2: mov mult=%rdx +mov %rcx,%rdx + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,c1=int64#5 +# asm 2: mov c1=%r8 +mov %rdx,%r8 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,c2=int64#6 +# asm 2: mov c2=%r9 +mov %rdx,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,c3=int64#7 +# asm 2: mov c3=%rax +mov %rdx,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,c4=int64#8 +# asm 2: mov c4=%r10 
+mov %rdx,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#3 +# asm 2: imulq $19,mult=%rdx +imulq $19,%rdx,%rdx + +# qhasm: c0 += mult +# asm 1: add c0_stack=stack64#18 +# asm 2: movq c0_stack=136(%rsp) +movq %rcx,136(%rsp) + +# qhasm: f0 = *(uint64 *)(rp + 80) +# asm 1: movq 80(f0=int64#2 +# asm 2: movq 80(f0=%rsi +movq 80(%rdi),%rsi + +# qhasm: f1 = *(uint64 *)(rp + 88) +# asm 1: movq 88(f1=int64#3 +# asm 2: movq 88(f1=%rdx +movq 88(%rdi),%rdx + +# qhasm: f2 = *(uint64 *)(rp + 96) +# asm 1: movq 96(f2=int64#4 +# asm 2: movq 96(f2=%rcx +movq 96(%rdi),%rcx + +# qhasm: f3 = *(uint64 *)(rp + 104) +# asm 1: movq 104(f3=int64#9 +# asm 2: movq 104(f3=%r11 +movq 104(%rdi),%r11 + +# qhasm: f4 = *(uint64 *)(rp + 112) +# asm 1: movq 112(f4=int64#10 +# asm 2: movq 112(f4=%r12 +movq 112(%rdi),%r12 + +# qhasm: f0 += f0 +# asm 1: add g0=int64#11 +# asm 2: mov g0=%r13 +mov %rsi,%r13 + +# qhasm: g1 = f1 +# asm 1: mov g1=int64#12 +# asm 2: mov g1=%r14 +mov %rdx,%r14 + +# qhasm: g2 = f2 +# asm 1: mov g2=int64#13 +# asm 2: mov g2=%r15 +mov %rcx,%r15 + +# qhasm: g3 = f3 +# asm 1: mov g3=int64#14 +# asm 2: mov g3=%rbx +mov %r11,%rbx + +# qhasm: g4 = f4 +# asm 1: mov g4=int64#15 +# asm 2: mov g4=%rbp +mov %r12,%rbp + +# qhasm: f0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0 +# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,g0_stack=stack64#18 +# asm 2: movq g0_stack=136(%rsp) +movq %r13,136(%rsp) + +# qhasm: g1_stack = g1 +# asm 1: movq g1_stack=stack64#19 +# asm 2: movq g1_stack=144(%rsp) +movq %r14,144(%rsp) + +# qhasm: g2_stack = g2 +# asm 1: movq g2_stack=stack64#20 +# asm 2: movq g2_stack=152(%rsp) +movq %r15,152(%rsp) + +# qhasm: g3_stack = g3 +# asm 1: movq g3_stack=stack64#21 +# asm 2: movq g3_stack=160(%rsp) +movq %rbx,160(%rsp) + +# qhasm: g4_stack = g4 +# asm 1: movq g4_stack=stack64#22 +# asm 2: movq g4_stack=168(%rsp) +movq %rbp,168(%rsp) + +# qhasm: f0_stack = f0 +# asm 1: movq f0_stack=stack64#23 +# asm 2: movq f0_stack=176(%rsp) +movq %rsi,176(%rsp) + +# qhasm: f1_stack = f1 +# asm 1: movq f1_stack=stack64#24 +# asm 2: movq f1_stack=184(%rsp) +movq %rdx,184(%rsp) + +# qhasm: f2_stack = f2 +# asm 1: movq f2_stack=stack64#25 +# asm 2: movq f2_stack=192(%rsp) +movq %rcx,192(%rsp) + +# qhasm: f3_stack = f3 +# asm 1: movq f3_stack=stack64#26 +# asm 2: movq f3_stack=200(%rsp) +movq %r11,200(%rsp) + +# qhasm: f4_stack = f4 +# asm 1: movq f4_stack=stack64#27 +# asm 2: movq f4_stack=208(%rsp) +movq %r12,208(%rsp) + +# qhasm: mulrax = e3_stack +# asm 1: movq mulrax=int64#2 +# asm 2: movq mulrax=%rsi +movq 120(%rsp),%rsi + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rsi,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#28 +# asm 2: movq mulx319_stack=216(%rsp) +movq %rax,216(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * f2_stack +# asm 1: mulq rx0=int64#2 +# asm 2: mov rx0=%rsi +mov %rax,%rsi + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#4 +# asm 2: mov mulr01=%rcx +mov %rdx,%rcx + +# qhasm: mulrax = e4_stack +# asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 128(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#29 +# asm 2: movq mulx419_stack=224(%rsp) +movq %rax,224(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * f1_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# 
qhasm: (uint128) mulrdx mulrax = mulrax * f0_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f1_stack +# asm 1: mulq rx1=int64#5 +# asm 2: mov rx1=%r8 +mov %rax,%r8 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#6 +# asm 2: mov mulr11=%r9 +mov %rdx,%r9 + +# qhasm: mulrax = e0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f2_stack +# asm 1: mulq rx2=int64#8 +# asm 2: mov rx2=%r10 +mov %rax,%r10 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#9 +# asm 2: mov mulr21=%r11 +mov %rdx,%r11 + +# qhasm: mulrax = e0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f3_stack +# asm 1: mulq rx3=int64#10 +# asm 2: mov rx3=%r12 +mov %rax,%r12 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#11 +# asm 2: mov mulr31=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = e0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f4_stack +# asm 1: mulq rx4=int64#12 +# asm 2: mov rx4=%r14 +mov %rax,%r14 + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#13 +# asm 2: mov mulr41=%r15 +mov %rdx,%r15 + +# qhasm: mulrax = e1_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f0_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f1_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f2_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f3_stack +# asm 1: mulq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 104(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f4_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f0_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f1_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f2_stack +# asm 1: mulq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 112(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f3_stack +# asm 1: mulq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 112(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f4_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f0_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f1_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 216(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f3_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 216(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = 
mulrax * f4_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 128(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f0_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 224(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f2_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 224(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f3_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 224(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f4_stack +# asm 1: mulq mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.rx0) << 13 +# asm 1: shld $13,mulr41=int64#4 +# asm 2: imulq $19,mulr41=%rcx +imulq $19,%r15,%rcx + +# qhasm: rx0 += mulr41 +# asm 1: add mult=int64#4 +# asm 2: mov mult=%rcx +mov %rsi,%rcx + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rx1=int64#5 +# asm 2: mov rx1=%r8 +mov %rcx,%r8 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rx2=int64#6 +# asm 2: mov rx2=%r9 +mov %rcx,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rx3=int64#7 +# asm 2: mov rx3=%rax +mov %rcx,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rx4=int64#8 +# asm 2: mov rx4=%r10 +mov %rcx,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#4 +# asm 2: imulq $19,mult=%rcx +imulq $19,%rcx,%rcx + +# qhasm: rx0 += mult +# asm 1: add mulrax=int64#2 +# asm 2: movq mulrax=%rsi +movq 80(%rsp),%rsi + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rsi,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#28 +# asm 2: movq mulx319_stack=216(%rsp) +movq %rax,216(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * g2_stack +# asm 1: mulq ry0=int64#2 +# asm 2: mov ry0=%rsi +mov %rax,%rsi + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#4 +# asm 2: mov mulr01=%rcx +mov %rdx,%rcx + +# qhasm: mulrax = h4_stack +# asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 88(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#29 +# asm 2: movq mulx419_stack=224(%rsp) +movq %rax,224(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * g1_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g0_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g1_stack +# asm 1: mulq ry1=int64#5 +# asm 2: mov ry1=%r8 +mov %rax,%r8 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#6 +# asm 2: mov mulr11=%r9 +mov %rdx,%r9 + +# qhasm: mulrax = h0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g2_stack +# asm 1: mulq ry2=int64#8 +# asm 2: mov ry2=%r10 +mov %rax,%r10 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#9 +# asm 2: mov mulr21=%r11 +mov %rdx,%r11 + +# qhasm: mulrax = h0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g3_stack +# asm 1: mulq ry3=int64#10 +# asm 2: mov ry3=%r12 +mov %rax,%r12 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#11 +# asm 2: mov mulr31=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = h0_stack +# 
asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g4_stack +# asm 1: mulq ry4=int64#12 +# asm 2: mov ry4=%r14 +mov %rax,%r14 + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#13 +# asm 2: mov mulr41=%r15 +mov %rdx,%r15 + +# qhasm: mulrax = h1_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g0_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g1_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g2_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g3_stack +# asm 1: mulq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 64(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g4_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g0_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g1_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g2_stack +# asm 1: mulq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 72(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g3_stack +# asm 1: mulq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 72(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g4_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g0_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g1_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 216(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g3_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 216(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g4_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 88(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g0_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 224(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g2_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 224(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g3_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 224(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * g4_stack +# asm 1: mulq mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.ry0) << 13 +# asm 1: shld $13,mulr41=int64#4 +# asm 2: imulq $19,mulr41=%rcx +imulq $19,%r15,%rcx + +# qhasm: ry0 += mulr41 +# asm 1: add mult=int64#4 +# asm 2: mov mult=%rcx +mov %rsi,%rcx + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,ry1=int64#5 +# asm 2: mov ry1=%r8 +mov 
%rcx,%r8 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,ry2=int64#6 +# asm 2: mov ry2=%r9 +mov %rcx,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,ry3=int64#7 +# asm 2: mov ry3=%rax +mov %rcx,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,ry4=int64#8 +# asm 2: mov ry4=%r10 +mov %rcx,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#4 +# asm 2: imulq $19,mult=%rcx +imulq $19,%rcx,%rcx + +# qhasm: ry0 += mult +# asm 1: add mulrax=int64#2 +# asm 2: movq mulrax=%rsi +movq 160(%rsp),%rsi + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rsi,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#28 +# asm 2: movq mulx319_stack=216(%rsp) +movq %rax,216(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * f2_stack +# asm 1: mulq rz0=int64#2 +# asm 2: mov rz0=%rsi +mov %rax,%rsi + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#4 +# asm 2: mov mulr01=%rcx +mov %rdx,%rcx + +# qhasm: mulrax = g4_stack +# asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 168(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#29 +# asm 2: movq mulx419_stack=224(%rsp) +movq %rax,224(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * f1_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f0_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f1_stack +# asm 1: mulq rz1=int64#5 +# asm 2: mov rz1=%r8 +mov %rax,%r8 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#6 +# asm 2: mov mulr11=%r9 +mov %rdx,%r9 + +# qhasm: mulrax = g0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f2_stack +# asm 1: mulq rz2=int64#8 +# asm 2: mov rz2=%r10 +mov %rax,%r10 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#9 +# asm 2: mov mulr21=%r11 +mov %rdx,%r11 + +# qhasm: mulrax = g0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f3_stack +# asm 1: mulq rz3=int64#10 +# asm 2: mov rz3=%r12 +mov %rax,%r12 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#11 +# asm 2: mov mulr31=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = g0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f4_stack +# asm 1: mulq rz4=int64#12 +# asm 2: mov rz4=%r14 +mov %rax,%r14 + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#13 +# asm 2: mov mulr41=%r15 +mov %rdx,%r15 + +# qhasm: mulrax = g1_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f0_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f1_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f2_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f3_stack +# asm 1: mulq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 144(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax 
+imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f4_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f0_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f1_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f2_stack +# asm 1: mulq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 152(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f3_stack +# asm 1: mulq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 152(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f4_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f0_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f1_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 216(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f3_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 216(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f4_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f0_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 224(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f2_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 224(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f3_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 224(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * f4_stack +# asm 1: mulq mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.rz0) << 13 +# asm 1: shld $13,mulr41=int64#4 +# asm 2: imulq $19,mulr41=%rcx +imulq $19,%r15,%rcx + +# qhasm: rz0 += mulr41 +# asm 1: add mult=int64#4 +# asm 2: mov mult=%rcx +mov %rsi,%rcx + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rz1=int64#5 +# asm 2: mov rz1=%r8 +mov %rcx,%r8 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rz2=int64#6 +# asm 2: mov rz2=%r9 +mov %rcx,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rz3=int64#7 +# asm 2: mov rz3=%rax +mov %rcx,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rz4=int64#8 +# asm 2: mov rz4=%r10 +mov %rcx,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#4 +# asm 2: imulq $19,mult=%rcx +imulq $19,%rcx,%rcx + +# qhasm: rz0 += mult +# asm 1: add mulrax=int64#2 +# asm 2: movq mulrax=%rsi +movq 120(%rsp),%rsi + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rsi,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#18 +# asm 2: movq mulx319_stack=136(%rsp) +movq %rax,136(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * h2_stack +# asm 1: mulq rt0=int64#2 +# asm 2: mov rt0=%rsi +mov %rax,%rsi + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#4 +# asm 2: mov mulr01=%rcx +mov %rdx,%rcx + +# qhasm: mulrax = e4_stack +# 
asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 128(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#19 +# asm 2: movq mulx419_stack=144(%rsp) +movq %rax,144(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * h1_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h0_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h1_stack +# asm 1: mulq rt1=int64#5 +# asm 2: mov rt1=%r8 +mov %rax,%r8 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#6 +# asm 2: mov mulr11=%r9 +mov %rdx,%r9 + +# qhasm: mulrax = e0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h2_stack +# asm 1: mulq rt2=int64#8 +# asm 2: mov rt2=%r10 +mov %rax,%r10 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#9 +# asm 2: mov mulr21=%r11 +mov %rdx,%r11 + +# qhasm: mulrax = e0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h3_stack +# asm 1: mulq rt3=int64#10 +# asm 2: mov rt3=%r12 +mov %rax,%r12 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#11 +# asm 2: mov mulr31=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = e0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h4_stack +# asm 1: mulq rt4=int64#12 +# asm 2: mov rt4=%r14 +mov %rax,%r14 + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#13 +# asm 2: mov mulr41=%r15 +mov %rdx,%r15 + +# qhasm: mulrax = e1_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h0_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h1_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h2_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h3_stack +# asm 1: mulq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 104(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h4_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h0_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h1_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h2_stack +# asm 1: mulq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 112(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h3_stack +# asm 1: mulq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 112(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h4_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax 
+movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h0_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h1_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h3_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h4_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 128(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h0_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h2_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h3_stack +# asm 1: mulq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * h4_stack +# asm 1: mulq mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.rt0) << 13 +# asm 1: shld $13,mulr41=int64#4 +# asm 2: imulq $19,mulr41=%rcx +imulq $19,%r15,%rcx + +# qhasm: rt0 += mulr41 +# asm 1: add mult=int64#4 +# asm 2: mov mult=%rcx +mov %rsi,%rcx + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rt1=int64#5 +# asm 2: mov rt1=%r8 +mov %rcx,%r8 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rt2=int64#6 +# asm 2: mov rt2=%r9 +mov %rcx,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rt3=int64#7 +# asm 2: mov rt3=%rax +mov %rcx,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rt4=int64#8 +# asm 2: mov rt4=%r10 +mov %rcx,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#4 +# asm 2: imulq $19,mult=%rcx +imulq $19,%rcx,%rcx + +# qhasm: rt0 += mult +# asm 1: add caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-51-30k/ge25519_nielsadd_p1p1.s b/src/ed25519-supercop-amd64-51-30k/ge25519_nielsadd_p1p1.s new file mode 100644 index 0000000..b136ffe --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/ge25519_nielsadd_p1p1.s @@ -0,0 +1,3161 @@ + +# qhasm: int64 rp + +# qhasm: int64 pp + +# qhasm: int64 qp + +# qhasm: input rp + +# qhasm: input pp + +# qhasm: input qp + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller 
caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 a0 + +# qhasm: int64 a1 + +# qhasm: int64 a2 + +# qhasm: int64 a3 + +# qhasm: int64 a4 + +# qhasm: stack64 a0_stack + +# qhasm: stack64 a1_stack + +# qhasm: stack64 a2_stack + +# qhasm: stack64 a3_stack + +# qhasm: stack64 a4_stack + +# qhasm: int64 b0 + +# qhasm: int64 b1 + +# qhasm: int64 b2 + +# qhasm: int64 b3 + +# qhasm: int64 b4 + +# qhasm: stack64 b0_stack + +# qhasm: stack64 b1_stack + +# qhasm: stack64 b2_stack + +# qhasm: stack64 b3_stack + +# qhasm: stack64 b4_stack + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: int64 c4 + +# qhasm: stack64 c0_stack + +# qhasm: stack64 c1_stack + +# qhasm: stack64 c2_stack + +# qhasm: stack64 c3_stack + +# qhasm: stack64 c4_stack + +# qhasm: int64 d0 + +# qhasm: int64 d1 + +# qhasm: int64 d2 + +# qhasm: int64 d3 + +# qhasm: int64 d4 + +# qhasm: stack64 d0_stack + +# qhasm: stack64 d1_stack + +# qhasm: stack64 d2_stack + +# qhasm: stack64 d3_stack + +# qhasm: stack64 d4_stack + +# qhasm: int64 e0 + +# qhasm: int64 e1 + +# qhasm: int64 e2 + +# qhasm: int64 e3 + +# qhasm: int64 e4 + +# qhasm: stack64 e0_stack + +# qhasm: stack64 e1_stack + +# qhasm: stack64 e2_stack + +# qhasm: stack64 e3_stack + +# qhasm: stack64 e4_stack + +# qhasm: int64 f0 + +# qhasm: int64 f1 + +# qhasm: int64 f2 + +# qhasm: int64 f3 + +# qhasm: int64 f4 + +# qhasm: stack64 f0_stack + +# qhasm: stack64 f1_stack + +# qhasm: stack64 f2_stack + +# qhasm: stack64 f3_stack + +# qhasm: stack64 f4_stack + +# qhasm: int64 g0 + +# qhasm: int64 g1 + +# qhasm: int64 g2 + +# qhasm: int64 g3 + +# qhasm: int64 g4 + +# qhasm: stack64 g0_stack + +# qhasm: stack64 g1_stack + +# qhasm: stack64 g2_stack + +# qhasm: stack64 g3_stack + +# qhasm: stack64 g4_stack + +# qhasm: int64 h0 + +# qhasm: int64 h1 + +# qhasm: int64 h2 + +# qhasm: int64 h3 + +# qhasm: int64 h4 + +# qhasm: stack64 h0_stack + +# qhasm: stack64 h1_stack + +# qhasm: stack64 h2_stack + +# qhasm: stack64 h3_stack + +# qhasm: stack64 h4_stack + +# qhasm: int64 qt0 + +# qhasm: int64 qt1 + +# qhasm: int64 qt2 + +# qhasm: int64 qt3 + +# qhasm: int64 qt4 + +# qhasm: stack64 qt0_stack + +# qhasm: stack64 qt1_stack + +# qhasm: stack64 qt2_stack + +# qhasm: stack64 qt3_stack + +# qhasm: stack64 qt4_stack + +# qhasm: int64 t10 + +# qhasm: int64 t11 + +# qhasm: int64 t12 + +# qhasm: int64 t13 + +# qhasm: int64 t14 + +# qhasm: stack64 t10_stack + +# qhasm: stack64 t11_stack + +# qhasm: stack64 t12_stack + +# qhasm: stack64 t13_stack + +# qhasm: stack64 t14_stack + +# qhasm: int64 t20 + +# qhasm: int64 t21 + +# qhasm: int64 t22 + +# qhasm: int64 t23 + +# qhasm: int64 t24 + +# qhasm: stack64 t20_stack + +# qhasm: stack64 t21_stack + +# qhasm: stack64 t22_stack + +# qhasm: stack64 t23_stack + +# qhasm: stack64 t24_stack + +# qhasm: int64 rx0 + +# qhasm: int64 rx1 + +# qhasm: int64 rx2 + +# qhasm: int64 rx3 + +# qhasm: int64 rx4 + +# qhasm: int64 ry0 + +# qhasm: int64 ry1 + +# qhasm: int64 ry2 + +# qhasm: int64 ry3 + +# qhasm: int64 ry4 + +# qhasm: int64 rz0 + +# qhasm: int64 rz1 + +# qhasm: int64 rz2 + +# qhasm: int64 rz3 + +# qhasm: int64 rz4 + +# qhasm: int64 rt0 + +# qhasm: int64 rt1 + +# qhasm: int64 rt2 + +# qhasm: int64 rt3 + +# qhasm: int64 rt4 + +# qhasm: int64 mulr01 + +# qhasm: int64 mulr11 + +# qhasm: int64 mulr21 + +# qhasm: 
int64 mulr31 + +# qhasm: int64 mulr41 + +# qhasm: int64 mulrax + +# qhasm: int64 mulrdx + +# qhasm: int64 mult + +# qhasm: int64 mulredmask + +# qhasm: stack64 mulx219_stack + +# qhasm: stack64 mulx319_stack + +# qhasm: stack64 mulx419_stack + +# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd_p1p1 +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd_p1p1 +.globl crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd_p1p1 +_crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd_p1p1: +crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd_p1p1: +mov %rsp,%r11 +and $31,%r11 +add $160,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: qp = qp +# asm 1: mov qp=int64#4 +# asm 2: mov qp=%rcx +mov %rdx,%rcx + +# qhasm: a0 = *(uint64 *)(pp + 40) +# asm 1: movq 40(a0=int64#3 +# asm 2: movq 40(a0=%rdx +movq 40(%rsi),%rdx + +# qhasm: a1 = *(uint64 *)(pp + 48) +# asm 1: movq 48(a1=int64#5 +# asm 2: movq 48(a1=%r8 +movq 48(%rsi),%r8 + +# qhasm: a2 = *(uint64 *)(pp + 56) +# asm 1: movq 56(a2=int64#6 +# asm 2: movq 56(a2=%r9 +movq 56(%rsi),%r9 + +# qhasm: a3 = *(uint64 *)(pp + 64) +# asm 1: movq 64(a3=int64#7 +# asm 2: movq 64(a3=%rax +movq 64(%rsi),%rax + +# qhasm: a4 = *(uint64 *)(pp + 72) +# asm 1: movq 72(a4=int64#8 +# asm 2: movq 72(a4=%r10 +movq 72(%rsi),%r10 + +# qhasm: b0 = a0 +# asm 1: mov b0=int64#9 +# asm 2: mov b0=%r11 +mov %rdx,%r11 + +# qhasm: b1 = a1 +# asm 1: mov b1=int64#10 +# asm 2: mov b1=%r12 +mov %r8,%r12 + +# qhasm: b2 = a2 +# asm 1: mov b2=int64#11 +# asm 2: mov b2=%r13 +mov %r9,%r13 + +# qhasm: b3 = a3 +# asm 1: mov b3=int64#12 +# asm 2: mov b3=%r14 +mov %rax,%r14 + +# qhasm: b4 = a4 +# asm 1: mov b4=int64#13 +# asm 2: mov b4=%r15 +mov %r10,%r15 + +# qhasm: a0 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0 +# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,a0_stack=stack64#8 +# asm 2: movq a0_stack=56(%rsp) +movq %rdx,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq a1_stack=stack64#9 +# asm 2: movq a1_stack=64(%rsp) +movq %r8,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq a2_stack=stack64#10 +# asm 2: movq a2_stack=72(%rsp) +movq %r9,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq a3_stack=stack64#11 +# asm 2: movq a3_stack=80(%rsp) +movq %rax,80(%rsp) + +# qhasm: a4_stack = a4 +# asm 1: movq a4_stack=stack64#12 +# asm 2: movq a4_stack=88(%rsp) +movq %r10,88(%rsp) + +# qhasm: b0_stack = b0 +# asm 1: movq b0_stack=stack64#13 +# asm 2: movq b0_stack=96(%rsp) +movq %r11,96(%rsp) + +# qhasm: b1_stack = b1 +# asm 1: movq b1_stack=stack64#14 +# asm 2: movq b1_stack=104(%rsp) +movq %r12,104(%rsp) + +# qhasm: b2_stack 
= b2 +# asm 1: movq b2_stack=stack64#15 +# asm 2: movq b2_stack=112(%rsp) +movq %r13,112(%rsp) + +# qhasm: b3_stack = b3 +# asm 1: movq b3_stack=stack64#16 +# asm 2: movq b3_stack=120(%rsp) +movq %r14,120(%rsp) + +# qhasm: b4_stack = b4 +# asm 1: movq b4_stack=stack64#17 +# asm 2: movq b4_stack=128(%rsp) +movq %r15,128(%rsp) + +# qhasm: mulrax = a3_stack +# asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 80(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#18 +# asm 2: movq mulx319_stack=136(%rsp) +movq %rax,136(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 16) +# asm 1: mulq 16(a0=int64#5 +# asm 2: mov a0=%r8 +mov %rax,%r8 + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#6 +# asm 2: mov mulr01=%r9 +mov %rdx,%r9 + +# qhasm: mulrax = a4_stack +# asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 88(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#19 +# asm 2: movq mulx419_stack=144(%rsp) +movq %rax,144(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 8) +# asm 1: mulq 8(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 0) +# asm 1: mulq 0(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 8) +# asm 1: mulq 8(a1=int64#8 +# asm 2: mov a1=%r10 +mov %rax,%r10 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#9 +# asm 2: mov mulr11=%r11 +mov %rdx,%r11 + +# qhasm: mulrax = a0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 16) +# asm 1: mulq 16(a2=int64#10 +# asm 2: mov a2=%r12 +mov %rax,%r12 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#11 +# asm 2: mov mulr21=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = a0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 24) +# asm 1: mulq 24(a3=int64#12 +# asm 2: mov a3=%r14 +mov %rax,%r14 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#13 +# asm 2: mov mulr31=%r15 +mov %rdx,%r15 + +# qhasm: mulrax = a0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 32) +# asm 1: mulq 32(a4=int64#14 +# asm 2: mov a4=%rbx +mov %rax,%rbx + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#15 +# asm 2: mov mulr41=%rbp +mov %rdx,%rbp + +# qhasm: mulrax = a1_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 0) +# asm 1: mulq 0(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 8) +# asm 1: mulq 8(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 16) +# asm 1: mulq 16(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 24) +# asm 1: mulq 24(mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 64(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq 
$19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 32) +# asm 1: mulq 32(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 0) +# asm 1: mulq 0(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 8) +# asm 1: mulq 8(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 16) +# asm 1: mulq 16(mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 72(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 24) +# asm 1: mulq 24(mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 72(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 32) +# asm 1: mulq 32(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 0) +# asm 1: mulq 0(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 8) +# asm 1: mulq 8(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 24) +# asm 1: mulq 24(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 32) +# asm 1: mulq 32(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 88(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 0) +# asm 1: mulq 0(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 16) +# asm 1: mulq 16(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 24) +# asm 1: mulq 24(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 32) +# asm 1: mulq 32(mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.a0) << 13 +# asm 1: shld $13,mulr41=int64#6 +# asm 2: imulq $19,mulr41=%r9 +imulq $19,%rbp,%r9 + +# qhasm: a0 += mulr41 +# asm 1: add mult=int64#6 +# asm 2: mov mult=%r9 +mov %r8,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,a1=int64#7 +# asm 2: mov a1=%rax +mov %r9,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,a2=int64#8 +# asm 2: mov a2=%r10 +mov %r9,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,a3=int64#9 +# asm 2: mov a3=%r11 +mov %r9,%r11 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,a4=int64#10 +# asm 2: mov a4=%r12 +mov %r9,%r12 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#6 +# asm 2: imulq $19,mult=%r9 +imulq $19,%r9,%r9 + +# qhasm: a0 += mult +# asm 1: add a0_stack=stack64#8 +# asm 2: movq a0_stack=56(%rsp) +movq %r8,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq a1_stack=stack64#9 +# asm 2: movq a1_stack=64(%rsp) +movq %rax,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq a2_stack=stack64#10 +# asm 2: movq a2_stack=72(%rsp) +movq %r10,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: 
movq a3_stack=stack64#11 +# asm 2: movq a3_stack=80(%rsp) +movq %r11,80(%rsp) + +# qhasm: a4_stack = a4 +# asm 1: movq a4_stack=stack64#12 +# asm 2: movq a4_stack=88(%rsp) +movq %r12,88(%rsp) + +# qhasm: mulrax = b3_stack +# asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 120(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#18 +# asm 2: movq mulx319_stack=136(%rsp) +movq %rax,136(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 56) +# asm 1: mulq 56(e0=int64#5 +# asm 2: mov e0=%r8 +mov %rax,%r8 + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#6 +# asm 2: mov mulr01=%r9 +mov %rdx,%r9 + +# qhasm: mulrax = b4_stack +# asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 128(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#19 +# asm 2: movq mulx419_stack=144(%rsp) +movq %rax,144(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 48) +# asm 1: mulq 48(e1=int64#8 +# asm 2: mov e1=%r10 +mov %rax,%r10 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#9 +# asm 2: mov mulr11=%r11 +mov %rdx,%r11 + +# qhasm: mulrax = b0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 56) +# asm 1: mulq 56(e2=int64#10 +# asm 2: mov e2=%r12 +mov %rax,%r12 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#11 +# asm 2: mov mulr21=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = b0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 64) +# asm 1: mulq 64(e3=int64#12 +# asm 2: mov e3=%r14 +mov %rax,%r14 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#13 +# asm 2: mov mulr31=%r15 +mov %rdx,%r15 + +# qhasm: mulrax = b0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 72) +# asm 1: mulq 72(e4=int64#14 +# asm 2: mov e4=%rbx +mov %rax,%rbx + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#15 +# asm 2: mov mulr41=%rbp +mov %rdx,%rbp + +# qhasm: mulrax = b1_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 56) +# asm 1: mulq 56(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 64) +# asm 1: mulq 64(mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 104(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * 
*(uint64 *)(qp + 72) +# asm 1: mulq 72(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 56) +# asm 1: mulq 56(mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 112(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 64) +# asm 1: mulq 64(mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 112(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 72) +# asm 1: mulq 72(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 64) +# asm 1: mulq 64(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 72) +# asm 1: mulq 72(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 128(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 56) +# asm 1: mulq 56(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 64) +# asm 1: mulq 64(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 72) +# asm 1: mulq 72(mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.e0) << 13 +# asm 1: shld $13,mulr41=int64#6 +# asm 2: imulq $19,mulr41=%r9 +imulq $19,%rbp,%r9 + +# qhasm: e0 += mulr41 +# asm 1: add mult=int64#6 +# asm 2: mov mult=%r9 +mov %r8,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,e1=int64#7 +# asm 2: mov e1=%rax +mov %r9,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,e2=int64#8 +# asm 2: mov e2=%r10 +mov %r9,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,e3=int64#9 +# asm 2: mov e3=%r11 +mov %r9,%r11 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,e4=int64#10 +# asm 2: mov e4=%r12 +mov %r9,%r12 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#6 +# asm 2: imulq $19,mult=%r9 +imulq $19,%r9,%r9 + +# qhasm: e0 += mult +# asm 1: add h0=int64#3 +# asm 2: mov h0=%rdx +mov %r8,%rdx + +# qhasm: h1 = e1 +# asm 1: mov h1=int64#6 +# asm 2: mov h1=%r9 +mov %rax,%r9 + +# qhasm: h2 = e2 +# asm 1: mov h2=int64#11 +# asm 2: mov h2=%r13 +mov %r10,%r13 + +# qhasm: h3 = e3 +# asm 1: mov h3=int64#12 +# asm 2: mov h3=%r14 +mov %r11,%r14 + +# qhasm: h4 = e4 +# asm 1: mov h4=int64#13 +# asm 2: mov h4=%r15 +mov %r12,%r15 + +# qhasm: e0 += *(uint64 
*)&crypto_sign_ed25519_amd64_51_30k_batch_2P0 +# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,mulrax=int64#3 +# asm 2: movq 144(mulrax=%rdx +movq 144(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#8 +# asm 2: movq mulx319_stack=56(%rsp) +movq %rax,56(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96) +# asm 1: mulq 96(c0=int64#5 +# asm 2: mov c0=%r8 +mov %rax,%r8 + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#6 +# asm 2: mov mulr01=%r9 +mov %rdx,%r9 + +# qhasm: mulrax = *(uint64 *)(pp + 152) +# asm 1: movq 152(mulrax=int64#3 +# asm 2: movq 152(mulrax=%rdx +movq 152(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#9 +# asm 2: movq mulx419_stack=64(%rsp) +movq %rax,64(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88) +# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88) +# asm 1: mulq 88(c1=int64#8 +# asm 2: mov c1=%r10 +mov %rax,%r10 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#9 +# asm 2: mov mulr11=%r11 +mov %rdx,%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96) +# asm 1: mulq 96(c2=int64#10 +# asm 2: mov c2=%r12 +mov %rax,%r12 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#11 +# asm 2: mov mulr21=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104) +# asm 1: mulq 104(c3=int64#12 +# asm 2: mov c3=%r14 +mov %rax,%r14 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#13 +# asm 2: mov mulr31=%r15 +mov %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112) +# asm 1: mulq 112(c4=int64#14 +# asm 2: mov c4=%rbx +mov %rax,%rbx + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#15 +# asm 2: mov mulr41=%rbp +mov %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(pp + 128) +# asm 1: movq 128(mulrax=int64#7 +# asm 2: movq 128(mulrax=%rax +movq 128(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 128(mulrax=%rax +movq 128(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88) +# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq 128(mulrax=%rax +movq 128(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96) +# asm 1: mulq 96(mulrax=int64#7 +# asm 2: movq 128(mulrax=%rax +movq 128(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104) +# asm 1: mulq 104(mulrax=int64#3 +# asm 2: movq 128(mulrax=%rdx +movq 128(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * 
*(uint64 *)(qp + 112) +# asm 1: mulq 112(mulrax=int64#7 +# asm 2: movq 136(mulrax=%rax +movq 136(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 136(mulrax=%rax +movq 136(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88) +# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq 136(mulrax=%rax +movq 136(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96) +# asm 1: mulq 96(mulrax=int64#3 +# asm 2: movq 136(mulrax=%rdx +movq 136(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104) +# asm 1: mulq 104(mulrax=int64#3 +# asm 2: movq 136(mulrax=%rdx +movq 136(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112) +# asm 1: mulq 112(mulrax=int64#7 +# asm 2: movq 144(mulrax=%rax +movq 144(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 144(mulrax=%rax +movq 144(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88) +# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104) +# asm 1: mulq 104(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112) +# asm 1: mulq 112(mulrax=int64#7 +# asm 2: movq 152(mulrax=%rax +movq 152(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96) +# asm 1: mulq 96(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104) +# asm 1: mulq 104(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112) +# asm 1: mulq 112(mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.c0) << 13 +# asm 1: shld $13,mulr41=int64#4 +# asm 2: imulq $19,mulr41=%rcx +imulq $19,%rbp,%rcx + +# qhasm: c0 += mulr41 +# asm 1: add mult=int64#4 +# asm 2: mov mult=%rcx +mov %r8,%rcx + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,c1=int64#6 +# asm 2: mov c1=%r9 +mov %rcx,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,c2=int64#7 +# asm 2: mov c2=%rax +mov %rcx,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,c3=int64#8 +# asm 2: mov c3=%r10 +mov %rcx,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,c4=int64#9 +# asm 2: mov c4=%r11 +mov %rcx,%r11 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#4 +# asm 2: imulq $19,mult=%rcx +imulq $19,%rcx,%rcx + +# qhasm: c0 += mult +# asm 1: add c0_stack=stack64#8 +# asm 2: movq c0_stack=56(%rsp) +movq %r8,56(%rsp) + +# qhasm: f0 = *(uint64 *)(pp + 80) +# asm 1: movq 80(f0=int64#3 +# asm 2: movq 80(f0=%rdx +movq 80(%rsi),%rdx + +# qhasm: f1 = *(uint64 *)(pp + 88) +# asm 1: movq 88(f1=int64#4 +# asm 2: movq 88(f1=%rcx +movq 88(%rsi),%rcx + +# qhasm: f2 = *(uint64 *)(pp + 96) +# asm 1: movq 96(f2=int64#5 +# asm 2: movq 96(f2=%r8 
+movq 96(%rsi),%r8 + +# qhasm: f3 = *(uint64 *)(pp + 104) +# asm 1: movq 104(f3=int64#10 +# asm 2: movq 104(f3=%r12 +movq 104(%rsi),%r12 + +# qhasm: f4 = *(uint64 *)(pp + 112) +# asm 1: movq 112(f4=int64#2 +# asm 2: movq 112(f4=%rsi +movq 112(%rsi),%rsi + +# qhasm: f0 += f0 +# asm 1: add g0=int64#11 +# asm 2: mov g0=%r13 +mov %rdx,%r13 + +# qhasm: g1 = f1 +# asm 1: mov g1=int64#12 +# asm 2: mov g1=%r14 +mov %rcx,%r14 + +# qhasm: g2 = f2 +# asm 1: mov g2=int64#13 +# asm 2: mov g2=%r15 +mov %r8,%r15 + +# qhasm: g3 = f3 +# asm 1: mov g3=int64#14 +# asm 2: mov g3=%rbx +mov %r12,%rbx + +# qhasm: g4 = f4 +# asm 1: mov g4=int64#15 +# asm 2: mov g4=%rbp +mov %rsi,%rbp + +# qhasm: f0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0 +# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-51-30k/ge25519_p1p1_to_p2.s b/src/ed25519-supercop-amd64-51-30k/ge25519_p1p1_to_p2.s new file mode 100644 index 0000000..cea50dd --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/ge25519_p1p1_to_p2.s @@ -0,0 +1,2442 @@ + +# qhasm: int64 rp + +# qhasm: int64 pp + +# qhasm: input rp + +# qhasm: input pp + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 rx0 + +# qhasm: int64 rx1 + +# qhasm: int64 rx2 + +# qhasm: int64 rx3 + +# qhasm: int64 rx4 + +# qhasm: int64 ry0 + +# qhasm: int64 ry1 + +# qhasm: int64 ry2 + +# qhasm: int64 ry3 + +# qhasm: int64 ry4 + +# qhasm: int64 rz0 + +# qhasm: int64 rz1 + +# qhasm: int64 rz2 + +# qhasm: int64 rz3 + +# qhasm: int64 rz4 + +# qhasm: int64 mulr01 + +# qhasm: int64 mulr11 + +# qhasm: int64 mulr21 + +# qhasm: int64 mulr31 + +# qhasm: int64 mulr41 + +# qhasm: int64 mulrax + +# qhasm: int64 mulrdx + +# qhasm: int64 mult + +# qhasm: int64 mulredmask + +# qhasm: stack64 mulx219_stack + +# qhasm: stack64 mulx319_stack + +# qhasm: stack64 mulx419_stack + +# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p2 +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p2 +.globl crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p2 +_crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p2: 
+crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p2: +mov %rsp,%r11 +and $31,%r11 +add $96,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: mulrax = *(uint64 *)(pp + 24) +# asm 1: movq 24(mulrax=int64#3 +# asm 2: movq 24(mulrax=%rdx +movq 24(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#8 +# asm 2: movq mulx319_stack=56(%rsp) +movq %rax,56(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(rx0=int64#4 +# asm 2: mov rx0=%rcx +mov %rax,%rcx + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#5 +# asm 2: mov mulr01=%r8 +mov %rdx,%r8 + +# qhasm: mulrax = *(uint64 *)(pp + 32) +# asm 1: movq 32(mulrax=int64#3 +# asm 2: movq 32(mulrax=%rdx +movq 32(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#9 +# asm 2: movq mulx419_stack=64(%rsp) +movq %rax,64(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(rx1=int64#6 +# asm 2: mov rx1=%r9 +mov %rax,%r9 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#8 +# asm 2: mov mulr11=%r10 +mov %rdx,%r10 + +# qhasm: mulrax = *(uint64 *)(pp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(rx2=int64#9 +# asm 2: mov rx2=%r11 +mov %rax,%r11 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#10 +# asm 2: mov mulr21=%r12 +mov %rdx,%r12 + +# qhasm: mulrax = *(uint64 *)(pp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(rx3=int64#11 +# asm 2: mov rx3=%r13 +mov %rax,%r13 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#12 +# asm 2: mov mulr31=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(pp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(rx4=int64#13 +# asm 2: mov rx4=%r15 +mov %rax,%r15 + +# qhasm: mulr41 = 
mulrdx +# asm 1: mov mulr41=int64#14 +# asm 2: mov mulr41=%rbx +mov %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 8) +# asm 1: movq 8(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#3 +# asm 2: movq 8(mulrax=%rdx +movq 8(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(mulrax=int64#3 +# asm 2: movq 16(mulrax=%rdx +movq 16(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#3 +# asm 2: movq 16(mulrax=%rdx +movq 16(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.rx0) << 13 +# asm 1: shld $13,mulr41=int64#5 +# asm 2: imulq $19,mulr41=%r8 +imulq $19,%rbx,%r8 + +# qhasm: rx0 += mulr41 +# asm 1: add mult=int64#5 +# asm 2: 
mov mult=%r8 +mov %rcx,%r8 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rx1=int64#6 +# asm 2: mov rx1=%r9 +mov %r8,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rx2=int64#7 +# asm 2: mov rx2=%rax +mov %r8,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rx3=int64#8 +# asm 2: mov rx3=%r10 +mov %r8,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rx4=int64#9 +# asm 2: mov rx4=%r11 +mov %r8,%r11 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#5 +# asm 2: imulq $19,mult=%r8 +imulq $19,%r8,%r8 + +# qhasm: rx0 += mult +# asm 1: add mulrax=int64#3 +# asm 2: movq 104(mulrax=%rdx +movq 104(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#8 +# asm 2: movq mulx319_stack=56(%rsp) +movq %rax,56(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56) +# asm 1: mulq 56(ry0=int64#4 +# asm 2: mov ry0=%rcx +mov %rax,%rcx + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#5 +# asm 2: mov mulr01=%r8 +mov %rdx,%r8 + +# qhasm: mulrax = *(uint64 *)(pp + 112) +# asm 1: movq 112(mulrax=int64#3 +# asm 2: movq 112(mulrax=%rdx +movq 112(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#9 +# asm 2: movq mulx419_stack=64(%rsp) +movq %rax,64(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48) +# asm 1: mulq 48(ry1=int64#6 +# asm 2: mov ry1=%r9 +mov %rax,%r9 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#8 +# asm 2: mov mulr11=%r10 +mov %rdx,%r10 + +# qhasm: mulrax = *(uint64 *)(pp + 80) +# asm 1: movq 80(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56) +# asm 1: mulq 56(ry2=int64#9 +# asm 2: mov ry2=%r11 +mov %rax,%r11 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#10 +# asm 2: mov mulr21=%r12 +mov %rdx,%r12 + +# qhasm: mulrax = *(uint64 *)(pp + 80) +# asm 1: movq 80(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(ry3=int64#11 +# asm 2: mov ry3=%r13 +mov %rax,%r13 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#12 +# asm 2: mov mulr31=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(pp + 80) +# asm 1: movq 80(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(ry4=int64#13 +# asm 2: mov ry4=%r15 +mov %rax,%r15 + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#14 +# asm 2: mov mulr41=%rbx +mov %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 88) +# asm 1: movq 88(mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: 
(uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56) +# asm 1: mulq 56(mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(mulrax=int64#3 +# asm 2: movq 88(mulrax=%rdx +movq 88(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56) +# asm 1: mulq 56(mulrax=int64#3 +# asm 2: movq 96(mulrax=%rdx +movq 96(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(mulrax=int64#3 +# asm 2: movq 96(mulrax=%rdx +movq 96(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56) +# asm 1: mulq 56(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.ry0) << 13 +# asm 1: shld $13,mulr41=int64#5 +# asm 2: imulq $19,mulr41=%r8 +imulq $19,%rbx,%r8 + +# qhasm: ry0 += mulr41 +# asm 1: add mult=int64#5 +# asm 2: mov mult=%r8 +mov %rcx,%r8 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,ry1=int64#6 +# asm 2: mov ry1=%r9 +mov %r8,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,ry2=int64#7 +# asm 2: mov ry2=%rax +mov %r8,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,ry3=int64#8 +# asm 2: mov ry3=%r10 +mov %r8,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,ry4=int64#9 +# asm 2: mov ry4=%r11 +mov %r8,%r11 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#5 +# asm 2: imulq $19,mult=%r8 +imulq $19,%r8,%r8 
+ +# qhasm: ry0 += mult +# asm 1: add mulrax=int64#3 +# asm 2: movq 64(mulrax=%rdx +movq 64(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#8 +# asm 2: movq mulx319_stack=56(%rsp) +movq %rax,56(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(rz0=int64#4 +# asm 2: mov rz0=%rcx +mov %rax,%rcx + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#5 +# asm 2: mov mulr01=%r8 +mov %rdx,%r8 + +# qhasm: mulrax = *(uint64 *)(pp + 72) +# asm 1: movq 72(mulrax=int64#3 +# asm 2: movq 72(mulrax=%rdx +movq 72(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#9 +# asm 2: movq mulx419_stack=64(%rsp) +movq %rax,64(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(rz1=int64#6 +# asm 2: mov rz1=%r9 +mov %rax,%r9 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#8 +# asm 2: mov mulr11=%r10 +mov %rdx,%r10 + +# qhasm: mulrax = *(uint64 *)(pp + 40) +# asm 1: movq 40(mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(rz2=int64#9 +# asm 2: mov rz2=%r11 +mov %rax,%r11 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#10 +# asm 2: mov mulr21=%r12 +mov %rdx,%r12 + +# qhasm: mulrax = *(uint64 *)(pp + 40) +# asm 1: movq 40(mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(rz3=int64#11 +# asm 2: mov rz3=%r13 +mov %rax,%r13 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#12 +# asm 2: mov mulr31=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(pp + 40) +# asm 1: movq 40(mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(rz4=int64#13 +# asm 2: mov rz4=%r15 +mov %rax,%r15 + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#14 +# asm 2: mov mulr41=%rbx +mov %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 48) +# asm 1: movq 48(mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#3 +# asm 2: movq 48(mulrax=%rdx +movq 48(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 
56(mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(mulrax=int64#3 +# asm 2: movq 56(mulrax=%rdx +movq 56(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#3 +# asm 2: movq 56(mulrax=%rdx +movq 56(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulredmask=int64#2 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi + +# qhasm: mulr01 = (mulr01.rz0) << 13 +# asm 1: shld $13,mulr41=int64#3 +# asm 2: imulq $19,mulr41=%rdx +imulq $19,%rbx,%rdx + +# qhasm: rz0 += mulr41 +# asm 1: add mult=int64#3 +# asm 2: mov mult=%rdx +mov %rcx,%rdx + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rz1=int64#5 +# asm 2: mov rz1=%r8 +mov %rdx,%r8 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rz2=int64#6 +# asm 2: mov rz2=%r9 +mov %rdx,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rz3=int64#7 +# asm 2: mov rz3=%rax +mov %rdx,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rz4=int64#8 +# asm 2: mov rz4=%r10 +mov %rdx,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#3 +# asm 2: imulq $19,mult=%rdx +imulq $19,%rdx,%rdx + +# qhasm: rz0 += mult +# asm 1: add caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 
1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-51-30k/ge25519_p1p1_to_p3.s b/src/ed25519-supercop-amd64-51-30k/ge25519_p1p1_to_p3.s new file mode 100644 index 0000000..86a95d0 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/ge25519_p1p1_to_p3.s @@ -0,0 +1,3202 @@ + +# qhasm: int64 rp + +# qhasm: int64 pp + +# qhasm: input rp + +# qhasm: input pp + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 rx0 + +# qhasm: int64 rx1 + +# qhasm: int64 rx2 + +# qhasm: int64 rx3 + +# qhasm: int64 rx4 + +# qhasm: int64 ry0 + +# qhasm: int64 ry1 + +# qhasm: int64 ry2 + +# qhasm: int64 ry3 + +# qhasm: int64 ry4 + +# qhasm: int64 rz0 + +# qhasm: int64 rz1 + +# qhasm: int64 rz2 + +# qhasm: int64 rz3 + +# qhasm: int64 rz4 + +# qhasm: int64 rt0 + +# qhasm: int64 rt1 + +# qhasm: int64 rt2 + +# qhasm: int64 rt3 + +# qhasm: int64 rt4 + +# qhasm: int64 mulr01 + +# qhasm: int64 mulr11 + +# qhasm: int64 mulr21 + +# qhasm: int64 mulr31 + +# qhasm: int64 mulr41 + +# qhasm: int64 mulrax + +# qhasm: int64 mulrdx + +# qhasm: int64 mult + +# qhasm: int64 mulredmask + +# qhasm: stack64 mulx219_stack + +# qhasm: stack64 mulx319_stack + +# qhasm: stack64 mulx419_stack + +# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3 +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3 +.globl crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3 +_crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3: +crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_p3: +mov %rsp,%r11 +and $31,%r11 +add $96,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: mulrax = *(uint64 *)(pp + 24) +# asm 1: movq 24(mulrax=int64#3 +# asm 2: movq 24(mulrax=%rdx +movq 
24(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#8 +# asm 2: movq mulx319_stack=56(%rsp) +movq %rax,56(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(rx0=int64#4 +# asm 2: mov rx0=%rcx +mov %rax,%rcx + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#5 +# asm 2: mov mulr01=%r8 +mov %rdx,%r8 + +# qhasm: mulrax = *(uint64 *)(pp + 32) +# asm 1: movq 32(mulrax=int64#3 +# asm 2: movq 32(mulrax=%rdx +movq 32(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#9 +# asm 2: movq mulx419_stack=64(%rsp) +movq %rax,64(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(rx1=int64#6 +# asm 2: mov rx1=%r9 +mov %rax,%r9 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#8 +# asm 2: mov mulr11=%r10 +mov %rdx,%r10 + +# qhasm: mulrax = *(uint64 *)(pp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(rx2=int64#9 +# asm 2: mov rx2=%r11 +mov %rax,%r11 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#10 +# asm 2: mov mulr21=%r12 +mov %rdx,%r12 + +# qhasm: mulrax = *(uint64 *)(pp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(rx3=int64#11 +# asm 2: mov rx3=%r13 +mov %rax,%r13 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#12 +# asm 2: mov mulr31=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(pp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(rx4=int64#13 +# asm 2: mov rx4=%r15 +mov %rax,%r15 + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#14 +# asm 2: mov mulr41=%rbx +mov %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 8) +# asm 1: movq 8(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#3 +# asm 2: movq 8(mulrax=%rdx +movq 8(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 
120(mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(mulrax=int64#3 +# asm 2: movq 16(mulrax=%rdx +movq 16(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#3 +# asm 2: movq 16(mulrax=%rdx +movq 16(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.rx0) << 13 +# asm 1: shld $13,mulr41=int64#5 +# asm 2: imulq $19,mulr41=%r8 +imulq $19,%rbx,%r8 + +# qhasm: rx0 += mulr41 +# asm 1: add mult=int64#5 +# asm 2: mov mult=%r8 +mov %rcx,%r8 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rx1=int64#6 +# asm 2: mov rx1=%r9 +mov %r8,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rx2=int64#7 +# asm 2: mov rx2=%rax +mov %r8,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rx3=int64#8 +# asm 2: mov rx3=%r10 +mov %r8,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rx4=int64#9 +# asm 2: mov rx4=%r11 +mov %r8,%r11 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#5 +# asm 2: imulq $19,mult=%r8 +imulq $19,%r8,%r8 + +# qhasm: rx0 += mult +# asm 1: add mulrax=int64#3 +# asm 2: movq 104(mulrax=%rdx +movq 104(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#8 +# asm 2: movq mulx319_stack=56(%rsp) +movq %rax,56(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56) +# asm 1: mulq 56(ry0=int64#4 +# asm 2: mov ry0=%rcx +mov %rax,%rcx + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#5 +# asm 2: mov mulr01=%r8 +mov %rdx,%r8 + +# qhasm: mulrax = *(uint64 *)(pp 
+ 112) +# asm 1: movq 112(mulrax=int64#3 +# asm 2: movq 112(mulrax=%rdx +movq 112(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#9 +# asm 2: movq mulx419_stack=64(%rsp) +movq %rax,64(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48) +# asm 1: mulq 48(ry1=int64#6 +# asm 2: mov ry1=%r9 +mov %rax,%r9 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#8 +# asm 2: mov mulr11=%r10 +mov %rdx,%r10 + +# qhasm: mulrax = *(uint64 *)(pp + 80) +# asm 1: movq 80(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56) +# asm 1: mulq 56(ry2=int64#9 +# asm 2: mov ry2=%r11 +mov %rax,%r11 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#10 +# asm 2: mov mulr21=%r12 +mov %rdx,%r12 + +# qhasm: mulrax = *(uint64 *)(pp + 80) +# asm 1: movq 80(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(ry3=int64#11 +# asm 2: mov ry3=%r13 +mov %rax,%r13 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#12 +# asm 2: mov mulr31=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(pp + 80) +# asm 1: movq 80(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(ry4=int64#13 +# asm 2: mov ry4=%r15 +mov %rax,%r15 + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#14 +# asm 2: mov mulr41=%rbx +mov %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 88) +# asm 1: movq 88(mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56) +# asm 1: mulq 56(mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(mulrax=int64#3 +# asm 2: movq 88(mulrax=%rdx +movq 88(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56) +# asm 1: mulq 56(mulrax=int64#3 +# asm 2: movq 96(mulrax=%rdx +movq 96(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = 
mulrax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(mulrax=int64#3 +# asm 2: movq 96(mulrax=%rdx +movq 96(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56) +# asm 1: mulq 56(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.ry0) << 13 +# asm 1: shld $13,mulr41=int64#5 +# asm 2: imulq $19,mulr41=%r8 +imulq $19,%rbx,%r8 + +# qhasm: ry0 += mulr41 +# asm 1: add mult=int64#5 +# asm 2: mov mult=%r8 +mov %rcx,%r8 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,ry1=int64#6 +# asm 2: mov ry1=%r9 +mov %r8,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,ry2=int64#7 +# asm 2: mov ry2=%rax +mov %r8,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,ry3=int64#8 +# asm 2: mov ry3=%r10 +mov %r8,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,ry4=int64#9 +# asm 2: mov ry4=%r11 +mov %r8,%r11 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#5 +# asm 2: imulq $19,mult=%r8 +imulq $19,%r8,%r8 + +# qhasm: ry0 += mult +# asm 1: add mulrax=int64#3 +# asm 2: movq 64(mulrax=%rdx +movq 64(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#8 +# asm 2: movq mulx319_stack=56(%rsp) +movq %rax,56(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(rz0=int64#4 +# asm 2: mov rz0=%rcx +mov %rax,%rcx + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#5 +# asm 2: mov mulr01=%r8 +mov %rdx,%r8 + +# qhasm: mulrax = *(uint64 *)(pp + 72) +# asm 1: movq 72(mulrax=int64#3 +# asm 2: movq 72(mulrax=%rdx +movq 72(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#9 +# asm 2: movq mulx419_stack=64(%rsp) +movq %rax,64(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * 
*(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(rz1=int64#6 +# asm 2: mov rz1=%r9 +mov %rax,%r9 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#8 +# asm 2: mov mulr11=%r10 +mov %rdx,%r10 + +# qhasm: mulrax = *(uint64 *)(pp + 40) +# asm 1: movq 40(mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(rz2=int64#9 +# asm 2: mov rz2=%r11 +mov %rax,%r11 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#10 +# asm 2: mov mulr21=%r12 +mov %rdx,%r12 + +# qhasm: mulrax = *(uint64 *)(pp + 40) +# asm 1: movq 40(mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(rz3=int64#11 +# asm 2: mov rz3=%r13 +mov %rax,%r13 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#12 +# asm 2: mov mulr31=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(pp + 40) +# asm 1: movq 40(mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(rz4=int64#13 +# asm 2: mov rz4=%r15 +mov %rax,%r15 + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#14 +# asm 2: mov mulr41=%rbx +mov %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 48) +# asm 1: movq 48(mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#3 +# asm 2: movq 48(mulrax=%rdx +movq 48(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(mulrax=int64#3 +# asm 2: movq 56(mulrax=%rdx +movq 56(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#3 +# asm 2: movq 56(mulrax=%rdx +movq 56(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax 
+movq 64(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.rz0) << 13 +# asm 1: shld $13,mulr41=int64#5 +# asm 2: imulq $19,mulr41=%r8 +imulq $19,%rbx,%r8 + +# qhasm: rz0 += mulr41 +# asm 1: add mult=int64#5 +# asm 2: mov mult=%r8 +mov %rcx,%r8 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rz1=int64#6 +# asm 2: mov rz1=%r9 +mov %r8,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rz2=int64#7 +# asm 2: mov rz2=%rax +mov %r8,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rz3=int64#8 +# asm 2: mov rz3=%r10 +mov %r8,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rz4=int64#9 +# asm 2: mov rz4=%r11 +mov %r8,%r11 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#5 +# asm 2: imulq $19,mult=%r8 +imulq $19,%r8,%r8 + +# qhasm: rz0 += mult +# asm 1: add mulrax=int64#3 +# asm 2: movq 24(mulrax=%rdx +movq 24(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#8 +# asm 2: movq mulx319_stack=56(%rsp) +movq %rax,56(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96) +# asm 1: mulq 96(rt0=int64#4 +# asm 2: mov rt0=%rcx +mov %rax,%rcx + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#5 +# asm 2: mov mulr01=%r8 +mov %rdx,%r8 + +# qhasm: mulrax = *(uint64 *)(pp + 32) +# asm 1: movq 32(mulrax=int64#3 +# asm 2: movq 32(mulrax=%rdx +movq 32(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#9 +# asm 2: movq mulx419_stack=64(%rsp) +movq %rax,64(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88) +# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88) +# asm 1: mulq 88(rt1=int64#6 +# asm 2: mov rt1=%r9 +mov %rax,%r9 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#8 +# asm 2: mov mulr11=%r10 +mov %rdx,%r10 + +# qhasm: mulrax = *(uint64 *)(pp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 
*)(pp + 96) +# asm 1: mulq 96(rt2=int64#9 +# asm 2: mov rt2=%r11 +mov %rax,%r11 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#10 +# asm 2: mov mulr21=%r12 +mov %rdx,%r12 + +# qhasm: mulrax = *(uint64 *)(pp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104) +# asm 1: mulq 104(rt3=int64#11 +# asm 2: mov rt3=%r13 +mov %rax,%r13 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#12 +# asm 2: mov mulr31=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(pp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112) +# asm 1: mulq 112(rt4=int64#13 +# asm 2: mov rt4=%r15 +mov %rax,%r15 + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#14 +# asm 2: mov mulr41=%rbx +mov %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 8) +# asm 1: movq 8(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88) +# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96) +# asm 1: mulq 96(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104) +# asm 1: mulq 104(mulrax=int64#3 +# asm 2: movq 8(mulrax=%rdx +movq 8(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112) +# asm 1: mulq 112(mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88) +# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96) +# asm 1: mulq 96(mulrax=int64#3 +# asm 2: movq 16(mulrax=%rdx +movq 16(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104) +# asm 1: mulq 104(mulrax=int64#3 +# asm 2: movq 16(mulrax=%rdx +movq 16(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112) +# asm 1: mulq 112(mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88) +# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104) +# asm 1: mulq 104(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112) +# asm 1: mulq 112(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80) +# asm 1: mulq 
80(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96) +# asm 1: mulq 96(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104) +# asm 1: mulq 104(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112) +# asm 1: mulq 112(mulredmask=int64#2 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi + +# qhasm: mulr01 = (mulr01.rt0) << 13 +# asm 1: shld $13,mulr41=int64#3 +# asm 2: imulq $19,mulr41=%rdx +imulq $19,%rbx,%rdx + +# qhasm: rt0 += mulr41 +# asm 1: add mult=int64#3 +# asm 2: mov mult=%rdx +mov %rcx,%rdx + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rt1=int64#5 +# asm 2: mov rt1=%r8 +mov %rdx,%r8 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rt2=int64#6 +# asm 2: mov rt2=%r9 +mov %rdx,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rt3=int64#7 +# asm 2: mov rt3=%rax +mov %rdx,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rt4=int64#8 +# asm 2: mov rt4=%r10 +mov %rdx,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#3 +# asm 2: imulq $19,mult=%rdx +imulq $19,%rdx,%rdx + +# qhasm: rt0 += mult +# asm 1: add caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-51-30k/ge25519_p1p1_to_pniels.s b/src/ed25519-supercop-amd64-51-30k/ge25519_p1p1_to_pniels.s new file mode 100644 index 0000000..1f24615 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/ge25519_p1p1_to_pniels.s @@ -0,0 +1,4110 @@ + +# qhasm: int64 rp + +# qhasm: int64 pp + +# qhasm: input rp + +# qhasm: input pp + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 x0 + +# qhasm: int64 x1 + +# qhasm: int64 x2 + +# qhasm: int64 x3 + +# qhasm: int64 x4 + +# qhasm: int64 y0 + +# qhasm: int64 y1 + +# qhasm: int64 y2 + +# qhasm: int64 y3 + +# qhasm: int64 y4 + +# qhasm: int64 ysubx0 + +# qhasm: int64 ysubx1 + +# qhasm: int64 ysubx2 + +# qhasm: int64 ysubx3 + +# qhasm: int64 ysubx4 + +# qhasm: int64 xaddy0 + +# qhasm: int64 
xaddy1 + +# qhasm: int64 xaddy2 + +# qhasm: int64 xaddy3 + +# qhasm: int64 xaddy4 + +# qhasm: int64 rz0 + +# qhasm: int64 rz1 + +# qhasm: int64 rz2 + +# qhasm: int64 rz3 + +# qhasm: int64 rz4 + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 t4 + +# qhasm: int64 t2d0 + +# qhasm: int64 t2d1 + +# qhasm: int64 t2d2 + +# qhasm: int64 t2d3 + +# qhasm: int64 t2d4 + +# qhasm: stack64 stackt0 + +# qhasm: stack64 stackt1 + +# qhasm: stack64 stackt2 + +# qhasm: stack64 stackt3 + +# qhasm: stack64 stackt4 + +# qhasm: stack64 stackx0 + +# qhasm: stack64 stackx1 + +# qhasm: stack64 stackx2 + +# qhasm: stack64 stackx3 + +# qhasm: stack64 stackx4 + +# qhasm: stack64 stacky1 + +# qhasm: stack64 stacky2 + +# qhasm: stack64 stacky3 + +# qhasm: stack64 stacky4 + +# qhasm: int64 mulr01 + +# qhasm: int64 mulr11 + +# qhasm: int64 mulr21 + +# qhasm: int64 mulr31 + +# qhasm: int64 mulr41 + +# qhasm: int64 mulrax + +# qhasm: int64 mulrdx + +# qhasm: int64 mult + +# qhasm: int64 mulredmask + +# qhasm: stack64 mulx219_stack + +# qhasm: stack64 mulx319_stack + +# qhasm: stack64 mulx419_stack + +# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_pniels +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_pniels +.globl crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_pniels +_crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_pniels: +crypto_sign_ed25519_amd64_51_30k_batch_ge25519_p1p1_to_pniels: +mov %rsp,%r11 +and $31,%r11 +add $128,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: mulrax = *(uint64 *)(pp + 24) +# asm 1: movq 24(mulrax=int64#3 +# asm 2: movq 24(mulrax=%rdx +movq 24(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#8 +# asm 2: movq mulx319_stack=56(%rsp) +movq %rax,56(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(x0=int64#4 +# asm 2: mov x0=%rcx +mov %rax,%rcx + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#5 +# asm 2: mov mulr01=%r8 +mov %rdx,%r8 + +# qhasm: mulrax = *(uint64 *)(pp + 32) +# asm 1: movq 32(mulrax=int64#3 +# asm 2: movq 32(mulrax=%rdx +movq 32(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#9 +# asm 2: movq mulx419_stack=64(%rsp) +movq %rax,64(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 
128(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(x1=int64#6 +# asm 2: mov x1=%r9 +mov %rax,%r9 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#8 +# asm 2: mov mulr11=%r10 +mov %rdx,%r10 + +# qhasm: mulrax = *(uint64 *)(pp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(x2=int64#9 +# asm 2: mov x2=%r11 +mov %rax,%r11 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#10 +# asm 2: mov mulr21=%r12 +mov %rdx,%r12 + +# qhasm: mulrax = *(uint64 *)(pp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(x3=int64#11 +# asm 2: mov x3=%r13 +mov %rax,%r13 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#12 +# asm 2: mov mulr31=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(pp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(x4=int64#13 +# asm 2: mov x4=%r15 +mov %rax,%r15 + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#14 +# asm 2: mov mulr41=%rbx +mov %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 8) +# asm 1: movq 8(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#3 +# asm 2: movq 8(mulrax=%rdx +movq 8(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(mulrax=int64#3 +# asm 2: movq 16(mulrax=%rdx +movq 16(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#3 +# asm 2: movq 16(mulrax=%rdx +movq 16(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * 
*(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.x0) << 13 +# asm 1: shld $13,mulr41=int64#5 +# asm 2: imulq $19,mulr41=%r8 +imulq $19,%rbx,%r8 + +# qhasm: x0 += mulr41 +# asm 1: add mult=int64#5 +# asm 2: mov mult=%r8 +mov %rcx,%r8 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,x1=int64#6 +# asm 2: mov x1=%r9 +mov %r8,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,x2=int64#7 +# asm 2: mov x2=%rax +mov %r8,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,x3=int64#8 +# asm 2: mov x3=%r10 +mov %r8,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,x4=int64#9 +# asm 2: mov x4=%r11 +mov %r8,%r11 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#5 +# asm 2: imulq $19,mult=%r8 +imulq $19,%r8,%r8 + +# qhasm: x0 += mult +# asm 1: add stackx0=stack64#8 +# asm 2: movq stackx0=56(%rsp) +movq %rcx,56(%rsp) + +# qhasm: stackx1 = x1 +# asm 1: movq stackx1=stack64#9 +# asm 2: movq stackx1=64(%rsp) +movq %r9,64(%rsp) + +# qhasm: stackx2 = x2 +# asm 1: movq stackx2=stack64#10 +# asm 2: movq stackx2=72(%rsp) +movq %rax,72(%rsp) + +# qhasm: stackx3 = x3 +# asm 1: movq stackx3=stack64#11 +# asm 2: movq stackx3=80(%rsp) +movq %r10,80(%rsp) + +# qhasm: stackx4 = x4 +# asm 1: movq stackx4=stack64#12 +# asm 2: movq stackx4=88(%rsp) +movq %r11,88(%rsp) + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: movq 104(mulrax=int64#3 +# asm 2: movq 104(mulrax=%rdx +movq 104(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#13 +# asm 2: movq mulx319_stack=96(%rsp) +movq %rax,96(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56) +# asm 1: mulq 56(y0=int64#4 +# asm 2: mov y0=%rcx +mov %rax,%rcx + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#5 +# asm 2: mov mulr01=%r8 +mov %rdx,%r8 + +# qhasm: mulrax = *(uint64 *)(pp + 112) +# asm 1: movq 112(mulrax=int64#3 +# asm 2: movq 112(mulrax=%rdx +movq 112(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#14 +# asm 2: movq mulx419_stack=104(%rsp) +movq %rax,104(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * 
*(uint64 *)(pp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48) +# asm 1: mulq 48(y1=int64#6 +# asm 2: mov y1=%r9 +mov %rax,%r9 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#8 +# asm 2: mov mulr11=%r10 +mov %rdx,%r10 + +# qhasm: mulrax = *(uint64 *)(pp + 80) +# asm 1: movq 80(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56) +# asm 1: mulq 56(y2=int64#9 +# asm 2: mov y2=%r11 +mov %rax,%r11 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#10 +# asm 2: mov mulr21=%r12 +mov %rdx,%r12 + +# qhasm: mulrax = *(uint64 *)(pp + 80) +# asm 1: movq 80(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(y3=int64#11 +# asm 2: mov y3=%r13 +mov %rax,%r13 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#12 +# asm 2: mov mulr31=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(pp + 80) +# asm 1: movq 80(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(y4=int64#13 +# asm 2: mov y4=%r15 +mov %rax,%r15 + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#14 +# asm 2: mov mulr41=%rbx +mov %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 88) +# asm 1: movq 88(mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56) +# asm 1: mulq 56(mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(mulrax=int64#3 +# asm 2: movq 88(mulrax=%rdx +movq 88(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56) +# asm 1: mulq 56(mulrax=int64#3 +# asm 2: movq 96(mulrax=%rdx +movq 96(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(mulrax=int64#3 +# asm 2: movq 96(mulrax=%rdx +movq 96(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: 
(uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 56) +# asm 1: mulq 56(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.y0) << 13 +# asm 1: shld $13,mulr41=int64#5 +# asm 2: imulq $19,mulr41=%r8 +imulq $19,%rbx,%r8 + +# qhasm: y0 += mulr41 +# asm 1: add mult=int64#5 +# asm 2: mov mult=%r8 +mov %rcx,%r8 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,y1=int64#6 +# asm 2: mov y1=%r9 +mov %r8,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,y2=int64#7 +# asm 2: mov y2=%rax +mov %r8,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,y3=int64#8 +# asm 2: mov y3=%r10 +mov %r8,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,y4=int64#9 +# asm 2: mov y4=%r11 +mov %r8,%r11 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#5 +# asm 2: imulq $19,mult=%r8 +imulq $19,%r8,%r8 + +# qhasm: y0 += mult +# asm 1: add ysubx0=int64#3 +# asm 2: mov ysubx0=%rdx +mov %rcx,%rdx + +# qhasm: ysubx1 = y1 +# asm 1: mov ysubx1=int64#5 +# asm 2: mov ysubx1=%r8 +mov %r9,%r8 + +# qhasm: ysubx2 = y2 +# asm 1: mov ysubx2=int64#10 +# asm 2: mov ysubx2=%r12 +mov %rax,%r12 + +# qhasm: ysubx3 = y3 +# asm 1: mov ysubx3=int64#11 +# asm 2: mov ysubx3=%r13 +mov %r10,%r13 + +# qhasm: ysubx4 = y4 +# asm 1: mov ysubx4=int64#12 +# asm 2: mov ysubx4=%r14 +mov %r11,%r14 + +# qhasm: ysubx0 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0 +# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,x0=int64#13 +# asm 2: movq x0=%r15 +movq 56(%rsp),%r15 + +# qhasm: ysubx0 -= x0 +# asm 1: sub x1=int64#13 +# asm 2: movq x1=%r15 +movq 64(%rsp),%r15 + +# qhasm: ysubx1 -= x1 +# asm 1: sub x2=int64#13 +# asm 2: movq x2=%r15 +movq 72(%rsp),%r15 + +# qhasm: ysubx2 -= x2 +# asm 1: sub x3=int64#13 +# asm 2: movq x3=%r15 +movq 80(%rsp),%r15 + +# qhasm: ysubx3 -= x3 +# asm 1: sub x4=int64#13 +# asm 2: movq x4=%r15 +movq 88(%rsp),%r15 + +# qhasm: ysubx4 -= x4 +# asm 1: sub mulrax=int64#3 +# asm 2: movq 64(mulrax=%rdx +movq 64(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#8 +# asm 2: movq mulx319_stack=56(%rsp) +movq %rax,56(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(rz0=int64#4 +# asm 2: mov rz0=%rcx +mov %rax,%rcx + +# qhasm: mulr01 = mulrdx +# asm 
1: mov mulr01=int64#5 +# asm 2: mov mulr01=%r8 +mov %rdx,%r8 + +# qhasm: mulrax = *(uint64 *)(pp + 72) +# asm 1: movq 72(mulrax=int64#3 +# asm 2: movq 72(mulrax=%rdx +movq 72(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#9 +# asm 2: movq mulx419_stack=64(%rsp) +movq %rax,64(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(rz1=int64#6 +# asm 2: mov rz1=%r9 +mov %rax,%r9 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#8 +# asm 2: mov mulr11=%r10 +mov %rdx,%r10 + +# qhasm: mulrax = *(uint64 *)(pp + 40) +# asm 1: movq 40(mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(rz2=int64#9 +# asm 2: mov rz2=%r11 +mov %rax,%r11 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#10 +# asm 2: mov mulr21=%r12 +mov %rdx,%r12 + +# qhasm: mulrax = *(uint64 *)(pp + 40) +# asm 1: movq 40(mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(rz3=int64#11 +# asm 2: mov rz3=%r13 +mov %rax,%r13 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#12 +# asm 2: mov mulr31=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(pp + 40) +# asm 1: movq 40(mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(rz4=int64#13 +# asm 2: mov rz4=%r15 +mov %rax,%r15 + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#14 +# asm 2: mov mulr41=%rbx +mov %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 48) +# asm 1: movq 48(mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#3 +# asm 2: movq 48(mulrax=%rdx +movq 48(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(mulrax=int64#3 +# asm 2: movq 56(mulrax=%rdx +movq 56(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 
1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#3 +# asm 2: movq 56(mulrax=%rdx +movq 56(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 136) +# asm 1: mulq 136(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 144) +# asm 1: mulq 144(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 152) +# asm 1: mulq 152(mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.rz0) << 13 +# asm 1: shld $13,mulr41=int64#5 +# asm 2: imulq $19,mulr41=%r8 +imulq $19,%rbx,%r8 + +# qhasm: rz0 += mulr41 +# asm 1: add mult=int64#5 +# asm 2: mov mult=%r8 +mov %rcx,%r8 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rz1=int64#6 +# asm 2: mov rz1=%r9 +mov %r8,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rz2=int64#7 +# asm 2: mov rz2=%rax +mov %r8,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rz3=int64#8 +# asm 2: mov rz3=%r10 +mov %r8,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rz4=int64#9 +# asm 2: mov rz4=%r11 +mov %r8,%r11 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#5 +# asm 2: imulq $19,mult=%r8 +imulq $19,%r8,%r8 + +# qhasm: rz0 += mult +# asm 1: add mulrax=int64#3 +# asm 2: movq 24(mulrax=%rdx +movq 24(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#8 +# asm 2: movq mulx319_stack=56(%rsp) +movq %rax,56(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96) +# asm 1: mulq 96(t0=int64#4 +# asm 2: mov t0=%rcx +mov %rax,%rcx + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#5 +# asm 2: mov mulr01=%r8 +mov %rdx,%r8 + +# qhasm: mulrax = *(uint64 *)(pp + 32) +# asm 1: movq 32(mulrax=int64#3 +# asm 2: movq 32(mulrax=%rdx +movq 32(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#9 +# asm 2: movq mulx419_stack=64(%rsp) +movq %rax,64(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88) +# asm 1: 
mulq 88(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88) +# asm 1: mulq 88(t1=int64#6 +# asm 2: mov t1=%r9 +mov %rax,%r9 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#8 +# asm 2: mov mulr11=%r10 +mov %rdx,%r10 + +# qhasm: mulrax = *(uint64 *)(pp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96) +# asm 1: mulq 96(t2=int64#9 +# asm 2: mov t2=%r11 +mov %rax,%r11 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#10 +# asm 2: mov mulr21=%r12 +mov %rdx,%r12 + +# qhasm: mulrax = *(uint64 *)(pp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104) +# asm 1: mulq 104(t3=int64#11 +# asm 2: mov t3=%r13 +mov %rax,%r13 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#12 +# asm 2: mov mulr31=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(pp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112) +# asm 1: mulq 112(t4=int64#13 +# asm 2: mov t4=%r15 +mov %rax,%r15 + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#14 +# asm 2: mov mulr41=%rbx +mov %rdx,%rbx + +# qhasm: mulrax = *(uint64 *)(pp + 8) +# asm 1: movq 8(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88) +# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96) +# asm 1: mulq 96(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104) +# asm 1: mulq 104(mulrax=int64#3 +# asm 2: movq 8(mulrax=%rdx +movq 8(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112) +# asm 1: mulq 112(mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88) +# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96) +# asm 1: mulq 96(mulrax=int64#3 +# asm 2: movq 16(mulrax=%rdx +movq 16(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104) +# asm 1: mulq 104(mulrax=int64#3 +# asm 2: movq 16(mulrax=%rdx +movq 16(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112) +# asm 1: mulq 112(mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 
80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 88) +# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104) +# asm 1: mulq 104(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112) +# asm 1: mulq 112(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 96) +# asm 1: mulq 96(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 104) +# asm 1: mulq 104(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(pp + 112) +# asm 1: mulq 112(mulredmask=int64#2 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi + +# qhasm: mulr01 = (mulr01.t0) << 13 +# asm 1: shld $13,mulr41=int64#3 +# asm 2: imulq $19,mulr41=%rdx +imulq $19,%rbx,%rdx + +# qhasm: t0 += mulr41 +# asm 1: add mult=int64#3 +# asm 2: mov mult=%rdx +mov %rcx,%rdx + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,t1=int64#5 +# asm 2: mov t1=%r8 +mov %rdx,%r8 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,t2=int64#6 +# asm 2: mov t2=%r9 +mov %rdx,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,t3=int64#7 +# asm 2: mov t3=%rax +mov %rdx,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,t4=int64#8 +# asm 2: mov t4=%r10 +mov %rdx,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#3 +# asm 2: imulq $19,mult=%rdx +imulq $19,%rdx,%rdx + +# qhasm: t0 += mult +# asm 1: add stackt0=stack64#8 +# asm 2: movq stackt0=56(%rsp) +movq %rcx,56(%rsp) + +# qhasm: stackt1 = t1 +# asm 1: movq stackt1=stack64#9 +# asm 2: movq stackt1=64(%rsp) +movq %r8,64(%rsp) + +# qhasm: stackt2 = t2 +# asm 1: movq stackt2=stack64#10 +# asm 2: movq stackt2=72(%rsp) +movq %r9,72(%rsp) + +# qhasm: stackt3 = t3 +# asm 1: movq stackt3=stack64#11 +# asm 2: movq stackt3=80(%rsp) +movq %rax,80(%rsp) + +# qhasm: stackt4 = t4 +# asm 1: movq stackt4=stack64#12 +# asm 2: movq stackt4=88(%rsp) +movq %r10,88(%rsp) + +# qhasm: mulrax = stackt3 +# asm 1: movq mulrax=int64#2 +# asm 2: movq mulrax=%rsi +movq 80(%rsp),%rsi + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rsi,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#13 +# asm 2: movq mulx319_stack=96(%rsp) +movq %rax,96(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 + +# qhasm: t2d0 = mulrax +# asm 1: mov t2d0=int64#2 +# asm 2: mov t2d0=%rsi +mov %rax,%rsi + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#4 +# asm 2: mov mulr01=%rcx +mov %rdx,%rcx + +# qhasm: mulrax = stackt4 +# asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 88(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#14 +# asm 2: movq mulx419_stack=104(%rsp) +movq %rax,104(%rsp) 
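+
+# note: this mulq sequence forms t2d = t * 2d in the radix-2^51
+# representation used throughout this file: a field element is five
+# 51-bit limbs, and EC2D0..EC2D4 are (by their names and usage here)
+# the limbs of the curve constant 2d. A product of limb i and limb j
+# lands in limb i+j; terms that wrap past limb 4 pick up a factor of 19,
+# since 2^255 = 19 (mod 2^255-19), which is why the high limbs are
+# pre-scaled with imulq $19 before each wrapped mulq.
+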
+ +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 + +# qhasm: carry? t2d0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 + +# qhasm: carry? t2d0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 + +# qhasm: t2d1 = mulrax +# asm 1: mov t2d1=int64#5 +# asm 2: mov t2d1=%r8 +mov %rax,%r8 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#6 +# asm 2: mov mulr11=%r9 +mov %rdx,%r9 + +# qhasm: mulrax = stackt0 +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 + +# qhasm: t2d2 = mulrax +# asm 1: mov t2d2=int64#8 +# asm 2: mov t2d2=%r10 +mov %rax,%r10 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#9 +# asm 2: mov mulr21=%r11 +mov %rdx,%r11 + +# qhasm: mulrax = stackt0 +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 + +# qhasm: t2d3 = mulrax +# asm 1: mov t2d3=int64#10 +# asm 2: mov t2d3=%r12 +mov %rax,%r12 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#11 +# asm 2: mov mulr31=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = stackt0 +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 + +# qhasm: t2d4 = mulrax +# asm 1: mov t2d4=int64#12 +# asm 2: mov t2d4=%r14 +mov %rax,%r14 + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#13 +# asm 2: mov mulr41=%r15 +mov %rdx,%r15 + +# qhasm: mulrax = stackt1 +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 + +# qhasm: carry? t2d1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 + +# qhasm: carry? t2d2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 + +# qhasm: carry? t2d3 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 + +# qhasm: carry? 
t2d4 += mulrax +# asm 1: add mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 64(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 + +# qhasm: carry? t2d0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 + +# qhasm: carry? t2d2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 + +# qhasm: carry? t2d3 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 + +# qhasm: carry? t2d4 += mulrax +# asm 1: add mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 72(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 + +# qhasm: carry? t2d0 += mulrax +# asm 1: add mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 72(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 + +# qhasm: carry? t2d1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 + +# qhasm: carry? t2d3 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D1 + +# qhasm: carry? t2d4 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 + +# qhasm: carry? t2d1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 + +# qhasm: carry? t2d2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 88(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D0 + +# qhasm: carry? t2d4 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D2 + +# qhasm: carry? 
t2d1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D3 + +# qhasm: carry? t2d2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 +mulq crypto_sign_ed25519_amd64_51_30k_batch_EC2D4 + +# qhasm: carry? t2d3 += mulrax +# asm 1: add mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.t2d0) << 13 +# asm 1: shld $13,mulr41=int64#4 +# asm 2: imulq $19,mulr41=%rcx +imulq $19,%r15,%rcx + +# qhasm: t2d0 += mulr41 +# asm 1: add mult=int64#4 +# asm 2: mov mult=%rcx +mov %rsi,%rcx + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,t2d1=int64#5 +# asm 2: mov t2d1=%r8 +mov %rcx,%r8 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,t2d2=int64#6 +# asm 2: mov t2d2=%r9 +mov %rcx,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,t2d3=int64#7 +# asm 2: mov t2d3=%rax +mov %rcx,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,t2d4=int64#8 +# asm 2: mov t2d4=%r10 +mov %rcx,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#4 +# asm 2: imulq $19,mult=%rcx +imulq $19,%rcx,%rcx + +# qhasm: t2d0 += mult +# asm 1: add caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-51-30k/ge25519_pack.c b/src/ed25519-supercop-amd64-51-30k/ge25519_pack.c new file mode 100644 index 0000000..f289fe5 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/ge25519_pack.c @@ -0,0 +1,13 @@ +#include "fe25519.h" +#include "sc25519.h" +#include "ge25519.h" + +void ge25519_pack(unsigned char r[32], const ge25519_p3 *p) +{ + fe25519 tx, ty, zi; + fe25519_invert(&zi, &p->z); + fe25519_mul(&tx, &p->x, &zi); + fe25519_mul(&ty, &p->y, &zi); + fe25519_pack(r, &ty); + r[31] ^= fe25519_getparity(&tx) << 7; +} diff --git a/src/ed25519-supercop-amd64-51-30k/ge25519_pnielsadd_p1p1.s b/src/ed25519-supercop-amd64-51-30k/ge25519_pnielsadd_p1p1.s new file mode 100644 index 0000000..512af25 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/ge25519_pnielsadd_p1p1.s @@ -0,0 +1,3791 @@ + +# qhasm: int64 rp + +# qhasm: int64 pp + +# qhasm: int64 qp + +# qhasm: input rp + +# qhasm: input pp + +# qhasm: input qp + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + 
+# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 a0 + +# qhasm: int64 a1 + +# qhasm: int64 a2 + +# qhasm: int64 a3 + +# qhasm: int64 a4 + +# qhasm: stack64 a0_stack + +# qhasm: stack64 a1_stack + +# qhasm: stack64 a2_stack + +# qhasm: stack64 a3_stack + +# qhasm: stack64 a4_stack + +# qhasm: int64 b0 + +# qhasm: int64 b1 + +# qhasm: int64 b2 + +# qhasm: int64 b3 + +# qhasm: int64 b4 + +# qhasm: stack64 b0_stack + +# qhasm: stack64 b1_stack + +# qhasm: stack64 b2_stack + +# qhasm: stack64 b3_stack + +# qhasm: stack64 b4_stack + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: int64 c4 + +# qhasm: stack64 c0_stack + +# qhasm: stack64 c1_stack + +# qhasm: stack64 c2_stack + +# qhasm: stack64 c3_stack + +# qhasm: stack64 c4_stack + +# qhasm: int64 d0 + +# qhasm: int64 d1 + +# qhasm: int64 d2 + +# qhasm: int64 d3 + +# qhasm: int64 d4 + +# qhasm: stack64 d0_stack + +# qhasm: stack64 d1_stack + +# qhasm: stack64 d2_stack + +# qhasm: stack64 d3_stack + +# qhasm: stack64 d4_stack + +# qhasm: int64 t10 + +# qhasm: int64 t11 + +# qhasm: int64 t12 + +# qhasm: int64 t13 + +# qhasm: int64 t14 + +# qhasm: stack64 t10_stack + +# qhasm: stack64 t11_stack + +# qhasm: stack64 t12_stack + +# qhasm: stack64 t13_stack + +# qhasm: stack64 t14_stack + +# qhasm: int64 t20 + +# qhasm: int64 t21 + +# qhasm: int64 t22 + +# qhasm: int64 t23 + +# qhasm: int64 t24 + +# qhasm: stack64 t20_stack + +# qhasm: stack64 t21_stack + +# qhasm: stack64 t22_stack + +# qhasm: stack64 t23_stack + +# qhasm: stack64 t24_stack + +# qhasm: int64 rx0 + +# qhasm: int64 rx1 + +# qhasm: int64 rx2 + +# qhasm: int64 rx3 + +# qhasm: int64 rx4 + +# qhasm: int64 ry0 + +# qhasm: int64 ry1 + +# qhasm: int64 ry2 + +# qhasm: int64 ry3 + +# qhasm: int64 ry4 + +# qhasm: int64 rz0 + +# qhasm: int64 rz1 + +# qhasm: int64 rz2 + +# qhasm: int64 rz3 + +# qhasm: int64 rz4 + +# qhasm: int64 rt0 + +# qhasm: int64 rt1 + +# qhasm: int64 rt2 + +# qhasm: int64 rt3 + +# qhasm: int64 rt4 + +# qhasm: int64 x0 + +# qhasm: int64 x1 + +# qhasm: int64 x2 + +# qhasm: int64 x3 + +# qhasm: int64 x4 + +# qhasm: int64 mulr01 + +# qhasm: int64 mulr11 + +# qhasm: int64 mulr21 + +# qhasm: int64 mulr31 + +# qhasm: int64 mulr41 + +# qhasm: int64 mulrax + +# qhasm: int64 mulrdx + +# qhasm: int64 mult + +# qhasm: int64 mulredmask + +# qhasm: stack64 mulx219_stack + +# qhasm: stack64 mulx319_stack + +# qhasm: stack64 mulx419_stack + +# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ge25519_pnielsadd_p1p1 +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_51_30k_batch_ge25519_pnielsadd_p1p1 +.globl crypto_sign_ed25519_amd64_51_30k_batch_ge25519_pnielsadd_p1p1 +_crypto_sign_ed25519_amd64_51_30k_batch_ge25519_pnielsadd_p1p1: +crypto_sign_ed25519_amd64_51_30k_batch_ge25519_pnielsadd_p1p1: +mov %rsp,%r11 +and $31,%r11 +add $160,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq 
%r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: qp = qp +# asm 1: mov qp=int64#4 +# asm 2: mov qp=%rcx +mov %rdx,%rcx + +# qhasm: a0 = *(uint64 *)(pp + 40) +# asm 1: movq 40(a0=int64#3 +# asm 2: movq 40(a0=%rdx +movq 40(%rsi),%rdx + +# qhasm: a1 = *(uint64 *)(pp + 48) +# asm 1: movq 48(a1=int64#5 +# asm 2: movq 48(a1=%r8 +movq 48(%rsi),%r8 + +# qhasm: a2 = *(uint64 *)(pp + 56) +# asm 1: movq 56(a2=int64#6 +# asm 2: movq 56(a2=%r9 +movq 56(%rsi),%r9 + +# qhasm: a3 = *(uint64 *)(pp + 64) +# asm 1: movq 64(a3=int64#7 +# asm 2: movq 64(a3=%rax +movq 64(%rsi),%rax + +# qhasm: a4 = *(uint64 *)(pp + 72) +# asm 1: movq 72(a4=int64#8 +# asm 2: movq 72(a4=%r10 +movq 72(%rsi),%r10 + +# qhasm: b0 = a0 +# asm 1: mov b0=int64#9 +# asm 2: mov b0=%r11 +mov %rdx,%r11 + +# qhasm: b1 = a1 +# asm 1: mov b1=int64#10 +# asm 2: mov b1=%r12 +mov %r8,%r12 + +# qhasm: b2 = a2 +# asm 1: mov b2=int64#11 +# asm 2: mov b2=%r13 +mov %r9,%r13 + +# qhasm: b3 = a3 +# asm 1: mov b3=int64#12 +# asm 2: mov b3=%r14 +mov %rax,%r14 + +# qhasm: b4 = a4 +# asm 1: mov b4=int64#13 +# asm 2: mov b4=%r15 +mov %r10,%r15 + +# qhasm: a0 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0 +# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,a0_stack=stack64#8 +# asm 2: movq a0_stack=56(%rsp) +movq %rdx,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq a1_stack=stack64#9 +# asm 2: movq a1_stack=64(%rsp) +movq %r8,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq a2_stack=stack64#10 +# asm 2: movq a2_stack=72(%rsp) +movq %r9,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq a3_stack=stack64#11 +# asm 2: movq a3_stack=80(%rsp) +movq %rax,80(%rsp) + +# qhasm: a4_stack = a4 +# asm 1: movq a4_stack=stack64#12 +# asm 2: movq a4_stack=88(%rsp) +movq %r10,88(%rsp) + +# qhasm: b0_stack = b0 +# asm 1: movq b0_stack=stack64#13 +# asm 2: movq b0_stack=96(%rsp) +movq %r11,96(%rsp) + +# qhasm: b1_stack = b1 +# asm 1: movq b1_stack=stack64#14 +# asm 2: movq b1_stack=104(%rsp) +movq %r12,104(%rsp) + +# qhasm: b2_stack = b2 +# asm 1: movq b2_stack=stack64#15 +# asm 2: movq b2_stack=112(%rsp) +movq %r13,112(%rsp) + +# qhasm: b3_stack = b3 +# asm 1: movq b3_stack=stack64#16 +# asm 2: movq b3_stack=120(%rsp) +movq %r14,120(%rsp) + +# qhasm: b4_stack = b4 +# asm 1: movq b4_stack=stack64#17 +# asm 2: movq b4_stack=128(%rsp) +movq %r15,128(%rsp) + +# qhasm: mulrax = a3_stack +# asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 80(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#18 +# asm 2: movq mulx319_stack=136(%rsp) +movq %rax,136(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 16) +# asm 1: mulq 16(a0=int64#5 +# asm 2: mov a0=%r8 +mov %rax,%r8 + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#6 +# asm 2: mov mulr01=%r9 +mov %rdx,%r9 + +# qhasm: mulrax = a4_stack +# asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 88(%rsp),%rdx + +# qhasm: mulrax *= 19 +# 
asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#19 +# asm 2: movq mulx419_stack=144(%rsp) +movq %rax,144(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 8) +# asm 1: mulq 8(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 0) +# asm 1: mulq 0(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 8) +# asm 1: mulq 8(a1=int64#8 +# asm 2: mov a1=%r10 +mov %rax,%r10 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#9 +# asm 2: mov mulr11=%r11 +mov %rdx,%r11 + +# qhasm: mulrax = a0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 16) +# asm 1: mulq 16(a2=int64#10 +# asm 2: mov a2=%r12 +mov %rax,%r12 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#11 +# asm 2: mov mulr21=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = a0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 24) +# asm 1: mulq 24(a3=int64#12 +# asm 2: mov a3=%r14 +mov %rax,%r14 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#13 +# asm 2: mov mulr31=%r15 +mov %rdx,%r15 + +# qhasm: mulrax = a0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 32) +# asm 1: mulq 32(a4=int64#14 +# asm 2: mov a4=%rbx +mov %rax,%rbx + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#15 +# asm 2: mov mulr41=%rbp +mov %rdx,%rbp + +# qhasm: mulrax = a1_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 0) +# asm 1: mulq 0(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 8) +# asm 1: mulq 8(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 16) +# asm 1: mulq 16(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 24) +# asm 1: mulq 24(mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 64(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 32) +# asm 1: mulq 32(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 0) +# asm 1: mulq 0(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 8) +# asm 1: mulq 8(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 16) +# asm 1: mulq 16(mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 72(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 24) +# asm 1: mulq 24(mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 72(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: 
(uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 32) +# asm 1: mulq 32(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 0) +# asm 1: mulq 0(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 8) +# asm 1: mulq 8(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 24) +# asm 1: mulq 24(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 32) +# asm 1: mulq 32(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 88(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 0) +# asm 1: mulq 0(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 16) +# asm 1: mulq 16(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 24) +# asm 1: mulq 24(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 32) +# asm 1: mulq 32(mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.a0) << 13 +# asm 1: shld $13,mulr41=int64#6 +# asm 2: imulq $19,mulr41=%r9 +imulq $19,%rbp,%r9 + +# qhasm: a0 += mulr41 +# asm 1: add mult=int64#6 +# asm 2: mov mult=%r9 +mov %r8,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,a1=int64#7 +# asm 2: mov a1=%rax +mov %r9,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,a2=int64#8 +# asm 2: mov a2=%r10 +mov %r9,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,a3=int64#9 +# asm 2: mov a3=%r11 +mov %r9,%r11 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,a4=int64#10 +# asm 2: mov a4=%r12 +mov %r9,%r12 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#6 +# asm 2: imulq $19,mult=%r9 +imulq $19,%r9,%r9 + +# qhasm: a0 += mult +# asm 1: add a0_stack=stack64#8 +# asm 2: movq a0_stack=56(%rsp) +movq %r8,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq a1_stack=stack64#9 +# asm 2: movq a1_stack=64(%rsp) +movq %rax,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq a2_stack=stack64#10 +# asm 2: movq a2_stack=72(%rsp) +movq %r10,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq a3_stack=stack64#11 +# asm 2: movq a3_stack=80(%rsp) +movq %r11,80(%rsp) + +# qhasm: a4_stack = a4 +# asm 1: movq a4_stack=stack64#12 +# asm 2: movq a4_stack=88(%rsp) +movq %r12,88(%rsp) + +# qhasm: mulrax = b3_stack +# asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 120(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#18 +# asm 2: movq mulx319_stack=136(%rsp) +movq %rax,136(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 56) +# asm 1: mulq 56(rx0=int64#5 +# asm 2: mov rx0=%r8 +mov %rax,%r8 + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#6 +# asm 2: mov mulr01=%r9 +mov %rdx,%r9 + +# qhasm: mulrax = b4_stack +# asm 1: movq mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 128(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq 
mulx419_stack=stack64#19 +# asm 2: movq mulx419_stack=144(%rsp) +movq %rax,144(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 48) +# asm 1: mulq 48(rx1=int64#8 +# asm 2: mov rx1=%r10 +mov %rax,%r10 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#9 +# asm 2: mov mulr11=%r11 +mov %rdx,%r11 + +# qhasm: mulrax = b0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 56) +# asm 1: mulq 56(rx2=int64#10 +# asm 2: mov rx2=%r12 +mov %rax,%r12 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#11 +# asm 2: mov mulr21=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = b0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 64) +# asm 1: mulq 64(rx3=int64#12 +# asm 2: mov rx3=%r14 +mov %rax,%r14 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#13 +# asm 2: mov mulr31=%r15 +mov %rdx,%r15 + +# qhasm: mulrax = b0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 72) +# asm 1: mulq 72(rx4=int64#14 +# asm 2: mov rx4=%rbx +mov %rax,%rbx + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#15 +# asm 2: mov mulr41=%rbp +mov %rdx,%rbp + +# qhasm: mulrax = b1_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 56) +# asm 1: mulq 56(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 64) +# asm 1: mulq 64(mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 104(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 72) +# asm 1: mulq 72(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 56) +# asm 1: mulq 56(mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 112(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 64) +# asm 1: mulq 64(mulrax=int64#3 +# asm 2: movq mulrax=%rdx +movq 112(%rsp),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 72) +# asm 1: mulq 72(mulrax=int64#7 +# asm 2: movq 
mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 48) +# asm 1: mulq 48(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 64) +# asm 1: mulq 64(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 72) +# asm 1: mulq 72(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 128(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 40) +# asm 1: mulq 40(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 56) +# asm 1: mulq 56(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 64) +# asm 1: mulq 64(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 72) +# asm 1: mulq 72(mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.rx0) << 13 +# asm 1: shld $13,mulr41=int64#6 +# asm 2: imulq $19,mulr41=%r9 +imulq $19,%rbp,%r9 + +# qhasm: rx0 += mulr41 +# asm 1: add mult=int64#6 +# asm 2: mov mult=%r9 +mov %r8,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rx1=int64#7 +# asm 2: mov rx1=%rax +mov %r9,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rx2=int64#8 +# asm 2: mov rx2=%r10 +mov %r9,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rx3=int64#9 +# asm 2: mov rx3=%r11 +mov %r9,%r11 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rx4=int64#10 +# asm 2: mov rx4=%r12 +mov %r9,%r12 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#6 +# asm 2: imulq $19,mult=%r9 +imulq $19,%r9,%r9 + +# qhasm: rx0 += mult +# asm 1: add ry0=int64#3 +# asm 2: mov ry0=%rdx +mov %r8,%rdx + +# qhasm: ry1 = rx1 +# asm 1: mov ry1=int64#6 +# asm 2: mov ry1=%r9 +mov %rax,%r9 + +# qhasm: ry2 = rx2 +# asm 1: mov ry2=int64#11 +# asm 2: mov ry2=%r13 +mov %r10,%r13 + +# qhasm: ry3 = rx3 +# asm 1: mov ry3=int64#12 +# asm 2: mov ry3=%r14 +mov %r11,%r14 + +# qhasm: ry4 = rx4 +# asm 1: mov ry4=int64#13 +# asm 2: mov ry4=%r15 +mov %r12,%r15 + +# qhasm: rx0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0 +# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,mulrax=int64#3 +# asm 2: movq 144(mulrax=%rdx +movq 144(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#8 +# asm 2: movq mulx319_stack=56(%rsp) +movq %rax,56(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 136) +# asm 1: mulq 136(c0=int64#5 +# asm 2: mov c0=%r8 +mov %rax,%r8 + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#6 +# asm 2: mov mulr01=%r9 +mov %rdx,%r9 + +# qhasm: mulrax = *(uint64 *)(pp + 152) +# asm 1: movq 152(mulrax=int64#3 +# asm 2: movq 152(mulrax=%rdx +movq 152(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax +# asm 1: movq mulx419_stack=stack64#9 +# asm 2: movq mulx419_stack=64(%rsp) +movq %rax,64(%rsp) + +# qhasm: (uint128) 
mulrdx mulrax = mulrax * *(uint64 *)(qp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 128) +# asm 1: mulq 128(c1=int64#8 +# asm 2: mov c1=%r10 +mov %rax,%r10 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#9 +# asm 2: mov mulr11=%r11 +mov %rdx,%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 136) +# asm 1: mulq 136(c2=int64#10 +# asm 2: mov c2=%r12 +mov %rax,%r12 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#11 +# asm 2: mov mulr21=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 144) +# asm 1: mulq 144(c3=int64#12 +# asm 2: mov c3=%r14 +mov %rax,%r14 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#13 +# asm 2: mov mulr31=%r15 +mov %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(pp + 120) +# asm 1: movq 120(mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 152) +# asm 1: mulq 152(c4=int64#14 +# asm 2: mov c4=%rbx +mov %rax,%rbx + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#15 +# asm 2: mov mulr41=%rbp +mov %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(pp + 128) +# asm 1: movq 128(mulrax=int64#7 +# asm 2: movq 128(mulrax=%rax +movq 128(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 128(mulrax=%rax +movq 128(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 128(mulrax=%rax +movq 128(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 136) +# asm 1: mulq 136(mulrax=int64#7 +# asm 2: movq 128(mulrax=%rax +movq 128(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 144) +# asm 1: mulq 144(mulrax=int64#3 +# asm 2: movq 128(mulrax=%rdx +movq 128(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 136(mulrax=%rax +movq 136(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 136(mulrax=%rax +movq 136(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq 136(mulrax=%rax +movq 136(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 136) +# asm 1: mulq 136(mulrax=int64#3 +# asm 2: movq 136(mulrax=%rdx +movq 136(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 144) +# asm 1: mulq 144(mulrax=int64#3 +# asm 2: movq 136(mulrax=%rdx +movq 136(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 152) +# 
asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 144(mulrax=%rax +movq 144(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq 144(mulrax=%rax +movq 144(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 128) +# asm 1: mulq 128(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 144) +# asm 1: mulq 144(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 152) +# asm 1: mulq 152(mulrax=int64#7 +# asm 2: movq 152(mulrax=%rax +movq 152(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 120) +# asm 1: mulq 120(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 136) +# asm 1: mulq 136(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 144) +# asm 1: mulq 144(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 152) +# asm 1: mulq 152(mulredmask=int64#3 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx + +# qhasm: mulr01 = (mulr01.c0) << 13 +# asm 1: shld $13,mulr41=int64#6 +# asm 2: imulq $19,mulr41=%r9 +imulq $19,%rbp,%r9 + +# qhasm: c0 += mulr41 +# asm 1: add mult=int64#6 +# asm 2: mov mult=%r9 +mov %r8,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,c1=int64#7 +# asm 2: mov c1=%rax +mov %r9,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,c2=int64#8 +# asm 2: mov c2=%r10 +mov %r9,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,c3=int64#9 +# asm 2: mov c3=%r11 +mov %r9,%r11 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,c4=int64#10 +# asm 2: mov c4=%r12 +mov %r9,%r12 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#6 +# asm 2: imulq $19,mult=%r9 +imulq $19,%r9,%r9 + +# qhasm: c0 += mult +# asm 1: add c0_stack=stack64#8 +# asm 2: movq c0_stack=56(%rsp) +movq %r8,56(%rsp) + +# qhasm: c1_stack = c1 +# asm 1: movq c1_stack=stack64#9 +# asm 2: movq c1_stack=64(%rsp) +movq %rax,64(%rsp) + +# qhasm: c2_stack = c2 +# asm 1: movq c2_stack=stack64#10 +# asm 2: movq c2_stack=72(%rsp) +movq %r10,72(%rsp) + +# qhasm: c3_stack = c3 +# asm 1: movq c3_stack=stack64#11 +# asm 2: movq c3_stack=80(%rsp) +movq %r11,80(%rsp) + +# qhasm: c4_stack = c4 +# asm 1: movq c4_stack=stack64#12 +# asm 2: movq c4_stack=88(%rsp) +movq %r12,88(%rsp) + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: movq 104(mulrax=int64#3 +# asm 2: movq 104(mulrax=%rdx +movq 104(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx319_stack = mulrax +# asm 1: movq mulx319_stack=stack64#13 +# asm 2: movq mulx319_stack=96(%rsp) +movq %rax,96(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96) +# asm 1: mulq 96(rt0=int64#5 +# asm 2: mov rt0=%r8 +mov %rax,%r8 + +# qhasm: mulr01 = mulrdx +# asm 1: mov mulr01=int64#6 +# asm 2: mov mulr01=%r9 +mov %rdx,%r9 + +# qhasm: mulrax = *(uint64 *)(pp + 112) +# asm 1: movq 112(mulrax=int64#3 +# asm 2: movq 112(mulrax=%rdx +movq 112(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: mulx419_stack = mulrax 
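# note: mulx319_stack and mulx419_stack cache 19*limb3 and 19*limb4 of the
# left operand. Every "wrapped" partial product (limb indices summing to 5
# or more) picks up a factor of 19 because 2^255 == 19 (mod 2^255-19), and
# computing it once ahead of the inner products saves repeating imulq $19.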
+# asm 1: movq mulx419_stack=stack64#14 +# asm 2: movq mulx419_stack=104(%rsp) +movq %rax,104(%rsp) + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88) +# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88) +# asm 1: mulq 88(rt1=int64#8 +# asm 2: mov rt1=%r10 +mov %rax,%r10 + +# qhasm: mulr11 = mulrdx +# asm 1: mov mulr11=int64#9 +# asm 2: mov mulr11=%r11 +mov %rdx,%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 80) +# asm 1: movq 80(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96) +# asm 1: mulq 96(rt2=int64#10 +# asm 2: mov rt2=%r12 +mov %rax,%r12 + +# qhasm: mulr21 = mulrdx +# asm 1: mov mulr21=int64#11 +# asm 2: mov mulr21=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(pp + 80) +# asm 1: movq 80(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104) +# asm 1: mulq 104(rt3=int64#12 +# asm 2: mov rt3=%r14 +mov %rax,%r14 + +# qhasm: mulr31 = mulrdx +# asm 1: mov mulr31=int64#13 +# asm 2: mov mulr31=%r15 +mov %rdx,%r15 + +# qhasm: mulrax = *(uint64 *)(pp + 80) +# asm 1: movq 80(mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112) +# asm 1: mulq 112(rt4=int64#14 +# asm 2: mov rt4=%rbx +mov %rax,%rbx + +# qhasm: mulr41 = mulrdx +# asm 1: mov mulr41=int64#15 +# asm 2: mov mulr41=%rbp +mov %rdx,%rbp + +# qhasm: mulrax = *(uint64 *)(pp + 88) +# asm 1: movq 88(mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88) +# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96) +# asm 1: mulq 96(mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104) +# asm 1: mulq 104(mulrax=int64#3 +# asm 2: movq 88(mulrax=%rdx +movq 88(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112) +# asm 1: mulq 112(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88) +# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96) +# asm 1: mulq 96(mulrax=int64#3 +# asm 2: movq 96(mulrax=%rdx +movq 96(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104) +# asm 1: mulq 104(mulrax=int64#3 +# asm 2: movq 96(mulrax=%rdx +movq 96(%rsi),%rdx + +# qhasm: mulrax *= 19 +# asm 1: imulq $19,mulrax=int64#7 +# asm 2: imulq $19,mulrax=%rax +imulq $19,%rdx,%rax + 
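# note: each of the unrolled 5x5 limb multiplications in this file follows
# the same radix-2^51 pattern: accumulate 128-bit partial products with
# mulq/add/adc, fold limbs 5..8 back with a factor of 19, then carry and
# mask with REDMASK51 = 2^51-1. A C-level sketch of the idea, using a
# hypothetical helper name and a compiler's unsigned __int128 (the
# generated code tracks the high halves with shld $13 instead):
#
#   typedef unsigned __int128 u128;
#   /* r = a*b mod 2^255-19; input limbs assumed reduced (< 2^52) */
#   static void fe25519_mul_sketch(uint64_t r[5],
#                                  const uint64_t a[5], const uint64_t b[5])
#   {
#       u128 t[5] = {0};
#       for (int i = 0; i < 5; i++)
#           for (int j = 0; j < 5; j++) {
#               uint64_t ai = (i + j >= 5) ? 19 * a[i] : a[i];
#               t[(i + j) % 5] += (u128)ai * b[j];  /* 2^255 folds to 19 */
#           }
#       uint64_t mask = ((uint64_t)1 << 51) - 1, c = 0;
#       for (int k = 0; k < 5; k++) {               /* carry chain */
#           t[k] += c;
#           c = (uint64_t)(t[k] >> 51);
#           r[k] = (uint64_t)t[k] & mask;
#       }
#       r[0] += 19 * c;  /* top carry wraps around; the asm then runs one
#                           more short carry pass to renormalize limb 0 */
#   }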
+# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112) +# asm 1: mulq 112(mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88) +# asm 1: mulq 88(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104) +# asm 1: mulq 104(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112) +# asm 1: mulq 112(mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80) +# asm 1: mulq 80(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96) +# asm 1: mulq 96(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104) +# asm 1: mulq 104(mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112) +# asm 1: mulq 112(mulredmask=int64#2 +# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi +movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi + +# qhasm: mulr01 = (mulr01.rt0) << 13 +# asm 1: shld $13,mulr41=int64#3 +# asm 2: imulq $19,mulr41=%rdx +imulq $19,%rbp,%rdx + +# qhasm: rt0 += mulr41 +# asm 1: add mult=int64#3 +# asm 2: mov mult=%rdx +mov %r8,%rdx + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rt1=int64#4 +# asm 2: mov rt1=%rcx +mov %rdx,%rcx + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rt2=int64#6 +# asm 2: mov rt2=%r9 +mov %rdx,%r9 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rt3=int64#7 +# asm 2: mov rt3=%rax +mov %rdx,%rax + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,rt4=int64#8 +# asm 2: mov rt4=%r10 +mov %rdx,%r10 + +# qhasm: (uint64) mult >>= 51 +# asm 1: shr $51,mult=int64#3 +# asm 2: imulq $19,mult=%rdx +imulq $19,%rdx,%rdx + +# qhasm: rt0 += mult +# asm 1: add rz0=int64#2 +# asm 2: mov rz0=%rsi +mov %r8,%rsi + +# qhasm: rz1 = rt1 +# asm 1: mov rz1=int64#3 +# asm 2: mov rz1=%rdx +mov %rcx,%rdx + +# qhasm: rz2 = rt2 +# asm 1: mov rz2=int64#9 +# asm 2: mov rz2=%r11 +mov %r9,%r11 + +# qhasm: rz3 = rt3 +# asm 1: mov rz3=int64#10 +# asm 2: mov rz3=%r12 +mov %rax,%r12 + +# qhasm: rz4 = rt4 +# asm 1: mov rz4=int64#11 +# asm 2: mov rz4=%r13 +mov %r10,%r13 + +# qhasm: rt0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0 +# asm 1: add crypto_sign_ed25519_amd64_51_30k_batch_2P0,caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov 
%rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-51-30k/ge25519_scalarmult_base.c b/src/ed25519-supercop-amd64-51-30k/ge25519_scalarmult_base.c new file mode 100644 index 0000000..9cf2951 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/ge25519_scalarmult_base.c @@ -0,0 +1,50 @@ +#include "fe25519.h" +#include "sc25519.h" +#include "ge25519.h" + +/* Multiples of the base point in Niels' representation */ +static const ge25519_niels ge25519_base_multiples_niels[] = { +#include "ge25519_base_niels_smalltables.data" +}; + +/* d */ +static const fe25519 ecd = {{929955233495203, 466365720129213, 1662059464998953, 2033849074728123, 1442794654840575}}; + +void ge25519_scalarmult_base(ge25519_p3 *r, const sc25519 *s) +{ + signed char b[64]; + int i; + ge25519_niels t; + fe25519 d; + + sc25519_window4(b,s); + + ge25519_p1p1 tp1p1; + choose_t((ge25519_niels *)r, 0, (signed long long) b[1], ge25519_base_multiples_niels); + fe25519_sub(&d, &r->y, &r->x); + fe25519_add(&r->y, &r->y, &r->x); + r->x = d; + r->t = r->z; + fe25519_setint(&r->z,2); + for(i=3;i<64;i+=2) + { + choose_t(&t, (unsigned long long) i/2, (signed long long) b[i], ge25519_base_multiples_niels); + ge25519_nielsadd2(r, &t); + } + ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r); + ge25519_p1p1_to_p2((ge25519_p2 *)r, &tp1p1); + ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r); + ge25519_p1p1_to_p2((ge25519_p2 *)r, &tp1p1); + ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r); + ge25519_p1p1_to_p2((ge25519_p2 *)r, &tp1p1); + ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r); + ge25519_p1p1_to_p3(r, &tp1p1); + choose_t(&t, (unsigned long long) 0, (signed long long) b[0], ge25519_base_multiples_niels); + fe25519_mul(&t.t2d, &t.t2d, &ecd); + ge25519_nielsadd2(r, &t); + for(i=2;i<64;i+=2) + { + choose_t(&t, (unsigned long long) i/2, (signed long long) b[i], ge25519_base_multiples_niels); + ge25519_nielsadd2(r, &t); + } +} diff --git a/src/ed25519-supercop-amd64-51-30k/ge25519_unpackneg.c b/src/ed25519-supercop-amd64-51-30k/ge25519_unpackneg.c new file mode 100644 index 0000000..05d4855 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/ge25519_unpackneg.c @@ -0,0 +1,60 @@ +#include "fe25519.h" +#include "ge25519.h" + +/* d */ +static const fe25519 ecd = {{929955233495203, 466365720129213, 1662059464998953, 2033849074728123, 1442794654840575}}; +/* sqrt(-1) */ +static const fe25519 sqrtm1 = {{1718705420411056, 234908883556509, 2233514472574048, 2117202627021982, 765476049583133}}; + +/* return 0 on success, -1 otherwise */ +int ge25519_unpackneg_vartime(ge25519_p3 *r, const unsigned char p[32]) +{ + fe25519 t, chk, num, den, den2, den4, den6; + unsigned char par = p[31] >> 7; + + fe25519_setint(&r->z,1); + fe25519_unpack(&r->y, p); + fe25519_square(&num, &r->y); /* x = y^2 */ + fe25519_mul(&den, &num, &ecd); /* den = dy^2 */ + fe25519_sub(&num, &num, &r->z); /* x = y^2-1 */ + fe25519_add(&den, &r->z, &den); /* den = dy^2+1 */ + + /* Computation of sqrt(num/den) + 1.: computation of num^((p-5)/8)*den^((7p-35)/8) = (num*den^7)^((p-5)/8) + */ + fe25519_square(&den2, &den); + fe25519_square(&den4, &den2); + fe25519_mul(&den6, &den4, &den2); + fe25519_mul(&t, &den6, &num); + fe25519_mul(&t, &t, &den); + + fe25519_pow2523(&t, &t); + /* 2. computation of r->x = t * num * den^3 + */ + fe25519_mul(&t, &t, &num); + fe25519_mul(&t, &t, &den); + fe25519_mul(&t, &t, &den); + fe25519_mul(&r->x, &t, &den); + + /* 3. 
Check whether sqrt computation gave correct result, multiply by sqrt(-1) if not: + */ + fe25519_square(&chk, &r->x); + fe25519_mul(&chk, &chk, &den); + if (!fe25519_iseq_vartime(&chk, &num)) + fe25519_mul(&r->x, &r->x, &sqrtm1); + + /* 4. Now we have one of the two square roots, except if input was not a square + */ + fe25519_square(&chk, &r->x); + fe25519_mul(&chk, &chk, &den); + if (!fe25519_iseq_vartime(&chk, &num)) + return -1; + + /* 5. Choose the desired square root according to parity: + */ + if(fe25519_getparity(&r->x) != (1-par)) + fe25519_neg(&r->x, &r->x); + + fe25519_mul(&r->t, &r->x, &r->y); + return 0; +} diff --git a/src/ed25519-supercop-amd64-51-30k/heap_rootreplaced.s b/src/ed25519-supercop-amd64-51-30k/heap_rootreplaced.s new file mode 100644 index 0000000..bad2b76 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/heap_rootreplaced.s @@ -0,0 +1,476 @@ + +# qhasm: int64 hp + +# qhasm: int64 hlen + +# qhasm: int64 sp + +# qhasm: int64 pp + +# qhasm: input hp + +# qhasm: input hlen + +# qhasm: input sp + +# qhasm: int64 prc + +# qhasm: int64 plc + +# qhasm: int64 pc + +# qhasm: int64 d + +# qhasm: int64 spp + +# qhasm: int64 sprc + +# qhasm: int64 spc + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 p0 + +# qhasm: int64 p1 + +# qhasm: int64 p2 + +# qhasm: int64 p3 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced +.globl crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced +_crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced: +crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: pp = 0 +# asm 1: mov $0,>pp=int64#4 +# asm 2: mov $0,>pp=%rcx +mov $0,%rcx + +# qhasm: siftdownloop: +._siftdownloop: + +# qhasm: prc = pp +# asm 1: mov 
prc=int64#5 +# asm 2: mov prc=%r8 +mov %rcx,%r8 + +# qhasm: prc *= 2 +# asm 1: imulq $2,prc=int64#5 +# asm 2: imulq $2,prc=%r8 +imulq $2,%r8,%r8 + +# qhasm: pc = prc +# asm 1: mov pc=int64#6 +# asm 2: mov pc=%r9 +mov %r8,%r9 + +# qhasm: prc += 2 +# asm 1: add $2,? hlen - prc +# asm 1: cmp +jbe ._siftuploop + +# qhasm: sprc = *(uint64 *)(hp + prc * 8) +# asm 1: movq (sprc=int64#7 +# asm 2: movq (sprc=%rax +movq (%rdi,%r8,8),%rax + +# qhasm: sprc <<= 5 +# asm 1: shl $5,spc=int64#8 +# asm 2: movq (spc=%r10 +movq (%rdi,%r9,8),%r10 + +# qhasm: spc <<= 5 +# asm 1: shl $5,c0=int64#9 +# asm 2: movq 0(c0=%r11 +movq 0(%r10),%r11 + +# qhasm: c1 = *(uint64 *)(spc + 8) +# asm 1: movq 8(c1=int64#10 +# asm 2: movq 8(c1=%r12 +movq 8(%r10),%r12 + +# qhasm: c2 = *(uint64 *)(spc + 16) +# asm 1: movq 16(c2=int64#11 +# asm 2: movq 16(c2=%r13 +movq 16(%r10),%r13 + +# qhasm: c3 = *(uint64 *)(spc + 24) +# asm 1: movq 24(c3=int64#12 +# asm 2: movq 24(c3=%r14 +movq 24(%r10),%r14 + +# qhasm: carry? c0 -= *(uint64 *)(sprc + 0) +# asm 1: subq 0(>= 5 +# asm 1: shr $5,spp=int64#5 +# asm 2: movq (spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: *(uint64 *)(hp + pp * 8) = spc +# asm 1: movq pp=int64#4 +# asm 2: mov pp=%rcx +mov %r9,%rcx +# comment:fp stack unchanged by jump + +# qhasm: goto siftdownloop +jmp ._siftdownloop + +# qhasm: siftuploop: +._siftuploop: + +# qhasm: pc = pp +# asm 1: mov pc=int64#2 +# asm 2: mov pc=%rsi +mov %rcx,%rsi + +# qhasm: pp -= 1 +# asm 1: sub $1,>= 1 +# asm 1: shr $1,? pc - 0 +# asm 1: cmp $0, +jbe ._end + +# qhasm: spp = *(uint64 *)(hp + pp * 8) +# asm 1: movq (spp=int64#5 +# asm 2: movq (spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: spc = *(uint64 *)(hp + pc * 8) +# asm 1: movq (spc=int64#6 +# asm 2: movq (spc=%r9 +movq (%rdi,%rsi,8),%r9 + +# qhasm: spp <<= 5 +# asm 1: shl $5,c0=int64#7 +# asm 2: movq 0(c0=%rax +movq 0(%r9),%rax + +# qhasm: c1 = *(uint64 *)(spc + 8) +# asm 1: movq 8(c1=int64#8 +# asm 2: movq 8(c1=%r10 +movq 8(%r9),%r10 + +# qhasm: c2 = *(uint64 *)(spc + 16) +# asm 1: movq 16(c2=int64#9 +# asm 2: movq 16(c2=%r11 +movq 16(%r9),%r11 + +# qhasm: c3 = *(uint64 *)(spc + 24) +# asm 1: movq 24(c3=int64#10 +# asm 2: movq 24(c3=%r12 +movq 24(%r9),%r12 + +# qhasm: carry? 
c0 -= *(uint64 *)(spp + 0) +# asm 1: subq 0(>= 5 +# asm 1: shr $5,>= 5 +# asm 1: shr $5,caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-51-30k/heap_rootreplaced_1limb.s b/src/ed25519-supercop-amd64-51-30k/heap_rootreplaced_1limb.s new file mode 100644 index 0000000..b0b6204 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/heap_rootreplaced_1limb.s @@ -0,0 +1,416 @@ + +# qhasm: int64 hp + +# qhasm: int64 hlen + +# qhasm: int64 sp + +# qhasm: int64 pp + +# qhasm: input hp + +# qhasm: input hlen + +# qhasm: input sp + +# qhasm: int64 prc + +# qhasm: int64 plc + +# qhasm: int64 pc + +# qhasm: int64 d + +# qhasm: int64 spp + +# qhasm: int64 sprc + +# qhasm: int64 spc + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 p0 + +# qhasm: int64 p1 + +# qhasm: int64 p2 + +# qhasm: int64 p3 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_1limb +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_1limb +.globl crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_1limb +_crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_1limb: +crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_1limb: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq 
caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: pp = 0 +# asm 1: mov $0,>pp=int64#4 +# asm 2: mov $0,>pp=%rcx +mov $0,%rcx + +# qhasm: siftdownloop: +._siftdownloop: + +# qhasm: prc = pp +# asm 1: mov prc=int64#5 +# asm 2: mov prc=%r8 +mov %rcx,%r8 + +# qhasm: prc *= 2 +# asm 1: imulq $2,prc=int64#5 +# asm 2: imulq $2,prc=%r8 +imulq $2,%r8,%r8 + +# qhasm: pc = prc +# asm 1: mov pc=int64#6 +# asm 2: mov pc=%r9 +mov %r8,%r9 + +# qhasm: prc += 2 +# asm 1: add $2,? hlen - prc +# asm 1: cmp +jbe ._siftuploop + +# qhasm: sprc = *(uint64 *)(hp + prc * 8) +# asm 1: movq (sprc=int64#7 +# asm 2: movq (sprc=%rax +movq (%rdi,%r8,8),%rax + +# qhasm: sprc <<= 5 +# asm 1: shl $5,spc=int64#8 +# asm 2: movq (spc=%r10 +movq (%rdi,%r9,8),%r10 + +# qhasm: spc <<= 5 +# asm 1: shl $5,c0=int64#9 +# asm 2: movq 0(c0=%r11 +movq 0(%r10),%r11 + +# qhasm: carry? c0 -= *(uint64 *)(sprc + 0) +# asm 1: subq 0(>= 5 +# asm 1: shr $5,spp=int64#5 +# asm 2: movq (spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: *(uint64 *)(hp + pp * 8) = spc +# asm 1: movq pp=int64#4 +# asm 2: mov pp=%rcx +mov %r9,%rcx +# comment:fp stack unchanged by jump + +# qhasm: goto siftdownloop +jmp ._siftdownloop + +# qhasm: siftuploop: +._siftuploop: + +# qhasm: pc = pp +# asm 1: mov pc=int64#2 +# asm 2: mov pc=%rsi +mov %rcx,%rsi + +# qhasm: pp -= 1 +# asm 1: sub $1,>= 1 +# asm 1: shr $1,? pc - 0 +# asm 1: cmp $0, +jbe ._end + +# qhasm: spp = *(uint64 *)(hp + pp * 8) +# asm 1: movq (spp=int64#5 +# asm 2: movq (spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: spc = *(uint64 *)(hp + pc * 8) +# asm 1: movq (spc=int64#6 +# asm 2: movq (spc=%r9 +movq (%rdi,%rsi,8),%r9 + +# qhasm: spp <<= 5 +# asm 1: shl $5,c0=int64#7 +# asm 2: movq 0(c0=%rax +movq 0(%r9),%rax + +# qhasm: carry? 
c0 -= *(uint64 *)(spp + 0) +# asm 1: subq 0(>= 5 +# asm 1: shr $5,>= 5 +# asm 1: shr $5,caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-51-30k/heap_rootreplaced_2limbs.s b/src/ed25519-supercop-amd64-51-30k/heap_rootreplaced_2limbs.s new file mode 100644 index 0000000..750f8f6 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/heap_rootreplaced_2limbs.s @@ -0,0 +1,436 @@ + +# qhasm: int64 hp + +# qhasm: int64 hlen + +# qhasm: int64 sp + +# qhasm: int64 pp + +# qhasm: input hp + +# qhasm: input hlen + +# qhasm: input sp + +# qhasm: int64 prc + +# qhasm: int64 plc + +# qhasm: int64 pc + +# qhasm: int64 d + +# qhasm: int64 spp + +# qhasm: int64 sprc + +# qhasm: int64 spc + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 p0 + +# qhasm: int64 p1 + +# qhasm: int64 p2 + +# qhasm: int64 p3 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_2limbs +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_2limbs +.globl crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_2limbs +_crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_2limbs: +crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_2limbs: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq 
caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: pp = 0 +# asm 1: mov $0,>pp=int64#4 +# asm 2: mov $0,>pp=%rcx +mov $0,%rcx + +# qhasm: siftdownloop: +._siftdownloop: + +# qhasm: prc = pp +# asm 1: mov prc=int64#5 +# asm 2: mov prc=%r8 +mov %rcx,%r8 + +# qhasm: prc *= 2 +# asm 1: imulq $2,prc=int64#5 +# asm 2: imulq $2,prc=%r8 +imulq $2,%r8,%r8 + +# qhasm: pc = prc +# asm 1: mov pc=int64#6 +# asm 2: mov pc=%r9 +mov %r8,%r9 + +# qhasm: prc += 2 +# asm 1: add $2,? hlen - prc +# asm 1: cmp +jbe ._siftuploop + +# qhasm: sprc = *(uint64 *)(hp + prc * 8) +# asm 1: movq (sprc=int64#7 +# asm 2: movq (sprc=%rax +movq (%rdi,%r8,8),%rax + +# qhasm: sprc <<= 5 +# asm 1: shl $5,spc=int64#8 +# asm 2: movq (spc=%r10 +movq (%rdi,%r9,8),%r10 + +# qhasm: spc <<= 5 +# asm 1: shl $5,c0=int64#9 +# asm 2: movq 0(c0=%r11 +movq 0(%r10),%r11 + +# qhasm: c1 = *(uint64 *)(spc + 8) +# asm 1: movq 8(c1=int64#10 +# asm 2: movq 8(c1=%r12 +movq 8(%r10),%r12 + +# qhasm: carry? c0 -= *(uint64 *)(sprc + 0) +# asm 1: subq 0(>= 5 +# asm 1: shr $5,spp=int64#5 +# asm 2: movq (spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: *(uint64 *)(hp + pp * 8) = spc +# asm 1: movq pp=int64#4 +# asm 2: mov pp=%rcx +mov %r9,%rcx +# comment:fp stack unchanged by jump + +# qhasm: goto siftdownloop +jmp ._siftdownloop + +# qhasm: siftuploop: +._siftuploop: + +# qhasm: pc = pp +# asm 1: mov pc=int64#2 +# asm 2: mov pc=%rsi +mov %rcx,%rsi + +# qhasm: pp -= 1 +# asm 1: sub $1,>= 1 +# asm 1: shr $1,? pc - 0 +# asm 1: cmp $0, +jbe ._end + +# qhasm: spp = *(uint64 *)(hp + pp * 8) +# asm 1: movq (spp=int64#5 +# asm 2: movq (spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: spc = *(uint64 *)(hp + pc * 8) +# asm 1: movq (spc=int64#6 +# asm 2: movq (spc=%r9 +movq (%rdi,%rsi,8),%r9 + +# qhasm: spp <<= 5 +# asm 1: shl $5,c0=int64#7 +# asm 2: movq 0(c0=%rax +movq 0(%r9),%rax + +# qhasm: c1 = *(uint64 *)(spc + 8) +# asm 1: movq 8(c1=int64#8 +# asm 2: movq 8(c1=%r10 +movq 8(%r9),%r10 + +# qhasm: carry? 
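The _1limb/_2limbs/_3limbs flavors of heap_rootreplaced differ only in how many limbs the scalar comparison loads (c0; c0..c1; c0..c2). The likely reason, stated here as an inference: the Bos-Coster loop below only ever shrinks scalars, so once every live scalar fits in two limbs the upper limbs are known to be zero and need not be read. A branch-free two-limb comparison, for illustration only:

#include <stdint.h>

/* "x < y" assuming the top two limbs of both values are zero. */
int lt2(const uint64_t x[2], const uint64_t y[2])
{
    unsigned b = x[0] < y[0];                      /* borrow from low limb */
    return (int)((x[1] < y[1]) | ((x[1] == y[1]) & b));
}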
c0 -= *(uint64 *)(spp + 0) +# asm 1: subq 0(>= 5 +# asm 1: shr $5,>= 5 +# asm 1: shr $5,caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-51-30k/heap_rootreplaced_3limbs.s b/src/ed25519-supercop-amd64-51-30k/heap_rootreplaced_3limbs.s new file mode 100644 index 0000000..e5aeda9 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/heap_rootreplaced_3limbs.s @@ -0,0 +1,456 @@ + +# qhasm: int64 hp + +# qhasm: int64 hlen + +# qhasm: int64 sp + +# qhasm: int64 pp + +# qhasm: input hp + +# qhasm: input hlen + +# qhasm: input sp + +# qhasm: int64 prc + +# qhasm: int64 plc + +# qhasm: int64 pc + +# qhasm: int64 d + +# qhasm: int64 spp + +# qhasm: int64 sprc + +# qhasm: int64 spc + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 p0 + +# qhasm: int64 p1 + +# qhasm: int64 p2 + +# qhasm: int64 p3 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_3limbs +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_3limbs +.globl crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_3limbs +_crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_3limbs: +crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_3limbs: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq 
caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: pp = 0 +# asm 1: mov $0,>pp=int64#4 +# asm 2: mov $0,>pp=%rcx +mov $0,%rcx + +# qhasm: siftdownloop: +._siftdownloop: + +# qhasm: prc = pp +# asm 1: mov prc=int64#5 +# asm 2: mov prc=%r8 +mov %rcx,%r8 + +# qhasm: prc *= 2 +# asm 1: imulq $2,prc=int64#5 +# asm 2: imulq $2,prc=%r8 +imulq $2,%r8,%r8 + +# qhasm: pc = prc +# asm 1: mov pc=int64#6 +# asm 2: mov pc=%r9 +mov %r8,%r9 + +# qhasm: prc += 2 +# asm 1: add $2,? hlen - prc +# asm 1: cmp +jbe ._siftuploop + +# qhasm: sprc = *(uint64 *)(hp + prc * 8) +# asm 1: movq (sprc=int64#7 +# asm 2: movq (sprc=%rax +movq (%rdi,%r8,8),%rax + +# qhasm: sprc <<= 5 +# asm 1: shl $5,spc=int64#8 +# asm 2: movq (spc=%r10 +movq (%rdi,%r9,8),%r10 + +# qhasm: spc <<= 5 +# asm 1: shl $5,c0=int64#9 +# asm 2: movq 0(c0=%r11 +movq 0(%r10),%r11 + +# qhasm: c1 = *(uint64 *)(spc + 8) +# asm 1: movq 8(c1=int64#10 +# asm 2: movq 8(c1=%r12 +movq 8(%r10),%r12 + +# qhasm: c2 = *(uint64 *)(spc + 16) +# asm 1: movq 16(c2=int64#11 +# asm 2: movq 16(c2=%r13 +movq 16(%r10),%r13 + +# qhasm: carry? c0 -= *(uint64 *)(sprc + 0) +# asm 1: subq 0(>= 5 +# asm 1: shr $5,spp=int64#5 +# asm 2: movq (spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: *(uint64 *)(hp + pp * 8) = spc +# asm 1: movq pp=int64#4 +# asm 2: mov pp=%rcx +mov %r9,%rcx +# comment:fp stack unchanged by jump + +# qhasm: goto siftdownloop +jmp ._siftdownloop + +# qhasm: siftuploop: +._siftuploop: + +# qhasm: pc = pp +# asm 1: mov pc=int64#2 +# asm 2: mov pc=%rsi +mov %rcx,%rsi + +# qhasm: pp -= 1 +# asm 1: sub $1,>= 1 +# asm 1: shr $1,? pc - 0 +# asm 1: cmp $0, +jbe ._end + +# qhasm: spp = *(uint64 *)(hp + pp * 8) +# asm 1: movq (spp=int64#5 +# asm 2: movq (spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: spc = *(uint64 *)(hp + pc * 8) +# asm 1: movq (spc=int64#6 +# asm 2: movq (spc=%r9 +movq (%rdi,%rsi,8),%r9 + +# qhasm: spp <<= 5 +# asm 1: shl $5,c0=int64#7 +# asm 2: movq 0(c0=%rax +movq 0(%r9),%rax + +# qhasm: c1 = *(uint64 *)(spc + 8) +# asm 1: movq 8(c1=int64#8 +# asm 2: movq 8(c1=%r10 +movq 8(%r9),%r10 + +# qhasm: c2 = *(uint64 *)(spc + 16) +# asm 1: movq 16(c2=int64#9 +# asm 2: movq 16(c2=%r11 +movq 16(%r9),%r11 + +# qhasm: carry? 
c0 -= *(uint64 *)(spp + 0) +# asm 1: subq 0(>= 5 +# asm 1: shr $5,>= 5 +# asm 1: shr $5,caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-51-30k/hram.c b/src/ed25519-supercop-amd64-51-30k/hram.c new file mode 100644 index 0000000..f020f6f --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/hram.c @@ -0,0 +1,13 @@ +#include "crypto_hash_sha512.h" +#include "hram.h" + +void get_hram(unsigned char *hram, const unsigned char *sm, const unsigned char *pk, unsigned char *playground, unsigned long long smlen) +{ + unsigned long long i; + + for (i = 0;i < 32;++i) playground[i] = sm[i]; + for (i = 32;i < 64;++i) playground[i] = pk[i-32]; + for (i = 64;i < smlen;++i) playground[i] = sm[i]; + + crypto_hash_sha512(hram,playground,smlen); +} diff --git a/src/ed25519-supercop-amd64-51-30k/hram.h b/src/ed25519-supercop-amd64-51-30k/hram.h new file mode 100644 index 0000000..8cffd84 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/hram.h @@ -0,0 +1,8 @@ +#ifndef HRAM_H +#define HRAM_H + +#define get_hram crypto_sign_ed25519_amd64_51_30k_batch_get_hram + +extern void get_hram(unsigned char *hram, const unsigned char *sm, const unsigned char *pk, unsigned char *playground, unsigned long long smlen); + +#endif diff --git a/src/ed25519-supercop-amd64-51-30k/implementors b/src/ed25519-supercop-amd64-51-30k/implementors new file mode 100644 index 0000000..9b5399a --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/implementors @@ -0,0 +1,5 @@ +Daniel J. 
Bernstein +Niels Duif +Tanja Lange +lead: Peter Schwabe +Bo-Yin Yang diff --git a/src/ed25519-supercop-amd64-51-30k/index_heap.c b/src/ed25519-supercop-amd64-51-30k/index_heap.c new file mode 100644 index 0000000..f29f7a2 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/index_heap.c @@ -0,0 +1,58 @@ +#include "sc25519.h" +#include "index_heap.h" + +/* caller's responsibility to ensure hlen>=3 */ +void heap_init(unsigned long long *h, unsigned long long hlen, sc25519 *scalars) +{ + h[0] = 0; + unsigned long long i=1; + while(i 0) + { + /* if(sc25519_lt_vartime(&scalars[h[ppos]], &scalars[h[pos]])) */ + if(sc25519_lt(&scalars[h[ppos]], &scalars[h[pos]])) + { + t = h[ppos]; + h[ppos] = h[pos]; + h[pos] = t; + pos = ppos; + ppos = (pos-1)/2; + } + else break; + } + (*hlen)++; +} + +/* Put the largest value in the heap in max1, the second largest in max2 */ +void heap_get2max(unsigned long long *h, unsigned long long *max1, unsigned long long *max2, sc25519 *scalars) +{ + *max1 = h[0]; + *max2 = h[1]; + if(sc25519_lt(&scalars[h[1]],&scalars[h[2]])) + *max2 = h[2]; +} + +/* After the root has been replaced, restore heap property */ +/* extern void heap_rootreplaced(unsigned long long *h, unsigned long long hlen, sc25519 *scalars); +*/ +/* extern void heap_rootreplaced_shortscalars(unsigned long long *h, unsigned long long hlen, sc25519 *scalars); +*/ diff --git a/src/ed25519-supercop-amd64-51-30k/index_heap.h b/src/ed25519-supercop-amd64-51-30k/index_heap.h new file mode 100644 index 0000000..8c35195 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/index_heap.h @@ -0,0 +1,31 @@ +#ifndef INDEX_HEAP_H +#define INDEX_HEAP_H + +#include "sc25519.h" + +#define heap_init crypto_sign_ed25519_amd64_51_30k_batch_heap_init +#define heap_extend crypto_sign_ed25519_amd64_51_30k_batch_heap_extend +#define heap_pop crypto_sign_ed25519_amd64_51_30k_batch_heap_pop +#define heap_push crypto_sign_ed25519_amd64_51_30k_batch_heap_push +#define heap_get2max crypto_sign_ed25519_amd64_51_30k_batch_heap_get2max +#define heap_rootreplaced crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced +#define heap_rootreplaced_3limbs crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_3limbs +#define heap_rootreplaced_2limbs crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_2limbs +#define heap_rootreplaced_1limb crypto_sign_ed25519_amd64_51_30k_batch_heap_rootreplaced_1limb + +void heap_init(unsigned long long *h, unsigned long long hlen, sc25519 *scalars); + +void heap_extend(unsigned long long *h, unsigned long long oldlen, unsigned long long newlen, sc25519 *scalars); + +unsigned long long heap_pop(unsigned long long *h, unsigned long long *hlen, sc25519 *scalars); + +void heap_push(unsigned long long *h, unsigned long long *hlen, unsigned long long elem, sc25519 *scalars); + +void heap_get2max(unsigned long long *h, unsigned long long *max1, unsigned long long *max2, sc25519 *scalars); + +void heap_rootreplaced(unsigned long long *h, unsigned long long hlen, sc25519 *scalars); +void heap_rootreplaced_3limbs(unsigned long long *h, unsigned long long hlen, sc25519 *scalars); +void heap_rootreplaced_2limbs(unsigned long long *h, unsigned long long hlen, sc25519 *scalars); +void heap_rootreplaced_1limb(unsigned long long *h, unsigned long long hlen, sc25519 *scalars); + +#endif diff --git a/src/ed25519-supercop-amd64-51-30k/keypair.c b/src/ed25519-supercop-amd64-51-30k/keypair.c new file mode 100644 index 0000000..bb04e8d --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/keypair.c @@ -0,0 +1,30 @@ 
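The index_heap API above is scaffolding for Bos-Coster style multi-scalar multiplication: keep indices of the scalars in a max-heap, repeatedly take the two largest scalars s1 >= s2 (heap_get2max), replace s1 by s1 - s2 (sc25519_sub_nored) while folding the matching point into the other one, then restore the heap with heap_rootreplaced. An integer-only sketch of that invariant, with plain integers standing in for curve points and a linear scan standing in for the heap (illustrative, not the batch code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t s[4] = {23, 11, 7, 3};        /* scalars */
    uint64_t P[4] = {100, 200, 300, 400};  /* stand-ins for curve points */
    uint64_t want = 0, got = 0;
    for (int i = 0; i < 4; i++) want += s[i] * P[i];

    for (;;) {
        int i1 = 0, i2 = 1;                /* indices of two largest scalars */
        if (s[1] > s[0]) { i1 = 1; i2 = 0; }
        for (int i = 2; i < 4; i++) {
            if (s[i] > s[i1])      { i2 = i1; i1 = i; }
            else if (s[i] > s[i2]) { i2 = i; }
        }
        if (s[i2] == 0) break;             /* one nonzero scalar remains */
        s[i1] -= s[i2];                    /* root replaced, then sift */
        P[i2] += P[i1];   /* s1*P1 + s2*P2 == (s1-s2)*P1 + s2*(P1+P2) */
    }
    for (int i = 0; i < 4; i++) got += s[i] * P[i];
    printf("want=%llu got=%llu\n",
           (unsigned long long)want, (unsigned long long)got);
    return 0;
}

The sum s[i]*P[i] is preserved by every step, which is the whole trick: once a single nonzero scalar is left, one ordinary scalar multiplication finishes the job.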
+#include "randombytes.h" +#include "crypto_hash_sha512.h" +#include "crypto_sign.h" +#include "ge25519.h" +#include "hram.h" + +int crypto_sign_keypair( + unsigned char *pk, + unsigned char *sk + ) +{ + sc25519 scsk; + ge25519 gepk; + unsigned char extsk[64]; + int i; + + randombytes(sk, 32); + crypto_hash_sha512(extsk, sk, 32); + extsk[0] &= 248; + extsk[31] &= 127; + extsk[31] |= 64; + + sc25519_from32bytes(&scsk,extsk); + + ge25519_scalarmult_base(&gepk, &scsk); + ge25519_pack(pk, &gepk); + for(i=0;i<32;i++) + sk[32 + i] = pk[i]; + return 0; +} diff --git a/src/ed25519-supercop-amd64-51-30k/open.c b/src/ed25519-supercop-amd64-51-30k/open.c new file mode 100644 index 0000000..1f73fe9 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/open.c @@ -0,0 +1,45 @@ +#include "crypto_sign.h" +#include "crypto_verify_32.h" +#include "crypto_hash_sha512.h" +#include "ge25519.h" +#include "hram.h" + +int crypto_sign_open( + unsigned char *m,unsigned long long *mlen, + const unsigned char *sm,unsigned long long smlen, + const unsigned char *pk + ) +{ + int i; + unsigned char t2[32]; + ge25519 get1, get2; + sc25519 schram, scs; + unsigned char hram[crypto_hash_sha512_BYTES]; + + *mlen = (unsigned long long) -1; + + if (smlen < 64) goto badsig; + if (ge25519_unpackneg_vartime(&get1, pk)) goto badsig; + + get_hram(hram,sm,pk,m,smlen); + + sc25519_from64bytes(&schram, hram); + + sc25519_from32bytes(&scs, sm+32); + + ge25519_double_scalarmult_vartime(&get2, &get1, &schram, &scs); + ge25519_pack(t2, &get2); + + if (!crypto_verify_32(sm, t2)) + { + for(i=0;icaller4_stack=stack64#1 +# asm 2: movq caller4_stack=0(%rsp) +movq %r14,0(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#2 +# asm 2: movq caller5_stack=8(%rsp) +movq %r15,8(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#3 +# asm 2: movq caller6_stack=16(%rsp) +movq %rbx,16(%rsp) + +# qhasm: r0 = *(uint64 *)(xp + 0) +# asm 1: movq 0(r0=int64#4 +# asm 2: movq 0(r0=%rcx +movq 0(%rsi),%rcx + +# qhasm: r1 = *(uint64 *)(xp + 8) +# asm 1: movq 8(r1=int64#5 +# asm 2: movq 8(r1=%r8 +movq 8(%rsi),%r8 + +# qhasm: r2 = *(uint64 *)(xp + 16) +# asm 1: movq 16(r2=int64#6 +# asm 2: movq 16(r2=%r9 +movq 16(%rsi),%r9 + +# qhasm: r3 = *(uint64 *)(xp + 24) +# asm 1: movq 24(r3=int64#2 +# asm 2: movq 24(r3=%rsi +movq 24(%rsi),%rsi + +# qhasm: carry? r0 += *(uint64 *)(yp + 0) +# asm 1: addq 0(t0=int64#3 +# asm 2: mov t0=%rdx +mov %rcx,%rdx + +# qhasm: t1 = r1 +# asm 1: mov t1=int64#7 +# asm 2: mov t1=%rax +mov %r8,%rax + +# qhasm: t2 = r2 +# asm 1: mov t2=int64#8 +# asm 2: mov t2=%r10 +mov %r9,%r10 + +# qhasm: t3 = r3 +# asm 1: mov t3=int64#12 +# asm 2: mov t3=%r14 +mov %rsi,%r14 + +# qhasm: carry? 
t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 +# asm 1: sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 0(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 8(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 16(%rsp),%rbx + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-51-30k/sc25519_barrett.s b/src/ed25519-supercop-amd64-51-30k/sc25519_barrett.s new file mode 100644 index 0000000..f784ea2 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/sc25519_barrett.s @@ -0,0 +1,1188 @@ + +# qhasm: int64 rp + +# qhasm: int64 xp + +# qhasm: input rp + +# qhasm: input xp + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 q23 + +# qhasm: int64 q24 + +# qhasm: int64 q30 + +# qhasm: int64 q31 + +# qhasm: int64 q32 + +# qhasm: int64 q33 + +# qhasm: int64 r20 + +# qhasm: int64 r21 + +# qhasm: int64 r22 + +# qhasm: int64 r23 + +# qhasm: int64 r24 + +# qhasm: int64 r0 + +# qhasm: int64 r1 + +# qhasm: int64 r2 + +# qhasm: int64 r3 + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 rax + +# qhasm: int64 rdx + +# qhasm: int64 c + +# qhasm: int64 zero + +# qhasm: int64 mask + +# qhasm: int64 nmask + +# qhasm: stack64 q30_stack + +# qhasm: stack64 q31_stack + +# qhasm: stack64 q32_stack + +# qhasm: stack64 q33_stack + +# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_sc25519_barrett +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_51_30k_batch_sc25519_barrett +.globl crypto_sign_ed25519_amd64_51_30k_batch_sc25519_barrett +_crypto_sign_ed25519_amd64_51_30k_batch_sc25519_barrett: +crypto_sign_ed25519_amd64_51_30k_batch_sc25519_barrett: +mov %rsp,%r11 +and $31,%r11 +add $96,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: zero ^= zero +# asm 1: xor rax=int64#7 +# asm 2: movq 24(rax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * 
*(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3 +mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3 + +# qhasm: q23 = rax +# asm 1: mov q23=int64#10 +# asm 2: mov q23=%r12 +mov %rax,%r12 + +# qhasm: c = rdx +# asm 1: mov c=int64#11 +# asm 2: mov c=%r13 +mov %rdx,%r13 + +# qhasm: rax = *(uint64 *)(xp + 24) +# asm 1: movq 24(rax=int64#7 +# asm 2: movq 24(rax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4 +mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4 + +# qhasm: q24 = rax +# asm 1: mov q24=int64#12 +# asm 2: mov q24=%r14 +mov %rax,%r14 + +# qhasm: carry? q24 += c +# asm 1: add rax=int64#7 +# asm 2: movq 32(rax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU2 +mulq crypto_sign_ed25519_amd64_51_30k_batch_MU2 + +# qhasm: carry? q23 += rax +# asm 1: add c=int64#11 +# asm 2: mov $0,>c=%r13 +mov $0,%r13 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 32(rax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3 +mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3 + +# qhasm: carry? q24 += rax +# asm 1: add c=int64#11 +# asm 2: mov $0,>c=%r13 +mov $0,%r13 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 32(rax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4 +mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4 + +# qhasm: carry? q30 += rax +# asm 1: add rax=int64#7 +# asm 2: movq 40(rax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU1 +mulq crypto_sign_ed25519_amd64_51_30k_batch_MU1 + +# qhasm: carry? q23 += rax +# asm 1: add c=int64#11 +# asm 2: mov $0,>c=%r13 +mov $0,%r13 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 40(rax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU2 +mulq crypto_sign_ed25519_amd64_51_30k_batch_MU2 + +# qhasm: carry? q24 += rax +# asm 1: add c=int64#11 +# asm 2: mov $0,>c=%r13 +mov $0,%r13 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 40(rax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3 +mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3 + +# qhasm: carry? q30 += rax +# asm 1: add c=int64#11 +# asm 2: mov $0,>c=%r13 +mov $0,%r13 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 40(rax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4 +mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4 + +# qhasm: carry? q31 += rax +# asm 1: add rax=int64#7 +# asm 2: movq 48(rax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU0 +mulq crypto_sign_ed25519_amd64_51_30k_batch_MU0 + +# qhasm: carry? q23 += rax +# asm 1: add c=int64#10 +# asm 2: mov $0,>c=%r12 +mov $0,%r12 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 48(rax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU1 +mulq crypto_sign_ed25519_amd64_51_30k_batch_MU1 + +# qhasm: carry? 
q24 += rax +# asm 1: add c=int64#10 +# asm 2: mov $0,>c=%r12 +mov $0,%r12 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 48(rax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU2 +mulq crypto_sign_ed25519_amd64_51_30k_batch_MU2 + +# qhasm: carry? q30 += rax +# asm 1: add c=int64#10 +# asm 2: mov $0,>c=%r12 +mov $0,%r12 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 48(rax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3 +mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3 + +# qhasm: carry? q31 += rax +# asm 1: add c=int64#10 +# asm 2: mov $0,>c=%r12 +mov $0,%r12 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 48(rax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4 +mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4 + +# qhasm: carry? q32 += rax +# asm 1: add rax=int64#7 +# asm 2: movq 56(rax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU0 +mulq crypto_sign_ed25519_amd64_51_30k_batch_MU0 + +# qhasm: carry? q24 += rax +# asm 1: add c=int64#10 +# asm 2: mov $0,>c=%r12 +mov $0,%r12 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 56(rax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU1 +mulq crypto_sign_ed25519_amd64_51_30k_batch_MU1 + +# qhasm: carry? q30 += rax +# asm 1: add c=int64#10 +# asm 2: mov $0,>c=%r12 +mov $0,%r12 + +# qhasm: c += rdx + carry +# asm 1: adc q30_stack=stack64#8 +# asm 2: movq q30_stack=56(%rsp) +movq %r8,56(%rsp) + +# qhasm: rax = *(uint64 *)(xp + 56) +# asm 1: movq 56(rax=int64#7 +# asm 2: movq 56(rax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU2 +mulq crypto_sign_ed25519_amd64_51_30k_batch_MU2 + +# qhasm: carry? q31 += rax +# asm 1: add c=int64#5 +# asm 2: mov $0,>c=%r8 +mov $0,%r8 + +# qhasm: c += rdx + carry +# asm 1: adc q31_stack=stack64#9 +# asm 2: movq q31_stack=64(%rsp) +movq %r9,64(%rsp) + +# qhasm: rax = *(uint64 *)(xp + 56) +# asm 1: movq 56(rax=int64#7 +# asm 2: movq 56(rax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU3 +mulq crypto_sign_ed25519_amd64_51_30k_batch_MU3 + +# qhasm: carry? q32 += rax +# asm 1: add c=int64#5 +# asm 2: mov $0,>c=%r8 +mov $0,%r8 + +# qhasm: c += rdx + carry +# asm 1: adc q32_stack=stack64#10 +# asm 2: movq q32_stack=72(%rsp) +movq %r10,72(%rsp) + +# qhasm: rax = *(uint64 *)(xp + 56) +# asm 1: movq 56(rax=int64#7 +# asm 2: movq 56(rax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_MU4 +mulq crypto_sign_ed25519_amd64_51_30k_batch_MU4 + +# qhasm: carry? 
q33 += rax +# asm 1: add q33_stack=stack64#11 +# asm 2: movq q33_stack=80(%rsp) +movq %r11,80(%rsp) + +# qhasm: rax = q30_stack +# asm 1: movq rax=int64#7 +# asm 2: movq rax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 +mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 + +# qhasm: r20 = rax +# asm 1: mov r20=int64#5 +# asm 2: mov r20=%r8 +mov %rax,%r8 + +# qhasm: c = rdx +# asm 1: mov c=int64#6 +# asm 2: mov c=%r9 +mov %rdx,%r9 + +# qhasm: rax = q30_stack +# asm 1: movq rax=int64#7 +# asm 2: movq rax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1 +mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER1 + +# qhasm: r21 = rax +# asm 1: mov r21=int64#8 +# asm 2: mov r21=%r10 +mov %rax,%r10 + +# qhasm: carry? r21 += c +# asm 1: add c=int64#6 +# asm 2: mov $0,>c=%r9 +mov $0,%r9 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq rax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER2 +mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER2 + +# qhasm: r22 = rax +# asm 1: mov r22=int64#9 +# asm 2: mov r22=%r11 +mov %rax,%r11 + +# qhasm: carry? r22 += c +# asm 1: add c=int64#6 +# asm 2: mov $0,>c=%r9 +mov $0,%r9 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq rax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER3 +mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER3 + +# qhasm: free rdx + +# qhasm: r23 = rax +# asm 1: mov r23=int64#10 +# asm 2: mov r23=%r12 +mov %rax,%r12 + +# qhasm: r23 += c +# asm 1: add rax=int64#7 +# asm 2: movq rax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 +mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 + +# qhasm: carry? r21 += rax +# asm 1: add c=int64#6 +# asm 2: mov $0,>c=%r9 +mov $0,%r9 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq rax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1 +mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER1 + +# qhasm: carry? r22 += rax +# asm 1: add c=int64#4 +# asm 2: mov $0,>c=%rcx +mov $0,%rcx + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq rax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER2 +mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER2 + +# qhasm: free rdx + +# qhasm: r23 += rax +# asm 1: add rax=int64#7 +# asm 2: movq rax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 +mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 + +# qhasm: carry? 
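Step two multiplies the quotient estimate back by the order, and only the low four limbs (r20..r23) are ever formed: the remainder x - q*n is known to fit in 256 bits, so the product is only needed mod 2^256 and every limb product landing at position 4 or above is skipped. A sketch of such a low-half multiply (illustrative only):

#include <stdint.h>

void mul_low4(uint64_t r[4], const uint64_t a[4], const uint64_t b[4])
{
    for (int i = 0; i < 4; i++) r[i] = 0;
    for (int i = 0; i < 4; i++) {
        uint64_t c = 0;
        for (int j = 0; i + j < 4; j++) {        /* drop terms past limb 3 */
            unsigned __int128 acc = (unsigned __int128)a[i] * b[j] + r[i+j] + c;
            r[i+j] = (uint64_t)acc;
            c = (uint64_t)(acc >> 64);
        }
    }
}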
r22 += rax +# asm 1: add c=int64#4 +# asm 2: mov $0,>c=%rcx +mov $0,%rcx + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq rax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER1 +mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER1 + +# qhasm: free rdx + +# qhasm: r23 += rax +# asm 1: add rax=int64#7 +# asm 2: movq rax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 +mulq crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 + +# qhasm: free rdx + +# qhasm: r23 += rax +# asm 1: add r0=int64#3 +# asm 2: movq 0(r0=%rdx +movq 0(%rsi),%rdx + +# qhasm: carry? r0 -= r20 +# asm 1: sub t0=int64#4 +# asm 2: mov t0=%rcx +mov %rdx,%rcx + +# qhasm: r1 = *(uint64 *)(xp + 8) +# asm 1: movq 8(r1=int64#5 +# asm 2: movq 8(r1=%r8 +movq 8(%rsi),%r8 + +# qhasm: carry? r1 -= r21 - carry +# asm 1: sbb t1=int64#6 +# asm 2: mov t1=%r9 +mov %r8,%r9 + +# qhasm: r2 = *(uint64 *)(xp + 16) +# asm 1: movq 16(r2=int64#7 +# asm 2: movq 16(r2=%rax +movq 16(%rsi),%rax + +# qhasm: carry? r2 -= r22 - carry +# asm 1: sbb t2=int64#8 +# asm 2: mov t2=%r10 +mov %rax,%r10 + +# qhasm: r3 = *(uint64 *)(xp + 24) +# asm 1: movq 24(r3=int64#2 +# asm 2: movq 24(r3=%rsi +movq 24(%rsi),%rsi + +# qhasm: r3 -= r23 - carry +# asm 1: sbb t3=int64#9 +# asm 2: mov t3=%r11 +mov %rsi,%r11 + +# qhasm: carry? t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 +# asm 1: sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,t0=int64#4 +# asm 2: mov t0=%rcx +mov %rdx,%rcx + +# qhasm: r1 = t1 if !unsigned< +# asm 1: cmovae t1=int64#6 +# asm 2: mov t1=%r9 +mov %r8,%r9 + +# qhasm: r2 = t2 if !unsigned< +# asm 1: cmovae t2=int64#8 +# asm 2: mov t2=%r10 +mov %rax,%r10 + +# qhasm: r3 = t3 if !unsigned< +# asm 1: cmovae t3=int64#9 +# asm 2: mov t3=%r11 +mov %rsi,%r11 + +# qhasm: carry? 
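The tail of the routine is the final reduction: r = x - q*n lies in [0, 3n), and each round of "subtract the order, keep the result only if no borrow" (the cmovae blocks) moves it toward [0, n) without branching on secret data. The same round written with a mask, the trick sc25519_from32bytes.c below spells as r->v[i] ^= mask & (...):

#include <stdint.h>

/* One constant-time conditional subtraction: r -= n iff r >= n. */
void cond_sub(uint64_t r[4], const uint64_t n[4])
{
    uint64_t t[4], borrow = 0, mask;
    for (int i = 0; i < 4; i++) {
        unsigned __int128 d = (unsigned __int128)r[i] - n[i] - borrow;
        t[i] = (uint64_t)d;
        borrow = (uint64_t)(d >> 64) & 1;
    }
    mask = borrow - 1;                       /* all-ones iff r >= n */
    for (int i = 0; i < 4; i++)
        r[i] ^= mask & (r[i] ^ t[i]);
}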
t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_ORDER0 +# asm 1: sub crypto_sign_ed25519_amd64_51_30k_batch_ORDER0,caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-51-30k/sc25519_from32bytes.c b/src/ed25519-supercop-amd64-51-30k/sc25519_from32bytes.c new file mode 100644 index 0000000..7f21e68 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/sc25519_from32bytes.c @@ -0,0 +1,55 @@ +#include "sc25519.h" + +/*Arithmetic modulo the group order n = 2^252 + 27742317777372353535851937790883648493 + * = 7237005577332262213973186563042994240857116359379907606001950938285454250989 + */ + +/* Contains order, 2*order, 4*order, 8*order, each represented in 4 consecutive unsigned long long */ +static const unsigned long long order[16] = {0x5812631A5CF5D3EDULL, 0x14DEF9DEA2F79CD6ULL, + 0x0000000000000000ULL, 0x1000000000000000ULL, + 0xB024C634B9EBA7DAULL, 0x29BDF3BD45EF39ACULL, + 0x0000000000000000ULL, 0x2000000000000000ULL, + 0x60498C6973D74FB4ULL, 0x537BE77A8BDE7359ULL, + 0x0000000000000000ULL, 0x4000000000000000ULL, + 0xC09318D2E7AE9F68ULL, 0xA6F7CEF517BCE6B2ULL, + 0x0000000000000000ULL, 0x8000000000000000ULL}; + +static unsigned long long smaller(unsigned long long a,unsigned long long b) +{ + unsigned long long atop = a >> 32; + unsigned long long abot = a & 4294967295; + unsigned long long btop = b >> 32; + unsigned long long bbot = b & 4294967295; + unsigned long long atopbelowbtop = (atop - btop) >> 63; + unsigned long long atopeqbtop = ((atop ^ btop) - 1) >> 63; + unsigned long long abotbelowbbot = (abot - bbot) >> 63; + return atopbelowbtop | (atopeqbtop & abotbelowbbot); +} + +void sc25519_from32bytes(sc25519 *r, const unsigned char x[32]) +{ + unsigned long long t[4]; + unsigned long long b; + unsigned long long mask; + int i, j; + + /* assuming little-endian */ + r->v[0] = *(unsigned long long *)x; + r->v[1] = *(((unsigned long long *)x)+1); + r->v[2] = *(((unsigned long long *)x)+2); + r->v[3] = *(((unsigned long long *)x)+3); + + for(j=3;j>=0;j--) + { + b=0; + for(i=0;i<4;i++) + { + b += order[4*j+i]; /* no overflow for this particular order */ + t[i] = r->v[i] - b; + b = smaller(r->v[i],b); + } + mask = b - 1; + for(i=0;i<4;i++) + r->v[i] ^= mask & (r->v[i] ^ t[i]); + } +} diff --git a/src/ed25519-supercop-amd64-51-30k/sc25519_from64bytes.c b/src/ed25519-supercop-amd64-51-30k/sc25519_from64bytes.c new file mode 100644 index 0000000..8e76a1b --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/sc25519_from64bytes.c @@ -0,0 +1,7 @@ +#include "sc25519.h" + +void sc25519_from64bytes(sc25519 *r, const unsigned char x[64]) +{ + /* assuming little-endian representation of unsigned long long */ + sc25519_barrett(r, (unsigned long long *)x); +} diff --git 
a/src/ed25519-supercop-amd64-51-30k/sc25519_from_shortsc.c b/src/ed25519-supercop-amd64-51-30k/sc25519_from_shortsc.c new file mode 100644 index 0000000..3b8ff2f --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/sc25519_from_shortsc.c @@ -0,0 +1,9 @@ +#include "sc25519.h" + +void sc25519_from_shortsc(sc25519 *r, const shortsc25519 *x) +{ + r->v[0] = x->v[0]; + r->v[1] = x->v[1]; + r->v[2] = 0; + r->v[3] = 0; +} diff --git a/src/ed25519-supercop-amd64-51-30k/sc25519_iszero.c b/src/ed25519-supercop-amd64-51-30k/sc25519_iszero.c new file mode 100644 index 0000000..21f593d --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/sc25519_iszero.c @@ -0,0 +1,10 @@ +#include "sc25519.h" + +int sc25519_iszero_vartime(const sc25519 *x) +{ + if(x->v[0] != 0) return 0; + if(x->v[1] != 0) return 0; + if(x->v[2] != 0) return 0; + if(x->v[3] != 0) return 0; + return 1; +} diff --git a/src/ed25519-supercop-amd64-51-30k/sc25519_lt.s b/src/ed25519-supercop-amd64-51-30k/sc25519_lt.s new file mode 100644 index 0000000..515faa5 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/sc25519_lt.s @@ -0,0 +1,131 @@ + +# qhasm: int64 xp + +# qhasm: int64 yp + +# qhasm: int64 ret + +# qhasm: input xp + +# qhasm: input yp + +# qhasm: output ret + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 doof + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_sc25519_lt +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_51_30k_batch_sc25519_lt +.globl crypto_sign_ed25519_amd64_51_30k_batch_sc25519_lt +_crypto_sign_ed25519_amd64_51_30k_batch_sc25519_lt: +crypto_sign_ed25519_amd64_51_30k_batch_sc25519_lt: +mov %rsp,%r11 +and $31,%r11 +add $0,%r11 +sub %r11,%rsp + +# qhasm: t0 = *(uint64 *)(xp + 0) +# asm 1: movq 0(t0=int64#3 +# asm 2: movq 0(t0=%rdx +movq 0(%rdi),%rdx + +# qhasm: t1 = *(uint64 *)(xp + 8) +# asm 1: movq 8(t1=int64#4 +# asm 2: movq 8(t1=%rcx +movq 8(%rdi),%rcx + +# qhasm: t2 = *(uint64 *)(xp + 16) +# asm 1: movq 16(t2=int64#5 +# asm 2: movq 16(t2=%r8 +movq 16(%rdi),%r8 + +# qhasm: t3 = *(uint64 *)(xp + 24) +# asm 1: movq 24(t3=int64#1 +# asm 2: movq 24(t3=%rdi +movq 24(%rdi),%rdi + +# qhasm: carry? 
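sc25519_iszero_vartime above is deliberately variable-time; the _vartime suffix flags that its early exits may leak through timing, which is acceptable where the tested value is public. For contrast, a constant-time zero test folds the ORed limbs to a 0/1 result without branching (a sketch; no such function exists in the imported sources):

#include <stdint.h>

int iszero_ct(const uint64_t v[4])
{
    uint64_t x = v[0] | v[1] | v[2] | v[3];
    return (int)(1 ^ ((x | (0 - x)) >> 63));  /* top bit of x | -x is 1 iff x != 0 */
}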
t0 -= *(uint64 *)(yp + 0) +# asm 1: subq 0(ret=int64#1 +# asm 2: mov $0,>ret=%rdi +mov $0,%rdi + +# qhasm: doof = 1 +# asm 1: mov $1,>doof=int64#2 +# asm 2: mov $1,>doof=%rsi +mov $1,%rsi + +# qhasm: ret = doof if carry +# asm 1: cmovc v, y->v); + sc25519_barrett(r, t); +} diff --git a/src/ed25519-supercop-amd64-51-30k/sc25519_mul_shortsc.c b/src/ed25519-supercop-amd64-51-30k/sc25519_mul_shortsc.c new file mode 100644 index 0000000..0c67250 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/sc25519_mul_shortsc.c @@ -0,0 +1,9 @@ +#include "sc25519.h" + +void sc25519_mul_shortsc(sc25519 *r, const sc25519 *x, const shortsc25519 *y) +{ + /* XXX: This wants to be faster */ + sc25519 t; + sc25519_from_shortsc(&t, y); + sc25519_mul(r, x, &t); +} diff --git a/src/ed25519-supercop-amd64-51-30k/sc25519_slide.c b/src/ed25519-supercop-amd64-51-30k/sc25519_slide.c new file mode 100644 index 0000000..4e52010 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/sc25519_slide.c @@ -0,0 +1,49 @@ +#include "sc25519.h" + +void sc25519_slide(signed char r[256], const sc25519 *s, int swindowsize) +{ + int i,j,k,b,m=(1<<(swindowsize-1))-1, soplen=256; + unsigned long long sv0 = s->v[0]; + unsigned long long sv1 = s->v[1]; + unsigned long long sv2 = s->v[2]; + unsigned long long sv3 = s->v[3]; + + /* first put the binary expansion into r */ + for(i=0;i<64;i++) { + r[i] = sv0 & 1; + r[i+64] = sv1 & 1; + r[i+128] = sv2 & 1; + r[i+192] = sv3 & 1; + sv0 >>= 1; + sv1 >>= 1; + sv2 >>= 1; + sv3 >>= 1; + } + + /* Making it sliding window */ + for (j = 0;j < soplen;++j) + { + if (r[j]) { + for (b = 1;b < soplen - j && b <= 6;++b) { + if (r[j] + (r[j + b] << b) <= m) + { + r[j] += r[j + b] << b; r[j + b] = 0; + } + else if (r[j] - (r[j + b] << b) >= -m) + { + r[j] -= r[j + b] << b; + for (k = j + b;k < soplen;++k) + { + if (!r[k]) { + r[k] = 1; + break; + } + r[k] = 0; + } + } + else if (r[j + b]) + break; + } + } + } +} diff --git a/src/ed25519-supercop-amd64-51-30k/sc25519_sub_nored.s b/src/ed25519-supercop-amd64-51-30k/sc25519_sub_nored.s new file mode 100644 index 0000000..ff2d69a --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/sc25519_sub_nored.s @@ -0,0 +1,142 @@ + +# qhasm: int64 rp + +# qhasm: int64 xp + +# qhasm: int64 yp + +# qhasm: input rp + +# qhasm: input xp + +# qhasm: input yp + +# qhasm: int64 r0 + +# qhasm: int64 r1 + +# qhasm: int64 r2 + +# qhasm: int64 r3 + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_sc25519_sub_nored +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_51_30k_batch_sc25519_sub_nored +.globl crypto_sign_ed25519_amd64_51_30k_batch_sc25519_sub_nored +_crypto_sign_ed25519_amd64_51_30k_batch_sc25519_sub_nored: +crypto_sign_ed25519_amd64_51_30k_batch_sc25519_sub_nored: +mov %rsp,%r11 +and $31,%r11 +add $0,%r11 +sub %r11,%rsp + +# qhasm: r0 = *(uint64 *)(xp + 0) +# asm 1: movq 0(r0=int64#4 +# asm 2: movq 0(r0=%rcx +movq 0(%rsi),%rcx + +# qhasm: r1 = *(uint64 *)(xp + 8) +# asm 1: movq 
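The recoding in sc25519_slide.c above has one invariant worth spelling out: the signed digits it emits must satisfy sum(r[i] * 2^i) == s, with every nonzero digit odd and bounded by m = 2^(swindowsize-1) - 1. The following one-limb transcription of the same loop checks that round-trip for a small scalar:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    signed char r[64] = {0};
    uint64_t s = 0xB6ULL;                      /* small example scalar */
    int m = (1 << (5 - 1)) - 1;                /* swindowsize = 5 */

    for (int i = 0; i < 64; i++) r[i] = (s >> i) & 1;   /* binary expansion */
    for (int j = 0; j < 64; j++) {                      /* sliding window */
        if (!r[j]) continue;
        for (int b = 1; b < 64 - j && b <= 6; b++) {
            if (r[j] + (r[j + b] << b) <= m) {
                r[j] += r[j + b] << b; r[j + b] = 0;
            } else if (r[j] - (r[j + b] << b) >= -m) {
                r[j] -= r[j + b] << b;
                for (int k = j + b; k < 64; k++) {      /* propagate carry */
                    if (!r[k]) { r[k] = 1; break; }
                    r[k] = 0;
                }
            } else if (r[j + b]) break;
        }
    }
    long long back = 0;
    for (int i = 63; i >= 0; i--) back = 2 * back + r[i];
    printf("s=%llu reconstructed=%lld\n", (unsigned long long)s, back);
    return 0;
}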
8(r1=int64#5 +# asm 2: movq 8(r1=%r8 +movq 8(%rsi),%r8 + +# qhasm: r2 = *(uint64 *)(xp + 16) +# asm 1: movq 16(r2=int64#6 +# asm 2: movq 16(r2=%r9 +movq 16(%rsi),%r9 + +# qhasm: r3 = *(uint64 *)(xp + 24) +# asm 1: movq 24(r3=int64#2 +# asm 2: movq 24(r3=%rsi +movq 24(%rsi),%rsi + +# qhasm: carry? r0 -= *(uint64 *)(yp + 0) +# asm 1: subq 0(v]; +} diff --git a/src/ed25519-supercop-amd64-51-30k/sc25519_window4.c b/src/ed25519-supercop-amd64-51-30k/sc25519_window4.c new file mode 100644 index 0000000..683a1d4 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/sc25519_window4.c @@ -0,0 +1,27 @@ +#include "sc25519.h" + +void sc25519_window4(signed char r[64], const sc25519 *s) +{ + char carry; + int i; + for(i=0;i<16;i++) + r[i] = (s->v[0] >> (4*i)) & 15; + for(i=0;i<16;i++) + r[i+16] = (s->v[1] >> (4*i)) & 15; + for(i=0;i<16;i++) + r[i+32] = (s->v[2] >> (4*i)) & 15; + for(i=0;i<16;i++) + r[i+48] = (s->v[3] >> (4*i)) & 15; + + /* Making it signed */ + carry = 0; + for(i=0;i<63;i++) + { + r[i] += carry; + r[i+1] += r[i] >> 4; + r[i] &= 15; + carry = r[i] >> 3; + r[i] -= carry << 4; + } + r[63] += carry; +} diff --git a/src/ed25519-supercop-amd64-51-30k/sign.c b/src/ed25519-supercop-amd64-51-30k/sign.c new file mode 100644 index 0000000..3c3a2c8 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/sign.c @@ -0,0 +1,56 @@ +#include "crypto_sign.h" +#include "crypto_hash_sha512.h" +#include "ge25519.h" +#include "hram.h" + +int crypto_sign( + unsigned char *sm,unsigned long long *smlen, + const unsigned char *m,unsigned long long mlen, + const unsigned char *sk + ) +{ + sc25519 sck, scs, scsk; + ge25519 ger; + unsigned char r[32]; + unsigned char s[32]; + unsigned char extsk[64]; + unsigned long long i; + unsigned char hmg[crypto_hash_sha512_BYTES]; + unsigned char hram[crypto_hash_sha512_BYTES]; + + crypto_hash_sha512(extsk, sk, 32); + extsk[0] &= 248; + extsk[31] &= 127; + extsk[31] |= 64; + + *smlen = mlen+64; + for(i=0;icaller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: yp = yp +# asm 1: mov yp=int64#4 +# asm 2: mov yp=%rcx +mov %rdx,%rcx + +# qhasm: r4 = 0 +# asm 1: mov $0,>r4=int64#5 +# asm 2: mov $0,>r4=%r8 +mov $0,%r8 + +# qhasm: r5 = 0 +# asm 1: mov $0,>r5=int64#6 +# asm 2: mov $0,>r5=%r9 +mov $0,%r9 + +# qhasm: r6 = 0 +# asm 1: mov $0,>r6=int64#8 +# asm 2: mov $0,>r6=%r10 +mov $0,%r10 + +# qhasm: r7 = 0 +# asm 1: mov $0,>r7=int64#9 +# asm 2: mov $0,>r7=%r11 +mov $0,%r11 + +# qhasm: zero = 0 +# asm 1: mov $0,>zero=int64#10 +# asm 2: mov $0,>zero=%r12 +mov $0,%r12 + +# qhasm: rax = *(uint64 *)(xp + 0) +# asm 1: movq 0(rax=int64#7 +# asm 2: movq 0(rax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0) +# asm 1: mulq 0(r0=int64#11 +# asm 
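The carry loop in sc25519_window4.c above converts unsigned radix-16 digits (0..15) into signed digits (-8..7) by pushing a +1 into the next digit whenever the current one exceeds 7. A worked example on four digits (illustrative):

#include <stdio.h>

int main(void)
{
    signed char r[4] = {13, 2, 9, 1};   /* 0x192D = 6445 in radix-16 digits */
    signed char carry = 0;
    for (int i = 0; i < 3; i++) {
        r[i] += carry;
        r[i+1] += r[i] >> 4;            /* after +carry, r[i] <= 16: adds 0 or 1 */
        r[i] &= 15;
        carry = r[i] >> 3;              /* 1 iff digit >= 8 */
        r[i] -= carry << 4;             /* map 8..15 to -8..-1 */
    }
    r[3] += carry;
    int v = 0;
    for (int i = 3; i >= 0; i--) v = v * 16 + r[i];
    printf("digits %d %d %d %d, value %d (expect 6445)\n",
           r[0], r[1], r[2], r[3], v);  /* prints -3 3 -7 2, value 6445 */
    return 0;
}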
2: mov r0=%r13 +mov %rax,%r13 + +# qhasm: c = rdx +# asm 1: mov c=int64#12 +# asm 2: mov c=%r14 +mov %rdx,%r14 + +# qhasm: rax = *(uint64 *)(xp + 0) +# asm 1: movq 0(rax=int64#7 +# asm 2: movq 0(rax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8) +# asm 1: mulq 8(r1=int64#13 +# asm 2: mov r1=%r15 +mov %rax,%r15 + +# qhasm: carry? r1 += c +# asm 1: add c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 0(rax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16) +# asm 1: mulq 16(r2=int64#14 +# asm 2: mov r2=%rbx +mov %rax,%rbx + +# qhasm: carry? r2 += c +# asm 1: add c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 0(rax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24) +# asm 1: mulq 24(r3=int64#15 +# asm 2: mov r3=%rbp +mov %rax,%rbp + +# qhasm: carry? r3 += c +# asm 1: add rax=int64#7 +# asm 2: movq 8(rax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0) +# asm 1: mulq 0(c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 8(rax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8) +# asm 1: mulq 8(c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 8(rax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16) +# asm 1: mulq 16(c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 8(rax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24) +# asm 1: mulq 24(rax=int64#7 +# asm 2: movq 16(rax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0) +# asm 1: mulq 0(c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 16(rax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8) +# asm 1: mulq 8(c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 16(rax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16) +# asm 1: mulq 16(c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 16(rax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24) +# asm 1: mulq 24(rax=int64#7 +# asm 2: movq 24(rax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0) +# asm 1: mulq 0(c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 24(rax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8) +# asm 1: mulq 8(c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 24(rax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16) +# asm 1: mulq 16(c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 24(rax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24) +# asm 1: mulq 24(caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = 
caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-64-24k/api.h b/src/ed25519-supercop-amd64-64-24k/api.h new file mode 100644 index 0000000..1d0c988 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/api.h @@ -0,0 +1,4 @@ +#define CRYPTO_SECRETKEYBYTES 64 +#define CRYPTO_PUBLICKEYBYTES 32 +#define CRYPTO_BYTES 64 + diff --git a/src/ed25519-supercop-amd64-64-24k/architectures b/src/ed25519-supercop-amd64-64-24k/architectures new file mode 100644 index 0000000..21d5bd8 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/architectures @@ -0,0 +1 @@ +amd64 diff --git a/src/ed25519-supercop-amd64-64-24k/batch.c b/src/ed25519-supercop-amd64-64-24k/batch.c new file mode 100644 index 0000000..955392e --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/batch.c @@ -0,0 +1,94 @@ +#include "crypto_sign.h" + +#include "crypto_verify_32.h" +#include "crypto_hash_sha512.h" +#include "randombytes.h" + +#include "ge25519.h" +#include "hram.h" + +#define MAXBATCH 64 + +int crypto_sign_open_batch( + unsigned char* const m[],unsigned long long mlen[], + unsigned char* const sm[],const unsigned long long smlen[], + unsigned char* const pk[], + unsigned long long num + ) +{ + int ret = 0; + unsigned long long i, j; + shortsc25519 r[MAXBATCH]; + sc25519 scalars[2*MAXBATCH+1]; + ge25519 points[2*MAXBATCH+1]; + unsigned char hram[crypto_hash_sha512_BYTES]; + unsigned long long batchsize; + + for (i = 0;i < num;++i) mlen[i] = -1; + + while (num >= 3) { + batchsize = num; + if (batchsize > MAXBATCH) batchsize = MAXBATCH; + + for (i = 0;i < batchsize;++i) + if (smlen[i] < 64) goto fallback; + + randombytes((unsigned char*)r,sizeof(shortsc25519) * batchsize); + + /* Computing scalars[0] = ((r1s1 + r2s2 + ...)) */ + for(i=0;icaller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: tp_stack = tp +# asm 1: movq tp_stack=stack64#8 +# asm 2: movq tp_stack=56(%rsp) +movq %rdi,56(%rsp) + +# qhasm: pos *= 768 +# asm 1: imulq $768,pos=int64#1 +# asm 2: imulq $768,pos=%rdi +imulq 
$768,%rsi,%rdi + +# qhasm: mask = b +# asm 1: mov mask=int64#2 +# asm 2: mov mask=%rsi +mov %rdx,%rsi + +# qhasm: (int64) mask >>= 7 +# asm 1: sar $7,u=int64#5 +# asm 2: mov u=%r8 +mov %rdx,%r8 + +# qhasm: u += mask +# asm 1: add tysubx0=int64#2 +# asm 2: mov $1,>tysubx0=%rsi +mov $1,%rsi + +# qhasm: tysubx1 = 0 +# asm 1: mov $0,>tysubx1=int64#6 +# asm 2: mov $0,>tysubx1=%r9 +mov $0,%r9 + +# qhasm: tysubx2 = 0 +# asm 1: mov $0,>tysubx2=int64#7 +# asm 2: mov $0,>tysubx2=%rax +mov $0,%rax + +# qhasm: tysubx3 = 0 +# asm 1: mov $0,>tysubx3=int64#8 +# asm 2: mov $0,>tysubx3=%r10 +mov $0,%r10 + +# qhasm: txaddy0 = 1 +# asm 1: mov $1,>txaddy0=int64#9 +# asm 2: mov $1,>txaddy0=%r11 +mov $1,%r11 + +# qhasm: txaddy1 = 0 +# asm 1: mov $0,>txaddy1=int64#10 +# asm 2: mov $0,>txaddy1=%r12 +mov $0,%r12 + +# qhasm: txaddy2 = 0 +# asm 1: mov $0,>txaddy2=int64#11 +# asm 2: mov $0,>txaddy2=%r13 +mov $0,%r13 + +# qhasm: txaddy3 = 0 +# asm 1: mov $0,>txaddy3=int64#12 +# asm 2: mov $0,>txaddy3=%r14 +mov $0,%r14 + +# qhasm: =? u - 1 +# asm 1: cmp $1,t=int64#13 +# asm 2: movq 0(t=%r15 +movq 0(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 8(t=%r15 +movq 8(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 16(t=%r15 +movq 16(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 24(t=%r15 +movq 24(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 32(t=%r15 +movq 32(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 40(t=%r15 +movq 40(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 48(t=%r15 +movq 48(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 56(t=%r15 +movq 56(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 96(t=%r15 +movq 96(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 104(t=%r15 +movq 104(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 112(t=%r15 +movq 112(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 120(t=%r15 +movq 120(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 128(t=%r15 +movq 128(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 136(t=%r15 +movq 136(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 144(t=%r15 +movq 144(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 152(t=%r15 +movq 152(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 192(t=%r15 +movq 192(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 200(t=%r15 +movq 200(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 208(t=%r15 +movq 208(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 216(t=%r15 +movq 216(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 224(t=%r15 +movq 224(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 232(t=%r15 +movq 232(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 240(t=%r15 +movq 240(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 248(t=%r15 +movq 
248(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 288(t=%r15 +movq 288(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 296(t=%r15 +movq 296(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 304(t=%r15 +movq 304(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 312(t=%r15 +movq 312(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 320(t=%r15 +movq 320(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 328(t=%r15 +movq 328(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 336(t=%r15 +movq 336(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 344(t=%r15 +movq 344(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 384(t=%r15 +movq 384(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 392(t=%r15 +movq 392(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 400(t=%r15 +movq 400(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 408(t=%r15 +movq 408(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 416(t=%r15 +movq 416(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 424(t=%r15 +movq 424(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 432(t=%r15 +movq 432(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 440(t=%r15 +movq 440(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 480(t=%r15 +movq 480(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 488(t=%r15 +movq 488(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 496(t=%r15 +movq 496(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 504(t=%r15 +movq 504(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 512(t=%r15 +movq 512(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 520(t=%r15 +movq 520(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 528(t=%r15 +movq 528(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 536(t=%r15 +movq 536(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 576(t=%r15 +movq 576(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 584(t=%r15 +movq 584(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 592(t=%r15 +movq 592(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 600(t=%r15 +movq 600(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 608(t=%r15 +movq 608(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 616(t=%r15 +movq 616(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 624(t=%r15 +movq 624(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 632(t=%r15 +movq 632(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 
672(t=%r15 +movq 672(%rcx,%rdi),%r15 + +# qhasm: tysubx0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 680(t=%r15 +movq 680(%rcx,%rdi),%r15 + +# qhasm: tysubx1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 688(t=%r15 +movq 688(%rcx,%rdi),%r15 + +# qhasm: tysubx2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 696(t=%r15 +movq 696(%rcx,%rdi),%r15 + +# qhasm: tysubx3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 704(t=%r15 +movq 704(%rcx,%rdi),%r15 + +# qhasm: txaddy0 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 712(t=%r15 +movq 712(%rcx,%rdi),%r15 + +# qhasm: txaddy1 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 720(t=%r15 +movq 720(%rcx,%rdi),%r15 + +# qhasm: txaddy2 = t if = +# asm 1: cmove t=int64#13 +# asm 2: movq 728(t=%r15 +movq 728(%rcx,%rdi),%r15 + +# qhasm: txaddy3 = t if = +# asm 1: cmove t=int64#13 +# asm 2: mov t=%r15 +mov %rsi,%r15 + +# qhasm: tysubx0 = txaddy0 if signed< +# asm 1: cmovl t=int64#13 +# asm 2: mov t=%r15 +mov %r9,%r15 + +# qhasm: tysubx1 = txaddy1 if signed< +# asm 1: cmovl t=int64#13 +# asm 2: mov t=%r15 +mov %rax,%r15 + +# qhasm: tysubx2 = txaddy2 if signed< +# asm 1: cmovl t=int64#13 +# asm 2: mov t=%r15 +mov %r10,%r15 + +# qhasm: tysubx3 = txaddy3 if signed< +# asm 1: cmovl tp=int64#13 +# asm 2: movq tp=%r15 +movq 56(%rsp),%r15 + +# qhasm: *(uint64 *)(tp + 0) = tysubx0 +# asm 1: movq tt2d0=int64#2 +# asm 2: mov $0,>tt2d0=%rsi +mov $0,%rsi + +# qhasm: tt2d1 = 0 +# asm 1: mov $0,>tt2d1=int64#6 +# asm 2: mov $0,>tt2d1=%r9 +mov $0,%r9 + +# qhasm: tt2d2 = 0 +# asm 1: mov $0,>tt2d2=int64#7 +# asm 2: mov $0,>tt2d2=%rax +mov $0,%rax + +# qhasm: tt2d3 = 0 +# asm 1: mov $0,>tt2d3=int64#8 +# asm 2: mov $0,>tt2d3=%r10 +mov $0,%r10 + +# qhasm: =? u - 1 +# asm 1: cmp $1,t=int64#9 +# asm 2: movq 64(t=%r11 +movq 64(%rcx,%rdi),%r11 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 72(t=%r11 +movq 72(%rcx,%rdi),%r11 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 80(t=%r11 +movq 80(%rcx,%rdi),%r11 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 88(t=%r11 +movq 88(%rcx,%rdi),%r11 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 160(t=%r11 +movq 160(%rcx,%rdi),%r11 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 168(t=%r11 +movq 168(%rcx,%rdi),%r11 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 176(t=%r11 +movq 176(%rcx,%rdi),%r11 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 184(t=%r11 +movq 184(%rcx,%rdi),%r11 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 256(t=%r11 +movq 256(%rcx,%rdi),%r11 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 264(t=%r11 +movq 264(%rcx,%rdi),%r11 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 272(t=%r11 +movq 272(%rcx,%rdi),%r11 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 280(t=%r11 +movq 280(%rcx,%rdi),%r11 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 352(t=%r11 +movq 352(%rcx,%rdi),%r11 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 360(t=%r11 +movq 360(%rcx,%rdi),%r11 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 368(t=%r11 +movq 368(%rcx,%rdi),%r11 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 376(t=%r11 +movq 376(%rcx,%rdi),%r11 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 448(t=%r11 +movq 448(%rcx,%rdi),%r11 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 
456(t=%r11 +movq 456(%rcx,%rdi),%r11 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 464(t=%r11 +movq 464(%rcx,%rdi),%r11 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 472(t=%r11 +movq 472(%rcx,%rdi),%r11 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 544(t=%r11 +movq 544(%rcx,%rdi),%r11 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 552(t=%r11 +movq 552(%rcx,%rdi),%r11 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 560(t=%r11 +movq 560(%rcx,%rdi),%r11 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 568(t=%r11 +movq 568(%rcx,%rdi),%r11 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 640(t=%r11 +movq 640(%rcx,%rdi),%r11 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 648(t=%r11 +movq 648(%rcx,%rdi),%r11 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 656(t=%r11 +movq 656(%rcx,%rdi),%r11 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#9 +# asm 2: movq 664(t=%r11 +movq 664(%rcx,%rdi),%r11 + +# qhasm: tt2d3 = t if = +# asm 1: cmove t=int64#5 +# asm 2: movq 736(t=%r8 +movq 736(%rcx,%rdi),%r8 + +# qhasm: tt2d0 = t if = +# asm 1: cmove t=int64#5 +# asm 2: movq 744(t=%r8 +movq 744(%rcx,%rdi),%r8 + +# qhasm: tt2d1 = t if = +# asm 1: cmove t=int64#5 +# asm 2: movq 752(t=%r8 +movq 752(%rcx,%rdi),%r8 + +# qhasm: tt2d2 = t if = +# asm 1: cmove t=int64#1 +# asm 2: movq 760(t=%rdi +movq 760(%rcx,%rdi),%rdi + +# qhasm: tt2d3 = t if = +# asm 1: cmove tt0=int64#1 +# asm 2: mov $0,>tt0=%rdi +mov $0,%rdi + +# qhasm: tt1 = 0 +# asm 1: mov $0,>tt1=int64#4 +# asm 2: mov $0,>tt1=%rcx +mov $0,%rcx + +# qhasm: tt2 = 0 +# asm 1: mov $0,>tt2=int64#5 +# asm 2: mov $0,>tt2=%r8 +mov $0,%r8 + +# qhasm: tt3 = 0 +# asm 1: mov $0,>tt3=int64#9 +# asm 2: mov $0,>tt3=%r11 +mov $0,%r11 + +# qhasm: carry? 
tt0 -= tt2d0 +# asm 1: sub subt0=int64#10 +# asm 2: mov $0,>subt0=%r12 +mov $0,%r12 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#11 +# asm 2: mov $38,>subt1=%r13 +mov $38,%r13 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-64-24k/consts.s b/src/ed25519-supercop-amd64-64-24k/consts.s new file mode 100644 index 0000000..c999a6b --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/consts.s @@ -0,0 +1,39 @@ +.data + +.globl crypto_sign_ed25519_amd64_64_24k_batch_121666 +.globl crypto_sign_ed25519_amd64_64_24k_batch_MU0 +.globl crypto_sign_ed25519_amd64_64_24k_batch_MU1 +.globl crypto_sign_ed25519_amd64_64_24k_batch_MU2 +.globl crypto_sign_ed25519_amd64_64_24k_batch_MU3 +.globl crypto_sign_ed25519_amd64_64_24k_batch_MU4 +.globl crypto_sign_ed25519_amd64_64_24k_batch_ORDER0 +.globl crypto_sign_ed25519_amd64_64_24k_batch_ORDER1 +.globl crypto_sign_ed25519_amd64_64_24k_batch_ORDER2 +.globl crypto_sign_ed25519_amd64_64_24k_batch_ORDER3 +.globl crypto_sign_ed25519_amd64_64_24k_batch_EC2D0 +.globl crypto_sign_ed25519_amd64_64_24k_batch_EC2D1 +.globl crypto_sign_ed25519_amd64_64_24k_batch_EC2D2 +.globl crypto_sign_ed25519_amd64_64_24k_batch_EC2D3 +.globl crypto_sign_ed25519_amd64_64_24k_batch_38 + +.p2align 4 + +crypto_sign_ed25519_amd64_64_24k_batch_121666: .quad 121666 + +crypto_sign_ed25519_amd64_64_24k_batch_MU0: .quad 0xED9CE5A30A2C131B +crypto_sign_ed25519_amd64_64_24k_batch_MU1: .quad 0x2106215D086329A7 +crypto_sign_ed25519_amd64_64_24k_batch_MU2: .quad 0xFFFFFFFFFFFFFFEB +crypto_sign_ed25519_amd64_64_24k_batch_MU3: .quad 0xFFFFFFFFFFFFFFFF +crypto_sign_ed25519_amd64_64_24k_batch_MU4: .quad 0x000000000000000F + +crypto_sign_ed25519_amd64_64_24k_batch_ORDER0: .quad 0x5812631A5CF5D3ED +crypto_sign_ed25519_amd64_64_24k_batch_ORDER1: .quad 0x14DEF9DEA2F79CD6 +crypto_sign_ed25519_amd64_64_24k_batch_ORDER2: .quad 0x0000000000000000 +crypto_sign_ed25519_amd64_64_24k_batch_ORDER3: .quad 0x1000000000000000 + +crypto_sign_ed25519_amd64_64_24k_batch_EC2D0: .quad 0xEBD69B9426B2F146 +crypto_sign_ed25519_amd64_64_24k_batch_EC2D1: .quad 0x00E0149A8283B156 +crypto_sign_ed25519_amd64_64_24k_batch_EC2D2: .quad 0x198E80F2EEF3D130 +crypto_sign_ed25519_amd64_64_24k_batch_EC2D3: .quad 0xA406D9DC56DFFCE7 + +crypto_sign_ed25519_amd64_64_24k_batch_38: .quad 38 diff --git a/src/ed25519-supercop-amd64-64-24k/fe25519.h b/src/ed25519-supercop-amd64-64-24k/fe25519.h new file mode 100644 index 0000000..633e7bc --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/fe25519.h @@ -0,0 +1,64 @@ +#ifndef FE25519_H +#define FE25519_H + +#define fe25519 crypto_sign_ed25519_amd64_64_24k_batch_fe25519 +#define fe25519_freeze 
crypto_sign_ed25519_amd64_64_24k_batch_fe25519_freeze +#define fe25519_unpack crypto_sign_ed25519_amd64_64_24k_batch_fe25519_unpack +#define fe25519_pack crypto_sign_ed25519_amd64_64_24k_batch_fe25519_pack +#define fe25519_iszero_vartime crypto_sign_ed25519_amd64_64_24k_batch_fe25519_iszero_vartime +#define fe25519_iseq_vartime crypto_sign_ed25519_amd64_64_24k_batch_fe25519_iseq_vartime +#define fe25519_cmov crypto_sign_ed25519_amd64_64_24k_batch_fe25519_cmov +#define fe25519_setint crypto_sign_ed25519_amd64_64_24k_batch_fe25519_setint +#define fe25519_neg crypto_sign_ed25519_amd64_64_24k_batch_fe25519_neg +#define fe25519_getparity crypto_sign_ed25519_amd64_64_24k_batch_fe25519_getparity +#define fe25519_add crypto_sign_ed25519_amd64_64_24k_batch_fe25519_add +#define fe25519_sub crypto_sign_ed25519_amd64_64_24k_batch_fe25519_sub +#define fe25519_mul crypto_sign_ed25519_amd64_64_24k_batch_fe25519_mul +#define fe25519_mul121666 crypto_sign_ed25519_amd64_64_24k_batch_fe25519_mul121666 +#define fe25519_square crypto_sign_ed25519_amd64_64_24k_batch_fe25519_square +#define fe25519_invert crypto_sign_ed25519_amd64_64_24k_batch_fe25519_invert +#define fe25519_pow2523 crypto_sign_ed25519_amd64_64_24k_batch_fe25519_pow2523 + +typedef struct +{ + unsigned long long v[4]; +} +fe25519; + +void fe25519_freeze(fe25519 *r); + +void fe25519_unpack(fe25519 *r, const unsigned char x[32]); + +void fe25519_pack(unsigned char r[32], const fe25519 *x); + +void fe25519_cmov(fe25519 *r, const fe25519 *x, unsigned char b); + +void fe25519_cswap(fe25519 *r, fe25519 *x, unsigned char b); + +void fe25519_setint(fe25519 *r, unsigned int v); + +void fe25519_neg(fe25519 *r, const fe25519 *x); + +unsigned char fe25519_getparity(const fe25519 *x); + +int fe25519_iszero_vartime(const fe25519 *x); + +int fe25519_iseq_vartime(const fe25519 *x, const fe25519 *y); + +void fe25519_add(fe25519 *r, const fe25519 *x, const fe25519 *y); + +void fe25519_sub(fe25519 *r, const fe25519 *x, const fe25519 *y); + +void fe25519_mul(fe25519 *r, const fe25519 *x, const fe25519 *y); + +void fe25519_mul121666(fe25519 *r, const fe25519 *x); + +void fe25519_square(fe25519 *r, const fe25519 *x); + +void fe25519_pow(fe25519 *r, const fe25519 *x, const unsigned char *e); + +void fe25519_invert(fe25519 *r, const fe25519 *x); + +void fe25519_pow2523(fe25519 *r, const fe25519 *x); + +#endif diff --git a/src/ed25519-supercop-amd64-64-24k/fe25519_add.s b/src/ed25519-supercop-amd64-64-24k/fe25519_add.s new file mode 100644 index 0000000..b24d540 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/fe25519_add.s @@ -0,0 +1,189 @@ + +# qhasm: int64 rp + +# qhasm: int64 xp + +# qhasm: int64 yp + +# qhasm: input rp + +# qhasm: input xp + +# qhasm: input yp + +# qhasm: int64 r0 + +# qhasm: int64 r1 + +# qhasm: int64 r2 + +# qhasm: int64 r3 + +# qhasm: int64 addt0 + +# qhasm: int64 addt1 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_24k_batch_fe25519_add +.text 
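+# ---------------------------------------------------------------------
+# [Editor's note; not part of the verbatim SUPERCOP import.]
+# fe25519_add computes r = x + y mod 2^255-19, with each field element
+# held as four 64-bit little-endian limbs (fe25519.v[0..3]). Because
+#     2^256 = 2*(2^255 - 19) + 38,  i.e.  2^256 == 38 (mod 2^255-19),
+# a carry out of the top limb is folded back in by adding 38; the
+# cmovae/cmovc instructions below select 38 or 0 without branching, so
+# the fold is constant-time. A rough C sketch of the same idea, where
+# add4/add1 are hypothetical helpers for the 4-limb add-with-carry
+# chain and the single-limb add returning the final carry:
+#
+#     uint64_t fold = 38, zero = 0;
+#     unsigned carry = add4(r, x, y);   /* r = x + y, top carry out  */
+#     if (!carry) fold = zero;          /* asm: cmovae, branch-free  */
+#     carry = add1(r, fold);            /* fold 38 into limb 0       */
+#     r[0] += carry ? 38 : 0;           /* second fold cannot carry  */
+#
+# The sum may still exceed 2^255-19; full canonical reduction is done
+# separately by fe25519_freeze before packing or comparisons.
+# ---------------------------------------------------------------------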
+.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_24k_batch_fe25519_add +.globl crypto_sign_ed25519_amd64_64_24k_batch_fe25519_add +_crypto_sign_ed25519_amd64_64_24k_batch_fe25519_add: +crypto_sign_ed25519_amd64_64_24k_batch_fe25519_add: +mov %rsp,%r11 +and $31,%r11 +add $0,%r11 +sub %r11,%rsp + +# qhasm: r0 = *(uint64 *)(xp + 0) +# asm 1: movq 0(r0=int64#4 +# asm 2: movq 0(r0=%rcx +movq 0(%rsi),%rcx + +# qhasm: r1 = *(uint64 *)(xp + 8) +# asm 1: movq 8(r1=int64#5 +# asm 2: movq 8(r1=%r8 +movq 8(%rsi),%r8 + +# qhasm: r2 = *(uint64 *)(xp + 16) +# asm 1: movq 16(r2=int64#6 +# asm 2: movq 16(r2=%r9 +movq 16(%rsi),%r9 + +# qhasm: r3 = *(uint64 *)(xp + 24) +# asm 1: movq 24(r3=int64#2 +# asm 2: movq 24(r3=%rsi +movq 24(%rsi),%rsi + +# qhasm: carry? r0 += *(uint64 *)(yp + 0) +# asm 1: addq 0(addt0=int64#3 +# asm 2: mov $0,>addt0=%rdx +mov $0,%rdx + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#7 +# asm 2: mov $38,>addt1=%rax +mov $38,%rax + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: r0 = *(uint64 *) (rp + 0) +# asm 1: movq 0(r0=int64#2 +# asm 2: movq 0(r0=%rsi +movq 0(%rdi),%rsi + +# qhasm: r1 = *(uint64 *) (rp + 8) +# asm 1: movq 8(r1=int64#3 +# asm 2: movq 8(r1=%rdx +movq 8(%rdi),%rdx + +# qhasm: r2 = *(uint64 *) (rp + 16) +# asm 1: movq 16(r2=int64#4 +# asm 2: movq 16(r2=%rcx +movq 16(%rdi),%rcx + +# qhasm: r3 = *(uint64 *) (rp + 24) +# asm 1: movq 24(r3=int64#5 +# asm 2: movq 24(r3=%r8 +movq 24(%rdi),%r8 + +# qhasm: t0 = r0 +# asm 1: mov t0=int64#6 +# asm 2: mov t0=%r9 +mov %rsi,%r9 + +# qhasm: t1 = r1 +# asm 1: mov t1=int64#7 +# asm 2: mov t1=%rax +mov %rdx,%rax + +# qhasm: t2 = r2 +# asm 1: mov t2=int64#8 +# asm 2: mov t2=%r10 +mov %rcx,%r10 + +# qhasm: t3 = r3 +# asm 1: mov t3=int64#9 +# asm 2: mov t3=%r11 +mov %r8,%r11 + +# qhasm: two63 = 1 +# asm 1: mov $1,>two63=int64#10 +# asm 2: mov $1,>two63=%r12 +mov $1,%r12 + +# qhasm: two63 <<= 63 +# asm 1: shl $63,t0=int64#6 +# asm 2: mov t0=%r9 +mov %rsi,%r9 + +# qhasm: t1 = r1 +# asm 1: mov t1=int64#7 +# asm 2: mov t1=%rax +mov %rdx,%rax + +# qhasm: t2 = r2 +# asm 1: mov t2=int64#8 +# asm 2: mov t2=%r10 +mov %rcx,%r10 + +# qhasm: t3 = r3 +# asm 1: mov t3=int64#9 +# asm 2: mov t3=%r11 +mov %r8,%r11 + +# qhasm: carry? 
t0 += 19 +# asm 1: add $19,caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-64-24k/fe25519_getparity.c b/src/ed25519-supercop-amd64-64-24k/fe25519_getparity.c new file mode 100644 index 0000000..a003ec8 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/fe25519_getparity.c @@ -0,0 +1,8 @@ +#include "fe25519.h" + +unsigned char fe25519_getparity(const fe25519 *x) +{ + fe25519 t = *x; + fe25519_freeze(&t); + return (unsigned char)t.v[0] & 1; +} diff --git a/src/ed25519-supercop-amd64-64-24k/fe25519_invert.c b/src/ed25519-supercop-amd64-64-24k/fe25519_invert.c new file mode 100644 index 0000000..a46d141 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/fe25519_invert.c @@ -0,0 +1,60 @@ +#include "fe25519.h" + +void fe25519_invert(fe25519 *r, const fe25519 *x) +{ + fe25519 z2; + fe25519 z9; + fe25519 z11; + fe25519 z2_5_0; + fe25519 z2_10_0; + fe25519 z2_20_0; + fe25519 z2_50_0; + fe25519 z2_100_0; + fe25519 t; + int i; + + /* 2 */ fe25519_square(&z2,x); + /* 4 */ fe25519_square(&t,&z2); + /* 8 */ fe25519_square(&t,&t); + /* 9 */ fe25519_mul(&z9,&t,x); + /* 11 */ fe25519_mul(&z11,&z9,&z2); + /* 22 */ fe25519_square(&t,&z11); + /* 2^5 - 2^0 = 31 */ fe25519_mul(&z2_5_0,&t,&z9); + + /* 2^6 - 2^1 */ fe25519_square(&t,&z2_5_0); + /* 2^20 - 2^10 */ for (i = 1;i < 5;i++) { fe25519_square(&t,&t); } + /* 2^10 - 2^0 */ fe25519_mul(&z2_10_0,&t,&z2_5_0); + + /* 2^11 - 2^1 */ fe25519_square(&t,&z2_10_0); + /* 2^20 - 2^10 */ for (i = 1;i < 10;i++) { fe25519_square(&t,&t); } + /* 2^20 - 2^0 */ fe25519_mul(&z2_20_0,&t,&z2_10_0); + + /* 2^21 - 2^1 */ fe25519_square(&t,&z2_20_0); + /* 2^40 - 2^20 */ for (i = 1;i < 20;i++) { fe25519_square(&t,&t); } + /* 2^40 - 2^0 */ fe25519_mul(&t,&t,&z2_20_0); + + /* 2^41 - 2^1 */ fe25519_square(&t,&t); + /* 2^50 - 2^10 */ for (i = 1;i < 10;i++) { fe25519_square(&t,&t); } + /* 2^50 - 2^0 */ fe25519_mul(&z2_50_0,&t,&z2_10_0); + + /* 2^51 - 2^1 */ fe25519_square(&t,&z2_50_0); + /* 2^100 - 2^50 */ for (i = 1;i < 50;i++) { fe25519_square(&t,&t); } + /* 2^100 - 2^0 */ fe25519_mul(&z2_100_0,&t,&z2_50_0); + + /* 2^101 - 2^1 */ fe25519_square(&t,&z2_100_0); + /* 2^200 - 2^100 */ for (i = 1;i < 100;i++) { fe25519_square(&t,&t); } + /* 2^200 - 2^0 */ fe25519_mul(&t,&t,&z2_100_0); + + /* 2^201 - 2^1 */ fe25519_square(&t,&t); + /* 2^250 - 2^50 */ for (i = 1;i < 50;i++) { fe25519_square(&t,&t); } + /* 2^250 - 2^0 */ fe25519_mul(&t,&t,&z2_50_0); + + /* 2^251 - 2^1 */ fe25519_square(&t,&t); + /* 2^252 - 2^2 */ fe25519_square(&t,&t); + /* 2^253 - 2^3 */ fe25519_square(&t,&t); + + /* 2^254 - 2^4 */ fe25519_square(&t,&t); + + /* 2^255 - 2^5 */ fe25519_square(&t,&t); + /* 2^255 - 21 */ fe25519_mul(r,&t,&z11); +} diff --git a/src/ed25519-supercop-amd64-64-24k/fe25519_iseq.c 
b/src/ed25519-supercop-amd64-64-24k/fe25519_iseq.c new file mode 100644 index 0000000..bf72f8c --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/fe25519_iseq.c @@ -0,0 +1,14 @@ +#include "fe25519.h" + +int fe25519_iseq_vartime(const fe25519 *x, const fe25519 *y) +{ + fe25519 t1 = *x; + fe25519 t2 = *y; + fe25519_freeze(&t1); + fe25519_freeze(&t2); + if(t1.v[0] != t2.v[0]) return 0; + if(t1.v[1] != t2.v[1]) return 0; + if(t1.v[2] != t2.v[2]) return 0; + if(t1.v[3] != t2.v[3]) return 0; + return 1; +} diff --git a/src/ed25519-supercop-amd64-64-24k/fe25519_iszero.c b/src/ed25519-supercop-amd64-64-24k/fe25519_iszero.c new file mode 100644 index 0000000..99e4daf --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/fe25519_iszero.c @@ -0,0 +1,12 @@ +#include "fe25519.h" + +int fe25519_iszero_vartime(const fe25519 *x) +{ + fe25519 t = *x; + fe25519_freeze(&t); + if (t.v[0]) return 0; + if (t.v[1]) return 0; + if (t.v[2]) return 0; + if (t.v[3]) return 0; + return 1; +} diff --git a/src/ed25519-supercop-amd64-64-24k/fe25519_mul.s b/src/ed25519-supercop-amd64-64-24k/fe25519_mul.s new file mode 100644 index 0000000..8b3663f --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/fe25519_mul.s @@ -0,0 +1,843 @@ + +# qhasm: int64 rp + +# qhasm: int64 xp + +# qhasm: int64 yp + +# qhasm: input rp + +# qhasm: input xp + +# qhasm: input yp + +# qhasm: int64 r0 + +# qhasm: int64 r1 + +# qhasm: int64 r2 + +# qhasm: int64 r3 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 mulr4 + +# qhasm: int64 mulr5 + +# qhasm: int64 mulr6 + +# qhasm: int64 mulr7 + +# qhasm: int64 mulrax + +# qhasm: int64 mulrdx + +# qhasm: int64 mulx0 + +# qhasm: int64 mulx1 + +# qhasm: int64 mulx2 + +# qhasm: int64 mulx3 + +# qhasm: int64 mulc + +# qhasm: int64 mulzero + +# qhasm: int64 muli38 + +# qhasm: enter crypto_sign_ed25519_amd64_64_24k_batch_fe25519_mul +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_24k_batch_fe25519_mul +.globl crypto_sign_ed25519_amd64_64_24k_batch_fe25519_mul +_crypto_sign_ed25519_amd64_64_24k_batch_fe25519_mul: +crypto_sign_ed25519_amd64_64_24k_batch_fe25519_mul: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = 
caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: yp = yp +# asm 1: mov yp=int64#4 +# asm 2: mov yp=%rcx +mov %rdx,%rcx + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = *(uint64 *)(xp + 0) +# asm 1: movq 0(mulx0=int64#10 +# asm 2: movq 0(mulx0=%r12 +movq 0(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(yp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul r0=int64#11 +# asm 2: mov r0=%r13 +mov %rax,%r13 + +# qhasm: r1 = mulrdx +# asm 1: mov r1=int64#12 +# asm 2: mov r1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(yp + 8) +# asm 1: movq 8(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul r2=int64#13 +# asm 2: mov $0,>r2=%r15 +mov $0,%r15 + +# qhasm: r2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul r3=int64#14 +# asm 2: mov $0,>r3=%rbx +mov $0,%rbx + +# qhasm: r3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#10 +# asm 2: movq 8(mulx1=%r12 +movq 8(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(yp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#10 +# asm 2: movq 16(mulx2=%r12 +movq 16(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(yp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * 
mulx2 +# asm 1: mul mulx3=int64#2 +# asm 2: movq 24(mulx3=%rsi +movq 24(%rsi),%rsi + +# qhasm: mulrax = *(uint64 *)(yp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#10 +# asm 2: mov $0,>mulc=%r12 +mov $0,%r12 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#10 +# asm 2: mov $0,>mulc=%r12 +mov $0,%r12 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#10 +# asm 2: mov $0,>mulc=%r12 +mov $0,%r12 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? r0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? r1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? r2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r11,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? r3 += mulrax +# asm 1: add mulr4=int64#2 +# asm 2: mov $0,>mulr4=%rsi +mov $0,%rsi + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#2 +# asm 2: imulq $38,mulr4=%rsi +imulq $38,%rsi,%rsi + +# qhasm: carry? 
r0 += mulr4 +# asm 1: add mulzero=int64#2 +# asm 2: mov $0,>mulzero=%rsi +mov $0,%rsi + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#3 +# asm 2: mov $38,>muli38=%rdx +mov $38,%rdx + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-64-24k/fe25519_neg.c b/src/ed25519-supercop-amd64-64-24k/fe25519_neg.c new file mode 100644 index 0000000..235b209 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/fe25519_neg.c @@ -0,0 +1,8 @@ +#include "fe25519.h" + +void fe25519_neg(fe25519 *r, const fe25519 *x) +{ + fe25519 t; + fe25519_setint(&t,0); + fe25519_sub(r,&t,x); +} diff --git a/src/ed25519-supercop-amd64-64-24k/fe25519_pack.c b/src/ed25519-supercop-amd64-64-24k/fe25519_pack.c new file mode 100644 index 0000000..caf5185 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/fe25519_pack.c @@ -0,0 +1,13 @@ +#include "fe25519.h" + +/* Assumes input x being reduced below 2^255 */ +void fe25519_pack(unsigned char r[32], const fe25519 *x) +{ + int i; + fe25519 t; + t = *x; + fe25519_freeze(&t); + /* assuming little-endian */ + for(i=0;i<32;i++) r[i] = i[(unsigned char *)&t.v]; +} + diff --git a/src/ed25519-supercop-amd64-64-24k/fe25519_pow2523.c b/src/ed25519-supercop-amd64-64-24k/fe25519_pow2523.c new file mode 100644 index 0000000..60042a0 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/fe25519_pow2523.c @@ -0,0 +1,55 @@ +#include "fe25519.h" + +void fe25519_pow2523(fe25519 *r, const fe25519 *x) +{ + fe25519 z2; + fe25519 z9; + fe25519 z11; + fe25519 z2_5_0; + fe25519 z2_10_0; + fe25519 z2_20_0; + fe25519 z2_50_0; + fe25519 z2_100_0; + fe25519 t; + int i; + + /* 2 */ fe25519_square(&z2,x); + /* 4 */ fe25519_square(&t,&z2); + /* 8 */ fe25519_square(&t,&t); + /* 9 */ fe25519_mul(&z9,&t,x); + /* 11 */ fe25519_mul(&z11,&z9,&z2); + /* 22 */ fe25519_square(&t,&z11); + /* 2^5 - 2^0 = 31 */ fe25519_mul(&z2_5_0,&t,&z9); + + /* 2^6 - 2^1 */ fe25519_square(&t,&z2_5_0); + /* 2^10 - 2^5 */ for (i = 1;i < 5;i++) { fe25519_square(&t,&t); } + /* 2^10 - 2^0 */ fe25519_mul(&z2_10_0,&t,&z2_5_0); + + /* 2^11 - 2^1 */ fe25519_square(&t,&z2_10_0); + /* 2^20 - 2^10 */ for (i = 1;i < 10;i++) { fe25519_square(&t,&t); } + /* 2^20 - 2^0 */ fe25519_mul(&z2_20_0,&t,&z2_10_0); + + /* 2^21 - 2^1 */ fe25519_square(&t,&z2_20_0); + /* 2^40 - 2^20 */ for (i = 1;i < 20;i++) { fe25519_square(&t,&t); } + /* 2^40 - 2^0 */ fe25519_mul(&t,&t,&z2_20_0); + + /* 2^41 - 2^1 */ fe25519_square(&t,&t); + /* 2^50 - 2^10 */ for (i = 1;i < 10;i++) { fe25519_square(&t,&t); } + /* 2^50 - 2^0 */ fe25519_mul(&z2_50_0,&t,&z2_10_0); + + /* 2^51 - 2^1 */ fe25519_square(&t,&z2_50_0); + /* 2^100 - 2^50 */ for (i = 1;i < 50;i++) { fe25519_square(&t,&t); } + /* 2^100 - 2^0 */ 
fe25519_mul(&z2_100_0,&t,&z2_50_0); + + /* 2^101 - 2^1 */ fe25519_square(&t,&z2_100_0); + /* 2^200 - 2^100 */ for (i = 1;i < 100;i++) { fe25519_square(&t,&t); } + /* 2^200 - 2^0 */ fe25519_mul(&t,&t,&z2_100_0); + + /* 2^201 - 2^1 */ fe25519_square(&t,&t); + /* 2^250 - 2^50 */ for (i = 1;i < 50;i++) { fe25519_square(&t,&t); } + /* 2^250 - 2^0 */ fe25519_mul(&t,&t,&z2_50_0); + + /* 2^251 - 2^1 */ fe25519_square(&t,&t); + /* 2^252 - 2^2 */ fe25519_square(&t,&t); + /* 2^252 - 3 */ fe25519_mul(r,&t,x); +} diff --git a/src/ed25519-supercop-amd64-64-24k/fe25519_setint.c b/src/ed25519-supercop-amd64-64-24k/fe25519_setint.c new file mode 100644 index 0000000..585c4bd --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/fe25519_setint.c @@ -0,0 +1,9 @@ +#include "fe25519.h" + +void fe25519_setint(fe25519 *r, unsigned int v) +{ + r->v[0] = v; + r->v[1] = 0; + r->v[2] = 0; + r->v[3] = 0; +} diff --git a/src/ed25519-supercop-amd64-64-24k/fe25519_square.s b/src/ed25519-supercop-amd64-64-24k/fe25519_square.s new file mode 100644 index 0000000..cdb3d95 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/fe25519_square.s @@ -0,0 +1,617 @@ + +# qhasm: int64 rp + +# qhasm: int64 xp + +# qhasm: input rp + +# qhasm: input xp + +# qhasm: int64 r0 + +# qhasm: int64 r1 + +# qhasm: int64 r2 + +# qhasm: int64 r3 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 squarer4 + +# qhasm: int64 squarer5 + +# qhasm: int64 squarer6 + +# qhasm: int64 squarer7 + +# qhasm: int64 squarerax + +# qhasm: int64 squarerdx + +# qhasm: int64 squaret1 + +# qhasm: int64 squaret2 + +# qhasm: int64 squaret3 + +# qhasm: int64 squarec + +# qhasm: int64 squarezero + +# qhasm: int64 squarei38 + +# qhasm: enter crypto_sign_ed25519_amd64_64_24k_batch_fe25519_square +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_24k_batch_fe25519_square +.globl crypto_sign_ed25519_amd64_64_24k_batch_fe25519_square +_crypto_sign_ed25519_amd64_64_24k_batch_fe25519_square: +crypto_sign_ed25519_amd64_64_24k_batch_fe25519_square: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq 
%rbp,48(%rsp) + +# qhasm: squarer7 = 0 +# asm 1: mov $0,>squarer7=int64#4 +# asm 2: mov $0,>squarer7=%rcx +mov $0,%rcx + +# qhasm: squarerax = *(uint64 *)(xp + 8) +# asm 1: movq 8(squarerax=int64#7 +# asm 2: movq 8(squarerax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 0) +# asm 1: mulq 0(r1=int64#5 +# asm 2: mov r1=%r8 +mov %rax,%r8 + +# qhasm: r2 = squarerdx +# asm 1: mov r2=int64#6 +# asm 2: mov r2=%r9 +mov %rdx,%r9 + +# qhasm: squarerax = *(uint64 *)(xp + 16) +# asm 1: movq 16(squarerax=int64#7 +# asm 2: movq 16(squarerax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 8) +# asm 1: mulq 8(r3=int64#8 +# asm 2: mov r3=%r10 +mov %rax,%r10 + +# qhasm: squarer4 = squarerdx +# asm 1: mov squarer4=int64#9 +# asm 2: mov squarer4=%r11 +mov %rdx,%r11 + +# qhasm: squarerax = *(uint64 *)(xp + 24) +# asm 1: movq 24(squarerax=int64#7 +# asm 2: movq 24(squarerax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 16) +# asm 1: mulq 16(squarer5=int64#10 +# asm 2: mov squarer5=%r12 +mov %rax,%r12 + +# qhasm: squarer6 = squarerdx +# asm 1: mov squarer6=int64#11 +# asm 2: mov squarer6=%r13 +mov %rdx,%r13 + +# qhasm: squarerax = *(uint64 *)(xp + 16) +# asm 1: movq 16(squarerax=int64#7 +# asm 2: movq 16(squarerax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 0) +# asm 1: mulq 0(squarerax=int64#7 +# asm 2: movq 24(squarerax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 8) +# asm 1: mulq 8(squarerax=int64#7 +# asm 2: movq 24(squarerax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 0) +# asm 1: mulq 0(squarerax=int64#7 +# asm 2: movq 0(squarerax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 0) +# asm 1: mulq 0(r0=int64#12 +# asm 2: mov r0=%r14 +mov %rax,%r14 + +# qhasm: squaret1 = squarerdx +# asm 1: mov squaret1=int64#13 +# asm 2: mov squaret1=%r15 +mov %rdx,%r15 + +# qhasm: squarerax = *(uint64 *)(xp + 8) +# asm 1: movq 8(squarerax=int64#7 +# asm 2: movq 8(squarerax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 8) +# asm 1: mulq 8(squaret2=int64#14 +# asm 2: mov squaret2=%rbx +mov %rax,%rbx + +# qhasm: squaret3 = squarerdx +# asm 1: mov squaret3=int64#15 +# asm 2: mov squaret3=%rbp +mov %rdx,%rbp + +# qhasm: squarerax = *(uint64 *)(xp + 16) +# asm 1: movq 16(squarerax=int64#7 +# asm 2: movq 16(squarerax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 16) +# asm 1: mulq 16(squarerax=int64#7 +# asm 2: movq 24(squarerax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(xp + 24) +# asm 1: mulq 24(squarerax=int64#7 +# asm 2: mov squarerax=%rax +mov %r11,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? r0 += squarerax +# asm 1: add squarerax=int64#7 +# asm 2: mov squarerax=%rax +mov %r12,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? 
r1 += squarerax +# asm 1: add squarerax=int64#7 +# asm 2: mov squarerax=%rax +mov %r13,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? r2 += squarerax +# asm 1: add squarerax=int64#7 +# asm 2: mov squarerax=%rax +mov %rcx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? r3 += squarerax +# asm 1: add squarer4=int64#2 +# asm 2: mov $0,>squarer4=%rsi +mov $0,%rsi + +# qhasm: squarer4 += squarerdx + carry +# asm 1: adc squarer4=int64#2 +# asm 2: imulq $38,squarer4=%rsi +imulq $38,%rsi,%rsi + +# qhasm: carry? r0 += squarer4 +# asm 1: add squarezero=int64#2 +# asm 2: mov $0,>squarezero=%rsi +mov $0,%rsi + +# qhasm: squarei38 = 38 +# asm 1: mov $38,>squarei38=int64#3 +# asm 2: mov $38,>squarei38=%rdx +mov $38,%rdx + +# qhasm: squarezero = squarei38 if carry +# asm 1: cmovc caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-64-24k/fe25519_sub.s b/src/ed25519-supercop-amd64-64-24k/fe25519_sub.s new file mode 100644 index 0000000..0f31a82 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/fe25519_sub.s @@ -0,0 +1,189 @@ + +# qhasm: int64 rp + +# qhasm: int64 xp + +# qhasm: int64 yp + +# qhasm: input rp + +# qhasm: input xp + +# qhasm: input yp + +# qhasm: int64 r0 + +# qhasm: int64 r1 + +# qhasm: int64 r2 + +# qhasm: int64 r3 + +# qhasm: int64 subt0 + +# qhasm: int64 subt1 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_24k_batch_fe25519_sub +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_24k_batch_fe25519_sub +.globl crypto_sign_ed25519_amd64_64_24k_batch_fe25519_sub +_crypto_sign_ed25519_amd64_64_24k_batch_fe25519_sub: +crypto_sign_ed25519_amd64_64_24k_batch_fe25519_sub: +mov %rsp,%r11 +and $31,%r11 +add $0,%r11 +sub %r11,%rsp + +# qhasm: r0 = *(uint64 *)(xp + 0) +# asm 1: movq 0(r0=int64#4 +# asm 2: movq 0(r0=%rcx +movq 0(%rsi),%rcx + +# qhasm: r1 = *(uint64 *)(xp + 8) +# asm 1: movq 8(r1=int64#5 +# asm 2: movq 8(r1=%r8 +movq 
8(%rsi),%r8 + +# qhasm: r2 = *(uint64 *)(xp + 16) +# asm 1: movq 16(r2=int64#6 +# asm 2: movq 16(r2=%r9 +movq 16(%rsi),%r9 + +# qhasm: r3 = *(uint64 *)(xp + 24) +# asm 1: movq 24(r3=int64#2 +# asm 2: movq 24(r3=%rsi +movq 24(%rsi),%rsi + +# qhasm: carry? r0 -= *(uint64 *)(yp + 0) +# asm 1: subq 0(subt0=int64#3 +# asm 2: mov $0,>subt0=%rdx +mov $0,%rdx + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#7 +# asm 2: mov $38,>subt1=%rax +mov $38,%rax + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae v[0] = *(unsigned long long *)x; + r->v[1] = *(((unsigned long long *)x)+1); + r->v[2] = *(((unsigned long long *)x)+2); + r->v[3] = *(((unsigned long long *)x)+3); + r->v[3] &= 0x7fffffffffffffffULL; +} diff --git a/src/ed25519-supercop-amd64-64-24k/ge25519.h b/src/ed25519-supercop-amd64-64-24k/ge25519.h new file mode 100644 index 0000000..fc4aa2a --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/ge25519.h @@ -0,0 +1,95 @@ +#ifndef GE25519_H +#define GE25519_H + +#include "fe25519.h" +#include "sc25519.h" + +#define ge25519 crypto_sign_ed25519_amd64_64_24k_batch_ge25519 +#define ge25519_base crypto_sign_ed25519_amd64_64_24k_batch_ge25519_base +#define ge25519_unpackneg_vartime crypto_sign_ed25519_amd64_64_24k_batch_unpackneg_vartime +#define ge25519_pack crypto_sign_ed25519_amd64_64_24k_batch_pack +#define ge25519_isneutral_vartime crypto_sign_ed25519_amd64_64_24k_batch_isneutral_vartime +#define ge25519_add crypto_sign_ed25519_amd64_64_24k_batch_ge25519_add +#define ge25519_double crypto_sign_ed25519_amd64_64_24k_batch_ge25519_double +#define ge25519_double_scalarmult_vartime crypto_sign_ed25519_amd64_64_24k_batch_double_scalarmult_vartime +#define ge25519_multi_scalarmult_vartime crypto_sign_ed25519_amd64_64_24k_batch_ge25519_multi_scalarmult_vartime +#define ge25519_scalarmult_base crypto_sign_ed25519_amd64_64_24k_batch_scalarmult_base +#define ge25519_p1p1_to_p2 crypto_sign_ed25519_amd64_64_24k_batch_ge25519_p1p1_to_p2 +#define ge25519_p1p1_to_p3 crypto_sign_ed25519_amd64_64_24k_batch_ge25519_p1p1_to_p3 +#define ge25519_add_p1p1 crypto_sign_ed25519_amd64_64_24k_batch_ge25519_add_p1p1 +#define ge25519_dbl_p1p1 crypto_sign_ed25519_amd64_64_24k_batch_ge25519_dbl_p1p1 +#define choose_t crypto_sign_ed25519_amd64_64_24k_batch_choose_t +#define ge25519_nielsadd2 crypto_sign_ed25519_amd64_64_24k_batch_ge25519_nielsadd2 +#define ge25519_nielsadd_p1p1 crypto_sign_ed25519_amd64_64_24k_batch_ge25519_nielsadd_p1p1 +#define ge25519_pnielsadd_p1p1 crypto_sign_ed25519_amd64_64_24k_batch_ge25519_pnielsadd_p1p1 + + +#define ge25519_p3 ge25519 + +typedef struct +{ + fe25519 x; + fe25519 y; + fe25519 z; + fe25519 t; +} ge25519; + +typedef struct +{ + fe25519 x; + fe25519 z; + fe25519 y; + fe25519 t; +} ge25519_p1p1; + +typedef struct +{ + fe25519 x; + fe25519 y; + fe25519 z; +} ge25519_p2; + +typedef struct +{ + fe25519 ysubx; + fe25519 xaddy; + fe25519 t2d; +} ge25519_niels; + +typedef struct +{ + fe25519 ysubx; + fe25519 xaddy; + fe25519 z; + fe25519 t2d; +} ge25519_pniels; + +extern void ge25519_p1p1_to_p2(ge25519_p2 *r, const ge25519_p1p1 *p); +extern void ge25519_p1p1_to_p3(ge25519_p3 *r, const ge25519_p1p1 *p); +extern void ge25519_add_p1p1(ge25519_p1p1 *r, const ge25519_p3 *p, const ge25519_p3 *q); +extern void ge25519_dbl_p1p1(ge25519_p1p1 *r, const ge25519_p2 *p); +extern void choose_t(ge25519_niels *t, unsigned long long pos, signed long long b, const ge25519_niels *base_multiples); +extern void ge25519_nielsadd2(ge25519_p3 *r, const ge25519_niels *q); +extern void 
ge25519_nielsadd_p1p1(ge25519_p1p1 *r, const ge25519_p3 *p, const ge25519_niels *q); +extern void ge25519_pnielsadd_p1p1(ge25519_p1p1 *r, const ge25519_p3 *p, const ge25519_pniels *q); + +extern const ge25519 ge25519_base; + +extern int ge25519_unpackneg_vartime(ge25519 *r, const unsigned char p[32]); + +extern void ge25519_pack(unsigned char r[32], const ge25519 *p); + +extern int ge25519_isneutral_vartime(const ge25519 *p); + +extern void ge25519_add(ge25519 *r, const ge25519 *p, const ge25519 *q); + +extern void ge25519_double(ge25519 *r, const ge25519 *p); + +/* computes [s1]p1 + [s2]ge25519_base */ +extern void ge25519_double_scalarmult_vartime(ge25519 *r, const ge25519 *p1, const sc25519 *s1, const sc25519 *s2); + +extern void ge25519_multi_scalarmult_vartime(ge25519 *r, ge25519 *p, sc25519 *s, const unsigned long long npoints); + +extern void ge25519_scalarmult_base(ge25519 *r, const sc25519 *s); + +#endif diff --git a/src/ed25519-supercop-amd64-64-24k/ge25519_add.c b/src/ed25519-supercop-amd64-64-24k/ge25519_add.c new file mode 100644 index 0000000..c4d1c68 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/ge25519_add.c @@ -0,0 +1,8 @@ +#include "ge25519.h" + +void ge25519_add(ge25519_p3 *r, const ge25519_p3 *p, const ge25519_p3 *q) +{ + ge25519_p1p1 grp1p1; + ge25519_add_p1p1(&grp1p1, p, q); + ge25519_p1p1_to_p3(r, &grp1p1); +} diff --git a/src/ed25519-supercop-amd64-64-24k/ge25519_add_p1p1.s b/src/ed25519-supercop-amd64-64-24k/ge25519_add_p1p1.s new file mode 100644 index 0000000..092bd78 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/ge25519_add_p1p1.s @@ -0,0 +1,4452 @@ + +# qhasm: int64 rp + +# qhasm: int64 pp + +# qhasm: int64 qp + +# qhasm: input rp + +# qhasm: input pp + +# qhasm: input qp + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 a0 + +# qhasm: int64 a1 + +# qhasm: int64 a2 + +# qhasm: int64 a3 + +# qhasm: stack64 a0_stack + +# qhasm: stack64 a1_stack + +# qhasm: stack64 a2_stack + +# qhasm: stack64 a3_stack + +# qhasm: int64 b0 + +# qhasm: int64 b1 + +# qhasm: int64 b2 + +# qhasm: int64 b3 + +# qhasm: stack64 b0_stack + +# qhasm: stack64 b1_stack + +# qhasm: stack64 b2_stack + +# qhasm: stack64 b3_stack + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: stack64 c0_stack + +# qhasm: stack64 c1_stack + +# qhasm: stack64 c2_stack + +# qhasm: stack64 c3_stack + +# qhasm: int64 d0 + +# qhasm: int64 d1 + +# qhasm: int64 d2 + +# qhasm: int64 d3 + +# qhasm: stack64 d0_stack + +# qhasm: stack64 d1_stack + +# qhasm: stack64 d2_stack + +# qhasm: stack64 d3_stack + +# qhasm: int64 t10 + +# qhasm: int64 t11 + +# qhasm: int64 t12 + +# qhasm: int64 t13 + +# qhasm: stack64 t10_stack + +# qhasm: stack64 t11_stack + +# qhasm: stack64 t12_stack + +# qhasm: stack64 t13_stack + +# qhasm: int64 t20 + +# qhasm: int64 t21 + +# qhasm: int64 t22 + +# qhasm: int64 t23 + +# qhasm: stack64 t20_stack + +# qhasm: stack64 t21_stack + +# qhasm: stack64 t22_stack + +# qhasm: 
stack64 t23_stack + +# qhasm: int64 rx0 + +# qhasm: int64 rx1 + +# qhasm: int64 rx2 + +# qhasm: int64 rx3 + +# qhasm: int64 ry0 + +# qhasm: int64 ry1 + +# qhasm: int64 ry2 + +# qhasm: int64 ry3 + +# qhasm: int64 rz0 + +# qhasm: int64 rz1 + +# qhasm: int64 rz2 + +# qhasm: int64 rz3 + +# qhasm: int64 rt0 + +# qhasm: int64 rt1 + +# qhasm: int64 rt2 + +# qhasm: int64 rt3 + +# qhasm: int64 x0 + +# qhasm: int64 x1 + +# qhasm: int64 x2 + +# qhasm: int64 x3 + +# qhasm: int64 mulr4 + +# qhasm: int64 mulr5 + +# qhasm: int64 mulr6 + +# qhasm: int64 mulr7 + +# qhasm: int64 mulrax + +# qhasm: int64 mulrdx + +# qhasm: int64 mulx0 + +# qhasm: int64 mulx1 + +# qhasm: int64 mulx2 + +# qhasm: int64 mulx3 + +# qhasm: int64 mulc + +# qhasm: int64 mulzero + +# qhasm: int64 muli38 + +# qhasm: int64 addt0 + +# qhasm: int64 addt1 + +# qhasm: int64 subt0 + +# qhasm: int64 subt1 + +# qhasm: enter crypto_sign_ed25519_amd64_64_24k_batch_ge25519_add_p1p1 +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_24k_batch_ge25519_add_p1p1 +.globl crypto_sign_ed25519_amd64_64_24k_batch_ge25519_add_p1p1 +_crypto_sign_ed25519_amd64_64_24k_batch_ge25519_add_p1p1: +crypto_sign_ed25519_amd64_64_24k_batch_ge25519_add_p1p1: +mov %rsp,%r11 +and $31,%r11 +add $192,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: qp = qp +# asm 1: mov qp=int64#4 +# asm 2: mov qp=%rcx +mov %rdx,%rcx + +# qhasm: a0 = *(uint64 *)(pp + 32) +# asm 1: movq 32(a0=int64#3 +# asm 2: movq 32(a0=%rdx +movq 32(%rsi),%rdx + +# qhasm: a1 = *(uint64 *)(pp + 40) +# asm 1: movq 40(a1=int64#5 +# asm 2: movq 40(a1=%r8 +movq 40(%rsi),%r8 + +# qhasm: a2 = *(uint64 *)(pp + 48) +# asm 1: movq 48(a2=int64#6 +# asm 2: movq 48(a2=%r9 +movq 48(%rsi),%r9 + +# qhasm: a3 = *(uint64 *)(pp + 56) +# asm 1: movq 56(a3=int64#7 +# asm 2: movq 56(a3=%rax +movq 56(%rsi),%rax + +# qhasm: b0 = a0 +# asm 1: mov b0=int64#8 +# asm 2: mov b0=%r10 +mov %rdx,%r10 + +# qhasm: b1 = a1 +# asm 1: mov b1=int64#9 +# asm 2: mov b1=%r11 +mov %r8,%r11 + +# qhasm: b2 = a2 +# asm 1: mov b2=int64#10 +# asm 2: mov b2=%r12 +mov %r9,%r12 + +# qhasm: b3 = a3 +# asm 1: mov b3=int64#11 +# asm 2: mov b3=%r13 +mov %rax,%r13 + +# qhasm: carry? 
a0 -= *(uint64 *)(pp + 0) +# asm 1: subq 0(subt0=int64#12 +# asm 2: mov $0,>subt0=%r14 +mov $0,%r14 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#13 +# asm 2: mov $38,>subt1=%r15 +mov $38,%r15 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae addt0=int64#12 +# asm 2: mov $0,>addt0=%r14 +mov $0,%r14 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#13 +# asm 2: mov $38,>addt1=%r15 +mov $38,%r15 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae a0_stack=stack64#8 +# asm 2: movq a0_stack=56(%rsp) +movq %rdx,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq a1_stack=stack64#9 +# asm 2: movq a1_stack=64(%rsp) +movq %r8,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq a2_stack=stack64#10 +# asm 2: movq a2_stack=72(%rsp) +movq %r9,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq a3_stack=stack64#11 +# asm 2: movq a3_stack=80(%rsp) +movq %rax,80(%rsp) + +# qhasm: b0_stack = b0 +# asm 1: movq b0_stack=stack64#12 +# asm 2: movq b0_stack=88(%rsp) +movq %r10,88(%rsp) + +# qhasm: b1_stack = b1 +# asm 1: movq b1_stack=stack64#13 +# asm 2: movq b1_stack=96(%rsp) +movq %r11,96(%rsp) + +# qhasm: b2_stack = b2 +# asm 1: movq b2_stack=stack64#14 +# asm 2: movq b2_stack=104(%rsp) +movq %r12,104(%rsp) + +# qhasm: b3_stack = b3 +# asm 1: movq b3_stack=stack64#15 +# asm 2: movq b3_stack=112(%rsp) +movq %r13,112(%rsp) + +# qhasm: t10 = *(uint64 *)(qp + 32) +# asm 1: movq 32(t10=int64#3 +# asm 2: movq 32(t10=%rdx +movq 32(%rcx),%rdx + +# qhasm: t11 = *(uint64 *)(qp + 40) +# asm 1: movq 40(t11=int64#5 +# asm 2: movq 40(t11=%r8 +movq 40(%rcx),%r8 + +# qhasm: t12 = *(uint64 *)(qp + 48) +# asm 1: movq 48(t12=int64#6 +# asm 2: movq 48(t12=%r9 +movq 48(%rcx),%r9 + +# qhasm: t13 = *(uint64 *)(qp + 56) +# asm 1: movq 56(t13=int64#7 +# asm 2: movq 56(t13=%rax +movq 56(%rcx),%rax + +# qhasm: t20 = t10 +# asm 1: mov t20=int64#8 +# asm 2: mov t20=%r10 +mov %rdx,%r10 + +# qhasm: t21 = t11 +# asm 1: mov t21=int64#9 +# asm 2: mov t21=%r11 +mov %r8,%r11 + +# qhasm: t22 = t12 +# asm 1: mov t22=int64#10 +# asm 2: mov t22=%r12 +mov %r9,%r12 + +# qhasm: t23 = t13 +# asm 1: mov t23=int64#11 +# asm 2: mov t23=%r13 +mov %rax,%r13 + +# qhasm: carry? 
t10 -= *(uint64 *) (qp + 0) +# asm 1: subq 0(subt0=int64#12 +# asm 2: mov $0,>subt0=%r14 +mov $0,%r14 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#13 +# asm 2: mov $38,>subt1=%r15 +mov $38,%r15 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae addt0=int64#12 +# asm 2: mov $0,>addt0=%r14 +mov $0,%r14 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#13 +# asm 2: mov $38,>addt1=%r15 +mov $38,%r15 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae t10_stack=stack64#16 +# asm 2: movq t10_stack=120(%rsp) +movq %rdx,120(%rsp) + +# qhasm: t11_stack = t11 +# asm 1: movq t11_stack=stack64#17 +# asm 2: movq t11_stack=128(%rsp) +movq %r8,128(%rsp) + +# qhasm: t12_stack = t12 +# asm 1: movq t12_stack=stack64#18 +# asm 2: movq t12_stack=136(%rsp) +movq %r9,136(%rsp) + +# qhasm: t13_stack = t13 +# asm 1: movq t13_stack=stack64#19 +# asm 2: movq t13_stack=144(%rsp) +movq %rax,144(%rsp) + +# qhasm: t20_stack = t20 +# asm 1: movq t20_stack=stack64#20 +# asm 2: movq t20_stack=152(%rsp) +movq %r10,152(%rsp) + +# qhasm: t21_stack = t21 +# asm 1: movq t21_stack=stack64#21 +# asm 2: movq t21_stack=160(%rsp) +movq %r11,160(%rsp) + +# qhasm: t22_stack = t22 +# asm 1: movq t22_stack=stack64#22 +# asm 2: movq t22_stack=168(%rsp) +movq %r12,168(%rsp) + +# qhasm: t23_stack = t23 +# asm 1: movq t23_stack=stack64#23 +# asm 2: movq t23_stack=176(%rsp) +movq %r13,176(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = a0_stack +# asm 1: movq mulx0=int64#10 +# asm 2: movq mulx0=%r12 +movq 56(%rsp),%r12 + +# qhasm: mulrax = t10_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul a0=int64#11 +# asm 2: mov a0=%r13 +mov %rax,%r13 + +# qhasm: a1 = mulrdx +# asm 1: mov a1=int64#12 +# asm 2: mov a1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = t11_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 128(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul a2=int64#13 +# asm 2: mov $0,>a2=%r15 +mov $0,%r15 + +# qhasm: a2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul a3=int64#14 +# asm 2: mov $0,>a3=%rbx +mov $0,%rbx + +# qhasm: a3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#10 +# asm 2: movq mulx1=%r12 +movq 64(%rsp),%r12 + +# qhasm: mulrax = t10_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 128(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov 
$0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#10 +# asm 2: movq mulx2=%r12 +movq 72(%rsp),%r12 + +# qhasm: mulrax = t10_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 128(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#10 +# asm 2: movq mulx3=%r12 +movq 80(%rsp),%r12 + +# qhasm: mulrax = t10_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 128(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? a0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? a1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? a2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r11,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? a3 += mulrax +# asm 1: add mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#3 +# asm 2: imulq $38,mulr4=%rdx +imulq $38,%r8,%rdx + +# qhasm: carry? 
a0 += mulr4 +# asm 1: add mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#5 +# asm 2: mov $38,>muli38=%r8 +mov $38,%r8 + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc a0_stack=stack64#8 +# asm 2: movq a0_stack=56(%rsp) +movq %r13,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq a1_stack=stack64#9 +# asm 2: movq a1_stack=64(%rsp) +movq %r14,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq a2_stack=stack64#10 +# asm 2: movq a2_stack=72(%rsp) +movq %r15,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq a3_stack=stack64#11 +# asm 2: movq a3_stack=80(%rsp) +movq %rbx,80(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = b0_stack +# asm 1: movq mulx0=int64#10 +# asm 2: movq mulx0=%r12 +movq 88(%rsp),%r12 + +# qhasm: mulrax = t20_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rx0=int64#11 +# asm 2: mov rx0=%r13 +mov %rax,%r13 + +# qhasm: rx1 = mulrdx +# asm 1: mov rx1=int64#12 +# asm 2: mov rx1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = t21_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rx2=int64#13 +# asm 2: mov $0,>rx2=%r15 +mov $0,%r15 + +# qhasm: rx2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rx3=int64#14 +# asm 2: mov $0,>rx3=%rbx +mov $0,%rbx + +# qhasm: rx3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#10 +# asm 2: movq mulx1=%r12 +movq 96(%rsp),%r12 + +# qhasm: mulrax = t20_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#10 +# asm 2: movq mulx2=%r12 +movq 104(%rsp),%r12 + +# qhasm: mulrax = t20_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov 
$0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#10 +# asm 2: movq mulx3=%r12 +movq 112(%rsp),%r12 + +# qhasm: mulrax = t20_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r11,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx3 += mulrax +# asm 1: add mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#3 +# asm 2: imulq $38,mulr4=%rdx +imulq $38,%r8,%rdx + +# qhasm: carry? rx0 += mulr4 +# asm 1: add mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#5 +# asm 2: mov $38,>muli38=%r8 +mov $38,%r8 + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc ry0=int64#3 +# asm 2: mov ry0=%rdx +mov %r13,%rdx + +# qhasm: ry1 = rx1 +# asm 1: mov ry1=int64#5 +# asm 2: mov ry1=%r8 +mov %r14,%r8 + +# qhasm: ry2 = rx2 +# asm 1: mov ry2=int64#6 +# asm 2: mov ry2=%r9 +mov %r15,%r9 + +# qhasm: ry3 = rx3 +# asm 1: mov ry3=int64#7 +# asm 2: mov ry3=%rax +mov %rbx,%rax + +# qhasm: carry? 
ry0 += a0_stack +# asm 1: addq addt0=int64#8 +# asm 2: mov $0,>addt0=%r10 +mov $0,%r10 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#9 +# asm 2: mov $38,>addt1=%r11 +mov $38,%r11 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae subt0=int64#8 +# asm 2: mov $0,>subt0=%r10 +mov $0,%r10 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#9 +# asm 2: mov $38,>subt1=%r11 +mov $38,%r11 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = *(uint64 *)(pp + 96) +# asm 1: movq 96(mulx0=int64#10 +# asm 2: movq 96(mulx0=%r12 +movq 96(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul c0=int64#11 +# asm 2: mov c0=%r13 +mov %rax,%r13 + +# qhasm: c1 = mulrdx +# asm 1: mov c1=int64#12 +# asm 2: mov c1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(qp + 104) +# asm 1: movq 104(mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul c2=int64#13 +# asm 2: mov $0,>c2=%r15 +mov $0,%r15 + +# qhasm: c2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul c3=int64#14 +# asm 2: mov $0,>c3=%rbx +mov $0,%rbx + +# qhasm: c3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#10 +# asm 2: movq 104(mulx1=%r12 +movq 104(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#10 +# asm 2: movq 112(mulx2=%r12 +movq 112(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rcx),%rax + +# qhasm: 
(uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#10 +# asm 2: movq 120(mulx3=%r12 +movq 120(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r11,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c3 += mulrax +# asm 1: add mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#3 +# asm 2: imulq $38,mulr4=%rdx +imulq $38,%r8,%rdx + +# qhasm: carry? 
c0 += mulr4 +# asm 1: add mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#5 +# asm 2: mov $38,>muli38=%r8 +mov $38,%r8 + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc c0_stack=stack64#8 +# asm 2: movq c0_stack=56(%rsp) +movq %r13,56(%rsp) + +# qhasm: c1_stack = c1 +# asm 1: movq c1_stack=stack64#9 +# asm 2: movq c1_stack=64(%rsp) +movq %r14,64(%rsp) + +# qhasm: c2_stack = c2 +# asm 1: movq c2_stack=stack64#10 +# asm 2: movq c2_stack=72(%rsp) +movq %r15,72(%rsp) + +# qhasm: c3_stack = c3 +# asm 1: movq c3_stack=stack64#11 +# asm 2: movq c3_stack=80(%rsp) +movq %rbx,80(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = c0_stack +# asm 1: movq mulx0=int64#10 +# asm 2: movq mulx0=%r12 +movq 56(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_EC2D0 +# asm 1: movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D0,>mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D0,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D0,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul c0=int64#11 +# asm 2: mov c0=%r13 +mov %rax,%r13 + +# qhasm: c1 = mulrdx +# asm 1: mov c1=int64#12 +# asm 2: mov c1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_EC2D1 +# asm 1: movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D1,>mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D1,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D1,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul c2=int64#13 +# asm 2: mov $0,>c2=%r15 +mov $0,%r15 + +# qhasm: c2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D2,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D2,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul c3=int64#14 +# asm 2: mov $0,>c3=%rbx +mov $0,%rbx + +# qhasm: c3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D3,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D3,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#10 +# asm 2: movq mulx1=%r12 +movq 64(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_EC2D0 +# asm 1: movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D0,>mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D0,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D0,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D1,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D1,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D2,>mulrax=%rax +movq 
crypto_sign_ed25519_amd64_64_24k_batch_EC2D2,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D3,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D3,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#10 +# asm 2: movq mulx2=%r12 +movq 72(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_EC2D0 +# asm 1: movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D0,>mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D0,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D0,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D1,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D1,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D2,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D2,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D3,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D3,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#10 +# asm 2: movq mulx3=%r12 +movq 80(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_EC2D0 +# asm 1: movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D0,>mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D0,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D0,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D1,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D1,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D2,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D2,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D3,>mulrax=%rax +movq crypto_sign_ed25519_amd64_64_24k_batch_EC2D3,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? 
c0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r11,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c3 += mulrax +# asm 1: add mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#3 +# asm 2: imulq $38,mulr4=%rdx +imulq $38,%r8,%rdx + +# qhasm: carry? c0 += mulr4 +# asm 1: add mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#5 +# asm 2: mov $38,>muli38=%r8 +mov $38,%r8 + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc c0_stack=stack64#8 +# asm 2: movq c0_stack=56(%rsp) +movq %r13,56(%rsp) + +# qhasm: c1_stack = c1 +# asm 1: movq c1_stack=stack64#9 +# asm 2: movq c1_stack=64(%rsp) +movq %r14,64(%rsp) + +# qhasm: c2_stack = c2 +# asm 1: movq c2_stack=stack64#10 +# asm 2: movq c2_stack=72(%rsp) +movq %r15,72(%rsp) + +# qhasm: c3_stack = c3 +# asm 1: movq c3_stack=stack64#11 +# asm 2: movq c3_stack=80(%rsp) +movq %rbx,80(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = *(uint64 *)(pp + 64) +# asm 1: movq 64(mulx0=int64#10 +# asm 2: movq 64(mulx0=%r12 +movq 64(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rt0=int64#11 +# asm 2: mov rt0=%r13 +mov %rax,%r13 + +# qhasm: rt1 = mulrdx +# asm 1: mov rt1=int64#12 +# asm 2: mov rt1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(qp + 72) +# asm 1: movq 72(mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rt2=int64#13 +# asm 2: mov $0,>rt2=%r15 +mov $0,%r15 + +# qhasm: rt2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rt3=int64#14 +# asm 2: mov $0,>rt3=%rbx +mov $0,%rbx + +# qhasm: rt3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#10 +# asm 2: movq 72(mulx1=%r12 +movq 72(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: 
(uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#10 +# asm 2: movq 80(mulx2=%r12 +movq 80(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#2 +# asm 2: movq 88(mulx3=%rsi +movq 88(%rsi),%rsi + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#10 +# asm 2: mov $0,>mulc=%r12 +mov $0,%r12 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#10 +# asm 2: mov $0,>mulc=%r12 +mov $0,%r12 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#10 +# asm 2: mov $0,>mulc=%r12 +mov $0,%r12 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rt0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rt1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rt2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r11,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? 
rt3 += mulrax +# asm 1: add mulr4=int64#2 +# asm 2: mov $0,>mulr4=%rsi +mov $0,%rsi + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#2 +# asm 2: imulq $38,mulr4=%rsi +imulq $38,%rsi,%rsi + +# qhasm: carry? rt0 += mulr4 +# asm 1: add mulzero=int64#2 +# asm 2: mov $0,>mulzero=%rsi +mov $0,%rsi + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#3 +# asm 2: mov $38,>muli38=%rdx +mov $38,%rdx + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc addt0=int64#2 +# asm 2: mov $0,>addt0=%rsi +mov $0,%rsi + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#3 +# asm 2: mov $38,>addt1=%rdx +mov $38,%rdx + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae rz0=int64#2 +# asm 2: mov rz0=%rsi +mov %r13,%rsi + +# qhasm: rz1 = rt1 +# asm 1: mov rz1=int64#3 +# asm 2: mov rz1=%rdx +mov %r14,%rdx + +# qhasm: rz2 = rt2 +# asm 1: mov rz2=int64#4 +# asm 2: mov rz2=%rcx +mov %r15,%rcx + +# qhasm: rz3 = rt3 +# asm 1: mov rz3=int64#5 +# asm 2: mov rz3=%r8 +mov %rbx,%r8 + +# qhasm: carry? rz0 += c0_stack +# asm 1: addq addt0=int64#6 +# asm 2: mov $0,>addt0=%r9 +mov $0,%r9 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#7 +# asm 2: mov $38,>addt1=%rax +mov $38,%rax + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae subt0=int64#6 +# asm 2: mov $0,>subt0=%r9 +mov $0,%r9 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#7 +# asm 2: mov $38,>subt1=%rax +mov $38,%rax + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-64-24k/ge25519_base.c b/src/ed25519-supercop-amd64-64-24k/ge25519_base.c new file mode 100644 index 0000000..a7ae978 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/ge25519_base.c @@ -0,0 +1,7 @@ +#include "ge25519.h" + +const ge25519 ge25519_base = {{{0xC9562D608F25D51A, 0x692CC7609525A7B2, 0xC0A4E231FDD6DC5C, 0x216936D3CD6E53FE}}, + {{0x6666666666666658, 0x6666666666666666, 0x6666666666666666, 0x6666666666666666}}, + {{0x0000000000000001, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}}, + {{0x6DDE8AB3A5B7DDA3, 0x20F09F80775152F5, 0x66EA4E8E64ABE37D, 0x67875F0FD78B7665}}}; + diff --git a/src/ed25519-supercop-amd64-64-24k/ge25519_base_niels_smalltables.data b/src/ed25519-supercop-amd64-64-24k/ge25519_base_niels_smalltables.data new file mode 100644 index 0000000..a31f6f2 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/ge25519_base_niels_smalltables.data @@ -0,0 +1,768 @@ +{{{0x9d103905d740913e, 0xfd399f05d140beb3, 0xa5c18434688f8a09, 0x44fd2f9298f81267}}, + {{0x2fbc93c6f58c3b85, 0xcf932dc6fb8c0e19, 0x270b4898643d42c2, 0x07cf9d3a33d4ba65}}, + {{0xdbbd15674b6fbb59, 0x41e13f00eea2a5ea, 0xcdd49d1cc957c6fa, 0x4f0ebe1faf16ecca}}}, +{{{0x8a99a56042b4d5a8, 0x8f2b810c4e60acf6, 0xe09e236bb16e37aa,
0x6bb595a669c92555}}, + {{0x9224e7fc933c71d7, 0x9f469d967a0ff5b5, 0x5aa69a65e1d60702, 0x590c063fa87d2e2e}}, + {{0x6e347eaadad36802, 0xbaf3599383ee4805, 0x3bcabe10e6076826, 0x49314f0a165ed1b8}}}, +{{{0x56611fe8a4fcd265, 0x3bd353fde5c1ba7d, 0x8131f31a214bd6bd, 0x2ab91587555bda62}}, + {{0xaf25b0a84cee9730, 0x025a8430e8864b8a, 0xc11b50029f016732, 0x7a164e1b9a80f8f4}}, + {{0x9bf211f4f1674834, 0xb84e6b17f62df895, 0xd7de6f075b722a4e, 0x549a04b963bb2a21}}}, +{{{0x95fe050a056818bf, 0x327e89715660faa9, 0xc3e8e3cd06a05073, 0x27933f4c7445a49a}}, + {{0x287351b98efc099f, 0x6765c6f47dfd2538, 0xca348d3dfb0a9265, 0x680e910321e58727}}, + {{0xbf1e45ece51426b0, 0xe32bc63d6dba0f94, 0xe42974d58cf852c0, 0x44f079b1b0e64c18}}}, +{{{0x7f9182c3a447d6ba, 0xd50014d14b2729b7, 0xe33cf11cb864a087, 0x154a7e73eb1b55f3}}, + {{0xa212bc4408a5bb33, 0x8d5048c3c75eed02, 0xdd1beb0c5abfec44, 0x2945ccf146e206eb}}, + {{0xc832a179e7d003b3, 0x5f729d0a00124d7e, 0x62c1d4a10e6d8ff3, 0x68b8ac5938b27a98}}}, +{{{0x499806b67b7d8ca4, 0x575be28427d22739, 0xbb085ce7204553b9, 0x38b64c41ae417884}}, + {{0x3a0ceeeb77157131, 0x9b27158900c8af88, 0x8065b668da59a736, 0x51e57bb6a2cc38bd}}, + {{0x8f9dad91689de3a4, 0x175f2428f8fb9137, 0x050ab5329fcfb988, 0x7865dfa21354c09f}}}, +{{{0xba6f2c9aaa3221b1, 0x6ca021533bba23a7, 0x9dea764f92192c3a, 0x1d6edd5d2e5317e0}}, + {{0x6b1a5cd0944ea3bf, 0x7470353ab39dc0d2, 0x71b2528228542e49, 0x461bea69283c927e}}, + {{0x217a8aacab0fda36, 0xa528c6543d3549c8, 0x37d05b8b13ab7568, 0x233cef623a2cbc37}}}, +{{{0xe2a75dedf39234d9, 0x963d7680e1b558f9, 0x2c2741ac6e3c23fb, 0x3a9024a1320e01c3}}, + {{0x59b7596604dd3e8f, 0x6cb30377e288702c, 0xb1339c665ed9c323, 0x0915e76061bce52f}}, + {{0xdf7de835a834a37e, 0x8be19cda689857ea, 0x2c1185367167b326, 0x589eb3d9dbefd5c2}}}, +{{{0xed5b635449aa515e, 0xa865c49f0bc6823a, 0x850c1fe95b42d1c4, 0x30d76d6f03d315b9}}, + {{0x2eccdd0e632f9c1d, 0x51d0b69676893115, 0x52dfb76ba8637a58, 0x6dd37d49a00eef39}}, + {{0x6c4444172106e4c7, 0xfb53d680928d7f69, 0xb4739ea4694d3f26, 0x10c697112e864bb0}}}, +{{{0x6493c4277dbe5fde, 0x265d4fad19ad7ea2, 0x0e00dfc846304590, 0x25e61cabed66fe09}}, + {{0x0ca62aa08358c805, 0x6a3d4ae37a204247, 0x7464d3a63b11eddc, 0x03bf9baf550806ef}}, + {{0x3f13e128cc586604, 0x6f5873ecb459747e, 0xa0b63dedcc1268f5, 0x566d78634586e22c}}}, +{{{0x1637a49f9cc10834, 0xbc8e56d5a89bc451, 0x1cb5ec0f7f7fd2db, 0x33975bca5ecc35d9}}, + {{0xa1054285c65a2fd0, 0x6c64112af31667c3, 0x680ae240731aee58, 0x14fba5f34793b22a}}, + {{0x3cd746166985f7d4, 0x593e5e84c9c80057, 0x2fc3f2b67b61131e, 0x14829cea83fc526c}}}, +{{{0xff437b8497dd95c2, 0x6c744e30aa4eb5a7, 0x9e0c5d613c85e88b, 0x2fd9c71e5f758173}}, + {{0x21e70b2f4e71ecb8, 0xe656ddb940a477e3, 0xbf6556cece1d4f80, 0x05fc3bc4535d7b7e}}, + {{0x24b8b3ae52afdedd, 0x3495638ced3b30cf, 0x33a4bc83a9be8195, 0x373767475c651f04}}}, +{{{0x2fba99fd40d1add9, 0xb307166f96f4d027, 0x4363f05215f03bae, 0x1fbea56c3b18f999}}, + {{0x634095cb14246590, 0xef12144016c15535, 0x9e38140c8910bc60, 0x6bf5905730907c8c}}, + {{0x0fa778f1e1415b8a, 0x06409ff7bac3a77e, 0x6f52d7b89aa29a50, 0x02521cf67a635a56}}}, +{{{0x513fee0b0a9d5294, 0x8f98e75c0fdf5a66, 0xd4618688bfe107ce, 0x3fa00a7e71382ced}}, + {{0xb1146720772f5ee4, 0xe8f894b196079ace, 0x4af8224d00ac824a, 0x001753d9f7cd6cc4}}, + {{0x3c69232d963ddb34, 0x1dde87dab4973858, 0xaad7d1f9a091f285, 0x12b5fe2fa048edb6}}}, +{{{0x71f0fbc496fce34d, 0x73b9826badf35bed, 0xd2047261ff28c561, 0x749b76f96fb1206f}}, + {{0xdf2b7c26ad6f1e92, 0x4b66d323504b8913, 0x8c409dc0751c8bc3, 0x6f7e93c20796c7b8}}, + {{0x1f5af604aea6ae05, 0xc12351f1bee49c99, 0x61a808b5eeff6b66, 
0x0fcec10f01e02151}}}, +{{{0x644d58a649fe1e44, 0x21fcaea231ad777e, 0x02441c5a887fd0d2, 0x4901aa7183c511f3}}, + {{0x3df2d29dc4244e45, 0x2b020e7493d8de0a, 0x6cc8067e820c214d, 0x413779166feab90a}}, + {{0x08b1b7548c1af8f0, 0xce0f7a7c246299b4, 0xf760b0f91e06d939, 0x41bb887b726d1213}}}, +{{{0x40e87d44744346be, 0x1d48dad415b52b25, 0x7c3a8a18a13b603e, 0x4eb728c12fcdbdf7}}, + {{0x7e234c597c6691ae, 0x64889d3d0a85b4c8, 0xdae2c90c354afae7, 0x0a871e070c6a9e1d}}, + {{0x3301b5994bbc8989, 0x736bae3a5bdd4260, 0x0d61ade219d59e3c, 0x3ee7300f2685d464}}}, +{{{0xf5d255e49e7dd6b7, 0x8016115c610b1eac, 0x3c99975d92e187ca, 0x13815762979125c2}}, + {{0x43fa7947841e7518, 0xe5c6fa59639c46d7, 0xa1065e1de3052b74, 0x7d47c6a2cfb89030}}, + {{0x3fdad0148ef0d6e0, 0x9d3e749a91546f3c, 0x71ec621026bb8157, 0x148cf58d34c9ec80}}}, +{{{0x46a492f67934f027, 0x469984bef6840aa9, 0x5ca1bc2a89611854, 0x3ff2fa1ebd5dbbd4}}, + {{0xe2572f7d9ae4756d, 0x56c345bb88f3487f, 0x9fd10b6d6960a88d, 0x278febad4eaea1b9}}, + {{0xb1aa681f8c933966, 0x8c21949c20290c98, 0x39115291219d3c52, 0x4104dd02fe9c677b}}}, +{{{0x72b2bf5e1124422a, 0xa1fa0c3398a33ab5, 0x94cb6101fa52b666, 0x2c863b00afaf53d5}}, + {{0x81214e06db096ab8, 0x21a8b6c90ce44f35, 0x6524c12a409e2af5, 0x0165b5a48efca481}}, + {{0xf190a474a0846a76, 0x12eff984cd2f7cc0, 0x695e290658aa2b8f, 0x591b67d9bffec8b8}}}, +{{{0x312f0d1c80b49bfa, 0x5979515eabf3ec8a, 0x727033c09ef01c88, 0x3de02ec7ca8f7bcb}}, + {{0x99b9b3719f18b55d, 0xe465e5faa18c641e, 0x61081136c29f05ed, 0x489b4f867030128b}}, + {{0xd232102d3aeb92ef, 0xe16253b46116a861, 0x3d7eabe7190baa24, 0x49f5fbba496cbebf}}}, +{{{0x30949a108a5bcfd4, 0xdc40dd70bc6473eb, 0x92c294c1307c0d1c, 0x5604a86dcbfa6e74}}, + {{0x155d628c1e9c572e, 0x8a4d86acc5884741, 0x91a352f6515763eb, 0x06a1a6c28867515b}}, + {{0x7288d1d47c1764b6, 0x72541140e0418b51, 0x9f031a6018acf6d1, 0x20989e89fe2742c6}}}, +{{{0x499777fd3a2dcc7f, 0x32857c2ca54fd892, 0xa279d864d207e3a0, 0x0403ed1d0ca67e29}}, + {{0x1674278b85eaec2e, 0x5621dc077acb2bdf, 0x640a4c1661cbf45a, 0x730b9950f70595d3}}, + {{0xc94b2d35874ec552, 0xc5e6c8cf98246f8d, 0xf7cb46fa16c035ce, 0x5bd7454308303dcc}}}, +{{{0x7f9ad19528b24cc2, 0x7f6b54656335c181, 0x66b8b66e4fc07236, 0x133a78007380ad83}}, + {{0x85c4932115e7792a, 0xc64c89a2bdcdddc9, 0x9d1e3da8ada3d762, 0x5bb7db123067f82c}}, + {{0x0961f467c6ca62be, 0x04ec21d6211952ee, 0x182360779bd54770, 0x740dca6d58f0e0d2}}}, +{{{0xdf48ee0752cfce4e, 0xc3fffaf306ec08b7, 0x05710b2ab95459c4, 0x161d25fa963ea38d}}, + {{0x231a8c570478433c, 0xb7b5270ec281439d, 0xdbaa99eae3d9079f, 0x2c03f5256c2b03d9}}, + {{0x790f18757b53a47d, 0x307b0130cf0c5879, 0x31903d77257ef7f9, 0x699468bdbd96bbaf}}}, +{{{0xbd1f2f46f4dafecf, 0x7cef0114a47fd6f7, 0xd31ffdda4a47b37f, 0x525219a473905785}}, + {{0xd8dd3de66aa91948, 0x485064c22fc0d2cc, 0x9b48246634fdea2f, 0x293e1c4e6c4a2e3a}}, + {{0x376e134b925112e1, 0x703778b5dca15da0, 0xb04589af461c3111, 0x5b605c447f032823}}}, +{{{0xb965805920c47c89, 0xe7f0100c923b8fcc, 0x0001256502e2ef77, 0x24a76dcea8aeb3ee}}, + {{0x3be9fec6f0e7f04c, 0x866a579e75e34962, 0x5542ef161e1de61a, 0x2f12fef4cc5abdd5}}, + {{0x0a4522b2dfc0c740, 0x10d06e7f40c9a407, 0xc6cf144178cff668, 0x5e607b2518a43790}}}, +{{{0x58b31d8f6cdf1818, 0x35cfa74fc36258a2, 0xe1b3ff4f66e61d6e, 0x5067acab6ccdd5f7}}, + {{0xa02c431ca596cf14, 0xe3c42d40aed3e400, 0xd24526802e0f26db, 0x201f33139e457068}}, + {{0xfd527f6b08039d51, 0x18b14964017c0006, 0xd5220eb02e25a4a8, 0x397cba8862460375}}}, +{{{0x30c13093f05959b2, 0xe23aa18de9a97976, 0x222fd491721d5e26, 0x2339d320766e6c3a}}, + {{0x7815c3fbc81379e7, 0xa6619420dde12af1, 0xffa9c0f885a8fdd5, 
0x771b4022c1e1c252}}, + {{0xd87dd986513a2fa7, 0xf5ac9b71f9d4cf08, 0xd06bc31b1ea283b3, 0x331a189219971a76}}}, +{{{0xf5166f45fb4f80c6, 0x9c36c7de61c775cf, 0xe3d4e81b9041d91c, 0x31167c6b83bdfe21}}, + {{0x26512f3a9d7572af, 0x5bcbe28868074a9e, 0x84edc1c11180f7c4, 0x1ac9619ff649a67b}}, + {{0xf22b3842524b1068, 0x5068343bee9ce987, 0xfc9d71844a6250c8, 0x612436341f08b111}}}, +{{{0xd99d41db874e898d, 0x09fea5f16c07dc20, 0x793d2c67d00f9bbc, 0x46ebe2309e5eff40}}, + {{0x8b6349e31a2d2638, 0x9ddfb7009bd3fd35, 0x7f8bf1b8a3a06ba4, 0x1522aa3178d90445}}, + {{0x2c382f5369614938, 0xdafe409ab72d6d10, 0xe8c83391b646f227, 0x45fe70f50524306c}}}, +{{{0xda4875a6960c0b8c, 0x5b68d076ef0e2f20, 0x07fb51cf3d0b8fd4, 0x428d1623a0e392d4}}, + {{0x62f24920c8951491, 0x05f007c83f630ca2, 0x6fbb45d2f5c9d4b8, 0x16619f6db57a2245}}, + {{0x084f4a4401a308fd, 0xa82219c376a5caac, 0xdeb8de4643d1bc7d, 0x1d81592d60bd38c6}}}, +{{{0x61368756a60dac5f, 0x17e02f6aebabdc57, 0x7f193f2d4cce0f7d, 0x20234a7789ecdcf0}}, + {{0x8765b69f7b85c5e8, 0x6ff0678bd168bab2, 0x3a70e77c1d330f9b, 0x3a5f6d51b0af8e7c}}, + {{0x76d20db67178b252, 0x071c34f9d51ed160, 0xf62a4a20b3e41170, 0x7cd682353cffe366}}}, +{{{0x0be1a45bd887fab6, 0x2a846a32ba403b6e, 0xd9921012e96e6000, 0x2838c8863bdc0943}}, + {{0xa665cd6068acf4f3, 0x42d92d183cd7e3d3, 0x5759389d336025d9, 0x3ef0253b2b2cd8ff}}, + {{0xd16bb0cf4a465030, 0xfa496b4115c577ab, 0x82cfae8af4ab419d, 0x21dcb8a606a82812}}}, +{{{0x5c6004468c9d9fc8, 0x2540096ed42aa3cb, 0x125b4d4c12ee2f9c, 0x0bc3d08194a31dab}}, + {{0x9a8d00fabe7731ba, 0x8203607e629e1889, 0xb2cc023743f3d97f, 0x5d840dbf6c6f678b}}, + {{0x706e380d309fe18b, 0x6eb02da6b9e165c7, 0x57bbba997dae20ab, 0x3a4276232ac196dd}}}, +{{{0x4b42432c8a7084fa, 0x898a19e3dfb9e545, 0xbe9f00219c58e45d, 0x1ff177cea16debd1}}, + {{0x3bf8c172db447ecb, 0x5fcfc41fc6282dbd, 0x80acffc075aa15fe, 0x0770c9e824e1a9f9}}, + {{0xcf61d99a45b5b5fd, 0x860984e91b3a7924, 0xe7300919303e3e89, 0x39f264fd41500b1e}}}, +{{{0xa7ad3417dbe7e29c, 0xbd94376a2b9c139c, 0xa0e91b8e93597ba9, 0x1712d73468889840}}, + {{0xd19b4aabfe097be1, 0xa46dfce1dfe01929, 0xc3c908942ca6f1ff, 0x65c621272c35f14e}}, + {{0xe72b89f8ce3193dd, 0x4d103356a125c0bb, 0x0419a93d2e1cfe83, 0x22f9800ab19ce272}}}, +{{{0x605a368a3e9ef8cb, 0xe3e9c022a5504715, 0x553d48b05f24248f, 0x13f416cd647626e5}}, + {{0x42029fdd9a6efdac, 0xb912cebe34a54941, 0x640f64b987bdf37b, 0x4171a4d38598cab4}}, + {{0xfa2758aa99c94c8c, 0x23006f6fb000b807, 0xfbd291ddadda5392, 0x508214fa574bd1ab}}}, +{{{0xc20269153ed6fe4b, 0xa65a6739511d77c4, 0xcbde26462c14af94, 0x22f960ec6faba74b}}, + {{0x461a15bb53d003d6, 0xb2102888bcf3c965, 0x27c576756c683a5a, 0x3a7758a4c86cb447}}, + {{0x548111f693ae5076, 0x1dae21df1dfd54a6, 0x12248c90f3115e65, 0x5d9fd15f8de7f494}}}, +{{{0x031408d36d63727f, 0x6a379aefd7c7b533, 0xa9e18fc5ccaee24b, 0x332f35914f8fbed3}}, + {{0x3f244d2aeed7521e, 0x8e3a9028432e9615, 0xe164ba772e9c16d4, 0x3bc187fa47eb98d8}}, + {{0x6d470115ea86c20c, 0x998ab7cb6c46d125, 0xd77832b53a660188, 0x450d81ce906fba03}}}, +{{{0x6e7bb6a1a6205275, 0xaa4f21d7413c8e83, 0x6f56d155e88f5cb2, 0x2de25d4ba6345be1}}, + {{0xd074d8961cae743f, 0xf86d18f5ee1c63ed, 0x97bdc55be7f4ed29, 0x4cbad279663ab108}}, + {{0x80d19024a0d71fcd, 0xc525c20afb288af8, 0xb1a3974b5f3a6419, 0x7d7fbcefe2007233}}}, +{{{0xfaef1e6a266b2801, 0x866c68c4d5739f16, 0xf68a2fbc1b03762c, 0x5975435e87b75a8d}}, + {{0xcd7c5dc5f3c29094, 0xc781a29a2a9105ab, 0x80c61d36421c3058, 0x4f9cd196dcd8d4d7}}, + {{0x199297d86a7b3768, 0xd0d058241ad17a63, 0xba029cad5c1c0c17, 0x7ccdd084387a0307}}}, +{{{0xdca6422c6d260417, 0xae153d50948240bd, 0xa9c0c1b4fb68c677, 
0x428bd0ed61d0cf53}}, + {{0x9b0c84186760cc93, 0xcdae007a1ab32a99, 0xa88dec86620bda18, 0x3593ca848190ca44}}, + {{0x9213189a5e849aa7, 0xd4d8c33565d8facd, 0x8c52545b53fdbbd1, 0x27398308da2d63e6}}}, +{{{0x42c38d28435ed413, 0xbd50f3603278ccc9, 0xbb07ab1a79da03ef, 0x269597aebe8c3355}}, + {{0xb9a10e4c0a702453, 0x0fa25866d57d1bde, 0xffb9d9b5cd27daf7, 0x572c2945492c33fd}}, + {{0xc77fc745d6cd30be, 0xe4dfe8d3e3baaefb, 0xa22c8830aa5dda0c, 0x7f985498c05bca80}}}, +{{{0x3849ce889f0be117, 0x8005ad1b7b54a288, 0x3da3c39f23fc921c, 0x76c2ec470a31f304}}, + {{0xd35615520fbf6363, 0x08045a45cf4dfba6, 0xeec24fbc873fa0c2, 0x30f2653cd69b12e7}}, + {{0x8a08c938aac10c85, 0x46179b60db276bcb, 0xa920c01e0e6fac70, 0x2f1273f1596473da}}}, +{{{0x4739fc7c8ae01e11, 0xfd5274904a6aab9f, 0x41d98a8287728f2e, 0x5d9e572ad85b69f2}}, + {{0x30488bd755a70bc0, 0x06d6b5a4f1d442e7, 0xead1a69ebc596162, 0x38ac1997edc5f784}}, + {{0x0666b517a751b13b, 0x747d06867e9b858c, 0xacacc011454dde49, 0x22dfcd9cbfe9e69c}}}, +{{{0x8ddbd2e0c30d0cd9, 0xad8e665facbb4333, 0x8f6b258c322a961f, 0x6b2916c05448c1c7}}, + {{0x56ec59b4103be0a1, 0x2ee3baecd259f969, 0x797cb29413f5cd32, 0x0fe9877824cde472}}, + {{0x7edb34d10aba913b, 0x4ea3cd822e6dac0e, 0x66083dff6578f815, 0x4c303f307ff00a17}}}, +{{{0xd30a3bd617b28c85, 0xc5d377b739773bea, 0xc6c6e78c1e6a5cbf, 0x0d61b8f78b2ab7c4}}, + {{0x29fc03580dd94500, 0xecd27aa46fbbec93, 0x130a155fc2e2a7f8, 0x416b151ab706a1d5}}, + {{0x56a8d7efe9c136b0, 0xbd07e5cd58e44b20, 0xafe62fda1b57e0ab, 0x191a2af74277e8d2}}}, +{{{0xce16f74bc53c1431, 0x2b9725ce2072edde, 0xb8b9c36fb5b23ee7, 0x7e2e0e450b5cc908}}, + {{0x9fe62b434f460efb, 0xded303d4a63607d6, 0xf052210eb7a0da24, 0x237e7dbe00545b93}}, + {{0x013575ed6701b430, 0x231094e69f0bfd10, 0x75320f1583e47f22, 0x71afa699b11155e3}}}, +{{{0x65ce6f9b3953b61d, 0xc65839eaafa141e6, 0x0f435ffda9f759fe, 0x021142e9c2b1c28e}}, + {{0xea423c1c473b50d6, 0x51e87a1f3b38ef10, 0x9b84bf5fb2c9be95, 0x00731fbc78f89a1c}}, + {{0xe430c71848f81880, 0xbf960c225ecec119, 0xb6dae0836bba15e3, 0x4c4d6f3347e15808}}}, +{{{0x18f7eccfc17d1fc9, 0x6c75f5a651403c14, 0xdbde712bf7ee0cdf, 0x193fddaaa7e47a22}}, + {{0x2f0cddfc988f1970, 0x6b916227b0b9f51b, 0x6ec7b6c4779176be, 0x38bf9500a88f9fa8}}, + {{0x1fd2c93c37e8876f, 0xa2f61e5a18d1462c, 0x5080f58239241276, 0x6a6fb99ebf0d4969}}}, +{{{0x6a46c1bb560855eb, 0x2416bb38f893f09d, 0xd71d11378f71acc1, 0x75f76914a31896ea}}, + {{0xeeb122b5b6e423c6, 0x939d7010f286ff8e, 0x90a92a831dcf5d8c, 0x136fda9f42c5eb10}}, + {{0xf94cdfb1a305bdd1, 0x0f364b9d9ff82c08, 0x2a87d8a5c3bb588a, 0x022183510be8dcba}}}, +{{{0x4af766385ead2d14, 0xa08ed880ca7c5830, 0x0d13a6e610211e3d, 0x6a071ce17b806c03}}, + {{0x9d5a710143307a7f, 0xb063de9ec47da45f, 0x22bbfe52be927ad3, 0x1387c441fd40426c}}, + {{0xb5d3c3d187978af8, 0x722b5a3d7f0e4413, 0x0d7b4848bb477ca0, 0x3171b26aaf1edc92}}}, +{{{0xa92f319097564ca8, 0xff7bb84c2275e119, 0x4f55fe37a4875150, 0x221fd4873cf0835a}}, + {{0xa60db7d8b28a47d1, 0xa6bf14d61770a4f1, 0xd4a1f89353ddbd58, 0x6c514a63344243e9}}, + {{0x2322204f3a156341, 0xfb73e0e9ba0a032d, 0xfce0dd4c410f030e, 0x48daa596fb924aaa}}}, +{{{0x6eca8e665ca59cc7, 0xa847254b2e38aca0, 0x31afc708d21e17ce, 0x676dd6fccad84af7}}, + {{0x14f61d5dc84c9793, 0x9941f9e3ef418206, 0xcdf5b88f346277ac, 0x58c837fa0e8a79a9}}, + {{0x0cf9688596fc9058, 0x1ddcbbf37b56a01b, 0xdcc2e77d4935d66a, 0x1c4f73f2c6a57f0a}}}, +{{{0x0e7a4fbd305fa0bb, 0x829d4ce054c663ad, 0xf421c3832fe33848, 0x795ac80d1bf64c42}}, + {{0xb36e706efc7c3484, 0x73dfc9b4c3c1cf61, 0xeb1d79c9781cc7e5, 0x70459adb7daf675c}}, + {{0x1b91db4991b42bb3, 0x572696234b02dcca, 0x9fdf9ee51f8c78dc, 
0x5fe162848ce21fd3}}}, +{{{0x4e59214fe194961a, 0x49be7dc70d71cd4f, 0x9300cfd23b50f22d, 0x4789d446fc917232}}, + {{0x2879852d5d7cb208, 0xb8dedd70687df2e7, 0xdc0bffab21687891, 0x2b44c043677daa35}}, + {{0x1a1c87ab074eb78e, 0xfac6d18e99daf467, 0x3eacbbcd484f9067, 0x60c52eef2bb9a4e4}}}, +{{{0x0b5d89bc3bfd8bf1, 0xb06b9237c9f3551a, 0x0e4c16b0d53028f5, 0x10bc9c312ccfcaab}}, + {{0x702bc5c27cae6d11, 0x44c7699b54a48cab, 0xefbc4056ba492eb2, 0x70d77248d9b6676d}}, + {{0xaa8ae84b3ec2a05b, 0x98699ef4ed1781e0, 0x794513e4708e85d1, 0x63755bd3a976f413}}}, +{{{0xb55fa03e2ad10853, 0x356f75909ee63569, 0x9ff9f1fdbe69b890, 0x0d8cc1c48bc16f84}}, + {{0x3dc7101897f1acb7, 0x5dda7d5ec165bbd8, 0x508e5b9c0fa1020f, 0x2763751737c52a56}}, + {{0x029402d36eb419a9, 0xf0b44e7e77b460a5, 0xcfa86230d43c4956, 0x70c2dd8a7ad166e7}}}, +{{{0x656194509f6fec0e, 0xee2e7ea946c6518d, 0x9733c1f367e09b5c, 0x2e0fac6363948495}}, + {{0x91d4967db8ed7e13, 0x74252f0ad776817a, 0xe40982e00d852564, 0x32b8613816a53ce5}}, + {{0x79e7f7bee448cd64, 0x6ac83a67087886d0, 0xf89fd4d9a0e4db2e, 0x4179215c735a4f41}}}, +{{{0x8c7094e7d7dced2a, 0x97fb8ac347d39c70, 0xe13be033a906d902, 0x700344a30cd99d76}}, + {{0xe4ae33b9286bcd34, 0xb7ef7eb6559dd6dc, 0x278b141fb3d38e1f, 0x31fa85662241c286}}, + {{0xaf826c422e3622f4, 0xc12029879833502d, 0x9bc1b7e12b389123, 0x24bb2312a9952489}}}, +{{{0xb1a8ed1732de67c3, 0x3cb49418461b4948, 0x8ebd434376cfbcd2, 0x0fee3e871e188008}}, + {{0x41f80c2af5f85c6b, 0x687284c304fa6794, 0x8945df99a3ba1bad, 0x0d1d2af9ffeb5d16}}, + {{0xa9da8aa132621edf, 0x30b822a159226579, 0x4004197ba79ac193, 0x16acd79718531d76}}}, +{{{0x72df72af2d9b1d3d, 0x63462a36a432245a, 0x3ecea07916b39637, 0x123e0ef6b9302309}}, + {{0xc959c6c57887b6ad, 0x94e19ead5f90feba, 0x16e24e62a342f504, 0x164ed34b18161700}}, + {{0x487ed94c192fe69a, 0x61ae2cea3a911513, 0x877bf6d3b9a4de27, 0x78da0fc61073f3eb}}}, +{{{0x5bf15d28e52bc66a, 0x2c47e31870f01a8e, 0x2419afbc06c28bdd, 0x2d25deeb256b173a}}, + {{0xa29f80f1680c3a94, 0x71f77e151ae9e7e6, 0x1100f15848017973, 0x054aa4b316b38ddd}}, + {{0xdfc8468d19267cb8, 0x0b28789c66e54daf, 0x2aeb1d2a666eec17, 0x134610a6ab7da760}}}, +{{{0x51138ec78df6b0fe, 0x5397da89e575f51b, 0x09207a1d717af1b9, 0x2102fdba2b20d650}}, + {{0xcd2a65e777d1f515, 0x548991878faa60f1, 0xb1b73bbcdabc06e5, 0x654878cba97cc9fb}}, + {{0x969ee405055ce6a1, 0x36bca7681251ad29, 0x3a1af517aa7da415, 0x0ad725db29ecb2ba}}}, +{{{0xdc4267b1834e2457, 0xb67544b570ce1bc5, 0x1af07a0bf7d15ed7, 0x4aefcffb71a03650}}, + {{0xfec7bc0c9b056f85, 0x537d5268e7f5ffd7, 0x77afc6624312aefa, 0x4f675f5302399fd9}}, + {{0xc32d36360415171e, 0xcd2bef118998483b, 0x870a6eadd0945110, 0x0bccbb72a2a86561}}}, +{{{0x185e962feab1a9c8, 0x86e7e63565147dcd, 0xb092e031bb5b6df2, 0x4024f0ab59d6b73e}}, + {{0x186d5e4c50fe1296, 0xe0397b82fee89f7e, 0x3bc7f6c5507031b0, 0x6678fd69108f37c2}}, + {{0x1586fa31636863c2, 0x07f68c48572d33f2, 0x4f73cc9f789eaefc, 0x2d42e2108ead4701}}}, +{{{0x97f5131594dfd29b, 0x6155985d313f4c6a, 0xeba13f0708455010, 0x676b2608b8d2d322}}, + {{0x21717b0d0f537593, 0x914e690b131e064c, 0x1bb687ae752ae09f, 0x420bf3a79b423c6e}}, + {{0x8138ba651c5b2b47, 0x8671b6ec311b1b80, 0x7bff0cb1bc3135b0, 0x745d2ffa9c0cf1e0}}}, +{{{0xbf525a1e2bc9c8bd, 0xea5b260826479d81, 0xd511c70edf0155db, 0x1ae23ceb960cf5d0}}, + {{0x6036df5721d34e6a, 0xb1db8827997bb3d0, 0xd3c209c3c8756afa, 0x06e15be54c1dc839}}, + {{0x5b725d871932994a, 0x32351cb5ceb1dab0, 0x7dc41549dab7ca05, 0x58ded861278ec1f7}}}, +{{{0xd8173793f266c55c, 0xc8c976c5cc454e49, 0x5ce382f8bc26c3a8, 0x2ff39de85485f6f9}}, + {{0x2dfb5ba8b6c2c9a8, 0x48eeef8ef52c598c, 0x33809107f12d1573, 
0x08ba696b531d5bd8}}, + {{0x77ed3eeec3efc57a, 0x04e05517d4ff4811, 0xea3d7a3ff1a671cb, 0x120633b4947cfe54}}}, +{{{0x0b94987891610042, 0x4ee7b13cecebfae8, 0x70be739594f0a4c0, 0x35d30a99b4d59185}}, + {{0x82bd31474912100a, 0xde237b6d7e6fbe06, 0xe11e761911ea79c6, 0x07433be3cb393bde}}, + {{0xff7944c05ce997f4, 0x575d3de4b05c51a3, 0x583381fd5a76847c, 0x2d873ede7af6da9f}}}, +{{{0x157a316443373409, 0xfab8b7eef4aa81d9, 0xb093fee6f5a64806, 0x2e773654707fa7b6}}, + {{0xaa6202e14e5df981, 0xa20d59175015e1f5, 0x18a275d3bae21d6c, 0x0543618a01600253}}, + {{0x0deabdf4974c23c1, 0xaa6f0a259dce4693, 0x04202cb8a29aba2c, 0x4b1443362d07960d}}}, +{{{0xccc4b7c7b66e1f7a, 0x44157e25f50c2f7e, 0x3ef06dfc713eaf1c, 0x582f446752da63f7}}, + {{0x967c54e91c529ccb, 0x30f6269264c635fb, 0x2747aff478121965, 0x17038418eaf66f5c}}, + {{0xc6317bd320324ce4, 0xa81042e8a4488bc4, 0xb21ef18b4e5a1364, 0x0c2a1c4bcda28dc9}}}, +{{{0xd24dc7d06f1f0447, 0xb2269e3edb87c059, 0xd15b0272fbb2d28f, 0x7c558bd1c6f64877}}, + {{0xedc4814869bd6945, 0x0d6d907dbe1c8d22, 0xc63bd212d55cc5ab, 0x5a6a9b30a314dc83}}, + {{0xd0ec1524d396463d, 0x12bb628ac35a24f0, 0xa50c3a791cbc5fa4, 0x0404a5ca0afbafc3}}}, +{{{0x8c1f40070aa743d6, 0xccbad0cb5b265ee8, 0x574b046b668fd2de, 0x46395bfdcadd9633}}, + {{0x62bc9e1b2a416fd1, 0xb5c6f728e350598b, 0x04343fd83d5d6967, 0x39527516e7f8ee98}}, + {{0x117fdb2d1a5d9a9c, 0x9c7745bcd1005c2a, 0xefd4bef154d56fea, 0x76579a29e822d016}}}, +{{{0x45b68e7e49c02a17, 0x23cd51a2bca9a37f, 0x3ed65f11ec224c1b, 0x43a384dc9e05bdb1}}, + {{0x333cb51352b434f2, 0xd832284993de80e1, 0xb5512887750d35ce, 0x02c514bb2a2777c1}}, + {{0x684bd5da8bf1b645, 0xfb8bd37ef6b54b53, 0x313916d7a9b0d253, 0x1160920961548059}}}, +{{{0xb44d166929dacfaa, 0xda529f4c8413598f, 0xe9ef63ca453d5559, 0x351e125bc5698e0b}}, + {{0x7a385616369b4dcd, 0x75c02ca7655c3563, 0x7dc21bf9d4f18021, 0x2f637d7491e6e042}}, + {{0xd4b49b461af67bbe, 0xd603037ac8ab8961, 0x71dee19ff9a699fb, 0x7f182d06e7ce2a9a}}}, +{{{0x7a7c8e64ab0168ec, 0xcb5a4a5515edc543, 0x095519d347cd0eda, 0x67d4ac8c343e93b0}}, + {{0x09454b728e217522, 0xaa58e8f4d484b8d8, 0xd358254d7f46903c, 0x44acc043241c5217}}, + {{0x1c7d6bbb4f7a5777, 0x8b35fed4918313e1, 0x4adca1c6c96b4684, 0x556d1c8312ad71bd}}}, +{{{0x17ef40e30c8d3982, 0x31f7073e15a3fa34, 0x4f21f3cb0773646e, 0x746c6c6d1d824eff}}, + {{0x81f06756b11be821, 0x0faff82310a3f3dd, 0xf8b2d0556a99465d, 0x097abe38cc8c7f05}}, + {{0x0c49c9877ea52da4, 0x4c4369559bdc1d43, 0x022c3809f7ccebd2, 0x577e14a34bee84bd}}}, +{{{0xf0e268ac61a73b0a, 0xf2fafa103791a5f5, 0xc1e13e826b6d00e9, 0x60fa7ee96fd78f42}}, + {{0x94fecebebd4dd72b, 0xf46a4fda060f2211, 0x124a5977c0c8d1ff, 0x705304b8fb009295}}, + {{0xb63d1d354d296ec6, 0xf3c3053e5fad31d8, 0x670b958cb4bd42ec, 0x21398e0ca16353fd}}}, +{{{0x89f5058a382b33f3, 0x5ae2ba0bad48c0b4, 0x8f93b503a53db36e, 0x5aa3ed9d95a232e6}}, + {{0x2798aaf9b4b75601, 0x5eac72135c8dad72, 0xd2ceaa6161b7a023, 0x1bbfb284e98f7d4e}}, + {{0x656777e9c7d96561, 0xcb2b125472c78036, 0x65053299d9506eee, 0x4a07e14e5e8957cc}}}, +{{{0x4ee412cb980df999, 0xa315d76f3c6ec771, 0xbba5edde925c77fd, 0x3f0bac391d313402}}, + {{0x240b58cdc477a49b, 0xfd38dade6447f017, 0x19928d32a7c86aad, 0x50af7aed84afa081}}, + {{0x6e4fde0115f65be5, 0x29982621216109b2, 0x780205810badd6d9, 0x1921a316baebd006}}}, +{{{0x89422f7edfb870fc, 0x2c296beb4f76b3bd, 0x0738f1d436c24df7, 0x6458df41e273aeb0}}, + {{0xd75aad9ad9f3c18b, 0x566a0eef60b1c19c, 0x3e9a0bac255c0ed9, 0x7b049deca062c7f5}}, + {{0xdccbe37a35444483, 0x758879330fedbe93, 0x786004c312c5dd87, 0x6093dccbc2950e64}}}, +{{{0x1ff39a8585e0706d, 0x36d0a5d8b3e73933, 0x43b9f2e1718f453b, 
0x57d1ea084827a97c}}, + {{0x6bdeeebe6084034b, 0x3199c2b6780fb854, 0x973376abb62d0695, 0x6e3180c98b647d90}}, + {{0xee7ab6e7a128b071, 0xa4c1596d93a88baa, 0xf7b4de82b2216130, 0x363e999ddd97bd18}}}, +{{{0x96a843c135ee1fc4, 0x976eb35508e4c8cf, 0xb42f6801b58cd330, 0x48ee9b78693a052b}}, + {{0x2f1848dce24baec6, 0x769b7255babcaf60, 0x90cb3c6e3cefe931, 0x231f979bc6f9b355}}, + {{0x5c31de4bcc2af3c6, 0xb04bb030fe208d1f, 0xb78d7009c14fb466, 0x079bfa9b08792413}}}, +{{{0xe3903a51da300df4, 0x843964233da95ab0, 0xed3cf12d0b356480, 0x038c77f684817194}}, + {{0xf3c9ed80a2d54245, 0x0aa08b7877f63952, 0xd76dac63d1085475, 0x1ef4fb159470636b}}, + {{0x854e5ee65b167bec, 0x59590a4296d0cdc2, 0x72b2df3498102199, 0x575ee92a4a0bff56}}}, +{{{0xd4c080908a182fcf, 0x30e170c299489dbd, 0x05babd5752f733de, 0x43d4e7112cd3fd00}}, + {{0x5d46bc450aa4d801, 0xc3af1227a533b9d8, 0x389e3b262b8906c2, 0x200a1e7e382f581b}}, + {{0x518db967eaf93ac5, 0x71bc989b056652c0, 0xfe2b85d9567197f5, 0x050eca52651e4e38}}}, +{{{0xc3431ade453f0c9c, 0xe9f5045eff703b9b, 0xfcd97ac9ed847b3d, 0x4b0ee6c21c58f4c6}}, + {{0x97ac397660e668ea, 0x9b19bbfe153ab497, 0x4cb179b534eca79f, 0x6151c09fa131ae57}}, + {{0x3af55c0dfdf05d96, 0xdd262ee02ab4ee7a, 0x11b2bb8712171709, 0x1fef24fa800f030b}}}, +{{{0x37d653fb1aa73196, 0x0f9495303fd76418, 0xad200b09fb3a17b2, 0x544d49292fc8613e}}, + {{0x22d2aff530976b86, 0x8d90b806c2d24604, 0xdca1896c4de5bae5, 0x28005fe6c8340c17}}, + {{0x6aefba9f34528688, 0x5c1bff9425107da1, 0xf75bbbcd66d94b36, 0x72e472930f316dfa}}}, +{{{0x2695208c9781084f, 0xb1502a0b23450ee1, 0xfd9daea603efde02, 0x5a9d2e8c2733a34c}}, + {{0x07f3f635d32a7627, 0x7aaa4d865f6566f0, 0x3c85e79728d04450, 0x1fee7f000fe06438}}, + {{0x765305da03dbf7e5, 0xa4daf2491434cdbd, 0x7b4ad5cdd24a88ec, 0x00f94051ee040543}}}, +{{{0x8d356b23c3d330b2, 0xf21c8b9bb0471b06, 0xb36c316c6e42b83c, 0x07d79c7e8beab10d}}, + {{0xd7ef93bb07af9753, 0x583ed0cf3db766a7, 0xce6998bf6e0b1ec5, 0x47b7ffd25dd40452}}, + {{0x87fbfb9cbc08dd12, 0x8a066b3ae1eec29b, 0x0d57242bdb1fc1bf, 0x1c3520a35ea64bb6}}}, +{{{0x80d253a6bccba34a, 0x3e61c3a13838219b, 0x90c3b6019882e396, 0x1c3d05775d0ee66f}}, + {{0xcda86f40216bc059, 0x1fbb231d12bcd87e, 0xb4956a9e17c70990, 0x38750c3b66d12e55}}, + {{0x692ef1409422e51a, 0xcbc0c73c2b5df671, 0x21014fe7744ce029, 0x0621e2c7d330487c}}}, +{{{0xaf9860cc8259838d, 0x90ea48c1c69f9adc, 0x6526483765581e30, 0x0007d6097bd3a5bc}}, + {{0xb7ae1796b0dbf0f3, 0x54dfafb9e17ce196, 0x25923071e9aaa3b4, 0x5d8e589ca1002e9d}}, + {{0xc0bf1d950842a94b, 0xb2d3c363588f2e3e, 0x0a961438bb51e2ef, 0x1583d7783c1cbf86}}}, +{{{0xeceea2ef5da27ae1, 0x597c3a1455670174, 0xc9a62a126609167a, 0x252a5f2e81ed8f70}}, + {{0x90034704cc9d28c7, 0x1d1b679ef72cc58f, 0x16e12b5fbe5b8726, 0x4958064e83c5580a}}, + {{0x0d2894265066e80d, 0xfcc3f785307c8c6b, 0x1b53da780c1112fd, 0x079c170bd843b388}}}, +{{{0x0506ece464fa6fff, 0xbee3431e6205e523, 0x3579422451b8ea42, 0x6dec05e34ac9fb00}}, + {{0xcdd6cd50c0d5d056, 0x9af7686dbb03573b, 0x3ca6723ff3c3ef48, 0x6768c0d7317b8acc}}, + {{0x94b625e5f155c1b3, 0x417bf3a7997b7b91, 0xc22cbddc6d6b2600, 0x51445e14ddcd52f4}}}, +{{{0x57502b4b3b144951, 0x8e67ff6b444bbcb3, 0xb8bd6927166385db, 0x13186f31e39295c8}}, + {{0x893147ab2bbea455, 0x8c53a24f92079129, 0x4b49f948be30f7a7, 0x12e990086e4fd43d}}, + {{0xf10c96b37fdfbb2e, 0x9f9a935e121ceaf9, 0xdf1136c43a5b983f, 0x77b2e3f05d3e99af}}}, +{{{0x296fa9c59c2ec4de, 0xbc8b61bf4f84f3cb, 0x1c7706d917a8f908, 0x63b795fc7ad3255d}}, + {{0xd598639c12ddb0a4, 0xa5d19f30c024866b, 0xd17c2f0358fce460, 0x07a195152e095e8a}}, + {{0xa8368f02389e5fc8, 0x90433b02cf8de43b, 0xafa1fd5dc5412643, 
0x3e8fe83d032f0137}}}, +{{{0x2f8b15b90570a294, 0x94f2427067084549, 0xde1c5ae161bbfd84, 0x75ba3b797fac4007}}, + {{0x08704c8de8efd13c, 0xdfc51a8e33e03731, 0xa59d5da51260cde3, 0x22d60899a6258c86}}, + {{0x6239dbc070cdd196, 0x60fe8a8b6c7d8a9a, 0xb38847bceb401260, 0x0904d07b87779e5e}}}, +{{{0xb4ce1fd4ddba919c, 0xcf31db3ec74c8daa, 0x2c63cc63ad86cc51, 0x43e2143fbc1dde07}}, + {{0xf4322d6648f940b9, 0x06952f0cbd2d0c39, 0x167697ada081f931, 0x6240aacebaf72a6c}}, + {{0xf834749c5ba295a0, 0xd6947c5bca37d25a, 0x66f13ba7e7c9316a, 0x56bdaf238db40cac}}}, +{{{0x362ab9e3f53533eb, 0x338568d56eb93d40, 0x9e0e14521d5a5572, 0x1d24a86d83741318}}, + {{0x1310d36cc19d3bb2, 0x062a6bb7622386b9, 0x7c9b8591d7a14f5c, 0x03aa31507e1e5754}}, + {{0xf4ec7648ffd4ce1f, 0xe045eaf054ac8c1c, 0x88d225821d09357c, 0x43b261dc9aeb4859}}}, +{{{0xe55b1e1988bb79bb, 0xa09ed07dc17a359d, 0xb02c2ee2603dea33, 0x326055cf5b276bc2}}, + {{0x19513d8b6c951364, 0x94fe7126000bf47b, 0x028d10ddd54f9567, 0x02b4d5e242940964}}, + {{0xb4a155cb28d18df2, 0xeacc4646186ce508, 0xc49cf4936c824389, 0x27a6c809ae5d3410}}}, +{{{0x8ba6ebcd1f0db188, 0x37d3d73a675a5be8, 0xf22edfa315f5585a, 0x2cb67174ff60a17e}}, + {{0xcd2c270ac43d6954, 0xdd4a3e576a66cab2, 0x79fa592469d7036c, 0x221503603d8c2599}}, + {{0x59eecdf9390be1d0, 0xa9422044728ce3f1, 0x82891c667a94f0f4, 0x7b1df4b73890f436}}}, +{{{0xe492f2e0b3b2a224, 0x7c6c9e062b551160, 0x15eb8fe20d7f7b0e, 0x61fcef2658fc5992}}, + {{0x5f2e221807f8f58c, 0xe3555c9fd49409d4, 0xb2aaa88d1fb6a630, 0x68698245d352e03d}}, + {{0xdbb15d852a18187a, 0xf3e4aad386ddacd7, 0x44bae2810ff6c482, 0x46cf4c473daf01cf}}}, +{{{0x426525ed9ec4e5f9, 0x0e5eda0116903303, 0x72b1a7f2cbe5cadc, 0x29387bcd14eb5f40}}, + {{0x213c6ea7f1498140, 0x7c1e7ef8392b4854, 0x2488c38c5629ceba, 0x1065aae50d8cc5bb}}, + {{0x1c2c4525df200d57, 0x5c3b2dd6bfca674a, 0x0a07e7b1e1834030, 0x69a198e64f1ce716}}}, +{{{0x9062b2e0d91a78bc, 0x47c9889cc8509667, 0x9df54a66405070b8, 0x7369e6a92493a1bf}}, + {{0xe1014434dcc5caed, 0x47ed5d963c84fb33, 0x70019576ed86a0e7, 0x25b2697bd267f9e4}}, + {{0x9d673ffb13986864, 0x3ca5fbd9415dc7b8, 0xe04ecc3bdf273b5e, 0x1420683db54e4cd2}}}, +{{{0xb478bd1e249dd197, 0x620c35005e58c102, 0xfb02d32fccbaac5c, 0x60b63bebf508a72d}}, + {{0x34eebb6fc1cc5ad0, 0x6a1b0ce99646ac8b, 0xd3b0da49a66bde53, 0x31e83b4161d081c1}}, + {{0x97e8c7129e062b4f, 0x49e48f4f29320ad8, 0x5bece14b6f18683f, 0x55cf1eb62d550317}}}, +{{{0x5879101065c23d58, 0x8b9d086d5094819c, 0xe2402fa912c55fa7, 0x669a6564570891d4}}, + {{0x3076b5e37df58c52, 0xd73ab9dde799cc36, 0xbd831ce34913ee20, 0x1a56fbaa62ba0133}}, + {{0x943e6b505c9dc9ec, 0x302557bba77c371a, 0x9873ae5641347651, 0x13c4836799c58a5c}}}, +{{{0x423a5d465ab3e1b9, 0xfc13c187c7f13f61, 0x19f83664ecb5b9b6, 0x66f80c93a637b607}}, + {{0xc4dcfb6a5d8bd080, 0xdeebc4ec571a4842, 0xd4b2e883b8e55365, 0x50bdc87dc8e5b827}}, + {{0x606d37836edfe111, 0x32353e15f011abd9, 0x64b03ac325b73b96, 0x1dd56444725fd5ae}}}, +{{{0x8fa47ff83362127d, 0xbc9f6ac471cd7c15, 0x6e71454349220c8b, 0x0e645912219f732e}}, + {{0xc297e60008bac89a, 0x7d4cea11eae1c3e0, 0xf3e38be19fe7977c, 0x3a3a450f63a305cd}}, + {{0x078f2f31d8394627, 0x389d3183de94a510, 0xd1e36c6d17996f80, 0x318c8d9393a9a87b}}}, +{{{0xf2745d032afffe19, 0x0c9f3c497f24db66, 0xbc98d3e3ba8598ef, 0x224c7c679a1d5314}}, + {{0x5d669e29ab1dd398, 0xfc921658342d9e3b, 0x55851dfdf35973cd, 0x509a41c325950af6}}, + {{0xbdc06edca6f925e9, 0x793ef3f4641b1f33, 0x82ec12809d833e89, 0x05bff02328a11389}}}, +{{{0x3632137023cae00b, 0x544acf0ad1accf59, 0x96741049d21a1c88, 0x780b8cc3fa2a44a7}}, + {{0x6881a0dd0dc512e4, 0x4fe70dc844a5fafe, 0x1f748e6b8f4a5240, 
0x576277cdee01a3ea}}, + {{0x1ef38abc234f305f, 0x9a577fbd1405de08, 0x5e82a51434e62a0d, 0x5ff418726271b7a1}}}, +{{{0x398e080c1789db9d, 0xa7602025f3e778f5, 0xfa98894c06bd035d, 0x106a03dc25a966be}}, + {{0xe5db47e813b69540, 0xf35d2a3b432610e1, 0xac1f26e938781276, 0x29d4db8ca0a0cb69}}, + {{0xd9ad0aaf333353d0, 0x38669da5acd309e5, 0x3c57658ac888f7f0, 0x4ab38a51052cbefa}}}, +{{{0xda7c2b256768d593, 0x98c1c0574422ca13, 0xf1a80bd5ca0ace1d, 0x29cdd1adc088a690}}, + {{0xd6cfd1ef5fddc09c, 0xe82b3efdf7575dce, 0x25d56b5d201634c2, 0x3041c6bb04ed2b9b}}, + {{0x0ff2f2f9d956e148, 0xade797759f356b2e, 0x1a4698bb5f6c025c, 0x104bbd6814049a7b}}}, +{{{0x51f0fd3168f1ed67, 0x2c811dcdd86f3bc2, 0x44dc5c4304d2f2de, 0x5be8cc57092a7149}}, + {{0xa95d9a5fd67ff163, 0xe92be69d4cc75681, 0xb7f8024cde20f257, 0x204f2a20fb072df5}}, + {{0xc8143b3d30ebb079, 0x7589155abd652e30, 0x653c3c318f6d5c31, 0x2570fb17c279161f}}}, +{{{0x3efa367f2cb61575, 0xf5f96f761cd6026c, 0xe8c7142a65b52562, 0x3dcb65ea53030acd}}, + {{0x192ea9550bb8245a, 0xc8e6fba88f9050d1, 0x7986ea2d88a4c935, 0x241c5f91de018668}}, + {{0x28d8172940de6caa, 0x8fbf2cf022d9733a, 0x16d7fcdd235b01d1, 0x08420edd5fcdf0e5}}}, +{{{0xcdff20ab8362fa4a, 0x57e118d4e21a3e6e, 0xe3179617fc39e62b, 0x0d9a53efbc1769fd}}, + {{0x0358c34e04f410ce, 0xb6135b5a276e0685, 0x5d9670c7ebb91521, 0x04d654f321db889c}}, + {{0x5e7dc116ddbdb5d5, 0x2954deb68da5dd2d, 0x1cb608173334a292, 0x4a7a4f2618991ad7}}}, +{{{0xf4a718025fb15f95, 0x3df65f346b5c1b8f, 0xcdfcf08500e01112, 0x11b50c4cddd31848}}, + {{0x24c3b291af372a4b, 0x93da8270718147f2, 0xdd84856486899ef2, 0x4a96314223e0ee33}}, + {{0xa6e8274408a4ffd6, 0x738e177e9c1576d9, 0x773348b63d02b3f2, 0x4f4bce4dce6bcc51}}}, +{{{0xa71fce5ae2242584, 0x26ea725692f58a9e, 0xd21a09d71cea3cf4, 0x73fcdd14b71c01e6}}, + {{0x30e2616ec49d0b6f, 0xe456718fcaec2317, 0x48eb409bf26b4fa6, 0x3042cee561595f37}}, + {{0x427e7079449bac41, 0x855ae36dbce2310a, 0x4cae76215f841a7c, 0x389e740c9a9ce1d6}}}, +{{{0x64fcb3ae34dcb9ce, 0x97500323e348d0ad, 0x45b3f07d62c6381b, 0x61545379465a6788}}, + {{0xc9bd78f6570eac28, 0xe55b0b3227919ce1, 0x65fc3eaba19b91ed, 0x25c425e5d6263690}}, + {{0x3f3e06a6f1d7de6e, 0x3ef976278e062308, 0x8c14f6264e8a6c77, 0x6539a08915484759}}}, +{{{0xe9d21f74c3d2f773, 0xc150544125c46845, 0x624e5ce8f9b99e33, 0x11c5e4aac5cd186c}}, + {{0xddc4dbd414bb4a19, 0x19b2bc3c98424f8e, 0x48a89fd736ca7169, 0x0f65320ef019bd90}}, + {{0xd486d1b1cafde0c6, 0x4f3fe6e3163b5181, 0x59a8af0dfaf2939a, 0x4cabc7bdec33072a}}}, +{{{0x239e9624089c0a2e, 0xc748c4c03afe4738, 0x17dbed2a764fa12a, 0x639b93f0321c8582}}, + {{0xc08f788f3f78d289, 0xfe30a72ca1404d9f, 0xf2778bfccf65cc9d, 0x7ee498165acb2021}}, + {{0x7bd508e39111a1c3, 0x2b2b90d480907489, 0xe7d2aec2ae72fd19, 0x0edf493c85b602a6}}}, +{{{0xaecc8158599b5a68, 0xea574f0febade20e, 0x4fe41d7422b67f07, 0x403b92e3019d4fb4}}, + {{0x6767c4d284764113, 0xa090403ff7f5f835, 0x1c8fcffacae6bede, 0x04c00c54d1dfa369}}, + {{0x4dc22f818b465cf8, 0x71a0f35a1480eff8, 0xaee8bfad04c7d657, 0x355bb12ab26176f4}}}, +{{{0xa71e64cc7493bbf4, 0xe5bd84d9eca3b0c3, 0x0a6bc50cfa05e785, 0x0f9b8132182ec312}}, + {{0xa301dac75a8c7318, 0xed90039db3ceaa11, 0x6f077cbf3bae3f2d, 0x7518eaf8e052ad8e}}, + {{0xa48859c41b7f6c32, 0x0f2d60bcf4383298, 0x1815a929c9b1d1d9, 0x47c3871bbb1755c4}}}, +{{{0x5144539771ec4f48, 0xf805b17dc98c5d6e, 0xf762c11a47c3c66b, 0x00b89b85764699dc}}, + {{0xfbe65d50c85066b0, 0x62ecc4b0b3a299b0, 0xe53754ea441ae8e0, 0x08fea02ce8d48d5f}}, + {{0x824ddd7668deead0, 0xc86445204b685d23, 0xb514cfcd5d89d665, 0x473829a74f75d537}}}, +{{{0x82d2da754679c418, 0xe63bd7d8b2618df0, 0x355eef24ac47eb0a, 
0x2078684c4833c6b4}}, + {{0x23d9533aad3902c9, 0x64c2ddceef03588f, 0x15257390cfe12fb4, 0x6c668b4d44e4d390}}, + {{0x3b48cf217a78820c, 0xf76a0ab281273e97, 0xa96c65a78c8eed7b, 0x7411a6054f8a433f}}}, +{{{0x4d659d32b99dc86d, 0x044cdc75603af115, 0xb34c712cdcc2e488, 0x7c136574fb8134ff}}, + {{0x579ae53d18b175b4, 0x68713159f392a102, 0x8455ecba1eef35f5, 0x1ec9a872458c398f}}, + {{0xb8e6a4d400a2509b, 0x9b81d7020bc882b4, 0x57e7cc9bf1957561, 0x3add88a5c7cd6460}}}, +{{{0xab895770b635dcf2, 0x02dfef6cf66c1fbc, 0x85530268beb6d187, 0x249929fccc879e74}}, + {{0x85c298d459393046, 0x8f7e35985ff659ec, 0x1d2ca22af2f66e3a, 0x61ba1131a406a720}}, + {{0xa3d0a0f116959029, 0x023b6b6cba7ebd89, 0x7bf15a3e26783307, 0x5620310cbbd8ece7}}}, +{{{0x528993434934d643, 0xb9dbf806a51222f5, 0x8f6d878fc3f41c22, 0x37676a2a4d9d9730}}, + {{0x6646b5f477e285d6, 0x40e8ff676c8f6193, 0xa6ec7311abb594dd, 0x7ec846f3658cec4d}}, + {{0x9b5e8f3f1da22ec7, 0x130f1d776c01cd13, 0x214c8fcfa2989fb8, 0x6daaf723399b9dd5}}}, +{{{0x5f3a7562eb3dbe47, 0xf7ea38548ebda0b8, 0x00c3e53145747299, 0x1304e9e71627d551}}, + {{0x583b04bfacad8ea2, 0x29b743e8148be884, 0x2b1e583b0810c5db, 0x2b5449e58eb3bbaa}}, + {{0x789814d26adc9cfe, 0x3c1bab3f8b48dd0b, 0xda0fe1fff979c60a, 0x4468de2d7c2dd693}}}, +{{{0x51bb355e9419469e, 0x33e6dc4c23ddc754, 0x93a5b6d6447f9962, 0x6cce7c6ffb44bd63}}, + {{0x4b9ad8c6f86307ce, 0x21113531435d0c28, 0xd4a866c5657a772c, 0x5da6427e63247352}}, + {{0x1a94c688deac22ca, 0xb9066ef7bbae1ff8, 0x88ad8c388d59580f, 0x58f29abfe79f2ca8}}}, +{{{0xe90ecfab8de73e68, 0x54036f9f377e76a5, 0xf0495b0bbe015982, 0x577629c4a7f41e36}}, + {{0x4b5a64bf710ecdf6, 0xb14ce538462c293c, 0x3643d056d50b3ab9, 0x6af93724185b4870}}, + {{0x3220024509c6a888, 0xd2e036134b558973, 0x83e236233c33289f, 0x701f25bb0caec18f}}}, +{{{0xc3a8b0f8e4616ced, 0xf700660e9e25a87d, 0x61e3061ff4bca59c, 0x2e0c92bfbdc40be9}}, + {{0x9d18f6d97cbec113, 0x844a06e674bfdbe4, 0x20f5b522ac4e60d6, 0x720a5bc050955e51}}, + {{0x0c3f09439b805a35, 0xe84e8b376242abfc, 0x691417f35c229346, 0x0e9b9cbb144ef0ec}}}, +{{{0xfbbad48ffb5720ad, 0xee81916bdbf90d0e, 0xd4813152635543bf, 0x221104eb3f337bd8}}, + {{0x8dee9bd55db1beee, 0xc9c3ab370a723fb9, 0x44a8f1bf1c68d791, 0x366d44191cfd3cde}}, + {{0x9e3c1743f2bc8c14, 0x2eda26fcb5856c3b, 0xccb82f0e68a7fb97, 0x4167a4e6bc593244}}}, +{{{0x643b9d2876f62700, 0x5d1d9d400e7668eb, 0x1b4b430321fc0684, 0x7938bb7e2255246a}}, + {{0xc2be2665f8ce8fee, 0xe967ff14e880d62c, 0xf12e6e7e2f364eee, 0x34b33370cb7ed2f6}}, + {{0xcdc591ee8681d6cc, 0xce02109ced85a753, 0xed7485c158808883, 0x1176fc6e2dfe65e4}}}, +{{{0xb4af6cd05b9c619b, 0x2ddfc9f4b2a58480, 0x3d4fa502ebe94dc4, 0x08fc3a4c677d5f34}}, + {{0xdb90e28949770eb8, 0x98fbcc2aacf440a3, 0x21354ffeded7879b, 0x1f6a3e54f26906b6}}, + {{0x60a4c199d30734ea, 0x40c085b631165cd6, 0xe2333e23f7598295, 0x4f2fad0116b900d1}}}, +{{{0x44beb24194ae4e54, 0x5f541c511857ef6c, 0xa61e6b2d368d0498, 0x445484a4972ef7ab}}, + {{0x962cd91db73bb638, 0xe60577aafc129c08, 0x6f619b39f3b61689, 0x3451995f2944ee81}}, + {{0x9152fcd09fea7d7c, 0x4a816c94b0935cf6, 0x258e9aaa47285c40, 0x10b89ca6042893b7}}}, +{{{0x3d5947499718289c, 0x12ebf8c524533f26, 0x0262bfcb14c3ef15, 0x20b878d577b7518e}}, + {{0x753941be5a45f06e, 0xd07caeed6d9c5f65, 0x11776b9c72ff51b6, 0x17d2d1d9ef0d4da9}}, + {{0x27f2af18073f3e6a, 0xfd3fe519d7521069, 0x22e3b72c3ca60022, 0x72214f63cc65c6a7}}}, +{{{0xb4e37f405307a693, 0xaba714d72f336795, 0xd6fbd0a773761099, 0x5fdf48c58171cbc9}}, + {{0x1d9db7b9f43b29c9, 0xd605824a4f518f75, 0xf2c072bd312f9dc4, 0x1f24ac855a1545b0}}, + {{0x24d608328e9505aa, 0x4748c1d10c1420ee, 0xc7ffe45c06fb25a2, 
0x00ba739e2ae395e6}}}, +{{{0x592e98de5c8790d6, 0xe5bfb7d345c2a2df, 0x115a3b60f9b49922, 0x03283a3e67ad78f3}}, + {{0xae4426f5ea88bb26, 0x360679d984973bfb, 0x5c9f030c26694e50, 0x72297de7d518d226}}, + {{0x48241dc7be0cb939, 0x32f19b4d8b633080, 0xd3dfc90d02289308, 0x05e1296846271945}}}, +{{{0xba82eeb32d9c495a, 0xceefc8fcf12bb97c, 0xb02dabae93b5d1e0, 0x39c00c9c13698d9b}}, + {{0xadbfbbc8242c4550, 0xbcc80cecd03081d9, 0x843566a6f5c8df92, 0x78cf25d38258ce4c}}, + {{0x15ae6b8e31489d68, 0xaa851cab9c2bf087, 0xc9a75a97f04efa05, 0x006b52076b3ff832}}}, +{{{0x29e0cfe19d95781c, 0xb681df18966310e2, 0x57df39d370516b39, 0x4d57e3443bc76122}}, + {{0xf5cb7e16b9ce082d, 0x3407f14c417abc29, 0xd4b36bce2bf4a7ab, 0x7de2e9561a9f75ce}}, + {{0xde70d4f4b6a55ecb, 0x4801527f5d85db99, 0xdbc9c440d3ee9a81, 0x6b2a90af1a6029ed}}}, +{{{0x6923f4fc9ae61e97, 0x5735281de03f5fd1, 0xa764ae43e6edd12d, 0x5fd8f4e9d12d3e4a}}, + {{0x77ebf3245bb2d80a, 0xd8301b472fb9079b, 0xc647e6f24cee7333, 0x465812c8276c2109}}, + {{0x4d43beb22a1062d9, 0x7065fb753831dc16, 0x180d4a7bde2968d7, 0x05b32c2b1cb16790}}}, +{{{0xc8c05eccd24da8fd, 0xa1cf1aac05dfef83, 0xdbbeeff27df9cd61, 0x3b5556a37b471e99}}, + {{0xf7fca42c7ad58195, 0x3214286e4333f3cc, 0xb6c29d0d340b979d, 0x31771a48567307e1}}, + {{0x32b0c524e14dd482, 0xedb351541a2ba4b6, 0xa3d16048282b5af3, 0x4fc079d27a7336eb}}}, +{{{0x51c938b089bf2f7f, 0x2497bd6502dfe9a7, 0xffffc09c7880e453, 0x124567cecaf98e92}}, + {{0xdc348b440c86c50d, 0x1337cbc9cc94e651, 0x6422f74d643e3cb9, 0x241170c2bae3cd08}}, + {{0x3ff9ab860ac473b4, 0xf0911dee0113e435, 0x4ae75060ebc6c4af, 0x3f8612966c87000d}}}, +{{{0x559a0cc9782a0dde, 0x551dcdb2ea718385, 0x7f62865b31ef238c, 0x504aa7767973613d}}, + {{0x9c18fcfa36048d13, 0x29159db373899ddd, 0xdc9f350b9f92d0aa, 0x26f57eee878a19d4}}, + {{0x0cab2cd55687efb1, 0x5180d162247af17b, 0x85c15a344f5a2467, 0x4041943d9dba3069}}}, +{{{0xc3c0eeba43ebcc96, 0x8d749c9c26ea9caf, 0xd9fa95ee1c77ccc6, 0x1420a1d97684340f}}, + {{0x4b217743a26caadd, 0x47a6b424648ab7ce, 0xcb1d4f7a03fbc9e3, 0x12d931429800d019}}, + {{0x00c67799d337594f, 0x5e3c5140b23aa47b, 0x44182854e35ff395, 0x1b4f92314359a012}}}, +{{{0x3e5c109d89150951, 0x39cefa912de9696a, 0x20eae43f975f3020, 0x239b572a7f132dae}}, + {{0x33cf3030a49866b1, 0x251f73d2215f4859, 0xab82aa4051def4f6, 0x5ff191d56f9a23f6}}, + {{0x819ed433ac2d9068, 0x2883ab795fc98523, 0xef4572805593eb3d, 0x020c526a758f36cb}}}, +{{{0x779834f89ed8dbbc, 0xc8f2aaf9dc7ca46c, 0xa9524cdca3e1b074, 0x02aacc4615313877}}, + {{0xe931ef59f042cc89, 0x2c589c9d8e124bb6, 0xadc8e18aaec75997, 0x452cfe0a5602c50c}}, + {{0x86a0f7a0647877df, 0xbbc464270e607c9f, 0xab17ea25f1fb11c9, 0x4cfb7d7b304b877b}}}, +{{{0x72b43d6cb89b75fe, 0x54c694d99c6adc80, 0xb8c3aa373ee34c9f, 0x14b4622b39075364}}, + {{0xe28699c29789ef12, 0x2b6ecd71df57190d, 0xc343c857ecc970d0, 0x5b1d4cbc434d3ac5}}, + {{0xb6fb2615cc0a9f26, 0x3a4f0e2bb88dcce5, 0x1301498b3369a705, 0x2f98f71258592dd1}}}, +{{{0x0c94a74cb50f9e56, 0x5b1ff4a98e8e1320, 0x9a2acc2182300f67, 0x3a6ae249d806aaf9}}, + {{0x2e12ae444f54a701, 0xfcfe3ef0a9cbd7de, 0xcebf890d75835de0, 0x1d8062e9e7614554}}, + {{0x657ada85a9907c5a, 0x1a0ea8b591b90f62, 0x8d0e1dfbdf34b4e9, 0x298b8ce8aef25ff3}}}, +{{{0x2a927953eff70cb2, 0x4b89c92a79157076, 0x9418457a30a7cf6a, 0x34b8a8404d5ce485}}, + {{0x837a72ea0a2165de, 0x3fab07b40bcf79f6, 0x521636c77738ae70, 0x6ba6271803a7d7dc}}, + {{0xc26eecb583693335, 0xd5a813df63b5fefd, 0xa293aa9aa4b22573, 0x71d62bdd465e1c6a}}}, +{{{0x6533cc28d378df80, 0xf6db43790a0fa4b4, 0xe3645ff9f701da5a, 0x74d5f317f3172ba4}}, + {{0xcd2db5dab1f75ef5, 0xd77f95cf16b065f5, 0x14571fea3f49f085, 
0x1c333621262b2b3d}}, + {{0xa86fe55467d9ca81, 0x398b7c752b298c37, 0xda6d0892e3ac623b, 0x4aebcc4547e9d98c}}}, +{{{0x12f0071b276d01c9, 0xe7b8bac586c48c70, 0x5308129b71d6fba9, 0x5d88fbf95a3db792}}, + {{0x0b408d9e7354b610, 0x806b32535ba85b6e, 0xdbe63a034a58a207, 0x173bd9ddc9a1df2c}}, + {{0x2b500f1efe5872df, 0x58d6582ed43918c1, 0xe6ed278ec9673ae0, 0x06e1cd13b19ea319}}}, +{{{0x40d0ad516f166f23, 0x118e32931fab6abe, 0x3fe35e14a04d088e, 0x3080603526e16266}}, + {{0x472baf629e5b0353, 0x3baa0b90278d0447, 0x0c785f469643bf27, 0x7f3a6a1a8d837b13}}, + {{0xf7e644395d3d800b, 0x95a8d555c901edf6, 0x68cd7830592c6339, 0x30d0fded2e51307e}}}, +{{{0xe0594d1af21233b3, 0x1bdbe78ef0cc4d9c, 0x6965187f8f499a77, 0x0a9214202c099868}}, + {{0x9cb4971e68b84750, 0xa09572296664bbcf, 0x5c8de72672fa412b, 0x4615084351c589d9}}, + {{0xbc9019c0aeb9a02e, 0x55c7110d16034cae, 0x0e6df501659932ec, 0x3bca0d2895ca5dfe}}}, +{{{0x40f031bc3c5d62a4, 0x19fc8b3ecff07a60, 0x98183da2130fb545, 0x5631deddae8f13cd}}, + {{0x9c688eb69ecc01bf, 0xf0bc83ada644896f, 0xca2d955f5f7a9fe2, 0x4ea8b4038df28241}}, + {{0x2aed460af1cad202, 0x46305305a48cee83, 0x9121774549f11a5f, 0x24ce0930542ca463}}}, +{{{0x1fe890f5fd06c106, 0xb5c468355d8810f2, 0x827808fe6e8caf3e, 0x41d4e3c28a06d74b}}, + {{0x3fcfa155fdf30b85, 0xd2f7168e36372ea4, 0xb2e064de6492f844, 0x549928a7324f4280}}, + {{0xf26e32a763ee1a2e, 0xae91e4b7d25ffdea, 0xbc3bd33bd17f4d69, 0x491b66dec0dcff6a}}}, +{{{0x98f5b13dc7ea32a7, 0xe3d5f8cc7e16db98, 0xac0abf52cbf8d947, 0x08f338d0c85ee4ac}}, + {{0x75f04a8ed0da64a1, 0xed222caf67e2284b, 0x8234a3791f7b7ba4, 0x4cf6b8b0b7018b67}}, + {{0xc383a821991a73bd, 0xab27bc01df320c7a, 0xc13d331b84777063, 0x530d4a82eb078a99}}}, +{{{0x004c3630e1f94825, 0x7e2d78268cab535a, 0xc7482323cc84ff8b, 0x65ea753f101770b9}}, + {{0x6d6973456c9abf9e, 0x257fb2fc4900a880, 0x2bacf412c8cfb850, 0x0db3e7e00cbfbd5b}}, + {{0x3d66fc3ee2096363, 0x81d62c7f61b5cb6b, 0x0fbe044213443b1a, 0x02a4ec1921e1a1db}}}, +{{{0x5ce6259a3b24b8a2, 0xb8577acc45afa0b8, 0xcccbe6e88ba07037, 0x3d143c51127809bf}}, + {{0xf5c86162f1cf795f, 0x118c861926ee57f2, 0x172124851c063578, 0x36d12b5dec067fcf}}, + {{0x126d279179154557, 0xd5e48f5cfc783a0a, 0x36bdb6e8df179bac, 0x2ef517885ba82859}}}, +{{{0x4637974e8c58aedc, 0xb9ef22fbabf041a4, 0xe185d956e980718a, 0x2f1b78fab143a8a6}}, + {{0x96eebffb305b2f51, 0xd3f938ad889596b8, 0xf0f52dc746d5dd25, 0x57968290bb3a0095}}, + {{0xf71ab8430a20e101, 0xf393658d24f0ec47, 0xcf7509a86ee2eed1, 0x7dc43e35dc2aa3e1}}}, +{{{0x85966665887dd9c3, 0xc90f9b314bb05355, 0xc6e08df8ef2079b1, 0x7ef72016758cc12f}}, + {{0x5a782a5c273e9718, 0x3576c6995e4efd94, 0x0f2ed8051f237d3e, 0x044fb81d82d50a99}}, + {{0xc1df18c5a907e3d9, 0x57b3371dce4c6359, 0xca704534b201bb49, 0x7f79823f9c30dd2e}}}, +{{{0x8334d239a3b513e8, 0xc13670d4b91fa8d8, 0x12b54136f590bd33, 0x0a4e0373d784d9b4}}, + {{0x6a9c1ff068f587ba, 0x0827894e0050c8de, 0x3cbf99557ded5be7, 0x64a9b0431c06d6f0}}, + {{0x2eb3d6a15b7d2919, 0xb0b4f6a0d53a8235, 0x7156ce4389a45d47, 0x071a7d0ace18346c}}}, +{{{0xd3072daac887ba0b, 0x01262905bfa562ee, 0xcf543002c0ef768b, 0x2c3bcc7146ea7e9c}}, + {{0xcc0c355220e14431, 0x0d65950709b15141, 0x9af5621b209d5f36, 0x7c69bcf7617755d3}}, + {{0x07f0d7eb04e8295f, 0x10db18252f50f37d, 0xe951a9a3171798d7, 0x6f5a9a7322aca51d}}}, +{{{0x8ba1000c2f41c6c5, 0xc49f79c10cfefb9b, 0x4efa47703cc51c9f, 0x494e21a2e147afca}}, + {{0xe729d4eba3d944be, 0x8d9e09408078af9e, 0x4525567a47869c03, 0x02ab9680ee8d3b24}}, + {{0xefa48a85dde50d9a, 0x219a224e0fb9a249, 0xfa091f1dd91ef6d9, 0x6b5d76cbea46bb34}}}, +{{{0x8857556cec0cd994, 0x6472dc6f5cd01dba, 0xaf0169148f42b477, 
0x0ae333f685277354}}, + {{0xe0f941171e782522, 0xf1e6ae74036936d3, 0x408b3ea2d0fcc746, 0x16fb869c03dd313e}}, + {{0x288e199733b60962, 0x24fc72b4d8abe133, 0x4811f7ed0991d03e, 0x3f81e38b8f70d075}}}, +{{{0x7f910fcc7ed9affe, 0x545cb8a12465874b, 0xa8397ed24b0c4704, 0x50510fc104f50993}}, + {{0x0adb7f355f17c824, 0x74b923c3d74299a4, 0xd57c3e8bcbf8eaf7, 0x0ad3e2d34cdedc3d}}, + {{0x6f0c0fc5336e249d, 0x745ede19c331cfd9, 0xf2d6fd0009eefe1c, 0x127c158bf0fa1ebe}}}, +{{{0xf6197c422e9879a2, 0xa44addd452ca3647, 0x9b413fc14b4eaccb, 0x354ef87d07ef4f68}}, + {{0xdea28fc4ae51b974, 0x1d9973d3744dfe96, 0x6240680b873848a8, 0x4ed82479d167df95}}, + {{0xfee3b52260c5d975, 0x50352efceb41b0b8, 0x8808ac30a9f6653c, 0x302d92d20539236d}}}, +{{{0x7813c1a2bca4283d, 0xed62f091a1863dd9, 0xaec7bcb8c268fa86, 0x10e5d3b76f1cae4c}}, + {{0x2dbc6fb6e4e0f177, 0x04e1bf29a4bd6a93, 0x5e1966d4787af6e8, 0x0edc5f5eb426d060}}, + {{0x5453bfd653da8e67, 0xe9dc1eec24a9f641, 0xbf87263b03578a23, 0x45b46c51361cba72}}}, +{{{0xa9402abf314f7fa1, 0xe257f1dc8e8cf450, 0x1dbbd54b23a8be84, 0x2177bfa36dcb713b}}, + {{0xce9d4ddd8a7fe3e4, 0xab13645676620e30, 0x4b594f7bb30e9958, 0x5c1c0aef321229df}}, + {{0x37081bbcfa79db8f, 0x6048811ec25f59b3, 0x087a76659c832487, 0x4ae619387d8ab5bb}}}, +{{{0x8ddbf6aa5344a32e, 0x7d88eab4b41b4078, 0x5eb0eb974a130d60, 0x1a00d91b17bf3e03}}, + {{0x61117e44985bfb83, 0xfce0462a71963136, 0x83ac3448d425904b, 0x75685abe5ba43d64}}, + {{0x6e960933eb61f2b2, 0x543d0fa8c9ff4952, 0xdf7275107af66569, 0x135529b623b0e6aa}}}, +{{{0x18f0dbd7add1d518, 0x979f7888cfc11f11, 0x8732e1f07114759b, 0x79b5b81a65ca3a01}}, + {{0xf5c716bce22e83fe, 0xb42beb19e80985c1, 0xec9da63714254aae, 0x5972ea051590a613}}, + {{0x0fd4ac20dc8f7811, 0x9a9ad294ac4d4fa8, 0xc01b2d64b3360434, 0x4f7e9c95905f3bdb}}}, +{{{0x62674bbc5781302e, 0xd8520f3989addc0f, 0x8c2999ae53fbd9c6, 0x31993ad92e638e4c}}, + {{0x71c8443d355299fe, 0x8bcd3b1cdbebead7, 0x8092499ef1a49466, 0x1942eec4a144adc8}}, + {{0x7dac5319ae234992, 0x2c1b3d910cea3e92, 0x553ce494253c1122, 0x2a0a65314ef9ca75}}}, +{{{0x2db7937ff7f927c2, 0xdb741f0617d0a635, 0x5982f3a21155af76, 0x4cf6e218647c2ded}}, + {{0xcf361acd3c1c793a, 0x2f9ebcac5a35bc3b, 0x60e860e9a8cda6ab, 0x055dc39b6dea1a13}}, + {{0xb119227cc28d5bb6, 0x07e24ebc774dffab, 0xa83c78cee4a32c89, 0x121a307710aa24b6}}}, +{{{0xe4db5d5e9f034a97, 0xe153fc093034bc2d, 0x460546919551d3b1, 0x333fc76c7a40e52d}}, + {{0xd659713ec77483c9, 0x88bfe077b82b96af, 0x289e28231097bcd3, 0x527bb94a6ced3a9b}}, + {{0x563d992a995b482e, 0x3405d07c6e383801, 0x485035de2f64d8e5, 0x6b89069b20a7a9f7}}}, +{{{0x812aa0416270220d, 0x995a89faf9245b4e, 0xffadc4ce5072ef05, 0x23bc2103aa73eb73}}, + {{0x4082fa8cb5c7db77, 0x068686f8c734c155, 0x29e6c8d9f6e7a57e, 0x0473d308a7639bcf}}, + {{0xcaee792603589e05, 0x2b4b421246dcc492, 0x02a1ef74e601a94f, 0x102f73bfde04341a}}}, +{{{0xeb18b9ab7f5745c6, 0x023a8aee5787c690, 0xb72712da2df7afa9, 0x36597d25ea5c013d}}, + {{0xa2b4dae0b5511c9a, 0x7ac860292bffff06, 0x981f375df5504234, 0x3f6bd725da4ea12d}}, + {{0x734d8d7b106058ac, 0xd940579e6fc6905f, 0x6466f8f99202932d, 0x7b7ecc19da60d6d0}}}, +{{{0x78c2373c695c690d, 0xdd252e660642906e, 0x951d44444ae12bd2, 0x4235ad7601743956}}, + {{0x6dae4a51a77cfa9b, 0x82263654e7a38650, 0x09bbffcd8f2d82db, 0x03bedc661bf5caba}}, + {{0x6258cb0d078975f5, 0x492942549189f298, 0xa0cab423e2e36ee4, 0x0e7ce2b0cdf066a1}}}, +{{{0xc494643ac48c85a3, 0xfd361df43c6139ad, 0x09db17dd3ae94d48, 0x666e0a5d8fb4674a}}, + {{0xfea6fedfd94b70f9, 0xf130c051c1fcba2d, 0x4882d47e7f2fab89, 0x615256138aeceeb5}}, + {{0x2abbf64e4870cb0d, 0xcd65bcf0aa458b6b, 0x9abe4eba75e8985d, 
0x7f0bc810d514dee4}}}, +{{{0xb9006ba426f4136f, 0x8d67369e57e03035, 0xcbc8dfd94f463c28, 0x0d1f8dbcf8eedbf5}}, + {{0x83ac9dad737213a0, 0x9ff6f8ba2ef72e98, 0x311e2edd43ec6957, 0x1d3a907ddec5ab75}}, + {{0xba1693313ed081dc, 0x29329fad851b3480, 0x0128013c030321cb, 0x00011b44a31bfde3}}}, +{{{0x3fdfa06c3fc66c0c, 0x5d40e38e4dd60dd2, 0x7ae38b38268e4d71, 0x3ac48d916e8357e1}}, + {{0x16561f696a0aa75c, 0xc1bf725c5852bd6a, 0x11a8dd7f9a7966ad, 0x63d988a2d2851026}}, + {{0x00120753afbd232e, 0xe92bceb8fdd8f683, 0xf81669b384e72b91, 0x33fad52b2368a066}}}, +{{{0x540649c6c5e41e16, 0x0af86430333f7735, 0xb2acfcd2f305e746, 0x16c0f429a256dca7}}, + {{0x8d2cc8d0c422cfe8, 0x072b4f7b05a13acb, 0xa3feb6e6ecf6a56f, 0x3cc355ccb90a71e2}}, + {{0xe9b69443903e9131, 0xb8a494cb7a5637ce, 0xc87cd1a4baba9244, 0x631eaf426bae7568}}}, +{{{0xb3e90410da66fe9f, 0x85dd4b526c16e5a6, 0xbc3d97611ef9bf83, 0x5599648b1ea919b5}}, + {{0x47d975b9a3700de8, 0x7280c5fbe2f80552, 0x53658f2732e45de1, 0x431f2c7f665f80b5}}, + {{0xd6026344858f7b19, 0x14ab352fa1ea514a, 0x8900441a2090a9d7, 0x7b04715f91253b26}}}, +{{{0x83edbd28acf6ae43, 0x86357c8b7d5c7ab4, 0xc0404769b7eb2c44, 0x59b37bf5c2f6583f}}, + {{0xb376c280c4e6bac6, 0x970ed3dd6d1d9b0b, 0xb09a9558450bf944, 0x48d0acfa57cde223}}, + {{0xb60f26e47dabe671, 0xf1d1a197622f3a37, 0x4208ce7ee9960394, 0x16234191336d3bdb}}}, +{{{0xb9e499def6267ff6, 0x7772ca7b742c0843, 0x23a0153fe9a4f2b1, 0x2cdfdfecd5d05006}}, + {{0xdd499cd61ff38640, 0x29cd9bc3063625a0, 0x51e2d8023dd73dc3, 0x4a25707a203b9231}}, + {{0x2ab7668a53f6ed6a, 0x304242581dd170a1, 0x4000144c3ae20161, 0x5721896d248e49fc}}}, +{{{0x0b6e5517fd181bae, 0x9022629f2bb963b4, 0x5509bce932064625, 0x578edd74f63c13da}}, + {{0x285d5091a1d0da4e, 0x4baa6fa7b5fe3e08, 0x63e5177ce19393b3, 0x03c935afc4b030fd}}, + {{0x997276c6492b0c3d, 0x47ccc2c4dfe205fc, 0xdcd29b84dd623a3c, 0x3ec2ab590288c7a2}}}, +{{{0xa1a0d27be4d87bb9, 0xa98b4deb61391aed, 0x99a0ddd073cb9b83, 0x2dd5c25a200fcace}}, + {{0xa7213a09ae32d1cb, 0x0f2b87df40f5c2d5, 0x0baea4c6e81eab29, 0x0e1bf66c6adbac5e}}, + {{0xe2abd5e9792c887e, 0x1a020018cb926d5d, 0xbfba69cdbaae5f1e, 0x730548b35ae88f5f}}}, +{{{0xc43551a3cba8b8ee, 0x65a26f1db2115f16, 0x760f4f52ab8c3850, 0x3043443b411db8ca}}, + {{0x805b094ba1d6e334, 0xbf3ef17709353f19, 0x423f06cb0622702b, 0x585a2277d87845dd}}, + {{0xa18a5f8233d48962, 0x6698c4b5ec78257f, 0xa78e6fa5373e41ff, 0x7656278950ef981f}}}, +{{{0x38c3cf59d51fc8c0, 0x9bedd2fd0506b6f2, 0x26bf109fab570e8f, 0x3f4160a8c1b846a6}}, + {{0xe17073a3ea86cf9d, 0x3a8cfbb707155fdc, 0x4853e7fc31838a8e, 0x28bbf484b613f616}}, + {{0xf2612f5c6f136c7c, 0xafead107f6dd11be, 0x527e9ad213de6f33, 0x1e79cb358188f75d}}}, +{{{0x013436c3eef7e3f1, 0x828b6a7ffe9e10f8, 0x7ff908e5bcf9defc, 0x65d7951b3a3b3831}}, + {{0x77e953d8f5e08181, 0x84a50c44299dded9, 0xdc6c2d0c864525e5, 0x478ab52d39d1f2f4}}, + {{0x66a6a4d39252d159, 0xe5dde1bc871ac807, 0xb82c6b40a6c1c96f, 0x16d87a411a212214}}}, +{{{0xb3bd7e5a42066215, 0x879be3cd0c5a24c1, 0x57c05db1d6f994b7, 0x28f87c8165f38ca6}}, + {{0xfba4d5e2d54e0583, 0xe21fafd72ebd99fa, 0x497ac2736ee9778f, 0x1f990b577a5a6dde}}, + {{0xa3344ead1be8f7d6, 0x7d1e50ebacea798f, 0x77c6569e520de052, 0x45882fe1534d6d3e}}}, +{{{0x6669345d757983d6, 0x62b6ed1117aa11a6, 0x7ddd1857985e128f, 0x688fe5b8f626f6dd}}, + {{0xd8ac9929943c6fe4, 0xb5f9f161a38392a2, 0x2699db13bec89af3, 0x7dcf843ce405f074}}, + {{0x6c90d6484a4732c0, 0xd52143fdca563299, 0xb3be28c3915dc6e1, 0x6739687e7327191b}}}, +{{{0xef782014385675a6, 0xa2649f30aafda9e8, 0x4cd1eb505cdfa8cb, 0x46115aba1d4dc0b3}}, + {{0xa66dcc9dc80c1ac0, 0x97a05cf41b38a436, 0xa7ebf3be95dbd7c6, 
0x7da0b8f68d7e7dab}}, + {{0xd40f1953c3b5da76, 0x1dac6f7321119e9b, 0x03cc6021feb25960, 0x5a5f887e83674b4b}}}, +{{{0x8f6301cf70a13d11, 0xcfceb815350dd0c4, 0xf70297d4a4bca47e, 0x3669b656e44d1434}}, + {{0x9e9628d3a0a643b9, 0xb5c3cb00e6c32064, 0x9b5302897c2dec32, 0x43e37ae2d5d1c70c}}, + {{0x387e3f06eda6e133, 0x67301d5199a13ac0, 0xbd5ad8f836263811, 0x6a21e6cd4fd5e9be}}}, +{{{0xf1c6170a3046e65f, 0x58712a2a00d23524, 0x69dbbd3c8c82b755, 0x586bf9f1a195ff57}}, + {{0xef4129126699b2e3, 0x71d30847708d1301, 0x325432d01182b0bd, 0x45371b07001e8b36}}, + {{0xa6db088d5ef8790b, 0x5278f0dc610937e5, 0xac0349d261a16eb8, 0x0eafb03790e52179}}}, +{{{0x960555c13748042f, 0x219a41e6820baa11, 0x1c81f73873486d0c, 0x309acc675a02c661}}, + {{0x5140805e0f75ae1d, 0xec02fbe32662cc30, 0x2cebdf1eea92396d, 0x44ae3344c5435bb3}}, + {{0x9cf289b9bba543ee, 0xf3760e9d5ac97142, 0x1d82e5c64f9360aa, 0x62d5221b7f94678f}}}, +{{{0x524c299c18d0936d, 0xc86bb56c8a0c1a0c, 0xa375052edb4a8631, 0x5c0efde4bc754562}}, + {{0x7585d4263af77a3c, 0xdfae7b11fee9144d, 0xa506708059f7193d, 0x14f29a5383922037}}, + {{0xdf717edc25b2d7f5, 0x21f970db99b53040, 0xda9234b7c3ed4c62, 0x5e72365c7bee093e}}}, +{{{0x575bfc074571217f, 0x3779675d0694d95b, 0x9a0a37bbf4191e33, 0x77f1104c47b4eabc}}, + {{0x7d9339062f08b33e, 0x5b9659e5df9f32be, 0xacff3dad1f9ebdfd, 0x70b20555cb7349b7}}, + {{0xbe5113c555112c4c, 0x6688423a9a881fcd, 0x446677855e503b47, 0x0e34398f4a06404a}}}, +{{{0xb67d22d93ecebde8, 0x09b3e84127822f07, 0x743fa61fb05b6d8d, 0x5e5405368a362372}}, + {{0x18930b093e4b1928, 0x7de3e10e73f3f640, 0xf43217da73395d6f, 0x6f8aded6ca379c3e}}, + {{0xe340123dfdb7b29a, 0x487b97e1a21ab291, 0xf9967d02fde6949e, 0x780de72ec8d3de97}}}, +{{{0x0ae28545089ae7bc, 0x388ddecf1c7f4d06, 0x38ac15510a4811b8, 0x0eb28bf671928ce4}}, + {{0x671feaf300f42772, 0x8f72eb2a2a8c41aa, 0x29a17fd797373292, 0x1defc6ad32b587a6}}, + {{0xaf5bbe1aef5195a7, 0x148c1277917b15ed, 0x2991f7fb7ae5da2e, 0x467d201bf8dd2867}}}, +{{{0x95fe919a74ef4fad, 0x3a827becf6a308a2, 0x964e01d309a47b01, 0x71c43c4f5ba3c797}}, + {{0xbc1ef4bd567ae7a9, 0x3f624cb2d64498bd, 0xe41064d22c1f4ec8, 0x2ef9c5a5ba384001}}, + {{0xb6fd6df6fa9e74cd, 0xf18278bce4af267a, 0x8255b3d0f1ef990e, 0x5a758ca390c5f293}}}, +{{{0xa2b72710d9462495, 0x3aa8c6d2d57d5003, 0xe3d400bfa0b487ca, 0x2dbae244b3eb72ec}}, + {{0x8ce0918b1d61dc94, 0x8ded36469a813066, 0xd4e6a829afe8aad3, 0x0a738027f639d43f}}, + {{0x980f4a2f57ffe1cc, 0x00670d0de1839843, 0x105c3f4a49fb15fd, 0x2698ca635126a69c}}}, +{{{0xe765318832b0ba78, 0x381831f7925cff8b, 0x08a81b91a0291fcc, 0x1fb43dcc49caeb07}}, + {{0x2e3d702f5e3dd90e, 0x9e3f0918e4d25386, 0x5e773ef6024da96a, 0x3c004b0c4afa3332}}, + {{0x9aa946ac06f4b82b, 0x1ca284a5a806c4f3, 0x3ed3265fc6cd4787, 0x6b43fd01cd1fd217}}}, +{{{0xc7a75d4b4697c544, 0x15fdf848df0fffbf, 0x2868b9ebaa46785a, 0x5a68d7105b52f714}}, + {{0xb5c742583e760ef3, 0x75dc52b9ee0ab990, 0xbf1427c2072b923f, 0x73420b2d6ff0d9f0}}, + {{0xaf2cf6cb9e851e06, 0x8f593913c62238c4, 0xda8ab89699fbf373, 0x3db5632fea34bc9e}}}, +{{{0xf46eee2bf75dd9d8, 0x0d17b1f6396759a5, 0x1bf2d131499e7273, 0x04321adf49d75f13}}, + {{0x2e4990b1829825d5, 0xedeaeb873e9a8991, 0xeef03d394c704af8, 0x59197ea495df2b0e}}, + {{0x04e16019e4e55aae, 0xe77b437a7e2f92e9, 0xc7ce2dc16f159aa4, 0x45eafdc1f4d70cc0}}}, +{{{0x698401858045d72b, 0x4c22faa2cf2f0651, 0x941a36656b222dc6, 0x5a5eebc80362dade}}, + {{0xb60e4624cfccb1ed, 0x59dbc292bd5c0395, 0x31a09d1ddc0481c9, 0x3f73ceea5d56d940}}, + {{0xb7a7bfd10a4e8dc6, 0xbe57007e44c9b339, 0x60c1207f1557aefa, 0x26058891266218db}}}, +{{{0x59f704a68360ff04, 0xc3d93fde7661e6f4, 0x831b2a7312873551, 
0x54ad0c2e4e615d57}}, + {{0x4c818e3cc676e542, 0x5e422c9303ceccad, 0xec07cccab4129f08, 0x0dedfa10b24443b8}}, + {{0xee3b67d5b82b522a, 0x36f163469fa5c1eb, 0xa5b4d2f26ec19fd3, 0x62ecb2baa77a9408}}}, +{{{0xe5ed795261152b3d, 0x4962357d0eddd7d1, 0x7482c8d0b96b4c71, 0x2e59f919a966d8be}}, + {{0x92072836afb62874, 0x5fcd5e8579e104a5, 0x5aad01adc630a14a, 0x61913d5075663f98}}, + {{0x0dc62d361a3231da, 0xfa47583294200270, 0x02d801513f9594ce, 0x3ddbc2a131c05d5c}}}, +{{{0x9adc0ff9ce5ec54b, 0x039c2a6b8c2f130d, 0x028007c7f0f89515, 0x78968314ac04b36b}}, + {{0xf3aa57a22796bb14, 0x883abab79b07da21, 0xe54be21831a0391c, 0x5ee7fb38d83205f9}}, + {{0x538dfdcb41446a8e, 0xa5acfda9434937f9, 0x46af908d263c8c78, 0x61d0633c9bca0d09}}}, +{{{0x63744935ffdb2566, 0xc5bd6b89780b68bb, 0x6f1b3280553eec03, 0x6e965fd847aed7f5}}, + {{0xada328bcf8fc73df, 0xee84695da6f037fc, 0x637fb4db38c2a909, 0x5b23ac2df8067bdc}}, + {{0x9ad2b953ee80527b, 0xe88f19aafade6d8d, 0x0e711704150e82cf, 0x79b9bbb9dd95dedc}}}, +{{{0xebb355406a3126c2, 0xd26383a868c8c393, 0x6c0c6429e5b97a82, 0x5065f158c9fd2147}}, + {{0xd1997dae8e9f7374, 0xa032a2f8cfbb0816, 0xcd6cba126d445f0a, 0x1ba811460accb834}}, + {{0x708169fb0c429954, 0xe14600acd76ecf67, 0x2eaab98a70e645ba, 0x3981f39e58a4faf2}}}, +{{{0x18fb8a7559230a93, 0x1d168f6960e6f45d, 0x3a85a94514a93cb5, 0x38dc083705acd0fd}}, + {{0xc845dfa56de66fde, 0xe152a5002c40483a, 0xe9d2e163c7b4f632, 0x30f4452edcbc1b65}}, + {{0x856d2782c5759740, 0xfa134569f99cbecc, 0x8844fc73c0ea4e71, 0x632d9a1a593f2469}}}, +{{{0xf6bb6b15b807cba6, 0x1823c7dfbc54f0d7, 0xbb1d97036e29670b, 0x0b24f48847ed4a57}}, + {{0xbf09fd11ed0c84a7, 0x63f071810d9f693a, 0x21908c2d57cf8779, 0x3a5a7df28af64ba2}}, + {{0xdcdad4be511beac7, 0xa4538075ed26ccf2, 0xe19cff9f005f9a65, 0x34fcf74475481f63}}}, +{{{0xc197e04c789767ca, 0xb8714dcb38d9467d, 0x55de888283f95fa8, 0x3d3bdc164dfa63f7}}, + {{0xa5bb1dab78cfaa98, 0x5ceda267190b72f2, 0x9309c9110a92608e, 0x0119a3042fb374b0}}, + {{0x67a2d89ce8c2177d, 0x669da5f66895d0c1, 0xf56598e5b282a2b0, 0x56c088f1ede20a73}}}, +{{{0x336d3d1110a86e17, 0xd7f388320b75b2fa, 0xf915337625072988, 0x09674c6b99108b87}}, + {{0x581b5fac24f38f02, 0xa90be9febae30cbd, 0x9a2169028acf92f0, 0x038b7ea48359038f}}, + {{0x9f4ef82199316ff8, 0x2f49d282eaa78d4f, 0x0971a5ab5aef3174, 0x6e5e31025969eb65}}}, +{{{0xb16c62f587e593fb, 0x4999eddeca5d3e71, 0xb491c1e014cc3e6d, 0x08f5114789a8dba8}}, + {{0x3304fb0e63066222, 0xfb35068987acba3f, 0xbd1924778c1061a3, 0x3058ad43d1838620}}, + {{0x323c0ffde57663d0, 0x05c3df38a22ea610, 0xbdc78abdac994f9a, 0x26549fa4efe3dc99}}}, +{{{0x741d5a461e6bf9d6, 0x2305b3fc7777a581, 0xd45574a26474d3d9, 0x1926e1dc6401e0ff}}, + {{0xdb468549af3f666e, 0xd77fcf04f14a0ea5, 0x3df23ff7a4ba0c47, 0x3a10dfe132ce3c85}}, + {{0xe07f4e8aea17cea0, 0x2fd515463a1fc1fd, 0x175322fd31f2c0f1, 0x1fa1d01d861e5d15}}}, +{{{0xcc8055947d599832, 0x1e4656da37f15520, 0x99f6f7744e059320, 0x773563bc6a75cf33}}, + {{0x38dcac00d1df94ab, 0x2e712bddd1080de9, 0x7f13e93efdd5e262, 0x73fced18ee9a01e5}}, + {{0x06b1e90863139cb3, 0xa493da67c5a03ecd, 0x8d77cec8ad638932, 0x1f426b701b864f44}}}, +{{{0xefc9264c41911c01, 0xf1a3b7b817a22c25, 0x5875da6bf30f1447, 0x4e1af5271d31b090}}, + {{0xf17e35c891a12552, 0xb76b8153575e9c76, 0xfa83406f0d9b723e, 0x0b76bb1b3fa7e438}}, + {{0x08b8c1f97f92939b, 0xbe6771cbd444ab6e, 0x22e5646399bb8017, 0x7b6dd61eb772a955}}}, +{{{0xb7adc1e850f33d92, 0x7998fa4f608cd5cf, 0xad962dbd8dfc5bdb, 0x703e9bceaf1d2f4f}}, + {{0x5730abf9ab01d2c7, 0x16fb76dc40143b18, 0x866cbe65a0cbb281, 0x53fa9b659bff6afe}}, + {{0x6c14c8e994885455, 0x843a5d6665aed4e5, 0x181bb73ebcd65af1, 
0x398d93e5c4c61f50}}}, +{{{0x1c4bd16733e248f3, 0xbd9e128715bf0a5f, 0xd43f8cf0a10b0376, 0x53b09b5ddf191b13}}, + {{0xc3877c60d2e7e3f2, 0x3b34aaa030828bb1, 0x283e26e7739ef138, 0x699c9c9002c30577}}, + {{0xf306a7235946f1cc, 0x921718b5cce5d97d, 0x28cdd24781b4e975, 0x51caf30c6fcdd907}}}, +{{{0xa60ba7427674e00a, 0x630e8570a17a7bf3, 0x3758563dcf3324cc, 0x5504aa292383fdaa}}, + {{0x737af99a18ac54c7, 0x903378dcc51cb30f, 0x2b89bc334ce10cc7, 0x12ae29c189f8e99a}}, + {{0xa99ec0cb1f0d01cf, 0x0dd1efcc3a34f7ae, 0x55ca7521d09c4e22, 0x5fd14fe958eba5ea}}}, +{{{0xb5dc2ddf2845ab2c, 0x069491b10a7fe993, 0x4daaf3d64002e346, 0x093ff26e586474d1}}, + {{0x3c42fe5ebf93cb8e, 0xbedfa85136d4565f, 0xe0f0859e884220e8, 0x7dd73f960725d128}}, + {{0xb10d24fe68059829, 0x75730672dbaf23e5, 0x1367253ab457ac29, 0x2f59bcbc86b470a4}}}, +{{{0x83847d429917135f, 0xad1b911f567d03d7, 0x7e7748d9be77aad1, 0x5458b42e2e51af4a}}, + {{0x7041d560b691c301, 0x85201b3fadd7e71e, 0x16c2e16311335585, 0x2aa55e3d010828b1}}, + {{0xed5192e60c07444f, 0x42c54e2d74421d10, 0x352b4c82fdb5c864, 0x13e9004a8a768664}}}, +{{{0x739d8845832fcedb, 0xfa38d6c9ae6bf863, 0x32bc0dcab74ffef7, 0x73937e8814bce45e}}, + {{0xbb2e00c9193b877f, 0xece3a890e0dc506b, 0xecf3b7c036de649f, 0x5f46040898de9e1a}}, + {{0xb9037116297bf48d, 0xa9d13b22d4f06834, 0xe19715574696bdc6, 0x2cf8a4e891d5e835}}}, +{{{0x6d93fd8707110f67, 0xdd4c09d37c38b549, 0x7cb16a4cc2736a86, 0x2049bd6e58252a09}}, + {{0x2cb5487e17d06ba2, 0x24d2381c3950196b, 0xd7659c8185978a30, 0x7a6f7f2891d6a4f6}}, + {{0x7d09fd8d6a9aef49, 0xf0ee60be5b3db90b, 0x4c21b52c519ebfd4, 0x6011aadfc545941d}}}, +{{{0x5f67926dcf95f83c, 0x7c7e856171289071, 0xd6a1e7f3998f7a5b, 0x6fc5cc1b0b62f9e0}}, + {{0x63ded0c802cbf890, 0xfbd098ca0dff6aaa, 0x624d0afdb9b6ed99, 0x69ce18b779340b1e}}, + {{0xd1ef5528b29879cb, 0xdd1aae3cd47e9092, 0x127e0442189f2352, 0x15596b3ae57101f1}}}, +{{{0x462739d23f9179a2, 0xff83123197d6ddcf, 0x1307deb553f2148a, 0x0d2237687b5f4dda}}, + {{0x09ff31167e5124ca, 0x0be4158bd9c745df, 0x292b7d227ef556e5, 0x3aa4e241afb6d138}}, + {{0x2cc138bf2a3305f5, 0x48583f8fa2e926c3, 0x083ab1a25549d2eb, 0x32fcaa6e4687a36c}}}, +{{{0x7bc56e8dc57d9af5, 0x3e0bd2ed9df0bdf2, 0xaac014de22efe4a3, 0x4627e9cefebd6a5c}}, + {{0x3207a4732787ccdf, 0x17e31908f213e3f8, 0xd5b2ecd7f60d964e, 0x746f6336c2600be9}}, + {{0x3f4af345ab6c971c, 0xe288eb729943731f, 0x33596a8a0344186d, 0x7b4917007ed66293}}}, +{{{0x2d85fb5cab84b064, 0x497810d289f3bc14, 0x476adc447b15ce0c, 0x122ba376f844fd7b}}, + {{0x54341b28dd53a2dd, 0xaa17905bdf42fc3f, 0x0ff592d94dd2f8f4, 0x1d03620fe08cd37d}}, + {{0xc20232cda2b4e554, 0x9ed0fd42115d187f, 0x2eabb4be7dd479d9, 0x02c70bf52b68ec4c}}}, +{{{0xa287ec4b5d0b2fbb, 0x415c5790074882ca, 0xe044a61ec1d0815c, 0x26334f0a409ef5e0}}, + {{0xace532bf458d72e1, 0x5be768e07cb73cb5, 0x56cf7d94ee8bbde7, 0x6b0697e3feb43a03}}, + {{0xb6c8f04adf62a3c0, 0x3ef000ef076da45d, 0x9c9cb95849f0d2a9, 0x1cc37f43441b2fae}}}, +{{{0x508f565a5cc7324f, 0xd061c4c0e506a922, 0xfb18abdb5c45ac19, 0x6c6809c10380314a}}, + {{0xd76656f1c9ceaeb9, 0x1c5b15f818e5656a, 0x26e72832844c2334, 0x3a346f772f196838}}, + {{0xd2d55112e2da6ac8, 0xe9bd0331b1e851ed, 0x960746dd8ec67262, 0x05911b9f6ef7c5d0}}}, +{{{0xc1339983f5df0ebb, 0xc0f3758f512c4cac, 0x2cf1130a0bb398e1, 0x6b3cecf9aa270c62}}, + {{0x5349acf3512eeaef, 0x20c141d31cc1cb49, 0x24180c07a99a688d, 0x555ef9d1c64b2d17}}, + {{0x36a770ba3b73bd08, 0x624aef08a3afbf0c, 0x5737ff98b40946f2, 0x675f4de13381749d}}}, +{{{0x0e2c52036b1782fc, 0x64816c816cad83b4, 0xd0dcbdd96964073e, 0x13d99df70164c520}}, + {{0xa12ff6d93bdab31d, 0x0725d80f9d652dfe, 0x019c4ff39abe9487, 
0x60f450b882cd3c43}}, + {{0x014b5ec321e5c0ca, 0x4fcb69c9d719bfa2, 0x4e5f1c18750023a0, 0x1c06de9e55edac80}}}, +{{{0x990f7ad6a33ec4e2, 0x6608f938be2ee08e, 0x9ca143c563284515, 0x4cf38a1fec2db60d}}, + {{0xffd52b40ff6d69aa, 0x34530b18dc4049bb, 0x5e4a5c2fa34d9897, 0x78096f8e7d32ba2d}}, + {{0xa0aaaa650dfa5ce7, 0xf9c49e2a48b5478c, 0x4f09cc7d7003725b, 0x373cad3a26091abe}}}, +{{{0xb294634d82c9f57c, 0x1fcbfde124934536, 0x9e9c4db3418cdb5a, 0x0040f3d9454419fc}}, + {{0xf1bea8fb89ddbbad, 0x3bcb2cbc61aeaecb, 0x8f58a7bb1f9b8d9d, 0x21547eda5112a686}}, + {{0xdefde939fd5986d3, 0xf4272c89510a380c, 0xb72ba407bb3119b9, 0x63550a334a254df4}}}, +{{{0x6507d6edb569cf37, 0x178429b00ca52ee1, 0xea7c0090eb6bd65d, 0x3eea62c7daf78f51}}, + {{0x9bba584572547b49, 0xf305c6fae2c408e0, 0x60e8fa69c734f18d, 0x39a92bafaa7d767a}}, + {{0x9d24c713e693274e, 0x5f63857768dbd375, 0x70525560eb8ab39a, 0x68436a0665c9c4cd}}}, +{{{0xbc0235e8202f3f27, 0xc75c00e264f975b0, 0x91a4e9d5a38c2416, 0x17b6e7f68ab789f9}}, + {{0x1e56d317e820107c, 0xc5266844840ae965, 0xc1e0a1c6320ffc7a, 0x5373669c91611472}}, + {{0x5d2814ab9a0e5257, 0x908f2084c9cab3fc, 0xafcaf5885b2d1eca, 0x1cb4b5a678f87d11}}}, +{{{0xb664c06b394afc6c, 0x0c88de2498da5fb1, 0x4f8d03164bcad834, 0x330bca78de7434a2}}, + {{0x6b74aa62a2a007e7, 0xf311e0b0f071c7b1, 0x5707e438000be223, 0x2dc0fd2d82ef6eac}}, + {{0x982eff841119744e, 0xf9695e962b074724, 0xc58ac14fbfc953fb, 0x3c31be1b369f1cf5}}}, +{{{0xb0f4864d08948aee, 0x07dc19ee91ba1c6f, 0x7975cdaea6aca158, 0x330b61134262d4bb}}, + {{0xc168bc93f9cb4272, 0xaeb8711fc7cedb98, 0x7f0e52aa34ac8d7a, 0x41cec1097e7d55bb}}, + {{0xf79619d7a26d808a, 0xbb1fd49e1d9e156d, 0x73d7c36cdba1df27, 0x26b44cd91f28777d}}}, +{{{0x51f048478f387475, 0xb25dbcf49cbecb3c, 0x9aab1244d99f2055, 0x2c709e6c1c10a5d6}}, + {{0xe1b7f29362730383, 0x4b5279ffebca8a2c, 0xdafc778abfd41314, 0x7deb10149c72610f}}, + {{0xcb62af6a8766ee7a, 0x66cbec045553cd0e, 0x588001380f0be4b5, 0x08e68e9ff62ce2ea}}}, +{{{0x34ad500a4bc130ad, 0x8d38db493d0bd49c, 0xa25c3d98500a89be, 0x2f1f3f87eeba3b09}}, + {{0x2f2d09d50ab8f2f9, 0xacb9218dc55923df, 0x4a8f342673766cb9, 0x4cb13bd738f719f5}}, + {{0xf7848c75e515b64a, 0xa59501badb4a9038, 0xc20d313f3f751b50, 0x19a1e353c0ae2ee8}}}, +{{{0x7d1c7560bafa05c3, 0xb3e1a0a0c6e55e61, 0xe3529718c0d66473, 0x41546b11c20c3486}}, + {{0xb42172cdd596bdbd, 0x93e0454398eefc40, 0x9fb15347b44109b5, 0x736bd3990266ae34}}, + {{0x85532d509334b3b4, 0x46fd114b60816573, 0xcc5f5f30425c8375, 0x412295a2b87fab5c}}}, +{{{0x19c99b88f57ed6e9, 0x5393cb266df8c825, 0x5cee3213b30ad273, 0x14e153ebb52d2e34}}, + {{0x2e655261e293eac6, 0x845a92032133acdb, 0x460975cb7900996b, 0x0760bb8d195add80}}, + {{0x413e1a17cde6818a, 0x57156da9ed69a084, 0x2cbf268f46caccb1, 0x6b34be9bc33ac5f2}}}, +{{{0xf3df2f643a78c0b2, 0x4c3e971ef22e027c, 0xec7d1c5e49c1b5a3, 0x2012c18f0922dd2d}}, + {{0x11fc69656571f2d3, 0xc6c9e845530e737a, 0xe33ae7a2d4fe5035, 0x01b9c7b62e6dd30b}}, + {{0x880b55e55ac89d29, 0x1483241f45a0a763, 0x3d36efdfc2e76c1f, 0x08af5b784e4bade8}}}, +{{{0x283499dc881f2533, 0x9d0525da779323b6, 0x897addfb673441f4, 0x32b79d71163a168d}}, + {{0xe27314d289cc2c4b, 0x4be4bd11a287178d, 0x18d528d6fa3364ce, 0x6423c1d5afd9826e}}, + {{0xcc85f8d9edfcb36a, 0x22bcc28f3746e5f9, 0xe49de338f9e5d3cd, 0x480a5efbc13e2dcc}}}, +{{{0x0b51e70b01622071, 0x06b505cf8b1dafc5, 0x2c6bb061ef5aabcd, 0x47aa27600cb7bf31}}, + {{0xb6614ce442ce221f, 0x6e199dcc4c053928, 0x663fb4a4dc1cbe03, 0x24b31d47691c8e06}}, + {{0x2a541eedc015f8c3, 0x11a4fe7e7c693f7c, 0xf0af66134ea278d6, 0x545b585d14dda094}}}, +{{{0x67bf275ea0d43a0f, 0xade68e34089beebe, 0x4289134cd479e72e, 
0x0f62f9c332ba5454}}, + {{0x6204e4d0e3b321e1, 0x3baa637a28ff1e95, 0x0b0ccffd5b99bd9e, 0x4d22dc3e64c8d071}}, + {{0xfcb46589d63b5f39, 0x5cae6a3f57cbcf61, 0xfebac2d2953afa05, 0x1c0fa01a36371436}}}, +{{{0xd2c604b622943dff, 0xbc8cbece44cfb3a0, 0x5d254ff397808678, 0x0fa3614f3b1ca6bf}}, + {{0x69082b0e8c936a50, 0xf9c9a035c1dac5b6, 0x6fb73e54c4dfb634, 0x4005419b1d2bc140}}, + {{0xa003febdb9be82f0, 0x2089c1af3a44ac90, 0xf8499f911954fa8e, 0x1fba218aef40ab42}}}, +{{{0xab549448fac8f53e, 0x81f6e89a7ba63741, 0x74fd6c7d6c2b5e01, 0x392e3acaa8c86e42}}, + {{0x4f3e57043e7b0194, 0xa81d3eee08daaf7f, 0xc839c6ab99dcdef1, 0x6c535d13ff7761d5}}, + {{0x4cbd34e93e8a35af, 0x2e0781445887e816, 0x19319c76f29ab0ab, 0x25e17fe4d50ac13b}}}, +{{{0x0a289bd71e04f676, 0x208e1c52d6420f95, 0x5186d8b034691fab, 0x255751442a9fb351}}, + {{0x915f7ff576f121a7, 0xc34a32272fcd87e3, 0xccba2fde4d1be526, 0x6bba828f8969899b}}, + {{0xe2d1bc6690fe3901, 0x4cb54a18a0997ad5, 0x971d6914af8460d4, 0x559d504f7f6b7be4}}}, +{{{0xa7738378b3eb54d5, 0x1d69d366a5553c7c, 0x0a26cf62f92800ba, 0x01ab12d5807e3217}}, + {{0x9c4891e7f6d266fd, 0x0744a19b0307781b, 0x88388f1d6061e23b, 0x123ea6a3354bd50e}}, + {{0x118d189041e32d96, 0xb9ede3c2d8315848, 0x1eab4271d83245d9, 0x4a3961e2c918a154}}}, +{{{0x71dc3be0f8e6bba0, 0xd6cef8347effe30a, 0xa992425fe13a476a, 0x2cd6bce3fb1db763}}, + {{0x0327d644f3233f1e, 0x499a260e34fcf016, 0x83b5a716f2dab979, 0x68aceead9bd4111f}}, + {{0x38b4c90ef3d7c210, 0x308e6e24b7ad040c, 0x3860d9f1b7e73e23, 0x595760d5b508f597}}}, +{{{0x6129bfe104aa6397, 0x8f960008a4a7fccb, 0x3f8bc0897d909458, 0x709fa43edcb291a9}}, + {{0x882acbebfd022790, 0x89af3305c4115760, 0x65f492e37d3473f4, 0x2cb2c5df54515a2b}}, + {{0xeb0a5d8c63fd2aca, 0xd22bc1662e694eff, 0x2723f36ef8cbb03a, 0x70f029ecf0c8131f}}}, +{{{0x461307b32eed3e33, 0xae042f33a45581e7, 0xc94449d3195f0366, 0x0b7d5d8a6c314858}}, + {{0x2a6aafaa5e10b0b9, 0x78f0a370ef041aa9, 0x773efb77aa3ad61f, 0x44eca5a2a74bd9e1}}, + {{0x25d448327b95d543, 0x70d38300a3340f1d, 0xde1c531c60e1c52b, 0x272224512c7de9e4}}}, +{{{0x1abc92af49c5342e, 0xffeed811b2e6fad0, 0xefa28c8dfcc84e29, 0x11b5df18a44cc543}}, + {{0xbf7bbb8a42a975fc, 0x8c5c397796ada358, 0xe27fc76fcdedaa48, 0x19735fd7f6bc20a6}}, + {{0xe3ab90d042c84266, 0xeb848e0f7f19547e, 0x2503a1d065a497b9, 0x0fef911191df895f}}} \ No newline at end of file diff --git a/src/ed25519-supercop-amd64-64-24k/ge25519_base_slide_multiples.data b/src/ed25519-supercop-amd64-64-24k/ge25519_base_slide_multiples.data new file mode 100644 index 0000000..32a5d47 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/ge25519_base_slide_multiples.data @@ -0,0 +1,96 @@ +{{{0x9d103905d740913e, 0xfd399f05d140beb3, 0xa5c18434688f8a09, 0x44fd2f9298f81267}}, + {{0x2fbc93c6f58c3b85, 0xcf932dc6fb8c0e19, 0x270b4898643d42c2, 0x07cf9d3a33d4ba65}}, + {{0xabc91205877aaa68, 0x26d9e823ccaac49e, 0x5a1b7dcbdd43598c, 0x6f117b689f0c65a8}}}, +{{{0x56611fe8a4fcd265, 0x3bd353fde5c1ba7d, 0x8131f31a214bd6bd, 0x2ab91587555bda62}}, + {{0xaf25b0a84cee9730, 0x025a8430e8864b8a, 0xc11b50029f016732, 0x7a164e1b9a80f8f4}}, + {{0x14ae933f0dd0d889, 0x589423221c35da62, 0xd170e5458cf2db4c, 0x5a2826af12b9b4c6}}}, +{{{0x7f9182c3a447d6ba, 0xd50014d14b2729b7, 0xe33cf11cb864a087, 0x154a7e73eb1b55f3}}, + {{0xa212bc4408a5bb33, 0x8d5048c3c75eed02, 0xdd1beb0c5abfec44, 0x2945ccf146e206eb}}, + {{0xbcbbdbf1812a8285, 0x270e0807d0bdd1fc, 0xb41b670b1bbda72d, 0x43aabe696b3bb69a}}}, +{{{0xba6f2c9aaa3221b1, 0x6ca021533bba23a7, 0x9dea764f92192c3a, 0x1d6edd5d2e5317e0}}, + {{0x6b1a5cd0944ea3bf, 0x7470353ab39dc0d2, 0x71b2528228542e49, 0x461bea69283c927e}}, + 
{{0xf1836dc801b8b3a2, 0xb3035f47053ea49a, 0x529c41ba5877adf3, 0x7a9fbb1c6a0f90a7}}}, +{{{0xf36e217e039d8064, 0x98a081b6f520419b, 0x96cbc608e75eb044, 0x49c05a51fadc9c8f}}, + {{0x9b2e678aa6a8632f, 0xa6509e6f51bc46c5, 0xceb233c9c686f5b5, 0x34b9ed338add7f59}}, + {{0x06b4e8bf9045af1b, 0xe2ff83e8a719d22f, 0xaaf6fc2993d4cf16, 0x73c172021b008b06}}}, +{{{0x315f5b0249864348, 0x3ed6b36977088381, 0xa3a075556a8deb95, 0x18ab598029d5c77f}}, + {{0x2fbf00848a802ade, 0xe5d9fecf02302e27, 0x113e847117703406, 0x4275aae2546d8faf}}, + {{0xd82b2cc5fd6089e9, 0x031eb4a13282e4a4, 0x44311199b51a8622, 0x3dc65522b53df948}}}, +{{{0x506f013b327fbf93, 0xaefcebc99b776f6b, 0x9d12b232aaad5968, 0x0267882d176024a7}}, + {{0xbf70c222a2007f6d, 0xbf84b39ab5bcdedb, 0x537a0e12fb07ba07, 0x234fd7eec346f241}}, + {{0x5360a119732ea378, 0x2437e6b1df8dd471, 0xa2ef37f891a7e533, 0x497ba6fdaa097863}}}, +{{{0x040bcd86468ccf0b, 0xd3829ba42a9910d6, 0x7508300807b25192, 0x43b5cd4218d05ebf}}, + {{0x24cecc0313cfeaa0, 0x8648c28d189c246d, 0x2dbdbdfac1f2d4d0, 0x61e22917f12de72b}}, + {{0x5d9a762f9bd0b516, 0xeb38af4e373fdeee, 0x032e5a7d93d64270, 0x511d61210ae4d842}}}, +{{{0x081386484420de87, 0x8a1cf016b592edb4, 0x39fa4e2729942d25, 0x71a7fe6fe2482810}}, + {{0x92c676ef950e9d81, 0xa54620cdc0d7044f, 0xaa9b36646f8f1248, 0x6d325924ddb855e3}}, + {{0x6c7182b8a5c8c854, 0x33fd1479fe5f2a03, 0x72cf591883778d0c, 0x4746c4b6559eeaa9}}}, +{{{0x348546c864741147, 0x7d35aedd0efcc849, 0xff939a760672a332, 0x219663497db5e6d6}}, + {{0xd3777b3c6dc69a2b, 0xdefab2276f89f617, 0x45651cf7b53a16b5, 0x5c9a51de34fe9fb7}}, + {{0xf510f1cf79f10e67, 0xffdddaa1e658515b, 0x09c3a71710142277, 0x4804503c608223bb}}}, +{{{0x3b6821d23a36d175, 0xbbb40aa7e99b9e32, 0x5d9e5ce420838a47, 0x771e098858de4c5e}}, + {{0xc4249ed02ca37fc7, 0xa059a0e3a615acab, 0x88a96ed7c96e0e23, 0x553398a51650696d}}, + {{0x9a12f5d278451edf, 0x3ada5d7985899ccb, 0x477f4a2d9fa59508, 0x5a5ed1d68ff5a611}}}, +{{{0xbae5e0c558527359, 0x392e5c19cadb9d7e, 0x28653c1eda1cabe9, 0x019b60135fefdc44}}, + {{0x1195122afe150e83, 0xcf209a257e4b35d8, 0x7387f8291e711e20, 0x44acb897d8bf92f0}}, + {{0x1e6068145e134b83, 0xc4f5e64f24304c16, 0x506e88a8fc1a3ed7, 0x150c49fde6ad2f92}}}, +{{{0xb849863c9cdca868, 0xc83f44dbb8714ad0, 0xfe3ee3560c36168d, 0x78a6d7791e05fbc1}}, + {{0x8e7bf29509471138, 0x5d6fef394f75a651, 0x10af79c425a708ad, 0x6b2b5a075bb99922}}, + {{0x58bf704b47a0b976, 0xa601b355741748d5, 0xaa2b1fb1d542f590, 0x725c7ffc4ad55d00}}}, +{{{0x91802bf71cd098c0, 0xfe416ca4ed5e6366, 0xdf585d714902994c, 0x4cd54625f855fae7}}, + {{0xe4426715d1cf99b2, 0x7352d51102a20d34, 0x23d1157b8b12109f, 0x794cc9277cb1f3a3}}, + {{0x4af6c426c2ac5053, 0xbc9aedad32f67258, 0x2ad032f10a311021, 0x7008357b6fcc8e85}}}, +{{{0xd01b9fbb82584a34, 0x47ab6463d2b4792b, 0xb631639c48536202, 0x13a92a3669d6d428}}, + {{0x0b88672738773f01, 0xb8ccc8fa95fbccfb, 0x8d2dd5a3b9ad29b6, 0x06ef7e9851ad0f6a}}, + {{0xca93771cc0577de5, 0x7540e41e5035dc5c, 0x24680f01d802e071, 0x3c296ddf8a2af86a}}}, +{{{0xfceb4d2ebb1f2541, 0xb89510c740adb91f, 0xfc71a37dd0a1ad05, 0x0a892c700747717b}}, + {{0xaead15f9d914a713, 0xa92f7bf98c8ff912, 0xaff823179f53d730, 0x7a99d393490c77ba}}, + {{0x8f52ed2436bda3e8, 0x77a8c84157e80794, 0xa5a96563262f9ce0, 0x286762d28302f7d2}}}, +{{{0x7c558e2bce2ef5bd, 0xe4986cb46747bc63, 0x154a179f3bbb89b8, 0x7686f2a3d6f1767a}}, + {{0x4e7836093ce35b25, 0x82e1181db26baa97, 0x0cc192d3cbc7b83f, 0x32f1da046a9d9d3a}}, + {{0xaa8d12a66d597c6a, 0x8f11930304d3852b, 0x3f91dc73c209b022, 0x561305f8a9ad28a6}}}, +{{{0x6722cc28e7b0c0d5, 0x709de9bbdb075c53, 0xcaf68da7d7010a61, 0x030a1aef2c57cc6c}}, + 
{{0x100c978dec92aed1, 0xca43d5434d6d73e5, 0x83131b22d847ba48, 0x00aaec53e35d4d2c}}, + {{0x7bb1f773003ad2aa, 0x0b3f29802b216608, 0x7821dc86520ed23e, 0x20be9c1c24065480}}}, +{{{0x20e0e44ae2025e60, 0xb03b3b2fcbdcb938, 0x105d639cf95a0d1c, 0x69764c545067e311}}, + {{0xe15387d8249673a6, 0x5943bc2df546e493, 0x1c7f9a81c36f63b5, 0x750ab3361f0ac1de}}, + {{0x1e8a3283a2f81037, 0x6f2eda23bd7fcbf1, 0xb72fd15bac2e2563, 0x54f96b3fb7075040}}}, +{{{0x177dafc616b11ecd, 0x89764b9cfa576479, 0xb7a8a110e6ece785, 0x78e6839fbe85dbf0}}, + {{0x0fadf20429669279, 0x3adda2047d7d724a, 0x6f3d94828c5760f1, 0x3d7fe9c52bb7539e}}, + {{0x70332df737b8856b, 0x75d05d43041a178a, 0x320ff74aa0e59e22, 0x70f268f350088242}}}, +{{{0x2324112070dcf355, 0x380cc97ee7fce117, 0xb31ddeed3552b698, 0x404e56c039b8c4b9}}, + {{0x66864583b1805f47, 0xf535c5d160dd7c19, 0xe9874eb71e4cb006, 0x7c0d345cfad889d9}}, + {{0x591f1f4b8c78338a, 0xa0366ab167e0b5e1, 0x5cbc4152b45f3d44, 0x20d754762aaec777}}}, +{{{0x9d74feb135b9f543, 0x84b37df1de8c956c, 0xe9322b0757138ba9, 0x38b8ada8790b4ce1}}, + {{0x5e8fc36fc73bb758, 0xace543a5363cbb9a, 0xa9934a7d903bc922, 0x2b8f1e46f3ceec62}}, + {{0xb5c04a9cdf51f95d, 0x2b3952aecb1fdeac, 0x1d106d8b328b66da, 0x049aeb32ceba1953}}}, +{{{0xd7767d3c63dcfe7e, 0x209c594897856e40, 0xb6676861e14f7c13, 0x51c665e0c8d625fc}}, + {{0xaa507d0b75fc7931, 0x0fef924b7a6725d3, 0x1d82542b396b3930, 0x795ee17530f674fc}}, + {{0x254a5b0a52ecbd81, 0x5d411f6ee034afe7, 0xe6a24d0dcaee4a31, 0x6cd19bf49dc54477}}}, +{{{0x7e87619052179ca3, 0x571d0a060b2c9f85, 0x80a2baa88499711e, 0x7520f3db40b2e638}}, + {{0x1ffe612165afc386, 0x082a2a88b8d51b10, 0x76f6627e20990baa, 0x5e01b3a7429e43e7}}, + {{0x3db50be3d39357a1, 0x967b6cdd599e94a5, 0x1a309a64df311e6e, 0x71092c9ccef3c986}}}, +{{{0x53d8523f0364918c, 0xa2b404f43fab6b1c, 0x080b4a9e6681e5a4, 0x0ea15b03d0257ba7}}, + {{0x856bd8ac74051dcf, 0x03f6a40855b7aa1e, 0x3a4ae7cbc9743ceb, 0x4173a5bb7137abde}}, + {{0x17c56e31f0f9218a, 0x5a696e2b1afc4708, 0xf7931668f4b2f176, 0x5fc565614a4e3a67}}}, +{{{0x136e570dc46d7ae5, 0x0fd0aacc54f8dc8f, 0x59549f03310dad86, 0x62711c414c454aa1}}, + {{0x4892e1e67790988e, 0x01d5950f1c5cd722, 0xe3b0819ae5923eed, 0x3214c7409d46651b}}, + {{0x1329827406651770, 0x3ba4a0668a279436, 0xd9b6b8ec185d223c, 0x5bea94073ecb833c}}}, +{{{0x641dbf0912c89be4, 0xacf38b317d6e579c, 0xabfe9e02f697b065, 0x3aacd5c148f61eec}}, + {{0xb470ce63f343d2f8, 0x0067ba8f0543e8f1, 0x35da51a1a2117b6f, 0x4ad0785944f1bd2f}}, + {{0x858e3b34c3318301, 0xdc99c04707316826, 0x34085b2ed39da88c, 0x3aff0cb1d902853d}}}, +{{{0x87c5c7eb3a20405e, 0x8ee311efedad56c9, 0x29252e48ad29d5f9, 0x110e7e86f4cd251d}}, + {{0x9226430bf4c53505, 0x68e49c13261f2283, 0x09ef33788fd327c6, 0x2ccf9f732bd99e7f}}, + {{0x57c0d89ed603f5e4, 0x12888628f0b0200c, 0x53172709a02e3bb7, 0x05c557e0b9693a37}}}, +{{{0xd8f9ce311fc97e6f, 0x7a3f263011f9fdae, 0xe15b7ea08bed25dd, 0x6e154c178fe9875a}}, + {{0xf776bbb089c20eb0, 0x61f85bf6fa0fd85c, 0xb6b93f4e634421fb, 0x289fef0841861205}}, + {{0xcf616336fed69abf, 0x9b16e4e78335c94f, 0x13789765753a7fe7, 0x6afbf642a95ca319}}}, +{{{0x7da8de0c62f5d2c1, 0x98fc3da4b00e7b9a, 0x7deb6ada0dad70e0, 0x0db4b851b95038c4}}, + {{0x5de55070f913a8cc, 0x7d1d167b2b0cf561, 0xda2956b690ead489, 0x12c093cedb801ed9}}, + {{0xfc147f9308b8190f, 0x06969da0a11ae310, 0xcee75572dac7d7fd, 0x33aa8799c6635ce6}}}, +{{{0xaf0ff51ebd085cf2, 0x78f51a8967d33f1f, 0x6ec2bfe15060033c, 0x233c6f29e8e21a86}}, + {{0x8348f588fc156cb1, 0x6da2ba9b1a0a6d27, 0xe2262d5c87ca5ab6, 0x212cd0c1c8d589a6}}, + {{0xd2f4d5107f18c781, 0x122ecdf2527e9d28, 0xa70a862a3d3d3341, 0x1db7778911914ce3}}}, 
+{{{0xddf352397c6bc26f, 0x7a97e2cc53d50113, 0x7c74f43abf79a330, 0x31ad97ad26e2adfc}},
+ {{0xb3394769dd701ab6, 0xe2b8ded419cf8da5, 0x15df4161fd2ac852, 0x7ae2ca8a017d24be}},
+ {{0xb7e817ed0920b962, 0x1e8518cc3f19da9d, 0xe491c14f25560a64, 0x1ed1fc53a6622c83}}}
\ No newline at end of file
diff --git a/src/ed25519-supercop-amd64-64-24k/ge25519_dbl_p1p1.s b/src/ed25519-supercop-amd64-64-24k/ge25519_dbl_p1p1.s
new file mode 100644
index 0000000..c1df68b
--- /dev/null
+++ b/src/ed25519-supercop-amd64-64-24k/ge25519_dbl_p1p1.s
@@ -0,0 +1,2891 @@
+
+# qhasm: int64 rp
+
+# qhasm: int64 pp
+
+# qhasm: input rp
+
+# qhasm: input pp
+
+# qhasm: int64 a0
+
+# qhasm: int64 a1
+
+# qhasm: int64 a2
+
+# qhasm: int64 a3
+
+# qhasm: stack64 a0_stack
+
+# qhasm: stack64 a1_stack
+
+# qhasm: stack64 a2_stack
+
+# qhasm: stack64 a3_stack
+
+# qhasm: int64 b0
+
+# qhasm: int64 b1
+
+# qhasm: int64 b2
+
+# qhasm: int64 b3
+
+# qhasm: stack64 b0_stack
+
+# qhasm: stack64 b1_stack
+
+# qhasm: stack64 b2_stack
+
+# qhasm: stack64 b3_stack
+
+# qhasm: int64 c0
+
+# qhasm: int64 c1
+
+# qhasm: int64 c2
+
+# qhasm: int64 c3
+
+# qhasm: stack64 c0_stack
+
+# qhasm: stack64 c1_stack
+
+# qhasm: stack64 c2_stack
+
+# qhasm: stack64 c3_stack
+
+# qhasm: int64 d0
+
+# qhasm: int64 d1
+
+# qhasm: int64 d2
+
+# qhasm: int64 d3
+
+# qhasm: stack64 d0_stack
+
+# qhasm: stack64 d1_stack
+
+# qhasm: stack64 d2_stack
+
+# qhasm: stack64 d3_stack
+
+# qhasm: int64 e0
+
+# qhasm: int64 e1
+
+# qhasm: int64 e2
+
+# qhasm: int64 e3
+
+# qhasm: stack64 e0_stack
+
+# qhasm: stack64 e1_stack
+
+# qhasm: stack64 e2_stack
+
+# qhasm: stack64 e3_stack
+
+# qhasm: int64 rx0
+
+# qhasm: int64 rx1
+
+# qhasm: int64 rx2
+
+# qhasm: int64 rx3
+
+# qhasm: stack64 rx0_stack
+
+# qhasm: stack64 rx1_stack
+
+# qhasm: stack64 rx2_stack
+
+# qhasm: stack64 rx3_stack
+
+# qhasm: int64 ry0
+
+# qhasm: int64 ry1
+
+# qhasm: int64 ry2
+
+# qhasm: int64 ry3
+
+# qhasm: int64 ry4
+
+# qhasm: int64 rz0
+
+# qhasm: int64 rz1
+
+# qhasm: int64 rz2
+
+# qhasm: int64 rz3
+
+# qhasm: int64 rt0
+
+# qhasm: int64 rt1
+
+# qhasm: int64 rt2
+
+# qhasm: int64 rt3
+
+# qhasm: int64 mulr4
+
+# qhasm: int64 mulr5
+
+# qhasm: int64 mulr6
+
+# qhasm: int64 mulr7
+
+# qhasm: int64 mulrax
+
+# qhasm: int64 mulrdx
+
+# qhasm: int64 mulx0
+
+# qhasm: int64 mulx1
+
+# qhasm: int64 mulx2
+
+# qhasm: int64 mulx3
+
+# qhasm: int64 mulc
+
+# qhasm: int64 mulzero
+
+# qhasm: int64 muli38
+
+# qhasm: int64 squarer4
+
+# qhasm: int64 squarer5
+
+# qhasm: int64 squarer6
+
+# qhasm: int64 squarer7
+
+# qhasm: int64 squarerax
+
+# qhasm: int64 squarerdx
+
+# qhasm: int64 squaret1
+
+# qhasm: int64 squaret2
+
+# qhasm: int64 squaret3
+
+# qhasm: int64 squarec
+
+# qhasm: int64 squarezero
+
+# qhasm: int64 squarei38
+
+# qhasm: int64 addt0
+
+# qhasm: int64 addt1
+
+# qhasm: int64 subt0
+
+# qhasm: int64 subt1
+
+# qhasm: int64 caller1
+
+# qhasm: int64 caller2
+
+# qhasm: int64 caller3
+
+# qhasm: int64 caller4
+
+# qhasm: int64 caller5
+
+# qhasm: int64 caller6
+
+# qhasm: int64 caller7
+
+# qhasm: caller caller1
+
+# qhasm: caller caller2
+
+# qhasm: caller caller3
+
+# qhasm: caller caller4
+
+# qhasm: caller caller5
+
+# qhasm: caller caller6
+
+# qhasm: caller caller7
+
+# qhasm: stack64 caller1_stack
+
+# qhasm: stack64 caller2_stack
+
+# qhasm: stack64 caller3_stack
+
+# qhasm: stack64 caller4_stack
+
+# qhasm: stack64 caller5_stack
+
+# qhasm: stack64 caller6_stack
+
+# qhasm: stack64 caller7_stack
+
+# qhasm: enter crypto_sign_ed25519_amd64_64_24k_batch_ge25519_dbl_p1p1
+.text
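+# Note: everything below is qhasm output; the "# qhasm:" lines are the
+# source statements and the mov/mulq/add/adc runs are their expansion.
+# ge25519_dbl_p1p1 doubles the point at pp into the completed (P1P1)
+# representation at rp.  The long blocks that follow square the three
+# 4x64-limb field elements X (pp+0), Y (pp+32) and Z (pp+64); the
+# doubling then combines them essentially as in ref10's ge_p2_dbl:
+#   X3 = (X+Y)^2 - X^2 - Y^2,  Y3 = X^2 + Y^2,
+#   Z3 = Y^2 - X^2,            T3 = 2*Z^2 - Z3.
+# Each 512-bit square is reduced modulo p = 2^255 - 19 by folding the
+# high four limbs back in with the constant 38, since 2^256 = 38 (mod p);
+# that is what the repeated mulq crypto_sign_..._38 / add / adc chains
+# do, with an imulq and a cmovc mopping up the final carries.  A minimal
+# C sketch of that fold, assuming a compiler with __int128 and using a
+# hypothetical helper name (not this library's API, just the idea):
+#
+#   #include <stdint.h>
+#
+#   /* weakly reduce a 512-bit product r[0..7] modulo 2^255 - 19 */
+#   static void fe64_fold38(uint64_t out[4], const uint64_t r[8])
+#   {
+#       unsigned __int128 t;
+#       uint64_t c;
+#       t = (unsigned __int128)38*r[4] + r[0];                     out[0] = (uint64_t)t;
+#       t = (unsigned __int128)38*r[5] + r[1] + (uint64_t)(t>>64); out[1] = (uint64_t)t;
+#       t = (unsigned __int128)38*r[6] + r[2] + (uint64_t)(t>>64); out[2] = (uint64_t)t;
+#       t = (unsigned __int128)38*r[7] + r[3] + (uint64_t)(t>>64); out[3] = (uint64_t)t;
+#       c = (uint64_t)(t >> 64);               /* small: at most ~38 */
+#       t = (unsigned __int128)38*c + out[0];  out[0] = (uint64_t)t;
+#       c = (uint64_t)(t >> 64);               /* now 0 or 1 */
+#       for (int i = 1; i < 4 && c; i++) {     /* propagate the last carry */
+#           t = (unsigned __int128)out[i] + c;
+#           out[i] = (uint64_t)t;
+#           c = (uint64_t)(t >> 64);
+#       }
+#       out[0] += 38 * c;                      /* the cmovc step in the asm */
+#   }
+#
+# Results stay only weakly reduced (they may still exceed p); canonical
+# reduction is deferred to the freeze/pack step.  The two .data files
+# above hold precomputed multiples of the base point for the scalar
+# multiplication routines, stored as triples of field elements, four
+# little-endian 64-bit limbs each.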
+.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_24k_batch_ge25519_dbl_p1p1 +.globl crypto_sign_ed25519_amd64_64_24k_batch_ge25519_dbl_p1p1 +_crypto_sign_ed25519_amd64_64_24k_batch_ge25519_dbl_p1p1: +crypto_sign_ed25519_amd64_64_24k_batch_ge25519_dbl_p1p1: +mov %rsp,%r11 +and $31,%r11 +add $192,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: squarer7 = 0 +# asm 1: mov $0,>squarer7=int64#4 +# asm 2: mov $0,>squarer7=%rcx +mov $0,%rcx + +# qhasm: squarerax = *(uint64 *)(pp + 8) +# asm 1: movq 8(squarerax=int64#7 +# asm 2: movq 8(squarerax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 0) +# asm 1: mulq 0(a1=int64#5 +# asm 2: mov a1=%r8 +mov %rax,%r8 + +# qhasm: a2 = squarerdx +# asm 1: mov a2=int64#6 +# asm 2: mov a2=%r9 +mov %rdx,%r9 + +# qhasm: squarerax = *(uint64 *)(pp + 16) +# asm 1: movq 16(squarerax=int64#7 +# asm 2: movq 16(squarerax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 8) +# asm 1: mulq 8(a3=int64#8 +# asm 2: mov a3=%r10 +mov %rax,%r10 + +# qhasm: squarer4 = squarerdx +# asm 1: mov squarer4=int64#9 +# asm 2: mov squarer4=%r11 +mov %rdx,%r11 + +# qhasm: squarerax = *(uint64 *)(pp + 24) +# asm 1: movq 24(squarerax=int64#7 +# asm 2: movq 24(squarerax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 16) +# asm 1: mulq 16(squarer5=int64#10 +# asm 2: mov squarer5=%r12 +mov %rax,%r12 + +# qhasm: squarer6 = squarerdx +# asm 1: mov squarer6=int64#11 +# asm 2: mov squarer6=%r13 +mov %rdx,%r13 + +# qhasm: squarerax = *(uint64 *)(pp + 16) +# asm 1: movq 16(squarerax=int64#7 +# asm 2: movq 16(squarerax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 0) +# asm 1: mulq 0(squarerax=int64#7 +# asm 2: movq 24(squarerax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 8) +# asm 1: mulq 8(squarerax=int64#7 +# asm 2: movq 24(squarerax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 0) +# asm 1: mulq 0(squarerax=int64#7 +# asm 2: movq 0(squarerax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 0) +# asm 1: mulq 0(a0=int64#12 +# asm 2: mov a0=%r14 +mov %rax,%r14 + +# qhasm: squaret1 = squarerdx +# asm 1: mov squaret1=int64#13 +# asm 2: mov squaret1=%r15 +mov %rdx,%r15 + +# qhasm: squarerax = *(uint64 *)(pp + 8) +# asm 1: movq 8(squarerax=int64#7 +# asm 2: movq 8(squarerax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 
*)(pp + 8) +# asm 1: mulq 8(squaret2=int64#14 +# asm 2: mov squaret2=%rbx +mov %rax,%rbx + +# qhasm: squaret3 = squarerdx +# asm 1: mov squaret3=int64#15 +# asm 2: mov squaret3=%rbp +mov %rdx,%rbp + +# qhasm: squarerax = *(uint64 *)(pp + 16) +# asm 1: movq 16(squarerax=int64#7 +# asm 2: movq 16(squarerax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 16) +# asm 1: mulq 16(squarerax=int64#7 +# asm 2: movq 24(squarerax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 24) +# asm 1: mulq 24(squarerax=int64#7 +# asm 2: mov squarerax=%rax +mov %r11,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? a0 += squarerax +# asm 1: add squarerax=int64#7 +# asm 2: mov squarerax=%rax +mov %r12,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? a1 += squarerax +# asm 1: add squarerax=int64#7 +# asm 2: mov squarerax=%rax +mov %r13,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? a2 += squarerax +# asm 1: add squarerax=int64#7 +# asm 2: mov squarerax=%rax +mov %rcx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? a3 += squarerax +# asm 1: add squarer4=int64#4 +# asm 2: mov $0,>squarer4=%rcx +mov $0,%rcx + +# qhasm: squarer4 += squarerdx + carry +# asm 1: adc squarer4=int64#3 +# asm 2: imulq $38,squarer4=%rdx +imulq $38,%rcx,%rdx + +# qhasm: carry? 
a0 += squarer4 +# asm 1: add squarezero=int64#3 +# asm 2: mov $0,>squarezero=%rdx +mov $0,%rdx + +# qhasm: squarei38 = 38 +# asm 1: mov $38,>squarei38=int64#4 +# asm 2: mov $38,>squarei38=%rcx +mov $38,%rcx + +# qhasm: squarezero = squarei38 if carry +# asm 1: cmovc a0_stack=stack64#8 +# asm 2: movq a0_stack=56(%rsp) +movq %r14,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq a1_stack=stack64#9 +# asm 2: movq a1_stack=64(%rsp) +movq %r8,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq a2_stack=stack64#10 +# asm 2: movq a2_stack=72(%rsp) +movq %r9,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq a3_stack=stack64#11 +# asm 2: movq a3_stack=80(%rsp) +movq %r10,80(%rsp) + +# qhasm: squarer7 = 0 +# asm 1: mov $0,>squarer7=int64#4 +# asm 2: mov $0,>squarer7=%rcx +mov $0,%rcx + +# qhasm: squarerax = *(uint64 *)(pp + 40) +# asm 1: movq 40(squarerax=int64#7 +# asm 2: movq 40(squarerax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 32) +# asm 1: mulq 32(b1=int64#5 +# asm 2: mov b1=%r8 +mov %rax,%r8 + +# qhasm: b2 = squarerdx +# asm 1: mov b2=int64#6 +# asm 2: mov b2=%r9 +mov %rdx,%r9 + +# qhasm: squarerax = *(uint64 *)(pp + 48) +# asm 1: movq 48(squarerax=int64#7 +# asm 2: movq 48(squarerax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 40) +# asm 1: mulq 40(b3=int64#8 +# asm 2: mov b3=%r10 +mov %rax,%r10 + +# qhasm: squarer4 = squarerdx +# asm 1: mov squarer4=int64#9 +# asm 2: mov squarer4=%r11 +mov %rdx,%r11 + +# qhasm: squarerax = *(uint64 *)(pp + 56) +# asm 1: movq 56(squarerax=int64#7 +# asm 2: movq 56(squarerax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 48) +# asm 1: mulq 48(squarer5=int64#10 +# asm 2: mov squarer5=%r12 +mov %rax,%r12 + +# qhasm: squarer6 = squarerdx +# asm 1: mov squarer6=int64#11 +# asm 2: mov squarer6=%r13 +mov %rdx,%r13 + +# qhasm: squarerax = *(uint64 *)(pp + 48) +# asm 1: movq 48(squarerax=int64#7 +# asm 2: movq 48(squarerax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 32) +# asm 1: mulq 32(squarerax=int64#7 +# asm 2: movq 56(squarerax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 40) +# asm 1: mulq 40(squarerax=int64#7 +# asm 2: movq 56(squarerax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 32) +# asm 1: mulq 32(squarerax=int64#7 +# asm 2: movq 32(squarerax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 32) +# asm 1: mulq 32(b0=int64#12 +# asm 2: mov b0=%r14 +mov %rax,%r14 + +# qhasm: squaret1 = squarerdx +# asm 1: mov squaret1=int64#13 +# asm 2: mov squaret1=%r15 +mov %rdx,%r15 + +# qhasm: squarerax = *(uint64 *)(pp + 40) +# asm 1: movq 40(squarerax=int64#7 +# asm 2: movq 40(squarerax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 40) +# asm 1: mulq 40(squaret2=int64#14 +# asm 2: mov squaret2=%rbx +mov %rax,%rbx + +# qhasm: squaret3 = squarerdx +# asm 1: mov squaret3=int64#15 +# asm 2: mov squaret3=%rbp +mov %rdx,%rbp + +# qhasm: squarerax = *(uint64 *)(pp + 48) +# asm 1: movq 48(squarerax=int64#7 +# asm 2: movq 48(squarerax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 48) +# asm 1: mulq 48(squarerax=int64#7 +# asm 2: movq 56(squarerax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = 
squarerax * *(uint64 *)(pp + 56) +# asm 1: mulq 56(squarerax=int64#7 +# asm 2: mov squarerax=%rax +mov %r11,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? b0 += squarerax +# asm 1: add squarerax=int64#7 +# asm 2: mov squarerax=%rax +mov %r12,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? b1 += squarerax +# asm 1: add squarerax=int64#7 +# asm 2: mov squarerax=%rax +mov %r13,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? b2 += squarerax +# asm 1: add squarerax=int64#7 +# asm 2: mov squarerax=%rax +mov %rcx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? b3 += squarerax +# asm 1: add squarer4=int64#4 +# asm 2: mov $0,>squarer4=%rcx +mov $0,%rcx + +# qhasm: squarer4 += squarerdx + carry +# asm 1: adc squarer4=int64#3 +# asm 2: imulq $38,squarer4=%rdx +imulq $38,%rcx,%rdx + +# qhasm: carry? b0 += squarer4 +# asm 1: add squarezero=int64#3 +# asm 2: mov $0,>squarezero=%rdx +mov $0,%rdx + +# qhasm: squarei38 = 38 +# asm 1: mov $38,>squarei38=int64#4 +# asm 2: mov $38,>squarei38=%rcx +mov $38,%rcx + +# qhasm: squarezero = squarei38 if carry +# asm 1: cmovc b0_stack=stack64#12 +# asm 2: movq b0_stack=88(%rsp) +movq %r14,88(%rsp) + +# qhasm: b1_stack = b1 +# asm 1: movq b1_stack=stack64#13 +# asm 2: movq b1_stack=96(%rsp) +movq %r8,96(%rsp) + +# qhasm: b2_stack = b2 +# asm 1: movq b2_stack=stack64#14 +# asm 2: movq b2_stack=104(%rsp) +movq %r9,104(%rsp) + +# qhasm: b3_stack = b3 +# asm 1: movq b3_stack=stack64#15 +# asm 2: movq b3_stack=112(%rsp) +movq %r10,112(%rsp) + +# qhasm: squarer7 = 0 +# asm 1: mov $0,>squarer7=int64#4 +# asm 2: mov $0,>squarer7=%rcx +mov $0,%rcx + +# qhasm: squarerax = *(uint64 *)(pp + 72) +# asm 1: movq 72(squarerax=int64#7 +# asm 2: movq 72(squarerax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(c1=int64#5 +# asm 2: mov c1=%r8 +mov %rax,%r8 + +# qhasm: c2 = squarerdx +# asm 1: mov c2=int64#6 +# asm 2: mov c2=%r9 +mov %rdx,%r9 + +# qhasm: squarerax = *(uint64 *)(pp + 80) +# asm 1: movq 80(squarerax=int64#7 +# asm 2: movq 80(squarerax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(c3=int64#8 +# asm 2: mov c3=%r10 +mov %rax,%r10 + +# qhasm: squarer4 = squarerdx +# asm 1: mov squarer4=int64#9 +# asm 2: mov squarer4=%r11 +mov %rdx,%r11 + +# qhasm: squarerax = *(uint64 *)(pp + 88) +# asm 1: movq 88(squarerax=int64#7 +# asm 2: movq 88(squarerax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 80) +# asm 1: mulq 80(squarer5=int64#10 +# asm 2: mov squarer5=%r12 +mov %rax,%r12 + +# qhasm: squarer6 = squarerdx +# asm 1: mov squarer6=int64#11 +# asm 2: mov squarer6=%r13 +mov %rdx,%r13 + +# qhasm: squarerax = *(uint64 *)(pp + 80) +# asm 1: movq 80(squarerax=int64#7 +# asm 2: movq 80(squarerax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(squarerax=int64#7 +# asm 2: movq 88(squarerax=%rax +movq 
88(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(squarerax=int64#7 +# asm 2: movq 88(squarerax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(squarerax=int64#7 +# asm 2: movq 64(squarerax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 64) +# asm 1: mulq 64(c0=int64#12 +# asm 2: mov c0=%r14 +mov %rax,%r14 + +# qhasm: squaret1 = squarerdx +# asm 1: mov squaret1=int64#13 +# asm 2: mov squaret1=%r15 +mov %rdx,%r15 + +# qhasm: squarerax = *(uint64 *)(pp + 72) +# asm 1: movq 72(squarerax=int64#7 +# asm 2: movq 72(squarerax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 72) +# asm 1: mulq 72(squaret2=int64#14 +# asm 2: mov squaret2=%rbx +mov %rax,%rbx + +# qhasm: squaret3 = squarerdx +# asm 1: mov squaret3=int64#15 +# asm 2: mov squaret3=%rbp +mov %rdx,%rbp + +# qhasm: squarerax = *(uint64 *)(pp + 80) +# asm 1: movq 80(squarerax=int64#7 +# asm 2: movq 80(squarerax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 80) +# asm 1: mulq 80(squarerax=int64#7 +# asm 2: movq 88(squarerax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)(pp + 88) +# asm 1: mulq 88(squarerax=int64#7 +# asm 2: mov squarerax=%rax +mov %r11,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c0 += squarerax +# asm 1: add squarerax=int64#7 +# asm 2: mov squarerax=%rax +mov %r12,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c1 += squarerax +# asm 1: add squarerax=int64#7 +# asm 2: mov squarerax=%rax +mov %r13,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c2 += squarerax +# asm 1: add squarerax=int64#7 +# asm 2: mov squarerax=%rax +mov %rcx,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c3 += squarerax +# asm 1: add squarer4=int64#4 +# asm 2: mov $0,>squarer4=%rcx +mov $0,%rcx + +# qhasm: squarer4 += squarerdx + carry +# asm 1: adc squarer4=int64#3 +# asm 2: imulq $38,squarer4=%rdx +imulq $38,%rcx,%rdx + +# qhasm: carry? 
c0 += squarer4 +# asm 1: add squarezero=int64#3 +# asm 2: mov $0,>squarezero=%rdx +mov $0,%rdx + +# qhasm: squarei38 = 38 +# asm 1: mov $38,>squarei38=int64#4 +# asm 2: mov $38,>squarei38=%rcx +mov $38,%rcx + +# qhasm: squarezero = squarei38 if carry +# asm 1: cmovc addt0=int64#3 +# asm 2: mov $0,>addt0=%rdx +mov $0,%rdx + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#4 +# asm 2: mov $38,>addt1=%rcx +mov $38,%rcx + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae c0_stack=stack64#16 +# asm 2: movq c0_stack=120(%rsp) +movq %r14,120(%rsp) + +# qhasm: c1_stack = c1 +# asm 1: movq c1_stack=stack64#17 +# asm 2: movq c1_stack=128(%rsp) +movq %r8,128(%rsp) + +# qhasm: c2_stack = c2 +# asm 1: movq c2_stack=stack64#18 +# asm 2: movq c2_stack=136(%rsp) +movq %r9,136(%rsp) + +# qhasm: c3_stack = c3 +# asm 1: movq c3_stack=stack64#19 +# asm 2: movq c3_stack=144(%rsp) +movq %r10,144(%rsp) + +# qhasm: d0 = 0 +# asm 1: mov $0,>d0=int64#3 +# asm 2: mov $0,>d0=%rdx +mov $0,%rdx + +# qhasm: d1 = 0 +# asm 1: mov $0,>d1=int64#4 +# asm 2: mov $0,>d1=%rcx +mov $0,%rcx + +# qhasm: d2 = 0 +# asm 1: mov $0,>d2=int64#5 +# asm 2: mov $0,>d2=%r8 +mov $0,%r8 + +# qhasm: d3 = 0 +# asm 1: mov $0,>d3=int64#6 +# asm 2: mov $0,>d3=%r9 +mov $0,%r9 + +# qhasm: carry? d0 -= a0_stack +# asm 1: subq subt0=int64#7 +# asm 2: mov $0,>subt0=%rax +mov $0,%rax + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#8 +# asm 2: mov $38,>subt1=%r10 +mov $38,%r10 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae d0_stack=stack64#8 +# asm 2: movq d0_stack=56(%rsp) +movq %rdx,56(%rsp) + +# qhasm: d1_stack = d1 +# asm 1: movq d1_stack=stack64#9 +# asm 2: movq d1_stack=64(%rsp) +movq %rcx,64(%rsp) + +# qhasm: d2_stack = d2 +# asm 1: movq d2_stack=stack64#10 +# asm 2: movq d2_stack=72(%rsp) +movq %r8,72(%rsp) + +# qhasm: d3_stack = d3 +# asm 1: movq d3_stack=stack64#11 +# asm 2: movq d3_stack=80(%rsp) +movq %r9,80(%rsp) + +# qhasm: e0 = 0 +# asm 1: mov $0,>e0=int64#7 +# asm 2: mov $0,>e0=%rax +mov $0,%rax + +# qhasm: e1 = 0 +# asm 1: mov $0,>e1=int64#8 +# asm 2: mov $0,>e1=%r10 +mov $0,%r10 + +# qhasm: e2 = 0 +# asm 1: mov $0,>e2=int64#9 +# asm 2: mov $0,>e2=%r11 +mov $0,%r11 + +# qhasm: e3 = 0 +# asm 1: mov $0,>e3=int64#10 +# asm 2: mov $0,>e3=%r12 +mov $0,%r12 + +# qhasm: carry? e0 -= b0_stack +# asm 1: subq subt0=int64#11 +# asm 2: mov $0,>subt0=%r13 +mov $0,%r13 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#12 +# asm 2: mov $38,>subt1=%r14 +mov $38,%r14 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae e0_stack=stack64#20 +# asm 2: movq e0_stack=152(%rsp) +movq %rax,152(%rsp) + +# qhasm: e1_stack = e1 +# asm 1: movq e1_stack=stack64#21 +# asm 2: movq e1_stack=160(%rsp) +movq %r10,160(%rsp) + +# qhasm: e2_stack = e2 +# asm 1: movq e2_stack=stack64#22 +# asm 2: movq e2_stack=168(%rsp) +movq %r11,168(%rsp) + +# qhasm: e3_stack = e3 +# asm 1: movq e3_stack=stack64#23 +# asm 2: movq e3_stack=176(%rsp) +movq %r12,176(%rsp) + +# qhasm: rz0 = d0 +# asm 1: mov rz0=int64#7 +# asm 2: mov rz0=%rax +mov %rdx,%rax + +# qhasm: rz1 = d1 +# asm 1: mov rz1=int64#8 +# asm 2: mov rz1=%r10 +mov %rcx,%r10 + +# qhasm: rz2 = d2 +# asm 1: mov rz2=int64#9 +# asm 2: mov rz2=%r11 +mov %r8,%r11 + +# qhasm: rz3 = d3 +# asm 1: mov rz3=int64#10 +# asm 2: mov rz3=%r12 +mov %r9,%r12 + +# qhasm: carry? 
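The d and e computations in this stretch negate field elements: the four limbs start at zero, the subtraction runs through a borrow chain, and a borrow out of the top limb is repaired with the constant 38, because 2^256 is congruent to 38 modulo 2^255 - 19. A minimal C sketch of that subtract-and-fold pattern, with hypothetical names (fe64_sub_sketch is not part of this package), assuming gcc/clang __int128 with arithmetic right shift:

    #include <stdint.h>

    /* r = x - y on four 64-bit limbs, reduced the way the qhasm does it:
     * a borrow out of the top limb means the result wrapped by 2^256,
     * and 2^256 = 38 (mod 2^255 - 19), so 38 is taken back out; the
     * rare second borrow is absorbed by one more fold. */
    static void fe64_sub_sketch(uint64_t r[4], const uint64_t x[4],
                                const uint64_t y[4])
    {
        __int128 acc = 0;                  /* signed: borrows go negative */
        int i;

        for (i = 0; i < 4; i++) {
            acc += (__int128)x[i] - y[i];
            r[i] = (uint64_t)acc;
            acc >>= 64;                    /* 0 or -1 after each limb */
        }
        acc = (__int128)r[0] - (uint64_t)(-acc) * 38;
        r[0] = (uint64_t)acc;
        acc >>= 64;
        for (i = 1; i < 4; i++) {          /* propagate the borrow, if any */
            acc += r[i];
            r[i] = (uint64_t)acc;
            acc >>= 64;
        }
        r[0] -= (uint64_t)(-acc) * 38;     /* second fold cannot borrow */
    }

Addition is symmetric: the carry out of the top limb is folded back in as +38, which is the cmovae/cmovc-38 idiom that recurs throughout these routines. Results stay in a redundant (not fully reduced) four-limb form, which suffices for the intermediate arithmetic here.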
rz0 += b0_stack +# asm 1: addq addt0=int64#11 +# asm 2: mov $0,>addt0=%r13 +mov $0,%r13 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#12 +# asm 2: mov $38,>addt1=%r14 +mov $38,%r14 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae subt0=int64#11 +# asm 2: mov $0,>subt0=%r13 +mov $0,%r13 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#12 +# asm 2: mov $38,>subt1=%r14 +mov $38,%r14 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae subt0=int64#3 +# asm 2: mov $0,>subt0=%rdx +mov $0,%rdx + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#4 +# asm 2: mov $38,>subt1=%rcx +mov $38,%rcx + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae rx0=int64#3 +# asm 2: movq 0(rx0=%rdx +movq 0(%rsi),%rdx + +# qhasm: rx1 = *(uint64 *)(pp + 8) +# asm 1: movq 8(rx1=int64#4 +# asm 2: movq 8(rx1=%rcx +movq 8(%rsi),%rcx + +# qhasm: rx2 = *(uint64 *)(pp + 16) +# asm 1: movq 16(rx2=int64#5 +# asm 2: movq 16(rx2=%r8 +movq 16(%rsi),%r8 + +# qhasm: rx3 = *(uint64 *)(pp + 24) +# asm 1: movq 24(rx3=int64#6 +# asm 2: movq 24(rx3=%r9 +movq 24(%rsi),%r9 + +# qhasm: carry? rx0 += *(uint64 *)(pp + 32) +# asm 1: addq 32(addt0=int64#2 +# asm 2: mov $0,>addt0=%rsi +mov $0,%rsi + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#7 +# asm 2: mov $38,>addt1=%rax +mov $38,%rax + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae rx0_stack=stack64#12 +# asm 2: movq rx0_stack=88(%rsp) +movq %rdx,88(%rsp) + +# qhasm: rx1_stack = rx1 +# asm 1: movq rx1_stack=stack64#13 +# asm 2: movq rx1_stack=96(%rsp) +movq %rcx,96(%rsp) + +# qhasm: rx2_stack = rx2 +# asm 1: movq rx2_stack=stack64#14 +# asm 2: movq rx2_stack=104(%rsp) +movq %r8,104(%rsp) + +# qhasm: rx3_stack = rx3 +# asm 1: movq rx3_stack=stack64#15 +# asm 2: movq rx3_stack=112(%rsp) +movq %r9,112(%rsp) + +# qhasm: squarer7 = 0 +# asm 1: mov $0,>squarer7=int64#2 +# asm 2: mov $0,>squarer7=%rsi +mov $0,%rsi + +# qhasm: squarerax = rx1_stack +# asm 1: movq squarerax=int64#7 +# asm 2: movq squarerax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx0_stack +# asm 1: mulq rx1=int64#4 +# asm 2: mov rx1=%rcx +mov %rax,%rcx + +# qhasm: rx2 = squarerdx +# asm 1: mov rx2=int64#5 +# asm 2: mov rx2=%r8 +mov %rdx,%r8 + +# qhasm: squarerax = rx2_stack +# asm 1: movq squarerax=int64#7 +# asm 2: movq squarerax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx1_stack +# asm 1: mulq rx3=int64#6 +# asm 2: mov rx3=%r9 +mov %rax,%r9 + +# qhasm: squarer4 = squarerdx +# asm 1: mov squarer4=int64#8 +# asm 2: mov squarer4=%r10 +mov %rdx,%r10 + +# qhasm: squarerax = rx3_stack +# asm 1: movq squarerax=int64#7 +# asm 2: movq squarerax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx2_stack +# asm 1: mulq squarer5=int64#9 +# asm 2: mov squarer5=%r11 +mov %rax,%r11 + +# qhasm: squarer6 = squarerdx +# asm 1: mov squarer6=int64#10 +# asm 2: mov squarer6=%r12 +mov %rdx,%r12 + +# qhasm: squarerax = rx2_stack +# asm 1: movq squarerax=int64#7 +# asm 2: movq squarerax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx0_stack +# asm 1: mulq squarerax=int64#7 +# asm 2: movq squarerax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx1_stack +# asm 1: mulq squarerax=int64#7 +# asm 2: movq squarerax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx0_stack +# asm 1: mulq squarerax=int64#7 +# asm 2: movq squarerax=%rax +movq 88(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * 
rx0_stack +# asm 1: mulq rx0=int64#11 +# asm 2: mov rx0=%r13 +mov %rax,%r13 + +# qhasm: squaret1 = squarerdx +# asm 1: mov squaret1=int64#12 +# asm 2: mov squaret1=%r14 +mov %rdx,%r14 + +# qhasm: squarerax = rx1_stack +# asm 1: movq squarerax=int64#7 +# asm 2: movq squarerax=%rax +movq 96(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx1_stack +# asm 1: mulq squaret2=int64#13 +# asm 2: mov squaret2=%r15 +mov %rax,%r15 + +# qhasm: squaret3 = squarerdx +# asm 1: mov squaret3=int64#14 +# asm 2: mov squaret3=%rbx +mov %rdx,%rbx + +# qhasm: squarerax = rx2_stack +# asm 1: movq squarerax=int64#7 +# asm 2: movq squarerax=%rax +movq 104(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx2_stack +# asm 1: mulq squarerax=int64#7 +# asm 2: movq squarerax=%rax +movq 112(%rsp),%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * rx3_stack +# asm 1: mulq squarerax=int64#7 +# asm 2: mov squarerax=%rax +mov %r10,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx0 += squarerax +# asm 1: add squarerax=int64#7 +# asm 2: mov squarerax=%rax +mov %r11,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx1 += squarerax +# asm 1: add squarerax=int64#7 +# asm 2: mov squarerax=%rax +mov %r12,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx2 += squarerax +# asm 1: add squarerax=int64#7 +# asm 2: mov squarerax=%rax +mov %rsi,%rax + +# qhasm: (uint128) squarerdx squarerax = squarerax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx3 += squarerax +# asm 1: add squarer4=int64#2 +# asm 2: mov $0,>squarer4=%rsi +mov $0,%rsi + +# qhasm: squarer4 += squarerdx + carry +# asm 1: adc squarer4=int64#2 +# asm 2: imulq $38,squarer4=%rsi +imulq $38,%rsi,%rsi + +# qhasm: carry? 
rx0 += squarer4 +# asm 1: add squarezero=int64#2 +# asm 2: mov $0,>squarezero=%rsi +mov $0,%rsi + +# qhasm: squarei38 = 38 +# asm 1: mov $38,>squarei38=int64#3 +# asm 2: mov $38,>squarei38=%rdx +mov $38,%rdx + +# qhasm: squarezero = squarei38 if carry +# asm 1: cmovc addt0=int64#2 +# asm 2: mov $0,>addt0=%rsi +mov $0,%rsi + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#3 +# asm 2: mov $38,>addt1=%rdx +mov $38,%rdx + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae addt0=int64#2 +# asm 2: mov $0,>addt0=%rsi +mov $0,%rsi + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#3 +# asm 2: mov $38,>addt1=%rdx +mov $38,%rdx + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-64-24k/ge25519_double.c b/src/ed25519-supercop-amd64-64-24k/ge25519_double.c new file mode 100644 index 0000000..d55e2b4 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/ge25519_double.c @@ -0,0 +1,8 @@ +#include "ge25519.h" + +void ge25519_double(ge25519_p3 *r, const ge25519_p3 *p) +{ + ge25519_p1p1 grp1p1; + ge25519_dbl_p1p1(&grp1p1, (ge25519_p2 *)p); + ge25519_p1p1_to_p3(r, &grp1p1); +} diff --git a/src/ed25519-supercop-amd64-64-24k/ge25519_double_scalarmult.c b/src/ed25519-supercop-amd64-64-24k/ge25519_double_scalarmult.c new file mode 100644 index 0000000..30c922a --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/ge25519_double_scalarmult.c @@ -0,0 +1,102 @@ +#include "fe25519.h" +#include "sc25519.h" +#include "ge25519.h" + +#define S1_SWINDOWSIZE 5 +#define PRE1_SIZE (1<<(S1_SWINDOWSIZE-2)) +#define S2_SWINDOWSIZE 7 +#define PRE2_SIZE (1<<(S2_SWINDOWSIZE-2)) + +ge25519_niels pre2[PRE2_SIZE] = { +#include "ge25519_base_slide_multiples.data" +}; + +static const fe25519 ec2d = {{0xEBD69B9426B2F146, 0x00E0149A8283B156, 0x198E80F2EEF3D130, 0xA406D9DC56DFFCE7}}; + +static void setneutral(ge25519 *r) +{ + fe25519_setint(&r->x,0); + fe25519_setint(&r->y,1); + fe25519_setint(&r->z,1); + fe25519_setint(&r->t,0); +} + +/* computes [s1]p1 + [s2]p2 */ +void ge25519_double_scalarmult_vartime(ge25519_p3 *r, const ge25519_p3 *p1, const sc25519 *s1, const sc25519 *s2) +{ + signed char slide1[256], slide2[256]; + ge25519_pniels pre1[PRE1_SIZE], neg; + ge25519_p3 d1; + ge25519_p1p1 t; + ge25519_niels nneg; + fe25519 d; + int i; + + sc25519_slide(slide1, s1, S1_SWINDOWSIZE); + sc25519_slide(slide2, s2, S2_SWINDOWSIZE); + + /* precomputation */ + pre1[0] = *(ge25519_pniels *)p1; + ge25519_dbl_p1p1(&t,(ge25519_p2 *)pre1); ge25519_p1p1_to_p3(&d1, &t); + /* Convert pre[0] to projective Niels representation */ + d = pre1[0].ysubx; + fe25519_sub(&pre1[0].ysubx, &pre1[0].xaddy, &pre1[0].ysubx); + fe25519_add(&pre1[0].xaddy, &pre1[0].xaddy, &d); + fe25519_mul(&pre1[0].t2d, 
&pre1[0].t2d, &ec2d); + + for(i=0;i<PRE1_SIZE-1;i++) + ge25519_pnielsadd(&pre1[i+1], &d1, &pre1[i]); + + setneutral(r); + for (i = 255;i >= 0;--i) { + if (slide1[i] || slide2[i]) goto firstbit; + } + + for(;i>=0;i--) + { + firstbit: + + ge25519_dbl_p1p1(&t, (ge25519_p2 *)r); + + if(slide1[i]>0) + { + ge25519_p1p1_to_p3(r, &t); + ge25519_pnielsadd_p1p1(&t, r, &pre1[slide1[i]/2]); + } + else if(slide1[i]<0) + { + ge25519_p1p1_to_p3(r, &t); + neg = pre1[-slide1[i]/2]; + d = neg.ysubx; + neg.ysubx = neg.xaddy; + neg.xaddy = d; + fe25519_neg(&neg.t2d, &neg.t2d); + ge25519_pnielsadd_p1p1(&t, r, &neg); + } + + if(slide2[i]>0) + { + ge25519_p1p1_to_p3(r, &t); + ge25519_nielsadd_p1p1(&t, r, &pre2[slide2[i]/2]); + } + else if(slide2[i]<0) + { + ge25519_p1p1_to_p3(r, &t); + nneg = pre2[-slide2[i]/2]; + d = nneg.ysubx; + nneg.ysubx = nneg.xaddy; + nneg.xaddy = d; + fe25519_neg(&nneg.t2d, &nneg.t2d); + ge25519_nielsadd_p1p1(&t, r, &nneg); + } + + ge25519_p1p1_to_p2((ge25519_p2 *)r, &t); + } +} diff --git a/src/ed25519-supercop-amd64-64-24k/ge25519_isneutral.c b/src/ed25519-supercop-amd64-64-24k/ge25519_isneutral.c new file mode 100644 index 0000000..cf566db --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/ge25519_isneutral.c @@ -0,0 +1,9 @@ +#include "fe25519.h" +#include "ge25519.h" + +int ge25519_isneutral_vartime(const ge25519_p3 *p) +{ + if(!fe25519_iszero_vartime(&p->x)) return 0; + if(!fe25519_iseq_vartime(&p->y, &p->z)) return 0; + return 1; +} diff --git a/src/ed25519-supercop-amd64-64-24k/ge25519_multi_scalarmult.c b/src/ed25519-supercop-amd64-64-24k/ge25519_multi_scalarmult.c new file mode 100644 index 0000000..afc6aea --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/ge25519_multi_scalarmult.c @@ -0,0 +1,102 @@ +#include "fe25519.h" +#include "sc25519.h" +#include "ge25519.h" +#include "index_heap.h" + +static void setneutral(ge25519 *r) +{ + fe25519_setint(&r->x,0); + fe25519_setint(&r->y,1); + fe25519_setint(&r->z,1); + fe25519_setint(&r->t,0); +} + +static void ge25519_scalarmult_vartime_2limbs(ge25519 *r, ge25519 *p, sc25519 *s) +{ + if (s->v[1] == 0 && s->v[0] == 1) /* This will happen most of the time after Bos-Coster */ + *r = *p; + else if (s->v[1] == 0 && s->v[0] == 0) /* This won't ever happen, except for all scalars == 0 in Bos-Coster */ + setneutral(r); + else + { + ge25519 d; + unsigned long long mask = (1ULL << 63); + int i = 1; + while(!(mask & s->v[1]) && mask != 0) + mask >>= 1; + if(mask == 0) + { + mask = (1ULL << 63); + i = 0; + while(!(mask & s->v[0]) && mask != 0) + mask >>= 1; + } + d = *p; + mask >>= 1; + for(;mask != 0;mask >>= 1) + { + ge25519_double(&d,&d); + if(s->v[i] & mask) + ge25519_add(&d,&d,p); + } + if(i==1) + { + mask = (1ULL << 63); + for(;mask != 0;mask >>= 1) + { + ge25519_double(&d,&d); + if(s->v[0] & mask) + ge25519_add(&d,&d,p); + } + } + *r = d; + } +} + +/* caller's responsibility to ensure npoints >= 5 */ +void ge25519_multi_scalarmult_vartime(ge25519_p3 *r, ge25519_p3 *p, sc25519 *s, const unsigned long long npoints) +{ + unsigned long long pos[npoints]; + unsigned long long hlen=((npoints+1)/2)|1; + unsigned long long max1, max2,i; + + heap_init(pos, hlen, s); + + for(i=0;;i++) + { + heap_get2max(pos, &max1, &max2, s); + if((s[max1].v[3] == 0) || (sc25519_iszero_vartime(&s[max2]))) break; + sc25519_sub_nored(&s[max1],&s[max1],&s[max2]); + ge25519_add(&p[max2],&p[max2],&p[max1]); + heap_rootreplaced(pos, hlen, s); + } + for(;;i++) + { + heap_get2max(pos, &max1, &max2, s); + if((s[max1].v[2] == 0) || (sc25519_iszero_vartime(&s[max2]))) break; + sc25519_sub_nored(&s[max1],&s[max1],&s[max2]); + ge25519_add(&p[max2],&p[max2],&p[max1]); + 
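ge25519_double_scalarmult_vartime above leans on sc25519_slide to rewrite each 256-bit scalar in signed sliding-window form before the main loop. As a toy illustration for a single 64-bit scalar (slide_sketch and its width handling are illustrative, not this package's API), the recoding looks roughly like the ref10-style routine below:

    #include <stdint.h>

    /* Rewrite s = sum slide[i]*2^i so that every nonzero digit is odd
     * and |digit| <= 2^(w-1) - 1.  Scalar multiplication then needs only
     * the odd multiples P, 3P, ..., which is why PRE1_SIZE and PRE2_SIZE
     * above are 1 << (SWINDOWSIZE - 2).  Assumes s < 2^63 so the final
     * carry never walks off the end of the array. */
    static void slide_sketch(signed char slide[64], uint64_t s, int w)
    {
        const int bound = (1 << (w - 1)) - 1;
        int i, b, k;

        for (i = 0; i < 64; i++)
            slide[i] = (s >> i) & 1;            /* start from plain bits */

        for (i = 0; i < 64; i++) {
            if (!slide[i]) continue;
            for (b = 1; b < w && i + b < 64; b++) {
                if (!slide[i + b]) continue;
                if (slide[i] + (slide[i + b] << b) <= bound) {
                    slide[i] += slide[i + b] << b;  /* absorb higher bit */
                    slide[i + b] = 0;
                } else if (slide[i] - (slide[i + b] << b) >= -bound) {
                    slide[i] -= slide[i + b] << b;  /* go negative ... */
                    for (k = i + b; k < 64; k++) {  /* ... and carry up */
                        if (!slide[k]) { slide[k] = 1; break; }
                        slide[k] = 0;
                    }
                } else
                    break;
            }
        }
    }

With S1_SWINDOWSIZE = 5 and S2_SWINDOWSIZE = 7 this yields the 8-entry pre1 table (built at runtime from p1) and the 32-entry precomputed pre2 table for the base point; negative digits reuse the same tables via the ysubx/xaddy swap and t2d negation seen above.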
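The heap loops of ge25519_multi_scalarmult_vartime, which continue below, implement the Bos-Coster trick: while at least two scalars are nonzero, subtract the second-largest scalar from the largest and add the corresponding point into the other term, which preserves the target sum because s1*P1 + s2*P2 = (s1-s2)*P1 + s2*(P1+P2). A toy version with plain integers standing in for scalars and points (bos_coster_toy and its linear scan are illustrative; the real code keeps the two maxima in the index_heap structure and switches to cheaper heap updates as the shrinking scalars drop below 4, 3, then 2 limbs):

    #include <stdint.h>

    /* sum over i of s[i]*p[i] is invariant across every iteration;
     * uint64_t "points" stand in for group elements, so p[max2] += p[max1]
     * plays the role of ge25519_add.  Requires n >= 2. */
    static uint64_t bos_coster_toy(uint64_t s[], uint64_t p[], int n)
    {
        for (;;) {
            int max1 = 0, max2 = 1, i;         /* indices of two largest */
            if (s[max2] > s[max1]) { max1 = 1; max2 = 0; }
            for (i = 2; i < n; i++) {
                if (s[i] > s[max1])      { max2 = max1; max1 = i; }
                else if (s[i] > s[max2]) { max2 = i; }
            }
            if (s[max2] == 0)                  /* one scalar left */
                return s[max1] * p[max1];      /* final single scalarmult */
            s[max1] -= s[max2];
            p[max2] += p[max1];
        }
    }

The closing ge25519_scalarmult_vartime_2limbs call plays the role of the final return here: after enough reduction steps the surviving scalar is usually 1, so most runs finish with a plain copy.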
heap_rootreplaced_3limbs(pos, hlen, s); + } + /* We know that (npoints-1)/2 scalars are only 128-bit scalars */ + heap_extend(pos, hlen, npoints, s); + hlen = npoints; + for(;;i++) + { + heap_get2max(pos, &max1, &max2, s); + if((s[max1].v[1] == 0) || (sc25519_iszero_vartime(&s[max2]))) break; + sc25519_sub_nored(&s[max1],&s[max1],&s[max2]); + ge25519_add(&p[max2],&p[max2],&p[max1]); + heap_rootreplaced_2limbs(pos, hlen, s); + } + for(;;i++) + { + heap_get2max(pos, &max1, &max2, s); + if(sc25519_iszero_vartime(&s[max2])) break; + sc25519_sub_nored(&s[max1],&s[max1],&s[max2]); + ge25519_add(&p[max2],&p[max2],&p[max1]); + heap_rootreplaced_1limb(pos, hlen, s); + } + + ge25519_scalarmult_vartime_2limbs(r, &p[max1], &s[max1]); +} diff --git a/src/ed25519-supercop-amd64-64-24k/ge25519_nielsadd2.s b/src/ed25519-supercop-amd64-64-24k/ge25519_nielsadd2.s new file mode 100644 index 0000000..b82dc99 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/ge25519_nielsadd2.s @@ -0,0 +1,5649 @@ + +# qhasm: int64 rp + +# qhasm: int64 qp + +# qhasm: input rp + +# qhasm: input qp + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 a0 + +# qhasm: int64 a1 + +# qhasm: int64 a2 + +# qhasm: int64 a3 + +# qhasm: stack64 a0_stack + +# qhasm: stack64 a1_stack + +# qhasm: stack64 a2_stack + +# qhasm: stack64 a3_stack + +# qhasm: int64 b0 + +# qhasm: int64 b1 + +# qhasm: int64 b2 + +# qhasm: int64 b3 + +# qhasm: stack64 b0_stack + +# qhasm: stack64 b1_stack + +# qhasm: stack64 b2_stack + +# qhasm: stack64 b3_stack + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: stack64 c0_stack + +# qhasm: stack64 c1_stack + +# qhasm: stack64 c2_stack + +# qhasm: stack64 c3_stack + +# qhasm: int64 d0 + +# qhasm: int64 d1 + +# qhasm: int64 d2 + +# qhasm: int64 d3 + +# qhasm: stack64 d0_stack + +# qhasm: stack64 d1_stack + +# qhasm: stack64 d2_stack + +# qhasm: stack64 d3_stack + +# qhasm: int64 e0 + +# qhasm: int64 e1 + +# qhasm: int64 e2 + +# qhasm: int64 e3 + +# qhasm: stack64 e0_stack + +# qhasm: stack64 e1_stack + +# qhasm: stack64 e2_stack + +# qhasm: stack64 e3_stack + +# qhasm: int64 f0 + +# qhasm: int64 f1 + +# qhasm: int64 f2 + +# qhasm: int64 f3 + +# qhasm: stack64 f0_stack + +# qhasm: stack64 f1_stack + +# qhasm: stack64 f2_stack + +# qhasm: stack64 f3_stack + +# qhasm: int64 g0 + +# qhasm: int64 g1 + +# qhasm: int64 g2 + +# qhasm: int64 g3 + +# qhasm: stack64 g0_stack + +# qhasm: stack64 g1_stack + +# qhasm: stack64 g2_stack + +# qhasm: stack64 g3_stack + +# qhasm: int64 h0 + +# qhasm: int64 h1 + +# qhasm: int64 h2 + +# qhasm: int64 h3 + +# qhasm: stack64 h0_stack + +# qhasm: stack64 h1_stack + +# qhasm: stack64 h2_stack + +# qhasm: stack64 h3_stack + +# qhasm: int64 qt0 + +# qhasm: int64 qt1 + +# qhasm: int64 qt2 + +# qhasm: int64 qt3 + +# qhasm: stack64 qt0_stack + +# qhasm: stack64 qt1_stack + +# qhasm: stack64 qt2_stack + +# qhasm: stack64 qt3_stack + +# qhasm: int64 t10 + +# qhasm: int64 t11 + +# qhasm: 
int64 t12 + +# qhasm: int64 t13 + +# qhasm: stack64 t10_stack + +# qhasm: stack64 t11_stack + +# qhasm: stack64 t12_stack + +# qhasm: stack64 t13_stack + +# qhasm: int64 t20 + +# qhasm: int64 t21 + +# qhasm: int64 t22 + +# qhasm: int64 t23 + +# qhasm: stack64 t20_stack + +# qhasm: stack64 t21_stack + +# qhasm: stack64 t22_stack + +# qhasm: stack64 t23_stack + +# qhasm: int64 rx0 + +# qhasm: int64 rx1 + +# qhasm: int64 rx2 + +# qhasm: int64 rx3 + +# qhasm: int64 ry0 + +# qhasm: int64 ry1 + +# qhasm: int64 ry2 + +# qhasm: int64 ry3 + +# qhasm: int64 rz0 + +# qhasm: int64 rz1 + +# qhasm: int64 rz2 + +# qhasm: int64 rz3 + +# qhasm: int64 rt0 + +# qhasm: int64 rt1 + +# qhasm: int64 rt2 + +# qhasm: int64 rt3 + +# qhasm: int64 mulr4 + +# qhasm: int64 mulr5 + +# qhasm: int64 mulr6 + +# qhasm: int64 mulr7 + +# qhasm: int64 mulrax + +# qhasm: int64 mulrdx + +# qhasm: int64 mulx0 + +# qhasm: int64 mulx1 + +# qhasm: int64 mulx2 + +# qhasm: int64 mulx3 + +# qhasm: int64 mulc + +# qhasm: int64 mulzero + +# qhasm: int64 muli38 + +# qhasm: int64 addt0 + +# qhasm: int64 addt1 + +# qhasm: int64 subt0 + +# qhasm: int64 subt1 + +# qhasm: enter crypto_sign_ed25519_amd64_64_24k_batch_ge25519_nielsadd2 +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_24k_batch_ge25519_nielsadd2 +.globl crypto_sign_ed25519_amd64_64_24k_batch_ge25519_nielsadd2 +_crypto_sign_ed25519_amd64_64_24k_batch_ge25519_nielsadd2: +crypto_sign_ed25519_amd64_64_24k_batch_ge25519_nielsadd2: +mov %rsp,%r11 +and $31,%r11 +add $192,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: a0 = *(uint64 *)(rp + 32) +# asm 1: movq 32(a0=int64#3 +# asm 2: movq 32(a0=%rdx +movq 32(%rdi),%rdx + +# qhasm: a1 = *(uint64 *)(rp + 40) +# asm 1: movq 40(a1=int64#4 +# asm 2: movq 40(a1=%rcx +movq 40(%rdi),%rcx + +# qhasm: a2 = *(uint64 *)(rp + 48) +# asm 1: movq 48(a2=int64#5 +# asm 2: movq 48(a2=%r8 +movq 48(%rdi),%r8 + +# qhasm: a3 = *(uint64 *)(rp + 56) +# asm 1: movq 56(a3=int64#6 +# asm 2: movq 56(a3=%r9 +movq 56(%rdi),%r9 + +# qhasm: b0 = a0 +# asm 1: mov b0=int64#7 +# asm 2: mov b0=%rax +mov %rdx,%rax + +# qhasm: b1 = a1 +# asm 1: mov b1=int64#8 +# asm 2: mov b1=%r10 +mov %rcx,%r10 + +# qhasm: b2 = a2 +# asm 1: mov b2=int64#9 +# asm 2: mov b2=%r11 +mov %r8,%r11 + +# qhasm: b3 = a3 +# asm 1: mov b3=int64#10 +# asm 2: mov b3=%r12 +mov %r9,%r12 + +# qhasm: carry? 
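What follows is the heart of ge25519_nielsadd2: a mixed addition r <- r + q where, judging from the offsets used below, rp holds (X1, Y1, Z1, T1) at offsets 0/32/64/96 and qp holds the Niels-form point (Y2-X2, Y2+X2, 2*d*T2) at offsets 0/32/64. Expressed with the fe25519 helpers and field names used by the C files in this patch, the computation is roughly the following sketch (nielsadd2_sketch is an illustration, not the package's code, which keeps everything in registers and stack slots):

    #include "fe25519.h"
    #include "ge25519.h"

    /* Hisil-Wong-Carter-Dawson mixed addition in extended coordinates. */
    static void nielsadd2_sketch(ge25519_p3 *r, const ge25519_niels *q)
    {
        fe25519 a, b, c, d, e, f, g, h;

        fe25519_sub(&a, &r->y, &r->x);      /* a = Y1 - X1     */
        fe25519_mul(&a, &a, &q->ysubx);     /* A = a*(Y2-X2)   */
        fe25519_add(&b, &r->y, &r->x);      /* b = Y1 + X1     */
        fe25519_mul(&b, &b, &q->xaddy);     /* B = b*(Y2+X2)   */
        fe25519_mul(&c, &r->t, &q->t2d);    /* C = T1*(2*d*T2) */
        fe25519_add(&d, &r->z, &r->z);      /* D = 2*Z1        */
        fe25519_sub(&e, &b, &a);            /* E = B - A       */
        fe25519_add(&h, &b, &a);            /* H = B + A       */
        fe25519_sub(&f, &d, &c);            /* F = D - C       */
        fe25519_add(&g, &d, &c);            /* G = D + C       */
        fe25519_mul(&r->x, &e, &f);         /* X3 = E*F        */
        fe25519_mul(&r->y, &h, &g);         /* Y3 = H*G        */
        fe25519_mul(&r->z, &g, &f);         /* Z3 = G*F        */
        fe25519_mul(&r->t, &e, &h);         /* T3 = E*H        */
    }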
a0 -= *(uint64 *) (rp + 0) +# asm 1: subq 0(subt0=int64#11 +# asm 2: mov $0,>subt0=%r13 +mov $0,%r13 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#12 +# asm 2: mov $38,>subt1=%r14 +mov $38,%r14 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae addt0=int64#11 +# asm 2: mov $0,>addt0=%r13 +mov $0,%r13 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#12 +# asm 2: mov $38,>addt1=%r14 +mov $38,%r14 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae a0_stack=stack64#8 +# asm 2: movq a0_stack=56(%rsp) +movq %rdx,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq a1_stack=stack64#9 +# asm 2: movq a1_stack=64(%rsp) +movq %rcx,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq a2_stack=stack64#10 +# asm 2: movq a2_stack=72(%rsp) +movq %r8,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq a3_stack=stack64#11 +# asm 2: movq a3_stack=80(%rsp) +movq %r9,80(%rsp) + +# qhasm: b0_stack = b0 +# asm 1: movq b0_stack=stack64#12 +# asm 2: movq b0_stack=88(%rsp) +movq %rax,88(%rsp) + +# qhasm: b1_stack = b1 +# asm 1: movq b1_stack=stack64#13 +# asm 2: movq b1_stack=96(%rsp) +movq %r10,96(%rsp) + +# qhasm: b2_stack = b2 +# asm 1: movq b2_stack=stack64#14 +# asm 2: movq b2_stack=104(%rsp) +movq %r11,104(%rsp) + +# qhasm: b3_stack = b3 +# asm 1: movq b3_stack=stack64#15 +# asm 2: movq b3_stack=112(%rsp) +movq %r12,112(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#5 +# asm 2: mov $0,>mulr5=%r8 +mov $0,%r8 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulx0 = a0_stack +# asm 1: movq mulx0=int64#9 +# asm 2: movq mulx0=%r11 +movq 56(%rsp),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul a0=int64#10 +# asm 2: mov a0=%r12 +mov %rax,%r12 + +# qhasm: a1 = mulrdx +# asm 1: mov a1=int64#11 +# asm 2: mov a1=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(qp + 8) +# asm 1: movq 8(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul a2=int64#12 +# asm 2: mov $0,>a2=%r14 +mov $0,%r14 + +# qhasm: a2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul a3=int64#13 +# asm 2: mov $0,>a3=%r15 +mov $0,%r15 + +# qhasm: a3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#9 +# asm 2: movq mulx1=%r11 +movq 64(%rsp),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx 
+mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#9 +# asm 2: movq mulx2=%r11 +movq 72(%rsp),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#9 +# asm 2: movq mulx3=%r11 +movq 80(%rsp),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? a0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? a1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? a2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? a3 += mulrax +# asm 1: add mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#3 +# asm 2: imulq $38,mulr4=%rdx +imulq $38,%rcx,%rdx + +# qhasm: carry? 
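Each 4x4-limb schoolbook product in these routines leaves a 512-bit result: four low limbs plus the overflow limbs mulr4..mulr7 (or squarer4..squarer7). The runs of mulq against crypto_sign_ed25519_amd64_64_24k_batch_38, followed by the imulq $38 of the last carry, fold the high half back into four limbs using 2^256 = 38 (mod 2^255 - 19). The same reduction in C, as a hedged sketch (fold38_sketch is a hypothetical helper, not this package's API):

    #include <stdint.h>

    /* Reduce a 512-bit product t[0..7] modulo 2^255 - 19: the value is
     * t_lo + 2^256*t_hi = t_lo + 38*t_hi (mod p), and the leftover
     * carry is folded in the same way.  The result is congruent mod p
     * but deliberately left in redundant (not fully reduced) form. */
    static void fold38_sketch(uint64_t r[4], const uint64_t t[8])
    {
        unsigned __int128 acc;
        uint64_t carry = 0;
        int i;

        for (i = 0; i < 4; i++) {
            acc = (unsigned __int128)t[i + 4] * 38 + t[i] + carry;
            r[i] = (uint64_t)acc;
            carry = (uint64_t)(acc >> 64);   /* at most about 38 */
        }
        carry *= 38;                         /* bit-256 weight again */
        for (i = 0; i < 4; i++) {
            acc = (unsigned __int128)r[i] + carry;
            r[i] = (uint64_t)acc;
            carry = (uint64_t)(acc >> 64);
        }
        r[0] += carry * 38;                  /* final fold cannot carry */
    }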
a0 += mulr4 +# asm 1: add mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#4 +# asm 2: mov $38,>muli38=%rcx +mov $38,%rcx + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc a0_stack=stack64#8 +# asm 2: movq a0_stack=56(%rsp) +movq %r12,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq a1_stack=stack64#9 +# asm 2: movq a1_stack=64(%rsp) +movq %r13,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq a2_stack=stack64#10 +# asm 2: movq a2_stack=72(%rsp) +movq %r14,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq a3_stack=stack64#11 +# asm 2: movq a3_stack=80(%rsp) +movq %r15,80(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#5 +# asm 2: mov $0,>mulr5=%r8 +mov $0,%r8 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulx0 = b0_stack +# asm 1: movq mulx0=int64#9 +# asm 2: movq mulx0=%r11 +movq 88(%rsp),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul e0=int64#10 +# asm 2: mov e0=%r12 +mov %rax,%r12 + +# qhasm: e1 = mulrdx +# asm 1: mov e1=int64#11 +# asm 2: mov e1=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(qp + 40) +# asm 1: movq 40(mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul e2=int64#12 +# asm 2: mov $0,>e2=%r14 +mov $0,%r14 + +# qhasm: e2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul e3=int64#13 +# asm 2: mov $0,>e3=%r15 +mov $0,%r15 + +# qhasm: e3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#9 +# asm 2: movq mulx1=%r11 +movq 96(%rsp),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#9 +# asm 2: movq mulx2=%r11 +movq 104(%rsp),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 
+# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#9 +# asm 2: movq mulx3=%r11 +movq 112(%rsp),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? e0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? e1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? e2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? e3 += mulrax +# asm 1: add mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#3 +# asm 2: imulq $38,mulr4=%rdx +imulq $38,%rcx,%rdx + +# qhasm: carry? e0 += mulr4 +# asm 1: add mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#4 +# asm 2: mov $38,>muli38=%rcx +mov $38,%rcx + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc h0=int64#3 +# asm 2: mov h0=%rdx +mov %r12,%rdx + +# qhasm: h1 = e1 +# asm 1: mov h1=int64#4 +# asm 2: mov h1=%rcx +mov %r13,%rcx + +# qhasm: h2 = e2 +# asm 1: mov h2=int64#5 +# asm 2: mov h2=%r8 +mov %r14,%r8 + +# qhasm: h3 = e3 +# asm 1: mov h3=int64#6 +# asm 2: mov h3=%r9 +mov %r15,%r9 + +# qhasm: carry? 
e0 -= a0_stack +# asm 1: subq subt0=int64#7 +# asm 2: mov $0,>subt0=%rax +mov $0,%rax + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#8 +# asm 2: mov $38,>subt1=%r10 +mov $38,%r10 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae addt0=int64#7 +# asm 2: mov $0,>addt0=%rax +mov $0,%rax + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#8 +# asm 2: mov $38,>addt1=%r10 +mov $38,%r10 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae h0_stack=stack64#8 +# asm 2: movq h0_stack=56(%rsp) +movq %rdx,56(%rsp) + +# qhasm: h1_stack = h1 +# asm 1: movq h1_stack=stack64#9 +# asm 2: movq h1_stack=64(%rsp) +movq %rcx,64(%rsp) + +# qhasm: h2_stack = h2 +# asm 1: movq h2_stack=stack64#10 +# asm 2: movq h2_stack=72(%rsp) +movq %r8,72(%rsp) + +# qhasm: h3_stack = h3 +# asm 1: movq h3_stack=stack64#11 +# asm 2: movq h3_stack=80(%rsp) +movq %r9,80(%rsp) + +# qhasm: e0_stack = e0 +# asm 1: movq e0_stack=stack64#12 +# asm 2: movq e0_stack=88(%rsp) +movq %r12,88(%rsp) + +# qhasm: e1_stack = e1 +# asm 1: movq e1_stack=stack64#13 +# asm 2: movq e1_stack=96(%rsp) +movq %r13,96(%rsp) + +# qhasm: e2_stack = e2 +# asm 1: movq e2_stack=stack64#14 +# asm 2: movq e2_stack=104(%rsp) +movq %r14,104(%rsp) + +# qhasm: e3_stack = e3 +# asm 1: movq e3_stack=stack64#15 +# asm 2: movq e3_stack=112(%rsp) +movq %r15,112(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#5 +# asm 2: mov $0,>mulr5=%r8 +mov $0,%r8 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulx0 = *(uint64 *)(rp + 96) +# asm 1: movq 96(mulx0=int64#9 +# asm 2: movq 96(mulx0=%r11 +movq 96(%rdi),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul c0=int64#10 +# asm 2: mov c0=%r12 +mov %rax,%r12 + +# qhasm: c1 = mulrdx +# asm 1: mov c1=int64#11 +# asm 2: mov c1=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(qp + 72) +# asm 1: movq 72(mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul c2=int64#12 +# asm 2: mov $0,>c2=%r14 +mov $0,%r14 + +# qhasm: c2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul c3=int64#13 +# asm 2: mov $0,>c3=%r15 +mov $0,%r15 + +# qhasm: c3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#9 +# asm 2: movq 104(mulx1=%r11 +movq 104(%rdi),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: 
mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#9 +# asm 2: movq 112(mulx2=%r11 +movq 112(%rdi),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#9 +# asm 2: movq 120(mulx3=%r11 +movq 120(%rdi),%r11 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c3 += mulrax +# asm 1: add mulr4=int64#2 +# asm 2: mov $0,>mulr4=%rsi +mov $0,%rsi + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#2 +# asm 2: imulq $38,mulr4=%rsi +imulq $38,%rsi,%rsi + +# qhasm: carry? 
c0 += mulr4 +# asm 1: add mulzero=int64#2 +# asm 2: mov $0,>mulzero=%rsi +mov $0,%rsi + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#3 +# asm 2: mov $38,>muli38=%rdx +mov $38,%rdx + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc f0=int64#2 +# asm 2: movq 64(f0=%rsi +movq 64(%rdi),%rsi + +# qhasm: f1 = *(uint64 *)(rp + 72) +# asm 1: movq 72(f1=int64#3 +# asm 2: movq 72(f1=%rdx +movq 72(%rdi),%rdx + +# qhasm: f2 = *(uint64 *)(rp + 80) +# asm 1: movq 80(f2=int64#4 +# asm 2: movq 80(f2=%rcx +movq 80(%rdi),%rcx + +# qhasm: f3 = *(uint64 *)(rp + 88) +# asm 1: movq 88(f3=int64#5 +# asm 2: movq 88(f3=%r8 +movq 88(%rdi),%r8 + +# qhasm: carry? f0 += f0 +# asm 1: add addt0=int64#6 +# asm 2: mov $0,>addt0=%r9 +mov $0,%r9 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#7 +# asm 2: mov $38,>addt1=%rax +mov $38,%rax + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae g0=int64#6 +# asm 2: mov g0=%r9 +mov %rsi,%r9 + +# qhasm: g1 = f1 +# asm 1: mov g1=int64#7 +# asm 2: mov g1=%rax +mov %rdx,%rax + +# qhasm: g2 = f2 +# asm 1: mov g2=int64#8 +# asm 2: mov g2=%r10 +mov %rcx,%r10 + +# qhasm: g3 = f3 +# asm 1: mov g3=int64#9 +# asm 2: mov g3=%r11 +mov %r8,%r11 + +# qhasm: carry? f0 -= c0 +# asm 1: sub subt0=int64#14 +# asm 2: mov $0,>subt0=%rbx +mov $0,%rbx + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#15 +# asm 2: mov $38,>subt1=%rbp +mov $38,%rbp + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae addt0=int64#10 +# asm 2: mov $0,>addt0=%r12 +mov $0,%r12 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#11 +# asm 2: mov $38,>addt1=%r13 +mov $38,%r13 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae g0_stack=stack64#16 +# asm 2: movq g0_stack=120(%rsp) +movq %r9,120(%rsp) + +# qhasm: g1_stack = g1 +# asm 1: movq g1_stack=stack64#17 +# asm 2: movq g1_stack=128(%rsp) +movq %rax,128(%rsp) + +# qhasm: g2_stack = g2 +# asm 1: movq g2_stack=stack64#18 +# asm 2: movq g2_stack=136(%rsp) +movq %r10,136(%rsp) + +# qhasm: g3_stack = g3 +# asm 1: movq g3_stack=stack64#19 +# asm 2: movq g3_stack=144(%rsp) +movq %r11,144(%rsp) + +# qhasm: f0_stack = f0 +# asm 1: movq f0_stack=stack64#20 +# asm 2: movq f0_stack=152(%rsp) +movq %rsi,152(%rsp) + +# qhasm: f1_stack = f1 +# asm 1: movq f1_stack=stack64#21 +# asm 2: movq f1_stack=160(%rsp) +movq %rdx,160(%rsp) + +# qhasm: f2_stack = f2 +# asm 1: movq f2_stack=stack64#22 +# asm 2: movq f2_stack=168(%rsp) +movq %rcx,168(%rsp) + +# qhasm: f3_stack = f3 +# asm 1: movq f3_stack=stack64#23 +# asm 2: movq f3_stack=176(%rsp) +movq %r8,176(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#2 +# asm 2: mov $0,>mulr4=%rsi +mov $0,%rsi + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#4 +# asm 2: mov $0,>mulr5=%rcx +mov $0,%rcx + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#5 +# asm 2: mov $0,>mulr6=%r8 +mov $0,%r8 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#6 +# asm 2: mov $0,>mulr7=%r9 +mov $0,%r9 + +# qhasm: mulx0 = e0_stack +# asm 1: movq mulx0=int64#8 +# asm 2: movq mulx0=%r10 +movq 88(%rsp),%r10 + +# qhasm: mulrax = f0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rx0=int64#9 +# asm 2: mov rx0=%r11 +mov %rax,%r11 + +# qhasm: rx1 = mulrdx +# asm 1: mov rx1=int64#10 +# asm 2: mov rx1=%r12 +mov %rdx,%r12 + +# qhasm: mulrax = f1_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rx2=int64#11 +# asm 2: mov $0,>rx2=%r13 +mov $0,%r13 + 
+# qhasm: rx2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rx3=int64#12 +# asm 2: mov $0,>rx3=%r14 +mov $0,%r14 + +# qhasm: rx3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#8 +# asm 2: movq mulx1=%r10 +movq 96(%rsp),%r10 + +# qhasm: mulrax = f0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#8 +# asm 2: movq mulx2=%r10 +movq 104(%rsp),%r10 + +# qhasm: mulrax = f0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#8 +# asm 2: movq mulx3=%r10 +movq 112(%rsp),%r10 + +# qhasm: mulrax = f0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %rsi,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? 
rx0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx3 += mulrax +# asm 1: add mulr4=int64#2 +# asm 2: mov $0,>mulr4=%rsi +mov $0,%rsi + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#2 +# asm 2: imulq $38,mulr4=%rsi +imulq $38,%rsi,%rsi + +# qhasm: carry? rx0 += mulr4 +# asm 1: add mulzero=int64#2 +# asm 2: mov $0,>mulzero=%rsi +mov $0,%rsi + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#3 +# asm 2: mov $38,>muli38=%rdx +mov $38,%rdx + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc mulr4=int64#2 +# asm 2: mov $0,>mulr4=%rsi +mov $0,%rsi + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#4 +# asm 2: mov $0,>mulr5=%rcx +mov $0,%rcx + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#5 +# asm 2: mov $0,>mulr6=%r8 +mov $0,%r8 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#6 +# asm 2: mov $0,>mulr7=%r9 +mov $0,%r9 + +# qhasm: mulx0 = h0_stack +# asm 1: movq mulx0=int64#8 +# asm 2: movq mulx0=%r10 +movq 56(%rsp),%r10 + +# qhasm: mulrax = g0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul ry0=int64#9 +# asm 2: mov ry0=%r11 +mov %rax,%r11 + +# qhasm: ry1 = mulrdx +# asm 1: mov ry1=int64#10 +# asm 2: mov ry1=%r12 +mov %rdx,%r12 + +# qhasm: mulrax = g1_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 128(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul ry2=int64#11 +# asm 2: mov $0,>ry2=%r13 +mov $0,%r13 + +# qhasm: ry2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul ry3=int64#12 +# asm 2: mov $0,>ry3=%r14 +mov $0,%r14 + +# qhasm: ry3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#8 +# asm 2: movq mulx1=%r10 +movq 64(%rsp),%r10 + +# qhasm: mulrax = g0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 128(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul 
mulx2=int64#8 +# asm 2: movq mulx2=%r10 +movq 72(%rsp),%r10 + +# qhasm: mulrax = g0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 128(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#8 +# asm 2: movq mulx3=%r10 +movq 80(%rsp),%r10 + +# qhasm: mulrax = g0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 120(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 128(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 136(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 144(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %rsi,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? ry0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? ry1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? ry2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? ry3 += mulrax +# asm 1: add mulr4=int64#2 +# asm 2: mov $0,>mulr4=%rsi +mov $0,%rsi + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#2 +# asm 2: imulq $38,mulr4=%rsi +imulq $38,%rsi,%rsi + +# qhasm: carry? 
ry0 += mulr4 +# asm 1: add mulzero=int64#2 +# asm 2: mov $0,>mulzero=%rsi +mov $0,%rsi + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#3 +# asm 2: mov $38,>muli38=%rdx +mov $38,%rdx + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc mulr4=int64#2 +# asm 2: mov $0,>mulr4=%rsi +mov $0,%rsi + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#4 +# asm 2: mov $0,>mulr5=%rcx +mov $0,%rcx + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#5 +# asm 2: mov $0,>mulr6=%r8 +mov $0,%r8 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#6 +# asm 2: mov $0,>mulr7=%r9 +mov $0,%r9 + +# qhasm: mulx0 = g0_stack +# asm 1: movq mulx0=int64#8 +# asm 2: movq mulx0=%r10 +movq 120(%rsp),%r10 + +# qhasm: mulrax = f0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rz0=int64#9 +# asm 2: mov rz0=%r11 +mov %rax,%r11 + +# qhasm: rz1 = mulrdx +# asm 1: mov rz1=int64#10 +# asm 2: mov rz1=%r12 +mov %rdx,%r12 + +# qhasm: mulrax = f1_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rz2=int64#11 +# asm 2: mov $0,>rz2=%r13 +mov $0,%r13 + +# qhasm: rz2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rz3=int64#12 +# asm 2: mov $0,>rz3=%r14 +mov $0,%r14 + +# qhasm: rz3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#8 +# asm 2: movq mulx1=%r10 +movq 128(%rsp),%r10 + +# qhasm: mulrax = f0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#8 +# asm 2: movq mulx2=%r10 +movq 136(%rsp),%r10 + +# qhasm: mulrax = f0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#8 +# asm 2: movq 
mulx3=%r10 +movq 144(%rsp),%r10 + +# qhasm: mulrax = f0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 152(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 160(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 168(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 176(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %rsi,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rz0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rz1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rz2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rz3 += mulrax +# asm 1: add mulr4=int64#2 +# asm 2: mov $0,>mulr4=%rsi +mov $0,%rsi + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#2 +# asm 2: imulq $38,mulr4=%rsi +imulq $38,%rsi,%rsi + +# qhasm: carry? 
rz0 += mulr4 +# asm 1: add mulzero=int64#2 +# asm 2: mov $0,>mulzero=%rsi +mov $0,%rsi + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#3 +# asm 2: mov $38,>muli38=%rdx +mov $38,%rdx + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc mulr4=int64#2 +# asm 2: mov $0,>mulr4=%rsi +mov $0,%rsi + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#4 +# asm 2: mov $0,>mulr5=%rcx +mov $0,%rcx + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#5 +# asm 2: mov $0,>mulr6=%r8 +mov $0,%r8 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#6 +# asm 2: mov $0,>mulr7=%r9 +mov $0,%r9 + +# qhasm: mulx0 = e0_stack +# asm 1: movq mulx0=int64#8 +# asm 2: movq mulx0=%r10 +movq 88(%rsp),%r10 + +# qhasm: mulrax = h0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rt0=int64#9 +# asm 2: mov rt0=%r11 +mov %rax,%r11 + +# qhasm: rt1 = mulrdx +# asm 1: mov rt1=int64#10 +# asm 2: mov rt1=%r12 +mov %rdx,%r12 + +# qhasm: mulrax = h1_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rt2=int64#11 +# asm 2: mov $0,>rt2=%r13 +mov $0,%r13 + +# qhasm: rt2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rt3=int64#12 +# asm 2: mov $0,>rt3=%r14 +mov $0,%r14 + +# qhasm: rt3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#8 +# asm 2: movq mulx1=%r10 +movq 96(%rsp),%r10 + +# qhasm: mulrax = h0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#8 +# asm 2: movq mulx2=%r10 +movq 104(%rsp),%r10 + +# qhasm: mulrax = h0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#8 +# asm 2: movq mulx3=%r10 +movq 
112(%rsp),%r10 + +# qhasm: mulrax = h0_stack +# asm 1: movq mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#13 +# asm 2: mov $0,>mulc=%r15 +mov $0,%r15 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq mulrax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %rsi,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rt0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rt1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rt2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rt3 += mulrax +# asm 1: add mulr4=int64#2 +# asm 2: mov $0,>mulr4=%rsi +mov $0,%rsi + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#2 +# asm 2: imulq $38,mulr4=%rsi +imulq $38,%rsi,%rsi + +# qhasm: carry? 
rt0 += mulr4 +# asm 1: add mulzero=int64#2 +# asm 2: mov $0,>mulzero=%rsi +mov $0,%rsi + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#3 +# asm 2: mov $38,>muli38=%rdx +mov $38,%rdx + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-64-24k/ge25519_nielsadd_p1p1.s b/src/ed25519-supercop-amd64-64-24k/ge25519_nielsadd_p1p1.s new file mode 100644 index 0000000..f731dcf --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/ge25519_nielsadd_p1p1.s @@ -0,0 +1,3010 @@ + +# qhasm: int64 rp + +# qhasm: int64 pp + +# qhasm: int64 qp + +# qhasm: input rp + +# qhasm: input pp + +# qhasm: input qp + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 a0 + +# qhasm: int64 a1 + +# qhasm: int64 a2 + +# qhasm: int64 a3 + +# qhasm: stack64 a0_stack + +# qhasm: stack64 a1_stack + +# qhasm: stack64 a2_stack + +# qhasm: stack64 a3_stack + +# qhasm: int64 b0 + +# qhasm: int64 b1 + +# qhasm: int64 b2 + +# qhasm: int64 b3 + +# qhasm: stack64 b0_stack + +# qhasm: stack64 b1_stack + +# qhasm: stack64 b2_stack + +# qhasm: stack64 b3_stack + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: stack64 c0_stack + +# qhasm: stack64 c1_stack + +# qhasm: stack64 c2_stack + +# qhasm: stack64 c3_stack + +# qhasm: int64 d0 + +# qhasm: int64 d1 + +# qhasm: int64 d2 + +# qhasm: int64 d3 + +# qhasm: stack64 d0_stack + +# qhasm: stack64 d1_stack + +# qhasm: stack64 d2_stack + +# qhasm: stack64 d3_stack + +# qhasm: int64 e0 + +# qhasm: int64 e1 + +# qhasm: int64 e2 + +# qhasm: int64 e3 + +# qhasm: stack64 e0_stack + +# qhasm: stack64 e1_stack + +# qhasm: stack64 e2_stack + +# qhasm: stack64 e3_stack + +# qhasm: int64 f0 + +# qhasm: int64 f1 + +# qhasm: int64 f2 + +# qhasm: int64 f3 + +# qhasm: stack64 f0_stack + +# qhasm: stack64 f1_stack + +# qhasm: stack64 f2_stack + +# qhasm: stack64 f3_stack + +# qhasm: int64 g0 + +# qhasm: int64 g1 + +# qhasm: int64 g2 + +# qhasm: int64 g3 + +# qhasm: stack64 g0_stack + +# qhasm: stack64 g1_stack + +# qhasm: stack64 g2_stack + +# qhasm: stack64 g3_stack + +# qhasm: int64 h0 + +# qhasm: int64 h1 + +# qhasm: int64 h2 + +# qhasm: 
int64 h3 + +# qhasm: stack64 h0_stack + +# qhasm: stack64 h1_stack + +# qhasm: stack64 h2_stack + +# qhasm: stack64 h3_stack + +# qhasm: int64 qt0 + +# qhasm: int64 qt1 + +# qhasm: int64 qt2 + +# qhasm: int64 qt3 + +# qhasm: stack64 qt0_stack + +# qhasm: stack64 qt1_stack + +# qhasm: stack64 qt2_stack + +# qhasm: stack64 qt3_stack + +# qhasm: int64 t10 + +# qhasm: int64 t11 + +# qhasm: int64 t12 + +# qhasm: int64 t13 + +# qhasm: stack64 t10_stack + +# qhasm: stack64 t11_stack + +# qhasm: stack64 t12_stack + +# qhasm: stack64 t13_stack + +# qhasm: int64 t20 + +# qhasm: int64 t21 + +# qhasm: int64 t22 + +# qhasm: int64 t23 + +# qhasm: stack64 t20_stack + +# qhasm: stack64 t21_stack + +# qhasm: stack64 t22_stack + +# qhasm: stack64 t23_stack + +# qhasm: int64 rx0 + +# qhasm: int64 rx1 + +# qhasm: int64 rx2 + +# qhasm: int64 rx3 + +# qhasm: int64 ry0 + +# qhasm: int64 ry1 + +# qhasm: int64 ry2 + +# qhasm: int64 ry3 + +# qhasm: int64 rz0 + +# qhasm: int64 rz1 + +# qhasm: int64 rz2 + +# qhasm: int64 rz3 + +# qhasm: int64 rt0 + +# qhasm: int64 rt1 + +# qhasm: int64 rt2 + +# qhasm: int64 rt3 + +# qhasm: int64 mulr4 + +# qhasm: int64 mulr5 + +# qhasm: int64 mulr6 + +# qhasm: int64 mulr7 + +# qhasm: int64 mulrax + +# qhasm: int64 mulrdx + +# qhasm: int64 mulx0 + +# qhasm: int64 mulx1 + +# qhasm: int64 mulx2 + +# qhasm: int64 mulx3 + +# qhasm: int64 mulc + +# qhasm: int64 mulzero + +# qhasm: int64 muli38 + +# qhasm: int64 addt0 + +# qhasm: int64 addt1 + +# qhasm: int64 subt0 + +# qhasm: int64 subt1 + +# qhasm: enter crypto_sign_ed25519_amd64_64_24k_batch_ge25519_nielsadd_p1p1 +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_24k_batch_ge25519_nielsadd_p1p1 +.globl crypto_sign_ed25519_amd64_64_24k_batch_ge25519_nielsadd_p1p1 +_crypto_sign_ed25519_amd64_64_24k_batch_ge25519_nielsadd_p1p1: +crypto_sign_ed25519_amd64_64_24k_batch_ge25519_nielsadd_p1p1: +mov %rsp,%r11 +and $31,%r11 +add $128,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: qp = qp +# asm 1: mov qp=int64#4 +# asm 2: mov qp=%rcx +mov %rdx,%rcx + +# qhasm: a0 = *(uint64 *)(pp + 32) +# asm 1: movq 32(a0=int64#3 +# asm 2: movq 32(a0=%rdx +movq 32(%rsi),%rdx + +# qhasm: a1 = *(uint64 *)(pp + 40) +# asm 1: movq 40(a1=int64#5 +# asm 2: movq 40(a1=%r8 +movq 40(%rsi),%r8 + +# qhasm: a2 = *(uint64 *)(pp + 48) +# asm 1: movq 48(a2=int64#6 +# asm 2: movq 48(a2=%r9 +movq 48(%rsi),%r9 + +# qhasm: a3 = *(uint64 *)(pp + 56) +# asm 1: movq 56(a3=int64#7 +# asm 2: movq 56(a3=%rax +movq 56(%rsi),%rax + +# qhasm: b0 = a0 +# asm 1: mov b0=int64#8 +# asm 2: mov b0=%r10 +mov %rdx,%r10 + +# qhasm: b1 = a1 +# asm 1: mov b1=int64#9 +# asm 2: mov b1=%r11 +mov 
%r8,%r11 + +# qhasm: b2 = a2 +# asm 1: mov b2=int64#10 +# asm 2: mov b2=%r12 +mov %r9,%r12 + +# qhasm: b3 = a3 +# asm 1: mov b3=int64#11 +# asm 2: mov b3=%r13 +mov %rax,%r13 + +# qhasm: carry? a0 -= *(uint64 *) (pp + 0) +# asm 1: subq 0(subt0=int64#12 +# asm 2: mov $0,>subt0=%r14 +mov $0,%r14 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#13 +# asm 2: mov $38,>subt1=%r15 +mov $38,%r15 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae addt0=int64#12 +# asm 2: mov $0,>addt0=%r14 +mov $0,%r14 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#13 +# asm 2: mov $38,>addt1=%r15 +mov $38,%r15 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae a0_stack=stack64#8 +# asm 2: movq a0_stack=56(%rsp) +movq %rdx,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq a1_stack=stack64#9 +# asm 2: movq a1_stack=64(%rsp) +movq %r8,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq a2_stack=stack64#10 +# asm 2: movq a2_stack=72(%rsp) +movq %r9,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq a3_stack=stack64#11 +# asm 2: movq a3_stack=80(%rsp) +movq %rax,80(%rsp) + +# qhasm: b0_stack = b0 +# asm 1: movq b0_stack=stack64#12 +# asm 2: movq b0_stack=88(%rsp) +movq %r10,88(%rsp) + +# qhasm: b1_stack = b1 +# asm 1: movq b1_stack=stack64#13 +# asm 2: movq b1_stack=96(%rsp) +movq %r11,96(%rsp) + +# qhasm: b2_stack = b2 +# asm 1: movq b2_stack=stack64#14 +# asm 2: movq b2_stack=104(%rsp) +movq %r12,104(%rsp) + +# qhasm: b3_stack = b3 +# asm 1: movq b3_stack=stack64#15 +# asm 2: movq b3_stack=112(%rsp) +movq %r13,112(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = a0_stack +# asm 1: movq mulx0=int64#10 +# asm 2: movq mulx0=%r12 +movq 56(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul a0=int64#11 +# asm 2: mov a0=%r13 +mov %rax,%r13 + +# qhasm: a1 = mulrdx +# asm 1: mov a1=int64#12 +# asm 2: mov a1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(qp + 8) +# asm 1: movq 8(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul a2=int64#13 +# asm 2: mov $0,>a2=%r15 +mov $0,%r15 + +# qhasm: a2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul a3=int64#14 +# asm 2: mov $0,>a3=%rbx +mov $0,%rbx + +# qhasm: a3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#10 +# asm 2: movq mulx1=%r12 +movq 64(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + 
carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#10 +# asm 2: movq mulx2=%r12 +movq 72(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#10 +# asm 2: movq mulx3=%r12 +movq 80(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? a0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? a1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? a2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r11,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? a3 += mulrax +# asm 1: add mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#3 +# asm 2: imulq $38,mulr4=%rdx +imulq $38,%r8,%rdx + +# qhasm: carry? 
a0 += mulr4 +# asm 1: add mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#5 +# asm 2: mov $38,>muli38=%r8 +mov $38,%r8 + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc a0_stack=stack64#8 +# asm 2: movq a0_stack=56(%rsp) +movq %r13,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq a1_stack=stack64#9 +# asm 2: movq a1_stack=64(%rsp) +movq %r14,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq a2_stack=stack64#10 +# asm 2: movq a2_stack=72(%rsp) +movq %r15,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq a3_stack=stack64#11 +# asm 2: movq a3_stack=80(%rsp) +movq %rbx,80(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = b0_stack +# asm 1: movq mulx0=int64#10 +# asm 2: movq mulx0=%r12 +movq 88(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul e0=int64#11 +# asm 2: mov e0=%r13 +mov %rax,%r13 + +# qhasm: e1 = mulrdx +# asm 1: mov e1=int64#12 +# asm 2: mov e1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(qp + 40) +# asm 1: movq 40(mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul e2=int64#13 +# asm 2: mov $0,>e2=%r15 +mov $0,%r15 + +# qhasm: e2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul e3=int64#14 +# asm 2: mov $0,>e3=%rbx +mov $0,%rbx + +# qhasm: e3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#10 +# asm 2: movq mulx1=%r12 +movq 96(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#10 +# asm 2: movq mulx2=%r12 +movq 104(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * 
mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#10 +# asm 2: movq mulx3=%r12 +movq 112(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? e0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? e1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? e2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r11,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? e3 += mulrax +# asm 1: add mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#3 +# asm 2: imulq $38,mulr4=%rdx +imulq $38,%r8,%rdx + +# qhasm: carry? e0 += mulr4 +# asm 1: add mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#5 +# asm 2: mov $38,>muli38=%r8 +mov $38,%r8 + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc h0=int64#3 +# asm 2: mov h0=%rdx +mov %r13,%rdx + +# qhasm: h1 = e1 +# asm 1: mov h1=int64#5 +# asm 2: mov h1=%r8 +mov %r14,%r8 + +# qhasm: h2 = e2 +# asm 1: mov h2=int64#6 +# asm 2: mov h2=%r9 +mov %r15,%r9 + +# qhasm: h3 = e3 +# asm 1: mov h3=int64#7 +# asm 2: mov h3=%rax +mov %rbx,%rax + +# qhasm: carry? 
e0 -= a0_stack +# asm 1: subq subt0=int64#8 +# asm 2: mov $0,>subt0=%r10 +mov $0,%r10 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#9 +# asm 2: mov $38,>subt1=%r11 +mov $38,%r11 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae addt0=int64#8 +# asm 2: mov $0,>addt0=%r10 +mov $0,%r10 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#9 +# asm 2: mov $38,>addt1=%r11 +mov $38,%r11 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = *(uint64 *)(pp + 96) +# asm 1: movq 96(mulx0=int64#10 +# asm 2: movq 96(mulx0=%r12 +movq 96(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul c0=int64#11 +# asm 2: mov c0=%r13 +mov %rax,%r13 + +# qhasm: c1 = mulrdx +# asm 1: mov c1=int64#12 +# asm 2: mov c1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(qp + 72) +# asm 1: movq 72(mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul c2=int64#13 +# asm 2: mov $0,>c2=%r15 +mov $0,%r15 + +# qhasm: c2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul c3=int64#14 +# asm 2: mov $0,>c3=%rbx +mov $0,%rbx + +# qhasm: c3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#10 +# asm 2: movq 104(mulx1=%r12 +movq 104(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#10 +# asm 2: movq 112(mulx2=%r12 +movq 112(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx 
mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#10 +# asm 2: movq 120(mulx3=%r12 +movq 120(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r11,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c3 += mulrax +# asm 1: add mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#3 +# asm 2: imulq $38,mulr4=%rdx +imulq $38,%rcx,%rdx + +# qhasm: carry? c0 += mulr4 +# asm 1: add mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#4 +# asm 2: mov $38,>muli38=%rcx +mov $38,%rcx + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc f0=int64#3 +# asm 2: movq 64(f0=%rdx +movq 64(%rsi),%rdx + +# qhasm: f1 = *(uint64 *)(pp + 72) +# asm 1: movq 72(f1=int64#4 +# asm 2: movq 72(f1=%rcx +movq 72(%rsi),%rcx + +# qhasm: f2 = *(uint64 *)(pp + 80) +# asm 1: movq 80(f2=int64#5 +# asm 2: movq 80(f2=%r8 +movq 80(%rsi),%r8 + +# qhasm: f3 = *(uint64 *)(pp + 88) +# asm 1: movq 88(f3=int64#2 +# asm 2: movq 88(f3=%rsi +movq 88(%rsi),%rsi + +# qhasm: carry? 
f0 += f0 +# asm 1: add addt0=int64#6 +# asm 2: mov $0,>addt0=%r9 +mov $0,%r9 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#7 +# asm 2: mov $38,>addt1=%rax +mov $38,%rax + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae g0=int64#6 +# asm 2: mov g0=%r9 +mov %rdx,%r9 + +# qhasm: g1 = f1 +# asm 1: mov g1=int64#7 +# asm 2: mov g1=%rax +mov %rcx,%rax + +# qhasm: g2 = f2 +# asm 1: mov g2=int64#8 +# asm 2: mov g2=%r10 +mov %r8,%r10 + +# qhasm: g3 = f3 +# asm 1: mov g3=int64#9 +# asm 2: mov g3=%r11 +mov %rsi,%r11 + +# qhasm: carry? f0 -= c0 +# asm 1: sub subt0=int64#10 +# asm 2: mov $0,>subt0=%r12 +mov $0,%r12 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#15 +# asm 2: mov $38,>subt1=%rbp +mov $38,%rbp + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae addt0=int64#10 +# asm 2: mov $0,>addt0=%r12 +mov $0,%r12 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#11 +# asm 2: mov $38,>addt1=%r13 +mov $38,%r13 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-64-24k/ge25519_p1p1_to_p2.s b/src/ed25519-supercop-amd64-64-24k/ge25519_p1p1_to_p2.s new file mode 100644 index 0000000..e1cd6bc --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/ge25519_p1p1_to_p2.s @@ -0,0 +1,2174 @@ + +# qhasm: int64 rp + +# qhasm: int64 pp + +# qhasm: input rp + +# qhasm: input pp + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 rx0 + +# qhasm: int64 rx1 + +# qhasm: int64 rx2 + +# qhasm: int64 rx3 + +# qhasm: int64 ry0 + +# qhasm: int64 ry1 + +# qhasm: int64 ry2 + +# qhasm: int64 ry3 + +# qhasm: int64 rz0 + +# qhasm: int64 rz1 + +# qhasm: int64 rz2 + +# qhasm: int64 rz3 + +# qhasm: int64 mulr4 + +# qhasm: int64 mulr5 + +# qhasm: int64 mulr6 + +# qhasm: int64 mulr7 + +# qhasm: int64 mulrax + +# qhasm: int64 mulrdx + +# qhasm: int64 mulx0 + +# qhasm: int64 mulx1 + +# qhasm: int64 mulx2 + +# qhasm: int64 mulx3 + +# qhasm: int64 mulc + +# qhasm: int64 mulzero + +# qhasm: int64 muli38 + +# qhasm: enter crypto_sign_ed25519_amd64_64_24k_batch_ge25519_p1p1_to_p2 +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_24k_batch_ge25519_p1p1_to_p2 +.globl 
crypto_sign_ed25519_amd64_64_24k_batch_ge25519_p1p1_to_p2 +_crypto_sign_ed25519_amd64_64_24k_batch_ge25519_p1p1_to_p2: +crypto_sign_ed25519_amd64_64_24k_batch_ge25519_p1p1_to_p2: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#5 +# asm 2: mov $0,>mulr5=%r8 +mov $0,%r8 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulx0 = *(uint64 *)(pp + 0) +# asm 1: movq 0(mulx0=int64#9 +# asm 2: movq 0(mulx0=%r11 +movq 0(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rx0=int64#10 +# asm 2: mov rx0=%r12 +mov %rax,%r12 + +# qhasm: rx1 = mulrdx +# asm 1: mov rx1=int64#11 +# asm 2: mov rx1=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: movq 104(mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rx2=int64#12 +# asm 2: mov $0,>rx2=%r14 +mov $0,%r14 + +# qhasm: rx2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rx3=int64#13 +# asm 2: mov $0,>rx3=%r15 +mov $0,%r15 + +# qhasm: rx3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#9 +# asm 2: movq 8(mulx1=%r11 +movq 8(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 
120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#9 +# asm 2: movq 16(mulx2=%r11 +movq 16(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#9 +# asm 2: movq 24(mulx3=%r11 +movq 24(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx3 += mulrax +# asm 1: add mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#3 +# asm 2: imulq $38,mulr4=%rdx +imulq $38,%rcx,%rdx + +# qhasm: carry? 
rx0 += mulr4 +# asm 1: add mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#4 +# asm 2: mov $38,>muli38=%rcx +mov $38,%rcx + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#5 +# asm 2: mov $0,>mulr5=%r8 +mov $0,%r8 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulx0 = *(uint64 *)(pp + 64) +# asm 1: movq 64(mulx0=int64#9 +# asm 2: movq 64(mulx0=%r11 +movq 64(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 32) +# asm 1: movq 32(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul ry0=int64#10 +# asm 2: mov ry0=%r12 +mov %rax,%r12 + +# qhasm: ry1 = mulrdx +# asm 1: mov ry1=int64#11 +# asm 2: mov ry1=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(pp + 40) +# asm 1: movq 40(mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul ry2=int64#12 +# asm 2: mov $0,>ry2=%r14 +mov $0,%r14 + +# qhasm: ry2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul ry3=int64#13 +# asm 2: mov $0,>ry3=%r15 +mov $0,%r15 + +# qhasm: ry3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#9 +# asm 2: movq 72(mulx1=%r11 +movq 72(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 32) +# asm 1: movq 32(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#9 +# asm 2: movq 80(mulx2=%r11 +movq 80(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 32) +# asm 1: movq 32(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 
56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#9 +# asm 2: movq 88(mulx3=%r11 +movq 88(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 32) +# asm 1: movq 32(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? ry0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? ry1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? ry2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? ry3 += mulrax +# asm 1: add mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#3 +# asm 2: imulq $38,mulr4=%rdx +imulq $38,%rcx,%rdx + +# qhasm: carry? 
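# note: the pp offsets suggest this routine is the P1P1-to-P2 conversion:
# the product reduced above as rx came from pp+0 and pp+96, ry (this
# block) from pp+64 and pp+32, and rz (the next block) from pp+32 and
# pp+96, matching X3 = X*T, Y3 = Y*Z, Z3 = Z*T under the (x, z, y, t)
# field order this implementation appears to use for ge25519_p1p1.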
ry0 += mulr4 +# asm 1: add mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#4 +# asm 2: mov $38,>muli38=%rcx +mov $38,%rcx + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#5 +# asm 2: mov $0,>mulr5=%r8 +mov $0,%r8 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulx0 = *(uint64 *)(pp + 32) +# asm 1: movq 32(mulx0=int64#9 +# asm 2: movq 32(mulx0=%r11 +movq 32(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rz0=int64#10 +# asm 2: mov rz0=%r12 +mov %rax,%r12 + +# qhasm: rz1 = mulrdx +# asm 1: mov rz1=int64#11 +# asm 2: mov rz1=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: movq 104(mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rz2=int64#12 +# asm 2: mov $0,>rz2=%r14 +mov $0,%r14 + +# qhasm: rz2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rz3=int64#13 +# asm 2: mov $0,>rz3=%r15 +mov $0,%r15 + +# qhasm: rz3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#9 +# asm 2: movq 40(mulx1=%r11 +movq 40(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#9 +# asm 2: movq 48(mulx2=%r11 +movq 48(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax 
+movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#9 +# asm 2: movq 56(mulx3=%r11 +movq 56(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rz0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rz1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rz2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rz3 += mulrax +# asm 1: add mulr4=int64#2 +# asm 2: mov $0,>mulr4=%rsi +mov $0,%rsi + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#2 +# asm 2: imulq $38,mulr4=%rsi +imulq $38,%rsi,%rsi + +# qhasm: carry? 
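# note: after folding mulr4 into rz a carry can still come out of rz3;
# the mulzero/muli38/cmovc sequence below adds 38 once more in that
# case, selecting between 0 and 38 with cmovc so the fixup stays
# branch-free (constant-time).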
rz0 += mulr4 +# asm 1: add mulzero=int64#2 +# asm 2: mov $0,>mulzero=%rsi +mov $0,%rsi + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#3 +# asm 2: mov $38,>muli38=%rdx +mov $38,%rdx + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-64-24k/ge25519_p1p1_to_p3.s b/src/ed25519-supercop-amd64-64-24k/ge25519_p1p1_to_p3.s new file mode 100644 index 0000000..f56526c --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/ge25519_p1p1_to_p3.s @@ -0,0 +1,2844 @@ + +# qhasm: int64 rp + +# qhasm: int64 pp + +# qhasm: input rp + +# qhasm: input pp + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 rx0 + +# qhasm: int64 rx1 + +# qhasm: int64 rx2 + +# qhasm: int64 rx3 + +# qhasm: int64 ry0 + +# qhasm: int64 ry1 + +# qhasm: int64 ry2 + +# qhasm: int64 ry3 + +# qhasm: int64 rz0 + +# qhasm: int64 rz1 + +# qhasm: int64 rz2 + +# qhasm: int64 rz3 + +# qhasm: int64 rt0 + +# qhasm: int64 rt1 + +# qhasm: int64 rt2 + +# qhasm: int64 rt3 + +# qhasm: int64 mulr4 + +# qhasm: int64 mulr5 + +# qhasm: int64 mulr6 + +# qhasm: int64 mulr7 + +# qhasm: int64 mulrax + +# qhasm: int64 mulrdx + +# qhasm: int64 mulx0 + +# qhasm: int64 mulx1 + +# qhasm: int64 mulx2 + +# qhasm: int64 mulx3 + +# qhasm: int64 mulc + +# qhasm: int64 mulzero + +# qhasm: int64 muli38 + +# qhasm: enter crypto_sign_ed25519_amd64_64_24k_batch_ge25519_p1p1_to_p3 +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_24k_batch_ge25519_p1p1_to_p3 +.globl crypto_sign_ed25519_amd64_64_24k_batch_ge25519_p1p1_to_p3 +_crypto_sign_ed25519_amd64_64_24k_batch_ge25519_p1p1_to_p3: +crypto_sign_ed25519_amd64_64_24k_batch_ge25519_p1p1_to_p3: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq 
caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#5 +# asm 2: mov $0,>mulr5=%r8 +mov $0,%r8 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulx0 = *(uint64 *)(pp + 0) +# asm 1: movq 0(mulx0=int64#9 +# asm 2: movq 0(mulx0=%r11 +movq 0(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rx0=int64#10 +# asm 2: mov rx0=%r12 +mov %rax,%r12 + +# qhasm: rx1 = mulrdx +# asm 1: mov rx1=int64#11 +# asm 2: mov rx1=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: movq 104(mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rx2=int64#12 +# asm 2: mov $0,>rx2=%r14 +mov $0,%r14 + +# qhasm: rx2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rx3=int64#13 +# asm 2: mov $0,>rx3=%r15 +mov $0,%r15 + +# qhasm: rx3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#9 +# asm 2: movq 8(mulx1=%r11 +movq 8(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#9 +# asm 2: movq 16(mulx2=%r11 +movq 16(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc 
mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#9 +# asm 2: movq 24(mulx3=%r11 +movq 24(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx3 += mulrax +# asm 1: add mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#3 +# asm 2: imulq $38,mulr4=%rdx +imulq $38,%rcx,%rdx + +# qhasm: carry? 
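# note: this is the same conversion as in the _to_p2 file above, but
# ge25519_p1p1_to_p3 also fills in the extended coordinate: X3 = X*T
# (the product being reduced here), then Y3 = Y*Z, Z3 = Z*T, and finally
# T3 = X*Y, each 512-bit product reduced mod 2^255 - 19 by the same
# multiply-by-38 folds.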
rx0 += mulr4 +# asm 1: add mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#4 +# asm 2: mov $38,>muli38=%rcx +mov $38,%rcx + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#5 +# asm 2: mov $0,>mulr5=%r8 +mov $0,%r8 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulx0 = *(uint64 *)(pp + 64) +# asm 1: movq 64(mulx0=int64#9 +# asm 2: movq 64(mulx0=%r11 +movq 64(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 32) +# asm 1: movq 32(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul ry0=int64#10 +# asm 2: mov ry0=%r12 +mov %rax,%r12 + +# qhasm: ry1 = mulrdx +# asm 1: mov ry1=int64#11 +# asm 2: mov ry1=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(pp + 40) +# asm 1: movq 40(mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul ry2=int64#12 +# asm 2: mov $0,>ry2=%r14 +mov $0,%r14 + +# qhasm: ry2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul ry3=int64#13 +# asm 2: mov $0,>ry3=%r15 +mov $0,%r15 + +# qhasm: ry3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#9 +# asm 2: movq 72(mulx1=%r11 +movq 72(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 32) +# asm 1: movq 32(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#9 +# asm 2: movq 80(mulx2=%r11 +movq 80(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 32) +# asm 1: movq 32(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 
56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#9 +# asm 2: movq 88(mulx3=%r11 +movq 88(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 32) +# asm 1: movq 32(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? ry0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? ry1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? ry2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? ry3 += mulrax +# asm 1: add mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#3 +# asm 2: imulq $38,mulr4=%rdx +imulq $38,%rcx,%rdx + +# qhasm: carry? 
ry0 += mulr4 +# asm 1: add mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#4 +# asm 2: mov $38,>muli38=%rcx +mov $38,%rcx + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#5 +# asm 2: mov $0,>mulr5=%r8 +mov $0,%r8 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulx0 = *(uint64 *)(pp + 32) +# asm 1: movq 32(mulx0=int64#9 +# asm 2: movq 32(mulx0=%r11 +movq 32(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rz0=int64#10 +# asm 2: mov rz0=%r12 +mov %rax,%r12 + +# qhasm: rz1 = mulrdx +# asm 1: mov rz1=int64#11 +# asm 2: mov rz1=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(pp + 104) +# asm 1: movq 104(mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rz2=int64#12 +# asm 2: mov $0,>rz2=%r14 +mov $0,%r14 + +# qhasm: rz2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rz3=int64#13 +# asm 2: mov $0,>rz3=%r15 +mov $0,%r15 + +# qhasm: rz3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#9 +# asm 2: movq 40(mulx1=%r11 +movq 40(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#9 +# asm 2: movq 48(mulx2=%r11 +movq 48(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax 
+movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#9 +# asm 2: movq 56(mulx3=%r11 +movq 56(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rz0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rz1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rz2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rz3 += mulrax +# asm 1: add mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#3 +# asm 2: imulq $38,mulr4=%rdx +imulq $38,%rcx,%rdx + +# qhasm: carry? 
rz0 += mulr4 +# asm 1: add mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#4 +# asm 2: mov $38,>muli38=%rcx +mov $38,%rcx + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc mulr4=int64#4 +# asm 2: mov $0,>mulr4=%rcx +mov $0,%rcx + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#5 +# asm 2: mov $0,>mulr5=%r8 +mov $0,%r8 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#6 +# asm 2: mov $0,>mulr6=%r9 +mov $0,%r9 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#8 +# asm 2: mov $0,>mulr7=%r10 +mov $0,%r10 + +# qhasm: mulx0 = *(uint64 *)(pp + 0) +# asm 1: movq 0(mulx0=int64#9 +# asm 2: movq 0(mulx0=%r11 +movq 0(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 64) +# asm 1: movq 64(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rt0=int64#10 +# asm 2: mov rt0=%r12 +mov %rax,%r12 + +# qhasm: rt1 = mulrdx +# asm 1: mov rt1=int64#11 +# asm 2: mov rt1=%r13 +mov %rdx,%r13 + +# qhasm: mulrax = *(uint64 *)(pp + 72) +# asm 1: movq 72(mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rt2=int64#12 +# asm 2: mov $0,>rt2=%r14 +mov $0,%r14 + +# qhasm: rt2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rt3=int64#13 +# asm 2: mov $0,>rt3=%r15 +mov $0,%r15 + +# qhasm: rt3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#9 +# asm 2: movq 8(mulx1=%r11 +movq 8(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 64) +# asm 1: movq 64(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#9 +# asm 2: movq 16(mulx2=%r11 +movq 16(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 64) +# asm 1: movq 64(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# 
qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#9 +# asm 2: movq 24(mulx3=%r11 +movq 24(%rsi),%r11 + +# qhasm: mulrax = *(uint64 *)(pp + 64) +# asm 1: movq 64(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#14 +# asm 2: mov $0,>mulc=%rbx +mov $0,%rbx + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rsi),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %rcx,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rt0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rt1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rt2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rt3 += mulrax +# asm 1: add mulr4=int64#2 +# asm 2: mov $0,>mulr4=%rsi +mov $0,%rsi + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#2 +# asm 2: imulq $38,mulr4=%rsi +imulq $38,%rsi,%rsi + +# qhasm: carry? 
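# note: the block above (pp+0 times pp+64) is the extra T3 = X*Y product
# that the P3 result carries and the P2 result omits; rt0..rt3 now get
# the same 38-fold reduction as rx, ry and rz before being stored.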
rt0 += mulr4 +# asm 1: add mulzero=int64#2 +# asm 2: mov $0,>mulzero=%rsi +mov $0,%rsi + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#3 +# asm 2: mov $38,>muli38=%rdx +mov $38,%rdx + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-64-24k/ge25519_pack.c b/src/ed25519-supercop-amd64-64-24k/ge25519_pack.c new file mode 100644 index 0000000..f289fe5 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/ge25519_pack.c @@ -0,0 +1,13 @@ +#include "fe25519.h" +#include "sc25519.h" +#include "ge25519.h" + +void ge25519_pack(unsigned char r[32], const ge25519_p3 *p) +{ + fe25519 tx, ty, zi; + fe25519_invert(&zi, &p->z); + fe25519_mul(&tx, &p->x, &zi); + fe25519_mul(&ty, &p->y, &zi); + fe25519_pack(r, &ty); + r[31] ^= fe25519_getparity(&tx) << 7; +} diff --git a/src/ed25519-supercop-amd64-64-24k/ge25519_pnielsadd_p1p1.s b/src/ed25519-supercop-amd64-64-24k/ge25519_pnielsadd_p1p1.s new file mode 100644 index 0000000..45c06ed --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/ge25519_pnielsadd_p1p1.s @@ -0,0 +1,3580 @@ + +# qhasm: int64 rp + +# qhasm: int64 pp + +# qhasm: int64 qp + +# qhasm: input rp + +# qhasm: input pp + +# qhasm: input qp + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 a0 + +# qhasm: int64 a1 + +# qhasm: int64 a2 + +# qhasm: int64 a3 + +# qhasm: stack64 a0_stack + +# qhasm: stack64 a1_stack + +# qhasm: stack64 a2_stack + +# qhasm: stack64 a3_stack + +# qhasm: int64 b0 + +# qhasm: int64 b1 + +# qhasm: int64 b2 + +# qhasm: int64 b3 + +# qhasm: stack64 b0_stack + +# qhasm: stack64 b1_stack + +# qhasm: stack64 b2_stack + +# qhasm: stack64 b3_stack + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: stack64 c0_stack + +# qhasm: stack64 c1_stack + +# qhasm: stack64 c2_stack + +# qhasm: stack64 c3_stack + +# qhasm: int64 d0 + +# qhasm: int64 d1 + +# qhasm: int64 d2 + +# qhasm: int64 d3 + +# qhasm: stack64 d0_stack + +# qhasm: stack64 d1_stack + +# qhasm: stack64 d2_stack + +# qhasm: stack64 d3_stack + +# qhasm: int64 t10 + +# qhasm: int64 t11 + +# qhasm: int64 t12 + +# qhasm: int64 t13 + +# qhasm: stack64 
t10_stack + +# qhasm: stack64 t11_stack + +# qhasm: stack64 t12_stack + +# qhasm: stack64 t13_stack + +# qhasm: int64 t20 + +# qhasm: int64 t21 + +# qhasm: int64 t22 + +# qhasm: int64 t23 + +# qhasm: stack64 t20_stack + +# qhasm: stack64 t21_stack + +# qhasm: stack64 t22_stack + +# qhasm: stack64 t23_stack + +# qhasm: int64 rx0 + +# qhasm: int64 rx1 + +# qhasm: int64 rx2 + +# qhasm: int64 rx3 + +# qhasm: int64 ry0 + +# qhasm: int64 ry1 + +# qhasm: int64 ry2 + +# qhasm: int64 ry3 + +# qhasm: int64 rz0 + +# qhasm: int64 rz1 + +# qhasm: int64 rz2 + +# qhasm: int64 rz3 + +# qhasm: int64 rt0 + +# qhasm: int64 rt1 + +# qhasm: int64 rt2 + +# qhasm: int64 rt3 + +# qhasm: int64 x0 + +# qhasm: int64 x1 + +# qhasm: int64 x2 + +# qhasm: int64 x3 + +# qhasm: int64 mulr4 + +# qhasm: int64 mulr5 + +# qhasm: int64 mulr6 + +# qhasm: int64 mulr7 + +# qhasm: int64 mulrax + +# qhasm: int64 mulrdx + +# qhasm: int64 mulx0 + +# qhasm: int64 mulx1 + +# qhasm: int64 mulx2 + +# qhasm: int64 mulx3 + +# qhasm: int64 mulc + +# qhasm: int64 mulzero + +# qhasm: int64 muli38 + +# qhasm: int64 addt0 + +# qhasm: int64 addt1 + +# qhasm: int64 subt0 + +# qhasm: int64 subt1 + +# qhasm: enter crypto_sign_ed25519_amd64_64_24k_batch_ge25519_pnielsadd_p1p1 +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_24k_batch_ge25519_pnielsadd_p1p1 +.globl crypto_sign_ed25519_amd64_64_24k_batch_ge25519_pnielsadd_p1p1 +_crypto_sign_ed25519_amd64_64_24k_batch_ge25519_pnielsadd_p1p1: +crypto_sign_ed25519_amd64_64_24k_batch_ge25519_pnielsadd_p1p1: +mov %rsp,%r11 +and $31,%r11 +add $128,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: qp = qp +# asm 1: mov qp=int64#4 +# asm 2: mov qp=%rcx +mov %rdx,%rcx + +# qhasm: a0 = *(uint64 *)(pp + 32) +# asm 1: movq 32(a0=int64#3 +# asm 2: movq 32(a0=%rdx +movq 32(%rsi),%rdx + +# qhasm: a1 = *(uint64 *)(pp + 40) +# asm 1: movq 40(a1=int64#5 +# asm 2: movq 40(a1=%r8 +movq 40(%rsi),%r8 + +# qhasm: a2 = *(uint64 *)(pp + 48) +# asm 1: movq 48(a2=int64#6 +# asm 2: movq 48(a2=%r9 +movq 48(%rsi),%r9 + +# qhasm: a3 = *(uint64 *)(pp + 56) +# asm 1: movq 56(a3=int64#7 +# asm 2: movq 56(a3=%rax +movq 56(%rsi),%rax + +# qhasm: b0 = a0 +# asm 1: mov b0=int64#8 +# asm 2: mov b0=%r10 +mov %rdx,%r10 + +# qhasm: b1 = a1 +# asm 1: mov b1=int64#9 +# asm 2: mov b1=%r11 +mov %r8,%r11 + +# qhasm: b2 = a2 +# asm 1: mov b2=int64#10 +# asm 2: mov b2=%r12 +mov %r9,%r12 + +# qhasm: b3 = a3 +# asm 1: mov b3=int64#11 +# asm 2: mov b3=%r13 +mov %rax,%r13 + +# qhasm: carry? 
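# note: this routine adds a point in P3-style coordinates (pp) to a
# precomputed point (qp), which upstream stores in pniels form,
# apparently (y-x, y+x, z, 2*d*t). Below, a = Y1 - X1 and b = Y1 + X1
# are formed from the limbs loaded above; the subt/addt cmovae pairs
# subtract or add an extra 38 when the 4-limb subtract borrows or the
# add carries, again because 2^256 == 38 (mod 2^255 - 19).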
a0 -= *(uint64 *)(pp + 0) +# asm 1: subq 0(subt0=int64#12 +# asm 2: mov $0,>subt0=%r14 +mov $0,%r14 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#13 +# asm 2: mov $38,>subt1=%r15 +mov $38,%r15 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae addt0=int64#12 +# asm 2: mov $0,>addt0=%r14 +mov $0,%r14 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#13 +# asm 2: mov $38,>addt1=%r15 +mov $38,%r15 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae a0_stack=stack64#8 +# asm 2: movq a0_stack=56(%rsp) +movq %rdx,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq a1_stack=stack64#9 +# asm 2: movq a1_stack=64(%rsp) +movq %r8,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq a2_stack=stack64#10 +# asm 2: movq a2_stack=72(%rsp) +movq %r9,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq a3_stack=stack64#11 +# asm 2: movq a3_stack=80(%rsp) +movq %rax,80(%rsp) + +# qhasm: b0_stack = b0 +# asm 1: movq b0_stack=stack64#12 +# asm 2: movq b0_stack=88(%rsp) +movq %r10,88(%rsp) + +# qhasm: b1_stack = b1 +# asm 1: movq b1_stack=stack64#13 +# asm 2: movq b1_stack=96(%rsp) +movq %r11,96(%rsp) + +# qhasm: b2_stack = b2 +# asm 1: movq b2_stack=stack64#14 +# asm 2: movq b2_stack=104(%rsp) +movq %r12,104(%rsp) + +# qhasm: b3_stack = b3 +# asm 1: movq b3_stack=stack64#15 +# asm 2: movq b3_stack=112(%rsp) +movq %r13,112(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = a0_stack +# asm 1: movq mulx0=int64#10 +# asm 2: movq mulx0=%r12 +movq 56(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul a0=int64#11 +# asm 2: mov a0=%r13 +mov %rax,%r13 + +# qhasm: a1 = mulrdx +# asm 1: mov a1=int64#12 +# asm 2: mov a1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(qp + 8) +# asm 1: movq 8(mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul a2=int64#13 +# asm 2: mov $0,>a2=%r15 +mov $0,%r15 + +# qhasm: a2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul a3=int64#14 +# asm 2: mov $0,>a3=%rbx +mov $0,%rbx + +# qhasm: a3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#10 +# asm 2: movq mulx1=%r12 +movq 64(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp 
+mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#10 +# asm 2: movq mulx2=%r12 +movq 72(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#10 +# asm 2: movq mulx3=%r12 +movq 80(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 0) +# asm 1: movq 0(mulrax=int64#7 +# asm 2: movq 0(mulrax=%rax +movq 0(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 8(mulrax=%rax +movq 8(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 16(mulrax=%rax +movq 16(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 24(mulrax=%rax +movq 24(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? a0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? a1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? a2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r11,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? a3 += mulrax +# asm 1: add mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#3 +# asm 2: imulq $38,mulr4=%rdx +imulq $38,%r8,%rdx + +# qhasm: carry? 
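# note: the product reduced here is a = (Y1 - X1) * (qp+0..qp+24),
# i.e. (Y1 - X1)*(Y2 - X2) if qp+0 is the precomputed ysubx field of
# the pniels struct.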
a0 += mulr4 +# asm 1: add mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#5 +# asm 2: mov $38,>muli38=%r8 +mov $38,%r8 + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc a0_stack=stack64#8 +# asm 2: movq a0_stack=56(%rsp) +movq %r13,56(%rsp) + +# qhasm: a1_stack = a1 +# asm 1: movq a1_stack=stack64#9 +# asm 2: movq a1_stack=64(%rsp) +movq %r14,64(%rsp) + +# qhasm: a2_stack = a2 +# asm 1: movq a2_stack=stack64#10 +# asm 2: movq a2_stack=72(%rsp) +movq %r15,72(%rsp) + +# qhasm: a3_stack = a3 +# asm 1: movq a3_stack=stack64#11 +# asm 2: movq a3_stack=80(%rsp) +movq %rbx,80(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = b0_stack +# asm 1: movq mulx0=int64#10 +# asm 2: movq mulx0=%r12 +movq 88(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rx0=int64#11 +# asm 2: mov rx0=%r13 +mov %rax,%r13 + +# qhasm: rx1 = mulrdx +# asm 1: mov rx1=int64#12 +# asm 2: mov rx1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(qp + 40) +# asm 1: movq 40(mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rx2=int64#13 +# asm 2: mov $0,>rx2=%r15 +mov $0,%r15 + +# qhasm: rx2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rx3=int64#14 +# asm 2: mov $0,>rx3=%rbx +mov $0,%rbx + +# qhasm: rx3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#10 +# asm 2: movq mulx1=%r12 +movq 96(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#10 +# asm 2: movq mulx2=%r12 +movq 104(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = 
mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#10 +# asm 2: movq mulx3=%r12 +movq 112(%rsp),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 32) +# asm 1: movq 32(mulrax=int64#7 +# asm 2: movq 32(mulrax=%rax +movq 32(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 40(mulrax=%rax +movq 40(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 48(mulrax=%rax +movq 48(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 56(mulrax=%rax +movq 56(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r11,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rx3 += mulrax +# asm 1: add mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#3 +# asm 2: imulq $38,mulr4=%rdx +imulq $38,%r8,%rdx + +# qhasm: carry? rx0 += mulr4 +# asm 1: add mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#5 +# asm 2: mov $38,>muli38=%r8 +mov $38,%r8 + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc ry0=int64#3 +# asm 2: mov ry0=%rdx +mov %r13,%rdx + +# qhasm: ry1 = rx1 +# asm 1: mov ry1=int64#5 +# asm 2: mov ry1=%r8 +mov %r14,%r8 + +# qhasm: ry2 = rx2 +# asm 1: mov ry2=int64#6 +# asm 2: mov ry2=%r9 +mov %r15,%r9 + +# qhasm: ry3 = rx3 +# asm 1: mov ry3=int64#7 +# asm 2: mov ry3=%rax +mov %rbx,%rax + +# qhasm: carry? 
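# note: rx now holds B = (Y1 + X1) * (qp+32..) and a_stack holds
# A = (Y1 - X1) * (qp+0..); the copies plus the add/sub below form
# ry = B + A and rx = B - A, the sum/difference pair of the completed
# Edwards addition.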
ry0 += a0_stack +# asm 1: addq addt0=int64#8 +# asm 2: mov $0,>addt0=%r10 +mov $0,%r10 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#9 +# asm 2: mov $38,>addt1=%r11 +mov $38,%r11 + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae subt0=int64#8 +# asm 2: mov $0,>subt0=%r10 +mov $0,%r10 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#9 +# asm 2: mov $38,>subt1=%r11 +mov $38,%r11 + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = *(uint64 *)(pp + 96) +# asm 1: movq 96(mulx0=int64#10 +# asm 2: movq 96(mulx0=%r12 +movq 96(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul c0=int64#11 +# asm 2: mov c0=%r13 +mov %rax,%r13 + +# qhasm: c1 = mulrdx +# asm 1: mov c1=int64#12 +# asm 2: mov c1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(qp + 104) +# asm 1: movq 104(mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul c2=int64#13 +# asm 2: mov $0,>c2=%r15 +mov $0,%r15 + +# qhasm: c2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul c3=int64#14 +# asm 2: mov $0,>c3=%rbx +mov $0,%rbx + +# qhasm: c3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#10 +# asm 2: movq 104(mulx1=%r12 +movq 104(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#10 +# asm 2: movq 112(mulx2=%r12 +movq 112(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rcx),%rax + +# qhasm: 
(uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#10 +# asm 2: movq 120(mulx3=%r12 +movq 120(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 96) +# asm 1: movq 96(mulrax=int64#7 +# asm 2: movq 96(mulrax=%rax +movq 96(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 104(mulrax=%rax +movq 104(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 112(mulrax=%rax +movq 112(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 120(mulrax=%rax +movq 120(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r11,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? c3 += mulrax +# asm 1: add mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#3 +# asm 2: imulq $38,mulr4=%rdx +imulq $38,%r8,%rdx + +# qhasm: carry? 
c0 += mulr4 +# asm 1: add mulzero=int64#3 +# asm 2: mov $0,>mulzero=%rdx +mov $0,%rdx + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#5 +# asm 2: mov $38,>muli38=%r8 +mov $38,%r8 + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc c0_stack=stack64#8 +# asm 2: movq c0_stack=56(%rsp) +movq %r13,56(%rsp) + +# qhasm: c1_stack = c1 +# asm 1: movq c1_stack=stack64#9 +# asm 2: movq c1_stack=64(%rsp) +movq %r14,64(%rsp) + +# qhasm: c2_stack = c2 +# asm 1: movq c2_stack=stack64#10 +# asm 2: movq c2_stack=72(%rsp) +movq %r15,72(%rsp) + +# qhasm: c3_stack = c3 +# asm 1: movq c3_stack=stack64#11 +# asm 2: movq c3_stack=80(%rsp) +movq %rbx,80(%rsp) + +# qhasm: mulr4 = 0 +# asm 1: mov $0,>mulr4=int64#5 +# asm 2: mov $0,>mulr4=%r8 +mov $0,%r8 + +# qhasm: mulr5 = 0 +# asm 1: mov $0,>mulr5=int64#6 +# asm 2: mov $0,>mulr5=%r9 +mov $0,%r9 + +# qhasm: mulr6 = 0 +# asm 1: mov $0,>mulr6=int64#8 +# asm 2: mov $0,>mulr6=%r10 +mov $0,%r10 + +# qhasm: mulr7 = 0 +# asm 1: mov $0,>mulr7=int64#9 +# asm 2: mov $0,>mulr7=%r11 +mov $0,%r11 + +# qhasm: mulx0 = *(uint64 *)(pp + 64) +# asm 1: movq 64(mulx0=int64#10 +# asm 2: movq 64(mulx0=%r12 +movq 64(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rt0=int64#11 +# asm 2: mov rt0=%r13 +mov %rax,%r13 + +# qhasm: rt1 = mulrdx +# asm 1: mov rt1=int64#12 +# asm 2: mov rt1=%r14 +mov %rdx,%r14 + +# qhasm: mulrax = *(uint64 *)(qp + 72) +# asm 1: movq 72(mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rt2=int64#13 +# asm 2: mov $0,>rt2=%r15 +mov $0,%r15 + +# qhasm: rt2 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul rt3=int64#14 +# asm 2: mov $0,>rt3=%rbx +mov $0,%rbx + +# qhasm: rt3 += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx0 +# asm 1: mul mulx1=int64#10 +# asm 2: movq 72(mulx1=%r12 +movq 72(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx1 +# asm 1: mul mulx2=int64#10 +# asm 2: movq 80(mulx2=%r12 +movq 80(%rsi),%r12 + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: 
(uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulc=int64#15 +# asm 2: mov $0,>mulc=%rbp +mov $0,%rbp + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx2 +# asm 1: mul mulx3=int64#2 +# asm 2: movq 88(mulx3=%rsi +movq 88(%rsi),%rsi + +# qhasm: mulrax = *(uint64 *)(qp + 64) +# asm 1: movq 64(mulrax=int64#7 +# asm 2: movq 64(mulrax=%rax +movq 64(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#10 +# asm 2: mov $0,>mulc=%r12 +mov $0,%r12 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 72(mulrax=%rax +movq 72(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#10 +# asm 2: mov $0,>mulc=%r12 +mov $0,%r12 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 80(mulrax=%rax +movq 80(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulc=int64#10 +# asm 2: mov $0,>mulc=%r12 +mov $0,%r12 + +# qhasm: mulc += mulrdx + carry +# asm 1: adc mulrax=int64#7 +# asm 2: movq 88(mulrax=%rax +movq 88(%rcx),%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * mulx3 +# asm 1: mul mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r8,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rt0 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r9,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rt1 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r10,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rt2 += mulrax +# asm 1: add mulrax=int64#7 +# asm 2: mov mulrax=%rax +mov %r11,%rax + +# qhasm: (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_24k_batch_38 +mulq crypto_sign_ed25519_amd64_64_24k_batch_38 + +# qhasm: carry? rt3 += mulrax +# asm 1: add mulr4=int64#2 +# asm 2: mov $0,>mulr4=%rsi +mov $0,%rsi + +# qhasm: mulr4 += mulrdx + carry +# asm 1: adc mulr4=int64#2 +# asm 2: imulq $38,mulr4=%rsi +imulq $38,%rsi,%rsi + +# qhasm: carry? rt0 += mulr4 +# asm 1: add mulzero=int64#2 +# asm 2: mov $0,>mulzero=%rsi +mov $0,%rsi + +# qhasm: muli38 = 38 +# asm 1: mov $38,>muli38=int64#3 +# asm 2: mov $38,>muli38=%rdx +mov $38,%rdx + +# qhasm: mulzero = muli38 if carry +# asm 1: cmovc addt0=int64#2 +# asm 2: mov $0,>addt0=%rsi +mov $0,%rsi + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#3 +# asm 2: mov $38,>addt1=%rdx +mov $38,%rdx + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae rz0=int64#2 +# asm 2: mov rz0=%rsi +mov %r13,%rsi + +# qhasm: rz1 = rt1 +# asm 1: mov rz1=int64#3 +# asm 2: mov rz1=%rdx +mov %r14,%rdx + +# qhasm: rz2 = rt2 +# asm 1: mov rz2=int64#4 +# asm 2: mov rz2=%rcx +mov %r15,%rcx + +# qhasm: rz3 = rt3 +# asm 1: mov rz3=int64#5 +# asm 2: mov rz3=%r8 +mov %rbx,%r8 + +# qhasm: carry? 
rz0 += c0_stack +# asm 1: addq addt0=int64#6 +# asm 2: mov $0,>addt0=%r9 +mov $0,%r9 + +# qhasm: addt1 = 38 +# asm 1: mov $38,>addt1=int64#7 +# asm 2: mov $38,>addt1=%rax +mov $38,%rax + +# qhasm: addt1 = addt0 if !carry +# asm 1: cmovae subt0=int64#6 +# asm 2: mov $0,>subt0=%r9 +mov $0,%r9 + +# qhasm: subt1 = 38 +# asm 1: mov $38,>subt1=int64#7 +# asm 2: mov $38,>subt1=%rax +mov $38,%rax + +# qhasm: subt1 = subt0 if !carry +# asm 1: cmovae caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-64-24k/ge25519_scalarmult_base.c b/src/ed25519-supercop-amd64-64-24k/ge25519_scalarmult_base.c new file mode 100644 index 0000000..9a0ff33 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/ge25519_scalarmult_base.c @@ -0,0 +1,50 @@ +#include "fe25519.h" +#include "sc25519.h" +#include "ge25519.h" + +/* Multiples of the base point in Niels' representation */ +static const ge25519_niels ge25519_base_multiples_niels[] = { +#include "ge25519_base_niels_smalltables.data" +}; + +/* d */ +static const fe25519 ecd = {{0x75EB4DCA135978A3, 0x00700A4D4141D8AB, 0x8CC740797779E898, 0x52036CEE2B6FFE73}}; + +void ge25519_scalarmult_base(ge25519_p3 *r, const sc25519 *s) +{ + signed char b[64]; + int i; + ge25519_niels t; + fe25519 d; + + sc25519_window4(b,s); + + ge25519_p1p1 tp1p1; + choose_t((ge25519_niels *)r, 0, (signed long long) b[1], ge25519_base_multiples_niels); + fe25519_sub(&d, &r->y, &r->x); + fe25519_add(&r->y, &r->y, &r->x); + r->x = d; + r->t = r->z; + fe25519_setint(&r->z,2); + for(i=3;i<64;i+=2) + { + choose_t(&t, (unsigned long long) i/2, (signed long long) b[i], ge25519_base_multiples_niels); + ge25519_nielsadd2(r, &t); + } + ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r); + ge25519_p1p1_to_p2((ge25519_p2 *)r, &tp1p1); + ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r); + ge25519_p1p1_to_p2((ge25519_p2 *)r, &tp1p1); + ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r); + ge25519_p1p1_to_p2((ge25519_p2 *)r, &tp1p1); + ge25519_dbl_p1p1(&tp1p1,(ge25519_p2 *)r); + ge25519_p1p1_to_p3(r, &tp1p1); + choose_t(&t, (unsigned long long) 0, (signed long long) b[0], ge25519_base_multiples_niels); + fe25519_mul(&t.t2d, &t.t2d, &ecd); + ge25519_nielsadd2(r, &t); + for(i=2;i<64;i+=2) + { + choose_t(&t, (unsigned long long) i/2, (signed long long) b[i], ge25519_base_multiples_niels); + ge25519_nielsadd2(r, &t); + } +} diff --git a/src/ed25519-supercop-amd64-64-24k/ge25519_unpackneg.c b/src/ed25519-supercop-amd64-64-24k/ge25519_unpackneg.c new file mode 100644 index 0000000..ff16fd2 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/ge25519_unpackneg.c @@ -0,0 +1,60 @@ +#include "fe25519.h" +#include "ge25519.h" + +/* d */ +static const fe25519 ecd = {{0x75EB4DCA135978A3, 0x00700A4D4141D8AB, 0x8CC740797779E898, 0x52036CEE2B6FFE73}}; 
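The ge25519_scalarmult_base listing above splits the scalar into 64 signed 4-bit windows: the odd-indexed windows are accumulated first, four doublings then supply their extra factor of 16, and the even-indexed windows are added last, so a single table of precomputed multiples serves both halves; choose_t is presumably the constant-time lookup (with conditional negation) into ge25519_base_multiples_niels. Below is a minimal sketch, assuming sc25519_window4 performs the usual signed-nibble recoding into digits in [-8,7]; this is illustrative C, not the library's routine.

#include <stdio.h>

/* Hypothetical stand-in for sc25519_window4: split each scalar byte
   into two nibbles, then propagate a carry so every digit lands in
   [-8,7], which lets each window index a small table plus a sign. */
static void window4(signed char r[64], const unsigned char s[32])
{
    int i, carry = 0;
    for (i = 0; i < 32; i++) {
        r[2*i]   = s[i] & 15;         /* low nibble  */
        r[2*i+1] = (s[i] >> 4) & 15;  /* high nibble */
    }
    for (i = 0; i < 63; i++) {
        r[i] += carry;
        carry = (r[i] + 8) >> 4;      /* 1 iff digit > 7 */
        r[i] -= carry << 4;           /* now -8 <= r[i] <= 7 */
    }
    r[63] += carry;                   /* top digit absorbs the carry */
}

int main(void)
{
    unsigned char s[32] = {0};
    signed char b[64];
    int i, acc = 0;
    s[0] = 0xfe; s[1] = 0x03;         /* toy scalar 0x03fe */
    window4(b, s);
    for (i = 7; i >= 0; i--) acc = acc * 16 + b[i];
    printf("recombined: 0x%x\n", acc); /* prints 0x3fe */
    return 0;
}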
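The constants ecd and sqrtm1 feed ge25519_unpackneg_vartime below, which recovers x from a compressed point via x^2 = (y^2 - 1)/(d y^2 + 1) = num/den. Since p = 2^255 - 19 satisfies p ≡ 5 (mod 8), one exponentiation by (p-5)/8 (fe25519_pow2523 here) yields a square-root candidate, exactly as the step-1 comment in the code states:

\[
x = u\,v^{3}\,\bigl(u\,v^{7}\bigr)^{(p-5)/8},
\qquad
v\,x^{2} \equiv \pm u \pmod{p}
\quad\text{whenever } u/v \text{ is a square,}
\]

with u = num and v = den. One multiplication by sqrt(-1) fixes the sign when v x^2 = -u; if the re-check still fails, u/v was not a square and the encoding is rejected.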
+/* sqrt(-1) */ +static const fe25519 sqrtm1 = {{0xC4EE1B274A0EA0B0, 0x2F431806AD2FE478, 0x2B4D00993DFBD7A7, 0x2B8324804FC1DF0B}}; + +/* return 0 on success, -1 otherwise */ +int ge25519_unpackneg_vartime(ge25519_p3 *r, const unsigned char p[32]) +{ + fe25519 t, chk, num, den, den2, den4, den6; + unsigned char par = p[31] >> 7; + + fe25519_setint(&r->z,1); + fe25519_unpack(&r->y, p); + fe25519_square(&num, &r->y); /* x = y^2 */ + fe25519_mul(&den, &num, &ecd); /* den = dy^2 */ + fe25519_sub(&num, &num, &r->z); /* x = y^2-1 */ + fe25519_add(&den, &r->z, &den); /* den = dy^2+1 */ + + /* Computation of sqrt(num/den) + 1.: computation of num^((p-5)/8)*den^((7p-35)/8) = (num*den^7)^((p-5)/8) + */ + fe25519_square(&den2, &den); + fe25519_square(&den4, &den2); + fe25519_mul(&den6, &den4, &den2); + fe25519_mul(&t, &den6, &num); + fe25519_mul(&t, &t, &den); + + fe25519_pow2523(&t, &t); + /* 2. computation of r->x = t * num * den^3 + */ + fe25519_mul(&t, &t, &num); + fe25519_mul(&t, &t, &den); + fe25519_mul(&t, &t, &den); + fe25519_mul(&r->x, &t, &den); + + /* 3. Check whether sqrt computation gave correct result, multiply by sqrt(-1) if not: + */ + fe25519_square(&chk, &r->x); + fe25519_mul(&chk, &chk, &den); + if (!fe25519_iseq_vartime(&chk, &num)) + fe25519_mul(&r->x, &r->x, &sqrtm1); + + /* 4. Now we have one of the two square roots, except if input was not a square + */ + fe25519_square(&chk, &r->x); + fe25519_mul(&chk, &chk, &den); + if (!fe25519_iseq_vartime(&chk, &num)) + return -1; + + /* 5. Choose the desired square root according to parity: + */ + if(fe25519_getparity(&r->x) != (1-par)) + fe25519_neg(&r->x, &r->x); + + fe25519_mul(&r->t, &r->x, &r->y); + return 0; +} diff --git a/src/ed25519-supercop-amd64-64-24k/heap_rootreplaced.s b/src/ed25519-supercop-amd64-64-24k/heap_rootreplaced.s new file mode 100644 index 0000000..c7e30c1 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/heap_rootreplaced.s @@ -0,0 +1,476 @@ + +# qhasm: int64 hp + +# qhasm: int64 hlen + +# qhasm: int64 sp + +# qhasm: int64 pp + +# qhasm: input hp + +# qhasm: input hlen + +# qhasm: input sp + +# qhasm: int64 prc + +# qhasm: int64 plc + +# qhasm: int64 pc + +# qhasm: int64 d + +# qhasm: int64 spp + +# qhasm: int64 sprc + +# qhasm: int64 spc + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 p0 + +# qhasm: int64 p1 + +# qhasm: int64 p2 + +# qhasm: int64 p3 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced +.globl crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced +_crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced: +crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = 
caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: pp = 0 +# asm 1: mov $0,>pp=int64#4 +# asm 2: mov $0,>pp=%rcx +mov $0,%rcx + +# qhasm: siftdownloop: +._siftdownloop: + +# qhasm: prc = pp +# asm 1: mov prc=int64#5 +# asm 2: mov prc=%r8 +mov %rcx,%r8 + +# qhasm: prc *= 2 +# asm 1: imulq $2,prc=int64#5 +# asm 2: imulq $2,prc=%r8 +imulq $2,%r8,%r8 + +# qhasm: pc = prc +# asm 1: mov pc=int64#6 +# asm 2: mov pc=%r9 +mov %r8,%r9 + +# qhasm: prc += 2 +# asm 1: add $2,? hlen - prc +# asm 1: cmp +jbe ._siftuploop + +# qhasm: sprc = *(uint64 *)(hp + prc * 8) +# asm 1: movq (sprc=int64#7 +# asm 2: movq (sprc=%rax +movq (%rdi,%r8,8),%rax + +# qhasm: sprc <<= 5 +# asm 1: shl $5,spc=int64#8 +# asm 2: movq (spc=%r10 +movq (%rdi,%r9,8),%r10 + +# qhasm: spc <<= 5 +# asm 1: shl $5,c0=int64#9 +# asm 2: movq 0(c0=%r11 +movq 0(%r10),%r11 + +# qhasm: c1 = *(uint64 *)(spc + 8) +# asm 1: movq 8(c1=int64#10 +# asm 2: movq 8(c1=%r12 +movq 8(%r10),%r12 + +# qhasm: c2 = *(uint64 *)(spc + 16) +# asm 1: movq 16(c2=int64#11 +# asm 2: movq 16(c2=%r13 +movq 16(%r10),%r13 + +# qhasm: c3 = *(uint64 *)(spc + 24) +# asm 1: movq 24(c3=int64#12 +# asm 2: movq 24(c3=%r14 +movq 24(%r10),%r14 + +# qhasm: carry? c0 -= *(uint64 *)(sprc + 0) +# asm 1: subq 0(>= 5 +# asm 1: shr $5,spp=int64#5 +# asm 2: movq (spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: *(uint64 *)(hp + pp * 8) = spc +# asm 1: movq pp=int64#4 +# asm 2: mov pp=%rcx +mov %r9,%rcx +# comment:fp stack unchanged by jump + +# qhasm: goto siftdownloop +jmp ._siftdownloop + +# qhasm: siftuploop: +._siftuploop: + +# qhasm: pc = pp +# asm 1: mov pc=int64#2 +# asm 2: mov pc=%rsi +mov %rcx,%rsi + +# qhasm: pp -= 1 +# asm 1: sub $1,>= 1 +# asm 1: shr $1,? pc - 0 +# asm 1: cmp $0, +jbe ._end + +# qhasm: spp = *(uint64 *)(hp + pp * 8) +# asm 1: movq (spp=int64#5 +# asm 2: movq (spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: spc = *(uint64 *)(hp + pc * 8) +# asm 1: movq (spc=int64#6 +# asm 2: movq (spc=%r9 +movq (%rdi,%rsi,8),%r9 + +# qhasm: spp <<= 5 +# asm 1: shl $5,c0=int64#7 +# asm 2: movq 0(c0=%rax +movq 0(%r9),%rax + +# qhasm: c1 = *(uint64 *)(spc + 8) +# asm 1: movq 8(c1=int64#8 +# asm 2: movq 8(c1=%r10 +movq 8(%r9),%r10 + +# qhasm: c2 = *(uint64 *)(spc + 16) +# asm 1: movq 16(c2=int64#9 +# asm 2: movq 16(c2=%r11 +movq 16(%r9),%r11 + +# qhasm: c3 = *(uint64 *)(spc + 24) +# asm 1: movq 24(c3=int64#10 +# asm 2: movq 24(c3=%r12 +movq 24(%r9),%r12 + +# qhasm: carry? 
c0 -= *(uint64 *)(spp + 0) +# asm 1: subq 0(>= 5 +# asm 1: shr $5,>= 5 +# asm 1: shr $5,caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-64-24k/heap_rootreplaced_1limb.s b/src/ed25519-supercop-amd64-64-24k/heap_rootreplaced_1limb.s new file mode 100644 index 0000000..2b7cbe0 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/heap_rootreplaced_1limb.s @@ -0,0 +1,416 @@ + +# qhasm: int64 hp + +# qhasm: int64 hlen + +# qhasm: int64 sp + +# qhasm: int64 pp + +# qhasm: input hp + +# qhasm: input hlen + +# qhasm: input sp + +# qhasm: int64 prc + +# qhasm: int64 plc + +# qhasm: int64 pc + +# qhasm: int64 d + +# qhasm: int64 spp + +# qhasm: int64 sprc + +# qhasm: int64 spc + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 p0 + +# qhasm: int64 p1 + +# qhasm: int64 p2 + +# qhasm: int64 p3 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced_1limb +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced_1limb +.globl crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced_1limb +_crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced_1limb: +crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced_1limb: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq 
caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: pp = 0 +# asm 1: mov $0,>pp=int64#4 +# asm 2: mov $0,>pp=%rcx +mov $0,%rcx + +# qhasm: siftdownloop: +._siftdownloop: + +# qhasm: prc = pp +# asm 1: mov prc=int64#5 +# asm 2: mov prc=%r8 +mov %rcx,%r8 + +# qhasm: prc *= 2 +# asm 1: imulq $2,prc=int64#5 +# asm 2: imulq $2,prc=%r8 +imulq $2,%r8,%r8 + +# qhasm: pc = prc +# asm 1: mov pc=int64#6 +# asm 2: mov pc=%r9 +mov %r8,%r9 + +# qhasm: prc += 2 +# asm 1: add $2,? hlen - prc +# asm 1: cmp +jbe ._siftuploop + +# qhasm: sprc = *(uint64 *)(hp + prc * 8) +# asm 1: movq (sprc=int64#7 +# asm 2: movq (sprc=%rax +movq (%rdi,%r8,8),%rax + +# qhasm: sprc <<= 5 +# asm 1: shl $5,spc=int64#8 +# asm 2: movq (spc=%r10 +movq (%rdi,%r9,8),%r10 + +# qhasm: spc <<= 5 +# asm 1: shl $5,c0=int64#9 +# asm 2: movq 0(c0=%r11 +movq 0(%r10),%r11 + +# qhasm: carry? c0 -= *(uint64 *)(sprc + 0) +# asm 1: subq 0(>= 5 +# asm 1: shr $5,spp=int64#5 +# asm 2: movq (spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: *(uint64 *)(hp + pp * 8) = spc +# asm 1: movq pp=int64#4 +# asm 2: mov pp=%rcx +mov %r9,%rcx +# comment:fp stack unchanged by jump + +# qhasm: goto siftdownloop +jmp ._siftdownloop + +# qhasm: siftuploop: +._siftuploop: + +# qhasm: pc = pp +# asm 1: mov pc=int64#2 +# asm 2: mov pc=%rsi +mov %rcx,%rsi + +# qhasm: pp -= 1 +# asm 1: sub $1,>= 1 +# asm 1: shr $1,? pc - 0 +# asm 1: cmp $0, +jbe ._end + +# qhasm: spp = *(uint64 *)(hp + pp * 8) +# asm 1: movq (spp=int64#5 +# asm 2: movq (spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: spc = *(uint64 *)(hp + pc * 8) +# asm 1: movq (spc=int64#6 +# asm 2: movq (spc=%r9 +movq (%rdi,%rsi,8),%r9 + +# qhasm: spp <<= 5 +# asm 1: shl $5,c0=int64#7 +# asm 2: movq 0(c0=%rax +movq 0(%r9),%rax + +# qhasm: carry? 
c0 -= *(uint64 *)(spp + 0) +# asm 1: subq 0(>= 5 +# asm 1: shr $5,>= 5 +# asm 1: shr $5,caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-64-24k/heap_rootreplaced_2limbs.s b/src/ed25519-supercop-amd64-64-24k/heap_rootreplaced_2limbs.s new file mode 100644 index 0000000..c2829f0 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/heap_rootreplaced_2limbs.s @@ -0,0 +1,436 @@ + +# qhasm: int64 hp + +# qhasm: int64 hlen + +# qhasm: int64 sp + +# qhasm: int64 pp + +# qhasm: input hp + +# qhasm: input hlen + +# qhasm: input sp + +# qhasm: int64 prc + +# qhasm: int64 plc + +# qhasm: int64 pc + +# qhasm: int64 d + +# qhasm: int64 spp + +# qhasm: int64 sprc + +# qhasm: int64 spc + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 p0 + +# qhasm: int64 p1 + +# qhasm: int64 p2 + +# qhasm: int64 p3 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced_2limbs +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced_2limbs +.globl crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced_2limbs +_crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced_2limbs: +crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced_2limbs: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq 
caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: pp = 0 +# asm 1: mov $0,>pp=int64#4 +# asm 2: mov $0,>pp=%rcx +mov $0,%rcx + +# qhasm: siftdownloop: +._siftdownloop: + +# qhasm: prc = pp +# asm 1: mov prc=int64#5 +# asm 2: mov prc=%r8 +mov %rcx,%r8 + +# qhasm: prc *= 2 +# asm 1: imulq $2,prc=int64#5 +# asm 2: imulq $2,prc=%r8 +imulq $2,%r8,%r8 + +# qhasm: pc = prc +# asm 1: mov pc=int64#6 +# asm 2: mov pc=%r9 +mov %r8,%r9 + +# qhasm: prc += 2 +# asm 1: add $2,? hlen - prc +# asm 1: cmp +jbe ._siftuploop + +# qhasm: sprc = *(uint64 *)(hp + prc * 8) +# asm 1: movq (sprc=int64#7 +# asm 2: movq (sprc=%rax +movq (%rdi,%r8,8),%rax + +# qhasm: sprc <<= 5 +# asm 1: shl $5,spc=int64#8 +# asm 2: movq (spc=%r10 +movq (%rdi,%r9,8),%r10 + +# qhasm: spc <<= 5 +# asm 1: shl $5,c0=int64#9 +# asm 2: movq 0(c0=%r11 +movq 0(%r10),%r11 + +# qhasm: c1 = *(uint64 *)(spc + 8) +# asm 1: movq 8(c1=int64#10 +# asm 2: movq 8(c1=%r12 +movq 8(%r10),%r12 + +# qhasm: carry? c0 -= *(uint64 *)(sprc + 0) +# asm 1: subq 0(>= 5 +# asm 1: shr $5,spp=int64#5 +# asm 2: movq (spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: *(uint64 *)(hp + pp * 8) = spc +# asm 1: movq pp=int64#4 +# asm 2: mov pp=%rcx +mov %r9,%rcx +# comment:fp stack unchanged by jump + +# qhasm: goto siftdownloop +jmp ._siftdownloop + +# qhasm: siftuploop: +._siftuploop: + +# qhasm: pc = pp +# asm 1: mov pc=int64#2 +# asm 2: mov pc=%rsi +mov %rcx,%rsi + +# qhasm: pp -= 1 +# asm 1: sub $1,>= 1 +# asm 1: shr $1,? pc - 0 +# asm 1: cmp $0, +jbe ._end + +# qhasm: spp = *(uint64 *)(hp + pp * 8) +# asm 1: movq (spp=int64#5 +# asm 2: movq (spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: spc = *(uint64 *)(hp + pc * 8) +# asm 1: movq (spc=int64#6 +# asm 2: movq (spc=%r9 +movq (%rdi,%rsi,8),%r9 + +# qhasm: spp <<= 5 +# asm 1: shl $5,c0=int64#7 +# asm 2: movq 0(c0=%rax +movq 0(%r9),%rax + +# qhasm: c1 = *(uint64 *)(spc + 8) +# asm 1: movq 8(c1=int64#8 +# asm 2: movq 8(c1=%r10 +movq 8(%r9),%r10 + +# qhasm: carry? 
c0 -= *(uint64 *)(spp + 0) +# asm 1: subq 0(>= 5 +# asm 1: shr $5,>= 5 +# asm 1: shr $5,caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-64-24k/heap_rootreplaced_3limbs.s b/src/ed25519-supercop-amd64-64-24k/heap_rootreplaced_3limbs.s new file mode 100644 index 0000000..988023c --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/heap_rootreplaced_3limbs.s @@ -0,0 +1,456 @@ + +# qhasm: int64 hp + +# qhasm: int64 hlen + +# qhasm: int64 sp + +# qhasm: int64 pp + +# qhasm: input hp + +# qhasm: input hlen + +# qhasm: input sp + +# qhasm: int64 prc + +# qhasm: int64 plc + +# qhasm: int64 pc + +# qhasm: int64 d + +# qhasm: int64 spp + +# qhasm: int64 sprc + +# qhasm: int64 spc + +# qhasm: int64 c0 + +# qhasm: int64 c1 + +# qhasm: int64 c2 + +# qhasm: int64 c3 + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 p0 + +# qhasm: int64 p1 + +# qhasm: int64 p2 + +# qhasm: int64 p3 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced_3limbs +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced_3limbs +.globl crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced_3limbs +_crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced_3limbs: +crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced_3limbs: +mov %rsp,%r11 +and $31,%r11 +add $64,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq 
caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: pp = 0 +# asm 1: mov $0,>pp=int64#4 +# asm 2: mov $0,>pp=%rcx +mov $0,%rcx + +# qhasm: siftdownloop: +._siftdownloop: + +# qhasm: prc = pp +# asm 1: mov prc=int64#5 +# asm 2: mov prc=%r8 +mov %rcx,%r8 + +# qhasm: prc *= 2 +# asm 1: imulq $2,prc=int64#5 +# asm 2: imulq $2,prc=%r8 +imulq $2,%r8,%r8 + +# qhasm: pc = prc +# asm 1: mov pc=int64#6 +# asm 2: mov pc=%r9 +mov %r8,%r9 + +# qhasm: prc += 2 +# asm 1: add $2,? hlen - prc +# asm 1: cmp +jbe ._siftuploop + +# qhasm: sprc = *(uint64 *)(hp + prc * 8) +# asm 1: movq (sprc=int64#7 +# asm 2: movq (sprc=%rax +movq (%rdi,%r8,8),%rax + +# qhasm: sprc <<= 5 +# asm 1: shl $5,spc=int64#8 +# asm 2: movq (spc=%r10 +movq (%rdi,%r9,8),%r10 + +# qhasm: spc <<= 5 +# asm 1: shl $5,c0=int64#9 +# asm 2: movq 0(c0=%r11 +movq 0(%r10),%r11 + +# qhasm: c1 = *(uint64 *)(spc + 8) +# asm 1: movq 8(c1=int64#10 +# asm 2: movq 8(c1=%r12 +movq 8(%r10),%r12 + +# qhasm: c2 = *(uint64 *)(spc + 16) +# asm 1: movq 16(c2=int64#11 +# asm 2: movq 16(c2=%r13 +movq 16(%r10),%r13 + +# qhasm: carry? c0 -= *(uint64 *)(sprc + 0) +# asm 1: subq 0(>= 5 +# asm 1: shr $5,spp=int64#5 +# asm 2: movq (spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: *(uint64 *)(hp + pp * 8) = spc +# asm 1: movq pp=int64#4 +# asm 2: mov pp=%rcx +mov %r9,%rcx +# comment:fp stack unchanged by jump + +# qhasm: goto siftdownloop +jmp ._siftdownloop + +# qhasm: siftuploop: +._siftuploop: + +# qhasm: pc = pp +# asm 1: mov pc=int64#2 +# asm 2: mov pc=%rsi +mov %rcx,%rsi + +# qhasm: pp -= 1 +# asm 1: sub $1,>= 1 +# asm 1: shr $1,? pc - 0 +# asm 1: cmp $0, +jbe ._end + +# qhasm: spp = *(uint64 *)(hp + pp * 8) +# asm 1: movq (spp=int64#5 +# asm 2: movq (spp=%r8 +movq (%rdi,%rcx,8),%r8 + +# qhasm: spc = *(uint64 *)(hp + pc * 8) +# asm 1: movq (spc=int64#6 +# asm 2: movq (spc=%r9 +movq (%rdi,%rsi,8),%r9 + +# qhasm: spp <<= 5 +# asm 1: shl $5,c0=int64#7 +# asm 2: movq 0(c0=%rax +movq 0(%r9),%rax + +# qhasm: c1 = *(uint64 *)(spc + 8) +# asm 1: movq 8(c1=int64#8 +# asm 2: movq 8(c1=%r10 +movq 8(%r9),%r10 + +# qhasm: c2 = *(uint64 *)(spc + 16) +# asm 1: movq 16(c2=int64#9 +# asm 2: movq 16(c2=%r11 +movq 16(%r9),%r11 + +# qhasm: carry? 
c0 -= *(uint64 *)(spp + 0) +# asm 1: subq 0(>= 5 +# asm 1: shr $5,>= 5 +# asm 1: shr $5,caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-64-24k/hram.c b/src/ed25519-supercop-amd64-64-24k/hram.c new file mode 100644 index 0000000..f020f6f --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/hram.c @@ -0,0 +1,13 @@ +#include "crypto_hash_sha512.h" +#include "hram.h" + +void get_hram(unsigned char *hram, const unsigned char *sm, const unsigned char *pk, unsigned char *playground, unsigned long long smlen) +{ + unsigned long long i; + + for (i = 0;i < 32;++i) playground[i] = sm[i]; + for (i = 32;i < 64;++i) playground[i] = pk[i-32]; + for (i = 64;i < smlen;++i) playground[i] = sm[i]; + + crypto_hash_sha512(hram,playground,smlen); +} diff --git a/src/ed25519-supercop-amd64-64-24k/hram.h b/src/ed25519-supercop-amd64-64-24k/hram.h new file mode 100644 index 0000000..bec46e9 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/hram.h @@ -0,0 +1,8 @@ +#ifndef HRAM_H +#define HRAM_H + +#define get_hram crypto_sign_ed25519_amd64_64_24k_batch_get_hram + +extern void get_hram(unsigned char *hram, const unsigned char *sm, const unsigned char *pk, unsigned char *playground, unsigned long long smlen); + +#endif diff --git a/src/ed25519-supercop-amd64-64-24k/implementors b/src/ed25519-supercop-amd64-64-24k/implementors new file mode 100644 index 0000000..9b5399a --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/implementors @@ -0,0 +1,5 @@ +Daniel J. 
Bernstein +Niels Duif +Tanja Lange +lead: Peter Schwabe +Bo-Yin Yang diff --git a/src/ed25519-supercop-amd64-64-24k/index_heap.c b/src/ed25519-supercop-amd64-64-24k/index_heap.c new file mode 100644 index 0000000..f29f7a2 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/index_heap.c @@ -0,0 +1,58 @@ +#include "sc25519.h" +#include "index_heap.h" + +/* caller's responsibility to ensure hlen>=3 */ +void heap_init(unsigned long long *h, unsigned long long hlen, sc25519 *scalars) +{ + h[0] = 0; + unsigned long long i=1; + while(i 0) + { + /* if(sc25519_lt_vartime(&scalars[h[ppos]], &scalars[h[pos]])) */ + if(sc25519_lt(&scalars[h[ppos]], &scalars[h[pos]])) + { + t = h[ppos]; + h[ppos] = h[pos]; + h[pos] = t; + pos = ppos; + ppos = (pos-1)/2; + } + else break; + } + (*hlen)++; +} + +/* Put the largest value in the heap in max1, the second largest in max2 */ +void heap_get2max(unsigned long long *h, unsigned long long *max1, unsigned long long *max2, sc25519 *scalars) +{ + *max1 = h[0]; + *max2 = h[1]; + if(sc25519_lt(&scalars[h[1]],&scalars[h[2]])) + *max2 = h[2]; +} + +/* After the root has been replaced, restore heap property */ +/* extern void heap_rootreplaced(unsigned long long *h, unsigned long long hlen, sc25519 *scalars); +*/ +/* extern void heap_rootreplaced_shortscalars(unsigned long long *h, unsigned long long hlen, sc25519 *scalars); +*/ diff --git a/src/ed25519-supercop-amd64-64-24k/index_heap.h b/src/ed25519-supercop-amd64-64-24k/index_heap.h new file mode 100644 index 0000000..b3b4294 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/index_heap.h @@ -0,0 +1,31 @@ +#ifndef INDEX_HEAP_H +#define INDEX_HEAP_H + +#include "sc25519.h" + +#define heap_init crypto_sign_ed25519_amd64_64_24k_batch_heap_init +#define heap_extend crypto_sign_ed25519_amd64_64_24k_batch_heap_extend +#define heap_pop crypto_sign_ed25519_amd64_64_24k_batch_heap_pop +#define heap_push crypto_sign_ed25519_amd64_64_24k_batch_heap_push +#define heap_get2max crypto_sign_ed25519_amd64_64_24k_batch_heap_get2max +#define heap_rootreplaced crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced +#define heap_rootreplaced_3limbs crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced_3limbs +#define heap_rootreplaced_2limbs crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced_2limbs +#define heap_rootreplaced_1limb crypto_sign_ed25519_amd64_64_24k_batch_heap_rootreplaced_1limb + +void heap_init(unsigned long long *h, unsigned long long hlen, sc25519 *scalars); + +void heap_extend(unsigned long long *h, unsigned long long oldlen, unsigned long long newlen, sc25519 *scalars); + +unsigned long long heap_pop(unsigned long long *h, unsigned long long *hlen, sc25519 *scalars); + +void heap_push(unsigned long long *h, unsigned long long *hlen, unsigned long long elem, sc25519 *scalars); + +void heap_get2max(unsigned long long *h, unsigned long long *max1, unsigned long long *max2, sc25519 *scalars); + +void heap_rootreplaced(unsigned long long *h, unsigned long long hlen, sc25519 *scalars); +void heap_rootreplaced_3limbs(unsigned long long *h, unsigned long long hlen, sc25519 *scalars); +void heap_rootreplaced_2limbs(unsigned long long *h, unsigned long long hlen, sc25519 *scalars); +void heap_rootreplaced_1limb(unsigned long long *h, unsigned long long hlen, sc25519 *scalars); + +#endif diff --git a/src/ed25519-supercop-amd64-64-24k/keypair.c b/src/ed25519-supercop-amd64-64-24k/keypair.c new file mode 100644 index 0000000..bb04e8d --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/keypair.c @@ -0,0 +1,30 @@ 
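The index heap above keeps 8-byte indices in h and orders them by the 32-byte sc25519 scalars they point at (hence the shl $5 address scaling in the heap_rootreplaced_*limbs assembly); batch verification repeatedly reads the two largest scalars with heap_get2max, subtracts one from the other Bos-Coster style, and restores the heap invariant at the root. A self-contained sketch of that access pattern with plain 64-bit keys follows; siftdown here is a hypothetical portable stand-in for the assembly heap_rootreplaced variants and, like them, stops once fewer than two children remain.

#include <stdint.h>
#include <stdio.h>

/* Restore the max-heap property after h[0]'s key shrank; the heap
   stores indices, the keys live in an external array. */
static void siftdown(uint64_t *h, uint64_t hlen, const uint64_t *keys)
{
    uint64_t p = 0;
    while (2*p + 2 < hlen) {                      /* both children exist */
        uint64_t c = 2*p + 1;
        if (keys[h[c]] < keys[h[c+1]]) c++;       /* pick the larger child */
        if (keys[h[c]] <= keys[h[p]]) return;
        uint64_t t = h[p]; h[p] = h[c]; h[c] = t;
        p = c;
    }
}

int main(void)
{
    uint64_t keys[5] = {7, 42, 3, 19, 30};
    uint64_t h[5]    = {1, 4, 2, 3, 0};           /* a valid max-heap of indices */
    uint64_t max1 = h[0];                         /* heap_get2max: the root...   */
    uint64_t max2 = keys[h[1]] < keys[h[2]] ? h[2] : h[1]; /* ...larger child */
    printf("max1=%llu max2=%llu\n",
           (unsigned long long)keys[max1], (unsigned long long)keys[max2]);
    keys[max1] -= keys[max2];                     /* Bos-Coster style reduction */
    siftdown(h, 5, keys);
    printf("new max=%llu\n", (unsigned long long)keys[h[0]]);
    return 0;
}

On this demo heap the program prints max1=42 max2=30, then new max=30 once the root's key has dropped to 12 and been sifted down.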
+#include "randombytes.h" +#include "crypto_hash_sha512.h" +#include "crypto_sign.h" +#include "ge25519.h" +#include "hram.h" + +int crypto_sign_keypair( + unsigned char *pk, + unsigned char *sk + ) +{ + sc25519 scsk; + ge25519 gepk; + unsigned char extsk[64]; + int i; + + randombytes(sk, 32); + crypto_hash_sha512(extsk, sk, 32); + extsk[0] &= 248; + extsk[31] &= 127; + extsk[31] |= 64; + + sc25519_from32bytes(&scsk,extsk); + + ge25519_scalarmult_base(&gepk, &scsk); + ge25519_pack(pk, &gepk); + for(i=0;i<32;i++) + sk[32 + i] = pk[i]; + return 0; +} diff --git a/src/ed25519-supercop-amd64-64-24k/open.c b/src/ed25519-supercop-amd64-64-24k/open.c new file mode 100644 index 0000000..1f73fe9 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/open.c @@ -0,0 +1,45 @@ +#include "crypto_sign.h" +#include "crypto_verify_32.h" +#include "crypto_hash_sha512.h" +#include "ge25519.h" +#include "hram.h" + +int crypto_sign_open( + unsigned char *m,unsigned long long *mlen, + const unsigned char *sm,unsigned long long smlen, + const unsigned char *pk + ) +{ + int i; + unsigned char t2[32]; + ge25519 get1, get2; + sc25519 schram, scs; + unsigned char hram[crypto_hash_sha512_BYTES]; + + *mlen = (unsigned long long) -1; + + if (smlen < 64) goto badsig; + if (ge25519_unpackneg_vartime(&get1, pk)) goto badsig; + + get_hram(hram,sm,pk,m,smlen); + + sc25519_from64bytes(&schram, hram); + + sc25519_from32bytes(&scs, sm+32); + + ge25519_double_scalarmult_vartime(&get2, &get1, &schram, &scs); + ge25519_pack(t2, &get2); + + if (!crypto_verify_32(sm, t2)) + { + for(i=0;icaller4_stack=stack64#1 +# asm 2: movq caller4_stack=0(%rsp) +movq %r14,0(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#2 +# asm 2: movq caller5_stack=8(%rsp) +movq %r15,8(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#3 +# asm 2: movq caller6_stack=16(%rsp) +movq %rbx,16(%rsp) + +# qhasm: r0 = *(uint64 *)(xp + 0) +# asm 1: movq 0(r0=int64#4 +# asm 2: movq 0(r0=%rcx +movq 0(%rsi),%rcx + +# qhasm: r1 = *(uint64 *)(xp + 8) +# asm 1: movq 8(r1=int64#5 +# asm 2: movq 8(r1=%r8 +movq 8(%rsi),%r8 + +# qhasm: r2 = *(uint64 *)(xp + 16) +# asm 1: movq 16(r2=int64#6 +# asm 2: movq 16(r2=%r9 +movq 16(%rsi),%r9 + +# qhasm: r3 = *(uint64 *)(xp + 24) +# asm 1: movq 24(r3=int64#2 +# asm 2: movq 24(r3=%rsi +movq 24(%rsi),%rsi + +# qhasm: carry? r0 += *(uint64 *)(yp + 0) +# asm 1: addq 0(t0=int64#3 +# asm 2: mov t0=%rdx +mov %rcx,%rdx + +# qhasm: t1 = r1 +# asm 1: mov t1=int64#7 +# asm 2: mov t1=%rax +mov %r8,%rax + +# qhasm: t2 = r2 +# asm 1: mov t2=int64#8 +# asm 2: mov t2=%r10 +mov %r9,%r10 + +# qhasm: t3 = r3 +# asm 1: mov t3=int64#12 +# asm 2: mov t3=%r14 +mov %rsi,%r14 + +# qhasm: carry? 
t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_ORDER0 +# asm 1: sub crypto_sign_ed25519_amd64_64_24k_batch_ORDER0,caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 0(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 8(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 16(%rsp),%rbx + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-64-24k/sc25519_barrett.s b/src/ed25519-supercop-amd64-64-24k/sc25519_barrett.s new file mode 100644 index 0000000..3a1aa6c --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/sc25519_barrett.s @@ -0,0 +1,1188 @@ + +# qhasm: int64 rp + +# qhasm: int64 xp + +# qhasm: input rp + +# qhasm: input xp + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller1_stack + +# qhasm: stack64 caller2_stack + +# qhasm: stack64 caller3_stack + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: int64 q23 + +# qhasm: int64 q24 + +# qhasm: int64 q30 + +# qhasm: int64 q31 + +# qhasm: int64 q32 + +# qhasm: int64 q33 + +# qhasm: int64 r20 + +# qhasm: int64 r21 + +# qhasm: int64 r22 + +# qhasm: int64 r23 + +# qhasm: int64 r24 + +# qhasm: int64 r0 + +# qhasm: int64 r1 + +# qhasm: int64 r2 + +# qhasm: int64 r3 + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 rax + +# qhasm: int64 rdx + +# qhasm: int64 c + +# qhasm: int64 zero + +# qhasm: int64 mask + +# qhasm: int64 nmask + +# qhasm: stack64 q30_stack + +# qhasm: stack64 q31_stack + +# qhasm: stack64 q32_stack + +# qhasm: stack64 q33_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_24k_batch_sc25519_barrett +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_24k_batch_sc25519_barrett +.globl crypto_sign_ed25519_amd64_64_24k_batch_sc25519_barrett +_crypto_sign_ed25519_amd64_64_24k_batch_sc25519_barrett: +crypto_sign_ed25519_amd64_64_24k_batch_sc25519_barrett: +mov %rsp,%r11 +and $31,%r11 +add $96,%r11 +sub %r11,%rsp + +# qhasm: caller1_stack = caller1 +# asm 1: movq caller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: zero ^= zero +# asm 1: xor rax=int64#7 +# asm 2: movq 24(rax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * 
*(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_MU3 +mulq crypto_sign_ed25519_amd64_64_24k_batch_MU3 + +# qhasm: q23 = rax +# asm 1: mov q23=int64#10 +# asm 2: mov q23=%r12 +mov %rax,%r12 + +# qhasm: c = rdx +# asm 1: mov c=int64#11 +# asm 2: mov c=%r13 +mov %rdx,%r13 + +# qhasm: rax = *(uint64 *)(xp + 24) +# asm 1: movq 24(rax=int64#7 +# asm 2: movq 24(rax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_MU4 +mulq crypto_sign_ed25519_amd64_64_24k_batch_MU4 + +# qhasm: q24 = rax +# asm 1: mov q24=int64#12 +# asm 2: mov q24=%r14 +mov %rax,%r14 + +# qhasm: carry? q24 += c +# asm 1: add rax=int64#7 +# asm 2: movq 32(rax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_MU2 +mulq crypto_sign_ed25519_amd64_64_24k_batch_MU2 + +# qhasm: carry? q23 += rax +# asm 1: add c=int64#11 +# asm 2: mov $0,>c=%r13 +mov $0,%r13 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 32(rax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_MU3 +mulq crypto_sign_ed25519_amd64_64_24k_batch_MU3 + +# qhasm: carry? q24 += rax +# asm 1: add c=int64#11 +# asm 2: mov $0,>c=%r13 +mov $0,%r13 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 32(rax=%rax +movq 32(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_MU4 +mulq crypto_sign_ed25519_amd64_64_24k_batch_MU4 + +# qhasm: carry? q30 += rax +# asm 1: add rax=int64#7 +# asm 2: movq 40(rax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_MU1 +mulq crypto_sign_ed25519_amd64_64_24k_batch_MU1 + +# qhasm: carry? q23 += rax +# asm 1: add c=int64#11 +# asm 2: mov $0,>c=%r13 +mov $0,%r13 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 40(rax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_MU2 +mulq crypto_sign_ed25519_amd64_64_24k_batch_MU2 + +# qhasm: carry? q24 += rax +# asm 1: add c=int64#11 +# asm 2: mov $0,>c=%r13 +mov $0,%r13 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 40(rax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_MU3 +mulq crypto_sign_ed25519_amd64_64_24k_batch_MU3 + +# qhasm: carry? q30 += rax +# asm 1: add c=int64#11 +# asm 2: mov $0,>c=%r13 +mov $0,%r13 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 40(rax=%rax +movq 40(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_MU4 +mulq crypto_sign_ed25519_amd64_64_24k_batch_MU4 + +# qhasm: carry? q31 += rax +# asm 1: add rax=int64#7 +# asm 2: movq 48(rax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_MU0 +mulq crypto_sign_ed25519_amd64_64_24k_batch_MU0 + +# qhasm: carry? q23 += rax +# asm 1: add c=int64#10 +# asm 2: mov $0,>c=%r12 +mov $0,%r12 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 48(rax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_MU1 +mulq crypto_sign_ed25519_amd64_64_24k_batch_MU1 + +# qhasm: carry? 
q24 += rax +# asm 1: add c=int64#10 +# asm 2: mov $0,>c=%r12 +mov $0,%r12 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 48(rax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_MU2 +mulq crypto_sign_ed25519_amd64_64_24k_batch_MU2 + +# qhasm: carry? q30 += rax +# asm 1: add c=int64#10 +# asm 2: mov $0,>c=%r12 +mov $0,%r12 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 48(rax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_MU3 +mulq crypto_sign_ed25519_amd64_64_24k_batch_MU3 + +# qhasm: carry? q31 += rax +# asm 1: add c=int64#10 +# asm 2: mov $0,>c=%r12 +mov $0,%r12 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 48(rax=%rax +movq 48(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_MU4 +mulq crypto_sign_ed25519_amd64_64_24k_batch_MU4 + +# qhasm: carry? q32 += rax +# asm 1: add rax=int64#7 +# asm 2: movq 56(rax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_MU0 +mulq crypto_sign_ed25519_amd64_64_24k_batch_MU0 + +# qhasm: carry? q24 += rax +# asm 1: add c=int64#10 +# asm 2: mov $0,>c=%r12 +mov $0,%r12 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 56(rax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_MU1 +mulq crypto_sign_ed25519_amd64_64_24k_batch_MU1 + +# qhasm: carry? q30 += rax +# asm 1: add c=int64#10 +# asm 2: mov $0,>c=%r12 +mov $0,%r12 + +# qhasm: c += rdx + carry +# asm 1: adc q30_stack=stack64#8 +# asm 2: movq q30_stack=56(%rsp) +movq %r8,56(%rsp) + +# qhasm: rax = *(uint64 *)(xp + 56) +# asm 1: movq 56(rax=int64#7 +# asm 2: movq 56(rax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_MU2 +mulq crypto_sign_ed25519_amd64_64_24k_batch_MU2 + +# qhasm: carry? q31 += rax +# asm 1: add c=int64#5 +# asm 2: mov $0,>c=%r8 +mov $0,%r8 + +# qhasm: c += rdx + carry +# asm 1: adc q31_stack=stack64#9 +# asm 2: movq q31_stack=64(%rsp) +movq %r9,64(%rsp) + +# qhasm: rax = *(uint64 *)(xp + 56) +# asm 1: movq 56(rax=int64#7 +# asm 2: movq 56(rax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_MU3 +mulq crypto_sign_ed25519_amd64_64_24k_batch_MU3 + +# qhasm: carry? q32 += rax +# asm 1: add c=int64#5 +# asm 2: mov $0,>c=%r8 +mov $0,%r8 + +# qhasm: c += rdx + carry +# asm 1: adc q32_stack=stack64#10 +# asm 2: movq q32_stack=72(%rsp) +movq %r10,72(%rsp) + +# qhasm: rax = *(uint64 *)(xp + 56) +# asm 1: movq 56(rax=int64#7 +# asm 2: movq 56(rax=%rax +movq 56(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_MU4 +mulq crypto_sign_ed25519_amd64_64_24k_batch_MU4 + +# qhasm: carry? 
q33 += rax +# asm 1: add q33_stack=stack64#11 +# asm 2: movq q33_stack=80(%rsp) +movq %r11,80(%rsp) + +# qhasm: rax = q30_stack +# asm 1: movq rax=int64#7 +# asm 2: movq rax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_ORDER0 +mulq crypto_sign_ed25519_amd64_64_24k_batch_ORDER0 + +# qhasm: r20 = rax +# asm 1: mov r20=int64#5 +# asm 2: mov r20=%r8 +mov %rax,%r8 + +# qhasm: c = rdx +# asm 1: mov c=int64#6 +# asm 2: mov c=%r9 +mov %rdx,%r9 + +# qhasm: rax = q30_stack +# asm 1: movq rax=int64#7 +# asm 2: movq rax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_ORDER1 +mulq crypto_sign_ed25519_amd64_64_24k_batch_ORDER1 + +# qhasm: r21 = rax +# asm 1: mov r21=int64#8 +# asm 2: mov r21=%r10 +mov %rax,%r10 + +# qhasm: carry? r21 += c +# asm 1: add c=int64#6 +# asm 2: mov $0,>c=%r9 +mov $0,%r9 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq rax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_ORDER2 +mulq crypto_sign_ed25519_amd64_64_24k_batch_ORDER2 + +# qhasm: r22 = rax +# asm 1: mov r22=int64#9 +# asm 2: mov r22=%r11 +mov %rax,%r11 + +# qhasm: carry? r22 += c +# asm 1: add c=int64#6 +# asm 2: mov $0,>c=%r9 +mov $0,%r9 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq rax=%rax +movq 56(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_ORDER3 +mulq crypto_sign_ed25519_amd64_64_24k_batch_ORDER3 + +# qhasm: free rdx + +# qhasm: r23 = rax +# asm 1: mov r23=int64#10 +# asm 2: mov r23=%r12 +mov %rax,%r12 + +# qhasm: r23 += c +# asm 1: add rax=int64#7 +# asm 2: movq rax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_ORDER0 +mulq crypto_sign_ed25519_amd64_64_24k_batch_ORDER0 + +# qhasm: carry? r21 += rax +# asm 1: add c=int64#6 +# asm 2: mov $0,>c=%r9 +mov $0,%r9 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq rax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_ORDER1 +mulq crypto_sign_ed25519_amd64_64_24k_batch_ORDER1 + +# qhasm: carry? r22 += rax +# asm 1: add c=int64#4 +# asm 2: mov $0,>c=%rcx +mov $0,%rcx + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq rax=%rax +movq 64(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_ORDER2 +mulq crypto_sign_ed25519_amd64_64_24k_batch_ORDER2 + +# qhasm: free rdx + +# qhasm: r23 += rax +# asm 1: add rax=int64#7 +# asm 2: movq rax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_ORDER0 +mulq crypto_sign_ed25519_amd64_64_24k_batch_ORDER0 + +# qhasm: carry? 
r22 += rax +# asm 1: add c=int64#4 +# asm 2: mov $0,>c=%rcx +mov $0,%rcx + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq rax=%rax +movq 72(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_ORDER1 +mulq crypto_sign_ed25519_amd64_64_24k_batch_ORDER1 + +# qhasm: free rdx + +# qhasm: r23 += rax +# asm 1: add rax=int64#7 +# asm 2: movq rax=%rax +movq 80(%rsp),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_ORDER0 +mulq crypto_sign_ed25519_amd64_64_24k_batch_ORDER0 + +# qhasm: free rdx + +# qhasm: r23 += rax +# asm 1: add r0=int64#3 +# asm 2: movq 0(r0=%rdx +movq 0(%rsi),%rdx + +# qhasm: carry? r0 -= r20 +# asm 1: sub t0=int64#4 +# asm 2: mov t0=%rcx +mov %rdx,%rcx + +# qhasm: r1 = *(uint64 *)(xp + 8) +# asm 1: movq 8(r1=int64#5 +# asm 2: movq 8(r1=%r8 +movq 8(%rsi),%r8 + +# qhasm: carry? r1 -= r21 - carry +# asm 1: sbb t1=int64#6 +# asm 2: mov t1=%r9 +mov %r8,%r9 + +# qhasm: r2 = *(uint64 *)(xp + 16) +# asm 1: movq 16(r2=int64#7 +# asm 2: movq 16(r2=%rax +movq 16(%rsi),%rax + +# qhasm: carry? r2 -= r22 - carry +# asm 1: sbb t2=int64#8 +# asm 2: mov t2=%r10 +mov %rax,%r10 + +# qhasm: r3 = *(uint64 *)(xp + 24) +# asm 1: movq 24(r3=int64#2 +# asm 2: movq 24(r3=%rsi +movq 24(%rsi),%rsi + +# qhasm: r3 -= r23 - carry +# asm 1: sbb t3=int64#9 +# asm 2: mov t3=%r11 +mov %rsi,%r11 + +# qhasm: carry? t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_ORDER0 +# asm 1: sub crypto_sign_ed25519_amd64_64_24k_batch_ORDER0,t0=int64#4 +# asm 2: mov t0=%rcx +mov %rdx,%rcx + +# qhasm: r1 = t1 if !unsigned< +# asm 1: cmovae t1=int64#6 +# asm 2: mov t1=%r9 +mov %r8,%r9 + +# qhasm: r2 = t2 if !unsigned< +# asm 1: cmovae t2=int64#8 +# asm 2: mov t2=%r10 +mov %rax,%r10 + +# qhasm: r3 = t3 if !unsigned< +# asm 1: cmovae t3=int64#9 +# asm 2: mov t3=%r11 +mov %rsi,%r11 + +# qhasm: carry? 
t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_24k_batch_ORDER0 +# asm 1: sub crypto_sign_ed25519_amd64_64_24k_batch_ORDER0,caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret diff --git a/src/ed25519-supercop-amd64-64-24k/sc25519_from32bytes.c b/src/ed25519-supercop-amd64-64-24k/sc25519_from32bytes.c new file mode 100644 index 0000000..7f21e68 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/sc25519_from32bytes.c @@ -0,0 +1,55 @@ +#include "sc25519.h" + +/*Arithmetic modulo the group order n = 2^252 + 27742317777372353535851937790883648493 + * = 7237005577332262213973186563042994240857116359379907606001950938285454250989 + */ + +/* Contains order, 2*order, 4*order, 8*order, each represented in 4 consecutive unsigned long long */ +static const unsigned long long order[16] = {0x5812631A5CF5D3EDULL, 0x14DEF9DEA2F79CD6ULL, + 0x0000000000000000ULL, 0x1000000000000000ULL, + 0xB024C634B9EBA7DAULL, 0x29BDF3BD45EF39ACULL, + 0x0000000000000000ULL, 0x2000000000000000ULL, + 0x60498C6973D74FB4ULL, 0x537BE77A8BDE7359ULL, + 0x0000000000000000ULL, 0x4000000000000000ULL, + 0xC09318D2E7AE9F68ULL, 0xA6F7CEF517BCE6B2ULL, + 0x0000000000000000ULL, 0x8000000000000000ULL}; + +static unsigned long long smaller(unsigned long long a,unsigned long long b) +{ + unsigned long long atop = a >> 32; + unsigned long long abot = a & 4294967295; + unsigned long long btop = b >> 32; + unsigned long long bbot = b & 4294967295; + unsigned long long atopbelowbtop = (atop - btop) >> 63; + unsigned long long atopeqbtop = ((atop ^ btop) - 1) >> 63; + unsigned long long abotbelowbbot = (abot - bbot) >> 63; + return atopbelowbtop | (atopeqbtop & abotbelowbbot); +} + +void sc25519_from32bytes(sc25519 *r, const unsigned char x[32]) +{ + unsigned long long t[4]; + unsigned long long b; + unsigned long long mask; + int i, j; + + /* assuming little-endian */ + r->v[0] = *(unsigned long long *)x; + r->v[1] = *(((unsigned long long *)x)+1); + r->v[2] = *(((unsigned long long *)x)+2); + r->v[3] = *(((unsigned long long *)x)+3); + + for(j=3;j>=0;j--) + { + b=0; + for(i=0;i<4;i++) + { + b += order[4*j+i]; /* no overflow for this particular order */ + t[i] = r->v[i] - b; + b = smaller(r->v[i],b); + } + mask = b - 1; + for(i=0;i<4;i++) + r->v[i] ^= mask & (r->v[i] ^ t[i]); + } +} diff --git a/src/ed25519-supercop-amd64-64-24k/sc25519_from64bytes.c b/src/ed25519-supercop-amd64-64-24k/sc25519_from64bytes.c new file mode 100644 index 0000000..8e76a1b --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/sc25519_from64bytes.c @@ -0,0 +1,7 @@ +#include "sc25519.h" + +void sc25519_from64bytes(sc25519 *r, const unsigned char x[64]) +{ + /* assuming little-endian representation of unsigned long long */ + sc25519_barrett(r, (unsigned long long *)x); +} diff --git 
a/src/ed25519-supercop-amd64-64-24k/sc25519_from_shortsc.c b/src/ed25519-supercop-amd64-64-24k/sc25519_from_shortsc.c new file mode 100644 index 0000000..3b8ff2f --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/sc25519_from_shortsc.c @@ -0,0 +1,9 @@ +#include "sc25519.h" + +void sc25519_from_shortsc(sc25519 *r, const shortsc25519 *x) +{ + r->v[0] = x->v[0]; + r->v[1] = x->v[1]; + r->v[2] = 0; + r->v[3] = 0; +} diff --git a/src/ed25519-supercop-amd64-64-24k/sc25519_iszero.c b/src/ed25519-supercop-amd64-64-24k/sc25519_iszero.c new file mode 100644 index 0000000..21f593d --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/sc25519_iszero.c @@ -0,0 +1,10 @@ +#include "sc25519.h" + +int sc25519_iszero_vartime(const sc25519 *x) +{ + if(x->v[0] != 0) return 0; + if(x->v[1] != 0) return 0; + if(x->v[2] != 0) return 0; + if(x->v[3] != 0) return 0; + return 1; +} diff --git a/src/ed25519-supercop-amd64-64-24k/sc25519_lt.s b/src/ed25519-supercop-amd64-64-24k/sc25519_lt.s new file mode 100644 index 0000000..af7b25c --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/sc25519_lt.s @@ -0,0 +1,131 @@ + +# qhasm: int64 xp + +# qhasm: int64 yp + +# qhasm: int64 ret + +# qhasm: input xp + +# qhasm: input yp + +# qhasm: output ret + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 doof + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_24k_batch_sc25519_lt +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_24k_batch_sc25519_lt +.globl crypto_sign_ed25519_amd64_64_24k_batch_sc25519_lt +_crypto_sign_ed25519_amd64_64_24k_batch_sc25519_lt: +crypto_sign_ed25519_amd64_64_24k_batch_sc25519_lt: +mov %rsp,%r11 +and $31,%r11 +add $0,%r11 +sub %r11,%rsp + +# qhasm: t0 = *(uint64 *)(xp + 0) +# asm 1: movq 0(t0=int64#3 +# asm 2: movq 0(t0=%rdx +movq 0(%rdi),%rdx + +# qhasm: t1 = *(uint64 *)(xp + 8) +# asm 1: movq 8(t1=int64#4 +# asm 2: movq 8(t1=%rcx +movq 8(%rdi),%rcx + +# qhasm: t2 = *(uint64 *)(xp + 16) +# asm 1: movq 16(t2=int64#5 +# asm 2: movq 16(t2=%r8 +movq 16(%rdi),%r8 + +# qhasm: t3 = *(uint64 *)(xp + 24) +# asm 1: movq 24(t3=int64#1 +# asm 2: movq 24(t3=%rdi +movq 24(%rdi),%rdi + +# qhasm: carry? 
t0 -= *(uint64 *)(yp + 0) +# asm 1: subq 0(ret=int64#1 +# asm 2: mov $0,>ret=%rdi +mov $0,%rdi + +# qhasm: doof = 1 +# asm 1: mov $1,>doof=int64#2 +# asm 2: mov $1,>doof=%rsi +mov $1,%rsi + +# qhasm: ret = doof if carry +# asm 1: cmovc v, y->v); + sc25519_barrett(r, t); +} diff --git a/src/ed25519-supercop-amd64-64-24k/sc25519_mul_shortsc.c b/src/ed25519-supercop-amd64-64-24k/sc25519_mul_shortsc.c new file mode 100644 index 0000000..0c67250 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/sc25519_mul_shortsc.c @@ -0,0 +1,9 @@ +#include "sc25519.h" + +void sc25519_mul_shortsc(sc25519 *r, const sc25519 *x, const shortsc25519 *y) +{ + /* XXX: This wants to be faster */ + sc25519 t; + sc25519_from_shortsc(&t, y); + sc25519_mul(r, x, &t); +} diff --git a/src/ed25519-supercop-amd64-64-24k/sc25519_slide.c b/src/ed25519-supercop-amd64-64-24k/sc25519_slide.c new file mode 100644 index 0000000..4e52010 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/sc25519_slide.c @@ -0,0 +1,49 @@ +#include "sc25519.h" + +void sc25519_slide(signed char r[256], const sc25519 *s, int swindowsize) +{ + int i,j,k,b,m=(1<<(swindowsize-1))-1, soplen=256; + unsigned long long sv0 = s->v[0]; + unsigned long long sv1 = s->v[1]; + unsigned long long sv2 = s->v[2]; + unsigned long long sv3 = s->v[3]; + + /* first put the binary expansion into r */ + for(i=0;i<64;i++) { + r[i] = sv0 & 1; + r[i+64] = sv1 & 1; + r[i+128] = sv2 & 1; + r[i+192] = sv3 & 1; + sv0 >>= 1; + sv1 >>= 1; + sv2 >>= 1; + sv3 >>= 1; + } + + /* Making it sliding window */ + for (j = 0;j < soplen;++j) + { + if (r[j]) { + for (b = 1;b < soplen - j && b <= 6;++b) { + if (r[j] + (r[j + b] << b) <= m) + { + r[j] += r[j + b] << b; r[j + b] = 0; + } + else if (r[j] - (r[j + b] << b) >= -m) + { + r[j] -= r[j + b] << b; + for (k = j + b;k < soplen;++k) + { + if (!r[k]) { + r[k] = 1; + break; + } + r[k] = 0; + } + } + else if (r[j + b]) + break; + } + } + } +} diff --git a/src/ed25519-supercop-amd64-64-24k/sc25519_sub_nored.s b/src/ed25519-supercop-amd64-64-24k/sc25519_sub_nored.s new file mode 100644 index 0000000..cb39271 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/sc25519_sub_nored.s @@ -0,0 +1,142 @@ + +# qhasm: int64 rp + +# qhasm: int64 xp + +# qhasm: int64 yp + +# qhasm: input rp + +# qhasm: input xp + +# qhasm: input yp + +# qhasm: int64 r0 + +# qhasm: int64 r1 + +# qhasm: int64 r2 + +# qhasm: int64 r3 + +# qhasm: int64 t0 + +# qhasm: int64 t1 + +# qhasm: int64 t2 + +# qhasm: int64 t3 + +# qhasm: int64 caller1 + +# qhasm: int64 caller2 + +# qhasm: int64 caller3 + +# qhasm: int64 caller4 + +# qhasm: int64 caller5 + +# qhasm: int64 caller6 + +# qhasm: int64 caller7 + +# qhasm: caller caller1 + +# qhasm: caller caller2 + +# qhasm: caller caller3 + +# qhasm: caller caller4 + +# qhasm: caller caller5 + +# qhasm: caller caller6 + +# qhasm: caller caller7 + +# qhasm: stack64 caller4_stack + +# qhasm: stack64 caller5_stack + +# qhasm: stack64 caller6_stack + +# qhasm: stack64 caller7_stack + +# qhasm: enter crypto_sign_ed25519_amd64_64_24k_batch_sc25519_sub_nored +.text +.p2align 5 +.globl _crypto_sign_ed25519_amd64_64_24k_batch_sc25519_sub_nored +.globl crypto_sign_ed25519_amd64_64_24k_batch_sc25519_sub_nored +_crypto_sign_ed25519_amd64_64_24k_batch_sc25519_sub_nored: +crypto_sign_ed25519_amd64_64_24k_batch_sc25519_sub_nored: +mov %rsp,%r11 +and $31,%r11 +add $0,%r11 +sub %r11,%rsp + +# qhasm: r0 = *(uint64 *)(xp + 0) +# asm 1: movq 0(r0=int64#4 +# asm 2: movq 0(r0=%rcx +movq 0(%rsi),%rcx + +# qhasm: r1 = *(uint64 *)(xp + 8) +# asm 1: movq 
8(r1=int64#5 +# asm 2: movq 8(r1=%r8 +movq 8(%rsi),%r8 + +# qhasm: r2 = *(uint64 *)(xp + 16) +# asm 1: movq 16(r2=int64#6 +# asm 2: movq 16(r2=%r9 +movq 16(%rsi),%r9 + +# qhasm: r3 = *(uint64 *)(xp + 24) +# asm 1: movq 24(r3=int64#2 +# asm 2: movq 24(r3=%rsi +movq 24(%rsi),%rsi + +# qhasm: carry? r0 -= *(uint64 *)(yp + 0) +# asm 1: subq 0(v]; +} diff --git a/src/ed25519-supercop-amd64-64-24k/sc25519_window4.c b/src/ed25519-supercop-amd64-64-24k/sc25519_window4.c new file mode 100644 index 0000000..683a1d4 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/sc25519_window4.c @@ -0,0 +1,27 @@ +#include "sc25519.h" + +void sc25519_window4(signed char r[64], const sc25519 *s) +{ + char carry; + int i; + for(i=0;i<16;i++) + r[i] = (s->v[0] >> (4*i)) & 15; + for(i=0;i<16;i++) + r[i+16] = (s->v[1] >> (4*i)) & 15; + for(i=0;i<16;i++) + r[i+32] = (s->v[2] >> (4*i)) & 15; + for(i=0;i<16;i++) + r[i+48] = (s->v[3] >> (4*i)) & 15; + + /* Making it signed */ + carry = 0; + for(i=0;i<63;i++) + { + r[i] += carry; + r[i+1] += r[i] >> 4; + r[i] &= 15; + carry = r[i] >> 3; + r[i] -= carry << 4; + } + r[63] += carry; +} diff --git a/src/ed25519-supercop-amd64-64-24k/sign.c b/src/ed25519-supercop-amd64-64-24k/sign.c new file mode 100644 index 0000000..3c3a2c8 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/sign.c @@ -0,0 +1,56 @@ +#include "crypto_sign.h" +#include "crypto_hash_sha512.h" +#include "ge25519.h" +#include "hram.h" + +int crypto_sign( + unsigned char *sm,unsigned long long *smlen, + const unsigned char *m,unsigned long long mlen, + const unsigned char *sk + ) +{ + sc25519 sck, scs, scsk; + ge25519 ger; + unsigned char r[32]; + unsigned char s[32]; + unsigned char extsk[64]; + unsigned long long i; + unsigned char hmg[crypto_hash_sha512_BYTES]; + unsigned char hram[crypto_hash_sha512_BYTES]; + + crypto_hash_sha512(extsk, sk, 32); + extsk[0] &= 248; + extsk[31] &= 127; + extsk[31] |= 64; + + *smlen = mlen+64; + for(i=0;icaller1_stack=stack64#1 +# asm 2: movq caller1_stack=0(%rsp) +movq %r11,0(%rsp) + +# qhasm: caller2_stack = caller2 +# asm 1: movq caller2_stack=stack64#2 +# asm 2: movq caller2_stack=8(%rsp) +movq %r12,8(%rsp) + +# qhasm: caller3_stack = caller3 +# asm 1: movq caller3_stack=stack64#3 +# asm 2: movq caller3_stack=16(%rsp) +movq %r13,16(%rsp) + +# qhasm: caller4_stack = caller4 +# asm 1: movq caller4_stack=stack64#4 +# asm 2: movq caller4_stack=24(%rsp) +movq %r14,24(%rsp) + +# qhasm: caller5_stack = caller5 +# asm 1: movq caller5_stack=stack64#5 +# asm 2: movq caller5_stack=32(%rsp) +movq %r15,32(%rsp) + +# qhasm: caller6_stack = caller6 +# asm 1: movq caller6_stack=stack64#6 +# asm 2: movq caller6_stack=40(%rsp) +movq %rbx,40(%rsp) + +# qhasm: caller7_stack = caller7 +# asm 1: movq caller7_stack=stack64#7 +# asm 2: movq caller7_stack=48(%rsp) +movq %rbp,48(%rsp) + +# qhasm: yp = yp +# asm 1: mov yp=int64#4 +# asm 2: mov yp=%rcx +mov %rdx,%rcx + +# qhasm: r4 = 0 +# asm 1: mov $0,>r4=int64#5 +# asm 2: mov $0,>r4=%r8 +mov $0,%r8 + +# qhasm: r5 = 0 +# asm 1: mov $0,>r5=int64#6 +# asm 2: mov $0,>r5=%r9 +mov $0,%r9 + +# qhasm: r6 = 0 +# asm 1: mov $0,>r6=int64#8 +# asm 2: mov $0,>r6=%r10 +mov $0,%r10 + +# qhasm: r7 = 0 +# asm 1: mov $0,>r7=int64#9 +# asm 2: mov $0,>r7=%r11 +mov $0,%r11 + +# qhasm: zero = 0 +# asm 1: mov $0,>zero=int64#10 +# asm 2: mov $0,>zero=%r12 +mov $0,%r12 + +# qhasm: rax = *(uint64 *)(xp + 0) +# asm 1: movq 0(rax=int64#7 +# asm 2: movq 0(rax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0) +# asm 1: mulq 0(r0=int64#11 +# asm 
2: mov r0=%r13 +mov %rax,%r13 + +# qhasm: c = rdx +# asm 1: mov c=int64#12 +# asm 2: mov c=%r14 +mov %rdx,%r14 + +# qhasm: rax = *(uint64 *)(xp + 0) +# asm 1: movq 0(rax=int64#7 +# asm 2: movq 0(rax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8) +# asm 1: mulq 8(r1=int64#13 +# asm 2: mov r1=%r15 +mov %rax,%r15 + +# qhasm: carry? r1 += c +# asm 1: add c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 0(rax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16) +# asm 1: mulq 16(r2=int64#14 +# asm 2: mov r2=%rbx +mov %rax,%rbx + +# qhasm: carry? r2 += c +# asm 1: add c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 0(rax=%rax +movq 0(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24) +# asm 1: mulq 24(r3=int64#15 +# asm 2: mov r3=%rbp +mov %rax,%rbp + +# qhasm: carry? r3 += c +# asm 1: add rax=int64#7 +# asm 2: movq 8(rax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0) +# asm 1: mulq 0(c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 8(rax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8) +# asm 1: mulq 8(c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 8(rax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16) +# asm 1: mulq 16(c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 8(rax=%rax +movq 8(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24) +# asm 1: mulq 24(rax=int64#7 +# asm 2: movq 16(rax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0) +# asm 1: mulq 0(c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 16(rax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8) +# asm 1: mulq 8(c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 16(rax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16) +# asm 1: mulq 16(c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 16(rax=%rax +movq 16(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24) +# asm 1: mulq 24(rax=int64#7 +# asm 2: movq 24(rax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 0) +# asm 1: mulq 0(c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 24(rax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 8) +# asm 1: mulq 8(c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 24(rax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16) +# asm 1: mulq 16(c=int64#12 +# asm 2: mov $0,>c=%r14 +mov $0,%r14 + +# qhasm: c += rdx + carry +# asm 1: adc rax=int64#7 +# asm 2: movq 24(rax=%rax +movq 24(%rsi),%rax + +# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24) +# asm 1: mulq 24(caller1=int64#9 +# asm 2: movq caller1=%r11 +movq 0(%rsp),%r11 + +# qhasm: caller2 = 
caller2_stack +# asm 1: movq caller2=int64#10 +# asm 2: movq caller2=%r12 +movq 8(%rsp),%r12 + +# qhasm: caller3 = caller3_stack +# asm 1: movq caller3=int64#11 +# asm 2: movq caller3=%r13 +movq 16(%rsp),%r13 + +# qhasm: caller4 = caller4_stack +# asm 1: movq caller4=int64#12 +# asm 2: movq caller4=%r14 +movq 24(%rsp),%r14 + +# qhasm: caller5 = caller5_stack +# asm 1: movq caller5=int64#13 +# asm 2: movq caller5=%r15 +movq 32(%rsp),%r15 + +# qhasm: caller6 = caller6_stack +# asm 1: movq caller6=int64#14 +# asm 2: movq caller6=%rbx +movq 40(%rsp),%rbx + +# qhasm: caller7 = caller7_stack +# asm 1: movq caller7=int64#15 +# asm 2: movq caller7=%rbp +movq 48(%rsp),%rbp + +# qhasm: leave +add %r11,%rsp +mov %rdi,%rax +mov %rsi,%rdx +ret From 419f1cec19450bf78f23243ab61784198a3195ba Mon Sep 17 00:00:00 2001 From: Brian Warner Date: Fri, 10 Feb 2012 17:17:40 -0800 Subject: [PATCH 07/10] add enough code to let it compile --- src/ed25519-supercop-amd64-64-24k/Makefile | 69 +++++ .../crypto_hash_sha512.h | 1 + .../crypto_int32.h | 6 + .../crypto_int64.h | 6 + .../crypto_sign.h | 13 + .../crypto_uint32.h | 6 + .../crypto_uint64.h | 6 + .../crypto_verify_32.h | 7 + src/ed25519-supercop-amd64-64-24k/publickey.c | 33 +++ .../sha512-blocks.c | 239 ++++++++++++++++++ .../sha512-hash.c | 72 ++++++ src/ed25519-supercop-amd64-64-24k/sha512.h | 4 + src/ed25519-supercop-amd64-64-24k/test.c | 42 +++ src/ed25519-supercop-amd64-64-24k/verify.c | 40 +++ 14 files changed, 544 insertions(+) create mode 100644 src/ed25519-supercop-amd64-64-24k/Makefile create mode 100644 src/ed25519-supercop-amd64-64-24k/crypto_hash_sha512.h create mode 100644 src/ed25519-supercop-amd64-64-24k/crypto_int32.h create mode 100644 src/ed25519-supercop-amd64-64-24k/crypto_int64.h create mode 100644 src/ed25519-supercop-amd64-64-24k/crypto_sign.h create mode 100644 src/ed25519-supercop-amd64-64-24k/crypto_uint32.h create mode 100644 src/ed25519-supercop-amd64-64-24k/crypto_uint64.h create mode 100644 src/ed25519-supercop-amd64-64-24k/crypto_verify_32.h create mode 100644 src/ed25519-supercop-amd64-64-24k/publickey.c create mode 100644 src/ed25519-supercop-amd64-64-24k/sha512-blocks.c create mode 100644 src/ed25519-supercop-amd64-64-24k/sha512-hash.c create mode 100644 src/ed25519-supercop-amd64-64-24k/sha512.h create mode 100644 src/ed25519-supercop-amd64-64-24k/test.c create mode 100644 src/ed25519-supercop-amd64-64-24k/verify.c diff --git a/src/ed25519-supercop-amd64-64-24k/Makefile b/src/ed25519-supercop-amd64-64-24k/Makefile new file mode 100644 index 0000000..87f6bc3 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/Makefile @@ -0,0 +1,69 @@ +OBJS = \ + choose_t.o \ + consts.o \ + fe25519_add.o \ + fe25519_freeze.o \ + fe25519_getparity.o \ + fe25519_invert.o \ + fe25519_iseq.o \ + fe25519_iszero.o \ + fe25519_mul.o \ + fe25519_neg.o \ + fe25519_pack.o \ + fe25519_pow2523.o \ + fe25519_setint.o \ + fe25519_square.o \ + fe25519_sub.o \ + fe25519_unpack.o \ + ge25519_add.o \ + ge25519_add_p1p1.o \ + ge25519_base.o \ + ge25519_dbl_p1p1.o \ + ge25519_double.o \ + ge25519_double_scalarmult.o \ + ge25519_isneutral.o \ + ge25519_multi_scalarmult.o \ + ge25519_nielsadd2.o \ + ge25519_nielsadd_p1p1.o \ + ge25519_p1p1_to_p2.o \ + ge25519_p1p1_to_p3.o \ + ge25519_pack.o \ + ge25519_pnielsadd_p1p1.o \ + ge25519_scalarmult_base.o \ + ge25519_unpackneg.o \ + heap_rootreplaced.o \ + heap_rootreplaced_1limb.o \ + heap_rootreplaced_2limbs.o \ + heap_rootreplaced_3limbs.o \ + hram.o \ + index_heap.o \ + open.o \ + sc25519_add.o \ + sc25519_barrett.o 
\ + sc25519_from32bytes.o \ + sc25519_from64bytes.o \ + sc25519_from_shortsc.o \ + sc25519_iszero.o \ + sc25519_lt.o \ + sc25519_mul.o \ + sc25519_mul_shortsc.o \ + sc25519_slide.o \ + sc25519_sub_nored.o \ + sc25519_to32bytes.o \ + sc25519_window4.o \ + sign.o \ + ull4_mul.o \ + publickey.o sha512-blocks.o sha512-hash.o verify.o + +OFF_OBJS= \ + batch.o \ + keypair.o + +CC=gcc +CFLAGS=-O2 -Wall + +test: test.o $(OBJS) + gcc -o $@ $^ + +clean: + rm -f *.o test diff --git a/src/ed25519-supercop-amd64-64-24k/crypto_hash_sha512.h b/src/ed25519-supercop-amd64-64-24k/crypto_hash_sha512.h new file mode 100644 index 0000000..6c60661 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/crypto_hash_sha512.h @@ -0,0 +1 @@ +#include "sha512.h" diff --git a/src/ed25519-supercop-amd64-64-24k/crypto_int32.h b/src/ed25519-supercop-amd64-64-24k/crypto_int32.h new file mode 100644 index 0000000..cae135e --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/crypto_int32.h @@ -0,0 +1,6 @@ +#ifndef crypto_int32_h +#define crypto_int32_h + +typedef int crypto_int32; + +#endif diff --git a/src/ed25519-supercop-amd64-64-24k/crypto_int64.h b/src/ed25519-supercop-amd64-64-24k/crypto_int64.h new file mode 100644 index 0000000..fc92417 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/crypto_int64.h @@ -0,0 +1,6 @@ +#ifndef crypto_int64_h +#define crypto_int64_h + +typedef long long crypto_int64; + +#endif diff --git a/src/ed25519-supercop-amd64-64-24k/crypto_sign.h b/src/ed25519-supercop-amd64-64-24k/crypto_sign.h new file mode 100644 index 0000000..afed208 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/crypto_sign.h @@ -0,0 +1,13 @@ +#ifndef crypto_sign_edwards25519sha512batch_H +#define crypto_sign_edwards25519sha512batch_H + +#define SECRETKEYBYTES 64 +#define PUBLICKEYBYTES 32 +#define SIGNATUREBYTES 64 + +extern int crypto_sign(unsigned char *,unsigned long long *,const unsigned char *,unsigned long long,const unsigned char *); +extern int crypto_sign_open(unsigned char *,unsigned long long *,const unsigned char *,unsigned long long,const unsigned char *); +extern int crypto_sign_keypair(unsigned char *,unsigned char *); +extern int crypto_sign_publickey(unsigned char *pk, unsigned char *sk, unsigned char *seed); + +#endif diff --git a/src/ed25519-supercop-amd64-64-24k/crypto_uint32.h b/src/ed25519-supercop-amd64-64-24k/crypto_uint32.h new file mode 100644 index 0000000..21020d7 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/crypto_uint32.h @@ -0,0 +1,6 @@ +#ifndef crypto_uint32_h +#define crypto_uint32_h + +typedef unsigned int crypto_uint32; + +#endif diff --git a/src/ed25519-supercop-amd64-64-24k/crypto_uint64.h b/src/ed25519-supercop-amd64-64-24k/crypto_uint64.h new file mode 100644 index 0000000..5aa0070 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/crypto_uint64.h @@ -0,0 +1,6 @@ +#ifndef crypto_uint64_h +#define crypto_uint64_h + +typedef unsigned long long crypto_uint64; + +#endif diff --git a/src/ed25519-supercop-amd64-64-24k/crypto_verify_32.h b/src/ed25519-supercop-amd64-64-24k/crypto_verify_32.h new file mode 100644 index 0000000..ad265c7 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/crypto_verify_32.h @@ -0,0 +1,7 @@ +#ifndef crypto_verify_32_H +#define crypto_verify_32_H + +#define crypto_verify_32_ref_BYTES 32 +extern int crypto_verify_32(const unsigned char *,const unsigned char *); + +#endif diff --git a/src/ed25519-supercop-amd64-64-24k/publickey.c b/src/ed25519-supercop-amd64-64-24k/publickey.c new file mode 100644 index 0000000..ca84244 --- /dev/null +++ 
b/src/ed25519-supercop-amd64-64-24k/publickey.c @@ -0,0 +1,33 @@ +#include "crypto_sign.h" + +#include "crypto_verify_32.h" +#include "sha512.h" + +#include "ge25519.h" + + +int crypto_sign_publickey( + unsigned char *pk, // write 32 bytes into this + unsigned char *sk, // write 64 bytes into this (seed+pubkey) + unsigned char *seed // 32 bytes + ) +{ + sc25519 scsk; + ge25519 gepk; + int i; + + crypto_hash_sha512(sk, seed, 32); + sk[0] &= 248; + sk[31] &= 127; + sk[31] |= 64; + + sc25519_from32bytes(&scsk,sk); + + ge25519_scalarmult_base(&gepk, &scsk); + ge25519_pack(pk, &gepk); + for(i=0;i<32;i++) + sk[32 + i] = pk[i]; + for(i=0;i<32;i++) + sk[i] = seed[i]; + return 0; +} diff --git a/src/ed25519-supercop-amd64-64-24k/sha512-blocks.c b/src/ed25519-supercop-amd64-64-24k/sha512-blocks.c new file mode 100644 index 0000000..c8dbf0d --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/sha512-blocks.c @@ -0,0 +1,239 @@ +//#include "crypto_hashblocks.h" + +typedef unsigned long long uint64; + +static uint64 load_bigendian(const unsigned char *x) +{ + return + (uint64) (x[7]) \ + | (((uint64) (x[6])) << 8) \ + | (((uint64) (x[5])) << 16) \ + | (((uint64) (x[4])) << 24) \ + | (((uint64) (x[3])) << 32) \ + | (((uint64) (x[2])) << 40) \ + | (((uint64) (x[1])) << 48) \ + | (((uint64) (x[0])) << 56) + ; +} + +static void store_bigendian(unsigned char *x,uint64 u) +{ + x[7] = u; u >>= 8; + x[6] = u; u >>= 8; + x[5] = u; u >>= 8; + x[4] = u; u >>= 8; + x[3] = u; u >>= 8; + x[2] = u; u >>= 8; + x[1] = u; u >>= 8; + x[0] = u; +} + +#define SHR(x,c) ((x) >> (c)) +#define ROTR(x,c) (((x) >> (c)) | ((x) << (64 - (c)))) + +#define Ch(x,y,z) ((x & y) ^ (~x & z)) +#define Maj(x,y,z) ((x & y) ^ (x & z) ^ (y & z)) +#define Sigma0(x) (ROTR(x,28) ^ ROTR(x,34) ^ ROTR(x,39)) +#define Sigma1(x) (ROTR(x,14) ^ ROTR(x,18) ^ ROTR(x,41)) +#define sigma0(x) (ROTR(x, 1) ^ ROTR(x, 8) ^ SHR(x,7)) +#define sigma1(x) (ROTR(x,19) ^ ROTR(x,61) ^ SHR(x,6)) + +#define M(w0,w14,w9,w1) w0 = sigma1(w14) + w9 + sigma0(w1) + w0; + +#define EXPAND \ + M(w0 ,w14,w9 ,w1 ) \ + M(w1 ,w15,w10,w2 ) \ + M(w2 ,w0 ,w11,w3 ) \ + M(w3 ,w1 ,w12,w4 ) \ + M(w4 ,w2 ,w13,w5 ) \ + M(w5 ,w3 ,w14,w6 ) \ + M(w6 ,w4 ,w15,w7 ) \ + M(w7 ,w5 ,w0 ,w8 ) \ + M(w8 ,w6 ,w1 ,w9 ) \ + M(w9 ,w7 ,w2 ,w10) \ + M(w10,w8 ,w3 ,w11) \ + M(w11,w9 ,w4 ,w12) \ + M(w12,w10,w5 ,w13) \ + M(w13,w11,w6 ,w14) \ + M(w14,w12,w7 ,w15) \ + M(w15,w13,w8 ,w0 ) + +#define F(w,k) \ + T1 = h + Sigma1(e) + Ch(e,f,g) + k + w; \ + T2 = Sigma0(a) + Maj(a,b,c); \ + h = g; \ + g = f; \ + f = e; \ + e = d + T1; \ + d = c; \ + c = b; \ + b = a; \ + a = T1 + T2; + +int crypto_hashblocks(unsigned char *statebytes,const unsigned char *in,unsigned long long inlen) +{ + uint64 state[8]; + uint64 a; + uint64 b; + uint64 c; + uint64 d; + uint64 e; + uint64 f; + uint64 g; + uint64 h; + uint64 T1; + uint64 T2; + + a = load_bigendian(statebytes + 0); state[0] = a; + b = load_bigendian(statebytes + 8); state[1] = b; + c = load_bigendian(statebytes + 16); state[2] = c; + d = load_bigendian(statebytes + 24); state[3] = d; + e = load_bigendian(statebytes + 32); state[4] = e; + f = load_bigendian(statebytes + 40); state[5] = f; + g = load_bigendian(statebytes + 48); state[6] = g; + h = load_bigendian(statebytes + 56); state[7] = h; + + while (inlen >= 128) { + uint64 w0 = load_bigendian(in + 0); + uint64 w1 = load_bigendian(in + 8); + uint64 w2 = load_bigendian(in + 16); + uint64 w3 = load_bigendian(in + 24); + uint64 w4 = load_bigendian(in + 32); + uint64 w5 = load_bigendian(in + 40); + uint64 w6 = 
load_bigendian(in + 48); + uint64 w7 = load_bigendian(in + 56); + uint64 w8 = load_bigendian(in + 64); + uint64 w9 = load_bigendian(in + 72); + uint64 w10 = load_bigendian(in + 80); + uint64 w11 = load_bigendian(in + 88); + uint64 w12 = load_bigendian(in + 96); + uint64 w13 = load_bigendian(in + 104); + uint64 w14 = load_bigendian(in + 112); + uint64 w15 = load_bigendian(in + 120); + + F(w0 ,0x428a2f98d728ae22ULL) + F(w1 ,0x7137449123ef65cdULL) + F(w2 ,0xb5c0fbcfec4d3b2fULL) + F(w3 ,0xe9b5dba58189dbbcULL) + F(w4 ,0x3956c25bf348b538ULL) + F(w5 ,0x59f111f1b605d019ULL) + F(w6 ,0x923f82a4af194f9bULL) + F(w7 ,0xab1c5ed5da6d8118ULL) + F(w8 ,0xd807aa98a3030242ULL) + F(w9 ,0x12835b0145706fbeULL) + F(w10,0x243185be4ee4b28cULL) + F(w11,0x550c7dc3d5ffb4e2ULL) + F(w12,0x72be5d74f27b896fULL) + F(w13,0x80deb1fe3b1696b1ULL) + F(w14,0x9bdc06a725c71235ULL) + F(w15,0xc19bf174cf692694ULL) + + EXPAND + + F(w0 ,0xe49b69c19ef14ad2ULL) + F(w1 ,0xefbe4786384f25e3ULL) + F(w2 ,0x0fc19dc68b8cd5b5ULL) + F(w3 ,0x240ca1cc77ac9c65ULL) + F(w4 ,0x2de92c6f592b0275ULL) + F(w5 ,0x4a7484aa6ea6e483ULL) + F(w6 ,0x5cb0a9dcbd41fbd4ULL) + F(w7 ,0x76f988da831153b5ULL) + F(w8 ,0x983e5152ee66dfabULL) + F(w9 ,0xa831c66d2db43210ULL) + F(w10,0xb00327c898fb213fULL) + F(w11,0xbf597fc7beef0ee4ULL) + F(w12,0xc6e00bf33da88fc2ULL) + F(w13,0xd5a79147930aa725ULL) + F(w14,0x06ca6351e003826fULL) + F(w15,0x142929670a0e6e70ULL) + + EXPAND + + F(w0 ,0x27b70a8546d22ffcULL) + F(w1 ,0x2e1b21385c26c926ULL) + F(w2 ,0x4d2c6dfc5ac42aedULL) + F(w3 ,0x53380d139d95b3dfULL) + F(w4 ,0x650a73548baf63deULL) + F(w5 ,0x766a0abb3c77b2a8ULL) + F(w6 ,0x81c2c92e47edaee6ULL) + F(w7 ,0x92722c851482353bULL) + F(w8 ,0xa2bfe8a14cf10364ULL) + F(w9 ,0xa81a664bbc423001ULL) + F(w10,0xc24b8b70d0f89791ULL) + F(w11,0xc76c51a30654be30ULL) + F(w12,0xd192e819d6ef5218ULL) + F(w13,0xd69906245565a910ULL) + F(w14,0xf40e35855771202aULL) + F(w15,0x106aa07032bbd1b8ULL) + + EXPAND + + F(w0 ,0x19a4c116b8d2d0c8ULL) + F(w1 ,0x1e376c085141ab53ULL) + F(w2 ,0x2748774cdf8eeb99ULL) + F(w3 ,0x34b0bcb5e19b48a8ULL) + F(w4 ,0x391c0cb3c5c95a63ULL) + F(w5 ,0x4ed8aa4ae3418acbULL) + F(w6 ,0x5b9cca4f7763e373ULL) + F(w7 ,0x682e6ff3d6b2b8a3ULL) + F(w8 ,0x748f82ee5defb2fcULL) + F(w9 ,0x78a5636f43172f60ULL) + F(w10,0x84c87814a1f0ab72ULL) + F(w11,0x8cc702081a6439ecULL) + F(w12,0x90befffa23631e28ULL) + F(w13,0xa4506cebde82bde9ULL) + F(w14,0xbef9a3f7b2c67915ULL) + F(w15,0xc67178f2e372532bULL) + + EXPAND + + F(w0 ,0xca273eceea26619cULL) + F(w1 ,0xd186b8c721c0c207ULL) + F(w2 ,0xeada7dd6cde0eb1eULL) + F(w3 ,0xf57d4f7fee6ed178ULL) + F(w4 ,0x06f067aa72176fbaULL) + F(w5 ,0x0a637dc5a2c898a6ULL) + F(w6 ,0x113f9804bef90daeULL) + F(w7 ,0x1b710b35131c471bULL) + F(w8 ,0x28db77f523047d84ULL) + F(w9 ,0x32caab7b40c72493ULL) + F(w10,0x3c9ebe0a15c9bebcULL) + F(w11,0x431d67c49c100d4cULL) + F(w12,0x4cc5d4becb3e42b6ULL) + F(w13,0x597f299cfc657e2aULL) + F(w14,0x5fcb6fab3ad6faecULL) + F(w15,0x6c44198c4a475817ULL) + + a += state[0]; + b += state[1]; + c += state[2]; + d += state[3]; + e += state[4]; + f += state[5]; + g += state[6]; + h += state[7]; + + state[0] = a; + state[1] = b; + state[2] = c; + state[3] = d; + state[4] = e; + state[5] = f; + state[6] = g; + state[7] = h; + + in += 128; + inlen -= 128; + } + + store_bigendian(statebytes + 0,state[0]); + store_bigendian(statebytes + 8,state[1]); + store_bigendian(statebytes + 16,state[2]); + store_bigendian(statebytes + 24,state[3]); + store_bigendian(statebytes + 32,state[4]); + store_bigendian(statebytes + 40,state[5]); + store_bigendian(statebytes + 48,state[6]); + 
store_bigendian(statebytes + 56,state[7]); + + return inlen; +} diff --git a/src/ed25519-supercop-amd64-64-24k/sha512-hash.c b/src/ed25519-supercop-amd64-64-24k/sha512-hash.c new file mode 100644 index 0000000..f2f2925 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/sha512-hash.c @@ -0,0 +1,72 @@ +/* +20080913 +D. J. Bernstein +Public domain. +*/ + +#include "sha512.h" + +extern int crypto_hashblocks(unsigned char *statebytes,const unsigned char *in,unsigned long long inlen); + +#define blocks crypto_hashblocks + +static const unsigned char iv[64] = { + 0x6a,0x09,0xe6,0x67,0xf3,0xbc,0xc9,0x08, + 0xbb,0x67,0xae,0x85,0x84,0xca,0xa7,0x3b, + 0x3c,0x6e,0xf3,0x72,0xfe,0x94,0xf8,0x2b, + 0xa5,0x4f,0xf5,0x3a,0x5f,0x1d,0x36,0xf1, + 0x51,0x0e,0x52,0x7f,0xad,0xe6,0x82,0xd1, + 0x9b,0x05,0x68,0x8c,0x2b,0x3e,0x6c,0x1f, + 0x1f,0x83,0xd9,0xab,0xfb,0x41,0xbd,0x6b, + 0x5b,0xe0,0xcd,0x19,0x13,0x7e,0x21,0x79 +} ; + +typedef unsigned long long uint64; + +int crypto_hash_sha512(unsigned char *out,const unsigned char *in,unsigned long long inlen) +{ + unsigned char h[64]; + unsigned char padded[256]; + int i; + unsigned long long bytes = inlen; + + for (i = 0;i < 64;++i) h[i] = iv[i]; + + blocks(h,in,inlen); + in += inlen; + inlen &= 127; + in -= inlen; + + for (i = 0;i < inlen;++i) padded[i] = in[i]; + padded[inlen] = 0x80; + + if (inlen < 112) { + for (i = inlen + 1;i < 119;++i) padded[i] = 0; + padded[119] = bytes >> 61; + padded[120] = bytes >> 53; + padded[121] = bytes >> 45; + padded[122] = bytes >> 37; + padded[123] = bytes >> 29; + padded[124] = bytes >> 21; + padded[125] = bytes >> 13; + padded[126] = bytes >> 5; + padded[127] = bytes << 3; + blocks(h,padded,128); + } else { + for (i = inlen + 1;i < 247;++i) padded[i] = 0; + padded[247] = bytes >> 61; + padded[248] = bytes >> 53; + padded[249] = bytes >> 45; + padded[250] = bytes >> 37; + padded[251] = bytes >> 29; + padded[252] = bytes >> 21; + padded[253] = bytes >> 13; + padded[254] = bytes >> 5; + padded[255] = bytes << 3; + blocks(h,padded,256); + } + + for (i = 0;i < 64;++i) out[i] = h[i]; + + return 0; +} diff --git a/src/ed25519-supercop-amd64-64-24k/sha512.h b/src/ed25519-supercop-amd64-64-24k/sha512.h new file mode 100644 index 0000000..37376b1 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/sha512.h @@ -0,0 +1,4 @@ +extern int crypto_hashblocks(unsigned char *statebytes,const unsigned char *in,unsigned long long inlen); +extern int crypto_hash_sha512(unsigned char *out,const unsigned char *in,unsigned long long inlen); + +#define crypto_hash_sha512_BYTES 64 diff --git a/src/ed25519-supercop-amd64-64-24k/test.c b/src/ed25519-supercop-amd64-64-24k/test.c new file mode 100644 index 0000000..cf344f7 --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/test.c @@ -0,0 +1,42 @@ + +#include <stdio.h> +#include <string.h> +#include <stdlib.h> +#include "crypto_sign.h" + +char *msg = "Hello World"; + +int main(int argc, char *argv[]) { + unsigned char sk[SECRETKEYBYTES], vk[PUBLICKEYBYTES]; + unsigned char *sigmsg, *newmsg; + unsigned long long sigmsglen, newmsglen; + int ret; + unsigned char seed[32]; + int i; + for (i=0; i<32; i++) seed[i] = i; // not so random + crypto_sign_publickey(vk, sk, seed); + printf("got keypair\n"); + sigmsg = malloc(strlen(msg)+1+SIGNATUREBYTES); + if (!sigmsg) + return 1; + crypto_sign(sigmsg, &sigmsglen, (unsigned char *)msg, strlen(msg)+1, sk); + printf("got signature\n"); + if (sigmsglen != strlen(msg)+1+SIGNATUREBYTES) + return 2; + newmsg = malloc(sigmsglen); + if (!newmsg) + return 3; + ret = crypto_sign_open(newmsg, &newmsglen,
sigmsg, sigmsglen, vk); + printf("verified signature\n"); + if (ret == 0) + printf("good!\n"); + else + printf("bad\n"); + sigmsg[0] ^= 0x01; + ret = crypto_sign_open(newmsg, &newmsglen, sigmsg, sigmsglen, vk); + if (ret == 0) + printf("bad: failed to detect simple corruption\n"); + else + printf("good: detected simple corruption\n"); + return 0; +} diff --git a/src/ed25519-supercop-amd64-64-24k/verify.c b/src/ed25519-supercop-amd64-64-24k/verify.c new file mode 100644 index 0000000..a04186b --- /dev/null +++ b/src/ed25519-supercop-amd64-64-24k/verify.c @@ -0,0 +1,40 @@ +#include "crypto_verify_32.h" + +int crypto_verify_32(const unsigned char *x,const unsigned char *y) +{ + unsigned int differentbits = 0; +#define F(i) differentbits |= x[i] ^ y[i]; + F(0) + F(1) + F(2) + F(3) + F(4) + F(5) + F(6) + F(7) + F(8) + F(9) + F(10) + F(11) + F(12) + F(13) + F(14) + F(15) + F(16) + F(17) + F(18) + F(19) + F(20) + F(21) + F(22) + F(23) + F(24) + F(25) + F(26) + F(27) + F(28) + F(29) + F(30) + F(31) + return (1 & ((differentbits - 1) >> 8)) - 1; +} From 53cb38b767d08495ab7d37d11e323c7c21f3ab8f Mon Sep 17 00:00:00 2001 From: Brian Warner Date: Fri, 10 Feb 2012 17:21:13 -0800 Subject: [PATCH 08/10] 51-30k: add enough code to let it compile --- src/ed25519-supercop-amd64-51-30k/Makefile | 71 ++++++ .../crypto_hash_sha512.h | 1 + .../crypto_int32.h | 6 + .../crypto_int64.h | 6 + .../crypto_sign.h | 13 + .../crypto_uint32.h | 6 + .../crypto_uint64.h | 6 + .../crypto_verify_32.h | 7 + src/ed25519-supercop-amd64-51-30k/publickey.c | 33 +++ .../sha512-blocks.c | 239 ++++++++++++++++++ .../sha512-hash.c | 72 ++++++ src/ed25519-supercop-amd64-51-30k/sha512.h | 4 + src/ed25519-supercop-amd64-51-30k/test.c | 42 +++ src/ed25519-supercop-amd64-51-30k/verify.c | 40 +++ 14 files changed, 546 insertions(+) create mode 100644 src/ed25519-supercop-amd64-51-30k/Makefile create mode 100644 src/ed25519-supercop-amd64-51-30k/crypto_hash_sha512.h create mode 100644 src/ed25519-supercop-amd64-51-30k/crypto_int32.h create mode 100644 src/ed25519-supercop-amd64-51-30k/crypto_int64.h create mode 100644 src/ed25519-supercop-amd64-51-30k/crypto_sign.h create mode 100644 src/ed25519-supercop-amd64-51-30k/crypto_uint32.h create mode 100644 src/ed25519-supercop-amd64-51-30k/crypto_uint64.h create mode 100644 src/ed25519-supercop-amd64-51-30k/crypto_verify_32.h create mode 100644 src/ed25519-supercop-amd64-51-30k/publickey.c create mode 100644 src/ed25519-supercop-amd64-51-30k/sha512-blocks.c create mode 100644 src/ed25519-supercop-amd64-51-30k/sha512-hash.c create mode 100644 src/ed25519-supercop-amd64-51-30k/sha512.h create mode 100644 src/ed25519-supercop-amd64-51-30k/test.c create mode 100644 src/ed25519-supercop-amd64-51-30k/verify.c diff --git a/src/ed25519-supercop-amd64-51-30k/Makefile b/src/ed25519-supercop-amd64-51-30k/Makefile new file mode 100644 index 0000000..d9f8213 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/Makefile @@ -0,0 +1,71 @@ +OBJS = \ + choose_t.o \ + consts.o \ + fe25519_add.o \ + fe25519_freeze.o \ + fe25519_getparity.o \ + fe25519_invert.o \ + fe25519_iseq.o \ + fe25519_iszero.o \ + fe25519_mul.o \ + fe25519_neg.o \ + fe25519_nsquare.o \ + fe25519_pack.o \ + fe25519_pow2523.o \ + fe25519_setint.o \ + fe25519_square.o \ + fe25519_sub.o \ + fe25519_unpack.o \ + ge25519_add.o \ + ge25519_add_p1p1.o \ + ge25519_base.o \ + ge25519_dbl_p1p1.o \ + ge25519_double.o \ + ge25519_double_scalarmult.o \ + ge25519_isneutral.o \ + ge25519_multi_scalarmult.o \ + ge25519_nielsadd2.o \ + ge25519_nielsadd_p1p1.o 
\ + ge25519_p1p1_to_p2.o \ + ge25519_p1p1_to_p3.o \ + ge25519_p1p1_to_pniels.o \ + ge25519_pack.o \ + ge25519_pnielsadd_p1p1.o \ + ge25519_scalarmult_base.o \ + ge25519_unpackneg.o \ + heap_rootreplaced.o \ + heap_rootreplaced_1limb.o \ + heap_rootreplaced_2limbs.o \ + heap_rootreplaced_3limbs.o \ + hram.o \ + index_heap.o \ + open.o \ + sc25519_add.o \ + sc25519_barrett.o \ + sc25519_from32bytes.o \ + sc25519_from64bytes.o \ + sc25519_from_shortsc.o \ + sc25519_iszero.o \ + sc25519_lt.o \ + sc25519_mul.o \ + sc25519_mul_shortsc.o \ + sc25519_slide.o \ + sc25519_sub_nored.o \ + sc25519_to32bytes.o \ + sc25519_window4.o \ + sign.o \ + ull4_mul.o \ + publickey.o sha512-blocks.o sha512-hash.o verify.o + +OFF_OBJS= \ + batch.o \ + keypair.o + +CC=gcc +CFLAGS=-O2 -Wall + +test: test.o $(OBJS) + gcc -o $@ $^ + +clean: + rm -f *.o test diff --git a/src/ed25519-supercop-amd64-51-30k/crypto_hash_sha512.h b/src/ed25519-supercop-amd64-51-30k/crypto_hash_sha512.h new file mode 100644 index 0000000..6c60661 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/crypto_hash_sha512.h @@ -0,0 +1 @@ +#include "sha512.h" diff --git a/src/ed25519-supercop-amd64-51-30k/crypto_int32.h b/src/ed25519-supercop-amd64-51-30k/crypto_int32.h new file mode 100644 index 0000000..cae135e --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/crypto_int32.h @@ -0,0 +1,6 @@ +#ifndef crypto_int32_h +#define crypto_int32_h + +typedef int crypto_int32; + +#endif diff --git a/src/ed25519-supercop-amd64-51-30k/crypto_int64.h b/src/ed25519-supercop-amd64-51-30k/crypto_int64.h new file mode 100644 index 0000000..fc92417 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/crypto_int64.h @@ -0,0 +1,6 @@ +#ifndef crypto_int64_h +#define crypto_int64_h + +typedef long long crypto_int64; + +#endif diff --git a/src/ed25519-supercop-amd64-51-30k/crypto_sign.h b/src/ed25519-supercop-amd64-51-30k/crypto_sign.h new file mode 100644 index 0000000..afed208 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/crypto_sign.h @@ -0,0 +1,13 @@ +#ifndef crypto_sign_edwards25519sha512batch_H +#define crypto_sign_edwards25519sha512batch_H + +#define SECRETKEYBYTES 64 +#define PUBLICKEYBYTES 32 +#define SIGNATUREBYTES 64 + +extern int crypto_sign(unsigned char *,unsigned long long *,const unsigned char *,unsigned long long,const unsigned char *); +extern int crypto_sign_open(unsigned char *,unsigned long long *,const unsigned char *,unsigned long long,const unsigned char *); +extern int crypto_sign_keypair(unsigned char *,unsigned char *); +extern int crypto_sign_publickey(unsigned char *pk, unsigned char *sk, unsigned char *seed); + +#endif diff --git a/src/ed25519-supercop-amd64-51-30k/crypto_uint32.h b/src/ed25519-supercop-amd64-51-30k/crypto_uint32.h new file mode 100644 index 0000000..21020d7 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/crypto_uint32.h @@ -0,0 +1,6 @@ +#ifndef crypto_uint32_h +#define crypto_uint32_h + +typedef unsigned int crypto_uint32; + +#endif diff --git a/src/ed25519-supercop-amd64-51-30k/crypto_uint64.h b/src/ed25519-supercop-amd64-51-30k/crypto_uint64.h new file mode 100644 index 0000000..5aa0070 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/crypto_uint64.h @@ -0,0 +1,6 @@ +#ifndef crypto_uint64_h +#define crypto_uint64_h + +typedef unsigned long long crypto_uint64; + +#endif diff --git a/src/ed25519-supercop-amd64-51-30k/crypto_verify_32.h b/src/ed25519-supercop-amd64-51-30k/crypto_verify_32.h new file mode 100644 index 0000000..ad265c7 --- /dev/null +++ 
b/src/ed25519-supercop-amd64-51-30k/crypto_verify_32.h @@ -0,0 +1,7 @@ +#ifndef crypto_verify_32_H +#define crypto_verify_32_H + +#define crypto_verify_32_ref_BYTES 32 +extern int crypto_verify_32(const unsigned char *,const unsigned char *); + +#endif diff --git a/src/ed25519-supercop-amd64-51-30k/publickey.c b/src/ed25519-supercop-amd64-51-30k/publickey.c new file mode 100644 index 0000000..ca84244 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/publickey.c @@ -0,0 +1,33 @@ +#include "crypto_sign.h" + +#include "crypto_verify_32.h" +#include "sha512.h" + +#include "ge25519.h" + + +int crypto_sign_publickey( + unsigned char *pk, // write 32 bytes into this + unsigned char *sk, // write 64 bytes into this (seed+pubkey) + unsigned char *seed // 32 bytes + ) +{ + sc25519 scsk; + ge25519 gepk; + int i; + + crypto_hash_sha512(sk, seed, 32); + sk[0] &= 248; + sk[31] &= 127; + sk[31] |= 64; + + sc25519_from32bytes(&scsk,sk); + + ge25519_scalarmult_base(&gepk, &scsk); + ge25519_pack(pk, &gepk); + for(i=0;i<32;i++) + sk[32 + i] = pk[i]; + for(i=0;i<32;i++) + sk[i] = seed[i]; + return 0; +} diff --git a/src/ed25519-supercop-amd64-51-30k/sha512-blocks.c b/src/ed25519-supercop-amd64-51-30k/sha512-blocks.c new file mode 100644 index 0000000..c8dbf0d --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/sha512-blocks.c @@ -0,0 +1,239 @@ +//#include "crypto_hashblocks.h" + +typedef unsigned long long uint64; + +static uint64 load_bigendian(const unsigned char *x) +{ + return + (uint64) (x[7]) \ + | (((uint64) (x[6])) << 8) \ + | (((uint64) (x[5])) << 16) \ + | (((uint64) (x[4])) << 24) \ + | (((uint64) (x[3])) << 32) \ + | (((uint64) (x[2])) << 40) \ + | (((uint64) (x[1])) << 48) \ + | (((uint64) (x[0])) << 56) + ; +} + +static void store_bigendian(unsigned char *x,uint64 u) +{ + x[7] = u; u >>= 8; + x[6] = u; u >>= 8; + x[5] = u; u >>= 8; + x[4] = u; u >>= 8; + x[3] = u; u >>= 8; + x[2] = u; u >>= 8; + x[1] = u; u >>= 8; + x[0] = u; +} + +#define SHR(x,c) ((x) >> (c)) +#define ROTR(x,c) (((x) >> (c)) | ((x) << (64 - (c)))) + +#define Ch(x,y,z) ((x & y) ^ (~x & z)) +#define Maj(x,y,z) ((x & y) ^ (x & z) ^ (y & z)) +#define Sigma0(x) (ROTR(x,28) ^ ROTR(x,34) ^ ROTR(x,39)) +#define Sigma1(x) (ROTR(x,14) ^ ROTR(x,18) ^ ROTR(x,41)) +#define sigma0(x) (ROTR(x, 1) ^ ROTR(x, 8) ^ SHR(x,7)) +#define sigma1(x) (ROTR(x,19) ^ ROTR(x,61) ^ SHR(x,6)) + +#define M(w0,w14,w9,w1) w0 = sigma1(w14) + w9 + sigma0(w1) + w0; + +#define EXPAND \ + M(w0 ,w14,w9 ,w1 ) \ + M(w1 ,w15,w10,w2 ) \ + M(w2 ,w0 ,w11,w3 ) \ + M(w3 ,w1 ,w12,w4 ) \ + M(w4 ,w2 ,w13,w5 ) \ + M(w5 ,w3 ,w14,w6 ) \ + M(w6 ,w4 ,w15,w7 ) \ + M(w7 ,w5 ,w0 ,w8 ) \ + M(w8 ,w6 ,w1 ,w9 ) \ + M(w9 ,w7 ,w2 ,w10) \ + M(w10,w8 ,w3 ,w11) \ + M(w11,w9 ,w4 ,w12) \ + M(w12,w10,w5 ,w13) \ + M(w13,w11,w6 ,w14) \ + M(w14,w12,w7 ,w15) \ + M(w15,w13,w8 ,w0 ) + +#define F(w,k) \ + T1 = h + Sigma1(e) + Ch(e,f,g) + k + w; \ + T2 = Sigma0(a) + Maj(a,b,c); \ + h = g; \ + g = f; \ + f = e; \ + e = d + T1; \ + d = c; \ + c = b; \ + b = a; \ + a = T1 + T2; + +int crypto_hashblocks(unsigned char *statebytes,const unsigned char *in,unsigned long long inlen) +{ + uint64 state[8]; + uint64 a; + uint64 b; + uint64 c; + uint64 d; + uint64 e; + uint64 f; + uint64 g; + uint64 h; + uint64 T1; + uint64 T2; + + a = load_bigendian(statebytes + 0); state[0] = a; + b = load_bigendian(statebytes + 8); state[1] = b; + c = load_bigendian(statebytes + 16); state[2] = c; + d = load_bigendian(statebytes + 24); state[3] = d; + e = load_bigendian(statebytes + 32); state[4] = e; + f = 
load_bigendian(statebytes + 40); state[5] = f; + g = load_bigendian(statebytes + 48); state[6] = g; + h = load_bigendian(statebytes + 56); state[7] = h; + + while (inlen >= 128) { + uint64 w0 = load_bigendian(in + 0); + uint64 w1 = load_bigendian(in + 8); + uint64 w2 = load_bigendian(in + 16); + uint64 w3 = load_bigendian(in + 24); + uint64 w4 = load_bigendian(in + 32); + uint64 w5 = load_bigendian(in + 40); + uint64 w6 = load_bigendian(in + 48); + uint64 w7 = load_bigendian(in + 56); + uint64 w8 = load_bigendian(in + 64); + uint64 w9 = load_bigendian(in + 72); + uint64 w10 = load_bigendian(in + 80); + uint64 w11 = load_bigendian(in + 88); + uint64 w12 = load_bigendian(in + 96); + uint64 w13 = load_bigendian(in + 104); + uint64 w14 = load_bigendian(in + 112); + uint64 w15 = load_bigendian(in + 120); + + F(w0 ,0x428a2f98d728ae22ULL) + F(w1 ,0x7137449123ef65cdULL) + F(w2 ,0xb5c0fbcfec4d3b2fULL) + F(w3 ,0xe9b5dba58189dbbcULL) + F(w4 ,0x3956c25bf348b538ULL) + F(w5 ,0x59f111f1b605d019ULL) + F(w6 ,0x923f82a4af194f9bULL) + F(w7 ,0xab1c5ed5da6d8118ULL) + F(w8 ,0xd807aa98a3030242ULL) + F(w9 ,0x12835b0145706fbeULL) + F(w10,0x243185be4ee4b28cULL) + F(w11,0x550c7dc3d5ffb4e2ULL) + F(w12,0x72be5d74f27b896fULL) + F(w13,0x80deb1fe3b1696b1ULL) + F(w14,0x9bdc06a725c71235ULL) + F(w15,0xc19bf174cf692694ULL) + + EXPAND + + F(w0 ,0xe49b69c19ef14ad2ULL) + F(w1 ,0xefbe4786384f25e3ULL) + F(w2 ,0x0fc19dc68b8cd5b5ULL) + F(w3 ,0x240ca1cc77ac9c65ULL) + F(w4 ,0x2de92c6f592b0275ULL) + F(w5 ,0x4a7484aa6ea6e483ULL) + F(w6 ,0x5cb0a9dcbd41fbd4ULL) + F(w7 ,0x76f988da831153b5ULL) + F(w8 ,0x983e5152ee66dfabULL) + F(w9 ,0xa831c66d2db43210ULL) + F(w10,0xb00327c898fb213fULL) + F(w11,0xbf597fc7beef0ee4ULL) + F(w12,0xc6e00bf33da88fc2ULL) + F(w13,0xd5a79147930aa725ULL) + F(w14,0x06ca6351e003826fULL) + F(w15,0x142929670a0e6e70ULL) + + EXPAND + + F(w0 ,0x27b70a8546d22ffcULL) + F(w1 ,0x2e1b21385c26c926ULL) + F(w2 ,0x4d2c6dfc5ac42aedULL) + F(w3 ,0x53380d139d95b3dfULL) + F(w4 ,0x650a73548baf63deULL) + F(w5 ,0x766a0abb3c77b2a8ULL) + F(w6 ,0x81c2c92e47edaee6ULL) + F(w7 ,0x92722c851482353bULL) + F(w8 ,0xa2bfe8a14cf10364ULL) + F(w9 ,0xa81a664bbc423001ULL) + F(w10,0xc24b8b70d0f89791ULL) + F(w11,0xc76c51a30654be30ULL) + F(w12,0xd192e819d6ef5218ULL) + F(w13,0xd69906245565a910ULL) + F(w14,0xf40e35855771202aULL) + F(w15,0x106aa07032bbd1b8ULL) + + EXPAND + + F(w0 ,0x19a4c116b8d2d0c8ULL) + F(w1 ,0x1e376c085141ab53ULL) + F(w2 ,0x2748774cdf8eeb99ULL) + F(w3 ,0x34b0bcb5e19b48a8ULL) + F(w4 ,0x391c0cb3c5c95a63ULL) + F(w5 ,0x4ed8aa4ae3418acbULL) + F(w6 ,0x5b9cca4f7763e373ULL) + F(w7 ,0x682e6ff3d6b2b8a3ULL) + F(w8 ,0x748f82ee5defb2fcULL) + F(w9 ,0x78a5636f43172f60ULL) + F(w10,0x84c87814a1f0ab72ULL) + F(w11,0x8cc702081a6439ecULL) + F(w12,0x90befffa23631e28ULL) + F(w13,0xa4506cebde82bde9ULL) + F(w14,0xbef9a3f7b2c67915ULL) + F(w15,0xc67178f2e372532bULL) + + EXPAND + + F(w0 ,0xca273eceea26619cULL) + F(w1 ,0xd186b8c721c0c207ULL) + F(w2 ,0xeada7dd6cde0eb1eULL) + F(w3 ,0xf57d4f7fee6ed178ULL) + F(w4 ,0x06f067aa72176fbaULL) + F(w5 ,0x0a637dc5a2c898a6ULL) + F(w6 ,0x113f9804bef90daeULL) + F(w7 ,0x1b710b35131c471bULL) + F(w8 ,0x28db77f523047d84ULL) + F(w9 ,0x32caab7b40c72493ULL) + F(w10,0x3c9ebe0a15c9bebcULL) + F(w11,0x431d67c49c100d4cULL) + F(w12,0x4cc5d4becb3e42b6ULL) + F(w13,0x597f299cfc657e2aULL) + F(w14,0x5fcb6fab3ad6faecULL) + F(w15,0x6c44198c4a475817ULL) + + a += state[0]; + b += state[1]; + c += state[2]; + d += state[3]; + e += state[4]; + f += state[5]; + g += state[6]; + h += state[7]; + + state[0] = a; + state[1] = b; + state[2] = c; + state[3] = d; + 
state[4] = e; + state[5] = f; + state[6] = g; + state[7] = h; + + in += 128; + inlen -= 128; + } + + store_bigendian(statebytes + 0,state[0]); + store_bigendian(statebytes + 8,state[1]); + store_bigendian(statebytes + 16,state[2]); + store_bigendian(statebytes + 24,state[3]); + store_bigendian(statebytes + 32,state[4]); + store_bigendian(statebytes + 40,state[5]); + store_bigendian(statebytes + 48,state[6]); + store_bigendian(statebytes + 56,state[7]); + + return inlen; +} diff --git a/src/ed25519-supercop-amd64-51-30k/sha512-hash.c b/src/ed25519-supercop-amd64-51-30k/sha512-hash.c new file mode 100644 index 0000000..f2f2925 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/sha512-hash.c @@ -0,0 +1,72 @@ +/* +20080913 +D. J. Bernstein +Public domain. +*/ + +#include "sha512.h" + +extern int crypto_hashblocks(unsigned char *statebytes,const unsigned char *in,unsigned long long inlen); + +#define blocks crypto_hashblocks + +static const unsigned char iv[64] = { + 0x6a,0x09,0xe6,0x67,0xf3,0xbc,0xc9,0x08, + 0xbb,0x67,0xae,0x85,0x84,0xca,0xa7,0x3b, + 0x3c,0x6e,0xf3,0x72,0xfe,0x94,0xf8,0x2b, + 0xa5,0x4f,0xf5,0x3a,0x5f,0x1d,0x36,0xf1, + 0x51,0x0e,0x52,0x7f,0xad,0xe6,0x82,0xd1, + 0x9b,0x05,0x68,0x8c,0x2b,0x3e,0x6c,0x1f, + 0x1f,0x83,0xd9,0xab,0xfb,0x41,0xbd,0x6b, + 0x5b,0xe0,0xcd,0x19,0x13,0x7e,0x21,0x79 +} ; + +typedef unsigned long long uint64; + +int crypto_hash_sha512(unsigned char *out,const unsigned char *in,unsigned long long inlen) +{ + unsigned char h[64]; + unsigned char padded[256]; + int i; + unsigned long long bytes = inlen; + + for (i = 0;i < 64;++i) h[i] = iv[i]; + + blocks(h,in,inlen); + in += inlen; + inlen &= 127; + in -= inlen; + + for (i = 0;i < inlen;++i) padded[i] = in[i]; + padded[inlen] = 0x80; + + if (inlen < 112) { + for (i = inlen + 1;i < 119;++i) padded[i] = 0; + padded[119] = bytes >> 61; + padded[120] = bytes >> 53; + padded[121] = bytes >> 45; + padded[122] = bytes >> 37; + padded[123] = bytes >> 29; + padded[124] = bytes >> 21; + padded[125] = bytes >> 13; + padded[126] = bytes >> 5; + padded[127] = bytes << 3; + blocks(h,padded,128); + } else { + for (i = inlen + 1;i < 247;++i) padded[i] = 0; + padded[247] = bytes >> 61; + padded[248] = bytes >> 53; + padded[249] = bytes >> 45; + padded[250] = bytes >> 37; + padded[251] = bytes >> 29; + padded[252] = bytes >> 21; + padded[253] = bytes >> 13; + padded[254] = bytes >> 5; + padded[255] = bytes << 3; + blocks(h,padded,256); + } + + for (i = 0;i < 64;++i) out[i] = h[i]; + + return 0; +} diff --git a/src/ed25519-supercop-amd64-51-30k/sha512.h b/src/ed25519-supercop-amd64-51-30k/sha512.h new file mode 100644 index 0000000..37376b1 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/sha512.h @@ -0,0 +1,4 @@ +extern int crypto_hashblocks(unsigned char *statebytes,const unsigned char *in,unsigned long long inlen); +extern int crypto_hash_sha512(unsigned char *out,const unsigned char *in,unsigned long long inlen); + +#define crypto_hash_sha512_BYTES 64 diff --git a/src/ed25519-supercop-amd64-51-30k/test.c b/src/ed25519-supercop-amd64-51-30k/test.c new file mode 100644 index 0000000..cf344f7 --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/test.c @@ -0,0 +1,42 @@ + +#include <stdio.h> +#include <string.h> +#include <stdlib.h> +#include "crypto_sign.h" + +char *msg = "Hello World"; + +int main(int argc, char *argv[]) { + unsigned char sk[SECRETKEYBYTES], vk[PUBLICKEYBYTES]; + unsigned char *sigmsg, *newmsg; + unsigned long long sigmsglen, newmsglen; + int ret; + unsigned char seed[32]; + int i; + for (i=0; i<32; i++) seed[i] = i; // not so random + 
crypto_sign_publickey(vk, sk, seed); + printf("got keypair\n"); + sigmsg = malloc(strlen(msg)+1+SIGNATUREBYTES); + if (!sigmsg) + return 1; + crypto_sign(sigmsg, &sigmsglen, (unsigned char *)msg, strlen(msg)+1, sk); + printf("got signature\n"); + if (sigmsglen != strlen(msg)+1+SIGNATUREBYTES) + return 2; + newmsg = malloc(sigmsglen); + if (!newmsg) + return 3; + ret = crypto_sign_open(newmsg, &newmsglen, sigmsg, sigmsglen, vk); + printf("verified signature\n"); + if (ret == 0) + printf("good!\n"); + else + printf("bad\n"); + sigmsg[0] ^= 0x01; + ret = crypto_sign_open(newmsg, &newmsglen, sigmsg, sigmsglen, vk); + if (ret == 0) + printf("bad: failed to detect simple corruption\n"); + else + printf("good: detected simple corruption\n"); + return 0; +} diff --git a/src/ed25519-supercop-amd64-51-30k/verify.c b/src/ed25519-supercop-amd64-51-30k/verify.c new file mode 100644 index 0000000..a04186b --- /dev/null +++ b/src/ed25519-supercop-amd64-51-30k/verify.c @@ -0,0 +1,40 @@ +#include "crypto_verify_32.h" + +int crypto_verify_32(const unsigned char *x,const unsigned char *y) +{ + unsigned int differentbits = 0; +#define F(i) differentbits |= x[i] ^ y[i]; + F(0) + F(1) + F(2) + F(3) + F(4) + F(5) + F(6) + F(7) + F(8) + F(9) + F(10) + F(11) + F(12) + F(13) + F(14) + F(15) + F(16) + F(17) + F(18) + F(19) + F(20) + F(21) + F(22) + F(23) + F(24) + F(25) + F(26) + F(27) + F(28) + F(29) + F(30) + F(31) + return (1 & ((differentbits - 1) >> 8)) - 1; +} From cbd4af4130a448e955c52eef690050281e8e9763 Mon Sep 17 00:00:00 2001 From: Brian Warner Date: Fri, 10 Feb 2012 17:21:52 -0800 Subject: [PATCH 09/10] setup.py: switch to amd64-64-24k. Fails because distutils doesn't understand .s --- setup.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/setup.py b/setup.py index e4cd7db..523a856 100644 --- a/setup.py +++ b/setup.py @@ -22,12 +22,13 @@ """ sources = ["src/ed25519-glue/ed25519module.c"] -sources.extend(["src/ed25519-supercop-ref10/"+s - for s in os.listdir("src/ed25519-supercop-ref10") - if s.endswith(".c") and s!="test.c" and s!="keypair.c"]) +sources.extend(["src/ed25519-supercop-amd64-64-24k/"+s + for s in os.listdir("src/ed25519-supercop-amd64-64-24k") + if ((s.endswith(".c") or s.endswith(".s")) + and s!="test.c" and s!="keypair.c")]) m = Extension("ed25519._ed25519", - include_dirs=["src/ed25519-supercop-ref10"], sources=sources) + include_dirs=["src/ed25519-supercop-amd64-64-24k"], sources=sources) commands = versioneer.get_cmdclass().copy() From ab21bfacf5d1df6990beca02c72a4b84b9bb7f5a Mon Sep 17 00:00:00 2001 From: Brian Warner Date: Fri, 10 Feb 2012 17:50:22 -0800 Subject: [PATCH 10/10] try other flags --- setup.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 523a856..24ef397 100644 --- a/setup.py +++ b/setup.py @@ -25,10 +25,15 @@ sources.extend(["src/ed25519-supercop-amd64-64-24k/"+s for s in os.listdir("src/ed25519-supercop-amd64-64-24k") if ((s.endswith(".c") or s.endswith(".s")) - and s!="test.c" and s!="keypair.c")]) + and s!="test.c" and s!="keypair.c" and s!="batch.c")]) +from distutils import unixccompiler +unixccompiler.UnixCCompiler.src_extensions.append(".s") m = Extension("ed25519._ed25519", - include_dirs=["src/ed25519-supercop-amd64-64-24k"], sources=sources) + include_dirs=["src/ed25519-supercop-amd64-64-24k"], + sources=sources, + extra_compile_args=["-m64", "-march=core2"], + ) commands = versioneer.get_cmdclass().copy()
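
A note on the constant-time idioms in the patches above: smaller() in sc25519_from32bytes.c and crypto_verify_32() in verify.c both avoid data-dependent branches so that secret scalars and signature checks do not leak through timing. smaller() reads the borrow of a 64-bit subtraction out of the top bit by comparing 32-bit halves, and crypto_verify_32() ORs every byte difference into one accumulator before collapsing it to 0 or -1. The standalone sketch below demonstrates the same two idioms; it is illustrative only, and the names ct_smaller and ct_eq32 are not part of these patches.

#include <stdio.h>

typedef unsigned long long u64;

/* 1 if a < b, else 0, with no data-dependent branch: compare the
   32-bit halves so the borrow lands in the top bit of a 64-bit
   subtraction (the same trick as smaller() above). */
static u64 ct_smaller(u64 a, u64 b)
{
    u64 atop = a >> 32, abot = a & 0xffffffffULL;
    u64 btop = b >> 32, bbot = b & 0xffffffffULL;
    u64 atop_lt = (atop - btop) >> 63;       /* 1 iff atop < btop */
    u64 atop_eq = ((atop ^ btop) - 1) >> 63; /* 1 iff atop == btop */
    u64 abot_lt = (abot - bbot) >> 63;       /* 1 iff abot < bbot */
    return atop_lt | (atop_eq & abot_lt);
}

/* 0 if x equals y over 32 bytes, -1 otherwise; every byte is touched
   no matter where the first mismatch sits (as in crypto_verify_32). */
static int ct_eq32(const unsigned char *x, const unsigned char *y)
{
    unsigned int diff = 0;
    int i;
    for (i = 0; i < 32; i++)
        diff |= x[i] ^ y[i];
    return (1 & ((diff - 1) >> 8)) - 1;
}

int main(void)
{
    unsigned char a[32] = {0}, b[32] = {0};
    b[31] = 1;
    printf("%llu %llu\n", ct_smaller(3, 5), ct_smaller(5, 3)); /* 1 0 */
    printf("%d %d\n", ct_eq32(a, a), ct_eq32(a, b));           /* 0 -1 */
    return 0;
}

sc25519_from32bytes() combines the same borrow trick with a select mask: each pass computes t = r - (2^j)*order together with a borrow b = smaller(...), then uses mask = b - 1 to keep either r or t without branching, so the reduction runs in the same time for every input scalar.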