@@ -955,4 +955,190 @@ static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const se
     r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1);
 }
 
+static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
+    secp256k1_scalar *t;
+    int i;
+    /* First compute xN as x ^ (2^N - 1) for some values of N,
+     * and uM as x ^ M for some values of M. */
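+    /* The inverse is computed as x^(n-2) mod n via Fermat's little theorem,
+     * where n is the (prime) secp256k1 group order; the chain below builds
+     * small powers that an addition chain for n-2 then combines. */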
+    secp256k1_scalar x2, x3, x6, x8, x14, x28, x56, x112, x126;
+    secp256k1_scalar u2, u5, u9, u11, u13;
+
+    secp256k1_scalar_sqr(&u2, x);
+    secp256k1_scalar_mul(&x2, &u2, x);
+    secp256k1_scalar_mul(&u5, &u2, &x2);
+    secp256k1_scalar_mul(&x3, &u5, &u2);
+    secp256k1_scalar_mul(&u9, &x3, &u2);
+    secp256k1_scalar_mul(&u11, &u9, &u2);
+    secp256k1_scalar_mul(&u13, &u11, &u2);
+
+    secp256k1_scalar_sqr(&x6, &u13);
+    secp256k1_scalar_sqr(&x6, &x6);
+    secp256k1_scalar_mul(&x6, &x6, &u11);
+
+    secp256k1_scalar_sqr(&x8, &x6);
+    secp256k1_scalar_sqr(&x8, &x8);
+    secp256k1_scalar_mul(&x8, &x8, &x2);
+
+    secp256k1_scalar_sqr(&x14, &x8);
+    for (i = 0; i < 5; i++) {
+        secp256k1_scalar_sqr(&x14, &x14);
+    }
+    secp256k1_scalar_mul(&x14, &x14, &x6);
+
+    secp256k1_scalar_sqr(&x28, &x14);
+    for (i = 0; i < 13; i++) {
+        secp256k1_scalar_sqr(&x28, &x28);
+    }
+    secp256k1_scalar_mul(&x28, &x28, &x14);
+
+    secp256k1_scalar_sqr(&x56, &x28);
+    for (i = 0; i < 27; i++) {
+        secp256k1_scalar_sqr(&x56, &x56);
+    }
+    secp256k1_scalar_mul(&x56, &x56, &x28);
+
+    secp256k1_scalar_sqr(&x112, &x56);
+    for (i = 0; i < 55; i++) {
+        secp256k1_scalar_sqr(&x112, &x112);
+    }
+    secp256k1_scalar_mul(&x112, &x112, &x56);
+
+    secp256k1_scalar_sqr(&x126, &x112);
+    for (i = 0; i < 13; i++) {
+        secp256k1_scalar_sqr(&x126, &x126);
+    }
+    secp256k1_scalar_mul(&x126, &x126, &x14);
+
+    /* Then accumulate the final result (t starts at x126). */
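+    /* The comments below track the binary expansion of n-2: each squaring
+     * loop shifts the exponent left (annotated zeros are leading zero bits
+     * appended), and each multiply appends the multiplier's bits. */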
+    t = &x126;
+    for (i = 0; i < 3; i++) {
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, &u5); /* 101 */
+    for (i = 0; i < 4; i++) { /* 0 */
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, &x3); /* 111 */
+    for (i = 0; i < 4; i++) { /* 0 */
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, &u5); /* 101 */
+    for (i = 0; i < 5; i++) { /* 0 */
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, &u11); /* 1011 */
+    for (i = 0; i < 4; i++) {
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, &u11); /* 1011 */
+    for (i = 0; i < 4; i++) { /* 0 */
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, &x3); /* 111 */
+    for (i = 0; i < 5; i++) { /* 00 */
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, &x3); /* 111 */
+    for (i = 0; i < 6; i++) { /* 00 */
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, &u13); /* 1101 */
+    for (i = 0; i < 4; i++) { /* 0 */
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, &u5); /* 101 */
+    for (i = 0; i < 3; i++) {
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, &x3); /* 111 */
+    for (i = 0; i < 5; i++) { /* 0 */
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, &u9); /* 1001 */
+    for (i = 0; i < 6; i++) { /* 000 */
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, &u5); /* 101 */
+    for (i = 0; i < 10; i++) { /* 0000000 */
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, &x3); /* 111 */
+    for (i = 0; i < 4; i++) { /* 0 */
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, &x3); /* 111 */
+    for (i = 0; i < 9; i++) { /* 0 */
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, &x8); /* 11111111 */
+    for (i = 0; i < 5; i++) { /* 0 */
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, &u9); /* 1001 */
+    for (i = 0; i < 6; i++) { /* 00 */
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, &u11); /* 1011 */
+    for (i = 0; i < 4; i++) {
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, &u13); /* 1101 */
+    for (i = 0; i < 5; i++) {
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, &x2); /* 11 */
+    for (i = 0; i < 6; i++) { /* 00 */
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, &u13); /* 1101 */
+    for (i = 0; i < 10; i++) { /* 000000 */
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, &u13); /* 1101 */
+    for (i = 0; i < 4; i++) {
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, &u9); /* 1001 */
+    for (i = 0; i < 6; i++) { /* 00000 */
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(t, t, x); /* 1 */
+    for (i = 0; i < 8; i++) { /* 00 */
+        secp256k1_scalar_sqr(t, t);
+    }
+    secp256k1_scalar_mul(r, t, &x6); /* 111111 */
+}
+
+static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
+#if defined(USE_SCALAR_INV_BUILTIN)
+    secp256k1_scalar_inverse(r, x);
+#elif defined(USE_SCALAR_INV_NUM)
+    unsigned char b[32];
+    secp256k1_num n, m;
+    secp256k1_scalar t = *x;
+    secp256k1_scalar_get_b32(b, &t);
+    secp256k1_num_set_bin(&n, b, 32);
+    secp256k1_scalar_order_get_num(&m);
+    secp256k1_num_mod_inverse(&n, &n, &m);
+    secp256k1_num_get_bin(b, 32, &n);
+    secp256k1_scalar_set_b32(r, b, NULL);
+    /* Verify that the inverse was computed correctly, without GMP code. */
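+    /* (x * x^-1 must equal 1; this guards against a broken bignum backend.) */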
+    secp256k1_scalar_mul(&t, &t, r);
+    CHECK(secp256k1_scalar_is_one(&t));
+#else
+#error "Please select scalar inverse implementation"
+#endif
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
+    return !(a->d[0] & 1);
+}
+
 #endif /* SECP256K1_SCALAR_REPR_IMPL_H */
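
As a standalone illustration (not part of the diff), here is a minimal sketch of the same Fermat-inversion idea over a small prime: instead of the fixed addition chain above, the exponent p-2 is evaluated by generic square-and-multiply, but the identity x^-1 = x^(p-2) mod p is the same. The helper names mulmod and invmod are hypothetical, chosen for this sketch only.

/* Minimal sketch: modular inverse via Fermat's little theorem.
 * Assumes p is prime and p < 2^32 so products fit in 64 bits. */
#include <stdint.h>
#include <stdio.h>

static uint64_t mulmod(uint64_t a, uint64_t b, uint64_t p) {
    return (a * b) % p; /* no overflow for p < 2^32 */
}

static uint64_t invmod(uint64_t x, uint64_t p) {
    uint64_t e = p - 2;      /* Fermat: x^(p-2) == x^-1 (mod p) */
    uint64_t base = x % p, r = 1;
    while (e > 0) {          /* generic square-and-multiply */
        if (e & 1) r = mulmod(r, base, p);
        base = mulmod(base, base, p);
        e >>= 1;
    }
    return r;
}

int main(void) {
    uint64_t p = 1000003, x = 12345;  /* 1000003 is prime */
    printf("%llu\n", (unsigned long long)mulmod(x, invmod(x, p), p)); /* prints 1 */
    return 0;
}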