  *
  * Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on pp. 335
  */
-static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w, int size) {
+static int secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar *scalar, int w, int size) {
     int global_sign;
     int skew = 0;
     int word = 0;
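
The doc comment above promises a fixed-length recoding in which every digit is odd and nonzero, so that sum(wnaf[i] * 2^(w*i)) reconstructs the (odd) input. A minimal standalone sketch of that odd-digit recoding idea on a small integer, under assumed toy parameters (w = 4, 32-bit input); this is illustrative code, not the library's implementation:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        const int w = 4;             /* window width */
        const int num = 8;           /* fixed digit count: 32 bits / w */
        const int64_t x = 0x1a2b3c5; /* input; must be odd */
        int64_t digits[8 + 1];
        int64_t rem = x, check = 0, pow = 1;
        int i;

        assert((x & 1) == 1);
        for (i = 0; i < num; i++) {
            int64_t d = rem & (((int64_t)1 << w) - 1); /* odd, since rem is odd */
            rem >>= w;
            if ((rem & 1) == 0) {    /* quotient even: borrow to keep it odd */
                d -= (int64_t)1 << w;
                rem += 1;
            }
            digits[i] = d;           /* odd digit in {+-1, ..., +-(2^w - 1)} */
        }
        digits[num] = rem;           /* final digit, also odd */

        for (i = 0; i <= num; i++) {
            assert((digits[i] & 1) == 1 && digits[i] != 0);
            check += digits[i] * pow;
            pow <<= w;
        }
        assert(check == x);          /* sum(digits[i] * 2^(w*i)) == x */
        printf("ok\n");
        return 0;
    }

The borrow step (subtract 2^w from the digit, add 1 to the quotient) is what keeps every digit odd and the digit count fixed regardless of the scalar's bit pattern, so the shape of the computation never depends on secret data.
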
@@ -59,7 +59,7 @@ static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w, int size)
 
     int flip;
     int bit;
-    secp256k1_scalar neg_s;
+    secp256k1_scalar s;
     int not_neg_one;
     /* Note that we cannot handle even numbers by negating them to be odd, as is
      * done in other implementations, since if our scalars were specified to have
@@ -75,12 +75,13 @@ static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w, int size)
      * {1, 2} we want to add to the scalar when ensuring that it's odd. Further
      * complicating things, -1 interacts badly with `secp256k1_scalar_cadd_bit` and
      * we need to special-case it in this logic. */
-    flip = secp256k1_scalar_is_high(&s);
+    flip = secp256k1_scalar_is_high(scalar);
     /* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */
-    bit = flip ^ !secp256k1_scalar_is_even(&s);
+    bit = flip ^ !secp256k1_scalar_is_even(scalar);
     /* We check for negative one, since adding 2 to it will cause an overflow */
-    secp256k1_scalar_negate(&neg_s, &s);
-    not_neg_one = !secp256k1_scalar_is_one(&neg_s);
+    secp256k1_scalar_negate(&s, scalar);
+    not_neg_one = !secp256k1_scalar_is_one(&s);
+    s = *scalar;
     secp256k1_scalar_cadd_bit(&s, bit, not_neg_one);
     /* If we had negative one, flip == 1, s.d[0] == 0, bit == 1, so caller expects
      * that we added two to it and flipped it. In fact for -1 these operations are
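
The parity bookkeeping in this hunk can be checked with ordinary integers: the group order n is odd, so negation flips parity, and adding 1 << bit with bit = flip ^ !is_even(s) always leaves an odd value after the conditional negation, with the original scalar recoverable via skew = 1 << bit and the global sign. A toy check over a small odd modulus (illustrative names, not library API; the s == n - 1 branch mirrors not_neg_one, skipping the conditional add that would otherwise overflow):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        const int64_t n = 1019;                    /* toy odd "group order" */
        int64_t s;
        for (s = 1; s < n; s++) {
            int flip = s > n / 2;                  /* stand-in for scalar_is_high */
            int bit = flip ^ (int)(s & 1);         /* flip ^ !is_even(s) */
            int not_neg_one = s != n - 1;          /* -1 needs the special case */
            int64_t t = s;
            int sign;
            if (not_neg_one) t += (int64_t)1 << bit;   /* cadd_bit */
            sign = flip ? -1 : 1;                  /* cond_negate */
            if (flip) t = n - t;
            sign *= not_neg_one ? 1 : -1;          /* for -1, claim "only skewed" */
            assert((t & 1) == 1);                  /* recoded scalar is odd */
            /* the input is recoverable from t, sign and skew: */
            assert((sign * t - ((int64_t)1 << bit) - s) % n == 0);
        }
        return 0;
    }
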
@@ -132,21 +133,20 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, const secp256k1_scalar *scalar, int size)
     int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)];
 
     int i;
-    secp256k1_scalar sc = *scalar;
 
     /* build wnaf representation for q. */
     int rsize = size;
 #ifdef USE_ENDOMORPHISM
     if (size > 128) {
         rsize = 128;
         /* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */
-        secp256k1_scalar_split_lambda(&q_1, &q_lam, &sc);
-        skew_1 = secp256k1_wnaf_const(wnaf_1, q_1, WINDOW_A - 1, 128);
-        skew_lam = secp256k1_wnaf_const(wnaf_lam, q_lam, WINDOW_A - 1, 128);
+        secp256k1_scalar_split_lambda(&q_1, &q_lam, scalar);
+        skew_1 = secp256k1_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128);
+        skew_lam = secp256k1_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128);
     } else
 #endif
     {
-        skew_1 = secp256k1_wnaf_const(wnaf_1, sc, WINDOW_A - 1, size);
+        skew_1 = secp256k1_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size);
 #ifdef USE_ENDOMORPHISM
         skew_lam = 0;
 #endif
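
The endomorphism branch above relies on the identity q = q_1 + q_lam*lambda (mod n), where lambda is a nontrivial cube root of unity modulo the group order, so q*P becomes q_1*P + q_lam*(lambda*P) over two half-length wNAF ladders. A toy model with a small odd order, treating the group additively as Z/nZ so that "scalar times point" is modular multiplication; the naive split below only demonstrates the identity, not the half-size property that secp256k1_scalar_split_lambda actually provides:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        const int64_t n = 103, lam = 56;  /* 56 is a cube root of 1 mod 103 */
        const int64_t p = 17;             /* an arbitrary "point" in Z/nZ */
        int64_t q;

        assert((lam * lam * lam) % n == 1 && lam != 1);
        for (q = 0; q < n; q++) {
            /* naive split: q = q_1 + q_lam*lam holds over the integers */
            int64_t q_lam = q / lam;
            int64_t q_1 = q - q_lam * lam;
            assert((q_1 + q_lam * lam) % n == q);
            /* q*P == q_1*P + q_lam*(lam*P): one precomputed image lam*P
             * replaces a second full-length ladder */
            assert((q_1 * p + q_lam * ((lam * p) % n)) % n == (q * p) % n);
        }
        printf("ok\n");
        return 0;
    }
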