Skip to content

Commit 1348cd4

Browse files
woodsp-ibm authored and mergify[bot] committed
Fix CI for mypy and add explicit shape for COO (#919)
* Fix CI for mypy and add explicit shape for COO
* Fix style

(cherry picked from commit 4b918b0)
1 parent ba0545c commit 1348cd4

File tree

5 files changed

+19
-15
lines changed

5 files changed

+19
-15
lines changed

qiskit_machine_learning/algorithms/classifiers/pegasos_qsvc.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
# This code is part of a Qiskit project.
22
#
3-
# (C) Copyright IBM 2022, 2024.
3+
# (C) Copyright IBM 2022, 2025.
44
#
55
# This code is licensed under the Apache License, Version 2.0. You may
66
# obtain a copy of this license in the LICENSE.txt file in the root directory
@@ -193,7 +193,7 @@ def fit(
193193
# training loop
194194
for step in range(1, self._num_steps + 1):
195195
# for every step, a random index (determining a random datum) is fixed
196-
i = algorithm_globals.random.integers(0, len(y))
196+
i = int(algorithm_globals.random.integers(0, len(y)))
197197

198198
value = self._compute_weighted_kernel_sum(i, X, training=True)
199199

qiskit_machine_learning/connectors/torch_connector.py

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
# This code is part of a Qiskit project.
22
#
3-
# (C) Copyright IBM 2021, 2024.
3+
# (C) Copyright IBM 2021, 2025.
44
#
55
# This code is licensed under the Apache License, Version 2.0. You may
66
# obtain a copy of this license in the LICENSE.txt file in the root directory
@@ -237,7 +237,9 @@ def backward(ctx: Any, grad_output: Tensor) -> tuple: # type: ignore[override]
237237
from sparse import COO
238238

239239
grad_output = grad_output.detach().cpu()
240-
grad_coo = COO(grad_output.indices(), grad_output.values())
240+
grad_coo = COO(
241+
grad_output.indices(), grad_output.values(), shape=grad_output.shape
242+
)
241243

242244
# Takes gradients from previous layer in backward pass (i.e. later layer in
243245
# forward pass) j for each observation i in the batch. Multiplies this with
@@ -280,7 +282,9 @@ def backward(ctx: Any, grad_output: Tensor) -> tuple: # type: ignore[override]
280282
from sparse import COO
281283

282284
grad_output = grad_output.detach().cpu()
283-
grad_coo = COO(grad_output.indices(), grad_output.values())
285+
grad_coo = COO(
286+
grad_output.indices(), grad_output.values(), shape=grad_output.shape
287+
)
284288

285289
# Takes gradients from previous layer in backward pass (i.e. later layer in
286290
# forward pass) j for each observation i in the batch. Multiplies this with

qiskit_machine_learning/neural_networks/effective_dimension.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
# This code is part of a Qiskit project.
22
#
3-
# (C) Copyright IBM 2022, 2024.
3+
# (C) Copyright IBM 2022, 2025.
44
#
55
# This code is licensed under the Apache License, Version 2.0. You may
66
# obtain a copy of this license in the LICENSE.txt file in the root directory
@@ -13,7 +13,7 @@
1313

1414
import logging
1515
import time
16-
from typing import Union, List, Tuple
16+
from typing import Any, Union, List, Tuple
1717

1818
import numpy as np
1919
from scipy.special import logsumexp
@@ -136,14 +136,14 @@ def run_monte_carlo(self) -> Tuple[np.ndarray, np.ndarray]:
136136
outputs: QNN output vector, result of forward passes, of shape
137137
``(num_input_samples * num_weight_samples, output_size)``.
138138
"""
139-
grads = np.zeros(
139+
grads: Any = np.zeros(
140140
(
141141
self._num_input_samples * self._num_weight_samples,
142142
self._model.output_shape[0],
143143
self._model.num_weights,
144144
)
145145
)
146-
outputs = np.zeros(
146+
outputs: Any = np.zeros(
147147
(self._num_input_samples * self._num_weight_samples, self._model.output_shape[0])
148148
)
149149

qiskit_machine_learning/optimizers/adam_amsgrad.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
# This code is part of a Qiskit project.
22
#
3-
# (C) Copyright IBM 2019, 2024.
3+
# (C) Copyright IBM 2019, 2025.
44
#
55
# This code is licensed under the Apache License, Version 2.0. You may
66
# obtain a copy of this license in the LICENSE.txt file in the root directory
@@ -106,10 +106,10 @@ def __init__(
106106

107107
# runtime variables
108108
self._t = 0 # time steps
109-
self._m = np.zeros(1)
110-
self._v = np.zeros(1)
109+
self._m: Any = np.zeros(1)
110+
self._v: Any = np.zeros(1)
111111
if self._amsgrad:
112-
self._v_eff = np.zeros(1)
112+
self._v_eff: Any = np.zeros(1)
113113

114114
if self._snapshot_dir is not None:
115115
file_path = os.path.join(self._snapshot_dir, "adam_params.csv")

qiskit_machine_learning/optimizers/aqgd.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
# This code is part of a Qiskit project.
22
#
3-
# (C) Copyright IBM 2019, 2024.
3+
# (C) Copyright IBM 2019, 2025.
44
#
55
# This code is licensed under the Apache License, Version 2.0. You may
66
# obtain a copy of this license in the LICENSE.txt file in the root directory
@@ -161,7 +161,7 @@ def _compute_objective_fn_and_gradient(
161161
# Evaluate,
162162
# reshaping to flatten, as expected by objective function
163163
if self._max_evals_grouped > 1:
164-
batches = [
164+
batches: Any = [
165165
param_sets_to_eval[i : i + self._max_evals_grouped]
166166
for i in range(0, len(param_sets_to_eval), self._max_evals_grouped)
167167
]

0 commit comments

Comments (0)