diff --git a/qiskit_machine_learning/algorithms/classifiers/pegasos_qsvc.py b/qiskit_machine_learning/algorithms/classifiers/pegasos_qsvc.py
index 795fda8be..d87a50a2e 100644
--- a/qiskit_machine_learning/algorithms/classifiers/pegasos_qsvc.py
+++ b/qiskit_machine_learning/algorithms/classifiers/pegasos_qsvc.py
@@ -1,6 +1,6 @@
 # This code is part of a Qiskit project.
 #
-# (C) Copyright IBM 2022, 2024.
+# (C) Copyright IBM 2022, 2025.
 #
 # This code is licensed under the Apache License, Version 2.0. You may
 # obtain a copy of this license in the LICENSE.txt file in the root directory
@@ -193,7 +193,7 @@ def fit(
         # training loop
         for step in range(1, self._num_steps + 1):
             # for every step, a random index (determining a random datum) is fixed
-            i = algorithm_globals.random.integers(0, len(y))
+            i = int(algorithm_globals.random.integers(0, len(y)))
 
             value = self._compute_weighted_kernel_sum(i, X, training=True)
 
diff --git a/qiskit_machine_learning/connectors/torch_connector.py b/qiskit_machine_learning/connectors/torch_connector.py
index 241b51e4f..d4c55a822 100644
--- a/qiskit_machine_learning/connectors/torch_connector.py
+++ b/qiskit_machine_learning/connectors/torch_connector.py
@@ -1,6 +1,6 @@
 # This code is part of a Qiskit project.
 #
-# (C) Copyright IBM 2021, 2024.
+# (C) Copyright IBM 2021, 2025.
 #
 # This code is licensed under the Apache License, Version 2.0. You may
 # obtain a copy of this license in the LICENSE.txt file in the root directory
@@ -237,7 +237,9 @@ def backward(ctx: Any, grad_output: Tensor) -> tuple:  # type: ignore[override]
                 from sparse import COO
 
                 grad_output = grad_output.detach().cpu()
-                grad_coo = COO(grad_output.indices(), grad_output.values())
+                grad_coo = COO(
+                    grad_output.indices(), grad_output.values(), shape=grad_output.shape
+                )
 
                 # Takes gradients from previous layer in backward pass (i.e. later layer in
                 # forward pass) j for each observation i in the batch. Multiplies this with
@@ -280,7 +282,9 @@ def backward(ctx: Any, grad_output: Tensor) -> tuple:  # type: ignore[override]
                 from sparse import COO
 
                 grad_output = grad_output.detach().cpu()
-                grad_coo = COO(grad_output.indices(), grad_output.values())
+                grad_coo = COO(
+                    grad_output.indices(), grad_output.values(), shape=grad_output.shape
+                )
 
                 # Takes gradients from previous layer in backward pass (i.e. later layer in
                 # forward pass) j for each observation i in the batch. Multiplies this with
diff --git a/qiskit_machine_learning/neural_networks/effective_dimension.py b/qiskit_machine_learning/neural_networks/effective_dimension.py
index 1411b9bf8..4b6869a8c 100644
--- a/qiskit_machine_learning/neural_networks/effective_dimension.py
+++ b/qiskit_machine_learning/neural_networks/effective_dimension.py
@@ -1,6 +1,6 @@
 # This code is part of a Qiskit project.
 #
-# (C) Copyright IBM 2022, 2024.
+# (C) Copyright IBM 2022, 2025.
 #
 # This code is licensed under the Apache License, Version 2.0. You may
 # obtain a copy of this license in the LICENSE.txt file in the root directory
@@ -13,7 +13,7 @@
 
 import logging
 import time
-from typing import Union, List, Tuple
+from typing import Any, Union, List, Tuple
 
 import numpy as np
 from scipy.special import logsumexp
@@ -136,14 +136,14 @@ def run_monte_carlo(self) -> Tuple[np.ndarray, np.ndarray]:
             outputs: QNN output vector, result of forward passes, of shape
                 ``(num_input_samples * num_weight_samples, output_size)``.
         """
-        grads = np.zeros(
+        grads: Any = np.zeros(
             (
                 self._num_input_samples * self._num_weight_samples,
                 self._model.output_shape[0],
                 self._model.num_weights,
             )
         )
-        outputs = np.zeros(
+        outputs: Any = np.zeros(
             (self._num_input_samples * self._num_weight_samples, self._model.output_shape[0])
         )
 
diff --git a/qiskit_machine_learning/optimizers/adam_amsgrad.py b/qiskit_machine_learning/optimizers/adam_amsgrad.py
index 1fc6c9823..e28dc26ed 100644
--- a/qiskit_machine_learning/optimizers/adam_amsgrad.py
+++ b/qiskit_machine_learning/optimizers/adam_amsgrad.py
@@ -1,6 +1,6 @@
 # This code is part of a Qiskit project.
 #
-# (C) Copyright IBM 2019, 2024.
+# (C) Copyright IBM 2019, 2025.
 #
 # This code is licensed under the Apache License, Version 2.0. You may
 # obtain a copy of this license in the LICENSE.txt file in the root directory
@@ -106,10 +106,10 @@ def __init__(
 
         # runtime variables
         self._t = 0  # time steps
-        self._m = np.zeros(1)
-        self._v = np.zeros(1)
+        self._m: Any = np.zeros(1)
+        self._v: Any = np.zeros(1)
         if self._amsgrad:
-            self._v_eff = np.zeros(1)
+            self._v_eff: Any = np.zeros(1)
 
         if self._snapshot_dir is not None:
             file_path = os.path.join(self._snapshot_dir, "adam_params.csv")
diff --git a/qiskit_machine_learning/optimizers/aqgd.py b/qiskit_machine_learning/optimizers/aqgd.py
index 4de3fdfd7..423e5f680 100644
--- a/qiskit_machine_learning/optimizers/aqgd.py
+++ b/qiskit_machine_learning/optimizers/aqgd.py
@@ -1,6 +1,6 @@
 # This code is part of a Qiskit project.
 #
-# (C) Copyright IBM 2019, 2024.
+# (C) Copyright IBM 2019, 2025.
 #
 # This code is licensed under the Apache License, Version 2.0. You may
 # obtain a copy of this license in the LICENSE.txt file in the root directory
@@ -161,7 +161,7 @@ def _compute_objective_fn_and_gradient(
         # Evaluate,
         # reshaping to flatten, as expected by objective function
         if self._max_evals_grouped > 1:
-            batches = [
+            batches: Any = [
                 param_sets_to_eval[i : i + self._max_evals_grouped]
                 for i in range(0, len(param_sets_to_eval), self._max_evals_grouped)
             ]