From 6362aa0cdc847cf6b256d04d5664bcffb5001bec Mon Sep 17 00:00:00 2001 From: Vivek Miglani Date: Mon, 30 Dec 2024 09:20:17 -0800 Subject: [PATCH 01/19] Fix neuron_integrated_gradients pyre fixme issues (#1457) Summary: Fixing unresolved pyre fixme issues in corresponding file Reviewed By: craymichael Differential Revision: D67523072 --- captum/_utils/gradient.py | 8 ++++++-- captum/_utils/typing.py | 7 +++++++ .../attr/_core/neuron/neuron_integrated_gradients.py | 12 +++++++----- 3 files changed, 20 insertions(+), 7 deletions(-) diff --git a/captum/_utils/gradient.py b/captum/_utils/gradient.py index 69502b7443..1e2b827ab4 100644 --- a/captum/_utils/gradient.py +++ b/captum/_utils/gradient.py @@ -28,6 +28,7 @@ from captum._utils.sample_gradient import SampleGradientWrapper from captum._utils.typing import ( ModuleOrModuleList, + SliceIntType, TargetType, TensorOrTupleOfTensorsGeneric, ) @@ -775,8 +776,11 @@ def compute_layer_gradients_and_eval( def construct_neuron_grad_fn( layer: Module, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], device_ids: Union[None, List[int]] = None, attribute_to_neuron_input: bool = False, # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. diff --git a/captum/_utils/typing.py b/captum/_utils/typing.py index 10a2385611..80cf22d451 100644 --- a/captum/_utils/typing.py +++ b/captum/_utils/typing.py @@ -41,6 +41,13 @@ TensorLikeList5D, ] +try: + # Subscripted slice syntax is not supported in previous Python versions, + # falling back to slice type. + SliceIntType = slice[int, int, int] +except TypeError: + # pyre-fixme[24]: Generic type `slice` expects 3 type parameters. + SliceIntType = slice # type: ignore # Necessary for Python >=3.7 and <3.9! if TYPE_CHECKING: diff --git a/captum/attr/_core/neuron/neuron_integrated_gradients.py b/captum/attr/_core/neuron/neuron_integrated_gradients.py index 8e56221d77..0e4504bee9 100644 --- a/captum/attr/_core/neuron/neuron_integrated_gradients.py +++ b/captum/attr/_core/neuron/neuron_integrated_gradients.py @@ -4,7 +4,7 @@ from typing import Callable, List, Optional, Tuple, Union from captum._utils.gradient import construct_neuron_grad_fn -from captum._utils.typing import TensorOrTupleOfTensorsGeneric +from captum._utils.typing import SliceIntType, TensorOrTupleOfTensorsGeneric from captum.attr._core.integrated_gradients import IntegratedGradients from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution from captum.log import log_usage @@ -27,8 +27,7 @@ class NeuronIntegratedGradients(NeuronAttribution, GradientAttribution): def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: Module, device_ids: Union[None, List[int]] = None, multiply_by_inputs: bool = True, @@ -76,8 +75,11 @@ def __init__( def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. 
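The `SliceIntType` alias added to captum/_utils/typing.py uses the subscripted `slice[int, int, int]` form where the runtime accepts it and falls back to the bare `slice` type elsewhere; the neuron attribution signatures then reuse it in place of the previously pyre-fixme'd bare `Callable`. A minimal, self-contained sketch of the same pattern (the selector semantics below are illustrative, not Captum's exact indexing rules):

from typing import Callable, Tuple, Union

import torch
from torch import Tensor

try:
    # Newer Python versions accept subscripted builtins such as slice[int, int, int].
    SliceIntType = slice[int, int, int]
except TypeError:
    # Older runtimes raise TypeError on subscription; fall back to the plain type.
    SliceIntType = slice  # type: ignore

# The same shape of union the patch gives neuron_selector: a flat index, a tuple
# of indices/slices, or a callable reducing the layer output to one value per example.
NeuronSelector = Union[
    int,
    Tuple[Union[int, SliceIntType], ...],
    Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor],
]

def select_neuron(layer_output: Tensor, selector: NeuronSelector) -> Tensor:
    if callable(selector):
        return selector(layer_output)
    if isinstance(selector, int):
        return layer_output.flatten(start_dim=1)[:, selector]
    return layer_output[(slice(None),) + tuple(selector)]

acts = torch.randn(8, 4, 5)
print(select_neuron(acts, 3).shape)                     # flat integer index
print(select_neuron(acts, (2, slice(0, 3))).shape)      # tuple mixing ints and slices
print(select_neuron(acts, lambda t: t[:, 0, 0]).shape)  # callable selector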
- neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], baselines: Union[None, Tensor, Tuple[Tensor, ...]] = None, additional_forward_args: Optional[object] = None, n_steps: int = 50, From 25a5ee382fb593b07b12af4f2ac6d66a3fae9222 Mon Sep 17 00:00:00 2001 From: Vivek Miglani Date: Mon, 30 Dec 2024 09:20:17 -0800 Subject: [PATCH 02/19] Fix neuron conductance pyre fixme issues (#1458) Summary: Fixing unresolved pyre fixme issues in corresponding file Differential Revision: D67523217 --- .../attr/_core/neuron/neuron_conductance.py | 55 ++++++++++--------- 1 file changed, 30 insertions(+), 25 deletions(-) diff --git a/captum/attr/_core/neuron/neuron_conductance.py b/captum/attr/_core/neuron/neuron_conductance.py index 359736d0c9..6c8020f93e 100644 --- a/captum/attr/_core/neuron/neuron_conductance.py +++ b/captum/attr/_core/neuron/neuron_conductance.py @@ -14,7 +14,12 @@ _verify_select_neuron, ) from captum._utils.gradient import compute_layer_gradients_and_eval -from captum._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric +from captum._utils.typing import ( + BaselineType, + SliceIntType, + TargetType, + TensorOrTupleOfTensorsGeneric, +) from captum.attr._utils.approximation_methods import approximation_parameters from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution from captum.attr._utils.batching import _batch_attribution @@ -39,8 +44,7 @@ class NeuronConductance(NeuronAttribution, GradientAttribution): def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: Module, device_ids: Union[None, List[int]] = None, multiply_by_inputs: bool = True, @@ -94,8 +98,11 @@ def __init__( def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[int, ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], baselines: BaselineType = None, target: TargetType = None, additional_forward_args: Optional[object] = None, @@ -285,28 +292,24 @@ def attribute( " results.", stacklevel=1, ) - # pyre-fixme[6]: For 1st argument expected `Tensor` but got - # `TensorOrTupleOfTensorsGeneric`. is_inputs_tuple = _is_tuple(inputs) - # pyre-fixme[9]: inputs has type `TensorOrTupleOfTensorsGeneric`; used as - # `Tuple[Tensor, ...]`. - inputs, baselines = _format_input_baseline(inputs, baselines) - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but got - # `TensorOrTupleOfTensorsGeneric`. 
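Several of the patches in this series clear pyre-fixme[9]/[6] suppressions the same way this hunk does: the normalized value is bound to a fresh name (formatted_inputs, formatted_baselines) instead of reassigning the generically typed parameter, so each name keeps a single, stable type. A small sketch of that pattern in isolation (the formatting helper is a stand-in, not Captum's):

from typing import Tuple, Union

import torch
from torch import Tensor

TensorOrTuple = Union[Tensor, Tuple[Tensor, ...]]

def _format_into_tuple(inputs: TensorOrTuple) -> Tuple[Tensor, ...]:
    # Stand-in for the library's formatting helpers: always hand back a tuple.
    return inputs if isinstance(inputs, tuple) else (inputs,)

def total_norm(inputs: TensorOrTuple) -> Tensor:
    # Reassigning `inputs` here would change its type mid-function and trip the
    # checker; a new name keeps the original and the normalized value distinct.
    formatted_inputs = _format_into_tuple(inputs)
    return torch.stack([t.norm() for t in formatted_inputs]).sum()

print(total_norm(torch.ones(2, 3)))
print(total_norm((torch.ones(2), torch.zeros(3))))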
- _validate_input(inputs, baselines, n_steps, method) + formatted_inputs, formatted_baselines = _format_input_baseline( + inputs, baselines + ) + _validate_input(formatted_inputs, formatted_baselines, n_steps, method) - num_examples = inputs[0].shape[0] + num_examples = formatted_inputs[0].shape[0] if internal_batch_size is not None: - num_examples = inputs[0].shape[0] + num_examples = formatted_inputs[0].shape[0] attrs = _batch_attribution( self, num_examples, internal_batch_size, n_steps, - inputs=inputs, - baselines=baselines, + inputs=formatted_inputs, + baselines=formatted_baselines, neuron_selector=neuron_selector, target=target, additional_forward_args=additional_forward_args, @@ -315,11 +318,9 @@ def attribute( ) else: attrs = self._attribute( - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but - # got `TensorOrTupleOfTensorsGeneric`. - inputs=inputs, + inputs=formatted_inputs, neuron_selector=neuron_selector, - baselines=baselines, + baselines=formatted_baselines, target=target, additional_forward_args=additional_forward_args, n_steps=n_steps, @@ -334,8 +335,11 @@ def attribute( def _attribute( self, inputs: Tuple[Tensor, ...], - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[int, ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], baselines: Tuple[Union[Tensor, int, float], ...], target: TargetType = None, additional_forward_args: Optional[object] = None, @@ -409,8 +413,9 @@ def _attribute( # Aggregates across all steps for each tensor in the input tuple total_grads = tuple( - # pyre-fixme[6]: For 4th argument expected `Tuple[int, ...]` but got `Size`. - _reshape_and_sum(scaled_grad, n_steps, num_examples, input_grad.shape[1:]) + _reshape_and_sum( + scaled_grad, n_steps, num_examples, tuple(input_grad.shape[1:]) + ) for (scaled_grad, input_grad) in zip(scaled_grads, input_grads) ) From bb04ff5021098d267d20392c3eced8d00591197f Mon Sep 17 00:00:00 2001 From: Vivek Miglani Date: Mon, 30 Dec 2024 09:20:17 -0800 Subject: [PATCH 03/19] Fix neuron deep lift pyre fixme issues (#1461) Summary: Fixing unresolved pyre fixme issues in corresponding file Differential Revision: D67704291 --- captum/attr/_core/neuron/neuron_deep_lift.py | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/captum/attr/_core/neuron/neuron_deep_lift.py b/captum/attr/_core/neuron/neuron_deep_lift.py index f4648b43b5..e7e3f2a77e 100644 --- a/captum/attr/_core/neuron/neuron_deep_lift.py +++ b/captum/attr/_core/neuron/neuron_deep_lift.py @@ -4,7 +4,11 @@ from typing import Callable, cast, Optional, Tuple, Union from captum._utils.gradient import construct_neuron_grad_fn -from captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric +from captum._utils.typing import ( + BaselineType, + SliceIntType, + TensorOrTupleOfTensorsGeneric, +) from captum.attr._core.deep_lift import DeepLift, DeepLiftShap from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution from captum.log import log_usage @@ -79,8 +83,11 @@ def __init__( def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. 
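The callable branch of the new annotation, Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], is for selectors that reduce the layer output to one value per example themselves, e.g. when a plain index or slice cannot express the neuron of interest. A sketch of such a selector, assuming a (batch, channels, H, W) layer output:

from typing import Tuple, Union

import torch
from torch import Tensor

def channel_mean_selector(layer_out: Union[Tensor, Tuple[Tensor, ...]]) -> Tensor:
    # Treat single-tensor layers uniformly with layers that return tuples.
    out = layer_out[0] if isinstance(layer_out, tuple) else layer_out
    # One value per example: the mean activation of channel 2.
    return out[:, 2].mean(dim=(1, 2))

acts = torch.randn(4, 8, 6, 6)
print(channel_mean_selector(acts).shape)  # torch.Size([4])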
- neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], baselines: BaselineType = None, additional_forward_args: Optional[object] = None, attribute_to_neuron_input: bool = False, @@ -309,8 +316,11 @@ def __init__( def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], baselines: Union[ TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric] ], From 09c878abb0ca4910e9094aa03e8317a848f453ca Mon Sep 17 00:00:00 2001 From: Vivek Miglani Date: Mon, 30 Dec 2024 09:20:17 -0800 Subject: [PATCH 04/19] Fix neuron feature ablation pyre fixme issues (#1462) Summary: Fixing unresolved pyre fixme issues in corresponding file Differential Revision: D67705096 --- .../_core/neuron/neuron_feature_ablation.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/captum/attr/_core/neuron/neuron_feature_ablation.py b/captum/attr/_core/neuron/neuron_feature_ablation.py index c72cf806a1..d391481ed4 100644 --- a/captum/attr/_core/neuron/neuron_feature_ablation.py +++ b/captum/attr/_core/neuron/neuron_feature_ablation.py @@ -6,7 +6,11 @@ import torch from captum._utils.common import _verify_select_neuron from captum._utils.gradient import _forward_layer_eval -from captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric +from captum._utils.typing import ( + BaselineType, + SliceIntType, + TensorOrTupleOfTensorsGeneric, +) from captum.attr._core.feature_ablation import FeatureAblation from captum.attr._utils.attribution import NeuronAttribution, PerturbationAttribution from captum.log import log_usage @@ -31,8 +35,7 @@ class NeuronFeatureAblation(NeuronAttribution, PerturbationAttribution): def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Union[int, float, Tensor]], layer: Module, device_ids: Union[None, List[int]] = None, ) -> None: @@ -61,8 +64,11 @@ def __init__( def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], baselines: BaselineType = None, additional_forward_args: Optional[object] = None, feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None, @@ -250,8 +256,7 @@ def attribute( >>> feature_mask=feature_mask) """ - # pyre-fixme[3]: Return type must be annotated. 
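The neuron_forward_func defined just below now advertises that it returns a Tensor: it runs the wrapped model, reads the chosen layer's activation, and reduces it to one value per example so FeatureAblation can treat the neuron like a model output. A toy version of that wrapping, with a forward hook standing in for Captum's layer-eval machinery:

from typing import Tuple

import torch
import torch.nn as nn
from torch import Tensor

model = nn.Sequential(nn.Linear(3, 4), nn.ReLU(), nn.Linear(4, 2))

def neuron_forward_func(inp: Tensor, neuron_index: Tuple[int, ...]) -> Tensor:
    captured = {}

    def hook(_mod: nn.Module, _inp: Tuple[Tensor, ...], out: Tensor) -> None:
        captured["act"] = out

    handle = model[0].register_forward_hook(hook)
    try:
        with torch.no_grad():
            model(inp)
    finally:
        handle.remove()
    # One value per example: the selected neuron of the hooked layer.
    return captured["act"][(slice(None),) + neuron_index]

print(neuron_forward_func(torch.randn(5, 3), (1,)).shape)  # torch.Size([5])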
- def neuron_forward_func(*args: Any): + def neuron_forward_func(*args: Any) -> Tensor: with torch.no_grad(): layer_eval = _forward_layer_eval( self.forward_func, From 40675932368791b9629a833a202c534be779c7f0 Mon Sep 17 00:00:00 2001 From: Vivek Miglani Date: Mon, 30 Dec 2024 09:20:17 -0800 Subject: [PATCH 05/19] Fix neuron gradient shap pyre fixme issues (#1463) Summary: Fixing unresolved pyre fixme issues in corresponding file Differential Revision: D67705098 --- captum/attr/_core/neuron/neuron_gradient_shap.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/captum/attr/_core/neuron/neuron_gradient_shap.py b/captum/attr/_core/neuron/neuron_gradient_shap.py index 18b6507231..b0b82084f5 100644 --- a/captum/attr/_core/neuron/neuron_gradient_shap.py +++ b/captum/attr/_core/neuron/neuron_gradient_shap.py @@ -4,10 +4,11 @@ from typing import Callable, List, Optional, Tuple, Union from captum._utils.gradient import construct_neuron_grad_fn -from captum._utils.typing import TensorOrTupleOfTensorsGeneric +from captum._utils.typing import SliceIntType, TensorOrTupleOfTensorsGeneric from captum.attr._core.gradient_shap import GradientShap from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution from captum.log import log_usage +from torch import Tensor from torch.nn import Module @@ -50,8 +51,7 @@ class NeuronGradientShap(NeuronAttribution, GradientAttribution): def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Union[int, float, Tensor]], layer: Module, device_ids: Union[None, List[int]] = None, multiply_by_inputs: bool = True, @@ -97,8 +97,11 @@ def __init__( def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], baselines: Union[ TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric] ], From dc12fd2a2df21a3da91b77ed34d4d63b7181a156 Mon Sep 17 00:00:00 2001 From: Vivek Miglani Date: Mon, 30 Dec 2024 09:20:17 -0800 Subject: [PATCH 06/19] Fix neuron gradient pyre fixme issues (#1464) Summary: Fixing unresolved pyre fixme issues in corresponding file Differential Revision: D67704365 --- captum/attr/_core/neuron/neuron_gradient.py | 33 ++++++++++----------- 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/captum/attr/_core/neuron/neuron_gradient.py b/captum/attr/_core/neuron/neuron_gradient.py index 0e74382d33..b806c1f4c2 100644 --- a/captum/attr/_core/neuron/neuron_gradient.py +++ b/captum/attr/_core/neuron/neuron_gradient.py @@ -14,9 +14,10 @@ apply_gradient_requirements, undo_gradient_requirements, ) -from captum._utils.typing import TensorOrTupleOfTensorsGeneric +from captum._utils.typing import SliceIntType, TensorOrTupleOfTensorsGeneric from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution from captum.log import log_usage +from torch import Tensor from torch.nn import Module @@ -28,8 +29,7 @@ class NeuronGradient(NeuronAttribution, GradientAttribution): def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. 
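Most of the signature fixes in this series replace a bare `Callable` (the pyre-fixme[24] case) with a parameterized form that leaves the arguments open via `...` but pins down the return type. A toy illustration of what that buys, with a stand-in forward function:

from typing import Callable, Union

import torch
from torch import Tensor

# `Callable` alone says nothing about arguments or result; `Callable[..., X]`
# keeps the arguments unconstrained while letting the checker track the result.
ForwardFunc = Callable[..., Union[int, float, Tensor]]

def run_forward(forward_func: ForwardFunc, *inputs: Tensor) -> Tensor:
    out = forward_func(*inputs)
    # Normalize scalar returns so downstream code can always rely on a Tensor.
    return out if isinstance(out, Tensor) else torch.tensor(float(out))

model: ForwardFunc = lambda x: (x * 2.0).sum()
score: ForwardFunc = lambda x: float(x.mean())
print(run_forward(model, torch.ones(3)))
print(run_forward(score, torch.arange(4.0)))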
- forward_func: Callable, + forward_func: Callable[..., Union[int, float, Tensor]], layer: Module, device_ids: Union[None, List[int]] = None, ) -> None: @@ -60,8 +60,11 @@ def __init__( def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], additional_forward_args: Optional[object] = None, attribute_to_neuron_input: bool = False, ) -> TensorOrTupleOfTensorsGeneric: @@ -162,18 +165,12 @@ def attribute( >>> # index (4,1,2). >>> attribution = neuron_ig.attribute(input, (4,1,2)) """ - # pyre-fixme[6]: For 1st argument expected `Tensor` but got - # `TensorOrTupleOfTensorsGeneric`. is_inputs_tuple = _is_tuple(inputs) - # pyre-fixme[9]: inputs has type `TensorOrTupleOfTensorsGeneric`; used as - # `Tuple[Tensor, ...]`. - inputs = _format_tensor_into_tuples(inputs) + inputs_tuple = _format_tensor_into_tuples(inputs) additional_forward_args = _format_additional_forward_args( additional_forward_args ) - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but got - # `TensorOrTupleOfTensorsGeneric`. - gradient_mask = apply_gradient_requirements(inputs) + gradient_mask = apply_gradient_requirements(inputs_tuple) _, input_grads = _forward_layer_eval_with_neuron_grads( self.forward_func, @@ -185,9 +182,9 @@ def attribute( attribute_to_layer_input=attribute_to_neuron_input, ) - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but got - # `TensorOrTupleOfTensorsGeneric`. - undo_gradient_requirements(inputs, gradient_mask) - # pyre-fixme[7]: Expected `TensorOrTupleOfTensorsGeneric` but got - # `Tuple[Tensor, ...]`. + undo_gradient_requirements(inputs_tuple, gradient_mask) + + # pyre-fixme[7]: Expected `Variable[TensorOrTupleOfTensorsGeneric <: + # [Tensor, typing.Tuple[Tensor, ...]]]` but got `Union[Tensor, + # typing.Tuple[Tensor, ...]]`. return _format_output(is_inputs_tuple, input_grads) From c815b72db274b70e1a444018d227f75a607f3b80 Mon Sep 17 00:00:00 2001 From: Vivek Miglani Date: Mon, 30 Dec 2024 09:20:17 -0800 Subject: [PATCH 07/19] Fix neuron guided backprop pyre fixme issues (#1465) Summary: Fixing unresolved pyre fixme issues in corresponding file Differential Revision: D67705091 --- .../neuron/neuron_guided_backprop_deconvnet.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/captum/attr/_core/neuron/neuron_guided_backprop_deconvnet.py b/captum/attr/_core/neuron/neuron_guided_backprop_deconvnet.py index 03f3e14186..4b3720c96f 100644 --- a/captum/attr/_core/neuron/neuron_guided_backprop_deconvnet.py +++ b/captum/attr/_core/neuron/neuron_guided_backprop_deconvnet.py @@ -4,10 +4,11 @@ from typing import Callable, List, Optional, Tuple, Union from captum._utils.gradient import construct_neuron_grad_fn -from captum._utils.typing import TensorOrTupleOfTensorsGeneric +from captum._utils.typing import SliceIntType, TensorOrTupleOfTensorsGeneric from captum.attr._core.guided_backprop_deconvnet import Deconvolution, GuidedBackprop from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution from captum.log import log_usage +from torch import Tensor from torch.nn import Module @@ -60,8 +61,11 @@ def __init__( def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. 
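The NeuronGradient rewrite above is the recurring normalize-then-restore idiom: record whether the caller passed a bare Tensor, do all internal work on a tuple (inputs_tuple), and format the result back into the caller's shape at the end. A compact sketch of that round trip, with the helper logic inlined rather than Captum's utilities:

from typing import Tuple, Union

import torch
from torch import Tensor

TensorOrTuple = Union[Tensor, Tuple[Tensor, ...]]

def scale_inputs(inputs: TensorOrTuple, factor: float) -> TensorOrTuple:
    is_inputs_tuple = isinstance(inputs, tuple)
    inputs_tuple = inputs if isinstance(inputs, tuple) else (inputs,)

    outputs = tuple(t * factor for t in inputs_tuple)

    # Mirror the _format_output step: hand back the same shape the caller used.
    return outputs if is_inputs_tuple else outputs[0]

print(scale_inputs(torch.ones(2), 3.0))
print(scale_inputs((torch.ones(1), torch.zeros(1)), 3.0))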
- neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], additional_forward_args: Optional[object] = None, attribute_to_neuron_input: bool = False, ) -> TensorOrTupleOfTensorsGeneric: @@ -215,8 +219,11 @@ def __init__( def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], additional_forward_args: Optional[object] = None, attribute_to_neuron_input: bool = False, ) -> TensorOrTupleOfTensorsGeneric: From 0c3092dd367aada4596eb6425817f3ac35dd5570 Mon Sep 17 00:00:00 2001 From: Vivek Miglani Date: Mon, 30 Dec 2024 09:20:17 -0800 Subject: [PATCH 08/19] Fix gradcam pyre fixme issues (#1466) Summary: Fixing unresolved pyre fixme issues in corresponding file Reviewed By: cyrjano Differential Revision: D67705191 --- captum/attr/_core/layer/grad_cam.py | 22 +++------------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/captum/attr/_core/layer/grad_cam.py b/captum/attr/_core/layer/grad_cam.py index eed6397609..d57049ad8e 100644 --- a/captum/attr/_core/layer/grad_cam.py +++ b/captum/attr/_core/layer/grad_cam.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # pyre-strict -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union import torch import torch.nn.functional as F @@ -54,8 +54,7 @@ class LayerGradCam(LayerAttribution, GradientAttribution): def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: Module, device_ids: Union[None, List[int]] = None, ) -> None: @@ -201,7 +200,7 @@ def attribute( # hidden layer and hidden layer evaluated at each input. layer_gradients, layer_evals = compute_layer_gradients_and_eval( self.forward_func, - self.layer, + cast(Module, self.layer), inputs, target, additional_forward_args, @@ -213,10 +212,7 @@ def attribute( summed_grads = tuple( ( torch.mean( - # pyre-fixme[6]: For 1st argument expected `Tensor` but got - # `Tuple[Tensor, ...]`. layer_grad, - # pyre-fixme[16]: `tuple` has no attribute `shape`. dim=tuple(x for x in range(2, len(layer_grad.shape))), keepdim=True, ) @@ -228,27 +224,15 @@ def attribute( if attr_dim_summation: scaled_acts = tuple( - # pyre-fixme[58]: `*` is not supported for operand types - # `Union[tuple[torch._tensor.Tensor], torch._tensor.Tensor]` and - # `Tuple[Tensor, ...]`. - # pyre-fixme[6]: For 1st argument expected `Tensor` but got - # `Tuple[Tensor, ...]`. torch.sum(summed_grad * layer_eval, dim=1, keepdim=True) for summed_grad, layer_eval in zip(summed_grads, layer_evals) ) else: scaled_acts = tuple( - # pyre-fixme[58]: `*` is not supported for operand types - # `Union[tuple[torch._tensor.Tensor], torch._tensor.Tensor]` and - # `Tuple[Tensor, ...]`. summed_grad * layer_eval for summed_grad, layer_eval in zip(summed_grads, layer_evals) ) if relu_attributions: - # pyre-fixme[6]: For 1st argument expected `Tensor` but got - # `Union[tuple[Tensor], Tensor]`. 
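With the suppressions removed, the GradCAM core reads directly: average the layer gradients over the trailing (spatial) dimensions to get per-channel weights, weight the layer activations with them, optionally sum over channels, and clamp with ReLU. A standalone sketch of that computation on dummy activations:

import torch
import torch.nn.functional as F
from torch import Tensor

def grad_cam_map(
    layer_grad: Tensor, layer_eval: Tensor, relu_attributions: bool = True
) -> Tensor:
    # layer_grad / layer_eval: (batch, channels, H, W) from the chosen conv layer.
    weights = torch.mean(
        layer_grad, dim=tuple(range(2, layer_grad.dim())), keepdim=True
    )  # (batch, channels, 1, 1)
    scaled_acts = torch.sum(weights * layer_eval, dim=1, keepdim=True)  # (batch, 1, H, W)
    return F.relu(scaled_acts) if relu_attributions else scaled_acts

grads = torch.randn(2, 8, 7, 7)
acts = torch.randn(2, 8, 7, 7)
print(grad_cam_map(grads, acts).shape)  # torch.Size([2, 1, 7, 7])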
scaled_acts = tuple(F.relu(scaled_act) for scaled_act in scaled_acts) - # pyre-fixme[6]: For 2nd argument expected `Tuple[Tensor, ...]` but got - # `Tuple[Union[tuple[Tensor], Tensor], ...]`. return _format_output(len(scaled_acts) > 1, scaled_acts) From 2dfd8e8ed61eb11461527995b0f6f0492331aa54 Mon Sep 17 00:00:00 2001 From: Vivek Miglani Date: Mon, 30 Dec 2024 09:20:17 -0800 Subject: [PATCH 09/19] Fix internal influence pyre fixme issues (#1467) Summary: Fixing unresolved pyre fixme issues in corresponding file Differential Revision: D67705214 --- captum/attr/_core/layer/internal_influence.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/captum/attr/_core/layer/internal_influence.py b/captum/attr/_core/layer/internal_influence.py index 47b69ffb2b..a0bbffee20 100644 --- a/captum/attr/_core/layer/internal_influence.py +++ b/captum/attr/_core/layer/internal_influence.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # pyre-strict -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union import torch from captum._utils.common import ( @@ -41,8 +41,7 @@ class InternalInfluence(LayerAttribution, GradientAttribution): def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: Module, device_ids: Union[None, List[int]] = None, ) -> None: @@ -293,7 +292,7 @@ def _attribute( # Returns gradient of output with respect to hidden layer. layer_gradients, _ = compute_layer_gradients_and_eval( forward_fn=self.forward_func, - layer=self.layer, + layer=cast(Module, self.layer), inputs=scaled_features_tpl, target_ind=expanded_target, additional_forward_args=input_additional_args, @@ -304,9 +303,7 @@ def _attribute( # flattening grads so that we can multiply it with step-size # calling contiguous to avoid `memory whole` problems scaled_grads = tuple( - # pyre-fixme[16]: `tuple` has no attribute `contiguous`. layer_grad.contiguous().view(n_steps, -1) - # pyre-fixme[16]: `tuple` has no attribute `device`. * torch.tensor(step_sizes).view(n_steps, 1).to(layer_grad.device) for layer_grad in layer_gradients ) @@ -317,8 +314,7 @@ def _attribute( scaled_grad, n_steps, inputs[0].shape[0], - # pyre-fixme[16]: `tuple` has no attribute `shape`. - layer_grad.shape[1:], + tuple(layer_grad.shape[1:]), ) for scaled_grad, layer_grad in zip(scaled_grads, layer_gradients) ) From fe1ef8e63a0b71b5028a4164070f226bf1284f7e Mon Sep 17 00:00:00 2001 From: Vivek Miglani Date: Mon, 30 Dec 2024 09:20:17 -0800 Subject: [PATCH 10/19] Fix layer activation pyre fixme issues (#1468) Summary: Fixing unresolved pyre fixme issues in corresponding file Differential Revision: D67706972 --- captum/attr/_core/layer/layer_activation.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/captum/attr/_core/layer/layer_activation.py b/captum/attr/_core/layer/layer_activation.py index 076323a274..d9aea9b27d 100644 --- a/captum/attr/_core/layer/layer_activation.py +++ b/captum/attr/_core/layer/layer_activation.py @@ -20,8 +20,7 @@ class LayerActivation(LayerAttribution): def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. 
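The internal_influence hunk above does two things the annotations now make explicit: it weights the flattened per-step layer gradients by the integration step sizes, and it passes `tuple(layer_grad.shape[1:])` rather than a `torch.Size` to the reshape-and-sum aggregation. A small sketch of both steps, assuming gradients stacked as (n_steps * batch, *layer_dims):

from typing import List, Tuple

import torch
from torch import Tensor

def reshape_and_sum(
    grads: Tensor, n_steps: int, num_examples: int, layer_dims: Tuple[int, ...]
) -> Tensor:
    # Collapse the step axis: (n_steps * batch, *dims) -> (batch, *dims).
    return torch.sum(grads.reshape((n_steps, num_examples) + layer_dims), dim=0)

def integrate_layer_grads(
    layer_grad: Tensor, step_sizes: List[float], n_steps: int, num_examples: int
) -> Tensor:
    weights = torch.tensor(step_sizes).view(n_steps, 1).to(layer_grad.device)
    # Flatten after the step axis so the weighting is a plain broadcast.
    scaled = layer_grad.contiguous().view(n_steps, -1) * weights
    return reshape_and_sum(scaled, n_steps, num_examples, tuple(layer_grad.shape[1:]))

grads = torch.randn(4 * 2, 3)  # 4 steps, batch of 2, layer width 3
attrs = integrate_layer_grads(grads, [0.25] * 4, n_steps=4, num_examples=2)
print(attrs.shape)             # torch.Size([2, 3])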
- forward_func: Callable, + forward_func: Callable[..., Union[int, float, Tensor]], layer: ModuleOrModuleList, device_ids: Union[None, List[int]] = None, ) -> None: @@ -132,8 +131,6 @@ def attribute( ) else: return [ - # pyre-fixme[6]: For 2nd argument expected `Tuple[Tensor, ...]` but - # got `Tensor`. _format_output(len(single_layer_eval) > 1, single_layer_eval) for single_layer_eval in layer_eval ] From 8bea2e27598e5d8a0e35c2c3271fd9296a0f8233 Mon Sep 17 00:00:00 2001 From: Vivek Miglani Date: Mon, 30 Dec 2024 09:20:17 -0800 Subject: [PATCH 11/19] Fix layer conductance pyre fixme issues (#1469) Summary: Fixing unresolved pyre fixme issues in corresponding file Reviewed By: cyrjano Differential Revision: D67705320 --- captum/attr/_core/layer/layer_conductance.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/captum/attr/_core/layer/layer_conductance.py b/captum/attr/_core/layer/layer_conductance.py index 1f1a5f4676..2d15d25270 100644 --- a/captum/attr/_core/layer/layer_conductance.py +++ b/captum/attr/_core/layer/layer_conductance.py @@ -2,7 +2,7 @@ # pyre-strict import typing -from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union +from typing import Any, Callable, cast, Dict, List, Literal, Optional, Tuple, Union import torch from captum._utils.common import ( @@ -44,8 +44,7 @@ class LayerConductance(LayerAttribution, GradientAttribution): def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: Module, device_ids: Union[None, List[int]] = None, ) -> None: @@ -73,8 +72,6 @@ def has_convergence_delta(self) -> bool: return True @typing.overload - # pyre-fixme[43]: The implementation of `attribute` does not accept all possible - # arguments of overload defined on line `75`. def attribute( self, inputs: Union[Tensor, Tuple[Tensor, ...]], @@ -91,8 +88,6 @@ def attribute( ) -> Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor]: ... @typing.overload - # pyre-fixme[43]: The implementation of `attribute` does not accept all possible - # arguments of overload defined on line `91`. def attribute( self, inputs: Union[Tensor, Tuple[Tensor, ...]], @@ -376,7 +371,7 @@ def _attribute( layer_evals, ) = compute_layer_gradients_and_eval( forward_fn=self.forward_func, - layer=self.layer, + layer=cast(Module, self.layer), inputs=scaled_features_tpl, additional_forward_args=input_additional_args, target_ind=expanded_target, @@ -389,8 +384,6 @@ def _attribute( # This approximates the total input gradient of each step multiplied # by the step size. grad_diffs = tuple( - # pyre-fixme[58]: `-` is not supported for operand types `Tuple[Tensor, - # ...]` and `Tuple[Tensor, ...]`. layer_eval[num_examples:] - layer_eval[:-num_examples] for layer_eval in layer_evals ) @@ -403,8 +396,7 @@ def _attribute( grad_diff * layer_gradient[:-num_examples], n_steps, num_examples, - # pyre-fixme[16]: `tuple` has no attribute `shape`. 
- layer_eval.shape[1:], + tuple(layer_eval.shape[1:]), ) for layer_gradient, layer_eval, grad_diff in zip( layer_gradients, layer_evals, grad_diffs From 04c6d574f4c4afc3e74460650fedb071d4aeb605 Mon Sep 17 00:00:00 2001 From: Vivek Miglani Date: Mon, 30 Dec 2024 09:20:17 -0800 Subject: [PATCH 12/19] Fix layer deeplift pyre fixme issues (#1470) Summary: Fixing unresolved pyre fixme issues in corresponding file Differential Revision: D67705583 --- captum/attr/_core/layer/layer_deep_lift.py | 33 ++++++++++++---------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/captum/attr/_core/layer/layer_deep_lift.py b/captum/attr/_core/layer/layer_deep_lift.py index a126971cf2..da24e7cb48 100644 --- a/captum/attr/_core/layer/layer_deep_lift.py +++ b/captum/attr/_core/layer/layer_deep_lift.py @@ -321,8 +321,9 @@ def attribute( additional_forward_args, ) - # pyre-fixme[24]: Generic type `Sequence` expects 1 type parameter. - def chunk_output_fn(out: TensorOrTupleOfTensorsGeneric) -> Sequence: + def chunk_output_fn( + out: TensorOrTupleOfTensorsGeneric, + ) -> Sequence[Union[Tensor, Sequence[Tensor]]]: if isinstance(out, Tensor): return out.chunk(2) return tuple(out_sub.chunk(2) for out_sub in out) @@ -434,8 +435,6 @@ def __init__( # Ignoring mypy error for inconsistent signature with DeepLiftShap @typing.overload # type: ignore - # pyre-fixme[43]: The implementation of `attribute` does not accept all possible - # arguments of overload defined on line `453`. def attribute( self, inputs: Union[Tensor, Tuple[Tensor, ...]], @@ -450,9 +449,7 @@ def attribute( custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None, ) -> Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor]: ... - @typing.overload - # pyre-fixme[43]: The implementation of `attribute` does not accept all possible - # arguments of overload defined on line `439`. + @typing.overload # type: ignore def attribute( self, inputs: Union[Tensor, Tuple[Tensor, ...]], @@ -654,7 +651,7 @@ def attribute( ) = DeepLiftShap._expand_inputs_baselines_targets( self, baselines, inputs, target, additional_forward_args ) - attributions = LayerDeepLift.attribute.__wrapped__( # type: ignore + attribs_layer_deeplift = LayerDeepLift.attribute.__wrapped__( # type: ignore self, exp_inp, exp_base, @@ -667,8 +664,12 @@ def attribute( attribute_to_layer_input=attribute_to_layer_input, custom_attribution_func=custom_attribution_func, ) + delta: Tensor + attributions: Union[Tensor, Tuple[Tensor, ...]] if return_convergence_delta: - attributions, delta = attributions + attributions, delta = attribs_layer_deeplift + else: + attributions = attribs_layer_deeplift if isinstance(attributions, tuple): attributions = tuple( DeepLiftShap._compute_mean_across_baselines( @@ -681,15 +682,17 @@ def attribute( self, inp_bsz, base_bsz, attributions ) if return_convergence_delta: - # pyre-fixme[61]: `delta` is undefined, or not always defined. return attributions, delta else: - # pyre-fixme[7]: Expected `Union[Tuple[Union[Tensor, - # typing.Tuple[Tensor, ...]], Tensor], Tensor, typing.Tuple[Tensor, ...]]` - # but got `Union[tuple[Tensor], Tensor]`. - return attributions + return cast( + Union[ + Tensor, + Tuple[Tensor, ...], + Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor], + ], + attributions, + ) @property - # pyre-fixme[3]: Return type must be annotated. 
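The newly annotated chunk_output_fn reflects how DeepLift evaluates inputs and baselines in a single forward pass: the batch is doubled and every output is split back into an (inputs, baselines) pair. A sketch of that split, assuming a doubled leading batch dimension:

from typing import Sequence, Tuple, Union

import torch
from torch import Tensor

def chunk_output(
    out: Union[Tensor, Tuple[Tensor, ...]]
) -> Sequence[Union[Tensor, Sequence[Tensor]]]:
    # First half of the batch came from the inputs, second half from the baselines.
    if isinstance(out, Tensor):
        return out.chunk(2)
    return tuple(out_sub.chunk(2) for out_sub in out)

doubled = torch.arange(8.0).reshape(8, 1)  # 4 input rows stacked on 4 baseline rows
inputs_part, baselines_part = chunk_output(doubled)
print(inputs_part.flatten().tolist())      # [0.0, 1.0, 2.0, 3.0]
print(baselines_part.flatten().tolist())   # [4.0, 5.0, 6.0, 7.0]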
def multiplies_by_inputs(self) -> bool: return self._multiply_by_inputs From 7165d66b43edc684982065f728061bf90331c0ad Mon Sep 17 00:00:00 2001 From: Vivek Miglani Date: Mon, 30 Dec 2024 09:20:17 -0800 Subject: [PATCH 13/19] Fix layer gradient shap pyre fixme issues (#1471) Summary: Fixing unresolved pyre fixme issues in corresponding file Differential Revision: D67705670 --- .../attr/_core/layer/layer_gradient_shap.py | 58 +++++++------------ 1 file changed, 22 insertions(+), 36 deletions(-) diff --git a/captum/attr/_core/layer/layer_gradient_shap.py b/captum/attr/_core/layer/layer_gradient_shap.py index c9987eb001..e0e213997c 100644 --- a/captum/attr/_core/layer/layer_gradient_shap.py +++ b/captum/attr/_core/layer/layer_gradient_shap.py @@ -61,8 +61,7 @@ class LayerGradientShap(LayerAttribution, GradientAttribution): def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: Module, device_ids: Union[None, List[int]] = None, multiply_by_inputs: bool = True, @@ -104,13 +103,12 @@ def __init__( self._multiply_by_inputs = multiply_by_inputs @typing.overload - # pyre-fixme[43]: The implementation of `attribute` does not accept all possible - # arguments of overload defined on line `106`. def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - baselines: Union[TensorOrTupleOfTensorsGeneric, Callable], + baselines: Union[ + TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric] + ], n_samples: int = 5, stdevs: Union[float, Tuple[float, ...]] = 0.0, target: TargetType = None, @@ -121,13 +119,12 @@ def attribute( ) -> Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor]: ... @typing.overload - # pyre-fixme[43]: The implementation of `attribute` does not accept all possible - # arguments of overload defined on line `120`. def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - baselines: Union[TensorOrTupleOfTensorsGeneric, Callable], + baselines: Union[ + TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric] + ], n_samples: int = 5, stdevs: Union[float, Tuple[float, ...]] = 0.0, target: TargetType = None, @@ -137,11 +134,14 @@ def attribute( ) -> Union[Tensor, Tuple[Tensor, ...]]: ... @log_usage() + # pyre-fixme[43]: This definition does not have the same decorators as the + # preceding overload(s). def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - baselines: Union[TensorOrTupleOfTensorsGeneric, Callable], + baselines: Union[ + TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric] + ], n_samples: int = 5, stdevs: Union[float, Tuple[float, ...]] = 0.0, target: TargetType = None, @@ -294,17 +294,10 @@ def attribute( """ # since `baselines` is a distribution, we can generate it using a function # rather than passing it as an input argument - # pyre-fixme[9]: baselines has type `Union[typing.Callable[..., typing.Any], - # Variable[TensorOrTupleOfTensorsGeneric <: [Tensor, typing.Tuple[Tensor, - # ...]]]]`; used as `Tuple[Tensor, ...]`. - baselines = _format_callable_baseline(baselines, inputs) - # pyre-fixme[16]: Item `Callable` of `Union[(...) -> Any, - # TensorOrTupleOfTensorsGeneric]` has no attribute `__getitem__`. 
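GradientShap treats baselines as samples from a distribution, which is why the annotation now spells out that a callable producing tensors is as acceptable as the tensors themselves. A sketch of resolving either form (the resolver below is illustrative, not Captum's `_format_callable_baseline`):

from typing import Callable, Tuple, Union

import torch
from torch import Tensor

TensorOrTuple = Union[Tensor, Tuple[Tensor, ...]]
Baselines = Union[TensorOrTuple, Callable[..., TensorOrTuple]]

def resolve_baselines(baselines: Baselines, inputs: TensorOrTuple) -> TensorOrTuple:
    # A callable is asked to sample a concrete baseline batch; tensors pass through.
    return baselines(inputs) if callable(baselines) else baselines

inputs = torch.zeros(4, 3)
sampler = lambda inp: torch.randn(20, 3) * 0.001  # noise around the origin
print(resolve_baselines(sampler, inputs).shape)    # torch.Size([20, 3])
print(resolve_baselines(torch.zeros(20, 3), inputs).shape)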
- assert isinstance(baselines[0], torch.Tensor), ( + formatted_baselines = _format_callable_baseline(baselines, inputs) + assert isinstance(formatted_baselines[0], torch.Tensor), ( "Baselines distribution has to be provided in a form " - # pyre-fixme[16]: Item `Callable` of `Union[(...) -> Any, - # TensorOrTupleOfTensorsGeneric]` has no attribute `__getitem__`. - "of a torch.Tensor {}.".format(baselines[0]) + "of a torch.Tensor {}.".format(formatted_baselines[0]) ) input_min_baseline_x_grad = LayerInputBaselineXGradient( @@ -323,7 +316,7 @@ def attribute( nt_samples=n_samples, stdevs=stdevs, draw_baseline_from_distrib=True, - baselines=baselines, + baselines=formatted_baselines, target=target, additional_forward_args=additional_forward_args, return_convergence_delta=return_convergence_delta, @@ -343,8 +336,7 @@ def multiplies_by_inputs(self) -> bool: class LayerInputBaselineXGradient(LayerAttribution, GradientAttribution): def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: Module, device_ids: Union[None, List[int]] = None, multiply_by_inputs: bool = True, @@ -436,7 +428,7 @@ def attribute( # type: ignore ) grads, _ = compute_layer_gradients_and_eval( self.forward_func, - self.layer, + cast(Module, self.layer), input_baseline_scaled, target, additional_forward_args, @@ -448,7 +440,7 @@ def attribute( # type: ignore attr_baselines = _forward_layer_eval( self.forward_func, baselines, - self.layer, + cast(Module, self.layer), additional_forward_args=additional_forward_args, device_ids=self.device_ids, attribute_to_layer_input=attribute_to_layer_input, @@ -457,19 +449,15 @@ def attribute( # type: ignore attr_inputs = _forward_layer_eval( self.forward_func, inputs, - self.layer, + cast(Module, self.layer), additional_forward_args=additional_forward_args, device_ids=self.device_ids, attribute_to_layer_input=attribute_to_layer_input, ) - + attributions: Tuple[Tensor, ...] if self.multiplies_by_inputs: input_baseline_diffs = tuple( - # pyre-fixme[58]: `-` is not supported for operand types - # `typing.Tuple[torch._tensor.Tensor, ...]` and - # `typing.Tuple[torch._tensor.Tensor, ...]`. - input - baseline - for input, baseline in zip(attr_inputs, attr_baselines) + input - baseline for input, baseline in zip(attr_inputs, attr_baselines) ) attributions = tuple( input_baseline_diff * grad @@ -481,8 +469,6 @@ def attribute( # type: ignore return _compute_conv_delta_and_format_attrs( self, return_convergence_delta, - # pyre-fixme[6]: For 3rd argument expected `Tuple[Tensor, ...]` but got - # `Union[List[typing.Tuple[Tensor, ...]], tuple[Tensor]]`. 
attributions, baselines, inputs, From ba85f8aeac22ec049562db91dff84e8fc64e6404 Mon Sep 17 00:00:00 2001 From: Vivek Miglani Date: Mon, 30 Dec 2024 09:20:17 -0800 Subject: [PATCH 14/19] Fix layer gradient x activation pyre fixme issues (#1472) Summary: Fixing unresolved pyre fixme issues in corresponding file Differential Revision: D67705758 --- .../_core/layer/layer_gradient_x_activation.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/captum/attr/_core/layer/layer_gradient_x_activation.py b/captum/attr/_core/layer/layer_gradient_x_activation.py index c828a262e5..f56265c2e8 100644 --- a/captum/attr/_core/layer/layer_gradient_x_activation.py +++ b/captum/attr/_core/layer/layer_gradient_x_activation.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # pyre-strict -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union from captum._utils.common import ( _format_additional_forward_args, @@ -24,8 +24,7 @@ class LayerGradientXActivation(LayerAttribution, GradientAttribution): def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: ModuleOrModuleList, device_ids: Union[None, List[int]] = None, multiply_by_inputs: bool = True, @@ -186,11 +185,10 @@ def attribute( if isinstance(self.layer, Module): return _format_output( len(layer_evals) > 1, - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but - # got `List[typing.Tuple[Tensor, ...]]`. - # pyre-fixme[6]: For 2nd argument expected `Tuple[Tensor, ...]` but - # got `List[typing.Tuple[Tensor, ...]]`. - self.multiply_gradient_acts(layer_gradients, layer_evals), + self.multiply_gradient_acts( + cast(Tuple[Tensor, ...], layer_gradients), + cast(Tuple[Tensor, ...], layer_evals), + ), ) else: return [ From a2d58f833ad83da41540a17d6112fe6f69221e16 Mon Sep 17 00:00:00 2001 From: Vivek Miglani Date: Mon, 30 Dec 2024 09:20:17 -0800 Subject: [PATCH 15/19] Fix layer LRP pyre fixme issues (#1474) Summary: Fixing unresolved pyre fixme issues in corresponding file Differential Revision: D67706680 --- captum/attr/_core/layer/layer_lrp.py | 81 ++++++++++++++-------------- captum/attr/_utils/lrp_rules.py | 8 ++- 2 files changed, 47 insertions(+), 42 deletions(-) diff --git a/captum/attr/_core/layer/layer_lrp.py b/captum/attr/_core/layer/layer_lrp.py index ba6a73d701..3621c118ac 100644 --- a/captum/attr/_core/layer/layer_lrp.py +++ b/captum/attr/_core/layer/layer_lrp.py @@ -2,7 +2,9 @@ # pyre-strict import typing -from typing import Any, cast, List, Literal, Optional, Tuple, Union +from typing import cast, Dict, List, Literal, Optional, Tuple, TypeVar, Union + +import torch from captum._utils.common import ( _format_tensor_into_tuples, @@ -21,8 +23,12 @@ ) from captum.attr._core.lrp import LRP from captum.attr._utils.attribution import LayerAttribution +from captum.attr._utils.lrp_rules import PropagationRule from torch import Tensor from torch.nn import Module +from torch.utils.hooks import RemovableHandle + +Generic = TypeVar("Generic") class LayerLRP(LRP, LayerAttribution): @@ -39,6 +45,13 @@ class LayerLRP(LRP, LayerAttribution): Ancona et al. [https://openreview.net/forum?id=Sy21R9JAW]. 
""" + device_ids: List[int] + verbose: bool + layers: List[Module] + attribute_to_layer_input: bool = False + backward_handles: List[RemovableHandle] + forward_handles: List[RemovableHandle] + def __init__(self, model: Module, layer: ModuleOrModuleList) -> None: """ Args: @@ -59,7 +72,6 @@ def __init__(self, model: Module, layer: ModuleOrModuleList) -> None: LayerAttribution.__init__(self, model, layer) LRP.__init__(self, model) if hasattr(self.model, "device_ids"): - # pyre-fixme[4]: Attribute must be annotated. self.device_ids = cast(List[int], self.model.device_ids) @typing.overload # type: ignore @@ -208,48 +220,34 @@ def attribute( >>> attribution = layer_lrp.attribute(input, target=5) """ - # pyre-fixme[16]: `LayerLRP` has no attribute `verbose`. self.verbose = verbose - # pyre-fixme[16]: `LayerLRP` has no attribute `_original_state_dict`. self._original_state_dict = self.model.state_dict() - # pyre-fixme[16]: `LayerLRP` has no attribute `layers`. self.layers = [] self._get_layers(self.model) self._check_and_attach_rules() - # pyre-fixme[16]: `LayerLRP` has no attribute `attribute_to_layer_input`. self.attribute_to_layer_input = attribute_to_layer_input - # pyre-fixme[16]: `LayerLRP` has no attribute `backward_handles`. self.backward_handles = [] - # pyre-fixme[16]: `LayerLRP` has no attribute `forward_handles`. self.forward_handles = [] - # pyre-fixme[9]: inputs has type `TensorOrTupleOfTensorsGeneric`; used as - # `Tuple[Tensor, ...]`. - inputs = _format_tensor_into_tuples(inputs) - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but got - # `TensorOrTupleOfTensorsGeneric`. - gradient_mask = apply_gradient_requirements(inputs) + inputs_tuple = _format_tensor_into_tuples(inputs) + gradient_mask = apply_gradient_requirements(inputs_tuple) try: # 1. Forward pass output = self._compute_output_and_change_weights( - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but - # got `TensorOrTupleOfTensorsGeneric`. - inputs, + inputs_tuple, target, additional_forward_args, ) self._register_forward_hooks() # 2. Forward pass + backward pass _ = compute_gradients( - self._forward_fn_wrapper, inputs, target, additional_forward_args + self._forward_fn_wrapper, inputs_tuple, target, additional_forward_args ) relevances = self._get_output_relevance(output) finally: self._restore_model() - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but got - # `TensorOrTupleOfTensorsGeneric`. - undo_gradient_requirements(inputs, gradient_mask) + undo_gradient_requirements(inputs_tuple, gradient_mask) if return_convergence_delta: delta: Union[Tensor, List[Tensor]] @@ -257,7 +255,10 @@ def attribute( delta = [] for relevance_layer in relevances: delta.append( - self.compute_convergence_delta(relevance_layer, output) + self.compute_convergence_delta( + cast(Union[Tensor, Tuple[Tensor, ...]], relevance_layer), + output, + ) ) else: delta = self.compute_convergence_delta( @@ -267,33 +268,35 @@ def attribute( else: return relevances # type: ignore - # pyre-fixme[3]: Return type must be annotated. - # pyre-fixme[2]: Parameter must be annotated. - def _get_single_output_relevance(self, layer, output): - # pyre-fixme[16]: `LayerLRP` has no attribute `attribute_to_layer_input`. 
+ def _get_single_output_relevance( + self, layer: Module, output: Tensor + ) -> Union[Tensor, Tuple[Tensor, ...]]: if self.attribute_to_layer_input: - normalized_relevances = layer.rule.relevance_input + normalized_relevances = cast( + Dict[torch.device, Tensor], + cast(PropagationRule, layer.rule).relevance_input, + ) else: - normalized_relevances = layer.rule.relevance_output + normalized_relevances = cast(PropagationRule, layer.rule).relevance_output key_list = _sort_key_list(list(normalized_relevances.keys()), self.device_ids) - normalized_relevances = _reduce_list( + normalized_relevances_reduced = _reduce_list( [normalized_relevances[device_id] for device_id in key_list] ) - if isinstance(normalized_relevances, tuple): + if isinstance(normalized_relevances_reduced, tuple): return tuple( normalized_relevance * output.reshape((-1,) + (1,) * (normalized_relevance.dim() - 1)) - for normalized_relevance in normalized_relevances + for normalized_relevance in normalized_relevances_reduced ) else: - return normalized_relevances * output.reshape( - (-1,) + (1,) * (normalized_relevances.dim() - 1) + return normalized_relevances_reduced * output.reshape( + (-1,) + (1,) * (normalized_relevances_reduced.dim() - 1) ) - # pyre-fixme[3]: Return type must be annotated. - # pyre-fixme[2]: Parameter must be annotated. - def _get_output_relevance(self, output): + def _get_output_relevance( + self, output: Tensor + ) -> Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]]: if isinstance(self.layer, list): relevances = [] for layer in self.layer: @@ -303,11 +306,9 @@ def _get_output_relevance(self, output): return self._get_single_output_relevance(self.layer, output) @staticmethod - # pyre-fixme[3]: Return annotation cannot contain `Any`. def _convert_list_to_tuple( - # pyre-fixme[2]: Parameter annotation cannot contain `Any`. - relevances: Union[List[Any], Tuple[Any, ...]] - ) -> Tuple[Any, ...]: + relevances: Union[List[Generic], Tuple[Generic, ...]] + ) -> Tuple[Generic, ...]: if isinstance(relevances, list): return tuple(relevances) else: diff --git a/captum/attr/_utils/lrp_rules.py b/captum/attr/_utils/lrp_rules.py index 2dd8dc4fe8..91761c226c 100644 --- a/captum/attr/_utils/lrp_rules.py +++ b/captum/attr/_utils/lrp_rules.py @@ -3,10 +3,11 @@ # pyre-strict from abc import ABC, abstractmethod +from typing import cast, Dict, List, Union import torch - from captum._utils.common import _format_tensor_into_tuples +from torch import Tensor class PropagationRule(ABC): @@ -15,6 +16,9 @@ class PropagationRule(ABC): STABILITY_FACTOR is used to assure that no zero divison occurs. """ + relevance_input: Dict[torch.device, Union[torch.Tensor, List[torch.Tensor]]] = {} + relevance_output: Dict[torch.device, torch.Tensor] = {} + STABILITY_FACTOR = 1e-9 # pyre-fixme[3]: Return type must be annotated. @@ -67,7 +71,7 @@ def _backward_hook_input(grad): # pyre-fixme[16]: `PropagationRule` has no attribute `relevance_input`. 
self.relevance_input[device] = relevance.data else: - self.relevance_input[device].append(relevance.data) + cast(List[Tensor], self.relevance_input[device]).append(relevance.data) # replace_out is needed since two hooks are set on the same tensor # The output of this hook is needed in backward_hook_activation From 88d381370810ca8446d2ef6be32daa12137ff442 Mon Sep 17 00:00:00 2001 From: Vivek Miglani Date: Mon, 30 Dec 2024 09:20:17 -0800 Subject: [PATCH 16/19] Fix approximation utils pyre fix me issues (#1475) Summary: Fixing unresolved pyre fixme issues in corresponding file Differential Revision: D67706741 --- captum/attr/_utils/approximation_methods.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/captum/attr/_utils/approximation_methods.py b/captum/attr/_utils/approximation_methods.py index 8debc95540..9af3cf9580 100644 --- a/captum/attr/_utils/approximation_methods.py +++ b/captum/attr/_utils/approximation_methods.py @@ -2,7 +2,7 @@ # pyre-strict from enum import Enum -from typing import Callable, List, Tuple +from typing import Callable, cast, List, Tuple import torch @@ -121,19 +121,20 @@ def gauss_legendre_builders() -> ( # allow using riemann even without np import numpy as np + from numpy.typing import NDArray def step_sizes(n: int) -> List[float]: assert n > 0, "The number of steps has to be larger than zero" # Scaling from 2 to 1 - # pyre-fixme[6]: For 1st argument expected `Iterable[Variable[_T]]` but got - # `float`. - return list(0.5 * np.polynomial.legendre.leggauss(n)[1]) + return cast( + NDArray[np.float64], 0.5 * np.polynomial.legendre.leggauss(n)[1] + ).tolist() def alphas(n: int) -> List[float]: assert n > 0, "The number of steps has to be larger than zero" # Scaling from [-1, 1] to [0, 1] - # pyre-fixme[6]: For 1st argument expected `Iterable[Variable[_T]]` but got - # `float`. - return list(0.5 * (1 + np.polynomial.legendre.leggauss(n)[0])) + return cast( + NDArray[np.float64], 0.5 * (1 + np.polynomial.legendre.leggauss(n)[0]) + ).tolist() return step_sizes, alphas From 2e64ce5412272ac01052097ea8af062d9d526e35 Mon Sep 17 00:00:00 2001 From: Vivek Miglani Date: Mon, 30 Dec 2024 09:20:17 -0800 Subject: [PATCH 17/19] Fix custom modules pyre fix me issues (#1476) Summary: Fixing unresolved pyre fixme issues in corresponding file Differential Revision: D67706756 --- captum/attr/_utils/custom_modules.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/captum/attr/_utils/custom_modules.py b/captum/attr/_utils/custom_modules.py index a666cfce6a..6593bc33c8 100644 --- a/captum/attr/_utils/custom_modules.py +++ b/captum/attr/_utils/custom_modules.py @@ -2,6 +2,7 @@ # pyre-strict import torch.nn as nn +from torch import Tensor class Addition_Module(nn.Module): @@ -12,7 +13,5 @@ class Addition_Module(nn.Module): def __init__(self) -> None: super().__init__() - # pyre-fixme[3]: Return type must be annotated. - # pyre-fixme[2]: Parameter must be annotated. 
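The approximation_methods change keeps the quadrature math identical (numpy's Gauss-Legendre nodes and weights live on [-1, 1] and are rescaled to the [0, 1] integration range) and only converts the numpy arrays to plain lists for the annotated return type. A standalone check of that rescaling:

from typing import List, Tuple

import numpy as np

def leggauss_on_unit_interval(n: int) -> Tuple[List[float], List[float]]:
    xs, ws = np.polynomial.legendre.leggauss(n)
    step_sizes = (0.5 * ws).tolist()    # weights shrink by the interval ratio 1/2
    alphas = (0.5 * (1 + xs)).tolist()  # nodes mapped from [-1, 1] onto [0, 1]
    return step_sizes, alphas

steps, alphas = leggauss_on_unit_interval(5)
print(round(sum(steps), 6))                # 1.0 -- weights on [0, 1] sum to one
print(all(0.0 < a < 1.0 for a in alphas))  # True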
- def forward(self, x1, x2): + def forward(self, x1: Tensor, x2: Tensor) -> Tensor: return x1 + x2 From 1c5220eda7d9e00df0f6723d9ceff3bae7e32d2a Mon Sep 17 00:00:00 2001 From: Vivek Miglani Date: Mon, 30 Dec 2024 09:20:17 -0800 Subject: [PATCH 18/19] Fix class summarizer pyre fix me issues (#1477) Summary: Fixing unresolved pyre fixme issues in corresponding file Differential Revision: D67706853 --- captum/attr/_utils/class_summarizer.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/captum/attr/_utils/class_summarizer.py b/captum/attr/_utils/class_summarizer.py index 5fe1deab35..316f15e26c 100644 --- a/captum/attr/_utils/class_summarizer.py +++ b/captum/attr/_utils/class_summarizer.py @@ -2,7 +2,7 @@ # pyre-strict from collections import defaultdict -from typing import Any, Dict, List, Optional, Union +from typing import Any, cast, Dict, Generic, List, Optional, TypeVar, Union from captum._utils.common import _format_tensor_into_tuples from captum._utils.typing import TargetType, TensorOrTupleOfTensorsGeneric @@ -11,8 +11,10 @@ from captum.log import log_usage from torch import Tensor +KeyType = TypeVar("KeyType") -class ClassSummarizer(Summarizer): + +class ClassSummarizer(Summarizer, Generic[KeyType]): r""" Used to keep track of summaries for associated classes. The classes/labels can be of any type that are supported by `dict`. @@ -23,8 +25,7 @@ class ClassSummarizer(Summarizer): @log_usage() def __init__(self, stats: List[Stat]) -> None: Summarizer.__init__.__wrapped__(self, stats) - # pyre-fixme[4]: Attribute annotation cannot contain `Any`. - self.summaries: Dict[Any, Summarizer] = defaultdict( + self.summaries: Dict[KeyType, Summarizer] = defaultdict( lambda: Summarizer(stats=stats) ) @@ -84,15 +85,15 @@ def update( # type: ignore tensors_to_summarize_copy = tuple(tensor[i].clone() for tensor in x) label = labels_typed[0] if len(labels_typed) == 1 else labels_typed[i] - self.summaries[label].update(tensors_to_summarize) + self.summaries[cast(KeyType, label)].update(tensors_to_summarize) super().update(tensors_to_summarize_copy) @property - # pyre-fixme[3]: Return annotation cannot contain `Any`. def class_summaries( self, ) -> Dict[ - Any, Union[None, Dict[str, Optional[Tensor]], List[Dict[str, Optional[Tensor]]]] + KeyType, + Union[None, Dict[str, Optional[Tensor]], List[Dict[str, Optional[Tensor]]]], ]: r""" Returns: From 5c2599702c1ce47eba86e06b582454dd6c44e05f Mon Sep 17 00:00:00 2001 From: Vivek Miglani Date: Mon, 30 Dec 2024 09:20:17 -0800 Subject: [PATCH 19/19] Fix summarizer pyre fix me issues Summary: Fixing unresolved pyre fixme issues in corresponding file Differential Revision: D67707848 --- captum/attr/_utils/summarizer.py | 38 +++++++++++++------------------- 1 file changed, 15 insertions(+), 23 deletions(-) diff --git a/captum/attr/_utils/summarizer.py b/captum/attr/_utils/summarizer.py index 148b19787a..3f4ffc54ed 100644 --- a/captum/attr/_utils/summarizer.py +++ b/captum/attr/_utils/summarizer.py @@ -28,6 +28,9 @@ class Summarizer: >>>print(summ.summary['mean']) """ + _stats: List[Stat] + _summary_stats_indicies: List[int] + @log_usage() def __init__(self, stats: List[Stat]) -> None: r""" @@ -37,11 +40,9 @@ def __init__(self, stats: List[Stat]) -> None: """ self._summarizers: List[SummarizerSingleTensor] = [] self._is_inputs_tuple: Optional[bool] = None - # pyre-fixme[4]: Attribute must be annotated. self._stats, self._summary_stats_indicies = _reorder_stats(stats) - # pyre-fixme[3]: Return type must be annotated. 
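ClassSummarizer is now generic over the label type it uses as a dictionary key instead of falling back to Dict[Any, ...]. A toy version of the same Generic-over-key pattern:

from collections import defaultdict
from typing import DefaultDict, Dict, Generic, TypeVar

KeyType = TypeVar("KeyType")

class PerClassCounter(Generic[KeyType]):
    def __init__(self) -> None:
        self.counts: DefaultDict[KeyType, int] = defaultdict(int)

    def update(self, label: KeyType) -> None:
        self.counts[label] += 1

    def summary(self) -> Dict[KeyType, int]:
        return dict(self.counts)

by_name: PerClassCounter[str] = PerClassCounter()
by_name.update("cat")
by_name.update("cat")
print(by_name.summary())   # {'cat': 2}

by_index: PerClassCounter[int] = PerClassCounter()
by_index.update(7)
print(by_index.summary())  # {7: 1}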
- def _copy_stats(self): + def _copy_stats(self) -> List[Stat]: import copy return copy.deepcopy(self._stats) @@ -125,48 +126,37 @@ def _reorder_stats(stats: List[Stat]) -> Tuple[List[Stat], List[int]]: dep_order = [StdDev, Var, MSE, Mean, Count] # remove dupe stats - # pyre-fixme[9]: stats has type `List[Stat]`; used as `Set[Stat]`. - stats = set(stats) + stats_set = set(stats) summary_stats = set(stats) from collections import defaultdict - # pyre-fixme[24]: Generic type `type` expects 1 type parameter, use - # `typing.Type[]` to avoid runtime subscripting errors. - stats_by_module: Dict[Type, List[Stat]] = defaultdict(list) - for stat in stats: + stats_by_module: Dict[Type[Stat], List[Stat]] = defaultdict(list) + for stat in stats_set: stats_by_module[stat.__class__].append(stat) # StdDev is an odd case since it is parameterized, thus # for each StdDev(order) we must ensure there is an associated Var(order) for std_dev in stats_by_module[StdDev]: stat_to_add = Var(order=std_dev.order) # type: ignore - # pyre-fixme[16]: `List` has no attribute `add`. - stats.add(stat_to_add) + stats_set.add(stat_to_add) stats_by_module[stat_to_add.__class__].append(stat_to_add) # For the other modules (deps[1:n-1]): if i exists => # we want to ensure i...n-1 exists for i, dep in enumerate(dep_order[1:]): if dep in stats_by_module: - # pyre-fixme[16]: `List` has no attribute `update`. - stats.update([mod() for mod in dep_order[i + 1 :]]) + stats_set.update([mod() for mod in dep_order[i + 1 :]]) break # Step 2: get the correct order # NOTE: we are sorting via a given topological order - sort_order = {mod: i for i, mod in enumerate(dep_order)} - # pyre-fixme[6]: For 1st argument expected `Type[Union[Count, MSE, Mean, StdDev, - # Var]]` but got `Type[Min]`. + sort_order: Dict[Type[Stat], int] = {mod: i for i, mod in enumerate(dep_order)} sort_order[Min] = -1 - # pyre-fixme[6]: For 1st argument expected `Type[Union[Count, MSE, Mean, StdDev, - # Var]]` but got `Type[Max]`. sort_order[Max] = -1 - # pyre-fixme[6]: For 1st argument expected `Type[Union[Count, MSE, Mean, StdDev, - # Var]]` but got `Type[Sum]`. sort_order[Sum] = -1 - stats = list(stats) + stats = list(stats_set) stats.sort(key=lambda x: sort_order[x.__class__], reverse=True) # get the summary stat indices @@ -185,6 +175,10 @@ class SummarizerSingleTensor: If possible use `Summarizer` instead. """ + _stats: List[Stat] + _stat_to_stat: Dict[Stat, Stat] + _summary_stats: List[Stat] + def __init__(self, stats: List[Stat], summary_stats_indices: List[int]) -> None: r""" Args: @@ -196,9 +190,7 @@ def __init__(self, stats: List[Stat], summary_stats_indices: List[int]) -> None: does not require any specific order. """ self._stats = stats - # pyre-fixme[4]: Attribute must be annotated. self._stat_to_stat = {stat: stat for stat in self._stats} - # pyre-fixme[4]: Attribute must be annotated. self._summary_stats = [stats[i] for i in summary_stats_indices] for stat in stats:
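_reorder_stats encodes the dependency chain StdDev -> Var -> MSE -> Mean -> Count: requesting a derived statistic pulls in everything below it, and evaluation is ordered so the primitives run first. A simplified sketch of that completion and ordering over plain names (Min, Max and Sum, which the real code handles separately, are omitted):

from typing import List, Set

# Dependency chain from most derived to most primitive.
DEP_ORDER = ["std_dev", "var", "mse", "mean", "count"]

def complete_and_order(requested: Set[str]) -> List[str]:
    stats = set(requested)
    # If any stat in the chain is requested, everything below it is required too.
    for i, name in enumerate(DEP_ORDER):
        if name in stats:
            stats.update(DEP_ORDER[i + 1:])
            break
    # Evaluate primitives first (count, then mean, ...), derived stats last.
    return sorted(stats, key=DEP_ORDER.index, reverse=True)

print(complete_and_order({"std_dev"}))
# ['count', 'mean', 'mse', 'var', 'std_dev']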