diff --git a/api/_modules/captum/attr/_core/layer/grad_cam.html b/api/_modules/captum/attr/_core/layer/grad_cam.html index 850fb67f06..61914e1279 100644 --- a/api/_modules/captum/attr/_core/layer/grad_cam.html +++ b/api/_modules/captum/attr/_core/layer/grad_cam.html @@ -33,7 +33,7 @@

Source code for captum.attr._core.layer.grad_cam

#!/usr/bin/env python3 # pyre-strict -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union import torch import torch.nn.functional as F @@ -88,8 +88,7 @@

Source code for captum.attr._core.layer.grad_cam

def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: Module, device_ids: Union[None, List[int]] = None, ) -> None: @@ -237,7 +236,7 @@

Source code for captum.attr._core.layer.grad_cam

# hidden layer and hidden layer evaluated at each input. layer_gradients, layer_evals = compute_layer_gradients_and_eval( self.forward_func, - self.layer, + cast(Module, self.layer), inputs, target, additional_forward_args, @@ -249,10 +248,7 @@

Source code for captum.attr._core.layer.grad_cam

summed_grads = tuple( ( torch.mean( - # pyre-fixme[6]: For 1st argument expected `Tensor` but got - # `Tuple[Tensor, ...]`. layer_grad, - # pyre-fixme[16]: `tuple` has no attribute `shape`. dim=tuple(x for x in range(2, len(layer_grad.shape))), keepdim=True, ) @@ -264,29 +260,17 @@

Source code for captum.attr._core.layer.grad_cam

if attr_dim_summation: scaled_acts = tuple( - # pyre-fixme[58]: `*` is not supported for operand types - # `Union[tuple[torch._tensor.Tensor], torch._tensor.Tensor]` and - # `Tuple[Tensor, ...]`. - # pyre-fixme[6]: For 1st argument expected `Tensor` but got - # `Tuple[Tensor, ...]`. torch.sum(summed_grad * layer_eval, dim=1, keepdim=True) for summed_grad, layer_eval in zip(summed_grads, layer_evals) ) else: scaled_acts = tuple( - # pyre-fixme[58]: `*` is not supported for operand types - # `Union[tuple[torch._tensor.Tensor], torch._tensor.Tensor]` and - # `Tuple[Tensor, ...]`. summed_grad * layer_eval for summed_grad, layer_eval in zip(summed_grads, layer_evals) ) if relu_attributions: - # pyre-fixme[6]: For 1st argument expected `Tensor` but got - # `Union[tuple[Tensor], Tensor]`. scaled_acts = tuple(F.relu(scaled_act) for scaled_act in scaled_acts) - # pyre-fixme[6]: For 2nd argument expected `Tuple[Tensor, ...]` but got - # `Tuple[Union[tuple[Tensor], Tensor], ...]`. return _format_output(len(scaled_acts) > 1, scaled_acts)
diff --git a/api/_modules/captum/attr/_core/layer/grad_cam/index.html b/api/_modules/captum/attr/_core/layer/grad_cam/index.html index 850fb67f06..61914e1279 100644 --- a/api/_modules/captum/attr/_core/layer/grad_cam/index.html +++ b/api/_modules/captum/attr/_core/layer/grad_cam/index.html @@ -33,7 +33,7 @@

Source code for captum.attr._core.layer.grad_cam

#!/usr/bin/env python3 # pyre-strict -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union import torch import torch.nn.functional as F @@ -88,8 +88,7 @@

Source code for captum.attr._core.layer.grad_cam

def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: Module, device_ids: Union[None, List[int]] = None, ) -> None: @@ -237,7 +236,7 @@

Source code for captum.attr._core.layer.grad_cam

# hidden layer and hidden layer evaluated at each input. layer_gradients, layer_evals = compute_layer_gradients_and_eval( self.forward_func, - self.layer, + cast(Module, self.layer), inputs, target, additional_forward_args, @@ -249,10 +248,7 @@

Source code for captum.attr._core.layer.grad_cam

summed_grads = tuple( ( torch.mean( - # pyre-fixme[6]: For 1st argument expected `Tensor` but got - # `Tuple[Tensor, ...]`. layer_grad, - # pyre-fixme[16]: `tuple` has no attribute `shape`. dim=tuple(x for x in range(2, len(layer_grad.shape))), keepdim=True, ) @@ -264,29 +260,17 @@

Source code for captum.attr._core.layer.grad_cam

if attr_dim_summation: scaled_acts = tuple( - # pyre-fixme[58]: `*` is not supported for operand types - # `Union[tuple[torch._tensor.Tensor], torch._tensor.Tensor]` and - # `Tuple[Tensor, ...]`. - # pyre-fixme[6]: For 1st argument expected `Tensor` but got - # `Tuple[Tensor, ...]`. torch.sum(summed_grad * layer_eval, dim=1, keepdim=True) for summed_grad, layer_eval in zip(summed_grads, layer_evals) ) else: scaled_acts = tuple( - # pyre-fixme[58]: `*` is not supported for operand types - # `Union[tuple[torch._tensor.Tensor], torch._tensor.Tensor]` and - # `Tuple[Tensor, ...]`. summed_grad * layer_eval for summed_grad, layer_eval in zip(summed_grads, layer_evals) ) if relu_attributions: - # pyre-fixme[6]: For 1st argument expected `Tensor` but got - # `Union[tuple[Tensor], Tensor]`. scaled_acts = tuple(F.relu(scaled_act) for scaled_act in scaled_acts) - # pyre-fixme[6]: For 2nd argument expected `Tuple[Tensor, ...]` but got - # `Tuple[Union[tuple[Tensor], Tensor], ...]`. return _format_output(len(scaled_acts) > 1, scaled_acts)
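Aside (illustrative, not part of the patch): the recurring change above replaces the bare `Callable` annotation for `forward_func` with `Callable[..., Tensor]`. Any ordinary `nn.Module` already satisfies that signature, since calling the module returns a `Tensor`. A minimal sketch, assuming a hypothetical toy model named `TinyNet`:

import torch
import torch.nn as nn
from captum.attr import LayerGradCam

class TinyNet(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.conv = nn.Conv2d(3, 8, kernel_size=3, padding=1)
        self.head = nn.Linear(8, 10)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        feats = torch.relu(self.conv(x))
        pooled = feats.mean(dim=(2, 3))  # global average pooling
        return self.head(pooled)         # returns a Tensor, matching Callable[..., Tensor]

model = TinyNet().eval()
grad_cam = LayerGradCam(model, model.conv)  # forward_func=model, layer=model.conv
attrs = grad_cam.attribute(torch.randn(2, 3, 16, 16), target=1)
print(attrs.shape)  # torch.Size([2, 1, 16, 16]): GradCAM map, channels summed by default

The passed `model` type-checks as `Callable[..., Tensor]` without any change to user code, which suggests the tightened annotation stays backward compatible for typical callers.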
diff --git a/api/_modules/captum/attr/_core/layer/internal_influence.html b/api/_modules/captum/attr/_core/layer/internal_influence.html index 260ca39518..d0aa2ce662 100644 --- a/api/_modules/captum/attr/_core/layer/internal_influence.html +++ b/api/_modules/captum/attr/_core/layer/internal_influence.html @@ -33,7 +33,7 @@

Source code for captum.attr._core.layer.internal_influence

#!/usr/bin/env python3 # pyre-strict -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union import torch from captum._utils.common import ( @@ -75,8 +75,7 @@

Source code for captum.attr._core.layer.internal_influence

def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: Module, device_ids: Union[None, List[int]] = None, ) -> None: @@ -330,7 +329,7 @@

Source code for captum.attr._core.layer.internal_influence

# Returns gradient of output with respect to hidden layer. layer_gradients, _ = compute_layer_gradients_and_eval( forward_fn=self.forward_func, - layer=self.layer, + layer=cast(Module, self.layer), inputs=scaled_features_tpl, target_ind=expanded_target, additional_forward_args=input_additional_args, @@ -341,9 +340,7 @@

Source code for captum.attr._core.layer.internal_influence

# flattening grads so that we can multiply it with step-size # calling contiguous to avoid `memory hole` problems scaled_grads = tuple( - # pyre-fixme[16]: `tuple` has no attribute `contiguous`. layer_grad.contiguous().view(n_steps, -1) - # pyre-fixme[16]: `tuple` has no attribute `device`. * torch.tensor(step_sizes).view(n_steps, 1).to(layer_grad.device) for layer_grad in layer_gradients ) @@ -354,8 +351,7 @@

Source code for captum.attr._core.layer.internal_influence

scaled_grad, n_steps, inputs[0].shape[0], - # pyre-fixme[16]: `tuple` has no attribute `shape`. - layer_grad.shape[1:], + tuple(layer_grad.shape[1:]), ) for scaled_grad, layer_grad in zip(scaled_grads, layer_gradients) ) diff --git a/api/_modules/captum/attr/_core/layer/internal_influence/index.html b/api/_modules/captum/attr/_core/layer/internal_influence/index.html index 260ca39518..d0aa2ce662 100644 --- a/api/_modules/captum/attr/_core/layer/internal_influence/index.html +++ b/api/_modules/captum/attr/_core/layer/internal_influence/index.html @@ -33,7 +33,7 @@

Source code for captum.attr._core.layer.internal_influence

#!/usr/bin/env python3 # pyre-strict -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union import torch from captum._utils.common import ( @@ -75,8 +75,7 @@

Source code for captum.attr._core.layer.internal_influence

def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: Module, device_ids: Union[None, List[int]] = None, ) -> None: @@ -330,7 +329,7 @@

Source code for captum.attr._core.layer.internal_influence

# Returns gradient of output with respect to hidden layer. layer_gradients, _ = compute_layer_gradients_and_eval( forward_fn=self.forward_func, - layer=self.layer, + layer=cast(Module, self.layer), inputs=scaled_features_tpl, target_ind=expanded_target, additional_forward_args=input_additional_args, @@ -341,9 +340,7 @@

Source code for captum.attr._core.layer.internal_influence

# flattening grads so that we can multiply it with step-size # calling contiguous to avoid `memory hole` problems scaled_grads = tuple( - # pyre-fixme[16]: `tuple` has no attribute `contiguous`. layer_grad.contiguous().view(n_steps, -1) - # pyre-fixme[16]: `tuple` has no attribute `device`. * torch.tensor(step_sizes).view(n_steps, 1).to(layer_grad.device) for layer_grad in layer_gradients ) @@ -354,8 +351,7 @@

Source code for captum.attr._core.layer.internal_influence

scaled_grad, n_steps, inputs[0].shape[0], - # pyre-fixme[16]: `tuple` has no attribute `shape`. - layer_grad.shape[1:], + tuple(layer_grad.shape[1:]), ) for scaled_grad, layer_grad in zip(scaled_grads, layer_gradients) ) diff --git a/api/_modules/captum/attr/_core/layer/layer_activation.html b/api/_modules/captum/attr/_core/layer/layer_activation.html index 7697b8345f..cfde20afb7 100644 --- a/api/_modules/captum/attr/_core/layer/layer_activation.html +++ b/api/_modules/captum/attr/_core/layer/layer_activation.html @@ -54,8 +54,7 @@

Source code for captum.attr._core.layer.layer_activation

def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Union[int, float, Tensor]], layer: ModuleOrModuleList, device_ids: Union[None, List[int]] = None, ) -> None: @@ -168,8 +167,6 @@

Source code for captum.attr._core.layer.layer_activation

) else: return [ - # pyre-fixme[6]: For 2nd argument expected `Tuple[Tensor, ...]` but - # got `Tensor`. _format_output(len(single_layer_eval) > 1, single_layer_eval) for single_layer_eval in layer_eval ]
diff --git a/api/_modules/captum/attr/_core/layer/layer_activation/index.html b/api/_modules/captum/attr/_core/layer/layer_activation/index.html index 7697b8345f..cfde20afb7 100644 --- a/api/_modules/captum/attr/_core/layer/layer_activation/index.html +++ b/api/_modules/captum/attr/_core/layer/layer_activation/index.html @@ -54,8 +54,7 @@

Source code for captum.attr._core.layer.layer_activation

def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Union[int, float, Tensor]], layer: ModuleOrModuleList, device_ids: Union[None, List[int]] = None, ) -> None: @@ -168,8 +167,6 @@

Source code for captum.attr._core.layer.layer_activation

) else: return [ - # pyre-fixme[6]: For 2nd argument expected `Tuple[Tensor, ...]` but - # got `Tensor`. _format_output(len(single_layer_eval) > 1, single_layer_eval) for single_layer_eval in layer_eval ]
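Aside (illustrative, not part of the patch): unlike the gradient-based attributions above, `LayerActivation` only needs to run the forward pass and record the chosen layer's output, which is presumably why its `forward_func` is annotated with the broader `Callable[..., Union[int, float, Tensor]]`. A minimal sketch with a hypothetical two-layer model:

import torch
import torch.nn as nn
from captum.attr import LayerActivation

net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 3))  # hypothetical model
layer_act = LayerActivation(net, net[1])       # record the ReLU activations
activations = layer_act.attribute(torch.randn(5, 4))
print(activations.shape)  # torch.Size([5, 8]): ReLU output for each of the 5 inputs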
diff --git a/api/_modules/captum/attr/_core/layer/layer_conductance.html b/api/_modules/captum/attr/_core/layer/layer_conductance.html index 4929001652..85b6f436d4 100644 --- a/api/_modules/captum/attr/_core/layer/layer_conductance.html +++ b/api/_modules/captum/attr/_core/layer/layer_conductance.html @@ -34,7 +34,7 @@

Source code for captum.attr._core.layer.layer_conductance

# pyre-strict import typing -from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union +from typing import Any, Callable, cast, Dict, List, Literal, Optional, Tuple, Union import torch from captum._utils.common import ( @@ -78,8 +78,7 @@

Source code for captum.attr._core.layer.layer_conductance

def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: Module, device_ids: Union[None, List[int]] = None, ) -> None: @@ -110,8 +109,6 @@

Source code for captum.attr._core.layer.layer_conductance

@typing.overload - # pyre-fixme[43]: The implementation of `attribute` does not accept all possible - # arguments of overload defined on line `75`. def attribute( self, inputs: Union[Tensor, Tuple[Tensor, ...]], @@ -128,8 +125,6 @@

Source code for captum.attr._core.layer.layer_conductance

) -> Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor]: ... @typing.overload - # pyre-fixme[43]: The implementation of `attribute` does not accept all possible - # arguments of overload defined on line `91`. def attribute( self, inputs: Union[Tensor, Tuple[Tensor, ...]], @@ -416,7 +411,7 @@

Source code for captum.attr._core.layer.layer_conductance

layer_evals, ) = compute_layer_gradients_and_eval( forward_fn=self.forward_func, - layer=self.layer, + layer=cast(Module, self.layer), inputs=scaled_features_tpl, additional_forward_args=input_additional_args, target_ind=expanded_target, @@ -429,8 +424,6 @@

Source code for captum.attr._core.layer.layer_conductance

# This approximates the total input gradient of each step multiplied # by the step size. grad_diffs = tuple( - # pyre-fixme[58]: `-` is not supported for operand types `Tuple[Tensor, - # ...]` and `Tuple[Tensor, ...]`. layer_eval[num_examples:] - layer_eval[:-num_examples] for layer_eval in layer_evals ) @@ -443,8 +436,7 @@

Source code for captum.attr._core.layer.layer_conductance

grad_diff * layer_gradient[:-num_examples], n_steps, num_examples, - # pyre-fixme[16]: `tuple` has no attribute `shape`. - layer_eval.shape[1:], + tuple(layer_eval.shape[1:]), ) for layer_gradient, layer_eval, grad_diff in zip( layer_gradients, layer_evals, grad_diffs diff --git a/api/_modules/captum/attr/_core/layer/layer_conductance/index.html b/api/_modules/captum/attr/_core/layer/layer_conductance/index.html index 4929001652..85b6f436d4 100644 --- a/api/_modules/captum/attr/_core/layer/layer_conductance/index.html +++ b/api/_modules/captum/attr/_core/layer/layer_conductance/index.html @@ -34,7 +34,7 @@

Source code for captum.attr._core.layer.layer_conductance

# pyre-strict import typing -from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union +from typing import Any, Callable, cast, Dict, List, Literal, Optional, Tuple, Union import torch from captum._utils.common import ( @@ -78,8 +78,7 @@

Source code for captum.attr._core.layer.layer_conductance

def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: Module, device_ids: Union[None, List[int]] = None, ) -> None: @@ -110,8 +109,6 @@

Source code for captum.attr._core.layer.layer_conductance

@typing.overload - # pyre-fixme[43]: The implementation of `attribute` does not accept all possible - # arguments of overload defined on line `75`. def attribute( self, inputs: Union[Tensor, Tuple[Tensor, ...]], @@ -128,8 +125,6 @@

Source code for captum.attr._core.layer.layer_conductance

) -> Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor]: ... @typing.overload - # pyre-fixme[43]: The implementation of `attribute` does not accept all possible - # arguments of overload defined on line `91`. def attribute( self, inputs: Union[Tensor, Tuple[Tensor, ...]], @@ -416,7 +411,7 @@

Source code for captum.attr._core.layer.layer_conductance

layer_evals, ) = compute_layer_gradients_and_eval( forward_fn=self.forward_func, - layer=self.layer, + layer=cast(Module, self.layer), inputs=scaled_features_tpl, additional_forward_args=input_additional_args, target_ind=expanded_target, @@ -429,8 +424,6 @@

Source code for captum.attr._core.layer.layer_conductance

# This approximates the total input gradient of each step multiplied # by the step size. grad_diffs = tuple( - # pyre-fixme[58]: `-` is not supported for operand types `Tuple[Tensor, - # ...]` and `Tuple[Tensor, ...]`. layer_eval[num_examples:] - layer_eval[:-num_examples] for layer_eval in layer_evals ) @@ -443,8 +436,7 @@

Source code for captum.attr._core.layer.layer_conductance

grad_diff * layer_gradient[:-num_examples], n_steps, num_examples, - # pyre-fixme[16]: `tuple` has no attribute `shape`. - layer_eval.shape[1:], + tuple(layer_eval.shape[1:]), ) for layer_gradient, layer_eval, grad_diff in zip( layer_gradients, layer_evals, grad_diffs diff --git a/api/_modules/captum/attr/_core/layer/layer_deep_lift.html b/api/_modules/captum/attr/_core/layer/layer_deep_lift.html index e4858361e8..ef2093cfa6 100644 --- a/api/_modules/captum/attr/_core/layer/layer_deep_lift.html +++ b/api/_modules/captum/attr/_core/layer/layer_deep_lift.html @@ -357,8 +357,9 @@

Source code for captum.attr._core.layer.layer_deep_lift

additional_forward_args, ) - # pyre-fixme[24]: Generic type `Sequence` expects 1 type parameter. - def chunk_output_fn(out: TensorOrTupleOfTensorsGeneric) -> Sequence: + def chunk_output_fn( + out: TensorOrTupleOfTensorsGeneric, + ) -> Sequence[Union[Tensor, Sequence[Tensor]]]: if isinstance(out, Tensor): return out.chunk(2) return tuple(out_sub.chunk(2) for out_sub in out) @@ -474,8 +475,6 @@

Source code for captum.attr._core.layer.layer_deep_lift

# Ignoring mypy error for inconsistent signature with DeepLiftShap @typing.overload # type: ignore - # pyre-fixme[43]: The implementation of `attribute` does not accept all possible - # arguments of overload defined on line `453`. def attribute( self, inputs: Union[Tensor, Tuple[Tensor, ...]], @@ -490,9 +489,7 @@

Source code for captum.attr._core.layer.layer_deep_lift

custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None, ) -> Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor]: ... - @typing.overload - # pyre-fixme[43]: The implementation of `attribute` does not accept all possible - # arguments of overload defined on line `439`. + @typing.overload # type: ignore def attribute( self, inputs: Union[Tensor, Tuple[Tensor, ...]], @@ -696,7 +693,7 @@

Source code for captum.attr._core.layer.layer_deep_lift

) = DeepLiftShap._expand_inputs_baselines_targets( self, baselines, inputs, target, additional_forward_args ) - attributions = LayerDeepLift.attribute.__wrapped__( # type: ignore + attribs_layer_deeplift = LayerDeepLift.attribute.__wrapped__( # type: ignore self, exp_inp, exp_base, @@ -709,8 +706,12 @@

Source code for captum.attr._core.layer.layer_deep_lift

attribute_to_layer_input=attribute_to_layer_input, custom_attribution_func=custom_attribution_func, ) + delta: Tensor + attributions: Union[Tensor, Tuple[Tensor, ...]] if return_convergence_delta: - attributions, delta = attributions + attributions, delta = attribs_layer_deeplift + else: + attributions = attribs_layer_deeplift if isinstance(attributions, tuple): attributions = tuple( DeepLiftShap._compute_mean_across_baselines( @@ -723,17 +724,19 @@

Source code for captum.attr._core.layer.layer_deep_lift

self, inp_bsz, base_bsz, attributions ) if return_convergence_delta: - # pyre-fixme[61]: `delta` is undefined, or not always defined. return attributions, delta else: - # pyre-fixme[7]: Expected `Union[Tuple[Union[Tensor, - # typing.Tuple[Tensor, ...]], Tensor], Tensor, typing.Tuple[Tensor, ...]]` - # but got `Union[tuple[Tensor], Tensor]`. - return attributions
+ return cast( + Union[ + Tensor, + Tuple[Tensor, ...], + Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor], + ], + attributions, + )
@property - # pyre-fixme[3]: Return type must be annotated. def multiplies_by_inputs(self) -> bool: return self._multiply_by_inputs
diff --git a/api/_modules/captum/attr/_core/layer/layer_deep_lift/index.html b/api/_modules/captum/attr/_core/layer/layer_deep_lift/index.html index e4858361e8..ef2093cfa6 100644 --- a/api/_modules/captum/attr/_core/layer/layer_deep_lift/index.html +++ b/api/_modules/captum/attr/_core/layer/layer_deep_lift/index.html @@ -357,8 +357,9 @@

Source code for captum.attr._core.layer.layer_deep_lift

additional_forward_args, ) - # pyre-fixme[24]: Generic type `Sequence` expects 1 type parameter. - def chunk_output_fn(out: TensorOrTupleOfTensorsGeneric) -> Sequence: + def chunk_output_fn( + out: TensorOrTupleOfTensorsGeneric, + ) -> Sequence[Union[Tensor, Sequence[Tensor]]]: if isinstance(out, Tensor): return out.chunk(2) return tuple(out_sub.chunk(2) for out_sub in out) @@ -474,8 +475,6 @@

Source code for captum.attr._core.layer.layer_deep_lift

# Ignoring mypy error for inconsistent signature with DeepLiftShap @typing.overload # type: ignore - # pyre-fixme[43]: The implementation of `attribute` does not accept all possible - # arguments of overload defined on line `453`. def attribute( self, inputs: Union[Tensor, Tuple[Tensor, ...]], @@ -490,9 +489,7 @@

Source code for captum.attr._core.layer.layer_deep_lift

custom_attribution_func: Union[None, Callable[..., Tuple[Tensor, ...]]] = None, ) -> Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor]: ... - @typing.overload - # pyre-fixme[43]: The implementation of `attribute` does not accept all possible - # arguments of overload defined on line `439`. + @typing.overload # type: ignore def attribute( self, inputs: Union[Tensor, Tuple[Tensor, ...]], @@ -696,7 +693,7 @@

Source code for captum.attr._core.layer.layer_deep_lift

) = DeepLiftShap._expand_inputs_baselines_targets( self, baselines, inputs, target, additional_forward_args ) - attributions = LayerDeepLift.attribute.__wrapped__( # type: ignore + attribs_layer_deeplift = LayerDeepLift.attribute.__wrapped__( # type: ignore self, exp_inp, exp_base, @@ -709,8 +706,12 @@

Source code for captum.attr._core.layer.layer_deep_lift

attribute_to_layer_input=attribute_to_layer_input, custom_attribution_func=custom_attribution_func, ) + delta: Tensor + attributions: Union[Tensor, Tuple[Tensor, ...]] if return_convergence_delta: - attributions, delta = attributions + attributions, delta = attribs_layer_deeplift + else: + attributions = attribs_layer_deeplift if isinstance(attributions, tuple): attributions = tuple( DeepLiftShap._compute_mean_across_baselines( @@ -723,17 +724,19 @@

Source code for captum.attr._core.layer.layer_deep_lift

self, inp_bsz, base_bsz, attributions ) if return_convergence_delta: - # pyre-fixme[61]: `delta` is undefined, or not always defined. return attributions, delta else: - # pyre-fixme[7]: Expected `Union[Tuple[Union[Tensor, - # typing.Tuple[Tensor, ...]], Tensor], Tensor, typing.Tuple[Tensor, ...]]` - # but got `Union[tuple[Tensor], Tensor]`. - return attributions
+ return cast( + Union[ + Tensor, + Tuple[Tensor, ...], + Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor], + ], + attributions, + )
@property - # pyre-fixme[3]: Return type must be annotated. def multiplies_by_inputs(self) -> bool: return self._multiply_by_inputs
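Aside (illustrative, not part of the patch): the reworked return path in `LayerDeepLiftShap.attribute` above separates the `(attributions, delta)` case from the plain-attributions case, mirroring how callers unpack the result depending on `return_convergence_delta`. A minimal sketch using the sibling `LayerDeepLift` class with a hypothetical model:

import torch
import torch.nn as nn
from captum.attr import LayerDeepLift

net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 3))  # hypothetical model
ldl = LayerDeepLift(net, net[0])               # attribute to the first Linear layer
inputs = torch.randn(5, 4)
baselines = torch.zeros(5, 4)

# With return_convergence_delta=True the call returns an (attributions, delta) pair ...
attrs, delta = ldl.attribute(
    inputs, baselines=baselines, target=0, return_convergence_delta=True
)
# ... otherwise only the attributions are returned.
attrs_only = ldl.attribute(inputs, baselines=baselines, target=0)
print(attrs.shape, delta.shape, attrs_only.shape)
# torch.Size([5, 8]) torch.Size([5]) torch.Size([5, 8])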
diff --git a/api/_modules/captum/attr/_core/layer/layer_gradient_shap.html b/api/_modules/captum/attr/_core/layer/layer_gradient_shap.html index d47d6683ab..15ba2ceee8 100644 --- a/api/_modules/captum/attr/_core/layer/layer_gradient_shap.html +++ b/api/_modules/captum/attr/_core/layer/layer_gradient_shap.html @@ -95,8 +95,7 @@

Source code for captum.attr._core.layer.layer_gradient_shap

def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: Module, device_ids: Union[None, List[int]] = None, multiply_by_inputs: bool = True, @@ -138,13 +137,12 @@

Source code for captum.attr._core.layer.layer_gradient_shap

self._multiply_by_inputs = multiply_by_inputs @typing.overload - # pyre-fixme[43]: The implementation of `attribute` does not accept all possible - # arguments of overload defined on line `106`. def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - baselines: Union[TensorOrTupleOfTensorsGeneric, Callable], + baselines: Union[ + TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric] + ], n_samples: int = 5, stdevs: Union[float, Tuple[float, ...]] = 0.0, target: TargetType = None, @@ -155,13 +153,12 @@

Source code for captum.attr._core.layer.layer_gradient_shap

) -> Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor]: ... @typing.overload - # pyre-fixme[43]: The implementation of `attribute` does not accept all possible - # arguments of overload defined on line `120`. def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - baselines: Union[TensorOrTupleOfTensorsGeneric, Callable], + baselines: Union[ + TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric] + ], n_samples: int = 5, stdevs: Union[float, Tuple[float, ...]] = 0.0, target: TargetType = None, @@ -173,11 +170,14 @@

Source code for captum.attr._core.layer.layer_gradient_shap

[docs] @log_usage() + # pyre-fixme[43]: This definition does not have the same decorators as the + # preceding overload(s). def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - baselines: Union[TensorOrTupleOfTensorsGeneric, Callable], + baselines: Union[ + TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric] + ], n_samples: int = 5, stdevs: Union[float, Tuple[float, ...]] = 0.0, target: TargetType = None, @@ -330,17 +330,10 @@

Source code for captum.attr._core.layer.layer_gradient_shap

""" # since `baselines` is a distribution, we can generate it using a function # rather than passing it as an input argument - # pyre-fixme[9]: baselines has type `Union[typing.Callable[..., typing.Any], - # Variable[TensorOrTupleOfTensorsGeneric <: [Tensor, typing.Tuple[Tensor, - # ...]]]]`; used as `Tuple[Tensor, ...]`. - baselines = _format_callable_baseline(baselines, inputs) - # pyre-fixme[16]: Item `Callable` of `Union[(...) -> Any, - # TensorOrTupleOfTensorsGeneric]` has no attribute `__getitem__`. - assert isinstance(baselines[0], torch.Tensor), ( + formatted_baselines = _format_callable_baseline(baselines, inputs) + assert isinstance(formatted_baselines[0], torch.Tensor), ( "Baselines distribution has to be provided in a form " - # pyre-fixme[16]: Item `Callable` of `Union[(...) -> Any, - # TensorOrTupleOfTensorsGeneric]` has no attribute `__getitem__`. - "of a torch.Tensor {}.".format(baselines[0]) + "of a torch.Tensor {}.".format(formatted_baselines[0]) ) input_min_baseline_x_grad = LayerInputBaselineXGradient( @@ -359,7 +352,7 @@

Source code for captum.attr._core.layer.layer_gradient_shap

nt_samples=n_samples, stdevs=stdevs, draw_baseline_from_distrib=True, - baselines=baselines, + baselines=formatted_baselines, target=target, additional_forward_args=additional_forward_args, return_convergence_delta=return_convergence_delta, @@ -384,8 +377,7 @@

Source code for captum.attr._core.layer.layer_gradient_shap

class LayerInputBaselineXGradient(LayerAttribution, GradientAttribution): def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: Module, device_ids: Union[None, List[int]] = None, multiply_by_inputs: bool = True, @@ -477,7 +469,7 @@

Source code for captum.attr._core.layer.layer_gradient_shap

) grads, _ = compute_layer_gradients_and_eval( self.forward_func, - self.layer, + cast(Module, self.layer), input_baseline_scaled, target, additional_forward_args, @@ -489,7 +481,7 @@

Source code for captum.attr._core.layer.layer_gradient_shap

attr_baselines = _forward_layer_eval( self.forward_func, baselines, - self.layer, + cast(Module, self.layer), additional_forward_args=additional_forward_args, device_ids=self.device_ids, attribute_to_layer_input=attribute_to_layer_input, @@ -498,19 +490,15 @@

Source code for captum.attr._core.layer.layer_gradient_shap

attr_inputs = _forward_layer_eval( self.forward_func, inputs, - self.layer, + cast(Module, self.layer), additional_forward_args=additional_forward_args, device_ids=self.device_ids, attribute_to_layer_input=attribute_to_layer_input, ) - + attributions: Tuple[Tensor, ...] if self.multiplies_by_inputs: input_baseline_diffs = tuple( - # pyre-fixme[58]: `-` is not supported for operand types - # `typing.Tuple[torch._tensor.Tensor, ...]` and - # `typing.Tuple[torch._tensor.Tensor, ...]`. - input - baseline - for input, baseline in zip(attr_inputs, attr_baselines) + input - baseline for input, baseline in zip(attr_inputs, attr_baselines) ) attributions = tuple( input_baseline_diff * grad @@ -522,8 +510,6 @@

Source code for captum.attr._core.layer.layer_gradient_shap

return _compute_conv_delta_and_format_attrs( self, return_convergence_delta, - # pyre-fixme[6]: For 3rd argument expected `Tuple[Tensor, ...]` but got - # `Union[List[typing.Tuple[Tensor, ...]], tuple[Tensor]]`. attributions, baselines, inputs, diff --git a/api/_modules/captum/attr/_core/layer/layer_gradient_shap/index.html b/api/_modules/captum/attr/_core/layer/layer_gradient_shap/index.html index d47d6683ab..15ba2ceee8 100644 --- a/api/_modules/captum/attr/_core/layer/layer_gradient_shap/index.html +++ b/api/_modules/captum/attr/_core/layer/layer_gradient_shap/index.html @@ -95,8 +95,7 @@

Source code for captum.attr._core.layer.layer_gradient_shap

def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: Module, device_ids: Union[None, List[int]] = None, multiply_by_inputs: bool = True, @@ -138,13 +137,12 @@

Source code for captum.attr._core.layer.layer_gradient_shap

self._multiply_by_inputs = multiply_by_inputs @typing.overload - # pyre-fixme[43]: The implementation of `attribute` does not accept all possible - # arguments of overload defined on line `106`. def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - baselines: Union[TensorOrTupleOfTensorsGeneric, Callable], + baselines: Union[ + TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric] + ], n_samples: int = 5, stdevs: Union[float, Tuple[float, ...]] = 0.0, target: TargetType = None, @@ -155,13 +153,12 @@

Source code for captum.attr._core.layer.layer_gradient_shap

) -> Tuple[Union[Tensor, Tuple[Tensor, ...]], Tensor]: ... @typing.overload - # pyre-fixme[43]: The implementation of `attribute` does not accept all possible - # arguments of overload defined on line `120`. def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - baselines: Union[TensorOrTupleOfTensorsGeneric, Callable], + baselines: Union[ + TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric] + ], n_samples: int = 5, stdevs: Union[float, Tuple[float, ...]] = 0.0, target: TargetType = None, @@ -173,11 +170,14 @@

Source code for captum.attr._core.layer.layer_gradient_shap

[docs] @log_usage() + # pyre-fixme[43]: This definition does not have the same decorators as the + # preceding overload(s). def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - baselines: Union[TensorOrTupleOfTensorsGeneric, Callable], + baselines: Union[ + TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric] + ], n_samples: int = 5, stdevs: Union[float, Tuple[float, ...]] = 0.0, target: TargetType = None, @@ -330,17 +330,10 @@

Source code for captum.attr._core.layer.layer_gradient_shap

""" # since `baselines` is a distribution, we can generate it using a function # rather than passing it as an input argument - # pyre-fixme[9]: baselines has type `Union[typing.Callable[..., typing.Any], - # Variable[TensorOrTupleOfTensorsGeneric <: [Tensor, typing.Tuple[Tensor, - # ...]]]]`; used as `Tuple[Tensor, ...]`. - baselines = _format_callable_baseline(baselines, inputs) - # pyre-fixme[16]: Item `Callable` of `Union[(...) -> Any, - # TensorOrTupleOfTensorsGeneric]` has no attribute `__getitem__`. - assert isinstance(baselines[0], torch.Tensor), ( + formatted_baselines = _format_callable_baseline(baselines, inputs) + assert isinstance(formatted_baselines[0], torch.Tensor), ( "Baselines distribution has to be provided in a form " - # pyre-fixme[16]: Item `Callable` of `Union[(...) -> Any, - # TensorOrTupleOfTensorsGeneric]` has no attribute `__getitem__`. - "of a torch.Tensor {}.".format(baselines[0]) + "of a torch.Tensor {}.".format(formatted_baselines[0]) ) input_min_baseline_x_grad = LayerInputBaselineXGradient( @@ -359,7 +352,7 @@

Source code for captum.attr._core.layer.layer_gradient_shap

nt_samples=n_samples, stdevs=stdevs, draw_baseline_from_distrib=True, - baselines=baselines, + baselines=formatted_baselines, target=target, additional_forward_args=additional_forward_args, return_convergence_delta=return_convergence_delta, @@ -384,8 +377,7 @@

Source code for captum.attr._core.layer.layer_gradient_shap

class LayerInputBaselineXGradient(LayerAttribution, GradientAttribution): def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: Module, device_ids: Union[None, List[int]] = None, multiply_by_inputs: bool = True, @@ -477,7 +469,7 @@

Source code for captum.attr._core.layer.layer_gradient_shap

) grads, _ = compute_layer_gradients_and_eval( self.forward_func, - self.layer, + cast(Module, self.layer), input_baseline_scaled, target, additional_forward_args, @@ -489,7 +481,7 @@

Source code for captum.attr._core.layer.layer_gradient_shap

attr_baselines = _forward_layer_eval( self.forward_func, baselines, - self.layer, + cast(Module, self.layer), additional_forward_args=additional_forward_args, device_ids=self.device_ids, attribute_to_layer_input=attribute_to_layer_input, @@ -498,19 +490,15 @@

Source code for captum.attr._core.layer.layer_gradient_shap

attr_inputs = _forward_layer_eval( self.forward_func, inputs, - self.layer, + cast(Module, self.layer), additional_forward_args=additional_forward_args, device_ids=self.device_ids, attribute_to_layer_input=attribute_to_layer_input, ) - + attributions: Tuple[Tensor, ...] if self.multiplies_by_inputs: input_baseline_diffs = tuple( - # pyre-fixme[58]: `-` is not supported for operand types - # `typing.Tuple[torch._tensor.Tensor, ...]` and - # `typing.Tuple[torch._tensor.Tensor, ...]`. - input - baseline - for input, baseline in zip(attr_inputs, attr_baselines) + input - baseline for input, baseline in zip(attr_inputs, attr_baselines) ) attributions = tuple( input_baseline_diff * grad @@ -522,8 +510,6 @@

Source code for captum.attr._core.layer.layer_gradient_shap

return _compute_conv_delta_and_format_attrs( self, return_convergence_delta, - # pyre-fixme[6]: For 3rd argument expected `Tuple[Tensor, ...]` but got - # `Union[List[typing.Tuple[Tensor, ...]], tuple[Tensor]]`. attributions, baselines, inputs, diff --git a/api/_modules/captum/attr/_core/layer/layer_gradient_x_activation.html b/api/_modules/captum/attr/_core/layer/layer_gradient_x_activation.html index 64ba05d2ee..5332f63590 100644 --- a/api/_modules/captum/attr/_core/layer/layer_gradient_x_activation.html +++ b/api/_modules/captum/attr/_core/layer/layer_gradient_x_activation.html @@ -33,7 +33,7 @@

Source code for captum.attr._core.layer.layer_gradient_x_activation

#!/usr/bin/env python3 # pyre-strict -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union from captum._utils.common import ( _format_additional_forward_args, @@ -58,8 +58,7 @@

Source code for captum.attr._core.layer.layer_gradient_x_activation

def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: ModuleOrModuleList, device_ids: Union[None, List[int]] = None, multiply_by_inputs: bool = True, @@ -222,11 +221,10 @@

Source code for captum.attr._core.layer.layer_gradient_x_activation

if isinstance(self.layer, Module): return _format_output( len(layer_evals) > 1, - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but - # got `List[typing.Tuple[Tensor, ...]]`. - # pyre-fixme[6]: For 2nd argument expected `Tuple[Tensor, ...]` but - # got `List[typing.Tuple[Tensor, ...]]`. - self.multiply_gradient_acts(layer_gradients, layer_evals), + self.multiply_gradient_acts( + cast(Tuple[Tensor, ...], layer_gradients), + cast(Tuple[Tensor, ...], layer_evals), + ), ) else: return [ diff --git a/api/_modules/captum/attr/_core/layer/layer_gradient_x_activation/index.html b/api/_modules/captum/attr/_core/layer/layer_gradient_x_activation/index.html index 64ba05d2ee..5332f63590 100644 --- a/api/_modules/captum/attr/_core/layer/layer_gradient_x_activation/index.html +++ b/api/_modules/captum/attr/_core/layer/layer_gradient_x_activation/index.html @@ -33,7 +33,7 @@

Source code for captum.attr._core.layer.layer_gradient_x_activation

#!/usr/bin/env python3 # pyre-strict -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union from captum._utils.common import ( _format_additional_forward_args, @@ -58,8 +58,7 @@

Source code for captum.attr._core.layer.layer_gradient_x_activation

def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: ModuleOrModuleList, device_ids: Union[None, List[int]] = None, multiply_by_inputs: bool = True, @@ -222,11 +221,10 @@

Source code for captum.attr._core.layer.layer_gradient_x_activation

if isinstance(self.layer, Module): return _format_output( len(layer_evals) > 1, - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but - # got `List[typing.Tuple[Tensor, ...]]`. - # pyre-fixme[6]: For 2nd argument expected `Tuple[Tensor, ...]` but - # got `List[typing.Tuple[Tensor, ...]]`. - self.multiply_gradient_acts(layer_gradients, layer_evals), + self.multiply_gradient_acts( + cast(Tuple[Tensor, ...], layer_gradients), + cast(Tuple[Tensor, ...], layer_evals), + ), ) else: return [ diff --git a/api/_modules/captum/attr/_core/layer/layer_lrp.html b/api/_modules/captum/attr/_core/layer/layer_lrp.html index 8eb03e7163..57376239dd 100644 --- a/api/_modules/captum/attr/_core/layer/layer_lrp.html +++ b/api/_modules/captum/attr/_core/layer/layer_lrp.html @@ -34,7 +34,9 @@

Source code for captum.attr._core.layer.layer_lrp

# pyre-strict import typing -from typing import Any, cast, List, Literal, Optional, Tuple, Union +from typing import cast, Dict, List, Literal, Optional, Tuple, TypeVar, Union + +import torch from captum._utils.common import ( _format_tensor_into_tuples, @@ -53,8 +55,12 @@

Source code for captum.attr._core.layer.layer_lrp

) from captum.attr._core.lrp import LRP from captum.attr._utils.attribution import LayerAttribution +from captum.attr._utils.lrp_rules import PropagationRule from torch import Tensor from torch.nn import Module +from torch.utils.hooks import RemovableHandle + +T = TypeVar("T")
@@ -73,6 +79,13 @@

Source code for captum.attr._core.layer.layer_lrp

Ancona et al. [https://openreview.net/forum?id=Sy21R9JAW]. """ + device_ids: List[int] + verbose: bool + layers: List[Module] + attribute_to_layer_input: bool = False + backward_handles: List[RemovableHandle] + forward_handles: List[RemovableHandle] + def __init__(self, model: Module, layer: ModuleOrModuleList) -> None: """ Args: @@ -93,7 +106,6 @@

Source code for captum.attr._core.layer.layer_lrp

LayerAttribution.__init__(self, model, layer) LRP.__init__(self, model) if hasattr(self.model, "device_ids"): - # pyre-fixme[4]: Attribute must be annotated. self.device_ids = cast(List[int], self.model.device_ids) @typing.overload # type: ignore @@ -244,48 +256,34 @@

Source code for captum.attr._core.layer.layer_lrp

>>> attribution = layer_lrp.attribute(input, target=5) """ - # pyre-fixme[16]: `LayerLRP` has no attribute `verbose`. self.verbose = verbose - # pyre-fixme[16]: `LayerLRP` has no attribute `_original_state_dict`. self._original_state_dict = self.model.state_dict() - # pyre-fixme[16]: `LayerLRP` has no attribute `layers`. self.layers = [] self._get_layers(self.model) self._check_and_attach_rules() - # pyre-fixme[16]: `LayerLRP` has no attribute `attribute_to_layer_input`. self.attribute_to_layer_input = attribute_to_layer_input - # pyre-fixme[16]: `LayerLRP` has no attribute `backward_handles`. self.backward_handles = [] - # pyre-fixme[16]: `LayerLRP` has no attribute `forward_handles`. self.forward_handles = [] - # pyre-fixme[9]: inputs has type `TensorOrTupleOfTensorsGeneric`; used as - # `Tuple[Tensor, ...]`. - inputs = _format_tensor_into_tuples(inputs) - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but got - # `TensorOrTupleOfTensorsGeneric`. - gradient_mask = apply_gradient_requirements(inputs) + inputs_tuple = _format_tensor_into_tuples(inputs) + gradient_mask = apply_gradient_requirements(inputs_tuple) try: # 1. Forward pass output = self._compute_output_and_change_weights( - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but - # got `TensorOrTupleOfTensorsGeneric`. - inputs, + inputs_tuple, target, additional_forward_args, ) self._register_forward_hooks() # 2. Forward pass + backward pass _ = compute_gradients( - self._forward_fn_wrapper, inputs, target, additional_forward_args + self._forward_fn_wrapper, inputs_tuple, target, additional_forward_args ) relevances = self._get_output_relevance(output) finally: self._restore_model() - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but got - # `TensorOrTupleOfTensorsGeneric`. - undo_gradient_requirements(inputs, gradient_mask) + undo_gradient_requirements(inputs_tuple, gradient_mask) if return_convergence_delta: delta: Union[Tensor, List[Tensor]] @@ -293,7 +291,10 @@

Source code for captum.attr._core.layer.layer_lrp

delta = [] for relevance_layer in relevances: delta.append( - self.compute_convergence_delta(relevance_layer, output) + self.compute_convergence_delta( + cast(Union[Tensor, Tuple[Tensor, ...]], relevance_layer), + output, + ) ) else: delta = self.compute_convergence_delta( @@ -304,33 +305,35 @@

Source code for captum.attr._core.layer.layer_lrp

return relevances # type: ignore
- # pyre-fixme[3]: Return type must be annotated. - # pyre-fixme[2]: Parameter must be annotated. - def _get_single_output_relevance(self, layer, output): - # pyre-fixme[16]: `LayerLRP` has no attribute `attribute_to_layer_input`. + def _get_single_output_relevance( + self, layer: Module, output: Tensor + ) -> Union[Tensor, Tuple[Tensor, ...]]: if self.attribute_to_layer_input: - normalized_relevances = layer.rule.relevance_input + normalized_relevances = cast( + Dict[torch.device, Tensor], + cast(PropagationRule, layer.rule).relevance_input, + ) else: - normalized_relevances = layer.rule.relevance_output + normalized_relevances = cast(PropagationRule, layer.rule).relevance_output key_list = _sort_key_list(list(normalized_relevances.keys()), self.device_ids) - normalized_relevances = _reduce_list( + normalized_relevances_reduced = _reduce_list( [normalized_relevances[device_id] for device_id in key_list] ) - if isinstance(normalized_relevances, tuple): + if isinstance(normalized_relevances_reduced, tuple): return tuple( normalized_relevance * output.reshape((-1,) + (1,) * (normalized_relevance.dim() - 1)) - for normalized_relevance in normalized_relevances + for normalized_relevance in normalized_relevances_reduced ) else: - return normalized_relevances * output.reshape( - (-1,) + (1,) * (normalized_relevances.dim() - 1) + return normalized_relevances_reduced * output.reshape( + (-1,) + (1,) * (normalized_relevances_reduced.dim() - 1) ) - # pyre-fixme[3]: Return type must be annotated. - # pyre-fixme[2]: Parameter must be annotated. - def _get_output_relevance(self, output): + def _get_output_relevance( + self, output: Tensor + ) -> Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]]: if isinstance(self.layer, list): relevances = [] for layer in self.layer: @@ -340,11 +343,9 @@

Source code for captum.attr._core.layer.layer_lrp

return self._get_single_output_relevance(self.layer, output) @staticmethod - # pyre-fixme[3]: Return annotation cannot contain `Any`. def _convert_list_to_tuple( - # pyre-fixme[2]: Parameter annotation cannot contain `Any`. - relevances: Union[List[Any], Tuple[Any, ...]] - ) -> Tuple[Any, ...]: + relevances: Union[List[T], Tuple[T, ...]] + ) -> Tuple[T, ...]: if isinstance(relevances, list): return tuple(relevances) else: diff --git a/api/_modules/captum/attr/_core/layer/layer_lrp/index.html b/api/_modules/captum/attr/_core/layer/layer_lrp/index.html index 8eb03e7163..57376239dd 100644 --- a/api/_modules/captum/attr/_core/layer/layer_lrp/index.html +++ b/api/_modules/captum/attr/_core/layer/layer_lrp/index.html @@ -34,7 +34,9 @@

Source code for captum.attr._core.layer.layer_lrp

# pyre-strict import typing -from typing import Any, cast, List, Literal, Optional, Tuple, Union +from typing import cast, Dict, List, Literal, Optional, Tuple, TypeVar, Union + +import torch from captum._utils.common import ( _format_tensor_into_tuples, @@ -53,8 +55,12 @@

Source code for captum.attr._core.layer.layer_lrp

) from captum.attr._core.lrp import LRP from captum.attr._utils.attribution import LayerAttribution +from captum.attr._utils.lrp_rules import PropagationRule from torch import Tensor from torch.nn import Module +from torch.utils.hooks import RemovableHandle + +T = TypeVar("T")
@@ -73,6 +79,13 @@

Source code for captum.attr._core.layer.layer_lrp

Ancona et al. [https://openreview.net/forum?id=Sy21R9JAW]. """ + device_ids: List[int] + verbose: bool + layers: List[Module] + attribute_to_layer_input: bool = False + backward_handles: List[RemovableHandle] + forward_handles: List[RemovableHandle] + def __init__(self, model: Module, layer: ModuleOrModuleList) -> None: """ Args: @@ -93,7 +106,6 @@

Source code for captum.attr._core.layer.layer_lrp

LayerAttribution.__init__(self, model, layer) LRP.__init__(self, model) if hasattr(self.model, "device_ids"): - # pyre-fixme[4]: Attribute must be annotated. self.device_ids = cast(List[int], self.model.device_ids) @typing.overload # type: ignore @@ -244,48 +256,34 @@

Source code for captum.attr._core.layer.layer_lrp

>>> attribution = layer_lrp.attribute(input, target=5) """ - # pyre-fixme[16]: `LayerLRP` has no attribute `verbose`. self.verbose = verbose - # pyre-fixme[16]: `LayerLRP` has no attribute `_original_state_dict`. self._original_state_dict = self.model.state_dict() - # pyre-fixme[16]: `LayerLRP` has no attribute `layers`. self.layers = [] self._get_layers(self.model) self._check_and_attach_rules() - # pyre-fixme[16]: `LayerLRP` has no attribute `attribute_to_layer_input`. self.attribute_to_layer_input = attribute_to_layer_input - # pyre-fixme[16]: `LayerLRP` has no attribute `backward_handles`. self.backward_handles = [] - # pyre-fixme[16]: `LayerLRP` has no attribute `forward_handles`. self.forward_handles = [] - # pyre-fixme[9]: inputs has type `TensorOrTupleOfTensorsGeneric`; used as - # `Tuple[Tensor, ...]`. - inputs = _format_tensor_into_tuples(inputs) - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but got - # `TensorOrTupleOfTensorsGeneric`. - gradient_mask = apply_gradient_requirements(inputs) + inputs_tuple = _format_tensor_into_tuples(inputs) + gradient_mask = apply_gradient_requirements(inputs_tuple) try: # 1. Forward pass output = self._compute_output_and_change_weights( - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but - # got `TensorOrTupleOfTensorsGeneric`. - inputs, + inputs_tuple, target, additional_forward_args, ) self._register_forward_hooks() # 2. Forward pass + backward pass _ = compute_gradients( - self._forward_fn_wrapper, inputs, target, additional_forward_args + self._forward_fn_wrapper, inputs_tuple, target, additional_forward_args ) relevances = self._get_output_relevance(output) finally: self._restore_model() - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but got - # `TensorOrTupleOfTensorsGeneric`. - undo_gradient_requirements(inputs, gradient_mask) + undo_gradient_requirements(inputs_tuple, gradient_mask) if return_convergence_delta: delta: Union[Tensor, List[Tensor]] @@ -293,7 +291,10 @@

Source code for captum.attr._core.layer.layer_lrp

delta = [] for relevance_layer in relevances: delta.append( - self.compute_convergence_delta(relevance_layer, output) + self.compute_convergence_delta( + cast(Union[Tensor, Tuple[Tensor, ...]], relevance_layer), + output, + ) ) else: delta = self.compute_convergence_delta( @@ -304,33 +305,35 @@

Source code for captum.attr._core.layer.layer_lrp

return relevances # type: ignore
- # pyre-fixme[3]: Return type must be annotated. - # pyre-fixme[2]: Parameter must be annotated. - def _get_single_output_relevance(self, layer, output): - # pyre-fixme[16]: `LayerLRP` has no attribute `attribute_to_layer_input`. + def _get_single_output_relevance( + self, layer: Module, output: Tensor + ) -> Union[Tensor, Tuple[Tensor, ...]]: if self.attribute_to_layer_input: - normalized_relevances = layer.rule.relevance_input + normalized_relevances = cast( + Dict[torch.device, Tensor], + cast(PropagationRule, layer.rule).relevance_input, + ) else: - normalized_relevances = layer.rule.relevance_output + normalized_relevances = cast(PropagationRule, layer.rule).relevance_output key_list = _sort_key_list(list(normalized_relevances.keys()), self.device_ids) - normalized_relevances = _reduce_list( + normalized_relevances_reduced = _reduce_list( [normalized_relevances[device_id] for device_id in key_list] ) - if isinstance(normalized_relevances, tuple): + if isinstance(normalized_relevances_reduced, tuple): return tuple( normalized_relevance * output.reshape((-1,) + (1,) * (normalized_relevance.dim() - 1)) - for normalized_relevance in normalized_relevances + for normalized_relevance in normalized_relevances_reduced ) else: - return normalized_relevances * output.reshape( - (-1,) + (1,) * (normalized_relevances.dim() - 1) + return normalized_relevances_reduced * output.reshape( + (-1,) + (1,) * (normalized_relevances_reduced.dim() - 1) ) - # pyre-fixme[3]: Return type must be annotated. - # pyre-fixme[2]: Parameter must be annotated. - def _get_output_relevance(self, output): + def _get_output_relevance( + self, output: Tensor + ) -> Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]]: if isinstance(self.layer, list): relevances = [] for layer in self.layer: @@ -340,11 +343,9 @@

Source code for captum.attr._core.layer.layer_lrp

return self._get_single_output_relevance(self.layer, output) @staticmethod - # pyre-fixme[3]: Return annotation cannot contain `Any`. def _convert_list_to_tuple( - # pyre-fixme[2]: Parameter annotation cannot contain `Any`. - relevances: Union[List[Any], Tuple[Any, ...]] - ) -> Tuple[Any, ...]: + relevances: Union[List[T], Tuple[T, ...]] + ) -> Tuple[T, ...]: if isinstance(relevances, list): return tuple(relevances) else: diff --git a/api/_modules/captum/attr/_core/neuron/neuron_conductance.html b/api/_modules/captum/attr/_core/neuron/neuron_conductance.html index 2bf7186be0..07b4c4bd8d 100644 --- a/api/_modules/captum/attr/_core/neuron/neuron_conductance.html +++ b/api/_modules/captum/attr/_core/neuron/neuron_conductance.html @@ -46,7 +46,12 @@

Source code for captum.attr._core.neuron.neuron_conductance

_verify_select_neuron, ) from captum._utils.gradient import compute_layer_gradients_and_eval -from captum._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric +from captum._utils.typing import ( + BaselineType, + SliceIntType, + TargetType, + TensorOrTupleOfTensorsGeneric, +) from captum.attr._utils.approximation_methods import approximation_parameters from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution from captum.attr._utils.batching import _batch_attribution @@ -73,8 +78,7 @@

Source code for captum.attr._core.neuron.neuron_conductance

def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: Module, device_ids: Union[None, List[int]] = None, multiply_by_inputs: bool = True, @@ -130,8 +134,11 @@

Source code for captum.attr._core.neuron.neuron_conductance

def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[int, ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], baselines: BaselineType = None, target: TargetType = None, additional_forward_args: Optional[object] = None, @@ -321,28 +328,24 @@

Source code for captum.attr._core.neuron.neuron_conductance

" results.", stacklevel=1, ) - # pyre-fixme[6]: For 1st argument expected `Tensor` but got - # `TensorOrTupleOfTensorsGeneric`. is_inputs_tuple = _is_tuple(inputs) - # pyre-fixme[9]: inputs has type `TensorOrTupleOfTensorsGeneric`; used as - # `Tuple[Tensor, ...]`. - inputs, baselines = _format_input_baseline(inputs, baselines) - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but got - # `TensorOrTupleOfTensorsGeneric`. - _validate_input(inputs, baselines, n_steps, method) + formatted_inputs, formatted_baselines = _format_input_baseline( + inputs, baselines + ) + _validate_input(formatted_inputs, formatted_baselines, n_steps, method) - num_examples = inputs[0].shape[0] + num_examples = formatted_inputs[0].shape[0] if internal_batch_size is not None: - num_examples = inputs[0].shape[0] + num_examples = formatted_inputs[0].shape[0] attrs = _batch_attribution( self, num_examples, internal_batch_size, n_steps, - inputs=inputs, - baselines=baselines, + inputs=formatted_inputs, + baselines=formatted_baselines, neuron_selector=neuron_selector, target=target, additional_forward_args=additional_forward_args, @@ -351,11 +354,9 @@

Source code for captum.attr._core.neuron.neuron_conductance

) else: attrs = self._attribute( - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but - # got `TensorOrTupleOfTensorsGeneric`. - inputs=inputs, + inputs=formatted_inputs, neuron_selector=neuron_selector, - baselines=baselines, + baselines=formatted_baselines, target=target, additional_forward_args=additional_forward_args, n_steps=n_steps, @@ -371,8 +372,11 @@

Source code for captum.attr._core.neuron.neuron_conductance

def _attribute( self, inputs: Tuple[Tensor, ...], - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[int, ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], baselines: Tuple[Union[Tensor, int, float], ...], target: TargetType = None, additional_forward_args: Optional[object] = None, @@ -446,8 +450,9 @@

Source code for captum.attr._core.neuron.neuron_conductance

# Aggregates across all steps for each tensor in the input tuple total_grads = tuple( - # pyre-fixme[6]: For 4th argument expected `Tuple[int, ...]` but got `Size`. - _reshape_and_sum(scaled_grad, n_steps, num_examples, input_grad.shape[1:]) + _reshape_and_sum( + scaled_grad, n_steps, num_examples, tuple(input_grad.shape[1:]) + ) for (scaled_grad, input_grad) in zip(scaled_grads, input_grads) ) diff --git a/api/_modules/captum/attr/_core/neuron/neuron_conductance/index.html b/api/_modules/captum/attr/_core/neuron/neuron_conductance/index.html index 2bf7186be0..07b4c4bd8d 100644 --- a/api/_modules/captum/attr/_core/neuron/neuron_conductance/index.html +++ b/api/_modules/captum/attr/_core/neuron/neuron_conductance/index.html @@ -46,7 +46,12 @@

Source code for captum.attr._core.neuron.neuron_conductance

_verify_select_neuron, ) from captum._utils.gradient import compute_layer_gradients_and_eval -from captum._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric +from captum._utils.typing import ( + BaselineType, + SliceIntType, + TargetType, + TensorOrTupleOfTensorsGeneric, +) from captum.attr._utils.approximation_methods import approximation_parameters from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution from captum.attr._utils.batching import _batch_attribution @@ -73,8 +78,7 @@

Source code for captum.attr._core.neuron.neuron_conductance

def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: Module, device_ids: Union[None, List[int]] = None, multiply_by_inputs: bool = True, @@ -130,8 +134,11 @@

Source code for captum.attr._core.neuron.neuron_conductance

def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[int, ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], baselines: BaselineType = None, target: TargetType = None, additional_forward_args: Optional[object] = None, @@ -321,28 +328,24 @@

Source code for captum.attr._core.neuron.neuron_conductance

" results.", stacklevel=1, ) - # pyre-fixme[6]: For 1st argument expected `Tensor` but got - # `TensorOrTupleOfTensorsGeneric`. is_inputs_tuple = _is_tuple(inputs) - # pyre-fixme[9]: inputs has type `TensorOrTupleOfTensorsGeneric`; used as - # `Tuple[Tensor, ...]`. - inputs, baselines = _format_input_baseline(inputs, baselines) - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but got - # `TensorOrTupleOfTensorsGeneric`. - _validate_input(inputs, baselines, n_steps, method) + formatted_inputs, formatted_baselines = _format_input_baseline( + inputs, baselines + ) + _validate_input(formatted_inputs, formatted_baselines, n_steps, method) - num_examples = inputs[0].shape[0] + num_examples = formatted_inputs[0].shape[0] if internal_batch_size is not None: - num_examples = inputs[0].shape[0] + num_examples = formatted_inputs[0].shape[0] attrs = _batch_attribution( self, num_examples, internal_batch_size, n_steps, - inputs=inputs, - baselines=baselines, + inputs=formatted_inputs, + baselines=formatted_baselines, neuron_selector=neuron_selector, target=target, additional_forward_args=additional_forward_args, @@ -351,11 +354,9 @@

Source code for captum.attr._core.neuron.neuron_conductance

) else: attrs = self._attribute( - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but - # got `TensorOrTupleOfTensorsGeneric`. - inputs=inputs, + inputs=formatted_inputs, neuron_selector=neuron_selector, - baselines=baselines, + baselines=formatted_baselines, target=target, additional_forward_args=additional_forward_args, n_steps=n_steps, @@ -371,8 +372,11 @@

Source code for captum.attr._core.neuron.neuron_conductance

def _attribute( self, inputs: Tuple[Tensor, ...], - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[int, ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], baselines: Tuple[Union[Tensor, int, float], ...], target: TargetType = None, additional_forward_args: Optional[object] = None, @@ -446,8 +450,9 @@

Source code for captum.attr._core.neuron.neuron_conductance

# Aggregates across all steps for each tensor in the input tuple total_grads = tuple( - # pyre-fixme[6]: For 4th argument expected `Tuple[int, ...]` but got `Size`. - _reshape_and_sum(scaled_grad, n_steps, num_examples, input_grad.shape[1:]) + _reshape_and_sum( + scaled_grad, n_steps, num_examples, tuple(input_grad.shape[1:]) + ) for (scaled_grad, input_grad) in zip(scaled_grads, input_grads) ) diff --git a/api/_modules/captum/attr/_core/neuron/neuron_deep_lift.html b/api/_modules/captum/attr/_core/neuron/neuron_deep_lift.html index bc84760fd3..8ddf3ca001 100644 --- a/api/_modules/captum/attr/_core/neuron/neuron_deep_lift.html +++ b/api/_modules/captum/attr/_core/neuron/neuron_deep_lift.html @@ -36,7 +36,11 @@

Source code for captum.attr._core.neuron.neuron_deep_lift

from typing import Callable, cast, Optional, Tuple, Union from captum._utils.gradient import construct_neuron_grad_fn -from captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric +from captum._utils.typing import ( + BaselineType, + SliceIntType, + TensorOrTupleOfTensorsGeneric, +) from captum.attr._core.deep_lift import DeepLift, DeepLiftShap from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution from captum.log import log_usage @@ -115,8 +119,11 @@

Source code for captum.attr._core.neuron.neuron_deep_lift

def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], baselines: BaselineType = None, additional_forward_args: Optional[object] = None, attribute_to_neuron_input: bool = False, @@ -351,8 +358,11 @@

Source code for captum.attr._core.neuron.neuron_deep_lift

def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], baselines: Union[ TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric] ], diff --git a/api/_modules/captum/attr/_core/neuron/neuron_deep_lift/index.html b/api/_modules/captum/attr/_core/neuron/neuron_deep_lift/index.html index bc84760fd3..8ddf3ca001 100644 --- a/api/_modules/captum/attr/_core/neuron/neuron_deep_lift/index.html +++ b/api/_modules/captum/attr/_core/neuron/neuron_deep_lift/index.html @@ -36,7 +36,11 @@

Source code for captum.attr._core.neuron.neuron_deep_lift

from typing import Callable, cast, Optional, Tuple, Union from captum._utils.gradient import construct_neuron_grad_fn -from captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric +from captum._utils.typing import ( + BaselineType, + SliceIntType, + TensorOrTupleOfTensorsGeneric, +) from captum.attr._core.deep_lift import DeepLift, DeepLiftShap from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution from captum.log import log_usage @@ -115,8 +119,11 @@

Source code for captum.attr._core.neuron.neuron_deep_lift

def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], baselines: BaselineType = None, additional_forward_args: Optional[object] = None, attribute_to_neuron_input: bool = False, @@ -351,8 +358,11 @@

Source code for captum.attr._core.neuron.neuron_deep_lift

def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], baselines: Union[ TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric] ], diff --git a/api/_modules/captum/attr/_core/neuron/neuron_feature_ablation.html b/api/_modules/captum/attr/_core/neuron/neuron_feature_ablation.html index d59b8eed60..d96d53bc03 100644 --- a/api/_modules/captum/attr/_core/neuron/neuron_feature_ablation.html +++ b/api/_modules/captum/attr/_core/neuron/neuron_feature_ablation.html @@ -38,7 +38,11 @@

Source code for captum.attr._core.neuron.neuron_feature_ablation

import torch from captum._utils.common import _verify_select_neuron from captum._utils.gradient import _forward_layer_eval -from captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric +from captum._utils.typing import ( + BaselineType, + SliceIntType, + TensorOrTupleOfTensorsGeneric, +) from captum.attr._core.feature_ablation import FeatureAblation from captum.attr._utils.attribution import NeuronAttribution, PerturbationAttribution from captum.log import log_usage @@ -65,8 +69,7 @@

Source code for captum.attr._core.neuron.neuron_feature_ablation

def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Union[int, float, Tensor]], layer: Module, device_ids: Union[None, List[int]] = None, ) -> None: @@ -97,8 +100,11 @@

Source code for captum.attr._core.neuron.neuron_feature_ablation

def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], baselines: BaselineType = None, additional_forward_args: Optional[object] = None, feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None, @@ -286,8 +292,7 @@

Source code for captum.attr._core.neuron.neuron_feature_ablation

>>> feature_mask=feature_mask) """ - # pyre-fixme[3]: Return type must be annotated. - def neuron_forward_func(*args: Any): + def neuron_forward_func(*args: Any) -> Tensor: with torch.no_grad(): layer_eval = _forward_layer_eval( self.forward_func, diff --git a/api/_modules/captum/attr/_core/neuron/neuron_feature_ablation/index.html b/api/_modules/captum/attr/_core/neuron/neuron_feature_ablation/index.html index d59b8eed60..d96d53bc03 100644 --- a/api/_modules/captum/attr/_core/neuron/neuron_feature_ablation/index.html +++ b/api/_modules/captum/attr/_core/neuron/neuron_feature_ablation/index.html @@ -38,7 +38,11 @@

Source code for captum.attr._core.neuron.neuron_feature_ablation

import torch from captum._utils.common import _verify_select_neuron from captum._utils.gradient import _forward_layer_eval -from captum._utils.typing import BaselineType, TensorOrTupleOfTensorsGeneric +from captum._utils.typing import ( + BaselineType, + SliceIntType, + TensorOrTupleOfTensorsGeneric, +) from captum.attr._core.feature_ablation import FeatureAblation from captum.attr._utils.attribution import NeuronAttribution, PerturbationAttribution from captum.log import log_usage @@ -65,8 +69,7 @@

Source code for captum.attr._core.neuron.neuron_feature_ablation

def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Union[int, float, Tensor]], layer: Module, device_ids: Union[None, List[int]] = None, ) -> None: @@ -97,8 +100,11 @@

Source code for captum.attr._core.neuron.neuron_feature_ablation

def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], baselines: BaselineType = None, additional_forward_args: Optional[object] = None, feature_mask: Union[None, TensorOrTupleOfTensorsGeneric] = None, @@ -286,8 +292,7 @@

Source code for captum.attr._core.neuron.neuron_feature_ablation

>>> feature_mask=feature_mask) """ - # pyre-fixme[3]: Return type must be annotated. - def neuron_forward_func(*args: Any): + def neuron_forward_func(*args: Any) -> Tensor: with torch.no_grad(): layer_eval = _forward_layer_eval( self.forward_func, diff --git a/api/_modules/captum/attr/_core/neuron/neuron_gradient.html b/api/_modules/captum/attr/_core/neuron/neuron_gradient.html index 160182bc77..2353f152f6 100644 --- a/api/_modules/captum/attr/_core/neuron/neuron_gradient.html +++ b/api/_modules/captum/attr/_core/neuron/neuron_gradient.html @@ -46,9 +46,10 @@

Source code for captum.attr._core.neuron.neuron_gradient

apply_gradient_requirements, undo_gradient_requirements, ) -from captum._utils.typing import TensorOrTupleOfTensorsGeneric +from captum._utils.typing import SliceIntType, TensorOrTupleOfTensorsGeneric from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution from captum.log import log_usage +from torch import Tensor from torch.nn import Module @@ -62,8 +63,7 @@

Source code for captum.attr._core.neuron.neuron_gradient

def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Union[int, float, Tensor]], layer: Module, device_ids: Union[None, List[int]] = None, ) -> None: @@ -96,8 +96,11 @@

Source code for captum.attr._core.neuron.neuron_gradient

def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], additional_forward_args: Optional[object] = None, attribute_to_neuron_input: bool = False, ) -> TensorOrTupleOfTensorsGeneric: @@ -198,18 +201,12 @@

Source code for captum.attr._core.neuron.neuron_gradient

>>> # index (4,1,2). >>> attribution = neuron_ig.attribute(input, (4,1,2)) """ - # pyre-fixme[6]: For 1st argument expected `Tensor` but got - # `TensorOrTupleOfTensorsGeneric`. is_inputs_tuple = _is_tuple(inputs) - # pyre-fixme[9]: inputs has type `TensorOrTupleOfTensorsGeneric`; used as - # `Tuple[Tensor, ...]`. - inputs = _format_tensor_into_tuples(inputs) + inputs_tuple = _format_tensor_into_tuples(inputs) additional_forward_args = _format_additional_forward_args( additional_forward_args ) - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but got - # `TensorOrTupleOfTensorsGeneric`. - gradient_mask = apply_gradient_requirements(inputs) + gradient_mask = apply_gradient_requirements(inputs_tuple) _, input_grads = _forward_layer_eval_with_neuron_grads( self.forward_func, @@ -221,11 +218,11 @@

Source code for captum.attr._core.neuron.neuron_gradient

attribute_to_layer_input=attribute_to_neuron_input, ) - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but got - # `TensorOrTupleOfTensorsGeneric`. - undo_gradient_requirements(inputs, gradient_mask) - # pyre-fixme[7]: Expected `TensorOrTupleOfTensorsGeneric` but got - # `Tuple[Tensor, ...]`. + undo_gradient_requirements(inputs_tuple, gradient_mask) + + # pyre-fixme[7]: Expected `Variable[TensorOrTupleOfTensorsGeneric <: + # [Tensor, typing.Tuple[Tensor, ...]]]` but got `Union[Tensor, + # typing.Tuple[Tensor, ...]]`. return _format_output(is_inputs_tuple, input_grads)
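Editorial note on the neuron_gradient hunk above: the update stops rebinding the TypeVar-typed `inputs` argument and instead assigns the normalized tuple to a new name, `inputs_tuple`, which removes the pyre-fixme[9] and pyre-fixme[6] suppressions while leaving a single pyre-fixme[7] at the final `_format_output` call. The sketch below illustrates that pattern under simplified assumptions; `format_into_tuple` and `format_output` are hypothetical stand-ins for Captum's internal helpers, and the gradient computation is a placeholder.

    # Sketch only: `format_into_tuple` and `format_output` are simplified
    # stand-ins for Captum's internal helpers, and the "gradients" are fake.
    from typing import Tuple, TypeVar, Union

    from torch import Tensor

    TensorOrTuple = TypeVar("TensorOrTuple", Tensor, Tuple[Tensor, ...])


    def format_into_tuple(inputs: Union[Tensor, Tuple[Tensor, ...]]) -> Tuple[Tensor, ...]:
        # Normalize a single tensor into a one-element tuple.
        return inputs if isinstance(inputs, tuple) else (inputs,)


    def format_output(is_tuple: bool, output: Tuple[Tensor, ...]) -> Union[Tensor, Tuple[Tensor, ...]]:
        # Hand back a tuple only when the caller passed one.
        return output if is_tuple else output[0]


    def attribute(inputs: TensorOrTuple) -> TensorOrTuple:
        is_inputs_tuple = isinstance(inputs, tuple)
        # Bind the normalized form to a new name instead of rebinding `inputs`,
        # so the TypeVar-typed parameter keeps its original type and the
        # pyre-fixme[9]/pyre-fixme[6] suppressions become unnecessary.
        inputs_tuple = format_into_tuple(inputs)
        input_grads = tuple(inp * 2.0 for inp in inputs_tuple)  # placeholder computation
        # Narrowing back to the caller's type still trips the checker, which is
        # why a single pyre-fixme[7] remains at this point in the hunk above.
        return format_output(is_inputs_tuple, input_grads)  # type: ignore[return-value]

Keeping the original name unbound avoids narrowing the generic parameter, which is why only the return statement still needs a suppression.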
diff --git a/api/_modules/captum/attr/_core/neuron/neuron_gradient/index.html b/api/_modules/captum/attr/_core/neuron/neuron_gradient/index.html index 160182bc77..2353f152f6 100644 --- a/api/_modules/captum/attr/_core/neuron/neuron_gradient/index.html +++ b/api/_modules/captum/attr/_core/neuron/neuron_gradient/index.html @@ -46,9 +46,10 @@

Source code for captum.attr._core.neuron.neuron_gradient

apply_gradient_requirements, undo_gradient_requirements, ) -from captum._utils.typing import TensorOrTupleOfTensorsGeneric +from captum._utils.typing import SliceIntType, TensorOrTupleOfTensorsGeneric from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution from captum.log import log_usage +from torch import Tensor from torch.nn import Module @@ -62,8 +63,7 @@

Source code for captum.attr._core.neuron.neuron_gradient

def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Union[int, float, Tensor]], layer: Module, device_ids: Union[None, List[int]] = None, ) -> None: @@ -96,8 +96,11 @@

Source code for captum.attr._core.neuron.neuron_gradient

def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], additional_forward_args: Optional[object] = None, attribute_to_neuron_input: bool = False, ) -> TensorOrTupleOfTensorsGeneric: @@ -198,18 +201,12 @@

Source code for captum.attr._core.neuron.neuron_gradient

>>> # index (4,1,2). >>> attribution = neuron_ig.attribute(input, (4,1,2)) """ - # pyre-fixme[6]: For 1st argument expected `Tensor` but got - # `TensorOrTupleOfTensorsGeneric`. is_inputs_tuple = _is_tuple(inputs) - # pyre-fixme[9]: inputs has type `TensorOrTupleOfTensorsGeneric`; used as - # `Tuple[Tensor, ...]`. - inputs = _format_tensor_into_tuples(inputs) + inputs_tuple = _format_tensor_into_tuples(inputs) additional_forward_args = _format_additional_forward_args( additional_forward_args ) - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but got - # `TensorOrTupleOfTensorsGeneric`. - gradient_mask = apply_gradient_requirements(inputs) + gradient_mask = apply_gradient_requirements(inputs_tuple) _, input_grads = _forward_layer_eval_with_neuron_grads( self.forward_func, @@ -221,11 +218,11 @@

Source code for captum.attr._core.neuron.neuron_gradient

attribute_to_layer_input=attribute_to_neuron_input, ) - # pyre-fixme[6]: For 1st argument expected `Tuple[Tensor, ...]` but got - # `TensorOrTupleOfTensorsGeneric`. - undo_gradient_requirements(inputs, gradient_mask) - # pyre-fixme[7]: Expected `TensorOrTupleOfTensorsGeneric` but got - # `Tuple[Tensor, ...]`. + undo_gradient_requirements(inputs_tuple, gradient_mask) + + # pyre-fixme[7]: Expected `Variable[TensorOrTupleOfTensorsGeneric <: + # [Tensor, typing.Tuple[Tensor, ...]]]` but got `Union[Tensor, + # typing.Tuple[Tensor, ...]]`. return _format_output(is_inputs_tuple, input_grads)
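Editorial note on the `neuron_selector` annotations that recur throughout these hunks: the bare `Callable` flagged by pyre-fixme[24] is replaced with an explicit `Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor]`, and the builtin `slice` gives way to Captum's `SliceIntType` alias. The sketch below shows the three selector forms the new union admits; the dispatch logic is illustrative only, not Captum's `_verify_select_neuron`, and `SliceIntType` is approximated with the builtin `slice` rather than imported from captum._utils.typing.

    # Sketch of the three selector forms the new annotation admits.
    from typing import Callable, Tuple, Union

    import torch
    from torch import Tensor

    SliceIntType = slice  # assumption: stand-in for captum._utils.typing.SliceIntType

    NeuronSelector = Union[
        int,
        Tuple[Union[int, SliceIntType], ...],
        Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor],
    ]


    def select_neuron(layer_output: Tensor, selector: NeuronSelector) -> Tensor:
        if isinstance(selector, int):
            # A flat index into the non-batch dimensions.
            return layer_output.reshape(layer_output.shape[0], -1)[:, selector]
        if isinstance(selector, tuple):
            # A tuple of ints/slices indexes the non-batch dimensions directly.
            return layer_output[(slice(None),) + selector]
        # A callable receives the layer output and returns the selected value(s).
        return selector(layer_output)


    out = torch.randn(4, 8, 3, 3)                               # 4 examples, 8x3x3 layer output
    v1 = select_neuron(out, 5)                                  # int selector
    v2 = select_neuron(out, (2, slice(0, 2), 1))                # tuple mixing int and slice
    v3 = select_neuron(out, lambda t: t[:, 0].sum(dim=(1, 2)))  # callable selector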
diff --git a/api/_modules/captum/attr/_core/neuron/neuron_gradient_shap.html b/api/_modules/captum/attr/_core/neuron/neuron_gradient_shap.html index e89616be3f..5188954e90 100644 --- a/api/_modules/captum/attr/_core/neuron/neuron_gradient_shap.html +++ b/api/_modules/captum/attr/_core/neuron/neuron_gradient_shap.html @@ -36,10 +36,11 @@

Source code for captum.attr._core.neuron.neuron_gradient_shap

from typing import Callable, List, Optional, Tuple, Union from captum._utils.gradient import construct_neuron_grad_fn -from captum._utils.typing import TensorOrTupleOfTensorsGeneric +from captum._utils.typing import SliceIntType, TensorOrTupleOfTensorsGeneric from captum.attr._core.gradient_shap import GradientShap from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution from captum.log import log_usage +from torch import Tensor from torch.nn import Module @@ -84,8 +85,7 @@

Source code for captum.attr._core.neuron.neuron_gradient_shap

def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Union[int, float, Tensor]], layer: Module, device_ids: Union[None, List[int]] = None, multiply_by_inputs: bool = True, @@ -133,8 +133,11 @@

Source code for captum.attr._core.neuron.neuron_gradient_shap

def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], baselines: Union[ TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric] ], diff --git a/api/_modules/captum/attr/_core/neuron/neuron_gradient_shap/index.html b/api/_modules/captum/attr/_core/neuron/neuron_gradient_shap/index.html index e89616be3f..5188954e90 100644 --- a/api/_modules/captum/attr/_core/neuron/neuron_gradient_shap/index.html +++ b/api/_modules/captum/attr/_core/neuron/neuron_gradient_shap/index.html @@ -36,10 +36,11 @@

Source code for captum.attr._core.neuron.neuron_gradient_shap

from typing import Callable, List, Optional, Tuple, Union from captum._utils.gradient import construct_neuron_grad_fn -from captum._utils.typing import TensorOrTupleOfTensorsGeneric +from captum._utils.typing import SliceIntType, TensorOrTupleOfTensorsGeneric from captum.attr._core.gradient_shap import GradientShap from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution from captum.log import log_usage +from torch import Tensor from torch.nn import Module @@ -84,8 +85,7 @@

Source code for captum.attr._core.neuron.neuron_gradient_shap

def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Union[int, float, Tensor]], layer: Module, device_ids: Union[None, List[int]] = None, multiply_by_inputs: bool = True, @@ -133,8 +133,11 @@

Source code for captum.attr._core.neuron.neuron_gradient_shap

def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], baselines: Union[ TensorOrTupleOfTensorsGeneric, Callable[..., TensorOrTupleOfTensorsGeneric] ], diff --git a/api/_modules/captum/attr/_core/neuron/neuron_guided_backprop_deconvnet.html b/api/_modules/captum/attr/_core/neuron/neuron_guided_backprop_deconvnet.html index d2a898b021..b9c18d8bf2 100644 --- a/api/_modules/captum/attr/_core/neuron/neuron_guided_backprop_deconvnet.html +++ b/api/_modules/captum/attr/_core/neuron/neuron_guided_backprop_deconvnet.html @@ -36,10 +36,11 @@

Source code for captum.attr._core.neuron.neuron_guided_backprop_deconvnet

from typing import Callable, List, Optional, Tuple, Union from captum._utils.gradient import construct_neuron_grad_fn -from captum._utils.typing import TensorOrTupleOfTensorsGeneric +from captum._utils.typing import SliceIntType, TensorOrTupleOfTensorsGeneric from captum.attr._core.guided_backprop_deconvnet import Deconvolution, GuidedBackprop from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution from captum.log import log_usage +from torch import Tensor from torch.nn import Module @@ -96,8 +97,11 @@

Source code for captum.attr._core.neuron.neuron_guided_backprop_deconvnet

def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], additional_forward_args: Optional[object] = None, attribute_to_neuron_input: bool = False, ) -> TensorOrTupleOfTensorsGeneric: @@ -257,8 +261,11 @@

Source code for captum.attr._core.neuron.neuron_guided_backprop_deconvnet

def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], additional_forward_args: Optional[object] = None, attribute_to_neuron_input: bool = False, ) -> TensorOrTupleOfTensorsGeneric: diff --git a/api/_modules/captum/attr/_core/neuron/neuron_guided_backprop_deconvnet/index.html b/api/_modules/captum/attr/_core/neuron/neuron_guided_backprop_deconvnet/index.html index d2a898b021..b9c18d8bf2 100644 --- a/api/_modules/captum/attr/_core/neuron/neuron_guided_backprop_deconvnet/index.html +++ b/api/_modules/captum/attr/_core/neuron/neuron_guided_backprop_deconvnet/index.html @@ -36,10 +36,11 @@

Source code for captum.attr._core.neuron.neuron_guided_backprop_deconvnet

from typing import Callable, List, Optional, Tuple, Union from captum._utils.gradient import construct_neuron_grad_fn -from captum._utils.typing import TensorOrTupleOfTensorsGeneric +from captum._utils.typing import SliceIntType, TensorOrTupleOfTensorsGeneric from captum.attr._core.guided_backprop_deconvnet import Deconvolution, GuidedBackprop from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution from captum.log import log_usage +from torch import Tensor from torch.nn import Module @@ -96,8 +97,11 @@

Source code for captum.attr._core.neuron.neuron_guided_backprop_deconvnet

def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], additional_forward_args: Optional[object] = None, attribute_to_neuron_input: bool = False, ) -> TensorOrTupleOfTensorsGeneric: @@ -257,8 +261,11 @@

Source code for captum.attr._core.neuron.neuron_guided_backprop_deconvnet

def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], additional_forward_args: Optional[object] = None, attribute_to_neuron_input: bool = False, ) -> TensorOrTupleOfTensorsGeneric: diff --git a/api/_modules/captum/attr/_core/neuron/neuron_integrated_gradients.html b/api/_modules/captum/attr/_core/neuron/neuron_integrated_gradients.html index f6b9a78d63..9671bb1688 100644 --- a/api/_modules/captum/attr/_core/neuron/neuron_integrated_gradients.html +++ b/api/_modules/captum/attr/_core/neuron/neuron_integrated_gradients.html @@ -36,7 +36,7 @@

Source code for captum.attr._core.neuron.neuron_integrated_gradients

from typing import Callable, List, Optional, Tuple, Union from captum._utils.gradient import construct_neuron_grad_fn -from captum._utils.typing import TensorOrTupleOfTensorsGeneric +from captum._utils.typing import SliceIntType, TensorOrTupleOfTensorsGeneric from captum.attr._core.integrated_gradients import IntegratedGradients from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution from captum.log import log_usage @@ -61,8 +61,7 @@

Source code for captum.attr._core.neuron.neuron_integrated_gradients

def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: Module, device_ids: Union[None, List[int]] = None, multiply_by_inputs: bool = True, @@ -112,8 +111,11 @@

Source code for captum.attr._core.neuron.neuron_integrated_gradients

def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], baselines: Union[None, Tensor, Tuple[Tensor, ...]] = None, additional_forward_args: Optional[object] = None, n_steps: int = 50, diff --git a/api/_modules/captum/attr/_core/neuron/neuron_integrated_gradients/index.html b/api/_modules/captum/attr/_core/neuron/neuron_integrated_gradients/index.html index f6b9a78d63..9671bb1688 100644 --- a/api/_modules/captum/attr/_core/neuron/neuron_integrated_gradients/index.html +++ b/api/_modules/captum/attr/_core/neuron/neuron_integrated_gradients/index.html @@ -36,7 +36,7 @@

Source code for captum.attr._core.neuron.neuron_integrated_gradients

from typing import Callable, List, Optional, Tuple, Union from captum._utils.gradient import construct_neuron_grad_fn -from captum._utils.typing import TensorOrTupleOfTensorsGeneric +from captum._utils.typing import SliceIntType, TensorOrTupleOfTensorsGeneric from captum.attr._core.integrated_gradients import IntegratedGradients from captum.attr._utils.attribution import GradientAttribution, NeuronAttribution from captum.log import log_usage @@ -61,8 +61,7 @@

Source code for captum.attr._core.neuron.neuron_integrated_gradients

def __init__( self, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - forward_func: Callable, + forward_func: Callable[..., Tensor], layer: Module, device_ids: Union[None, List[int]] = None, multiply_by_inputs: bool = True, @@ -112,8 +111,11 @@

Source code for captum.attr._core.neuron.neuron_integrated_gradients

def attribute( self, inputs: TensorOrTupleOfTensorsGeneric, - # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable], + neuron_selector: Union[ + int, + Tuple[Union[int, SliceIntType], ...], + Callable[[Union[Tensor, Tuple[Tensor, ...]]], Tensor], + ], baselines: Union[None, Tensor, Tuple[Tensor, ...]] = None, additional_forward_args: Optional[object] = None, n_steps: int = 50, diff --git a/tutorials/CIFAR_TorchVision_Captum_Insights.html b/tutorials/CIFAR_TorchVision_Captum_Insights.html index e8b30fb48a..9eeefc5c5a 100644 --- a/tutorials/CIFAR_TorchVision_Captum_Insights.html +++ b/tutorials/CIFAR_TorchVision_Captum_Insights.html @@ -234,10 +234,10 @@

-
+
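Closing note on two fixes that recur in the neuron_conductance hunks earlier in this diff: annotating `forward_func` as `Callable[..., Tensor]` instead of a bare `Callable`, and converting `input_grad.shape[1:]` (a `torch.Size`) to a plain tuple before passing it to a parameter typed `Tuple[int, ...]`. A minimal sketch, assuming a hypothetical `combine` helper in place of Captum's `_reshape_and_sum` and an arbitrary `model_forward`:

    # Minimal sketch; `combine` stands in for Captum's `_reshape_and_sum`.
    from typing import Callable, Tuple

    import torch
    from torch import Tensor


    def model_forward(x: Tensor) -> Tensor:
        return x.sum(dim=1)


    # (a) Spelling out the return type removes the pyre-fixme[24] on a bare Callable.
    forward_func: Callable[..., Tensor] = model_forward


    def combine(grad: Tensor, n_steps: int, num_examples: int, shape: Tuple[int, ...]) -> Tensor:
        # Reshape the flattened (n_steps * num_examples, ...) gradient and sum over steps.
        return grad.reshape((n_steps, num_examples) + shape).sum(dim=0)


    # (b) `Tensor.shape` is a `torch.Size`; converting it with `tuple(...)` satisfies
    #     a parameter annotated `Tuple[int, ...]` without changing runtime behavior.
    input_grad = torch.randn(3, 3, 4)    # gradient w.r.t. the original inputs (3 examples)
    scaled_grad = torch.randn(6, 3, 4)   # n_steps=2 * num_examples=3 rows
    total_grad = combine(scaled_grad, 2, 3, tuple(input_grad.shape[1:]))
    print(total_grad.shape)              # torch.Size([3, 3, 4])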