diff --git a/foolbox/attacks/__init__.py b/foolbox/attacks/__init__.py
index b1eecd12..a78c0689 100644
--- a/foolbox/attacks/__init__.py
+++ b/foolbox/attacks/__init__.py
@@ -20,6 +20,11 @@
     L2AdamBasicIterativeAttack,
     LinfAdamBasicIterativeAttack,
 )
+from .mi_fgsm import (  # noqa: F401
+    L1MomentumIterativeFastGradientMethod,
+    L2MomentumIterativeFastGradientMethod,
+    LinfMomentumIterativeFastGradientMethod,
+)
 from .fast_gradient_method import (  # noqa: F401
     L1FastGradientAttack,
     L2FastGradientAttack,
@@ -93,6 +98,7 @@
 L2PGD = L2ProjectedGradientDescentAttack
 LinfPGD = LinfProjectedGradientDescentAttack
 PGD = LinfPGD
+MIFGSM = LinfMomentumIterativeFastGradientMethod
 
 L1AdamPGD = L1AdamProjectedGradientDescentAttack
 L2AdamPGD = L2AdamProjectedGradientDescentAttack
diff --git a/foolbox/attacks/mi_fgsm.py b/foolbox/attacks/mi_fgsm.py
new file mode 100644
index 00000000..65008b50
--- /dev/null
+++ b/foolbox/attacks/mi_fgsm.py
@@ -0,0 +1,146 @@
+from functools import partial
+from typing import Callable, Optional
+
+from foolbox.attacks.gradient_descent_base import normalize_lp_norms
+
+from .basic_iterative_method import (
+    Optimizer,
+    L1BasicIterativeAttack,
+    L2BasicIterativeAttack,
+    LinfBasicIterativeAttack,
+)
+import eagerpy as ep
+
+
+class GDMOptimizer(Optimizer):
+    """Momentum-based gradient descent optimizer.
+
+    Args:
+        x : Optimization variable, used to initialize the accumulated gradient.
+        stepsize : Stepsize for gradient descent.
+        momentum : Momentum (decay) factor for the accumulated gradient.
+        normalize_fn : Function used to normalize the accumulated gradient.
+    """
+
+    def __init__(
+        self,
+        x: ep.Tensor,
+        stepsize: float,
+        momentum: float = 1.0,
+        normalize_fn: Callable[[ep.Tensor], ep.Tensor] = lambda x: x.sign(),
+    ):
+        self.stepsize = stepsize
+        self.momentum = momentum
+        self.normalize = normalize_fn
+        self.accumulation_grad = ep.zeros_like(x)
+
+    def __call__(self, gradient: ep.Tensor) -> ep.Tensor:
+        # g_{t+1} = momentum * g_t + gradient; step along the normalized accumulator
+        self.accumulation_grad = self.momentum * self.accumulation_grad + gradient
+        return self.stepsize * self.normalize(self.accumulation_grad)
+
+
+class L1MomentumIterativeFastGradientMethod(L1BasicIterativeAttack):
+    """L1 Momentum Iterative Fast Gradient Method (MI-FGM) [#Dong18]_
+
+    Args:
+        momentum : Momentum (decay) factor for the accumulated gradient.
+        rel_stepsize : Stepsize relative to epsilon.
+        abs_stepsize : If given, it takes precedence over rel_stepsize.
+        steps : Number of update steps to perform.
+        random_start : Whether the perturbation is initialized randomly or starts at zero.
+    """
+
+    def __init__(
+        self,
+        *,
+        momentum: float = 1.0,
+        rel_stepsize: float = 0.2,
+        abs_stepsize: Optional[float] = None,
+        steps: int = 10,
+        random_start: bool = False,
+    ):
+        self.momentum = momentum
+        super().__init__(
+            rel_stepsize=rel_stepsize,
+            abs_stepsize=abs_stepsize,
+            steps=steps,
+            random_start=random_start,
+        )
+
+    def get_optimizer(self, x: ep.Tensor, stepsize: float) -> Optimizer:
+        return GDMOptimizer(
+            x, stepsize, self.momentum, partial(normalize_lp_norms, p=1)
+        )
+
+
+class L2MomentumIterativeFastGradientMethod(L2BasicIterativeAttack):
+    """L2 Momentum Iterative Fast Gradient Method (MI-FGM) [#Dong18]_
+
+    Args:
+        momentum : Momentum (decay) factor for the accumulated gradient.
+        rel_stepsize : Stepsize relative to epsilon.
+        abs_stepsize : If given, it takes precedence over rel_stepsize.
+        steps : Number of update steps to perform.
+        random_start : Whether the perturbation is initialized randomly or starts at zero.
+    """
+
+    def __init__(
+        self,
+        *,
+        momentum: float = 1.0,
+        rel_stepsize: float = 0.2,
+        abs_stepsize: Optional[float] = None,
+        steps: int = 10,
+        random_start: bool = False,
+    ):
+        self.momentum = momentum
+        super().__init__(
+            rel_stepsize=rel_stepsize,
+            abs_stepsize=abs_stepsize,
+            steps=steps,
+            random_start=random_start,
+        )
+
+    def get_optimizer(self, x: ep.Tensor, stepsize: float) -> Optimizer:
+        return GDMOptimizer(
+            x, stepsize, self.momentum, partial(normalize_lp_norms, p=2)
+        )
+
+
+class LinfMomentumIterativeFastGradientMethod(LinfBasicIterativeAttack):
+    """Linf Momentum Iterative Fast Gradient Sign Method (MI-FGSM) [#Dong18]_
+
+    Args:
+        momentum : Momentum (decay) factor for the accumulated gradient.
+        rel_stepsize : Stepsize relative to epsilon.
+        abs_stepsize : If given, it takes precedence over rel_stepsize.
+        steps : Number of update steps to perform.
+        random_start : Whether the perturbation is initialized randomly or starts at zero.
+
+    References:
+        .. [#Dong18] Dong Y., Liao F., Pang T., et al. "Boosting Adversarial
+            Attacks with Momentum." Proceedings of the IEEE Conference on Computer
+            Vision and Pattern Recognition (CVPR), 2018: 9185-9193.
+            https://arxiv.org/abs/1710.06081
+    """
+
+    def __init__(
+        self,
+        *,
+        momentum: float = 1.0,
+        rel_stepsize: float = 0.2,
+        abs_stepsize: Optional[float] = None,
+        steps: int = 10,
+        random_start: bool = False,
+    ):
+        self.momentum = momentum
+        super().__init__(
+            rel_stepsize=rel_stepsize,
+            abs_stepsize=abs_stepsize,
+            steps=steps,
+            random_start=random_start,
+        )
+
+    def get_optimizer(self, x: ep.Tensor, stepsize: float) -> Optimizer:
+        return GDMOptimizer(x, stepsize, self.momentum)
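Note for reviewers: the heart of the new file is the accumulation step in `GDMOptimizer.__call__`, i.e. g_{t+1} = momentum * g_t + gradient, followed by a step of size `stepsize` along the normalized accumulator. A minimal sketch of that rule, with NumPy standing in for eagerpy and made-up gradient values, purely for illustration:

```python
# Illustrative sketch of the GDMOptimizer update rule; NumPy stands in
# for eagerpy and the gradients below are arbitrary example values.
import numpy as np

momentum, stepsize = 1.0, 0.2
g = np.zeros(3)  # accumulated gradient, mirrors ep.zeros_like(x)

for grad in (np.array([0.5, -1.0, 0.2]), np.array([0.4, -0.8, 0.1])):
    g = momentum * g + grad        # decay old history, add the new gradient
    step = stepsize * np.sign(g)   # Linf case: normalize_fn defaults to sign()
    print(step)                    # every entry is +/- stepsize
```

With the default momentum=1.0 the history is summed without decay, which is the decay factor the MI-FGSM paper reports working best.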
+ """ + + def __init__( + self, + *, + momentum: float = 1.0, + rel_stepsize: float = 0.2, + abs_stepsize: Optional[float] = None, + steps: int = 10, + random_start: bool = False, + ): + self.momentum = momentum + super().__init__( + rel_stepsize=rel_stepsize, + abs_stepsize=abs_stepsize, + steps=steps, + random_start=random_start, + ) + + def get_optimizer(self, x: ep.Tensor, stepsize: float) -> Optimizer: + return GDMOptimizer( + x, stepsize, self.momentum, partial(normalize_lp_norms, p=2) + ) + + +class LinfMomentumIterativeFastGradientMethod(LinfBasicIterativeAttack): + """Linf Momentum Iterative Fast Gradient Sign Method (MI-FGSM) [#Dong18] + + Args: + momentum : Momentum factor for accumulation grad + rel_stepsize : Stepsize relative to epsilon + abs_stepsize : If given, it takes precedence over rel_stepsize. + steps : Number of update steps to perform. + random_start : Whether the perturbation is initialized randomly or starts at zero. + + References: .. [#Dong18] Dong Y, Liao F, Pang T, et al. Boosting adversarial attacks with momentum[ + C]//Proceedings of the IEEE conference on computer vision and pattern recognition. 2018: 9185-9193. + https://arxiv.org/abs/1710.06081 + """ + + def __init__( + self, + *, + momentum: float = 1.0, + rel_stepsize: float = 0.2, + abs_stepsize: Optional[float] = None, + steps: int = 10, + random_start: bool = False, + ): + self.momentum = momentum + super().__init__( + rel_stepsize=rel_stepsize, + abs_stepsize=abs_stepsize, + steps=steps, + random_start=random_start, + ) + + def get_optimizer(self, x: ep.Tensor, stepsize: float) -> Optimizer: + return GDMOptimizer(x, stepsize, self.momentum) diff --git a/tests/test_attacks.py b/tests/test_attacks.py index 18249c48..08fea209 100644 --- a/tests/test_attacks.py +++ b/tests/test_attacks.py @@ -94,6 +94,15 @@ def get_attack_id(x: AttackTestTarget) -> str: AttackTestTarget(fa.FGSM(), Linf(100.0), uses_grad=True), AttackTestTarget(FGSM_GE(), Linf(100.0)), AttackTestTarget(fa.FGM(), L2(100.0), uses_grad=True), + AttackTestTarget( + fa.LinfMomentumIterativeFastGradientMethod(), Linf(1.0), uses_grad=True + ), + AttackTestTarget( + fa.L2MomentumIterativeFastGradientMethod(), L2(50.0), uses_grad=True + ), + AttackTestTarget( + fa.L1MomentumIterativeFastGradientMethod(), 5000.0, uses_grad=True + ), AttackTestTarget(fa.L1FastGradientAttack(), 5000.0, uses_grad=True), AttackTestTarget( fa.GaussianBlurAttack(steps=10), uses_grad=True, requires_real_model=True @@ -243,6 +252,15 @@ def test_untargeted_attacks( ), AttackTestTarget(fa.L2AdamBasicIterativeAttack(), L2(50.0), uses_grad=True), AttackTestTarget(fa.L1AdamBasicIterativeAttack(), 5000.0, uses_grad=True), + AttackTestTarget( + fa.LinfMomentumIterativeFastGradientMethod(), Linf(1.0), uses_grad=True + ), + AttackTestTarget( + fa.L2MomentumIterativeFastGradientMethod(), L2(50.0), uses_grad=True + ), + AttackTestTarget( + fa.L1MomentumIterativeFastGradientMethod(), 5000.0, uses_grad=True + ), AttackTestTarget(fa.SparseL1DescentAttack(), 5000.0, uses_grad=True), ]