Remove TOSA make_fx configuration #3951

Draft · wants to merge 1 commit into base: main
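This draft removes the `make_fx_tosa` e2e configuration end to end: the CI invocation in `build_linux_packages.sh`, the `MAKE_FX_TOSA_PASS_SET`/`MAKE_FX_TOSA_CRASHING_SET` xfail sets, the `use_make_fx` option on `torchscript.compile`, and the `use_make_fx` flags on the backend test configs.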
3 changes: 0 additions & 3 deletions build_tools/python_deploy/build_linux_packages.sh
@@ -324,9 +324,6 @@ function test_in_tree() {
;;
esac

echo ":::: Run make_fx + TOSA e2e integration tests"
python -m e2e_testing.main --config=make_fx_tosa -v

echo ":::: Run TOSA e2e integration tests"
python -m e2e_testing.main --config=tosa -v
}
7 changes: 0 additions & 7 deletions projects/pt1/e2e_testing/main.py
@@ -42,8 +42,6 @@
from .xfail_sets import (
LINALG_XFAIL_SET,
LINALG_CRASHING_SET,
MAKE_FX_TOSA_PASS_SET,
MAKE_FX_TOSA_CRASHING_SET,
STABLEHLO_PASS_SET,
STABLEHLO_CRASHING_SET,
TOSA_PASS_SET,
@@ -76,7 +74,6 @@ def _get_argparse():
"torchscript",
"linalg",
"stablehlo",
"make_fx_tosa",
"tosa",
"lazy_tensor_core",
"torchdynamo",
@@ -166,10 +163,6 @@ def main():
config = TosaBackendTestConfig(LinalgOnTensorsTosaBackend())
xfail_set = all_test_unique_names - TOSA_PASS_SET
crashing_set = TOSA_CRASHING_SET
elif args.config == "make_fx_tosa":
config = TosaBackendTestConfig(LinalgOnTensorsTosaBackend(), use_make_fx=True)
xfail_set = all_test_unique_names - MAKE_FX_TOSA_PASS_SET
crashing_set = MAKE_FX_TOSA_CRASHING_SET
elif args.config == "native_torch":
config = NativeTorchTestConfig()
xfail_set = set()
140 changes: 0 additions & 140 deletions projects/pt1/e2e_testing/xfail_sets.py
@@ -2451,146 +2451,6 @@
"IndexTensorStaticNonContiguousWithNoneModule_basic",
}

MAKE_FX_TOSA_CRASHING_SET = TOSA_CRASHING_SET | {
# Runtime op verification: static result dims in reassoc group do not divide src dim evenly
"FlattenDynamicModule_basic",
"ReshapeDynamicModule_basic",
"ViewFlattenAndExpandModule_basic",
"ViewSizeDimLedAndFollowedByExpandedOnesModule_basic",
"ViewSizeDimLedByExpandedOnesModule_basic",
}

MAKE_FX_TOSA_PASS_SET = (
TOSA_PASS_SET
| {
### Tests additionally passing in make_fx_tosa
"AdaptiveAvgPool1dStaticEvenMultiple_basic",
"IsInfiniteModule_basic",
"AdaptiveAvgPool2dFixedKernelStrideSizeStaticModule_basic",
"AdaptiveAvgPool2dUnitOutputSizeStaticModule_basic",
"ResNet18StaticModule_basic",
"AdaptiveAvgPool1dStaticLargerOutput_basic",
"ScaledDotProductAttentionBoolMaskModule_basic",
"ScaledDotProductAttentionDifferentDynamicCausalModule_basic",
"ArgminIntModule_basic",
"ArgminIntModule_multiple_mins",
"ArgminModule_basic",
"ArgminModule_keepDim",
"ReduceAllDimBool_basic",
"ReduceAllDimFloat_basic",
"ReduceAllDimInt_basic",
"ReduceAllFloatModule_basic",
"ReduceAllIntModule_basic",
"ReduceAnyFloatModule_basic",
"ReduceAnyIntModule_basic",
"ReduceMaxAllDims_basic",
"ReduceMaxFloatModule_basic",
"ReduceMaxSignedIntModule_basic",
"ReduceMaxUnsignedIntModule_basic",
"ReduceMinFloatModule_basic",
"ReduceMinSignedIntModule_basic",
"ReduceMinUnsignedIntModule_basic",
"ReduceProdDtypeFloatModule_basic",
"ReduceProdDtypeIntModule_basic",
"ReduceProdElementTypeBoolModule_basic",
"ReduceProdFloatModule_basic",
"ReduceProdSignedIntModule_basic",
"ReduceProdUnsignedIntModule_basic",
"ReduceSumDimIntListDtypeFloatModule_basic",
"ReduceSumDimIntListDtypeIntModule_basic",
"ReduceSumDimIntListElementTypeBoolModule_basic",
"ReduceSumDtypeFloatModule_basic",
"ReduceSumDtypeIntModule_basic",
"ReduceSumElementTypeBoolModule_basic",
"ScaledDotProductAttentionDifferentModule_basic",
"ScaledDotProductAttentionMaskModule_basic",
"ScaledDotProductAttentionSameModule_basic",
"AvgPool2dCountIncludePadFalseStaticModule_basic",
"AtenLinear1D_basic",
"AtenLinearMatVec_basic",
"AtenLinearVecMatBias_basic",
"Atleast1dModule0dInput_basic",
"Atleast1dModule1dInput_basic",
"Atleast2dModule0dInput_basic",
"Atleast2dModule1dInput_basic",
"Atleast2dModule2dInput_basic",
"MaxPool1dEmptyStrideStaticModule_basic",
"MaxPool1dStaticCeilModeTrueModule_basic",
"MaxPool1dStaticModule_basic",
"AdaptiveAvgPool1dUnitOutputSizeStaticModule_basic",
"CosineSimilarityModule_basic",
"NativeGroupNormBackwardModule_basic",
"ReduceFrobeniusNormKeepDimModule_basic",
"ReduceFrobeniusNormModule_basic",
"SliceWholeTensorModule_basic",
"TensorFloatModule_basic",
"TensorIntModule_basic",
"RepeatInterleaveSelfIntModule_basic",
"TorchPrimLoopForLikeTensorArgModule_basic",
"ViewSizeDimFollowedByCollapsedOnesModule_basic",
"ViewSizeDimFollowedByExpandedOnesModule_basic",
"ViewSizeDimLedAndFollowedByCollapsedOnesModule_basic",
"ViewSizeDimLedByCollapsedOnesModule_basic",
"ViewSizeFromOtherTensor_basic",
"RenormModuleFloat32NegativeDim_basic",
"RenormModuleFloat32_basic",
"RreluWithNoiseBackwardEvalModule_basic",
"RreluWithNoiseBackwardEvalStaticModule_basic",
"RreluWithNoiseBackwardTrainModule_basic",
"RreluWithNoiseBackwardTrainStaticModule_basic",
}
) - {
### Test failing in make_fx_tosa but not in tosa
"AdaptiveMaxPool1dDimOneStatic_basic",
"FloatPowerTensorTensorStaticModule_basic",
# Dynamic shape, has extra unsupported broadcast ops
"Matmul_3d",
# Unimplemented operator 'aten._index_put_impl_.hacked_twin'
"IndexPutImpl1DFloatNonAccumulateModule_basic",
"IndexPutImpl1DIntNonAccumulateModule_basic",
# RuntimeError: The size of tensor a (7) must match the size of tensor b (3) at non-singleton dimension 1
"Add_Module_basic",
# failed to legalize operation 'torch.aten.to.dtype' that was explicitly marked illegal
"AtenEyeModuleInt2D_basic",
"AtenEyeMModuleInt2D_basic",
"Conv2dBiasNoPaddingModule_basic",
"Conv2dNoPaddingModule_basic",
"Conv2dWithPaddingDilationStrideModule_basic",
"Conv2dWithPaddingModule_basic",
"Conv2dWithSamePaddingModule_basic",
"Conv2dWithValidPaddingModule_basic",
# failed to legalize operation 'torch.operator'
"ElementwisePreluModule_basic",
"ElementwisePreluStaticModule_basic",
"ElementwiseLogSigmoidModule_basic",
# failed to legalize operation 'torch.aten.rrelu_with_noise'
"ElementwiseRreluEvalModule_basic",
# incompatible return type failure for tosa.concat.
"HstackBasicComplexModule_basic",
"HstackBasicFloatModule_basic",
"HstackBasicIntFloatModule_basic",
"HstackBasicIntModule_basic",
# Shape Related failures
"PrimListUnpackNumMismatchModule_basic",
"ReshapeExpandModule_basic",
"UnsafeViewCollapseModule_basic",
"UnsafeViewDynamicExpandModule_basic",
"ViewCollapseModule_basic",
"ViewDynamicExpandCollapseModule_basic",
"ViewDynamicExpandModule_basic",
"ViewExpandDynamicDimModule_basic",
"ViewNoChange1dModule_basic",
"ViewNoChange2dModule_basic",
"ViewNoChange3dModule_basic",
}

if torch_version_for_comparison() < version.parse("2.5.0.dev"):
MAKE_FX_TOSA_PASS_SET = MAKE_FX_TOSA_PASS_SET | {
"ScaledDotProductAttentionDifferentModule_basic",
"ScaledDotProductAttentionMaskModule_basic",
"ScaledDotProductAttentionSameModule_basic",
}

LTC_CRASHING_SET = {
# TODO: update test to move all inputs to the lazy device. Otherwise test fails with:
# Check failed: lazy_tensor Input tensor is not a lazy tensor: CPUBoolType.
9 changes: 0 additions & 9 deletions projects/pt1/python/torch_mlir/torchscript.py
@@ -14,8 +14,6 @@
from torch._functorch.compile_utils import strip_overloads
import torch
import torch.fx
from torch_mlir.dynamo import _get_decomposition_table
from torch.fx.experimental.proxy_tensor import make_fx

from torch_mlir.compiler_utils import (
run_pipeline_with_repro_report,
@@ -203,7 +201,6 @@ def compile(
backend_legal_ops: Optional[Sequence[str]] = None,
extra_library: Iterable[Callable] = [],
verbose: bool = False,
use_make_fx: bool = False,
enable_ir_printing: bool = False,
):
"""Convert a PyTorch model to MLIR.
@@ -266,12 +263,6 @@
else:
backend_legal_ops = BACKEND_LEGAL_OPS.get(output_type, [])

if use_make_fx:
args = example_args._get_for_tracing(
use_tracing=True, ignore_traced_shapes=True
)["forward"]
model = make_fx(model, decomposition_table=_get_decomposition_table())(*args)

# For FX-based models, automatically strip overloads.
if isinstance(model, torch.fx.GraphModule):
strip_overloads(model)
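For context, a minimal sketch of the `use_make_fx` path deleted above (hedged: `TinyModel` and the example input are hypothetical stand-ins; `make_fx` and `_get_decomposition_table` are the two imports this diff removes):

```python
# Sketch of the removed pre-processing step, for context only.
# TinyModel and the example input are hypothetical; the real code pulled
# its args from example_args._get_for_tracing(use_tracing=True,
# ignore_traced_shapes=True)["forward"].
import torch
from torch.fx.experimental.proxy_tensor import make_fx
from torch_mlir.dynamo import _get_decomposition_table  # import removed by this PR


class TinyModel(torch.nn.Module):
    def forward(self, x):
        return torch.tanh(x)


model = TinyModel()
args = (torch.ones(2, 3),)

# With use_make_fx=True, compile() first retraced the module into a
# torch.fx.GraphModule, applying the decomposition table:
fx_module = make_fx(model, decomposition_table=_get_decomposition_table())(*args)
```

The resulting `GraphModule` then flowed into the existing FX handling in `compile()` (e.g. `strip_overloads`), which is why the branch can be dropped without touching the rest of the pipeline.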
@@ -132,12 +132,10 @@ class OnnxBackendTestConfig(TestConfig):
def __init__(
self,
backend,
use_make_fx: bool = False,
output_type="linalg-on-tensors",
):
super().__init__()
self.backend = backend
self.use_make_fx = use_make_fx
self.output_type = output_type

def compile(self, program: torch.nn.Module, verbose: bool = False) -> Any:
@@ -24,18 +24,16 @@ class TosaBackendTestConfig(TestConfig):
reaching the TOSA abstraction level.
"""

def __init__(self, backend: TosaBackend, use_make_fx: bool = False):
def __init__(self, backend: TosaBackend):
super().__init__()
self.backend = backend
self.use_make_fx = use_make_fx

def compile(self, program: torch.nn.Module, verbose: bool = False) -> Any:
example_args = convert_annotations_to_placeholders(program.forward)
module = torchscript.compile(
program,
example_args,
output_type="tosa",
use_make_fx=self.use_make_fx,
verbose=verbose,
)

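And the corresponding call-site change, as a before/after sketch (import paths as used by the pt1 `e2e_testing/main.py` harness):

```python
from torch_mlir_e2e_test.configs import TosaBackendTestConfig
from torch_mlir_e2e_test.tosa_backends.linalg_on_tensors import (
    LinalgOnTensorsTosaBackend,
)

# Before this PR (the make_fx_tosa config, now removed):
#   config = TosaBackendTestConfig(LinalgOnTensorsTosaBackend(), use_make_fx=True)

# After this PR, only the plain TOSA config remains:
config = TosaBackendTestConfig(LinalgOnTensorsTosaBackend())
```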