diff --git a/configs/torchdynamo/nvfuser-aot-speedup.yaml b/configs/torchdynamo/nvfuser-aot-speedup.yaml
index 78e61c23a4..976b2c56f2 100644
--- a/configs/torchdynamo/nvfuser-aot-speedup.yaml
+++ b/configs/torchdynamo/nvfuser-aot-speedup.yaml
@@ -7,4 +7,4 @@ args:
   # empty argument means the default pytorch eager mode
   - ""
   - "--torchdynamo nvfuser"
-  - "--torchdynamo aot_autograd_speedup_strategy"
+  - "--torchdynamo aot_nvfuser"
diff --git a/torchbenchmark/util/backends/torchdynamo.py b/torchbenchmark/util/backends/torchdynamo.py
index b9ebfe275e..e3ada15f55 100644
--- a/torchbenchmark/util/backends/torchdynamo.py
+++ b/torchbenchmark/util/backends/torchdynamo.py
@@ -5,16 +5,10 @@ import functools
 from typing import List
 
 import torchdynamo
-from torchdynamo.optimizations.training import aot_autograd_speedup_strategy
-
-EXTRA_BACKENDS = {
-    "aot_autograd_speedup_strategy": aot_autograd_speedup_strategy,
-}
 
 def parse_torchdynamo_args(model: 'torchbenchmark.util.model.BenchmarkModel', dyamo_args: List[str]) -> argparse.Namespace:
     parser = argparse.ArgumentParser()
     available_backends = torchdynamo.list_backends()
-    available_backends.extend(EXTRA_BACKENDS.keys())
     parser.add_argument(
         "--torchdynamo", choices=available_backends, help="Specify torchdynamo backends"
     )
@@ -23,9 +17,7 @@ def parse_torchdynamo_args(model: 'torchbenchmark.util.model.BenchmarkModel', dy
 
 
 def apply_torchdynamo_args(model: 'torchbenchmark.util.model.BenchmarkModel', args: argparse.Namespace, precision: str):
-    if args.torchdynamo in EXTRA_BACKENDS:
-        model.add_context(functools.partial(torchdynamo.optimize, EXTRA_BACKENDS[args.torchdynamo]))
-    elif args.torchdynamo == "fx2trt" and precision == "fp16":
+    if args.torchdynamo == "fx2trt" and precision == "fp16":
         model.add_context(functools.partial(torchdynamo.optimize, torchdynamo.optimizations.backends.fx2trt_compiler_fp16))
     else:
         model.add_context(functools.partial(torchdynamo.optimize, args.torchdynamo))
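Note on the change: with the AOT autograd strategy now exposed upstream as torchdynamo's built-in "aot_nvfuser" backend, the EXTRA_BACKENDS shim is unnecessary and every non-fx2trt backend flows through the same generic torchdynamo.optimize(name) path. Below is a minimal sketch of that path, assuming a hypothetical ToyModel stand-in for torchbenchmark's BenchmarkModel (the ToyModel class and its run helper are illustrative, not part of the repo):

    import functools
    import torchdynamo


    class ToyModel:
        # Hypothetical stand-in for torchbenchmark's BenchmarkModel: it only
        # records a zero-argument callable that produces a context manager.
        def __init__(self):
            self._context_factory = None

        def add_context(self, context_fn):
            self._context_factory = context_fn

        def run(self, fn, *args):
            # Hypothetical helper: invoke fn under the registered context,
            # mirroring how the benchmark harness wraps train/eval loops.
            if self._context_factory is None:
                return fn(*args)
            with self._context_factory():
                return fn(*args)


    model = ToyModel()
    # The generic branch of apply_torchdynamo_args for "--torchdynamo aot_nvfuser":
    # torchdynamo.optimize("aot_nvfuser") returns a context manager, so the
    # partial acts as a context factory the model can re-enter on every call.
    model.add_context(functools.partial(torchdynamo.optimize, "aot_nvfuser"))

The design point is that add_context receives a factory rather than a live context, so the harness can re-enter the optimization context once per measured iteration instead of holding one open for the whole benchmark.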