From 617b6ed1e6dd63ae606f27be892d39ca69beb1b2 Mon Sep 17 00:00:00 2001
From: David Berard
Date: Mon, 8 Aug 2022 19:16:56 -0700
Subject: [PATCH] Update torchdynamo backend for torchbench

Summary: Previously the torchdynamo backend was not added to the TARGETS
build file, so trying to run it internally would fail when `--dynamo` was
added to the torchbench command.

Also removed aot_autograd_speedup_strategy to reflect the changes in
https://github.com/pytorch/torchdynamo/pull/201 (i.e. use
`--torchdynamo aot_nvfuser` instead).

Differential Revision: D38445665

fbshipit-source-id: 4385902594ff79a5fe2cf2f3366961fcd552befc
---
 configs/torchdynamo/nvfuser-aot-speedup.yaml |  2 +-
 torchbenchmark/util/backends/torchdynamo.py  | 10 +---------
 2 files changed, 2 insertions(+), 10 deletions(-)

diff --git a/configs/torchdynamo/nvfuser-aot-speedup.yaml b/configs/torchdynamo/nvfuser-aot-speedup.yaml
index 78e61c23a4..976b2c56f2 100644
--- a/configs/torchdynamo/nvfuser-aot-speedup.yaml
+++ b/configs/torchdynamo/nvfuser-aot-speedup.yaml
@@ -7,4 +7,4 @@ args:
   # empty argument means the default pytorch eager mode
   - ""
   - "--torchdynamo nvfuser"
-  - "--torchdynamo aot_autograd_speedup_strategy"
+  - "--torchdynamo aot_nvfuser"
diff --git a/torchbenchmark/util/backends/torchdynamo.py b/torchbenchmark/util/backends/torchdynamo.py
index b9ebfe275e..e3ada15f55 100644
--- a/torchbenchmark/util/backends/torchdynamo.py
+++ b/torchbenchmark/util/backends/torchdynamo.py
@@ -5,16 +5,10 @@
 import functools
 from typing import List
 import torchdynamo
-from torchdynamo.optimizations.training import aot_autograd_speedup_strategy
-
-EXTRA_BACKENDS = {
-    "aot_autograd_speedup_strategy": aot_autograd_speedup_strategy,
-}
 
 def parse_torchdynamo_args(model: 'torchbenchmark.util.model.BenchmarkModel', dyamo_args: List[str]) -> argparse.Namespace:
     parser = argparse.ArgumentParser()
     available_backends = torchdynamo.list_backends()
-    available_backends.extend(EXTRA_BACKENDS.keys())
     parser.add_argument(
         "--torchdynamo", choices=available_backends, help="Specify torchdynamo backends"
     )
@@ -23,9 +17,7 @@ def parse_torchdynamo_args(model: 'torchbenchmark.util.model.BenchmarkModel', dy
 
 
 def apply_torchdynamo_args(model: 'torchbenchmark.util.model.BenchmarkModel', args: argparse.Namespace, precision: str):
-    if args.torchdynamo in EXTRA_BACKENDS:
-        model.add_context(functools.partial(torchdynamo.optimize, EXTRA_BACKENDS[args.torchdynamo]))
-    elif args.torchdynamo == "fx2trt" and precision == "fp16":
+    if args.torchdynamo == "fx2trt" and precision == "fp16":
         model.add_context(functools.partial(torchdynamo.optimize, torchdynamo.optimizations.backends.fx2trt_compiler_fp16))
     else:
         model.add_context(functools.partial(torchdynamo.optimize, args.torchdynamo))
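
With `EXTRA_BACKENDS` gone, `aot_nvfuser` is expected to come straight out of
`torchdynamo.list_backends()` and flow through the generic `else` branch of
`apply_torchdynamo_args`. Below is a minimal sketch of that path, assuming a
torchdynamo build that already includes pytorch/torchdynamo#201 (so
`"aot_nvfuser"` is a registered backend) and that `torchdynamo.optimize`
returns a context manager, as its use with `model.add_context` above implies:

```python
import functools

import torchdynamo

# Backend name as passed on the command line: --torchdynamo aot_nvfuser
backend = "aot_nvfuser"

# After pytorch/torchdynamo#201 the backend is registered upstream,
# so no EXTRA_BACKENDS shim is needed on the torchbench side.
assert backend in torchdynamo.list_backends()

# Same shape as the generic branch in apply_torchdynamo_args: a
# zero-argument callable that produces the optimization context.
optimize_ctx = functools.partial(torchdynamo.optimize, backend)

with optimize_ctx():
    # The benchmark's train/eval iterations would run here; torchdynamo
    # captures graphs and compiles them via AOTAutograd + nvFuser.
    pass
```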