
Commit 8abd26a

Support more open-source models
Differential Revision: D75420351
Pull Request resolved: #11125
1 parent 0c9a4f5 · commit 8abd26a

19 files changed, +610 -31 lines


backends/mediatek/partitioner.py

Lines changed: 1 addition & 0 deletions
@@ -81,6 +81,7 @@ def ops_to_not_decompose(
             torch.ops.aten.upsample_bilinear2d.vec,
             torch.ops.aten.upsample_nearest2d.default,
             torch.ops.aten.upsample_nearest2d.vec,
+            torch.ops.aten._safe_softmax.default,
         ]
         return (ops_not_decompose, None)
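The new entry keeps aten._safe_softmax from being decomposed, so the MediaTek backend receives it as a single op. As a small illustration of the op itself (behaviour described here is my reading of the op, not part of this diff; requires a PyTorch build that ships aten._safe_softmax): it matches a regular softmax on ordinary inputs but returns zeros for rows that are fully masked to -inf, which is why exported attention/softmax graphs may contain it.

import torch

x = torch.randn(2, 4)
masked = torch.full((1, 4), float("-inf"))

# On regular inputs the two agree.
print(torch.ops.aten._safe_softmax.default(x, -1))
print(torch.softmax(x, -1))

# On a fully masked row, softmax produces NaNs while _safe_softmax returns zeros
# (assumed behaviour; verify against your PyTorch version).
print(torch.softmax(masked, -1))
print(torch.ops.aten._safe_softmax.default(masked, -1))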

backends/mediatek/scripts/mtk_build.sh

Lines changed: 1 addition & 0 deletions
@@ -33,6 +33,7 @@ rm -rf cmake-android-out && mkdir cmake-android-out && cd cmake-android-out
 cmake -DBUCK2="$BUCK_PATH" \
       -DCMAKE_TOOLCHAIN_FILE="$ANDROID_NDK/build/cmake/android.toolchain.cmake" \
       -DANDROID_ABI=arm64-v8a \
+      -DANDROID_PLATFORM=android-26 \
       -DEXECUTORCH_BUILD_NEURON=ON \
       -DNEURON_BUFFER_ALLOCATOR_LIB="$NEURON_BUFFER_ALLOCATOR_LIB" \
       ..

examples/mediatek/aot_utils/oss_utils/utils.py

Lines changed: 5 additions & 5 deletions
@@ -24,6 +24,8 @@ def build_executorch_binary(
     file_name,
     dataset,
     quant_dtype: Optional[Precision] = None,
+    skip_op_name: Optional[set] = None,
+    skip_op_type: Optional[set] = None,
 ):
     if quant_dtype is not None:
         quantizer = NeuropilotQuantizer()
@@ -47,14 +49,12 @@
     from executorch.exir.program._program import to_edge_transform_and_lower

     edge_compile_config = exir.EdgeCompileConfig(_check_ir_validity=False)
-    # skipped op names are used for deeplabV3 model
     neuro_partitioner = NeuropilotPartitioner(
         [CompileSpec("platform-config", b"mt6989")],
-        op_names_to_skip={
-            "aten_convolution_default_106",
-            "aten_convolution_default_107",
-        },
+        op_types_to_skip=skip_op_type,
+        op_names_to_skip=skip_op_name,
     )
+
     edge_prog = to_edge_transform_and_lower(
         aten_dialect,
         compile_config=edge_compile_config,
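With this change the deeplabV3-specific skip list moves out of the shared helper: callers pass their own sets, which the helper forwards to the partitioner's op_names_to_skip / op_types_to_skip. A minimal sketch of the new call shape, run from examples/mediatek as the updated scripts assume (the toy model, paths, and node name below are placeholders, not part of the diff):

import torch
from aot_utils.oss_utils.utils import build_executorch_binary
from executorch.backends.mediatek import Precision

# Toy stand-in model and input, just to show the argument order.
model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU())
example_input = torch.randn(1, 3, 32, 32)

build_executorch_binary(
    model.eval(),
    (example_input,),
    "artifact_dir/toy_mtk",          # output file prefix (placeholder path)
    [(example_input,)],              # calibration data for quantization
    quant_dtype=Precision.A8W8,
    skip_op_name={"aten_convolution_default_0"},  # hypothetical node name; forwarded to op_names_to_skip
    skip_op_type=None,                            # forwarded to op_types_to_skip
)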
New file: DCGAN export script

Lines changed: 96 additions & 0 deletions
@@ -0,0 +1,96 @@
+# Copyright (c) MediaTek Inc.
+# All rights reserved
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+import os
+import sys
+
+if os.getcwd() not in sys.path:
+    sys.path.append(os.getcwd())
+import argparse
+import os
+
+import dcgan_main
+
+import torch
+from aot_utils.oss_utils.utils import build_executorch_binary
+from executorch.backends.mediatek import Precision
+
+
+class NhwcWrappedModel(torch.nn.Module):
+    def __init__(self, is_gen=True):
+        super(NhwcWrappedModel, self).__init__()
+        if is_gen:
+            self.dcgan = dcgan_main.Generator()
+        else:
+            self.dcgan = dcgan_main.Discriminator()
+
+    def forward(self, input1):
+        nchw_input1 = input1.permute(0, 3, 1, 2)
+        output = self.dcgan(nchw_input1)
+        return output
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument(
+        "-a",
+        "--artifact",
+        help="path for storing generated artifacts by this example. " "Default ./dcgan",
+        default="./dcgan",
+        type=str,
+    )
+
+    args = parser.parse_args()
+
+    # ensure the working directory exist.
+    os.makedirs(args.artifact, exist_ok=True)
+
+    # prepare dummy data
+    inputG = torch.randn(1, 1, 1, 100)
+    inputD = torch.randn(1, 64, 64, 3)
+
+    # build Generator
+    netG_instance = NhwcWrappedModel(True)
+    netG_pte_filename = "dcgan_netG_mtk"
+    build_executorch_binary(
+        netG_instance.eval(),
+        (torch.randn(1, 1, 1, 100),),
+        f"{args.artifact}/{netG_pte_filename}",
+        [(inputG,)],
+        quant_dtype=Precision.A8W8,
+    )
+
+    # build Discriminator
+    netD_instance = NhwcWrappedModel(False)
+    netD_pte_filename = "dcgan_netD_mtk"
+    build_executorch_binary(
+        netD_instance.eval(),
+        (torch.randn(1, 64, 64, 3),),
+        f"{args.artifact}/{netD_pte_filename}",
+        [(inputD,)],
+        quant_dtype=Precision.A8W8,
+    )
+
+    # save data to inference on device
+    input_list_file = f"{args.artifact}/input_list_G.txt"
+    with open(input_list_file, "w") as f:
+        f.write("inputG_0_0.bin")
+        f.flush()
+    file_name = f"{args.artifact}/inputG_0_0.bin"
+    inputG.detach().numpy().tofile(file_name)
+    file_name = f"{args.artifact}/goldenG_0_0.bin"
+    goldenG = netG_instance(inputG)
+    goldenG.detach().numpy().tofile(file_name)
+
+    input_list_file = f"{args.artifact}/input_list_D.txt"
+    with open(input_list_file, "w") as f:
+        f.write("inputD_0_0.bin")
+        f.flush()
+    file_name = f"{args.artifact}/inputD_0_0.bin"
+    inputD.detach().numpy().tofile(file_name)
+    file_name = f"{args.artifact}/goldenD_0_0.bin"
+    goldenD = netD_instance(inputD)
+    goldenD.detach().numpy().tofile(file_name)
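The script above emits one .pte per network plus raw input/golden tensors for on-device comparison. A quick sanity check of those artifacts (paths assume the default ./dcgan artifact directory; shapes follow the wrapper outputs above, so this is a sketch rather than part of the commit):

import numpy as np

# Inputs are saved in NHWC layout; goldens are whatever the wrapped nets return.
inputG = np.fromfile("dcgan/inputG_0_0.bin", dtype=np.float32).reshape(1, 1, 1, 100)
goldenG = np.fromfile("dcgan/goldenG_0_0.bin", dtype=np.float32).reshape(1, 3, 64, 64)
inputD = np.fromfile("dcgan/inputD_0_0.bin", dtype=np.float32).reshape(1, 64, 64, 3)
goldenD = np.fromfile("dcgan/goldenD_0_0.bin", dtype=np.float32)  # single sigmoid score

print(inputG.shape, goldenG.shape, inputD.shape, goldenD.shape)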
New file: dcgan_main.py (DCGAN Generator/Discriminator definition)

Lines changed: 72 additions & 0 deletions
@@ -0,0 +1,72 @@
+"""Ref https://github.com/pytorch/examples/blob/main/dcgan/main.py"""
+
+import torch.nn as nn
+
+
+class Generator(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.main = nn.Sequential(
+            # input is Z, going into a convolution
+            nn.ConvTranspose2d(100, 64 * 8, 4, 1, 0, bias=False),
+            nn.BatchNorm2d(64 * 8),
+            nn.ReLU(True),
+            # state size. (64*8) x 4 x 4
+            nn.ConvTranspose2d(64 * 8, 64 * 4, 4, 2, 1, bias=False),
+            nn.BatchNorm2d(64 * 4),
+            nn.ReLU(True),
+            # state size. (64*4) x 8 x 8
+            nn.ConvTranspose2d(64 * 4, 64 * 2, 4, 2, 1, bias=False),
+            nn.BatchNorm2d(64 * 2),
+            nn.ReLU(True),
+            # state size. (64*2) x 16 x 16
+            nn.ConvTranspose2d(64 * 2, 64, 4, 2, 1, bias=False),
+            nn.BatchNorm2d(64),
+            nn.ReLU(True),
+            # state size. (64) x 32 x 32
+            nn.ConvTranspose2d(64, 3, 4, 2, 1, bias=False),
+            nn.Tanh(),
+            # state size. (3) x 64 x 64
+        )
+
+    def forward(self, input):
+        output = self.main(input)
+        return output
+
+
+# main_netG_input_shape = [1, 100, 1, 1]
+# model = Generator()
+
+
+class Discriminator(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.main = nn.Sequential(
+            # input is (3) x 64 x 64
+            nn.Conv2d(3, 64, 4, 2, 1, bias=False),
+            nn.LeakyReLU(0.2, inplace=True),
+            # state size. (64) x 32 x 32
+            nn.Conv2d(64, 64 * 2, 4, 2, 1, bias=False),
+            nn.BatchNorm2d(64 * 2),
+            nn.LeakyReLU(0.2, inplace=True),
+            # state size. (64*2) x 16 x 16
+            nn.Conv2d(64 * 2, 64 * 4, 4, 2, 1, bias=False),
+            nn.BatchNorm2d(64 * 4),
+            nn.LeakyReLU(0.2, inplace=True),
+            # state size. (64*4) x 8 x 8
+            nn.Conv2d(64 * 4, 64 * 8, 4, 2, 1, bias=False),
+            nn.BatchNorm2d(64 * 8),
+            nn.LeakyReLU(0.2, inplace=True),
+            # state size. (64*8) x 4 x 4
+            nn.Conv2d(64 * 8, 1, 4, 1, 0, bias=False),
+            nn.Sigmoid(),
+        )
+
+    def forward(self, input):
+        output = self.main(input)
+
+        return output.view(-1, 1).squeeze(1)
+
+
+# main_netD_input_shape = [1, 3, 64, 64]
+# model = Discriminator()
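The commented shapes at the bottom can be checked directly. A short sketch confirming the tensor contract the export script relies on (NCHW shapes here, before the NHWC wrapper's permute; assumes dcgan_main.py is importable from the current directory):

import torch
import dcgan_main

netG = dcgan_main.Generator().eval()
netD = dcgan_main.Discriminator().eval()
with torch.no_grad():
    g_out = netG(torch.randn(1, 100, 1, 1))   # latent vector -> image
    d_out = netD(torch.randn(1, 3, 64, 64))   # image -> realness score
print(g_out.shape)  # torch.Size([1, 3, 64, 64])
print(d_out.shape)  # torch.Size([1])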

examples/mediatek/model_export_scripts/deeplab_v3.py

Lines changed: 10 additions & 5 deletions
@@ -3,18 +3,19 @@
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
+import os
+import sys

+if os.getcwd() not in sys.path:
+    sys.path.append(os.getcwd())
 import argparse
-import os
 import random

 import numpy as np

 import torch
+from aot_utils.oss_utils.utils import build_executorch_binary
 from executorch.backends.mediatek import Precision
-from executorch.examples.mediatek.aot_utils.oss_utils.utils import (
-    build_executorch_binary,
-)
 from executorch.examples.models.deeplab_v3 import DeepLabV3ResNet101Model


@@ -26,7 +27,7 @@ def __init__(self):
     def forward(self, input1):
         nchw_input1 = input1.permute(0, 3, 1, 2)
         nchw_output = self.deeplabv3(nchw_input1)
-        return nchw_output.permute(0, 2, 3, 1)
+        return nchw_output


 def get_dataset(data_size, dataset_dir, download):
@@ -121,4 +122,8 @@ def get_dataset(data_size, dataset_dir, download):
         f"{args.artifact}/{pte_filename}",
         inputs,
         quant_dtype=Precision.A8W8,
+        skip_op_name={
+            "aten_convolution_default_106",
+            "aten_convolution_default_107",
+        },
     )

examples/mediatek/model_export_scripts/edsr.py

Lines changed: 4 additions & 3 deletions
@@ -6,14 +6,15 @@

 import argparse
 import os
+import sys

+if os.getcwd() not in sys.path:
+    sys.path.append(os.getcwd())
 import numpy as np

 import torch
+from aot_utils.oss_utils.utils import build_executorch_binary
 from executorch.backends.mediatek import Precision
-from executorch.examples.mediatek.aot_utils.oss_utils.utils import (
-    build_executorch_binary,
-)
 from executorch.examples.models.edsr import EdsrModel

 from PIL import Image
