diff --git a/tests/pytorch/matmul/config.nix b/tests/pytorch/matmul/config.nix
new file mode 100644
index 0000000000..d00359f79b
--- /dev/null
+++ b/tests/pytorch/matmul/config.nix
@@ -0,0 +1,30 @@
+{
+  includes = [
+    ../memref.h
+  ];
+
+  buddyOptArgs = [
+    [
+      "--pass-pipeline"
+      "builtin.module(func.func(tosa-to-linalg-named, tosa-to-linalg, tosa-to-tensor, tosa-to-arith), empty-tensor-to-alloc-tensor, convert-elementwise-to-linalg, arith-bufferize, func.func(linalg-bufferize, tensor-bufferize), func-bufferize)"
+    ]
+    [
+      "--pass-pipeline"
+      "builtin.module(func.func(buffer-deallocation-simplification, convert-linalg-to-loops), eliminate-empty-tensors, func.func(llvm-request-c-wrappers))"
+    ]
+    [
+      "--lower-affine"
+      "--convert-math-to-llvm"
+      "--convert-math-to-libm"
+      "--convert-scf-to-cf"
+      "--convert-arith-to-llvm"
+      "--expand-strided-metadata"
+      "--finalize-memref-to-llvm"
+      "--lower-vector-exp"
+      "--lower-rvv=rv32"
+      "--convert-vector-to-llvm"
+      "--convert-func-to-llvm"
+      "--reconcile-unrealized-casts"
+    ]
+  ];
+}
diff --git a/tests/pytorch/matmul/matmul.c b/tests/pytorch/matmul/matmul.c
new file mode 100644
index 0000000000..d8aa32497c
--- /dev/null
+++ b/tests/pytorch/matmul/matmul.c
@@ -0,0 +1,39 @@
+#include "memref.h"
+
+NEW_MEMREF(float, 3);
+
+extern void _mlir_ciface_forward(struct MemRef_float_dim3 *output,
+                                 struct MemRef_float_dim3 *arg1,
+                                 struct MemRef_float_dim3 *arg2);
+
+__attribute((section(".vdata"))) float input_float_0[64][32][2];
+struct MemRef_float_dim3 input1 = {
+    .allocatedPtr = input_float_0,
+    .alignedPtr = input_float_0,
+    .offset = 0,
+    .sizes = {64, 32, 2},
+    .strides = {2 * 32, 2, 1},
+};
+
+__attribute((section(".vdata"))) float input_float_1[64][2][8];
+struct MemRef_float_dim3 input2 = {
+    .allocatedPtr = input_float_1,
+    .alignedPtr = input_float_1,
+    .offset = 0,
+    .sizes = {64, 2, 8},
+    .strides = {2 * 8, 8, 1},
+};
+
+__attribute((section(".vdata"))) float output_float_0[64][32][8];
+struct MemRef_float_dim3 output = {
+    .allocatedPtr = output_float_0,
+    .alignedPtr = output_float_0,
+    .offset = 0,
+    .sizes = {64, 32, 8},
+    .strides = {32 * 8, 8, 1},
+};
+
+int test() {
+  _mlir_ciface_forward(&output, &input1, &input2);
+  return 0;
+}
diff --git a/tests/pytorch/matmul/matmul.py b/tests/pytorch/matmul/matmul.py
new file mode 100644
index 0000000000..86157e75db
--- /dev/null
+++ b/tests/pytorch/matmul/matmul.py
@@ -0,0 +1,26 @@
+import torch
+import torch._dynamo as dynamo
+from torch._inductor.decomposition import decompositions as inductor_decomp
+
+from buddy.compiler.frontend import DynamoCompiler
+from buddy.compiler.ops import tosa
+
+# Define the input data.
+float32_in1 = torch.randn(64, 32, 2).to(torch.float32)
+float32_in2 = torch.randn(64, 2, 8).to(torch.float32)
+
+# Initialize the dynamo compiler.
+dynamo_compiler = DynamoCompiler(
+    primary_registry=tosa.ops_registry,
+    aot_autograd_decomposition=inductor_decomp,
+)
+
+# Pass the function and input data to the dynamo compiler's importer. The
+# importer first builds a graph; the graph is then lowered to a top-level
+# IR (tosa, linalg, etc.) and written out as an MLIR module below.
+graphs = dynamo_compiler.importer(torch.matmul, *(float32_in1, float32_in2))
+graph = graphs[0]
+graph.lower_to_top_level_ir()
+
+with open("forward.mlir", "w") as mlir_module:
+    print(graph._imported_module, file=mlir_module)
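The C harness above relies on `NEW_MEMREF(float, 3)` from `tests/pytorch/memref.h` to declare the descriptor type passed to `_mlir_ciface_forward`. As a reading aid only, the sketch below shows what that macro presumably expands to; the field names are taken from the designated initializers in `matmul.c`, while the exact definition is an assumption and lives in `memref.h` (the layout mirrors MLIR's ranked memref descriptor ABI).

```c
#include <stdint.h>

/* Hypothetical expansion of NEW_MEMREF(float, 3) -- for illustration only;
 * the authoritative definition is in tests/pytorch/memref.h. */
struct MemRef_float_dim3 {
  float *allocatedPtr;  /* pointer returned by the allocator */
  float *alignedPtr;    /* pointer the kernel actually indexes from */
  intptr_t offset;      /* element offset applied to alignedPtr */
  intptr_t sizes[3];    /* extent of each of the three dimensions */
  intptr_t strides[3];  /* row-major strides, in elements */
};
```

This matches the initializers in `matmul.c`, e.g. `input1` describes a contiguous row-major `(64, 32, 2)` buffer with strides `{64, 2, 1}`.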
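Note that `test()` only invokes the generated kernel and returns 0. If a numerical check is wanted on top of that, a naive batched matmul over the same buffers could serve as a reference. The helper below is a hypothetical sketch, not part of this PR; shapes follow `matmul.c`, i.e. `(64, 32, 2) x (64, 2, 8) -> (64, 32, 8)`, and a tolerance is used because bit-exact equality is not guaranteed once the kernel reorders the floating-point accumulation.

```c
#include <math.h>
#include <stddef.h>

/* Hypothetical reference check: naive batched matmul over contiguous
 * row-major buffers, compared element-wise against the kernel output. */
static int check_batched_matmul(const float *a, const float *b, const float *c,
                                size_t batch, size_t m, size_t k, size_t n) {
  for (size_t p = 0; p < batch; ++p)
    for (size_t i = 0; i < m; ++i)
      for (size_t j = 0; j < n; ++j) {
        float acc = 0.0f;
        for (size_t q = 0; q < k; ++q)
          acc += a[(p * m + i) * k + q] * b[(p * k + q) * n + j];
        if (fabsf(c[(p * m + i) * n + j] - acc) > 1e-4f)
          return 1;  /* mismatch beyond tolerance */
      }
  return 0;
}
```

In the harness it could be called after `_mlir_ciface_forward`, e.g. `check_batched_matmul(&input_float_0[0][0][0], &input_float_1[0][0][0], &output_float_0[0][0][0], 64, 32, 2, 8)`.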