Conversion from Tosa to Linalg

Hi everyone,
I’m trying to convert TOSA generated by Torch-MLIR for a simple model with a single conv2d layer. I’m using the built-in conversion from MLIR, but I’m getting an error:

./my_mlir_parser /home/vimal/personal/MLIR-Read/samples/extTosa.mlir
loc("/home/vimal/personal/MLIR-Read/samples/extTosa.mlir":1:1): error: 'builtin.module' op trying to schedule a pass on an unsupported operation
Error: Tosa to Linalg lowering pass failed

The following is my main code,

#include "mlir/IR/Dialect.h"
#include "mlir/Dialect/Tosa/IR/TosaOps.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"

#include "mlir/IR/DialectRegistry.h"
#include "mlir/Tools/mlir-opt/MlirOptMain.h"

#include "mlir/IR/AsmState.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/FileUtilities.h"

#include "mlir/Pass/PassManager.h"
#include "mlir/Conversion/TosaToLinalg/TosaToLinalg.h"
#include "mlir/Transforms/Passes.h"

#include "llvm/Support/raw_ostream.h"

using namespace mlir;
using namespace llvm; 

int main(int argc, char ** argv) {
  MLIRContext ctx;
  ctx.loadDialect<func::FuncDialect, tosa::TosaDialect>();
  auto src = parseSourceFile<ModuleOp>(argv[1], &ctx);

  PassManager pm(&ctx);
  pm.addPass(tosa::createTosaToLinalg());

  if (failed(pm.run(*src))) {
    llvm::errs() << "Error: Tosa to Linalg lowering pass failed\n";
    return 1;
  }
  
  src->dump();
  return 0;
}

and finally the mlir file I generated from torch-mlir,

module attributes {torch.debug_module_name = "SimpleConvNet"} {
  func.func @forward(%arg0: tensor<1x3x225x225xf32>) -> tensor<1x16x225x225xf32> {
    %0 = "tosa.const"() <{value = dense<[-0.182676569, -0.177921489, -0.0139302155, -0.183302373, 0.12598449, 0.187495232, -0.0370999128, 0.184032455, 0.0485874861, -0.0185722671, 0.147716835, 0.175081506, -0.0600383803, 0.0901354774, 0.0397139303, -0.00391211128]> : tensor<16xf32>}> : () -> tensor<16xf32>
    %1 = "tosa.const"() <{value = dense<"0x519F91BD48CF1E3E31A5323EE04C14BE5758B9BDA8E615BEC24391BC7CDC34BD797034BE8A28AE3D6C8409BE8AAF063EF5117CBC655A19BEEAC1EBBD4729CF3BE3CA35BE65DC233E6B9EA23C661E833DAE85863D6BDCB3BD54BBC5BD1CBB38BEBCB633BC32C9863CD48FD23D160A6D3D612EDD3D324A253E9495C2BD500F98BD8904D8BB2DAC34BE2828083E7600363E9E9881BCD2BE413EC1C00A3DE2FEC0BD456A713D36D2E6BD84F8E53DD2C4C73CA0B3DB3D1330D8BD11B6223DFF6497BCF0A536BEE0AB36BE5B32903D0B9AF13C20B308BE4F1B833D0F6C16BD25D01C3E284E04BEDB0CAD3D3B47CABD4352A33D4282EBBDC056BABCC0114BBDCFA425BEBE562BBED1E1D3BC4E34803DCCDBA7BB9D8C03BEF940DFBDE726CE3D31F443BEBECBF8BC6F6FAC34E65AFC3D22CE41BE9FC03A3EBE34273E4DAEB13C2D20713D4A15D6BD7234223E003729BE8AED203E2ACAF2BDC579DD3DF33B1ABD30F424BE493A99BC7F95DDBDC2D7F63B1BC9D4BDB7D71C3E80CDAF3DABDE533DE2811ABE74F9E33B9090A23D495FBDBC25FA103D2612E63D39B6233E41A321BE1148EBBC8AA8FA3D09DAC1BD4D8EA03D846A853D6373913C617B523DC36C93BC9DA9EEBC50BE13BC96BB40BA993033BD5CB1793DF4D7D8BD0BC307BE101901BE6E567DBD798F263EE6D612BE1259DE3DAC733F3E380728BE2A5028BECB0EE8BDCC6FCDBD4AB0DE3D9E8096BD71C7983D685569BD943706BE7EE541BEEB38C33D9E9960BD087F6EBD189A30BE0C5E273EC41F173EA61BD23DDC1EDB3D03F9CEBD05CD29BE71D837BCA363103EB8F3E2BD2209353E8989DB3C534D853D310678BD6649A3BD25CE1E3D0A3ECFBDB210E13D5426DBBDF42A11BEB62F09BE5DE5843D5115C3BDE732413BC7D6ECBD9E9B043EF9FD06BE871C643DA4C5263EB1BC433E888BC93DC3C417BDCCB32ABEC656323E100E3B3EDB3D3ABE6B301CBEF0C513BD4397B0BDD4BDE13DEC5F0D3D65DD2B3E3527C0BCD4EDFB3C44E5783DE78545BDAB3B2DBE3F1498BDF4A5753CD7E31ABD7C7140BEA2269B3DC76208BDDC7A113E6FBF40BE1C222A3ECAE6033D42152F3E065A3CBE4A60F23D2C84313EF938293DA4BA283E12383B3EA4493CBED5AE6E3D4712FD3DE619DCBDD98492BD26B15DBC25D4EE3D5FCC6A3D65872FBECD4EA7BD4327ACBBDEC176BD673FDABDD72D43BE37E3A3BD41CD833DFF68B5BD28C7043E750EB6BC23819C3D97ED353E8EE46FBCBEDC41BC830DAE3D45FC2EBDBC639BBD1FEA953DEEFA6F3D870DCB3DEE2C3E3E2267F73C517A1CBD63313D3DA70DB8BD1445A8BDC29DA4BC51ECB83D18771B3E0FE4133E862C4B3DC77011BED99F523D5AA00EBD583CF33D7B94A53B7FF61BBEB625DEBD2136013EE1C53CBEF73D563B8772D4BDA27285BD3FB32CBD018183BD267E1C3E0382CA3B119EE03C67C3A03D6FC0C2BD24432FBC0C5EDA3DCB174CBD28C314BE7C3766BD92791BBEC2E5523CB40A243D4F13673C88670BBE2D581EBEED2518BE7F135FBD62F215BC710701BE9179943D9765D33D04FE42BD5F4A863C035ABE3D42F8E83D22BCAF3DCFFC6CBD3989FBBDA51802B7269629BE97FE083EBB6A8ABC40EF34BE7343003B31980C3E2A0D2EBE33F288BC023E0C3E590931BE474E1FBE05FA233E7D0A163D6635153E4C00C23C7F3ADC3D854C07BE850EBABDAE5E9E3D77AF2CBEF29C2ABE918BC73D2D6241BDE6F36E3C3EEC00BEEE89DC3ABA1A2FBDB766593D7D00EBBDB162423E55DA323EB9B01D3EBCF5DF3D2D67763D2189E63D0B1F1A3ED409123EA106EA3D69110E3C7E59BA3D965C27BE65FD78BDA8F3213E6140FBBD29E91E3D9549DB3D50193DBE1F1FA5BDCF40E5BD6E4A0D3E87F71FBD1806F2BD25BE3EBD43F77C3DF1C9413EB7E242BEFEDF363E7925C03D7543DCBDD9D88E3DB18698BD845886BCACA898BDFA79B83D67E9B43DF715383E2B8D04BE113FE3BD0CF6A83DCB3132BD75F04A3D60DD3EBEA2FF243E7EFD3EBE1A1D1EBE87A3353E529D0E3B9134A5BD673FB83DFC40F83C4B9B0A3E5B75803D120C363E76187DBCAB431BBEFFE1103E0F6B123E3E77083E4CD806BE167E003E01EB613CF88216BECA159E3D0388333E185B95BD7AF807BE552F1EBE1B9D203EDD97193D0D88DCBC5CCB37BE7FB7B93C966D583D182A4BBD23243DBEFF12373E80FC203E705F873D0AE112BE8CCCA0BD120231BE4D791B3EA7D2E7BD3906AEBC1F3043BEBCD81C3E892E9D3D593D14BEBDD651BD2D1DF2BD1AB2313EEDC2363D96289ABD7921B8BD22803CBED36B50BCC842D3BD49870BBE0BBAAA3C11DD41BE07D124BE12C4263C87F6283E044FC3BC1D41F2BD742969BC6E42343D1FF4483DE645F03DAF9301BEBD0ED5BD3FAC33BDB163A1BDB567A13DF23E9F3CDFCC02BCA0C5D3BD"> : tensor<16x3x3x3xf32>}> : () -> tensor<16x3x3x3xf32>
    %2 = "tosa.const"() <{value = dense<[0, 2, 3, 1]> : tensor<4xi32>}> : () -> tensor<4xi32>
    %3 = "tosa.const"() <{value = dense<[0, 3, 1, 2]> : tensor<4xi32>}> : () -> tensor<4xi32>
    %4 = tosa.transpose %arg0, %2 : (tensor<1x3x225x225xf32>, tensor<4xi32>) -> tensor<1x225x225x3xf32>
    %5 = tosa.transpose %1, %2 : (tensor<16x3x3x3xf32>, tensor<4xi32>) -> tensor<16x3x3x3xf32>
    %6 = tosa.conv2d %4, %5, %0 {dilation = array<i64: 1, 1>, pad = array<i64: 1, 1, 1, 1>, stride = array<i64: 1, 1>} : (tensor<1x225x225x3xf32>, tensor<16x3x3x3xf32>, tensor<16xf32>) -> tensor<1x225x225x16xf32>
    %7 = tosa.transpose %6, %3 : (tensor<1x225x225x16xf32>, tensor<4xi32>) -> tensor<1x16x225x225xf32>
    return %7 : tensor<1x16x225x225xf32>
  }
}

It would be very helpful in my process of understanding MLIR if anyone could help me with this.

Thanks
Vimal William

Hey Vimal,

I believe the TosaToLinalg pass is a FunctionOpInterface pass, meaning it needs to run on operations that implement the function interface. I think the pass manager you’ve created here is going to run at the builtin.module level, rather than at the level of the func.func nested within it. Could you try calling pm.addNestedPass<func::FuncOp>(tosa::createTosaToLinalg()) instead and see if you get the same error?
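
Untested, but roughly what I mean, reusing the variables from your snippet:

// Sketch only: schedule TosaToLinalg on the func.func ops nested inside the
// module instead of on builtin.module itself.
PassManager pm(&ctx);
pm.addNestedPass<func::FuncOp>(tosa::createTosaToLinalg());
if (failed(pm.run(*src))) {
  llvm::errs() << "Error: Tosa to Linalg lowering pass failed\n";
  return 1;
}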

More generally, not everything from TOSA will legalize to Linalg directly; you might want to try calling mlir::tosa::addTosaToLinalgPasses(pm, ...) instead, since I think that adds a bunch of other passes handling things like broadcasting and canonicalization that should complete the lowering.
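
Again untested, but something along these lines; the option structs and the exact signature of addTosaToLinalgPasses may differ between MLIR versions:

// Sketch only: let the helper build the whole TOSA -> Linalg pipeline.
// It nests the individual conversions on func.func for you.
PassManager pm(&ctx);
TosaToLinalgOptions options;
TosaToLinalgNamedOptions namedOptions;
tosa::TosaValidationOptions validationOptions;
tosa::addTosaToLinalgPasses(pm, options, namedOptions, validationOptions);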

I’ve not tested this locally, so it could be that the issue is elsewhere, but it’s worth a first try :crossed_fingers:

Cheers,
Jack.


Thanks for your question! A few things to unpack here - lowering your example produces more than just Linalg content. As @FranklandJack mentioned, some passes run on a FunctionOpInterface, but that can be overcome.

The larger problem is that a sequence of passes needs to be called, since TOSA is decomposed into Linalg, Arith, Tensor, and SCF. This can be seen in the pass pipeline construction in Torch-MLIR’s TOSA-to-Linalg backend: torch-mlir/projects/pt1/python/torch_mlir_e2e_test/tosa_backends/linalg_on_tensors.py at main · llvm/torch-mlir · GitHub

This can be replicated with the core mlir-opt using:

$ ./mlir-opt -pass-pipeline="builtin.module(func.func(tosa-to-arith,tosa-to-scf,tosa-to-linalg-named,tosa-to-linalg,tosa-to-tensor,tosa-to-arith))" tosa-to-linalg.mlir
#map = affine_map<(d0, d1, d2, d3) -> (d3)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
module attributes {torch.debug_module_name = "SimpleConvNet"} {
  func.func @forward(%arg0: tensor<1x3x225x225xf32>) -> tensor<1x16x225x225xf32> {
    %cst = arith.constant dense<[-0.182676569, -0.177921489, -0.0139302155, -0.183302373, 0.12598449, 0.187495232, -0.0370999128, 0.184032455, 0.0485874861, -0.0185722671, 0.147716835, 0.175081506, -0.0600383803, 0.0901354774, 0.0397139303, -0.00391211128]> : tensor<16xf32>
    %cst_0 = arith.constant dense<"0x519F91BD48CF1E3E31A5323EE04C14BE5758B9BDA8E615BEC24391BC7CDC34BD797034BE8A28AE3D6C8409BE8AAF063EF5117CBC655A19BEEAC1EBBD4729CF3BE3CA35BE65DC233E6B9EA23C661E833DAE85863D6BDCB3BD54BBC5BD1CBB38BEBCB633BC32C9863CD48FD23D160A6D3D612EDD3D324A253E9495C2BD500F98BD8904D8BB2DAC34BE2828083E7600363E9E9881BCD2BE413EC1C00A3DE2FEC0BD456A713D36D2E6BD84F8E53DD2C4C73CA0B3DB3D1330D8BD11B6223DFF6497BCF0A536BEE0AB36BE5B32903D0B9AF13C20B308BE4F1B833D0F6C16BD25D01C3E284E04BEDB0CAD3D3B47CABD4352A33D4282EBBDC056BABCC0114BBDCFA425BEBE562BBED1E1D3BC4E34803DCCDBA7BB9D8C03BEF940DFBDE726CE3D31F443BEBECBF8BC6F6FAC34E65AFC3D22CE41BE9FC03A3EBE34273E4DAEB13C2D20713D4A15D6BD7234223E003729BE8AED203E2ACAF2BDC579DD3DF33B1ABD30F424BE493A99BC7F95DDBDC2D7F63B1BC9D4BDB7D71C3E80CDAF3DABDE533DE2811ABE74F9E33B9090A23D495FBDBC25FA103D2612E63D39B6233E41A321BE1148EBBC8AA8FA3D09DAC1BD4D8EA03D846A853D6373913C617B523DC36C93BC9DA9EEBC50BE13BC96BB40BA993033BD5CB1793DF4D7D8BD0BC307BE101901BE6E567DBD798F263EE6D612BE1259DE3DAC733F3E380728BE2A5028BECB0EE8BDCC6FCDBD4AB0DE3D9E8096BD71C7983D685569BD943706BE7EE541BEEB38C33D9E9960BD087F6EBD189A30BE0C5E273EC41F173EA61BD23DDC1EDB3D03F9CEBD05CD29BE71D837BCA363103EB8F3E2BD2209353E8989DB3C534D853D310678BD6649A3BD25CE1E3D0A3ECFBDB210E13D5426DBBDF42A11BEB62F09BE5DE5843D5115C3BDE732413BC7D6ECBD9E9B043EF9FD06BE871C643DA4C5263EB1BC433E888BC93DC3C417BDCCB32ABEC656323E100E3B3EDB3D3ABE6B301CBEF0C513BD4397B0BDD4BDE13DEC5F0D3D65DD2B3E3527C0BCD4EDFB3C44E5783DE78545BDAB3B2DBE3F1498BDF4A5753CD7E31ABD7C7140BEA2269B3DC76208BDDC7A113E6FBF40BE1C222A3ECAE6033D42152F3E065A3CBE4A60F23D2C84313EF938293DA4BA283E12383B3EA4493CBED5AE6E3D4712FD3DE619DCBDD98492BD26B15DBC25D4EE3D5FCC6A3D65872FBECD4EA7BD4327ACBBDEC176BD673FDABDD72D43BE37E3A3BD41CD833DFF68B5BD28C7043E750EB6BC23819C3D97ED353E8EE46FBCBEDC41BC830DAE3D45FC2EBDBC639BBD1FEA953DEEFA6F3D870DCB3DEE2C3E3E2267F73C517A1CBD63313D3DA70DB8BD1445A8BDC29DA4BC51ECB83D18771B3E0FE4133E862C4B3DC77011BED99F523D5AA00EBD583CF33D7B94A53B7FF61BBEB625DEBD2136013EE1C53CBEF73D563B8772D4BDA27285BD3FB32CBD018183BD267E1C3E0382CA3B119EE03C67C3A03D6FC0C2BD24432FBC0C5EDA3DCB174CBD28C314BE7C3766BD92791BBEC2E5523CB40A243D4F13673C88670BBE2D581EBEED2518BE7F135FBD62F215BC710701BE9179943D9765D33D04FE42BD5F4A863C035ABE3D42F8E83D22BCAF3DCFFC6CBD3989FBBDA51802B7269629BE97FE083EBB6A8ABC40EF34BE7343003B31980C3E2A0D2EBE33F288BC023E0C3E590931BE474E1FBE05FA233E7D0A163D6635153E4C00C23C7F3ADC3D854C07BE850EBABDAE5E9E3D77AF2CBEF29C2ABE918BC73D2D6241BDE6F36E3C3EEC00BEEE89DC3ABA1A2FBDB766593D7D00EBBDB162423E55DA323EB9B01D3EBCF5DF3D2D67763D2189E63D0B1F1A3ED409123EA106EA3D69110E3C7E59BA3D965C27BE65FD78BDA8F3213E6140FBBD29E91E3D9549DB3D50193DBE1F1FA5BDCF40E5BD6E4A0D3E87F71FBD1806F2BD25BE3EBD43F77C3DF1C9413EB7E242BEFEDF363E7925C03D7543DCBDD9D88E3DB18698BD845886BCACA898BDFA79B83D67E9B43DF715383E2B8D04BE113FE3BD0CF6A83DCB3132BD75F04A3D60DD3EBEA2FF243E7EFD3EBE1A1D1EBE87A3353E529D0E3B9134A5BD673FB83DFC40F83C4B9B0A3E5B75803D120C363E76187DBCAB431BBEFFE1103E0F6B123E3E77083E4CD806BE167E003E01EB613CF88216BECA159E3D0388333E185B95BD7AF807BE552F1EBE1B9D203EDD97193D0D88DCBC5CCB37BE7FB7B93C966D583D182A4BBD23243DBEFF12373E80FC203E705F873D0AE112BE8CCCA0BD120231BE4D791B3EA7D2E7BD3906AEBC1F3043BEBCD81C3E892E9D3D593D14BEBDD651BD2D1DF2BD1AB2313EEDC2363D96289ABD7921B8BD22803CBED36B50BCC842D3BD49870BBE0BBAAA3C11DD41BE07D124BE12C4263C87F6283E044FC3BC1D41F2BD742969BC6E42343D1FF4483DE645F03DAF9301BEBD0ED5BD3FAC33BDB163A1BDB567A13DF23E9F3CDFCC02BCA0C5D3BD"> : tensor<16x3x3x3xf32>
    %cst_1 = arith.constant dense<[0, 2, 3, 1]> : tensor<4xi32>
    %cst_2 = arith.constant dense<[0, 3, 1, 2]> : tensor<4xi32>
    %0 = tensor.empty() : tensor<1x225x225x3xf32>
    %transposed = linalg.transpose ins(%arg0 : tensor<1x3x225x225xf32>) outs(%0 : tensor<1x225x225x3xf32>) permutation = [0, 2, 3, 1]
    %1 = tensor.empty() : tensor<16x3x3x3xf32>
    %transposed_3 = linalg.transpose ins(%cst_0 : tensor<16x3x3x3xf32>) outs(%1 : tensor<16x3x3x3xf32>) permutation = [0, 2, 3, 1]
    %cst_4 = arith.constant 0.000000e+00 : f32
    %padded = tensor.pad %transposed low[0, 1, 1, 0] high[0, 1, 1, 0] {
    ^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index):
      tensor.yield %cst_4 : f32
    } : tensor<1x225x225x3xf32> to tensor<1x227x227x3xf32>
    %2 = tensor.empty() : tensor<1x225x225x16xf32>
    %3 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%cst : tensor<16xf32>) outs(%2 : tensor<1x225x225x16xf32>) {
    ^bb0(%in: f32, %out: f32):
      linalg.yield %in : f32
    } -> tensor<1x225x225x16xf32>
    %4 = linalg.conv_2d_nhwc_fhwc {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins(%padded, %transposed_3 : tensor<1x227x227x3xf32>, tensor<16x3x3x3xf32>) outs(%3 : tensor<1x225x225x16xf32>) -> tensor<1x225x225x16xf32>
    %5 = tensor.empty() : tensor<1x16x225x225xf32>
    %transposed_5 = linalg.transpose ins(%4 : tensor<1x225x225x16xf32>) outs(%5 : tensor<1x16x225x225xf32>) permutation = [0, 3, 1, 2]
    return %transposed_5 : tensor<1x16x225x225xf32>
  }
}
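
If you prefer to stay in C++ rather than shell out to mlir-opt, roughly the same pipeline can be assembled with nested passes. This is an untested sketch; the create* functions come from the corresponding mlir/Conversion/TosaTo*/ headers and their exact signatures can differ between MLIR versions:

// Untested sketch: the same pass sequence as the mlir-opt pipeline above.
// Each conversion runs on func.func, so add them as nested passes.
PassManager pm(&ctx);
pm.addNestedPass<func::FuncOp>(tosa::createTosaToArith());
pm.addNestedPass<func::FuncOp>(tosa::createTosaToSCF());
pm.addNestedPass<func::FuncOp>(tosa::createTosaToLinalgNamed());
pm.addNestedPass<func::FuncOp>(tosa::createTosaToLinalg());
pm.addNestedPass<func::FuncOp>(tosa::createTosaToTensor());
pm.addNestedPass<func::FuncOp>(tosa::createTosaToArith());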

@FranklandJack and @sjarus, thanks for your help. Let me try this and come back if I run into any errors. Thanks!

I updated the code based on your suggestions and it ran without any errors during pm.run. I wanted to see the lowered IR, but I have no clue how to enable that. Finally, I tried enableIRPrinting, but it did not seem to work either.

#include "mlir/IR/Dialect.h"
#include "mlir/Dialect/Tosa/IR/TosaOps.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"

#include "mlir/IR/DialectRegistry.h"
#include "mlir/Tools/mlir-opt/MlirOptMain.h"

#include "mlir/IR/AsmState.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/FileUtilities.h"
#include "mlir/Transforms/Passes.h"

#include "mlir/Pass/PassManager.h"
#include "mlir/Conversion/TosaToLinalg/TosaToLinalg.h"
#include "mlir/Transforms/Passes.h"

#include "llvm/Support/raw_ostream.h"

using namespace mlir;
using namespace llvm;

void lowerTosaToLinalg(ModuleOp module) {
  PassManager pm(module.getContext());
  pm.enableIRPrinting(); // Enable IR printing

  // Add the built-in passes to convert TOSA to Linalg.
  OpPassManager &tosaToLinalgPM = pm.nest<ModuleOp>();
  TosaToLinalgOptions options; // Default options
  TosaToLinalgNamedOptions namedOptions; // Default named options
  tosa::TosaValidationOptions validationOptions; // Default validation options
  addTosaToLinalgPasses(tosaToLinalgPM, options, namedOptions, validationOptions);
  
  // Run the pass pipeline.
  if (failed(pm.run(module)))
    llvm::errs() << "Failed to lower TOSA to Linalg\n";

}

int main(int argc, char ** argv) {
  MLIRContext ctx;
  ctx.disableMultithreading(true);
  ctx.loadDialect<func::FuncDialect, tosa::TosaDialect>();
  auto src = parseSourceFile<ModuleOp>(argv[1], &ctx);

  lowerTosaToLinalg(src.get());
//   src->print(llvm::outs());
//   src->dump();
  return 0;
}

If there is a way to turn it on, please let me know.

This seems to be a general pass manager construction issue, not related to TosaToLinalg anymore. You can look at the Toy example: llvm-project/mlir/examples/toy/Ch7 at main · llvm/llvm-project · GitHub

For this particular situation, calling the following after the conversions ought to emit the IR:

module->dump(); 
===-------------------------------------------------------------------------===
                         ... Pass statistics report ...
===-------------------------------------------------------------------------===
'builtin.module' Pipeline
  'func.func' Pipeline
    TosaOptionalDecompositions
    Canonicalizer
    TosaInferShapes
    TosaMakeBroadcastable
    TosaToLinalgNamed
    Canonicalizer
    TosaLayerwiseConstantFoldPass
    TosaMakeBroadcastable
  TosaValidation
  'func.func' Pipeline
    TosaToLinalg

module attributes {torch.debug_module_name = "SimpleConvNet"} {
  func.func @forward(%arg0: tensor<1x3x225x225xf32>) -> tensor<1x16x225x225xf32> {
    %0 = "tosa.const"() <{value = dense<[-0.182676569, -0.177921489, -0.0139302155, -0.183302373, 0.12598449, 0.187495232, -0.0370999128, 0.184032455, 0.0485874861, -0.0185722671, 0.147716835, 0.175081506, -0.0600383803, 0.0901354774, 0.0397139303, -0.00391211128]> : tensor<16xf32>}> : () -> tensor<16xf32>
    %1 = "tosa.const"() <{value = dense<"0x519F91BD48CF1E3E31A5323EE04C14BE5758B9BDA8E615BEC24391BC7CDC34BD797034BE8A28AE3D6C8409BE8AAF063EF5117CBC655A19BEEAC1EBBD4729CF3BE3CA35BE65DC233E6B9EA23C661E833DAE85863D6BDCB3BD54BBC5BD1CBB38BEBCB633BC32C9863CD48FD23D160A6D3D612EDD3D324A253E9495C2BD500F98BD8904D8BB2DAC34BE2828083E7600363E9E9881BCD2BE413EC1C00A3DE2FEC0BD456A713D36D2E6BD84F8E53DD2C4C73CA0B3DB3D1330D8BD11B6223DFF6497BCF0A536BEE0AB36BE5B32903D0B9AF13C20B308BE4F1B833D0F6C16BD25D01C3E284E04BEDB0CAD3D3B47CABD4352A33D4282EBBDC056BABCC0114BBDCFA425BEBE562BBED1E1D3BC4E34803DCCDBA7BB9D8C03BEF940DFBDE726CE3D31F443BEBECBF8BC6F6FAC34E65AFC3D22CE41BE9FC03A3EBE34273E4DAEB13C2D20713D4A15D6BD7234223E003729BE8AED203E2ACAF2BDC579DD3DF33B1ABD30F424BE493A99BC7F95DDBDC2D7F63B1BC9D4BDB7D71C3E80CDAF3DABDE533DE2811ABE74F9E33B9090A23D495FBDBC25FA103D2612E63D39B6233E41A321BE1148EBBC8AA8FA3D09DAC1BD4D8EA03D846A853D6373913C617B523DC36C93BC9DA9EEBC50BE13BC96BB40BA993033BD5CB1793DF4D7D8BD0BC307BE101901BE6E567DBD798F263EE6D612BE1259DE3DAC733F3E380728BE2A5028BECB0EE8BDCC6FCDBD4AB0DE3D9E8096BD71C7983D685569BD943706BE7EE541BEEB38C33D9E9960BD087F6EBD189A30BE0C5E273EC41F173EA61BD23DDC1EDB3D03F9CEBD05CD29BE71D837BCA363103EB8F3E2BD2209353E8989DB3C534D853D310678BD6649A3BD25CE1E3D0A3ECFBDB210E13D5426DBBDF42A11BEB62F09BE5DE5843D5115C3BDE732413BC7D6ECBD9E9B043EF9FD06BE871C643DA4C5263EB1BC433E888BC93DC3C417BDCCB32ABEC656323E100E3B3EDB3D3ABE6B301CBEF0C513BD4397B0BDD4BDE13DEC5F0D3D65DD2B3E3527C0BCD4EDFB3C44E5783DE78545BDAB3B2DBE3F1498BDF4A5753CD7E31ABD7C7140BEA2269B3DC76208BDDC7A113E6FBF40BE1C222A3ECAE6033D42152F3E065A3CBE4A60F23D2C84313EF938293DA4BA283E12383B3EA4493CBED5AE6E3D4712FD3DE619DCBDD98492BD26B15DBC25D4EE3D5FCC6A3D65872FBECD4EA7BD4327ACBBDEC176BD673FDABDD72D43BE37E3A3BD41CD833DFF68B5BD28C7043E750EB6BC23819C3D97ED353E8EE46FBCBEDC41BC830DAE3D45FC2EBDBC639BBD1FEA953DEEFA6F3D870DCB3DEE2C3E3E2267F73C517A1CBD63313D3DA70DB8BD1445A8BDC29DA4BC51ECB83D18771B3E0FE4133E862C4B3DC77011BED99F523D5AA00EBD583CF33D7B94A53B7FF61BBEB625DEBD2136013EE1C53CBEF73D563B8772D4BDA27285BD3FB32CBD018183BD267E1C3E0382CA3B119EE03C67C3A03D6FC0C2BD24432FBC0C5EDA3DCB174CBD28C314BE7C3766BD92791BBEC2E5523CB40A243D4F13673C88670BBE2D581EBEED2518BE7F135FBD62F215BC710701BE9179943D9765D33D04FE42BD5F4A863C035ABE3D42F8E83D22BCAF3DCFFC6CBD3989FBBDA51802B7269629BE97FE083EBB6A8ABC40EF34BE7343003B31980C3E2A0D2EBE33F288BC023E0C3E590931BE474E1FBE05FA233E7D0A163D6635153E4C00C23C7F3ADC3D854C07BE850EBABDAE5E9E3D77AF2CBEF29C2ABE918BC73D2D6241BDE6F36E3C3EEC00BEEE89DC3ABA1A2FBDB766593D7D00EBBDB162423E55DA323EB9B01D3EBCF5DF3D2D67763D2189E63D0B1F1A3ED409123EA106EA3D69110E3C7E59BA3D965C27BE65FD78BDA8F3213E6140FBBD29E91E3D9549DB3D50193DBE1F1FA5BDCF40E5BD6E4A0D3E87F71FBD1806F2BD25BE3EBD43F77C3DF1C9413EB7E242BEFEDF363E7925C03D7543DCBDD9D88E3DB18698BD845886BCACA898BDFA79B83D67E9B43DF715383E2B8D04BE113FE3BD0CF6A83DCB3132BD75F04A3D60DD3EBEA2FF243E7EFD3EBE1A1D1EBE87A3353E529D0E3B9134A5BD673FB83DFC40F83C4B9B0A3E5B75803D120C363E76187DBCAB431BBEFFE1103E0F6B123E3E77083E4CD806BE167E003E01EB613CF88216BECA159E3D0388333E185B95BD7AF807BE552F1EBE1B9D203EDD97193D0D88DCBC5CCB37BE7FB7B93C966D583D182A4BBD23243DBEFF12373E80FC203E705F873D0AE112BE8CCCA0BD120231BE4D791B3EA7D2E7BD3906AEBC1F3043BEBCD81C3E892E9D3D593D14BEBDD651BD2D1DF2BD1AB2313EEDC2363D96289ABD7921B8BD22803CBED36B50BCC842D3BD49870BBE0BBAAA3C11DD41BE07D124BE12C4263C87F6283E044FC3BC1D41F2BD742969BC6E42343D1FF4483DE645F03DAF9301BEBD0ED5BD3FAC33BDB163A1BDB567A13DF23E9F3CDFCC02BCA0C5D3BD"> : tensor<16x3x3x3xf32>}> : () -> tensor<16x3x3x3xf32>
    %2 = "tosa.const"() <{value = dense<[0, 2, 3, 1]> : tensor<4xi32>}> : () -> tensor<4xi32>
    %3 = "tosa.const"() <{value = dense<[0, 3, 1, 2]> : tensor<4xi32>}> : () -> tensor<4xi32>
    %4 = tosa.transpose %arg0, %2 : (tensor<1x3x225x225xf32>, tensor<4xi32>) -> tensor<1x225x225x3xf32>
    %5 = tosa.transpose %1, %2 : (tensor<16x3x3x3xf32>, tensor<4xi32>) -> tensor<16x3x3x3xf32>
    %6 = tosa.conv2d %4, %5, %0 {dilation = array<i64: 1, 1>, pad = array<i64: 1, 1, 1, 1>, stride = array<i64: 1, 1>} : (tensor<1x225x225x3xf32>, tensor<16x3x3x3xf32>, tensor<16xf32>) -> tensor<1x225x225x16xf32>
    %7 = tosa.transpose %6, %3 : (tensor<1x225x225x16xf32>, tensor<4xi32>) -> tensor<1x16x225x225xf32>
    return %7 : tensor<1x16x225x225xf32>
  }
}

It’s dumping the same IR as the input?

Hey Vimal,

It’s tricky to know what is going on here without building the example, but as @sjarus said, you should be able to explicitly dump your module after the pipeline has run.

In your original example you had the line src->dump() commented out; you might want to try uncommenting that line and rebuilding the example.
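
For example, something like this at the end of main (just a sketch):

lowerTosaToLinalg(src.get());
// Print the lowered module once the pipeline has run, either to stderr ...
src->dump();
// ... or to stdout:
// src->print(llvm::outs());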

Cheers,
Jack.

Thanks @sjarus and @FranklandJack for the guidance,

#map = affine_map<(d0, d1, d2, d3) -> (d3)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
module attributes {torch.debug_module_name = "SimpleConvNet"} {
  func.func @forward(%arg0: tensor<1x3x225x225xf32>) -> tensor<1x16x225x225xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %cst_0 = arith.constant dense<[-0.182676569, -0.177921489, -0.0139302155, -0.183302373, 0.12598449, 0.187495232, -0.0370999128, 0.184032455, 0.0485874861, -0.0185722671, 0.147716835, 0.175081506, -0.0600383803, 0.0901354774, 0.0397139303, -0.00391211128]> : tensor<16xf32>
    %cst_1 = arith.constant dense<"0x519F91BD48CF1E3E31A5323EE04C14BE5758B9BDA8E615BEC24391BC7CDC34BD797034BE8A28AE3D6C8409BE8AAF063EF5117CBC655A19BEEAC1EBBD4729CF3BE3CA35BE65DC233E6B9EA23C661E833DAE85863D6BDCB3BD54BBC5BD1CBB38BEBCB633BC32C9863CD48FD23D160A6D3D612EDD3D324A253E9495C2BD500F98BD8904D8BB2DAC34BE2828083E7600363E9E9881BCD2BE413EC1C00A3DE2FEC0BD456A713D36D2E6BD84F8E53DD2C4C73CA0B3DB3D1330D8BD11B6223DFF6497BCF0A536BEE0AB36BE5B32903D0B9AF13C20B308BE4F1B833D0F6C16BD25D01C3E284E04BEDB0CAD3D3B47CABD4352A33D4282EBBDC056BABCC0114BBDCFA425BEBE562BBED1E1D3BC4E34803DCCDBA7BB9D8C03BEF940DFBDE726CE3D31F443BEBECBF8BC6F6FAC34E65AFC3D22CE41BE9FC03A3EBE34273E4DAEB13C2D20713D4A15D6BD7234223E003729BE8AED203E2ACAF2BDC579DD3DF33B1ABD30F424BE493A99BC7F95DDBDC2D7F63B1BC9D4BDB7D71C3E80CDAF3DABDE533DE2811ABE74F9E33B9090A23D495FBDBC25FA103D2612E63D39B6233E41A321BE1148EBBC8AA8FA3D09DAC1BD4D8EA03D846A853D6373913C617B523DC36C93BC9DA9EEBC50BE13BC96BB40BA993033BD5CB1793DF4D7D8BD0BC307BE101901BE6E567DBD798F263EE6D612BE1259DE3DAC733F3E380728BE2A5028BECB0EE8BDCC6FCDBD4AB0DE3D9E8096BD71C7983D685569BD943706BE7EE541BEEB38C33D9E9960BD087F6EBD189A30BE0C5E273EC41F173EA61BD23DDC1EDB3D03F9CEBD05CD29BE71D837BCA363103EB8F3E2BD2209353E8989DB3C534D853D310678BD6649A3BD25CE1E3D0A3ECFBDB210E13D5426DBBDF42A11BEB62F09BE5DE5843D5115C3BDE732413BC7D6ECBD9E9B043EF9FD06BE871C643DA4C5263EB1BC433E888BC93DC3C417BDCCB32ABEC656323E100E3B3EDB3D3ABE6B301CBEF0C513BD4397B0BDD4BDE13DEC5F0D3D65DD2B3E3527C0BCD4EDFB3C44E5783DE78545BDAB3B2DBE3F1498BDF4A5753CD7E31ABD7C7140BEA2269B3DC76208BDDC7A113E6FBF40BE1C222A3ECAE6033D42152F3E065A3CBE4A60F23D2C84313EF938293DA4BA283E12383B3EA4493CBED5AE6E3D4712FD3DE619DCBDD98492BD26B15DBC25D4EE3D5FCC6A3D65872FBECD4EA7BD4327ACBBDEC176BD673FDABDD72D43BE37E3A3BD41CD833DFF68B5BD28C7043E750EB6BC23819C3D97ED353E8EE46FBCBEDC41BC830DAE3D45FC2EBDBC639BBD1FEA953DEEFA6F3D870DCB3DEE2C3E3E2267F73C517A1CBD63313D3DA70DB8BD1445A8BDC29DA4BC51ECB83D18771B3E0FE4133E862C4B3DC77011BED99F523D5AA00EBD583CF33D7B94A53B7FF61BBEB625DEBD2136013EE1C53CBEF73D563B8772D4BDA27285BD3FB32CBD018183BD267E1C3E0382CA3B119EE03C67C3A03D6FC0C2BD24432FBC0C5EDA3DCB174CBD28C314BE7C3766BD92791BBEC2E5523CB40A243D4F13673C88670BBE2D581EBEED2518BE7F135FBD62F215BC710701BE9179943D9765D33D04FE42BD5F4A863C035ABE3D42F8E83D22BCAF3DCFFC6CBD3989FBBDA51802B7269629BE97FE083EBB6A8ABC40EF34BE7343003B31980C3E2A0D2EBE33F288BC023E0C3E590931BE474E1FBE05FA233E7D0A163D6635153E4C00C23C7F3ADC3D854C07BE850EBABDAE5E9E3D77AF2CBEF29C2ABE918BC73D2D6241BDE6F36E3C3EEC00BEEE89DC3ABA1A2FBDB766593D7D00EBBDB162423E55DA323EB9B01D3EBCF5DF3D2D67763D2189E63D0B1F1A3ED409123EA106EA3D69110E3C7E59BA3D965C27BE65FD78BDA8F3213E6140FBBD29E91E3D9549DB3D50193DBE1F1FA5BDCF40E5BD6E4A0D3E87F71FBD1806F2BD25BE3EBD43F77C3DF1C9413EB7E242BEFEDF363E7925C03D7543DCBDD9D88E3DB18698BD845886BCACA898BDFA79B83D67E9B43DF715383E2B8D04BE113FE3BD0CF6A83DCB3132BD75F04A3D60DD3EBEA2FF243E7EFD3EBE1A1D1EBE87A3353E529D0E3B9134A5BD673FB83DFC40F83C4B9B0A3E5B75803D120C363E76187DBCAB431BBEFFE1103E0F6B123E3E77083E4CD806BE167E003E01EB613CF88216BECA159E3D0388333E185B95BD7AF807BE552F1EBE1B9D203EDD97193D0D88DCBC5CCB37BE7FB7B93C966D583D182A4BBD23243DBEFF12373E80FC203E705F873D0AE112BE8CCCA0BD120231BE4D791B3EA7D2E7BD3906AEBC1F3043BEBCD81C3E892E9D3D593D14BEBDD651BD2D1DF2BD1AB2313EEDC2363D96289ABD7921B8BD22803CBED36B50BCC842D3BD49870BBE0BBAAA3C11DD41BE07D124BE12C4263C87F6283E044FC3BC1D41F2BD742969BC6E42343D1FF4483DE645F03DAF9301BEBD0ED5BD3FAC33BDB163A1BDB567A13DF23E9F3CDFCC02BCA0C5D3BD"> : tensor<16x3x3x3xf32>
    %0 = bufferization.alloc_tensor() : tensor<1x225x225x3xf32>
    %transposed = linalg.transpose ins(%arg0 : tensor<1x3x225x225xf32>) outs(%0 : tensor<1x225x225x3xf32>) permutation = [0, 2, 3, 1] 
    %1 = bufferization.alloc_tensor() : tensor<16x3x3x3xf32>
    %transposed_2 = linalg.transpose ins(%cst_1 : tensor<16x3x3x3xf32>) outs(%1 : tensor<16x3x3x3xf32>) permutation = [0, 2, 3, 1] 
    %2 = bufferization.alloc_tensor() : tensor<1x227x227x3xf32>
    %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<1x227x227x3xf32>) -> tensor<1x227x227x3xf32>
    %inserted_slice = tensor.insert_slice %transposed into %3[0, 1, 1, 0] [1, 225, 225, 3] [1, 1, 1, 1] : tensor<1x225x225x3xf32> into tensor<1x227x227x3xf32>
    %4 = bufferization.alloc_tensor() : tensor<1x225x225x16xf32>
    %5 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%cst_0 : tensor<16xf32>) outs(%4 : tensor<1x225x225x16xf32>) {
    ^bb0(%in: f32, %out: f32):
      linalg.yield %in : f32
    } -> tensor<1x225x225x16xf32>
    %6 = linalg.conv_2d_nhwc_fhwc {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins(%inserted_slice, %transposed_2 : tensor<1x227x227x3xf32>, tensor<16x3x3x3xf32>) outs(%5 : tensor<1x225x225x16xf32>) -> tensor<1x225x225x16xf32>
    %7 = bufferization.alloc_tensor() : tensor<1x16x225x225xf32>
    %transposed_3 = linalg.transpose ins(%6 : tensor<1x225x225x16xf32>) outs(%7 : tensor<1x16x225x225xf32>) permutation = [0, 3, 1, 2] 
    return %transposed_3 : tensor<1x16x225x225xf32>
  }
}

Based on some code modifications and updates to the pipeline, I can now get the passes to run.