I have a TOSA MLIR module which describes a "Conv2d+Linear+Linear" model:
module attributes {torch.debug_module_name = "simple"} {
// Conv2d + Linear + Linear pipeline; input is 1x3x16x16 (channels-first), output 1x8x16x4.
func.func @forward(%arg0: tensor<1x3x16x16xf32>) -> tensor<1x8x16x4xf32> {
// Conv filter: 8 output channels, 3 input channels, 3x3 kernel.
%0 = "tosa.const"() <{value = dense_resource<__elided__> : tensor<8x3x3x3xf32>}> : () -> tensor<8x3x3x3xf32>
// Weights for the first matmul (contracts a 16-dim axis down to 8).
%1 = "tosa.const"() <{value = dense_resource<__elided__> : tensor<1x16x8xf32>}> : () -> tensor<1x16x8xf32>
// Weights for the second matmul (contracts an 8-dim axis down to 4).
%2 = "tosa.const"() <{value = dense_resource<__elided__> : tensor<1x8x4xf32>}> : () -> tensor<1x8x4xf32>
// Per-channel constant (~1.00001) fed to tosa.rsqrt below; 8x1x1 so it
// broadcasts over the 8 channels of the conv output.
%3 = "tosa.const"() <{value = dense<1.000010e+00> : tensor<8x1x1xf32>}> : () -> tensor<8x1x1xf32>
// Permutation [0,3,1,2]: moves the last dim to position 1 (channels-last -> channels-first).
%4 = "tosa.const"() <{value = dense<[0, 3, 1, 2]> : tensor<4xi32>}> : () -> tensor<4xi32>
// Permutation [0,2,3,1]: moves dim 1 to the end (channels-first -> channels-last).
%5 = "tosa.const"() <{value = dense<[0, 2, 3, 1]> : tensor<4xi32>}> : () -> tensor<4xi32>
// Conv bias (one value per output channel).
%6 = "tosa.const"() <{value = dense_resource<__elided__> : tensor<8xf32>}> : () -> tensor<8xf32>
// Bias added after the first matmul.
%7 = "tosa.const"() <{value = dense_resource<__elided__> : tensor<8xf32>}> : () -> tensor<8xf32>
// Bias added after the second matmul (inline values, not elided).
%8 = "tosa.const"() <{value = dense<[-0.121429406, 0.0909240693, 0.0867559984, 0.169947132]> : tensor<4xf32>}> : () -> tensor<4xf32>
// Transpose input 1x3x16x16 -> 1x16x16x3 so the channel dim is last for tosa.conv2d.
%9 = "tosa.transpose"(%arg0, %5) : (tensor<1x3x16x16xf32>, tensor<4xi32>) -> tensor<1x16x16x3xf32>
// 3x3 conv, stride 1, pad 1 on every side -> spatial size 16x16 preserved; adds bias %6.
%10 = "tosa.conv2d"(%9, %0, %6) <{dilation = array<i64: 1, 1>, pad = array<i64: 1, 1, 1, 1>, stride = array<i64: 1, 1>}> : (tensor<1x16x16x3xf32>, tensor<8x3x3x3xf32>, tensor<8xf32>) -> tensor<1x16x16x8xf32>
// Transpose back to channels-first: 1x16x16x8 -> 1x8x16x16.
%11 = "tosa.transpose"(%10, %4) : (tensor<1x16x16x8xf32>, tensor<4xi32>) -> tensor<1x8x16x16xf32>
// Scale each channel by 1/sqrt(const %3); broadcast 8x1x1 over 1x8x16x16.
%12 = "tosa.rsqrt"(%3) : (tensor<8x1x1xf32>) -> tensor<8x1x1xf32>
%13 = "tosa.mul"(%11, %12) <{shift = 0 : i32}> : (tensor<1x8x16x16xf32>, tensor<8x1x1xf32>) -> tensor<1x8x16x16xf32>
// Clamp to [0, FLT_MAX], i.e. ReLU.
%14 = "tosa.clamp"(%13) <{max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64}> : (tensor<1x8x16x16xf32>) -> tensor<1x8x16x16xf32>
// Linear #1: collapse 1x8x16x16 -> 1x128x16, batched matmul with %1 (1x16x8),
// then restore the 1x8x16x8 shape.
%15 = "tosa.reshape"(%14) <{new_shape = array<i64: 1, 128, 16>}> : (tensor<1x8x16x16xf32>) -> tensor<1x128x16xf32>
%16 = "tosa.matmul"(%15, %1) : (tensor<1x128x16xf32>, tensor<1x16x8xf32>) -> tensor<1x128x8xf32>
%17 = "tosa.reshape"(%16) <{new_shape = array<i64: 1, 8, 16, 8>}> : (tensor<1x128x8xf32>) -> tensor<1x8x16x8xf32>
// Add bias %7 (broadcast over the leading dims), then ReLU again.
%18 = "tosa.add"(%17, %7) : (tensor<1x8x16x8xf32>, tensor<8xf32>) -> tensor<1x8x16x8xf32>
%19 = "tosa.clamp"(%18) <{max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64}> : (tensor<1x8x16x8xf32>) -> tensor<1x8x16x8xf32>
// Linear #2: collapse -> 1x128x8, matmul with %2 (1x8x4), restore 1x8x16x4.
%20 = "tosa.reshape"(%19) <{new_shape = array<i64: 1, 128, 8>}> : (tensor<1x8x16x8xf32>) -> tensor<1x128x8xf32>
%21 = "tosa.matmul"(%20, %2) : (tensor<1x128x8xf32>, tensor<1x8x4xf32>) -> tensor<1x128x4xf32>
%22 = "tosa.reshape"(%21) <{new_shape = array<i64: 1, 8, 16, 4>}> : (tensor<1x128x4xf32>) -> tensor<1x8x16x4xf32>
// Final bias add (broadcast 4-vector over the last dim), then return.
%23 = "tosa.add"(%22, %8) : (tensor<1x8x16x4xf32>, tensor<4xf32>) -> tensor<1x8x16x4xf32>
return %23 : tensor<1x8x16x4xf32>
}
}
And I am parsing it with parseSourceFile() in a pybind11 C++ source file, but the parse always fails. Here is the parsing code:
void load(std::string filename) {
std::unique_ptr<mlir::MLIRContext> context_;
OwningOpRef<ModuleOp> module_;
mlirEnableGlobalDebug(true);
llvm::DebugFlag = true;
DialectRegistry registry;
registry.insert<func::FuncDialect, FORWARD::FORWARDDialect,
quant::QuantizationDialect, memref::MemRefDialect,
tensor::TensorDialect, tosa::TosaDialect>();
context_ = std::make_unique<MLIRContext>(registry);
module_ = parseSourceFile<mlir::ModuleOp>(filename, context_.get());
std::cout << "file:" << filename<< ", module: "<< module_.get() <<"\n";
assert(module_); // failed here
...
Here is the printed info (I have set llvm::DebugFlag = true):
Load new dialect in Context builtin
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ShapedType)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::MemRefLayoutAttrInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::TypedAttr)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ElementsAttr)
after allowUnregisteredDialects
Load new dialect in Context func
ImplicitTypeIDRegistry::lookupOrInsert(mlir::FunctionOpInterface)
Load new dialect in Context tosa
Load new dialect in Context tensor
Load new dialect in Context affine
Load new dialect in Context arith
ImplicitTypeIDRegistry::lookupOrInsert(mlir::InferTypeOpInterface)
Load new dialect in Context complex
Load new dialect in Context quant
ImplicitTypeIDRegistry::lookupOrInsert(mlir::quant::AnyQuantizedType)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::quant::CalibratedQuantizedType)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::quant::UniformQuantizedType)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::quant::UniformQuantizedPerAxisType)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::InferShapedTypeOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::IsTerminator<mlir::TypeID::get() [with Trait = mlir::OpTrait::IsTerminator]::Empty>)
loc("tosa_elided.mlir":27:5): error: block with no terminator, has "func.return"(%23) : (tensor<1x8x16x4xf32>) -> ()
file:tosa_elided.mlir, module: 0
python3: /home/jhlou/forward-opt/bindings/pymlir/pymlir.cpp:105: void py_module::load(std::string): Assertion `module_' failed.
The main error is:
loc("tosa_elided.mlir":27:5): error: block with no terminator, has "func.return"(%23) : (tensor<1x8x16x4xf32>) -> ()
This means parseSourceFile() read the MLIR file successfully, but did not recognize the last operation, 'func.return', as the terminator of the function block. One detail that may be relevant: the debug output shows "ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::IsTerminator<...>)" happening during parsing, i.e. the IsTerminator trait's TypeID was not already registered at that point — I am not sure whether that indicates the trait check is comparing TypeIDs from two different copies of the MLIR libraries linked into the Python process (static MLIR libs in the pybind11 module plus a shared libMLIR).
How can I fix it? Did I set up the registry incorrectly? Thanks!