InferType problem

I am making a Python API for the Toy tutorial using Boost.Python. The error "unable to infer shape of operation without shape inference interface" comes out when running `createShapeInferencePass`; it means the `dyn_cast<ShapeInference>(op)` failed.
I hardly changed the source code; what might cause that?

I would guess that the ops aren't registered properly, but I would expect the verifier to hit you first, unless the check for registered dialects has been explicitly disabled?

I have registered the operations (I only changed the name Toy to Relay) and have registered the dialect. I hardly changed the source code, so I don't think the check for registered dialects has been explicitly disabled (I don't know how that would be done). What should I do?

You have to register the dialect before creating the MLIRContext.

This should still be caught by the verifier: it checks if the operation has a registered dialect.

That is assuming they are reasonably up to date. The flag guarding that was only added within the past ~3 weeks.

```cpp
#include <boost/python.hpp>
#include <boost/python/def.hpp>
#include <boost/python/module.hpp>

#include <iostream>
#include <memory>
#include <string>
#include <vector>

#include "toy/Dialect.h"
#include "toy/Passes.h"

#include "mlir/Analysis/Verifier.h"
#include "mlir/ExecutionEngine/ExecutionEngine.h"
#include "mlir/ExecutionEngine/OptUtils.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Module.h"
#include "mlir/InitAllDialects.h"
#include "mlir/Parser.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Target/LLVMIR.h"
#include "mlir/Transforms/Passes.h"

#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/raw_ostream.h"

using llvm::ArrayRef;
using llvm::cast;
using llvm::dyn_cast;
using llvm::isa;
using llvm::makeArrayRef;
using llvm::SmallVector;
using llvm::StringRef;
using llvm::Twine;

using namespace mlir::toy;
namespace mlir {
class MLIRContext;
class OwningModuleRef;
} // namespace mlir

struct Location {
  std::shared_ptr<std::string> file; ///< filename.
  int line;                          ///< line number.
  int col;                           ///< column number.
  Location(char *str, int a, int b) {
    file = std::make_shared<std::string>(str);
    line = a;
    col = b;
  }
};

// Globals shared across the exported calls; the context must outlive ToyImpl.
mlir::Block *entryBlock;
mlir::FuncOp function;
static mlir::MLIRContext context;
class ToyImpl {
public:
  ToyImpl(mlir::MLIRContext &context) : builder(&context) {}
  mlir::Value var(const char *name, boost::python::list shape,
                  const char *dtype);
  mlir::Value transpose(mlir::Value operand);
  mlir::Value add(mlir::Value lhs, mlir::Value rhs);
  mlir::Value multiply(mlir::Value lhs, mlir::Value rhs);
  // mlir::Value batch_norm(mlir::Value data, mlir::Value gamma,
  //                        mlir::Value beta, mlir::Value moving_mean,
  //                        mlir::Value moving_var);
  // mlir::Value relu(mlir::Value operand);
  // mlir::Value conv2d(mlir::Value data, mlir::Value weight);
  // mlir::Value max_pool2d(mlir::Value operand);
  // mlir::Value global_avg_pool2d(mlir::Value operand);
  // mlir::Value batch_flatten(mlir::Value operand);
  // mlir::Value softmax(mlir::Value operand);
  // mlir::Value dense(mlir::Value data, mlir::Value weight);
  // mlir::Value bias_add(mlir::Value operand);
  mlir::FuncOp Function(boost::python::list free_vars, mlir::Value res);
  mlir::ModuleOp Module(mlir::FuncOp func);
  mlir::FuncOp Prototype(int number);
  void dumpMLIR(mlir::ModuleOp module);
  // void dumpMLIRAffine(mlir::ModuleOp module);
  // void dumpMLIRLLVM(mlir::ModuleOp module);
  // void dumpLLVM(mlir::ModuleOp module);

private:
  mlir::ModuleOp theModule;
  mlir::OpBuilder builder;

  mlir::Location loc(Location loc) {
    return builder.getFileLineColLoc(builder.getIdentifier(*loc.file),
                                     loc.line, loc.col);
  }

  mlir::Type getType(ArrayRef<int64_t> shape) {
    // If the shape is empty, then this type is unranked.
    if (shape.empty())
      return mlir::UnrankedTensorType::get(builder.getF64Type());

    // Otherwise, we use the given shape.
    return mlir::RankedTensorType::get(shape, builder.getF64Type());
  }
};
ToyImpl getclass() {
  // Note: this runs after the global `context` has already been constructed.
  mlir::registerDialect<mlir::toy::ToyDialect>();
  // mlir::MLIRContext context;
  ToyImpl toy1(context);
  return toy1;
}
void dumpMLIRAffine(mlir::ModuleOp module);

BOOST_PYTHON_MODULE(libtoy) {
  using namespace boost::python;

  class_<ToyImpl>("ToyImpl", init<mlir::MLIRContext &>())
      .def("var", &ToyImpl::var)
      .def("transpose", &ToyImpl::transpose)
      .def("add", &ToyImpl::add)
      .def("multiply", &ToyImpl::multiply)
      .def("Function", &ToyImpl::Function)
      .def("Module", &ToyImpl::Module)
      .def("dumpMLIR", &ToyImpl::dumpMLIR)
      // .def("dumpMLIRLLVM", &ToyImpl::dumpMLIRLLVM)
      // .def("dumpLLVM", &ToyImpl::dumpLLVM)
      .def("Prototype", &ToyImpl::Prototype);
  def("getclass", getclass);
  def("dumpMLIRAffine", dumpMLIRAffine);
  class_<mlir::Value>("mlir::Value");
  class_<mlir::FuncOp>("mlir::FuncOp");
  class_<mlir::ModuleOp>("mlir::ModuleOp");
}

mlir::Value ToyImpl::var(const char *name, boost::python::list shape,
                         const char *dtype) {
  PyImport_AppendInittab("libtoy", &initlibtoy);
  Py_Initialize();
  boost::python::object module = boost::python::import("libtoy");
  Location location("fakefile", 0, 0);
  mlir::Location l = loc(location);
  std::vector<int64_t> dims;
  int num = 1;
  for (int i = 0; i < len(shape); i++) {
    int val =
        boost::python::call_method<int64_t>(shape.ptr(), "__getitem__", i);
    num *= val;
    dims.push_back(val);
  }
  std::vector<double> data;
  for (int i = 0; i < num; i++)
    data.push_back(0.0);
  mlir::Type elementType = builder.getF64Type();
  // std::cout << "builder.getF64Type() works" << std::endl;
  auto dataType = mlir::RankedTensorType::get(dims, elementType);
  // std::cout << "mlir::RankedTensorType::get works" << std::endl;
  mlir::DenseElementsAttr dataAttribute =
      mlir::DenseElementsAttr::get(dataType, llvm::makeArrayRef(data));
  // std::cout << "mlir::DenseElementsAttr::get works" << std::endl;
  return builder.create<ConstantOp>(l, dataAttribute.getType(), dataAttribute);
}
mlir::Value ToyImpl::transpose(mlir::Value operand) {
  Location location("fakefile", 0, 0);
  mlir::Location l = loc(location);
  return builder.create<TransposeOp>(l, operand);
}
mlir::Value ToyImpl::add(mlir::Value lhs, mlir::Value rhs) {
  Location location("fakefile", 0, 0);
  mlir::Location l = loc(location);
  return builder.create<AddOp>(l, lhs, rhs);
}
mlir::Value ToyImpl::multiply(mlir::Value lhs, mlir::Value rhs) {
  Location location("fakefile", 0, 0);
  mlir::Location l = loc(location);
  return builder.create<MulOp>(l, lhs, rhs);
}
mlir::FuncOp ToyImpl::Prototype(int number) {
  Location location("fakefile", 0, 0);
  mlir::Location l = loc(location);
  mlir::Type t = mlir::UnrankedTensorType::get(builder.getF64Type());
  llvm::SmallVector<mlir::Type, 4> arg_types(number, t);
  auto func_type = builder.getFunctionType(arg_types, llvm::None);
  function = mlir::FuncOp::create(l, "main", func_type);
  if (!function)
    return nullptr;
  auto &e = *function.addEntryBlock();
  builder.setInsertionPointToStart(&e);
  entryBlock = &e;
  return function;
}

mlir::FuncOp ToyImpl::Function(boost::python::list free_vars, mlir::Value res) {
  Location location("fakefile", 0, 0);
  mlir::Location l = loc(location);
  int number = len(free_vars);
  mlir::Type t = mlir::UnrankedTensorType::get(builder.getF64Type());
  // llvm::SmallVector<mlir::Type, 4> arg_types(number, t);
  // auto func_type = builder.getFunctionType(arg_types, llvm::None);
  // mlir::FuncOp function = mlir::FuncOp::create(l, "main", func_type);
  // if (!function)
  //   return nullptr;
  // auto &entryBlock = *function.addEntryBlock();
  // builder.setInsertionPointToStart(&entryBlock);

  // boost::python::list list;
  // list.append(2);
  // list.append(3);
  // mlir::Value v = var("x", list, "float32");
  // ReturnOp returnOp;
  // if (!entryBlock->empty())
  //   returnOp = dyn_cast<ReturnOp>(entryBlock->back());
  // if (!returnOp) {
  //   builder.create<ReturnOp>(l);
  // } else if (returnOp.hasOperand()) {
  //   // Otherwise, if this return operation has an operand then add a result
  //   // to the function.
  //   function.setType(builder.getFunctionType(function.getType().getInputs(),
  //                                            t));
  // }
  builder.create<ReturnOp>(l, res);
  return function;
}

mlir::ModuleOp ToyImpl::Module(mlir::FuncOp func) {
  // We create an empty MLIR module, codegen functions one at a time, and
  // add them to the module.
  mlir::ModuleOp theModule = mlir::ModuleOp::create(builder.getUnknownLoc());
  theModule.push_back(func);

  // Verify the module after we have finished constructing it; this will check
  // the structural properties of the IR and invoke any specific verifiers we
  // have on the toy operations.
  if (failed(mlir::verify(theModule))) {
    theModule.emitError("module verification error");
    return nullptr;
  }
  return theModule;
}

void ToyImpl::dumpMLIR(mlir::ModuleOp module) {
  if (!module) {
    std::cout << "module is null" << std::endl;
    return;
  }
  module.dump();
  std::cout << std::endl;
}

void dumpMLIRAffine(mlir::ModuleOp module) {
  if (!module) {
    std::cout << "module is null" << std::endl;
    return;
  }
  module.dump();
  std::cout << std::endl;

  mlir::registerPassManagerCLOptions();
  // mlir::MLIRContext context1;
  mlir::PassManager pm(&context);
  // Apply any generic pass manager command line options and run the pipeline.
  applyPassManagerCLOptions(pm);
  // Inline all functions into main and then delete them.
  pm.addPass(mlir::createInlinerPass());
  pm.addPass(mlir::createSymbolDCEPass());
  mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
  optPM.addPass(mlir::toy::createShapeInferencePass());
  // pm.addPass(mlir::toy::createLowerToAffinePass());
  // optPM.addPass(mlir::createCanonicalizerPass());
  // optPM.addPass(mlir::createCSEPass());

  if (mlir::failed(pm.run(module))) {
    std::cout << "pm.run(module) failed" << std::endl;
    return;
  }
  module.dump();
}
```
This is my code, and the test code follows:
```python
import libtoy

toy = libtoy.getclass()
f = toy.Prototype(2)
x = toy.var("x", [2, 3], "float32")
y = toy.var("x", [2, 3], "float32")
z = toy.add(x, y)
f1 = toy.Function([x, y], z)
mod = toy.Module(f1)
libtoy.dumpMLIRAffine(mod)
```

ToyImpl needs a context for its builder, and I can use ToyImpl to generate MLIR and give it a Python API. So I made the MLIRContext a global variable (if I don't, the program crashes). How can I register the dialect before creating the MLIRContext? How should I change my code to run correctly?

If you put your code in between triple backquotes it should display nicer (there is an entry in the format menu for this normally).
Also, this is a bit beyond the usual code snippet, so I'd use something like https://gist.github.com or another service to host it.

To answer the question, before your global MLIRContext variable, you can force initialization code to run:

```cpp
static bool _register_dialects = [] {
  // ... call into what you want here
  return true;
}();
static mlir::MLIRContext context;
```