Hi,
Please find the latest report on new defect(s) introduced to llvm found with Coverity Scan.
35 new defect(s) introduced to llvm found with Coverity Scan.
15 defect(s), reported by Coverity Scan earlier, were marked fixed in the recent build analyzed by Coverity Scan.
New defect(s) Reported-by: Coverity Scan
Showing 20 of 35 defect(s)
** CID 1522826: Null pointer dereferences (FORWARD_NULL)
/lldb/source/Commands/CommandOptionsProcessLaunch.cpp: 44 in lldb_private::CommandOptionsProcessLaunch::SetOptionValue(unsigned int, llvm::StringRef, lldb_private::ExecutionContext *)()
*** CID 1522826: Null pointer dereferences (FORWARD_NULL)
/lldb/source/Commands/CommandOptionsProcessLaunch.cpp: 44 in lldb_private::CommandOptionsProcessLaunch::SetOptionValue(unsigned int, llvm::StringRef, lldb_private::ExecutionContext *)()
38 execution_context ? execution_context->GetTargetSP() : TargetSP();
39 switch (short_option) {
40 case 's': // Stop at program entry point
41 launch_info.GetFlags().Set(eLaunchFlagStopAtEntry);
42 break;
43 case 'm': // Stop at user entry point
CID 1522826: Null pointer dereferences (FORWARD_NULL) Attempting to access the managed object of an empty smart pointer "target_sp".
44 target_sp->CreateBreakpointAtUserEntry(error);
45 break;
46 case 'i': // STDIN for read only
47 {
48 FileAction action;
49 if (action.Open(STDIN_FILENO, FileSpec(option_arg), true, false))
** CID 1522825: (AUTO_CAUSES_COPY)
/build-llvm/tools/clang/stage2-bins/tools/mlir/lib/Dialect/Arith/IR/ArithCanonicalization.inc: 2220 in ::SelectAndNotCond::matchAndRewrite(mlir::Operation *, mlir::PatternRewriter &) const()
/build-llvm/tools/clang/stage2-bins/tools/mlir/lib/Dialect/Arith/IR/ArithCanonicalization.inc: 2226 in ::SelectAndNotCond::matchAndRewrite(mlir::Operation *, mlir::PatternRewriter &) const()
*** CID 1522825: (AUTO_CAUSES_COPY)
/build-llvm/tools/clang/stage2-bins/tools/mlir/lib/Dialect/Arith/IR/ArithCanonicalization.inc: 2220 in ::SelectAndNotCond::matchAndRewrite(mlir::Operation *, mlir::PatternRewriter &) const()
2214 ::llvm::SmallVector<::mlir::Value, 4> tblgen_values; (void)tblgen_values;
2215 ::llvm::SmallVector<::mlir::NamedAttribute, 4> tblgen_attrs; (void)tblgen_attrs;
2216 tblgen_values.push_back((*tblgen_AndIOp_2.getODSResults(0).begin()));
2217 tblgen_values.push_back((*x.begin()));
2218 tblgen_values.push_back((*y.begin()));
2219 ::llvm::SmallVector<::mlir::Type, 4> tblgen_types; (void)tblgen_types;
CID 1522825: (AUTO_CAUSES_COPY) Using the "auto" keyword without an "&" causes the copy of an object of type OpResult.
2220 for (auto v: castedOp0.getODSResults(0)) {
2221 tblgen_types.push_back(v.getType());
2222 }
2223 tblgen_SelectOp_3 = rewriter.create<::mlir::arith::SelectOp>(odsLoc, tblgen_types, tblgen_values, tblgen_attrs);
2224 }
2225
/build-llvm/tools/clang/stage2-bins/tools/mlir/lib/Dialect/Arith/IR/ArithCanonicalization.inc: 2226 in ::SelectAndNotCond::matchAndRewrite(mlir::Operation *, mlir::PatternRewriter &) const()
2220 for (auto v: castedOp0.getODSResults(0)) {
2221 tblgen_types.push_back(v.getType());
2222 }
2223 tblgen_SelectOp_3 = rewriter.create<::mlir::arith::SelectOp>(odsLoc, tblgen_types, tblgen_values, tblgen_attrs);
2224 }
2225
CID 1522825: (AUTO_CAUSES_COPY) Using the "auto" keyword without an "&" causes the copy of an object of type Value.
2226 for (auto v: ::llvm::SmallVector<::mlir::Value, 4>{ tblgen_SelectOp_3.getODSResults(0) }) {
2227 tblgen_repl_values.push_back(v);
2228 }
2229
2230 rewriter.replaceOp(op0, tblgen_repl_values);
2231 return ::mlir::success();
** CID 1522824: Control flow issues (DEADCODE)
/build-llvm/tools/clang/stage2-bins/tools/mlir/include/mlir/Dialect/Mesh/IR/MeshOps.cpp.inc: 842 in mlir::mesh::ShardOp::setAnnotateForUsers(bool)()
*** CID 1522824: Control flow issues (DEADCODE)
/build-llvm/tools/clang/stage2-bins/tools/mlir/include/mlir/Dialect/Mesh/IR/MeshOps.cpp.inc: 842 in mlir::mesh::ShardOp::setAnnotateForUsers(bool)()
836 void ShardOp::setAnnotateForUsersAttr(::mlir::UnitAttr attr) {
837 (*this)->setAttr(getAnnotateForUsersAttrName(), attr);
838 }
839
840 void ShardOp::setAnnotateForUsers(bool attrValue) {
841 if (attrValue)
CID 1522824: Control flow issues (DEADCODE) Execution cannot reach this statement: "<temporary>.UnitAttr(NULL);".
842 return (*this)->setAttr(getAnnotateForUsersAttrName(), ((attrValue) ? ::mlir::Builder((*this)->getContext()).getUnitAttr() : nullptr));
843 (*this)->removeAttr(getAnnotateForUsersAttrName());
844 }
845
846 ::mlir::Attribute ShardOp::removeAnnotateForUsersAttr() {
847 auto &attr = getProperties().annotate_for_users;
** CID 1522823: (AUTO_CAUSES_COPY)
/build-llvm/tools/clang/stage2-bins/tools/mlir/lib/Dialect/Arith/IR/ArithCanonicalization.inc: 2307 in ::SelectNotCond::matchAndRewrite(mlir::Operation *, mlir::PatternRewriter &) const()
/build-llvm/tools/clang/stage2-bins/tools/mlir/lib/Dialect/Arith/IR/ArithCanonicalization.inc: 2313 in ::SelectNotCond::matchAndRewrite(mlir::Operation *, mlir::PatternRewriter &) const()
*** CID 1522823: (AUTO_CAUSES_COPY)
/build-llvm/tools/clang/stage2-bins/tools/mlir/lib/Dialect/Arith/IR/ArithCanonicalization.inc: 2307 in ::SelectNotCond::matchAndRewrite(mlir::Operation *, mlir::PatternRewriter &) const()
2301 ::llvm::SmallVector<::mlir::Value, 4> tblgen_values; (void)tblgen_values;
2302 ::llvm::SmallVector<::mlir::NamedAttribute, 4> tblgen_attrs; (void)tblgen_attrs;
2303 tblgen_values.push_back((*pred.begin()));
2304 tblgen_values.push_back((*b.begin()));
2305 tblgen_values.push_back((*a.begin()));
2306 ::llvm::SmallVector<::mlir::Type, 4> tblgen_types; (void)tblgen_types;
CID 1522823: (AUTO_CAUSES_COPY) Using the "auto" keyword without an "&" causes the copy of an object of type OpResult.
2307 for (auto v: castedOp0.getODSResults(0)) {
2308 tblgen_types.push_back(v.getType());
2309 }
2310 tblgen_SelectOp_0 = rewriter.create<::mlir::arith::SelectOp>(odsLoc, tblgen_types, tblgen_values, tblgen_attrs);
2311 }
2312
/build-llvm/tools/clang/stage2-bins/tools/mlir/lib/Dialect/Arith/IR/ArithCanonicalization.inc: 2313 in ::SelectNotCond::matchAndRewrite(mlir::Operation *, mlir::PatternRewriter &) const()
2307 for (auto v: castedOp0.getODSResults(0)) {
2308 tblgen_types.push_back(v.getType());
2309 }
2310 tblgen_SelectOp_0 = rewriter.create<::mlir::arith::SelectOp>(odsLoc, tblgen_types, tblgen_values, tblgen_attrs);
2311 }
2312
CID 1522823: (AUTO_CAUSES_COPY) Using the "auto" keyword without an "&" causes the copy of an object of type Value.
2313 for (auto v: ::llvm::SmallVector<::mlir::Value, 4>{ tblgen_SelectOp_0.getODSResults(0) }) {
2314 tblgen_repl_values.push_back(v);
2315 }
2316
2317 rewriter.replaceOp(op0, tblgen_repl_values);
2318 return ::mlir::success();
** CID 1522822: (AUTO_CAUSES_COPY)
/build-llvm/tools/clang/stage2-bins/tools/mlir/lib/Dialect/Arith/IR/ArithCanonicalization.inc: 2139 in ::SelectAndCond::matchAndRewrite(mlir::Operation *, mlir::PatternRewriter &) const()
/build-llvm/tools/clang/stage2-bins/tools/mlir/lib/Dialect/Arith/IR/ArithCanonicalization.inc: 2133 in ::SelectAndCond::matchAndRewrite(mlir::Operation *, mlir::PatternRewriter &) const()
*** CID 1522822: (AUTO_CAUSES_COPY)
/build-llvm/tools/clang/stage2-bins/tools/mlir/lib/Dialect/Arith/IR/ArithCanonicalization.inc: 2139 in ::SelectAndCond::matchAndRewrite(mlir::Operation *, mlir::PatternRewriter &) const()
2133 for (auto v: castedOp0.getODSResults(0)) {
2134 tblgen_types.push_back(v.getType());
2135 }
2136 tblgen_SelectOp_1 = rewriter.create<::mlir::arith::SelectOp>(odsLoc, tblgen_types, tblgen_values, tblgen_attrs);
2137 }
2138
CID 1522822: (AUTO_CAUSES_COPY) Using the "auto" keyword without an "&" causes the copy of an object of type Value.
2139 for (auto v: ::llvm::SmallVector<::mlir::Value, 4>{ tblgen_SelectOp_1.getODSResults(0) }) {
2140 tblgen_repl_values.push_back(v);
2141 }
2142
2143 rewriter.replaceOp(op0, tblgen_repl_values);
2144 return ::mlir::success();
/build-llvm/tools/clang/stage2-bins/tools/mlir/lib/Dialect/Arith/IR/ArithCanonicalization.inc: 2133 in ::SelectAndCond::matchAndRewrite(mlir::Operation *, mlir::PatternRewriter &) const()
2127 ::llvm::SmallVector<::mlir::Value, 4> tblgen_values; (void)tblgen_values;
2128 ::llvm::SmallVector<::mlir::NamedAttribute, 4> tblgen_attrs; (void)tblgen_attrs;
2129 tblgen_values.push_back((*tblgen_AndIOp_0.getODSResults(0).begin()));
2130 tblgen_values.push_back((*x.begin()));
2131 tblgen_values.push_back((*y.begin()));
2132 ::llvm::SmallVector<::mlir::Type, 4> tblgen_types; (void)tblgen_types;
CID 1522822: (AUTO_CAUSES_COPY) Using the "auto" keyword without an "&" causes the copy of an object of type OpResult.
2133 for (auto v: castedOp0.getODSResults(0)) {
2134 tblgen_types.push_back(v.getType());
2135 }
2136 tblgen_SelectOp_1 = rewriter.create<::mlir::arith::SelectOp>(odsLoc, tblgen_types, tblgen_values, tblgen_attrs);
2137 }
2138
** CID 1522821: (AUTO_CAUSES_COPY)
/build-llvm/tools/clang/stage2-bins/tools/mlir/include/mlir/Dialect/ArmSVE/IR/ArmSVE.cpp.inc: 414 in mlir::arm_sve::ConvertToSvboolIntrOp::verifyInvariantsImpl()()
/build-llvm/tools/clang/stage2-bins/tools/mlir/include/mlir/Dialect/ArmSVE/IR/ArmSVE.cpp.inc: 405 in mlir::arm_sve::ConvertToSvboolIntrOp::verifyInvariantsImpl()()
*** CID 1522821: (AUTO_CAUSES_COPY)
/build-llvm/tools/clang/stage2-bins/tools/mlir/include/mlir/Dialect/ArmSVE/IR/ArmSVE.cpp.inc: 414 in mlir::arm_sve::ConvertToSvboolIntrOp::verifyInvariantsImpl()()
408 }
409 }
410 {
411 unsigned index = 0; (void)index;
412 auto valueGroup0 = getODSResults(0);
413
CID 1522821: (AUTO_CAUSES_COPY) Using the "auto" keyword without an "&" causes the copy of an object of type OpResult.
414 for (auto v : valueGroup0) {
415 if (::mlir::failed(__mlir_ods_local_type_constraint_ArmSVE1(*this, v.getType(), "result", index++)))
416 return ::mlir::failure();
417 }
418 }
419 if (!((((::llvm::isa<::mlir::VectorType>((*this->getODSResults(0).begin()).getType()) &&
/build-llvm/tools/clang/stage2-bins/tools/mlir/include/mlir/Dialect/ArmSVE/IR/ArmSVE.cpp.inc: 405 in mlir::arm_sve::ConvertToSvboolIntrOp::verifyInvariantsImpl()()
399
400 ::mlir::LogicalResult ConvertToSvboolIntrOp::verifyInvariantsImpl() {
401 {
402 unsigned index = 0; (void)index;
403 auto valueGroup0 = getODSOperands(0);
404
CID 1522821: (AUTO_CAUSES_COPY) Using the "auto" keyword without an "&" causes the copy of an object of type Value.
405 for (auto v : valueGroup0) {
406 if (::mlir::failed(__mlir_ods_local_type_constraint_ArmSVE2(*this, v.getType(), "operand", index++)))
407 return ::mlir::failure();
408 }
409 }
410 {
** CID 1522820: (AUTO_CAUSES_COPY)
/build-llvm/tools/clang/stage2-bins/tools/mlir/lib/Dialect/Arith/IR/ArithCanonicalization.inc: 2473 in ::SelectOrNotCond::matchAndRewrite(mlir::Operation *, mlir::PatternRewriter &) const()
/build-llvm/tools/clang/stage2-bins/tools/mlir/lib/Dialect/Arith/IR/ArithCanonicalization.inc: 2467 in ::SelectOrNotCond::matchAndRewrite(mlir::Operation *, mlir::PatternRewriter &) const()
*** CID 1522820: (AUTO_CAUSES_COPY)
/build-llvm/tools/clang/stage2-bins/tools/mlir/lib/Dialect/Arith/IR/ArithCanonicalization.inc: 2473 in ::SelectOrNotCond::matchAndRewrite(mlir::Operation *, mlir::PatternRewriter &) const()
2467 for (auto v: castedOp0.getODSResults(0)) {
2468 tblgen_types.push_back(v.getType());
2469 }
2470 tblgen_SelectOp_3 = rewriter.create<::mlir::arith::SelectOp>(odsLoc, tblgen_types, tblgen_values, tblgen_attrs);
2471 }
2472
CID 1522820: (AUTO_CAUSES_COPY) Using the "auto" keyword without an "&" causes the copy of an object of type Value.
2473 for (auto v: ::llvm::SmallVector<::mlir::Value, 4>{ tblgen_SelectOp_3.getODSResults(0) }) {
2474 tblgen_repl_values.push_back(v);
2475 }
2476
2477 rewriter.replaceOp(op0, tblgen_repl_values);
2478 return ::mlir::success();
/build-llvm/tools/clang/stage2-bins/tools/mlir/lib/Dialect/Arith/IR/ArithCanonicalization.inc: 2467 in ::SelectOrNotCond::matchAndRewrite(mlir::Operation *, mlir::PatternRewriter &) const()
2461 ::llvm::SmallVector<::mlir::Value, 4> tblgen_values; (void)tblgen_values;
2462 ::llvm::SmallVector<::mlir::NamedAttribute, 4> tblgen_attrs; (void)tblgen_attrs;
2463 tblgen_values.push_back((*tblgen_OrIOp_2.getODSResults(0).begin()));
2464 tblgen_values.push_back((*x.begin()));
2465 tblgen_values.push_back((*y.begin()));
2466 ::llvm::SmallVector<::mlir::Type, 4> tblgen_types; (void)tblgen_types;
CID 1522820: (AUTO_CAUSES_COPY) Using the "auto" keyword without an "&" causes the copy of an object of type OpResult.
2467 for (auto v: castedOp0.getODSResults(0)) {
2468 tblgen_types.push_back(v.getType());
2469 }
2470 tblgen_SelectOp_3 = rewriter.create<::mlir::arith::SelectOp>(odsLoc, tblgen_types, tblgen_values, tblgen_attrs);
2471 }
2472
** CID 1522819: Performance inefficiencies (AUTO_CAUSES_COPY)
/flang/lib/Optimizer/CodeGen/TypeConverter.cpp: 107 in fir::LLVMTypeConverter::LLVMTypeConverter(mlir::ModuleOp, bool, bool)::[lambda(mlir::TupleType) (instance 1)]::operator ()(mlir::TupleType) const()
*** CID 1522819: Performance inefficiencies (AUTO_CAUSES_COPY)
/flang/lib/Optimizer/CodeGen/TypeConverter.cpp: 107 in fir::LLVMTypeConverter::LLVMTypeConverter(mlir::ModuleOp, bool, bool)::[lambda(mlir::TupleType) (instance 1)]::operator ()(mlir::TupleType) const()
101 return mlir::VectorType::get(llvm::ArrayRef<int64_t>(vecTy.getLen()),
102 convertType(vecTy.getEleTy()));
103 });
104 addConversion([&](mlir::TupleType tuple) {
105 LLVM_DEBUG(llvm::dbgs() << "type convert: " << tuple << '\n');
106 llvm::SmallVector<mlir::Type> members;
CID 1522819: Performance inefficiencies (AUTO_CAUSES_COPY) Using the "auto" keyword without an "&" causes the copy of an object of type Type.
107 for (auto mem : tuple.getTypes()) {
108 // Prevent fir.box from degenerating to a pointer to a descriptor in the
109 // context of a tuple type.
110 if (auto box = mem.dyn_cast<fir::BaseBoxType>())
111 members.push_back(convertBoxTypeAsStruct(box));
112 else
** CID 1522818: (NO_EFFECT)
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned int, bf16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, bf16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, bf16) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned short, f16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, f16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, f16) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned long, bf16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, bf16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, bf16) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned long, bf16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, bf16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, bf16) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned long, f16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, f16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, f16) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned long, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned char, float>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, float) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, float) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned int, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned char, unsigned char, int>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, int) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, int) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned char, unsigned char, bf16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, bf16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, bf16) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned long, f16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, f16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, f16) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned long, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned int, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned int, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned char, unsigned long, float>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, float) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, float) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned short, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned long, long>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, long) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, long) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned char, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned char, float>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, float) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, float) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned int, bf16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, bf16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, bf16) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned short, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned int, long>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, long) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, long) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned long, f16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, f16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, f16) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned long, float>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, float) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, float) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned char, float>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, float) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, float) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned char, unsigned char, bf16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, bf16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, bf16) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned short, long>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, long) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, long) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned char, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned short, long>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, long) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, long) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned short, f16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, f16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, f16) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned char, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned short, bf16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, bf16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, bf16) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned int, int>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, int) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, int) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned long, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned char, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned char, unsigned long, float>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, float) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, float) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned short, long>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, long) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, long) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned char, unsigned char, bf16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, bf16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, bf16) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned char, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned char, unsigned char, int>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, int) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, int) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned int, float>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, float) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, float) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned int, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned int, float>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, float) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, float) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned int, long>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, long) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, long) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned long, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned int, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned long, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned int, float>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, float) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, float) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned int, int>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, int) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, int) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned char, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned int, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned long, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned int, bf16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, bf16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, bf16) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned char, unsigned char, int>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, int) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, int) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned long, long>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, long) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, long) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned int, int>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, int) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, int) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned short, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned char, unsigned long, float>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, float) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, float) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned long, long>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, long) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, long) const()
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned long, bf16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, bf16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, bf16) const()
*** CID 1522818: (NO_EFFECT)
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned int, bf16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, bf16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, bf16) const()
1082 } else { // Dense level.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Value position is out of bounds", since the test will always evaluate as true.
1088 assert(parentPos < values.size() && "Value position is out of bounds");
1089 values[parentPos] = val;
1090 });
1091 // The finalizeYieldPos loop
1092 for (uint64_t parentSz = 1, l = 0; l < lvlRank; ++l) {
1093 const auto dlt = lvlTypes[l]; // Avoid redundant bounds checking.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned short, f16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, f16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, f16) const()
1077 writeCrd(l, currentPos, lvlCoords[l]);
1078 parentPos = currentPos;
1079 } else if (isSingletonDLT(dlt)) {
1080 writeCrd(l, parentPos, lvlCoords[l]);
1081 // the new parentPos equals the old parentPos.
1082 } else { // Dense level.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Level is not dense", since the test will always evaluate as true.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
1088 assert(parentPos < values.size() && "Value position is out of bounds");
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned long, bf16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, bf16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, bf16) const()
1077 writeCrd(l, currentPos, lvlCoords[l]);
1078 parentPos = currentPos;
1079 } else if (isSingletonDLT(dlt)) {
1080 writeCrd(l, parentPos, lvlCoords[l]);
1081 // the new parentPos equals the old parentPos.
1082 } else { // Dense level.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Level is not dense", since the test will always evaluate as true.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
1088 assert(parentPos < values.size() && "Value position is out of bounds");
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned long, bf16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, bf16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, bf16) const()
1064 const auto dlt = getLvlTypes()[l]; // Avoid redundant bounds checking.
1065 if (isCompressedDLT(dlt)) {
1066 // If parentPos == parentSz then it's valid as an array-lookup;
1067 // however, it's semantically invalid here since that entry
1068 // does not represent a segment of coordinates[l]. Moreover, that
1069 // entry must be immutable for assembledSize to remain valid.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Parent position is out of bounds", since the test will always evaluate as true.
1070 assert(parentPos < parentSz && "Parent position is out of bounds");
1071 const uint64_t currentPos = positions[l][parentPos];
1072 // This increment won't overflow the P type, since it can't
1073 // exceed the original value of positions[l][parentPos+1]
1074 // which was already verified to be within bounds for P
1075 // when it was written to the array.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned long, f16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, f16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, f16) const()
1064 const auto dlt = getLvlTypes()[l]; // Avoid redundant bounds checking.
1065 if (isCompressedDLT(dlt)) {
1066 // If parentPos == parentSz then it's valid as an array-lookup;
1067 // however, it's semantically invalid here since that entry
1068 // does not represent a segment of coordinates[l]. Moreover, that
1069 // entry must be immutable for assembledSize to remain valid.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Parent position is out of bounds", since the test will always evaluate as true.
1070 assert(parentPos < parentSz && "Parent position is out of bounds");
1071 const uint64_t currentPos = positions[l][parentPos];
1072 // This increment won't overflow the P type, since it can't
1073 // exceed the original value of positions[l][parentPos+1]
1074 // which was already verified to be within bounds for P
1075 // when it was written to the array.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned long, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
1077 writeCrd(l, currentPos, lvlCoords[l]);
1078 parentPos = currentPos;
1079 } else if (isSingletonDLT(dlt)) {
1080 writeCrd(l, parentPos, lvlCoords[l]);
1081 // the new parentPos equals the old parentPos.
1082 } else { // Dense level.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Level is not dense", since the test will always evaluate as true.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
1088 assert(parentPos < values.size() && "Value position is out of bounds");
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned char, float>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, float) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, float) const()
1064 const auto dlt = getLvlTypes()[l]; // Avoid redundant bounds checking.
1065 if (isCompressedDLT(dlt)) {
1066 // If parentPos == parentSz then it's valid as an array-lookup;
1067 // however, it's semantically invalid here since that entry
1068 // does not represent a segment of coordinates[l]. Moreover, that
1069 // entry must be immutable for assembledSize to remain valid.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Parent position is out of bounds", since the test will always evaluate as true.
1070 assert(parentPos < parentSz && "Parent position is out of bounds");
1071 const uint64_t currentPos = positions[l][parentPos];
1072 // This increment won't overflow the P type, since it can't
1073 // exceed the original value of positions[l][parentPos+1]
1074 // which was already verified to be within bounds for P
1075 // when it was written to the array.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned int, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
1082 } else { // Dense level.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Value position is out of bounds", since the test will always evaluate as true.
1088 assert(parentPos < values.size() && "Value position is out of bounds");
1089 values[parentPos] = val;
1090 });
1091 // The finalizeYieldPos loop
1092 for (uint64_t parentSz = 1, l = 0; l < lvlRank; ++l) {
1093 const auto dlt = lvlTypes[l]; // Avoid redundant bounds checking.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned char, unsigned char, int>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, int) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, int) const()
1077 writeCrd(l, currentPos, lvlCoords[l]);
1078 parentPos = currentPos;
1079 } else if (isSingletonDLT(dlt)) {
1080 writeCrd(l, parentPos, lvlCoords[l]);
1081 // the new parentPos equals the old parentPos.
1082 } else { // Dense level.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Level is not dense", since the test will always evaluate as true.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
1088 assert(parentPos < values.size() && "Value position is out of bounds");
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned char, unsigned char, bf16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, bf16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, bf16) const()
1064 const auto dlt = getLvlTypes()[l]; // Avoid redundant bounds checking.
1065 if (isCompressedDLT(dlt)) {
1066 // If parentPos == parentSz then it's valid as an array-lookup;
1067 // however, it's semantically invalid here since that entry
1068 // does not represent a segment of coordinates[l]. Moreover, that
1069 // entry must be immutable for assembledSize to remain valid.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Parent position is out of bounds", since the test will always evaluate as true.
1070 assert(parentPos < parentSz && "Parent position is out of bounds");
1071 const uint64_t currentPos = positions[l][parentPos];
1072 // This increment won't overflow the P type, since it can't
1073 // exceed the original value of positions[l][parentPos+1]
1074 // which was already verified to be within bounds for P
1075 // when it was written to the array.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned long, f16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, f16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, f16) const()
1082 } else { // Dense level.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Value position is out of bounds", since the test will always evaluate as true.
1088 assert(parentPos < values.size() && "Value position is out of bounds");
1089 values[parentPos] = val;
1090 });
1091 // The finalizeYieldPos loop
1092 for (uint64_t parentSz = 1, l = 0; l < lvlRank; ++l) {
1093 const auto dlt = lvlTypes[l]; // Avoid redundant bounds checking.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned long, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
1082 } else { // Dense level.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Value position is out of bounds", since the test will always evaluate as true.
1088 assert(parentPos < values.size() && "Value position is out of bounds");
1089 values[parentPos] = val;
1090 });
1091 // The finalizeYieldPos loop
1092 for (uint64_t parentSz = 1, l = 0; l < lvlRank; ++l) {
1093 const auto dlt = lvlTypes[l]; // Avoid redundant bounds checking.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned int, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
1077 writeCrd(l, currentPos, lvlCoords[l]);
1078 parentPos = currentPos;
1079 } else if (isSingletonDLT(dlt)) {
1080 writeCrd(l, parentPos, lvlCoords[l]);
1081 // the new parentPos equals the old parentPos.
1082 } else { // Dense level.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Level is not dense", since the test will always evaluate as true.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
1088 assert(parentPos < values.size() && "Value position is out of bounds");
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned int, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
1064 const auto dlt = getLvlTypes()[l]; // Avoid redundant bounds checking.
1065 if (isCompressedDLT(dlt)) {
1066 // If parentPos == parentSz then it's valid as an array-lookup;
1067 // however, it's semantically invalid here since that entry
1068 // does not represent a segment of coordinates[l]. Moreover, that
1069 // entry must be immutable for assembledSize to remain valid.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Parent position is out of bounds", since the test will always evaluate as true.
1070 assert(parentPos < parentSz && "Parent position is out of bounds");
1071 const uint64_t currentPos = positions[l][parentPos];
1072 // This increment won't overflow the P type, since it can't
1073 // exceed the original value of positions[l][parentPos+1]
1074 // which was already verified to be within bounds for P
1075 // when it was written to the array.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned char, unsigned long, float>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, float) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, float) const()
1064 const auto dlt = getLvlTypes()[l]; // Avoid redundant bounds checking.
1065 if (isCompressedDLT(dlt)) {
1066 // If parentPos == parentSz then it's valid as an array-lookup;
1067 // however, it's semantically invalid here since that entry
1068 // does not represent a segment of coordinates[l]. Moreover, that
1069 // entry must be immutable for assembledSize to remain valid.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Parent position is out of bounds", since the test will always evaluate as true.
1070 assert(parentPos < parentSz && "Parent position is out of bounds");
1071 const uint64_t currentPos = positions[l][parentPos];
1072 // This increment won't overflow the P type, since it can't
1073 // exceed the original value of positions[l][parentPos+1]
1074 // which was already verified to be within bounds for P
1075 // when it was written to the array.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned short, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
1064 const auto dlt = getLvlTypes()[l]; // Avoid redundant bounds checking.
1065 if (isCompressedDLT(dlt)) {
1066 // If parentPos == parentSz then it's valid as an array-lookup;
1067 // however, it's semantically invalid here since that entry
1068 // does not represent a segment of coordinates[l]. Moreover, that
1069 // entry must be immutable for assembledSize to remain valid.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Parent position is out of bounds", since the test will always evaluate as true.
1070 assert(parentPos < parentSz && "Parent position is out of bounds");
1071 const uint64_t currentPos = positions[l][parentPos];
1072 // This increment won't overflow the P type, since it can't
1073 // exceed the original value of positions[l][parentPos+1]
1074 // which was already verified to be within bounds for P
1075 // when it was written to the array.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned long, long>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, long) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, long) const()
1082 } else { // Dense level.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Value position is out of bounds", since the test will always evaluate as true.
1088 assert(parentPos < values.size() && "Value position is out of bounds");
1089 values[parentPos] = val;
1090 });
1091 // The finalizeYieldPos loop
1092 for (uint64_t parentSz = 1, l = 0; l < lvlRank; ++l) {
1093 const auto dlt = lvlTypes[l]; // Avoid redundant bounds checking.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned char, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
1077 writeCrd(l, currentPos, lvlCoords[l]);
1078 parentPos = currentPos;
1079 } else if (isSingletonDLT(dlt)) {
1080 writeCrd(l, parentPos, lvlCoords[l]);
1081 // the new parentPos equals the old parentPos.
1082 } else { // Dense level.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Level is not dense", since the test will always evaluate as true.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
1088 assert(parentPos < values.size() && "Value position is out of bounds");
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned char, float>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, float) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, float) const()
1082 } else { // Dense level.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Value position is out of bounds", since the test will always evaluate as true.
1088 assert(parentPos < values.size() && "Value position is out of bounds");
1089 values[parentPos] = val;
1090 });
1091 // The finalizeYieldPos loop
1092 for (uint64_t parentSz = 1, l = 0; l < lvlRank; ++l) {
1093 const auto dlt = lvlTypes[l]; // Avoid redundant bounds checking.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned int, bf16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, bf16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, bf16) const()
1064 const auto dlt = getLvlTypes()[l]; // Avoid redundant bounds checking.
1065 if (isCompressedDLT(dlt)) {
1066 // If parentPos == parentSz then it's valid as an array-lookup;
1067 // however, it's semantically invalid here since that entry
1068 // does not represent a segment of coordinates[l]. Moreover, that
1069 // entry must be immutable for assembledSize to remain valid.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Parent position is out of bounds", since the test will always evaluate as true.
1070 assert(parentPos < parentSz && "Parent position is out of bounds");
1071 const uint64_t currentPos = positions[l][parentPos];
1072 // This increment won't overflow the P type, since it can't
1073 // exceed the original value of positions[l][parentPos+1]
1074 // which was already verified to be within bounds for P
1075 // when it was written to the array.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned short, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
1082 } else { // Dense level.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Value position is out of bounds", since the test will always evaluate as true.
1088 assert(parentPos < values.size() && "Value position is out of bounds");
1089 values[parentPos] = val;
1090 });
1091 // The finalizeYieldPos loop
1092 for (uint64_t parentSz = 1, l = 0; l < lvlRank; ++l) {
1093 const auto dlt = lvlTypes[l]; // Avoid redundant bounds checking.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned int, long>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, long) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, long) const()
1077 writeCrd(l, currentPos, lvlCoords[l]);
1078 parentPos = currentPos;
1079 } else if (isSingletonDLT(dlt)) {
1080 writeCrd(l, parentPos, lvlCoords[l]);
1081 // the new parentPos equals the old parentPos.
1082 } else { // Dense level.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Level is not dense", since the test will always evaluate as true.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
1088 assert(parentPos < values.size() && "Value position is out of bounds");
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned long, f16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, f16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, f16) const()
1077 writeCrd(l, currentPos, lvlCoords[l]);
1078 parentPos = currentPos;
1079 } else if (isSingletonDLT(dlt)) {
1080 writeCrd(l, parentPos, lvlCoords[l]);
1081 // the new parentPos equals the old parentPos.
1082 } else { // Dense level.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Level is not dense", since the test will always evaluate as true.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
1088 assert(parentPos < values.size() && "Value position is out of bounds");
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned long, float>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, float) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, float) const()
1064 const auto dlt = getLvlTypes()[l]; // Avoid redundant bounds checking.
1065 if (isCompressedDLT(dlt)) {
1066 // If parentPos == parentSz then it's valid as an array-lookup;
1067 // however, it's semantically invalid here since that entry
1068 // does not represent a segment of coordinates[l]. Moreover, that
1069 // entry must be immutable for assembledSize to remain valid.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Parent position is out of bounds", since the test will always evaluate as true.
1070 assert(parentPos < parentSz && "Parent position is out of bounds");
1071 const uint64_t currentPos = positions[l][parentPos];
1072 // This increment won't overflow the P type, since it can't
1073 // exceed the original value of positions[l][parentPos+1]
1074 // which was already verified to be within bounds for P
1075 // when it was written to the array.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned char, float>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, float) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, float) const()
1077 writeCrd(l, currentPos, lvlCoords[l]);
1078 parentPos = currentPos;
1079 } else if (isSingletonDLT(dlt)) {
1080 writeCrd(l, parentPos, lvlCoords[l]);
1081 // the new parentPos equals the old parentPos.
1082 } else { // Dense level.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Level is not dense", since the test will always evaluate as true.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
1088 assert(parentPos < values.size() && "Value position is out of bounds");
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned char, unsigned char, bf16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, bf16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, bf16) const()
1077 writeCrd(l, currentPos, lvlCoords[l]);
1078 parentPos = currentPos;
1079 } else if (isSingletonDLT(dlt)) {
1080 writeCrd(l, parentPos, lvlCoords[l]);
1081 // the new parentPos equals the old parentPos.
1082 } else { // Dense level.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Level is not dense", since the test will always evaluate as true.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
1088 assert(parentPos < values.size() && "Value position is out of bounds");
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned short, long>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, long) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, long) const()
1064 const auto dlt = getLvlTypes()[l]; // Avoid redundant bounds checking.
1065 if (isCompressedDLT(dlt)) {
1066 // If parentPos == parentSz then it's valid as an array-lookup;
1067 // however, it's semantically invalid here since that entry
1068 // does not represent a segment of coordinates[l]. Moreover, that
1069 // entry must be immutable for assembledSize to remain valid.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Parent position is out of bounds", since the test will always evaluate as true.
1070 assert(parentPos < parentSz && "Parent position is out of bounds");
1071 const uint64_t currentPos = positions[l][parentPos];
1072 // This increment won't overflow the P type, since it can't
1073 // exceed the original value of positions[l][parentPos+1]
1074 // which was already verified to be within bounds for P
1075 // when it was written to the array.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned char, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
1082 } else { // Dense level.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Value position is out of bounds", since the test will always evaluate as true.
1088 assert(parentPos < values.size() && "Value position is out of bounds");
1089 values[parentPos] = val;
1090 });
1091 // The finalizeYieldPos loop
1092 for (uint64_t parentSz = 1, l = 0; l < lvlRank; ++l) {
1093 const auto dlt = lvlTypes[l]; // Avoid redundant bounds checking.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned short, long>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, long) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, long) const()
1077 writeCrd(l, currentPos, lvlCoords[l]);
1078 parentPos = currentPos;
1079 } else if (isSingletonDLT(dlt)) {
1080 writeCrd(l, parentPos, lvlCoords[l]);
1081 // the new parentPos equals the old parentPos.
1082 } else { // Dense level.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Level is not dense", since the test will always evaluate as true.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
1088 assert(parentPos < values.size() && "Value position is out of bounds");
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned short, f16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, f16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, f16) const()
1064 const auto dlt = getLvlTypes()[l]; // Avoid redundant bounds checking.
1065 if (isCompressedDLT(dlt)) {
1066 // If parentPos == parentSz then it's valid as an array-lookup;
1067 // however, it's semantically invalid here since that entry
1068 // does not represent a segment of coordinates[l]. Moreover, that
1069 // entry must be immutable for assembledSize to remain valid.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Parent position is out of bounds", since the test will always evaluate as true.
1070 assert(parentPos < parentSz && "Parent position is out of bounds");
1071 const uint64_t currentPos = positions[l][parentPos];
1072 // This increment won't overflow the P type, since it can't
1073 // exceed the original value of positions[l][parentPos+1]
1074 // which was already verified to be within bounds for P
1075 // when it was written to the array.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned char, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
1064 const auto dlt = getLvlTypes()[l]; // Avoid redundant bounds checking.
1065 if (isCompressedDLT(dlt)) {
1066 // If parentPos == parentSz then it's valid as an array-lookup;
1067 // however, it's semantically invalid here since that entry
1068 // does not represent a segment of coordinates[l]. Moreover, that
1069 // entry must be immutable for assembledSize to remain valid.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Parent position is out of bounds", since the test will always evaluate as true.
1070 assert(parentPos < parentSz && "Parent position is out of bounds");
1071 const uint64_t currentPos = positions[l][parentPos];
1072 // This increment won't overflow the P type, since it can't
1073 // exceed the original value of positions[l][parentPos+1]
1074 // which was already verified to be within bounds for P
1075 // when it was written to the array.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned short, bf16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, bf16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, bf16) const()
1082 } else { // Dense level.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Value position is out of bounds", since the test will always evaluate as true.
1088 assert(parentPos < values.size() && "Value position is out of bounds");
1089 values[parentPos] = val;
1090 });
1091 // The finalizeYieldPos loop
1092 for (uint64_t parentSz = 1, l = 0; l < lvlRank; ++l) {
1093 const auto dlt = lvlTypes[l]; // Avoid redundant bounds checking.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned int, int>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, int) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, int) const()
1064 const auto dlt = getLvlTypes()[l]; // Avoid redundant bounds checking.
1065 if (isCompressedDLT(dlt)) {
1066 // If parentPos == parentSz
then it’s valid as an array-lookup;
1067 // however, it’s semantically invalid here since that entry
1068 // does not represent a segment of coordinates[l]
. Moreover, that
1069 // entry must be immutable for assembledSize
to remain valid.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: ""Parent position is out of bounds"", since the test will always evaluate as true.
1070 assert(parentPos < parentSz && “Parent position is out of bounds”);
1071 const uint64_t currentPos = positions[l][parentPos];
1072 // This increment won’t overflow the P
type, since it can’t
1073 // exceed the original value of positions[l][parentPos+1]
1074 // which was already verified to be within bounds for P
1075 // when it was written to the array.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned long, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
1077 writeCrd(l, currentPos, lvlCoords[l]);
1078 parentPos = currentPos;
1079 } else if (isSingletonDLT(dlt)) {
1080 writeCrd(l, parentPos, lvlCoords[l]);
1081 // the new parentPos equals the old parentPos.
1082 } else { // Dense level.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Level is not dense", since the test will always evaluate as true.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
1088 assert(parentPos < values.size() && "Value position is out of bounds");
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned char, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
1064 const auto dlt = getLvlTypes()[l]; // Avoid redundant bounds checking.
1065 if (isCompressedDLT(dlt)) {
1066 // If parentPos == parentSz
then it’s valid as an array-lookup;
1067 // however, it’s semantically invalid here since that entry
1068 // does not represent a segment of coordinates[l]
. Moreover, that
1069 // entry must be immutable for assembledSize
to remain valid.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: ""Parent position is out of bounds"", since the test will always evaluate as true.
1070 assert(parentPos < parentSz && “Parent position is out of bounds”);
1071 const uint64_t currentPos = positions[l][parentPos];
1072 // This increment won’t overflow the P
type, since it can’t
1073 // exceed the original value of positions[l][parentPos+1]
1074 // which was already verified to be within bounds for P
1075 // when it was written to the array.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned char, unsigned long, float>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, float) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, float) const()
1077 writeCrd(l, currentPos, lvlCoords[l]);
1078 parentPos = currentPos;
1079 } else if (isSingletonDLT(dlt)) {
1080 writeCrd(l, parentPos, lvlCoords[l]);
1081 // the new parentPos equals the old parentPos.
1082 } else { // Dense level.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Level is not dense", since the test will always evaluate as true.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
1088 assert(parentPos < values.size() && "Value position is out of bounds");
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned short, long>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, long) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, long) const()
1082 } else { // Dense level.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Value position is out of bounds", since the test will always evaluate as true.
1088 assert(parentPos < values.size() && "Value position is out of bounds");
1089 values[parentPos] = val;
1090 });
1091 // The finalizeYieldPos loop
1092 for (uint64_t parentSz = 1, l = 0; l < lvlRank; ++l) {
1093 const auto dlt = lvlTypes[l]; // Avoid redundant bounds checking.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned char, unsigned char, bf16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, bf16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, bf16) const()
1082 } else { // Dense level.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Value position is out of bounds", since the test will always evaluate as true.
1088 assert(parentPos < values.size() && "Value position is out of bounds");
1089 values[parentPos] = val;
1090 });
1091 // The finalizeYieldPos loop
1092 for (uint64_t parentSz = 1, l = 0; l < lvlRank; ++l) {
1093 const auto dlt = lvlTypes[l]; // Avoid redundant bounds checking.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned char, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
1082 } else { // Dense level.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Value position is out of bounds", since the test will always evaluate as true.
1088 assert(parentPos < values.size() && "Value position is out of bounds");
1089 values[parentPos] = val;
1090 });
1091 // The finalizeYieldPos loop
1092 for (uint64_t parentSz = 1, l = 0; l < lvlRank; ++l) {
1093 const auto dlt = lvlTypes[l]; // Avoid redundant bounds checking.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned char, unsigned char, int>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, int) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, int) const()
1082 } else { // Dense level.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Value position is out of bounds", since the test will always evaluate as true.
1088 assert(parentPos < values.size() && "Value position is out of bounds");
1089 values[parentPos] = val;
1090 });
1091 // The finalizeYieldPos loop
1092 for (uint64_t parentSz = 1, l = 0; l < lvlRank; ++l) {
1093 const auto dlt = lvlTypes[l]; // Avoid redundant bounds checking.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned int, float>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, float) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, float) const()
1064 const auto dlt = getLvlTypes()[l]; // Avoid redundant bounds checking.
1065 if (isCompressedDLT(dlt)) {
1066 // If parentPos == parentSz
then it’s valid as an array-lookup;
1067 // however, it’s semantically invalid here since that entry
1068 // does not represent a segment of coordinates[l]
. Moreover, that
1069 // entry must be immutable for assembledSize
to remain valid.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: ""Parent position is out of bounds"", since the test will always evaluate as true.
1070 assert(parentPos < parentSz && “Parent position is out of bounds”);
1071 const uint64_t currentPos = positions[l][parentPos];
1072 // This increment won’t overflow the P
type, since it can’t
1073 // exceed the original value of positions[l][parentPos+1]
1074 // which was already verified to be within bounds for P
1075 // when it was written to the array.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned int, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
1082 } else { // Dense level.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Value position is out of bounds", since the test will always evaluate as true.
1088 assert(parentPos < values.size() && "Value position is out of bounds");
1089 values[parentPos] = val;
1090 });
1091 // The finalizeYieldPos loop
1092 for (uint64_t parentSz = 1, l = 0; l < lvlRank; ++l) {
1093 const auto dlt = lvlTypes[l]; // Avoid redundant bounds checking.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned int, float>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, float) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, float) const()
1077 writeCrd(l, currentPos, lvlCoords[l]);
1078 parentPos = currentPos;
1079 } else if (isSingletonDLT(dlt)) {
1080 writeCrd(l, parentPos, lvlCoords[l]);
1081 // the new parentPos equals the old parentPos.
1082 } else { // Dense level.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Level is not dense", since the test will always evaluate as true.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
1088 assert(parentPos < values.size() && "Value position is out of bounds");
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned int, long>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, long) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, long) const()
1082 } else { // Dense level.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Value position is out of bounds", since the test will always evaluate as true.
1088 assert(parentPos < values.size() && "Value position is out of bounds");
1089 values[parentPos] = val;
1090 });
1091 // The finalizeYieldPos loop
1092 for (uint64_t parentSz = 1, l = 0; l < lvlRank; ++l) {
1093 const auto dlt = lvlTypes[l]; // Avoid redundant bounds checking.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned long, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
1082 } else { // Dense level.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Value position is out of bounds", since the test will always evaluate as true.
1088 assert(parentPos < values.size() && "Value position is out of bounds");
1089 values[parentPos] = val;
1090 });
1091 // The finalizeYieldPos loop
1092 for (uint64_t parentSz = 1, l = 0; l < lvlRank; ++l) {
1093 const auto dlt = lvlTypes[l]; // Avoid redundant bounds checking.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned int, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
1077 writeCrd(l, currentPos, lvlCoords[l]);
1078 parentPos = currentPos;
1079 } else if (isSingletonDLT(dlt)) {
1080 writeCrd(l, parentPos, lvlCoords[l]);
1081 // the new parentPos equals the old parentPos.
1082 } else { // Dense level.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Level is not dense", since the test will always evaluate as true.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
1088 assert(parentPos < values.size() && "Value position is out of bounds");
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned long, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
1064 const auto dlt = getLvlTypes()[l]; // Avoid redundant bounds checking.
1065 if (isCompressedDLT(dlt)) {
1066 // If parentPos == parentSz then it's valid as an array-lookup;
1067 // however, it's semantically invalid here since that entry
1068 // does not represent a segment of coordinates[l]. Moreover, that
1069 // entry must be immutable for assembledSize to remain valid.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Parent position is out of bounds", since the test will always evaluate as true.
1070 assert(parentPos < parentSz && "Parent position is out of bounds");
1071 const uint64_t currentPos = positions[l][parentPos];
1072 // This increment won't overflow the P type, since it can't
1073 // exceed the original value of positions[l][parentPos+1]
1074 // which was already verified to be within bounds for P
1075 // when it was written to the array.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned int, float>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, float) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, float) const()
1082 } else { // Dense level.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Value position is out of bounds", since the test will always evaluate as true.
1088 assert(parentPos < values.size() && "Value position is out of bounds");
1089 values[parentPos] = val;
1090 });
1091 // The finalizeYieldPos loop
1092 for (uint64_t parentSz = 1, l = 0; l < lvlRank; ++l) {
1093 const auto dlt = lvlTypes[l]; // Avoid redundant bounds checking.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned int, int>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, int) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, int) const()
1077 writeCrd(l, currentPos, lvlCoords[l]);
1078 parentPos = currentPos;
1079 } else if (isSingletonDLT(dlt)) {
1080 writeCrd(l, parentPos, lvlCoords[l]);
1081 // the new parentPos equals the old parentPos.
1082 } else { // Dense level.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Level is not dense", since the test will always evaluate as true.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
1088 assert(parentPos < values.size() && "Value position is out of bounds");
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned char, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
1077 writeCrd(l, currentPos, lvlCoords[l]);
1078 parentPos = currentPos;
1079 } else if (isSingletonDLT(dlt)) {
1080 writeCrd(l, parentPos, lvlCoords[l]);
1081 // the new parentPos equals the old parentPos.
1082 } else { // Dense level.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Level is not dense", since the test will always evaluate as true.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
1088 assert(parentPos < values.size() && "Value position is out of bounds");
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned short, unsigned int, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
1064 const auto dlt = getLvlTypes()[l]; // Avoid redundant bounds checking.
1065 if (isCompressedDLT(dlt)) {
1066 // If parentPos == parentSz then it's valid as an array-lookup;
1067 // however, it's semantically invalid here since that entry
1068 // does not represent a segment of coordinates[l]. Moreover, that
1069 // entry must be immutable for assembledSize to remain valid.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Parent position is out of bounds", since the test will always evaluate as true.
1070 assert(parentPos < parentSz && "Parent position is out of bounds");
1071 const uint64_t currentPos = positions[l][parentPos];
1072 // This increment won't overflow the P type, since it can't
1073 // exceed the original value of positions[l][parentPos+1]
1074 // which was already verified to be within bounds for P
1075 // when it was written to the array.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned long, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
1064 const auto dlt = getLvlTypes()[l]; // Avoid redundant bounds checking.
1065 if (isCompressedDLT(dlt)) {
1066 // If parentPos == parentSz then it's valid as an array-lookup;
1067 // however, it's semantically invalid here since that entry
1068 // does not represent a segment of coordinates[l]. Moreover, that
1069 // entry must be immutable for assembledSize to remain valid.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Parent position is out of bounds", since the test will always evaluate as true.
1070 assert(parentPos < parentSz && "Parent position is out of bounds");
1071 const uint64_t currentPos = positions[l][parentPos];
1072 // This increment won't overflow the P type, since it can't
1073 // exceed the original value of positions[l][parentPos+1]
1074 // which was already verified to be within bounds for P
1075 // when it was written to the array.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned int, bf16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, bf16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, bf16) const()
1077 writeCrd(l, currentPos, lvlCoords[l]);
1078 parentPos = currentPos;
1079 } else if (isSingletonDLT(dlt)) {
1080 writeCrd(l, parentPos, lvlCoords[l]);
1081 // the new parentPos equals the old parentPos.
1082 } else { // Dense level.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Level is not dense", since the test will always evaluate as true.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
1088 assert(parentPos < values.size() && "Value position is out of bounds");
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned char, unsigned char, int>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, int) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, int) const()
1064 const auto dlt = getLvlTypes()[l]; // Avoid redundant bounds checking.
1065 if (isCompressedDLT(dlt)) {
1066 // If parentPos == parentSz then it's valid as an array-lookup;
1067 // however, it's semantically invalid here since that entry
1068 // does not represent a segment of coordinates[l]. Moreover, that
1069 // entry must be immutable for assembledSize to remain valid.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Parent position is out of bounds", since the test will always evaluate as true.
1070 assert(parentPos < parentSz && "Parent position is out of bounds");
1071 const uint64_t currentPos = positions[l][parentPos];
1072 // This increment won't overflow the P type, since it can't
1073 // exceed the original value of positions[l][parentPos+1]
1074 // which was already verified to be within bounds for P
1075 // when it was written to the array.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned long, long>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, long) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, long) const()
1077 writeCrd(l, currentPos, lvlCoords[l]);
1078 parentPos = currentPos;
1079 } else if (isSingletonDLT(dlt)) {
1080 writeCrd(l, parentPos, lvlCoords[l]);
1081 // the new parentPos equals the old parentPos.
1082 } else { // Dense level.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Level is not dense", since the test will always evaluate as true.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
1088 assert(parentPos < values.size() && "Value position is out of bounds");
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned int, int>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, int) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, int) const()
1082 } else { // Dense level.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Value position is out of bounds", since the test will always evaluate as true.
1088 assert(parentPos < values.size() && "Value position is out of bounds");
1089 values[parentPos] = val;
1090 });
1091 // The finalizeYieldPos loop
1092 for (uint64_t parentSz = 1, l = 0; l < lvlRank; ++l) {
1093 const auto dlt = lvlTypes[l]; // Avoid redundant bounds checking.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1083 in mlir::sparse_tensor::SparseTensorStorage<unsigned int, unsigned short, double>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, double) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, double) const()
1077 writeCrd(l, currentPos, lvlCoords[l]);
1078 parentPos = currentPos;
1079 } else if (isSingletonDLT(dlt)) {
1080 writeCrd(l, parentPos, lvlCoords[l]);
1081 // the new parentPos equals the old parentPos.
1082 } else { // Dense level.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Level is not dense", since the test will always evaluate as true.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
1088 assert(parentPos < values.size() && "Value position is out of bounds");
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned char, unsigned long, float>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, float) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, float) const()
1082 } else { // Dense level.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Value position is out of bounds", since the test will always evaluate as true.
1088 assert(parentPos < values.size() && "Value position is out of bounds");
1089 values[parentPos] = val;
1090 });
1091 // The finalizeYieldPos loop
1092 for (uint64_t parentSz = 1, l = 0; l < lvlRank; ++l) {
1093 const auto dlt = lvlTypes[l]; // Avoid redundant bounds checking.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1070 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned long, long>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, long) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, long) const()
1064 const auto dlt = getLvlTypes()[l]; // Avoid redundant bounds checking.
1065 if (isCompressedDLT(dlt)) {
1066 // If parentPos == parentSz then it's valid as an array-lookup;
1067 // however, it's semantically invalid here since that entry
1068 // does not represent a segment of coordinates[l]. Moreover, that
1069 // entry must be immutable for assembledSize to remain valid.
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Parent position is out of bounds", since the test will always evaluate as true.
1070 assert(parentPos < parentSz && "Parent position is out of bounds");
1071 const uint64_t currentPos = positions[l][parentPos];
1072 // This increment won't overflow the P type, since it can't
1073 // exceed the original value of positions[l][parentPos+1]
1074 // which was already verified to be within bounds for P
1075 // when it was written to the array.
/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h: 1088 in mlir::sparse_tensor::SparseTensorStorage<unsigned long, unsigned long, bf16>::SparseTensorStorage(unsigned long, const unsigned long *, unsigned long, const mlir::sparse_tensor::DimLevelType *, const unsigned long *, const unsigned long *, mlir::sparse_tensor::SparseTensorEnumeratorBase &)::[lambda(const T1 &, bf16) (instance 1)]::operator ()<std::vector<unsigned long, std::allocator>>(auto, const T1 &, bf16) const()
1082 } else { // Dense level.
1083 ASSERT_DENSE_DLT(dlt);
1084 parentPos = parentPos * getLvlSizes()[l] + lvlCoords[l];
1085 }
1086 parentSz = assembledSize(parentSz, l);
1087 }
CID 1522818: (NO_EFFECT) Comparing an array to null is not useful: "Value position is out of bounds", since the test will always evaluate as true.
1088 assert(parentPos < values.size() && "Value position is out of bounds");
1089 values[parentPos] = val;
1090 });
1091 // The finalizeYieldPos loop
1092 for (uint64_t parentSz = 1, l = 0; l < lvlRank; ++l) {
1093 const auto dlt = lvlTypes[l]; // Avoid redundant bounds checking.
** CID 1522817: Performance inefficiencies (AUTO_CAUSES_COPY)
/llvm/lib/Target/AArch64/AArch64PointerAuth.cpp: 338 in ::AArch64PointerAuth::runOnMachineFunction(llvm::MachineFunction &)()
*** CID 1522817: Performance inefficiencies (AUTO_CAUSES_COPY)
/llvm/lib/Target/AArch64/AArch64PointerAuth.cpp: 338 in ::AArch64PointerAuth::runOnMachineFunction(llvm::MachineFunction &)()
332 }
333
334 // FIXME Do we need to emit any PAuth-related epilogue code at all
335 // when SCS is enabled?
336 if (HasAuthenticationInstrs &&
337 !MFnI->needsShadowCallStackPrologueEpilogue(MF)) {
CID 1522817: Performance inefficiencies (AUTO_CAUSES_COPY) Using the "auto" keyword without an "&" causes the copy of an object of type MachineInstrBundleIterator.
338 for (auto TailCall : TailCallInstrs)
339 Modified |= checkAuthenticatedLR(TailCall);
340 }
341
342 for (auto MI : DeletedInstrs)
343 MI->eraseFromParent();
344
345 return Modified;
** CID 1522816: Performance inefficiencies (AUTO_CAUSES_COPY)
/flang/lib/Lower/OpenACC.cpp: 3158 in genACC(Fortran::lower::AbstractConverter &, Fortran::semantics::SemanticsContext &, const Fortran::parser::OpenACCRoutineConstruct &, llvm::SmallVector<std::pair<std::__cxx11::basic_string<char, std::char_traits, std::allocator>, mlir::SymbolRefAttr>, (unsigned int)1> &)()
*** CID 1522816: Performance inefficiencies (AUTO_CAUSES_COPY)
/flang/lib/Lower/OpenACC.cpp: 3158 in genACC(Fortran::lower::AbstractConverter &, Fortran::semantics::SemanticsContext &, const Fortran::parser::OpenACCRoutineConstruct &, llvm::SmallVector<std::pair<std::__cxx11::basic_string<char, std::char_traits, std::allocator>, mlir::SymbolRefAttr>, (unsigned int)1> &)()
3152 }
3153
3154 mlir::OpBuilder modBuilder(mod.getBodyRegion());
3155 std::stringstream routineOpName;
3156 routineOpName << accRoutinePrefix.str() << routineCounter++;
3157
CID 1522816: Performance inefficiencies (AUTO_CAUSES_COPY) Using the "auto" keyword without an "&" causes the copy of an object of type RoutineOp.
3158 for (auto routineOp : mod.getOps<mlir::acc::RoutineOp>()) {
3159 if (routineOp.getFuncName().str().compare(funcName) == 0) {
3160 // If the routine is already specified with the same clauses, just skip
3161 // the operation creation.
3162 if (routineOp.getBindName() == bindName &&
3163 routineOp.getGang() == hasGang &&
** CID 1522815: Performance inefficiencies (AUTO_CAUSES_COPY)
/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp: 4601 in createOutlinedFunction(llvm::OpenMPIRBuilder &, llvm::IRBuilderBase &, llvm::StringRef, llvm::SmallVectorImpl<llvm::Value *> &, llvm::function_ref<llvm::IRBuilderBase::InsertPoint (llvm::IRBuilderBase::InsertPoint, llvm::IRBuilderBase::InsertPoint)> &, llvm::function_ref<llvm::IRBuilderBase::InsertPoint (llvm::Argument &, llvm::Value *, llvm::Value *&, llvm::IRBuilderBase::InsertPoint, llvm::IRBuilderBase::InsertPoint)> &)()
*** CID 1522815: Performance inefficiencies (AUTO_CAUSES_COPY)
/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp: 4601 in createOutlinedFunction(llvm::OpenMPIRBuilder &, llvm::IRBuilderBase &, llvm::StringRef, llvm::SmallVectorImpl<llvm::Value *> &, llvm::function_ref<llvm::IRBuilderBase::InsertPoint (llvm::IRBuilderBase::InsertPoint, llvm::IRBuilderBase::InsertPoint)> &, llvm::function_ref<llvm::IRBuilderBase::InsertPoint (llvm::Argument &, llvm::Value *, llvm::Value *&, llvm::IRBuilderBase::InsertPoint, llvm::IRBuilderBase::InsertPoint)> &)()
4595 Builder.SetInsertPoint(EntryBB->getFirstNonPHI());
4596 auto AllocaIP = Builder.saveIP();
4597
4598 Builder.SetInsertPoint(UserCodeEntryBB->getFirstNonPHIOrDbg());
4599
4600 // Rewrite uses of input valus to parameters.
CID 1522815: Performance inefficiencies (AUTO_CAUSES_COPY) Using the "auto" keyword without an "&" causes the copy of an object of type tuple.
4601 for (auto InArg : zip(Inputs, Func->args())) {
4602 Value *Input = std::get<0>(InArg);
4603 Argument &Arg = std::get<1>(InArg);
4604 Value *InputCopy = nullptr;
4605
4606 Builder.restoreIP(
** CID 1522814: (AUTO_CAUSES_COPY)
/build-llvm/tools/clang/stage2-bins/tools/mlir/lib/Dialect/Arith/IR/ArithCanonicalization.inc: 2060 in ::RedundantSelectTrue::matchAndRewrite(mlir::Operation *, mlir::PatternRewriter &) const()
/build-llvm/tools/clang/stage2-bins/tools/mlir/lib/Dialect/Arith/IR/ArithCanonicalization.inc: 2066 in ::RedundantSelectTrue::matchAndRewrite(mlir::Operation *, mlir::PatternRewriter &) const()
*** CID 1522814: (AUTO_CAUSES_COPY)
/build-llvm/tools/clang/stage2-bins/tools/mlir/lib/Dialect/Arith/IR/ArithCanonicalization.inc: 2060 in ::RedundantSelectTrue::matchAndRewrite(mlir::Operation *, mlir::PatternRewriter &) const()
2054 ::llvm::SmallVector<::mlir::Value, 4> tblgen_values; (void)tblgen_values;
2055 ::llvm::SmallVector<::mlir::NamedAttribute, 4> tblgen_attrs; (void)tblgen_attrs;
2056 tblgen_values.push_back((*pred.begin()));
2057 tblgen_values.push_back((*a.begin()));
2058 tblgen_values.push_back((*c.begin()));
2059 ::llvm::SmallVector<::mlir::Type, 4> tblgen_types; (void)tblgen_types;
CID 1522814: (AUTO_CAUSES_COPY) Using the "auto" keyword without an "&" causes the copy of an object of type OpResult.
2060 for (auto v: castedOp0.getODSResults(0)) {
2061 tblgen_types.push_back(v.getType());
2062 }
2063 tblgen_SelectOp_0 = rewriter.create<::mlir::arith::SelectOp>(odsLoc, tblgen_types, tblgen_values, tblgen_attrs);
2064 }
2065
/build-llvm/tools/clang/stage2-bins/tools/mlir/lib/Dialect/Arith/IR/ArithCanonicalization.inc: 2066 in ::RedundantSelectTrue::matchAndRewrite(mlir::Operation *, mlir::PatternRewriter &) const()
2060 for (auto v: castedOp0.getODSResults(0)) {
2061 tblgen_types.push_back(v.getType());
2062 }
2063 tblgen_SelectOp_0 = rewriter.create<::mlir::arith::SelectOp>(odsLoc, tblgen_types, tblgen_values, tblgen_attrs);
2064 }
2065
CID 1522814: (AUTO_CAUSES_COPY) Using the "auto" keyword without an "&" causes the copy of an object of type Value.
2066 for (auto v: ::llvm::SmallVector<::mlir::Value, 4>{ tblgen_SelectOp_0.getODSResults(0) }) {
2067 tblgen_repl_values.push_back(v);
2068 }
2069
2070 rewriter.replaceOp(op0, tblgen_repl_values);
2071 return ::mlir::success();
** CID 1522813: Performance inefficiencies (AUTO_CAUSES_COPY)
/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp: 1139 in ::MFMASmallGemmSingleWaveOpt::applyIGLPStrategy(llvm::DenseMap<int, llvm::DenseMap<llvm::SUnit *, llvm::SmallVector<int, (unsigned int)4>, llvm::DenseMapInfo<llvm::SUnit *, void>, llvm::detail::DenseMapPair<llvm::SUnit *, llvm::SmallVector<int, (unsigned int)4>>>, llvm::DenseMapInfo<int, void>, llvm::detail::DenseMapPair<int, llvm::DenseMap<llvm::SUnit *, llvm::SmallVector<int, (unsigned int)4>, llvm::DenseMapInfo<llvm::SUnit *, void>, llvm::detail::DenseMapPair<llvm::SUnit *, llvm::SmallVector<int, (unsigned int)4>>>>> &, llvm::DenseMap<int, llvm::SmallVector<::SchedGroup, (unsigned int)4>, llvm::DenseMapInfo<int, void>, llvm::detail::DenseMapPair<int, llvm::SmallVector<::SchedGroup, (unsigned int)4>>> &, bool)()
*** CID 1522813: Performance inefficiencies (AUTO_CAUSES_COPY)
/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp: 1139 in ::MFMASmallGemmSingleWaveOpt::applyIGLPStrategy(llvm::DenseMap<int, llvm::DenseMap<llvm::SUnit *, llvm::SmallVector<int, (unsigned int)4>, llvm::DenseMapInfo<llvm::SUnit *, void>, llvm::detail::DenseMapPair<llvm::SUnit *, llvm::SmallVector<int, (unsigned int)4>>>, llvm::DenseMapInfo<int, void>, llvm::detail::DenseMapPair<int, llvm::DenseMap<llvm::SUnit *, llvm::SmallVector<int, (unsigned int)4>, llvm::DenseMapInfo<llvm::SUnit *, void>, llvm::detail::DenseMapPair<llvm::SUnit *, llvm::SmallVector<int, (unsigned int)4>>>>> &, llvm::DenseMap<int, llvm::SmallVector<::SchedGroup, (unsigned int)4>, llvm::DenseMapInfo<int, void>, llvm::detail::DenseMapPair<int, llvm::SmallVector<::SchedGroup, (unsigned int)4>>> &, bool)()
1133 ++MFMACount;
1134 else if (TII->isDS(*I)) {
1135 if (I->mayLoad())
1136 ++DSRCount;
1137 else if (I->mayStore() && !IsPostRA) {
1138 ++DSWCount;
CID 1522813: Performance inefficiencies (AUTO_CAUSES_COPY) Using the "auto" keyword without an "&" causes the copy of an object of type SDep.
1139 for (auto Pred : SU.Preds) {
1140 if (Pred.getSUnit()->getInstr()->getOpcode() ==
1141 AMDGPU::V_PERM_B32_e64) {
1142 DSWithPerms.push_back(&SU);
1143 break;
1144 }
** CID 1522812: (AUTO_CAUSES_COPY)
/build-llvm/tools/clang/stage2-bins/tools/mlir/include/mlir/Dialect/ArmSVE/IR/ArmSVE.cpp.inc: 283 in mlir::arm_sve::ConvertFromSvboolIntrOp::verifyInvariantsImpl()()
/build-llvm/tools/clang/stage2-bins/tools/mlir/include/mlir/Dialect/ArmSVE/IR/ArmSVE.cpp.inc: 292 in mlir::arm_sve::ConvertFromSvboolIntrOp::verifyInvariantsImpl()()
*** CID 1522812: (AUTO_CAUSES_COPY)
/build-llvm/tools/clang/stage2-bins/tools/mlir/include/mlir/Dialect/ArmSVE/IR/ArmSVE.cpp.inc: 283 in mlir::arm_sve::ConvertFromSvboolIntrOp::verifyInvariantsImpl()()
277
278 ::mlir::LogicalResult ConvertFromSvboolIntrOp::verifyInvariantsImpl() {
279 {
280 unsigned index = 0; (void)index;
281 auto valueGroup0 = getODSOperands(0);
282
CID 1522812: (AUTO_CAUSES_COPY) Using the "auto" keyword without an "&" causes the copy of an object of type Value.
283 for (auto v : valueGroup0) {
284 if (::mlir::failed(__mlir_ods_local_type_constraint_ArmSVE0(*this, v.getType(), "operand", index++)))
285 return ::mlir::failure();
286 }
287 }
288 {
/build-llvm/tools/clang/stage2-bins/tools/mlir/include/mlir/Dialect/ArmSVE/IR/ArmSVE.cpp.inc: 292 in mlir::arm_sve::ConvertFromSvboolIntrOp::verifyInvariantsImpl()()
286 }
287 }
288 {
289 unsigned index = 0; (void)index;
290 auto valueGroup0 = getODSResults(0);
291
CID 1522812: (AUTO_CAUSES_COPY) Using the "auto" keyword without an "&" causes the copy of an object of type OpResult.
292 for (auto v : valueGroup0) {
293 if (::mlir::failed(__mlir_ods_local_type_constraint_ArmSVE1(*this, v.getType(), "result", index++)))
294 return ::mlir::failure();
295 }
296 }
297 if (!((((::llvm::isa<::mlir::VectorType>((*this->getODSResults(0).begin()).getType()) &&
** CID 1522811: Memory - illegal accesses (OVERRUN)
/llvm/lib/Target/AArch64/AArch64ExpandImm.cpp: 428 in tryEorOfLogicalImmediates(unsigned long, llvm::SmallVectorImplllvm::AArch64_IMM::ImmInsnModel &)()
*** CID 1522811: Memory - illegal accesses (OVERRUN)
/llvm/lib/Target/AArch64/AArch64ExpandImm.cpp: 428 in tryEorOfLogicalImmediates(unsigned long, llvm::SmallVectorImplllvm::AArch64_IMM::ImmInsnModel &)()
422 };
423
424 // This RepeatedOnesTable lookup is a faster implementation of the division
425 // 0xffffffffffffffff / ((1 << SmallSize) - 1), and can be thought of as
426 // dividing the 64-bit value into fields of width SmallSize, and placing a
427 // one in the least significant bit of each field.
CID 1522811: Memory - illegal accesses (OVERRUN) Overrunning array "RepeatedOnesTable" of 7 8-byte elements at element index 32 (byte offset 263) using index "llvm::countr_zero(SmallSize)" (which evaluates to 32).
428 uint64_t SmallOnes = RepeatedOnesTable[countr_zero(SmallSize)];
429
430 // Now we try to find the number of ones in each of the smaller repetitions,
431 // by looking at runs of ones in Imm. This can take three attempts, as the
432 // EOR may have changed the length of the first two runs we find.
433
** CID 1522810: Uninitialized members (UNINIT_CTOR)
/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp: 1421 in ::IGroupLPDAGMutation::IGroupLPDAGMutation(bool)()
*** CID 1522810: Uninitialized members (UNINIT_CTOR)
/llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp: 1421 in ::IGroupLPDAGMutation::IGroupLPDAGMutation(bool)()
1415 bool IsBottomUp = 1;
1416
1417 // Whether the mutation is being applied to post RA scheduling
1418 bool IsPostRA = false;
1419
1420 IGroupLPDAGMutation() = default;
CID 1522810: Uninitialized members (UNINIT_CTOR) Non-static class member "DAG" is not initialized in this constructor nor in any functions that it calls.
1421 IGroupLPDAGMutation(bool IsPostRA) : IsPostRA(IsPostRA) {}
1422 };
1423
1424 unsigned SchedGroup::NumSchedGroups = 0;
1425
1426 bool SchedGroup::tryAddEdge(SUnit *A, SUnit *B) {
** CID 1522809: Performance inefficiencies (AUTO_CAUSES_COPY)
/build-llvm/tools/clang/stage2-bins/tools/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.cpp.inc: 4394 in mlir::NVVM::ElectSyncOp::verifyInvariantsImpl()()
*** CID 1522809: Performance inefficiencies (AUTO_CAUSES_COPY)
/build-llvm/tools/clang/stage2-bins/tools/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.cpp.inc: 4394 in mlir::NVVM::ElectSyncOp::verifyInvariantsImpl()()
4388
4389 ::mlir::LogicalResult ElectSyncOp::verifyInvariantsImpl() {
4390 {
4391 unsigned index = 0; (void)index;
4392 auto valueGroup0 = getODSResults(0);
4393
CID 1522809: Performance inefficiencies (AUTO_CAUSES_COPY) Using the "auto" keyword without an "&" causes the copy of an object of type OpResult.
4394 for (auto v : valueGroup0) {
4395 if (::mlir::failed(__mlir_ods_local_type_constraint_NVVMOps6(*this, v.getType(), "result", index++)))
4396 return ::mlir::failure();
4397 }
4398 }
4399 return ::mlir::success();
** CID 1522808: (NULL_RETURNS)
*** CID 1522808: (NULL_RETURNS)
/flang/lib/Lower/ConvertExprToHLFIR.cpp: 1439 in ::UnaryOp<Fortran::evaluate::Convert<Fortran::evaluate::Type<(Fortran::common::TypeCategory)3, (int)4>, (Fortran::common::TypeCategory)3>>::gen(mlir::Location, fir::FirOpBuilder &, const Fortran::evaluate::Convert<Fortran::evaluate::Type<(Fortran::common::TypeCategory)3, (int)4>, (Fortran::common::TypeCategory)3> &, hlfir::Entity)()
1433 }
1434 // allocate space on the stack for toBuffer
1435 auto dest = builder.create<fir::AllocaOp>(loc, toTy,
1436 mlir::ValueRange{bufferSize});
1437 auto src = hlfir::convertToAddress(loc, builder, lhs,
1438 lhs.getFortranElementType());
CID 1522808: (NULL_RETURNS) Dereferencing a pointer that might be "nullptr" "src.first.getCharBox()" when calling "getAddr".
1439 builder.create<fir::CharConvertOp>(loc, src.first.getCharBox()->getAddr(),
1440 origBufferSize, dest);
1441 if (src.second.has_value())
1442 src.second.value()();
1443
1444 return hlfir::EntityWithAttributes{builder.create<hlfir::DeclareOp>(
/flang/lib/Lower/ConvertExprToHLFIR.cpp: 1439 in ::UnaryOp<Fortran::evaluate::Convert<Fortran::evaluate::Type<(Fortran::common::TypeCategory)3, (int)1>, (Fortran::common::TypeCategory)3>>::gen(mlir::Location, fir::FirOpBuilder &, const Fortran::evaluate::Convert<Fortran::evaluate::Type<(Fortran::common::TypeCategory)3, (int)1>, (Fortran::common::TypeCategory)3> &, hlfir::Entity)()
1433 }
1434 // allocate space on the stack for toBuffer
1435 auto dest = builder.create<fir::AllocaOp>(loc, toTy,
1436 mlir::ValueRange{bufferSize});
1437 auto src = hlfir::convertToAddress(loc, builder, lhs,
1438 lhs.getFortranElementType());
CID 1522808: (NULL_RETURNS) Dereferencing a pointer that might be "nullptr" "src.first.getCharBox()" when calling "getAddr".
1439 builder.create<fir::CharConvertOp>(loc, src.first.getCharBox()->getAddr(),
1440 origBufferSize, dest);
1441 if (src.second.has_value())
1442 src.second.value()();
1443
1444 return hlfir::EntityWithAttributes{builder.create<hlfir::DeclareOp>(
/flang/lib/Lower/ConvertExprToHLFIR.cpp: 1439 in ::UnaryOp<Fortran::evaluate::Convert<Fortran::evaluate::Type<(Fortran::common::TypeCategory)3, (int)2>, (Fortran::common::TypeCategory)3>>::gen(mlir::Location, fir::FirOpBuilder &, const Fortran::evaluate::Convert<Fortran::evaluate::Type<(Fortran::common::TypeCategory)3, (int)2>, (Fortran::common::TypeCategory)3> &, hlfir::Entity)()
1433 }
1434 // allocate space on the stack for toBuffer
1435 auto dest = builder.create<fir::AllocaOp>(loc, toTy,
1436 mlir::ValueRange{bufferSize});
1437 auto src = hlfir::convertToAddress(loc, builder, lhs,
1438 lhs.getFortranElementType());
CID 1522808: (NULL_RETURNS) Dereferencing a pointer that might be "nullptr" "src.first.getCharBox()" when calling "getAddr".
1439 builder.create<fir::CharConvertOp>(loc, src.first.getCharBox()->getAddr(),
1440 origBufferSize, dest);
1441 if (src.second.has_value())
1442 src.second.value()();
1443
1444 return hlfir::EntityWithAttributes{builder.create<hlfir::DeclareOp>(
** CID 1522807: Performance inefficiencies (AUTO_CAUSES_COPY)
/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp: 142 in wrapForExternalCallers(mlir::OpBuilder &, mlir::Location, const mlir::LLVMTypeConverter &, mlir::FunctionOpInterface, mlir::LLVM::LLVMFuncOp)()
*** CID 1522807: Performance inefficiencies (AUTO_CAUSES_COPY)
/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp: 142 in wrapForExternalCallers(mlir::OpBuilder &, mlir::Location, const mlir::LLVMTypeConverter &, mlir::FunctionOpInterface, mlir::LLVM::LLVMFuncOp)()
136
137 OpBuilder::InsertionGuard guard(rewriter);
138 rewriter.setInsertionPointToStart(wrapperFuncOp.addEntryBlock());
139
140 SmallVector<Value, 8> args;
141 size_t argOffset = resultStructType ? 1 : 0;
CID 1522807: Performance inefficiencies (AUTO_CAUSES_COPY) Using the "auto" keyword without an "&" causes the copy of an object of type enumerator_result.
142 for (auto [index, argType] : llvm::enumerate(type.getInputs())) {
143 Value arg = wrapperFuncOp.getArgument(index + argOffset);
144 if (auto memrefType = dyn_cast<MemRefType>(argType)) {
145 Value loaded = rewriter.create<LLVM::LoadOp>(
146 loc, typeConverter.convertType(memrefType), arg);
147 MemRefDescriptor::unpack(rewriter, loc, loaded, memrefType, args);
To view the defects in Coverity Scan visit, Coverity Scan - Sign in