[mlir][sparse] first end-to-end matmul with codegen

(1) also fixes a memory leak in sparse2dense rewriting
(2) still needs a fix in dense2sparse by skipping zeros

Reviewed By: wrengr

Differential Revision: https://reviews.llvm.org/D137736
This commit is contained in:
Aart Bik
2022-11-09 13:05:43 -08:00
parent 8bcf5df304
commit a61a9a700a
5 changed files with 28 additions and 8 deletions

View File

@@ -564,7 +564,10 @@ private:
SmallVector<Value, 4> sizes;
sizesForTensor(rewriter, sizes, loc, srcTp, src);
Value dst = allocDenseTensor(rewriter, loc, dstTp, sizes);
Block *insertionBlock = rewriter.getInsertionBlock();
bool noEscape = bufferization::allocationDoesNotEscape(op->getOpResult(0));
rewriter.create<ForeachOp>(loc, src, llvm::None,
[&](OpBuilder &builder, Location loc,
@@ -575,6 +578,12 @@ private:
});
rewriter.replaceOpWithNewOp<bufferization::ToTensorOp>(op, dstTp, dst);
// Deallocate the buffer.
if (noEscape) {
rewriter.setInsertionPoint(insertionBlock->getTerminator());
deallocDenseTensor(rewriter, loc, dst);
}
return success();
}