mirror of
https://github.com/intel/llvm.git
synced 2026-02-03 02:26:27 +08:00
[mlir][sparse] first end-to-end matmul with codegen
(1) Also fixes a memory leak in the sparse2dense rewriting. (2) Still needs a fix in dense2sparse by skipping zeros. Reviewed By: wrengr Differential Revision: https://reviews.llvm.org/D137736
This commit is contained in:
@@ -564,7 +564,10 @@ private:
|
||||
|
||||
SmallVector<Value, 4> sizes;
|
||||
sizesForTensor(rewriter, sizes, loc, srcTp, src);
|
||||
|
||||
Value dst = allocDenseTensor(rewriter, loc, dstTp, sizes);
|
||||
Block *insertionBlock = rewriter.getInsertionBlock();
|
||||
bool noEscape = bufferization::allocationDoesNotEscape(op->getOpResult(0));
|
||||
|
||||
rewriter.create<ForeachOp>(loc, src, llvm::None,
|
||||
[&](OpBuilder &builder, Location loc,
|
||||
@@ -575,6 +578,12 @@ private:
|
||||
});
|
||||
|
||||
rewriter.replaceOpWithNewOp<bufferization::ToTensorOp>(op, dstTp, dst);
|
||||
|
||||
// Deallocate the buffer.
|
||||
if (noEscape) {
|
||||
rewriter.setInsertionPoint(insertionBlock->getTerminator());
|
||||
deallocDenseTensor(rewriter, loc, dst);
|
||||
}
|
||||
return success();
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user