[mlir][sparse] Refactor the convert operator conversion to support codegen for the operator.
Outline the code that generates the loop structure to iterate over a dense
tensor or a sparse constant to genDenseTensorOrSparseConstantIterLoop. Move a
few routines to CodegenUtils for sharing.

Reviewed By: wrengr

Differential Revision: https://reviews.llvm.org/D136210
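For orientation, the call site introduced in the last hunk below suggests the outlined helper has roughly the following shape. This is a sketch inferred from the diff, not a copy of the declaration in CodegenUtils.h, and the exact callback type may differ.

// Sketch only: signature inferred from the new call site in this patch. The
// helper is expected to build a loop nest over a dense tensor or over the
// stored elements of a sparse constant, invoking the callback once per
// element with its value and coordinates.
void genDenseTensorOrSparseConstantIterLoop(
    OpBuilder &builder, Location loc, Value src, unsigned rank,
    function_ref<void(OpBuilder &, Location, Value, ValueRange)> bodyBuilder);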
@@ -110,14 +110,6 @@ static void sizesFromType(OpBuilder &builder, SmallVector<Value, 4> &sizes,
   }
 }
 
-/// Populates given sizes array from source.
-static void sizesFromSrc(OpBuilder &builder, SmallVector<Value, 4> &sizes,
-                         Location loc, Value src) {
-  unsigned rank = src.getType().cast<ShapedType>().getRank();
-  for (unsigned i = 0; i < rank; i++)
-    sizes.push_back(linalg::createOrFoldDimOp(builder, loc, src, i));
-}
-
 /// Populates the given sizes array for concatenation from type (for static
 /// sizes) and from an already-converted opaque pointer source (for dynamic
 /// sizes).
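A minimal usage sketch for the moved sizesFromSrc helper (hypothetical caller, not part of this diff):

// Hypothetical caller: collect one dimension-size Value per dimension of src,
// e.g. to pass to the runtime when creating the destination sparse tensor.
SmallVector<Value, 4> sizes;
sizesFromSrc(rewriter, sizes, loc, src);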
@@ -213,38 +205,6 @@ static void newParams(OpBuilder &builder, SmallVector<Value, 8> ¶ms,
   params.push_back(ptr);
 }
 
-/// Generates the code to read the value from tensor[ivs].The generated code
-/// looks like the following and the insertion point after this routine is
-/// inside the if-then branch behind the assignment to ind.
-///    if (tensor[ivs] != 0)
-///      insert_point
-static Value genValueForDense(OpBuilder &builder, Location loc, Value tensor,
-                              ValueRange ivs) {
-  Value val = builder.create<tensor::ExtractOp>(loc, tensor, ivs);
-  Value cond = genIsNonzero(builder, loc, val);
-  scf::IfOp ifOp = builder.create<scf::IfOp>(loc, cond, /*else*/ false);
-  builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
-  return val;
-}
-
-/// Generates the code to read the value from tensor[ivs], and conditionally
-/// stores the indices ivs to the memory in ind. The generated code looks like
-/// the following and the insertion point after this routine is inside the
-/// if-then branch behind the assignment to ind. This is to ensure that the
-/// addEltX call generated after is inside the if-then branch.
-///    if (tensor[ivs] != 0)
-///      ind = ivs
-static Value genIndexAndValueForDense(OpBuilder &builder, Location loc,
-                                      Value tensor, Value ind, ValueRange ivs) {
-  Value val = genValueForDense(builder, loc, tensor, ivs);
-  unsigned i = 0;
-  for (auto iv : ivs) {
-    Value idx = constantIndex(builder, loc, i++);
-    builder.create<memref::StoreOp>(loc, iv, ind, idx);
-  }
-  return val;
-}
-
 /// Generates a call to release/delete a `SparseTensorCOO`.
 static void genDelCOOCall(OpBuilder &builder, Location loc, Type elemTp,
                           Value coo) {
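The insertion-point convention documented in the comments above is what lets a caller emit its per-element work under the non-zero guard. A minimal sketch, assuming an elemPtr buffer provided by the caller (not taken from the patch):

// Sketch only. genValueForDense leaves the insertion point inside the
// scf.if "then" region it creates, so everything built after the call is
// guarded by the (tensor[ivs] != 0) test.
Value val = genValueForDense(builder, loc, tensor, ivs);
// Insertion point is now inside: if (tensor[ivs] != 0) { ... }
builder.create<memref::StoreOp>(loc, val, elemPtr); // runs only for non-zeros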
@@ -287,57 +247,6 @@ static Value genGetNextCall(OpBuilder &builder, Location loc, Value iter,
       .getResult(0);
 }
 
-/// If the tensor is a sparse constant, generates and returns the pair of
-/// the constants for the indices and the values.
-static Optional<std::pair<Value, Value>>
-genSplitSparseConstant(OpBuilder &builder, Location loc, Value tensor) {
-  if (auto constOp = tensor.getDefiningOp<arith::ConstantOp>()) {
-    if (auto attr = constOp.getValue().dyn_cast<SparseElementsAttr>()) {
-      DenseElementsAttr indicesAttr = attr.getIndices();
-      Value indices = builder.create<arith::ConstantOp>(loc, indicesAttr);
-      DenseElementsAttr valuesAttr = attr.getValues();
-      Value values = builder.create<arith::ConstantOp>(loc, valuesAttr);
-      return std::make_pair(indices, values);
-    }
-  }
-  return {};
-}
-
-/// Generates the code to copy the index at indices[ivs] to ind, and return
-/// the value at value[ivs].
-static Value genIndexAndValueForSparse(OpBuilder &builder, Location loc,
-                                       Value indices, Value values, Value ind,
-                                       ValueRange ivs, unsigned rank) {
-  for (unsigned i = 0; i < rank; i++) {
-    Value idx = constantIndex(builder, loc, i);
-    Value val = builder.create<tensor::ExtractOp>(loc, indices,
-                                                  ValueRange{ivs[0], idx});
-    val = builder.create<arith::IndexCastOp>(loc, builder.getIndexType(), val);
-    builder.create<memref::StoreOp>(loc, val, ind, idx);
-  }
-  return builder.create<tensor::ExtractOp>(loc, values, ivs[0]);
-}
-
-/// Generates code to allocate a buffer of the given type, and zero
-/// initialize it. If the buffer type has any dynamic sizes, then the
-/// `sizes` parameter should be as filled by sizesFromPtr(); that way
-/// we can reuse the genDimSizeCall() results generated by sizesFromPtr().
-static Value allocDenseTensor(OpBuilder &builder, Location loc,
-                              RankedTensorType tensorTp, ValueRange sizes) {
-  Type elemTp = tensorTp.getElementType();
-  auto shape = tensorTp.getShape();
-  auto memTp = MemRefType::get(shape, elemTp);
-  SmallVector<Value> dynamicSizes;
-  for (unsigned i = 0, rank = tensorTp.getRank(); i < rank; i++) {
-    if (shape[i] == ShapedType::kDynamicSize)
-      dynamicSizes.push_back(sizes[i]);
-  }
-  Value mem = builder.create<memref::AllocOp>(loc, memTp, dynamicSizes);
-  Value zero = constantZero(builder, loc, elemTp);
-  builder.create<linalg::FillOp>(loc, ValueRange{zero}, ValueRange{mem});
-  return mem;
-}
-
 /// Generates code to deallocate a dense buffer.
 static void deallocDenseTensor(OpBuilder &builder, Location loc, Value buffer) {
   builder.create<memref::DeallocOp>(loc, buffer);
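To make the split concrete, here is an illustration of what genSplitSparseConstant produces for a small sparse constant; the example values and element types are made up and only the general shape is asserted:

// Illustration only. For a sparse constant such as
//   %c = arith.constant sparse<[[0, 0], [1, 2]], [1.0, 5.0]> : tensor<2x3xf64>
// the attribute splits into a dense coordinate constant and a dense value
// constant, which genIndexAndValueForSparse then indexes row by row.
if (auto indicesValues = genSplitSparseConstant(builder, loc, tensor)) {
  Value indices = indicesValues->first;  // e.g. a constant of tensor<2x2xi64>
  Value values = indicesValues->second;  // e.g. a constant of tensor<2xf64>
  // genIndexAndValueForSparse(builder, loc, indices, values, ind, ivs, rank)
  // copies row ivs[0] of `indices` into `ind` and returns values[ivs[0]].
}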
@@ -905,43 +814,17 @@ public:
       Value coo = genNewCall(rewriter, loc, params);
       Value ind = genAlloca(rewriter, loc, rank, rewriter.getIndexType());
       Value perm = params[2];
-      SmallVector<Value> lo;
-      SmallVector<Value> hi;
-      SmallVector<Value> st;
-      Value zero = constantIndex(rewriter, loc, 0);
-      Value one = constantIndex(rewriter, loc, 1);
-      auto indicesValues = genSplitSparseConstant(rewriter, loc, src);
-      bool isCOOConstant = indicesValues.has_value();
-      Value indices;
-      Value values;
-      if (isCOOConstant) {
-        indices = indicesValues->first;
-        values = indicesValues->second;
-        lo.push_back(zero);
-        hi.push_back(linalg::createOrFoldDimOp(rewriter, loc, values, 0));
-        st.push_back(one);
-      } else {
-        for (unsigned i = 0; i < rank; i++) {
-          lo.push_back(zero);
-          hi.push_back(linalg::createOrFoldDimOp(rewriter, loc, src, i));
-          st.push_back(one);
-        }
-      }
       Type eltType = stp.getElementType();
       Value elemPtr = genAllocaScalar(rewriter, loc, eltType);
-      scf::buildLoopNest(
-          rewriter, op.getLoc(), lo, hi, st, {},
-          [&](OpBuilder &builder, Location loc, ValueRange ivs,
-              ValueRange args) -> scf::ValueVector {
-            Value val;
-            if (isCOOConstant)
-              val = genIndexAndValueForSparse(rewriter, loc, indices, values, ind,
-                                              ivs, rank);
-            else
-              val = genIndexAndValueForDense(rewriter, loc, src, ind, ivs);
+      genDenseTensorOrSparseConstantIterLoop(
+          rewriter, loc, src, rank,
+          [&](OpBuilder &builder, Location loc, Value val, ValueRange indices) {
+            for (unsigned i = 0; i < rank; i++) {
+              Value idx = constantIndex(builder, loc, i);
+              builder.create<memref::StoreOp>(loc, indices[i], ind, idx);
+            }
             builder.create<memref::StoreOp>(loc, val, elemPtr);
-            genAddEltCall(rewriter, loc, eltType, coo, elemPtr, ind, perm);
-            return {};
+            genAddEltCall(builder, loc, eltType, coo, elemPtr, ind, perm);
           });
       // Final call to construct sparse tensor storage.
       params[6] = constantAction(rewriter, loc, Action::kFromCOO);
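Since the refactoring is meant to let a codegen path for the convert operator reuse the same iteration logic, a later conversion could plausibly drive the helper with its own callback. A purely hypothetical sketch, not part of this patch:

// Hypothetical reuse: a direct-codegen conversion could iterate the source
// the same way but insert elements itself instead of calling the runtime
// addElt entry point through genAddEltCall.
genDenseTensorOrSparseConstantIterLoop(
    rewriter, loc, src, rank,
    [&](OpBuilder &builder, Location loc, Value val, ValueRange indices) {
      // Insert (indices, val) into whatever storage the codegen path builds.
    });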