[mlir][sparse] refactor sparsification and bufferization pass into proper TD pass

Registering SparsificationAndBufferization as a proper TD pass
has the advantage that it can be invoked and tested in isolation. This
change also moves some bufferization-specific setup from the pipeline
file into the pass file, keeping related logic in one place.

Reviewed By: Peiming

Differential Revision: https://reviews.llvm.org/D158219
This commit is contained in:
Aart Bik
2023-08-17 14:13:50 -07:00
parent 110d141f12
commit 8154494e28
4 changed files with 73 additions and 40 deletions

View File

@@ -22,6 +22,7 @@
//===----------------------------------------------------------------------===//
namespace mlir {
namespace bufferization {
struct OneShotBufferizationOptions;
} // namespace bufferization
@@ -215,12 +216,13 @@ std::unique_ptr<Pass> createStorageSpecifierToLLVMPass();
//===----------------------------------------------------------------------===//
// The mini-pipeline for sparsification and bufferization.
//
// Note that this mini-pipeline is defined through the tablegen pass
// mechanism, so it is individually available through the command-line in
// addition to being used as part of the full sparse compiler pipeline.
//===----------------------------------------------------------------------===//
bufferization::OneShotBufferizationOptions
getBufferizationOptionsForSparsification(bool analysisOnly);
std::unique_ptr<Pass> createSparsificationAndBufferizationPass();
std::unique_ptr<Pass> createSparsificationAndBufferizationPass(
const bufferization::OneShotBufferizationOptions &bufferizationOptions,
const SparsificationOptions &sparsificationOptions,

View File

@@ -373,4 +373,23 @@ def StorageSpecifierToLLVM : Pass<"sparse-storage-specifier-to-llvm", "ModuleOp"
];
}
def SparsificationAndBufferization : Pass<"sparsification-and-bufferization", "ModuleOp"> {
  // Fixed typo: "sparsifiation" -> "sparsification" (was present in both the
  // summary and the description).
  let summary = "Mini-pipeline that combines bufferization and sparsification";
  let description = [{
    This pass forms a mini-pipeline that combines bufferization and sparsification.
  }];
  let constructor = "mlir::createSparsificationAndBufferizationPass()";
  // Dialects in which this mini-pipeline may create operations.
  let dependentDialects = [
    "affine::AffineDialect",
    "arith::ArithDialect",
    "bufferization::BufferizationDialect",
    "gpu::GPUDialect",
    "LLVM::LLVMDialect",
    "linalg::LinalgDialect",
    "memref::MemRefDialect",
    "scf::SCFDialect",
    "sparse_tensor::SparseTensorDialect",
  ];
}
#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_PASSES

View File

@@ -25,31 +25,6 @@
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"
using namespace mlir;
using namespace mlir::sparse_tensor;
/// Builds the One-Shot Bufferize configuration used by the sparse compiler
/// pipeline. With `analysisOnly` set, bufferization only performs (and
/// prints) its analysis instead of rewriting the IR.
static bufferization::OneShotBufferizationOptions
getBufferizationOptions(bool analysisOnly) {
  using namespace bufferization;
  OneShotBufferizationOptions opts;
  // Bufferize across function boundaries as well.
  opts.bufferizeFunctionBoundaries = true;
  // TODO(springerm): To spot memory leaks more easily, returning dense allocs
  // should be disallowed.
  opts.allowReturnAllocs = true;
  opts.setFunctionBoundaryTypeConversion(LayoutMapOption::IdentityLayoutMap);
  // Tensors of unknown provenance are mapped to memrefs with a static
  // identity layout.
  opts.unknownTypeConverterFn = [](Value v, Attribute memorySpace,
                                   const BufferizationOptions &options) {
    return getMemRefTypeWithStaticIdentityLayout(cast<TensorType>(v.getType()),
                                                 memorySpace);
  };
  if (analysisOnly) {
    opts.testAnalysisOnly = true;
    opts.printConflicts = true;
  }
  return opts;
}
//===----------------------------------------------------------------------===//
// Pipeline implementation.
//===----------------------------------------------------------------------===//
@@ -58,7 +33,8 @@ void mlir::sparse_tensor::buildSparseCompiler(
OpPassManager &pm, const SparseCompilerOptions &options) {
pm.addNestedPass<func::FuncOp>(createLinalgGeneralizationPass());
pm.addPass(createSparsificationAndBufferizationPass(
getBufferizationOptions(options.testBufferizationAnalysisOnly),
getBufferizationOptionsForSparsification(
options.testBufferizationAnalysisOnly),
options.sparsificationOptions(), options.sparseTensorConversionOptions(),
options.createSparseDeallocs, options.enableRuntimeLibrary,
options.enableBufferInitialization, options.vectorLength,

View File

@@ -8,6 +8,7 @@
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
@@ -18,15 +19,21 @@
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"
using namespace mlir;
using namespace mlir::func;
namespace mlir {
#define GEN_PASS_DEF_SPARSIFICATIONANDBUFFERIZATION
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h.inc"
namespace sparse_tensor {
/// Return `true` if one of the given types is a sparse tensor type.
@@ -50,8 +57,8 @@ static bool containsSparseTensor(TypeRange types) {
/// * Dense tensor ops are lowered through BufferizableOpInterface
/// implementations.
class SparsificationAndBufferizationPass
: public PassWrapper<SparsificationAndBufferizationPass,
OperationPass<ModuleOp>> {
: public impl::SparsificationAndBufferizationBase<
SparsificationAndBufferizationPass> {
public:
SparsificationAndBufferizationPass(
const bufferization::OneShotBufferizationOptions &bufferizationOptions,
@@ -97,12 +104,6 @@ public:
return success();
}
// Register the dialects this pass may introduce operations from.
void getDependentDialects(::mlir::DialectRegistry &registry) const override {
  registry.insert<bufferization::BufferizationDialect, gpu::GPUDialect,
                  LLVM::LLVMDialect>();
}
void runOnOperation() override {
{
// Run enabling transformations.
@@ -179,7 +180,42 @@ private:
} // namespace sparse_tensor
} // namespace mlir
std::unique_ptr<Pass> mlir::createSparsificationAndBufferizationPass(
/// Returns the One-Shot Bufferize options used when bufferizing together
/// with sparsification. If `analysisOnly` is true, only the bufferization
/// analysis runs and its conflicts are printed; no IR is rewritten.
mlir::bufferization::OneShotBufferizationOptions
mlir::getBufferizationOptionsForSparsification(bool analysisOnly) {
  using namespace mlir::bufferization;
  OneShotBufferizationOptions result;
  // Function boundaries are bufferized too, using an identity layout map.
  result.bufferizeFunctionBoundaries = true;
  result.setFunctionBoundaryTypeConversion(LayoutMapOption::IdentityLayoutMap);
  // TODO(springerm): To spot memory leaks more easily, returning dense allocs
  // should be disallowed.
  result.allowReturnAllocs = true;
  // Fallback for tensor types not handled elsewhere: a memref with a static
  // identity layout in the given memory space.
  result.unknownTypeConverterFn = [](Value value, Attribute memorySpace,
                                     const BufferizationOptions &options) {
    auto tensorType = cast<TensorType>(value.getType());
    return getMemRefTypeWithStaticIdentityLayout(tensorType, memorySpace);
  };
  if (analysisOnly) {
    result.testAnalysisOnly = true;
    result.printConflicts = true;
  }
  return result;
}
/// Creates the sparsification-and-bufferization mini-pipeline pass with
/// default options (used by the tablegen-registered constructor).
std::unique_ptr<mlir::Pass> mlir::createSparsificationAndBufferizationPass() {
  // Default-constructed option structs and conservative flags everywhere.
  return createSparsificationAndBufferizationPass(
      getBufferizationOptionsForSparsification(/*analysisOnly=*/false),
      SparsificationOptions(), SparseTensorConversionOptions(),
      /*createSparseDeallocs=*/false,
      /*enableRuntimeLibrary=*/false,
      /*enableBufferInitialization=*/false,
      /*vectorLength=*/0,
      /*enableVLAVectorization=*/false,
      /*enableSIMDIndex32=*/false);
}
std::unique_ptr<mlir::Pass> mlir::createSparsificationAndBufferizationPass(
const bufferization::OneShotBufferizationOptions &bufferizationOptions,
const SparsificationOptions &sparsificationOptions,
const SparseTensorConversionOptions &sparseTensorConversionOptions,