[mlir][gpu] Split ops sinking from gpu-kernel-outlining pass into separate pass

Previously, the `gpu-kernel-outlining` pass also sank index computations into the `gpu.launch` body before performing the actual outlining.
This change splits the sinking step out of `gpu-kernel-outlining` into a separate pass, so users can run their own sinking pass before outlining.
To get the old behavior, users now need to run both passes: `-gpu-launch-sink-index-computations -gpu-kernel-outlining`.
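For a C++ pipeline, the equivalent is to schedule the two passes back to back. A minimal sketch, assuming a surrounding tool that already owns an MLIRContext (the `pm`/`context` setup is illustrative; only the two `create*` entry points come from this change):

    #include "mlir/Dialect/GPU/Passes.h"
    #include "mlir/Pass/PassManager.h"

    // Recover the pre-split behavior: sink likely index computations first,
    // then outline gpu.launch bodies into kernel functions.
    mlir::PassManager pm(&context); // `context` is an assumed MLIRContext
    pm.addPass(mlir::createGpuLauchSinkIndexComputationsPass());
    pm.addPass(mlir::createGpuKernelOutliningPass());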

Differential Revision: https://reviews.llvm.org/D119932
Author: Ivan Butygin
Date: 2022-02-16 15:38:14 +03:00
Parent: f165c23bf3
Commit: d271fc04d5

6 changed files with 138 additions and 6 deletions


@@ -23,6 +23,10 @@ class Module;
 } // namespace llvm
 
 namespace mlir {
+/// Pass that moves ops which are likely an index computation into gpu.launch
+/// body.
+std::unique_ptr<Pass> createGpuLauchSinkIndexComputationsPass();
+
 /// Replaces `gpu.launch` with `gpu.launch_func` by moving the region into
 /// a separate kernel function.
 std::unique_ptr<OperationPass<ModuleOp>>


@@ -11,6 +11,12 @@
include "mlir/Pass/PassBase.td"
def GpuLaunchSinkIndexComputations : Pass<"gpu-launch-sink-index-computations"> {
let summary = "Sink index computations into gpu.launch body";
let constructor = "mlir::createGpuLauchSinkIndexComputationsPass()";
let dependentDialects = ["mlir::gpu::GPUDialect"];
}
def GpuKernelOutlining : Pass<"gpu-kernel-outlining", "ModuleOp"> {
let summary = "Outline gpu.launch bodies to kernel functions";
let constructor = "mlir::createGpuKernelOutliningPass()";


@@ -59,7 +59,7 @@ static void injectGpuIndexOperations(Location loc, Region &launchFuncOpBody,
 /// Identifies operations that are beneficial to sink into kernels. These
 /// operations may not have side-effects, as otherwise sinking (and hence
 /// duplicating them) is not legal.
-static bool isLikelyAnIndexComputatio(Operation *op) {
+static bool isLikelyAnIndexComputation(Operation *op) {
   return isa<arith::ConstantOp, ConstantOp, memref::DimOp, arith::SelectOp,
              arith::CmpIOp>(op);
 }
@@ -232,6 +232,26 @@ static void convertToLaunchFuncOp(gpu::LaunchOp launchOp,
 }
 
 namespace {
+/// Pass that moves ops which are likely an index computation into gpu.launch
+/// body.
+class GpuLaunchSinkIndexComputationsPass
+    : public GpuLaunchSinkIndexComputationsBase<
+          GpuLaunchSinkIndexComputationsPass> {
+public:
+  void runOnOperation() override {
+    Operation *op = getOperation();
+    if (op->walk([](gpu::LaunchOp launch) {
+            // Pull in instructions that can be sunk
+            if (failed(sinkOperationsIntoLaunchOp(launch,
+                                                  isLikelyAnIndexComputation)))
+              return WalkResult::interrupt();
+
+            return WalkResult::advance();
+          }).wasInterrupted())
+      signalPassFailure();
+  }
+};
+
 /// Pass that moves the kernel of each LaunchOp into its separate nested module.
 ///
 /// This pass moves the kernel code of each LaunchOp into a function created
@@ -280,9 +300,6 @@ public:
     std::string kernelFnName =
         Twine(op->getParentOfType<FuncOp>().getName(), "_kernel").str();
 
-    // Pull in instructions that can be sunk
-    if (failed(sinkOperationsIntoLaunchOp(op, isLikelyAnIndexComputatio)))
-      return WalkResult::interrupt();
-
     gpu::GPUFuncOp outlinedFunc =
         outlineKernelFuncImpl(op, kernelFnName, operands);
@@ -360,6 +377,10 @@ private:
 } // namespace
 
+std::unique_ptr<Pass> mlir::createGpuLauchSinkIndexComputationsPass() {
+  return std::make_unique<GpuLaunchSinkIndexComputationsPass>();
+}
+
 std::unique_ptr<OperationPass<ModuleOp>>
 mlir::createGpuKernelOutliningPass(StringRef dataLayoutStr) {
   return std::make_unique<GpuKernelOutliningPass>(dataLayoutStr);
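
Because the sinking utility takes the predicate as an argument (see `sinkOperationsIntoLaunchOp(launch, isLikelyAnIndexComputation)` above), a downstream pass can supply its own notion of what is worth duplicating into the kernel, which is the point of the split. A minimal sketch; the predicate name and the pass around it are hypothetical, and per the comment on `isLikelyAnIndexComputation` the callback must only accept side-effect-free ops:

    // Hypothetical stricter predicate: only sink constants into the
    // gpu.launch body, nothing else.
    static bool sinkOnlyConstants(Operation *op) {
      return isa<arith::ConstantOp>(op);
    }

    // Inside a custom pass's runOnOperation(), mirroring the walk in
    // GpuLaunchSinkIndexComputationsPass above:
    getOperation()->walk([&](gpu::LaunchOp launch) {
      if (failed(sinkOperationsIntoLaunchOp(launch, sinkOnlyConstants)))
        signalPassFailure();
    });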


@@ -11,6 +11,7 @@
#include "mlir/Dialect/Async/IR/Async.h"
#include "mlir/Dialect/DLTI/DLTI.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Pass/Pass.h"
namespace mlir {


@@ -1,5 +1,5 @@
-// RUN: mlir-opt -allow-unregistered-dialect -gpu-kernel-outlining -split-input-file -verify-diagnostics %s | FileCheck %s
-// RUN: mlir-opt -allow-unregistered-dialect -gpu-kernel-outlining=data-layout-str='#dlti.dl_spec<#dlti.dl_entry<index,32:i32>>' -split-input-file %s | FileCheck --check-prefix CHECK-DL %s
+// RUN: mlir-opt -allow-unregistered-dialect -gpu-launch-sink-index-computations -gpu-kernel-outlining -split-input-file -verify-diagnostics %s | FileCheck %s
+// RUN: mlir-opt -allow-unregistered-dialect -gpu-launch-sink-index-computations -gpu-kernel-outlining=data-layout-str='#dlti.dl_spec<#dlti.dl_entry<index,32:i32>>' -split-input-file %s | FileCheck --check-prefix CHECK-DL %s
 
 // CHECK: module attributes {gpu.container_module}


@@ -0,0 +1,100 @@
// RUN: mlir-opt -allow-unregistered-dialect -gpu-launch-sink-index-computations -split-input-file -verify-diagnostics %s | FileCheck %s

// CHECK-LABEL: @extra_constants
// CHECK-SAME: %[[ARG0:.*]]: memref<?xf32>
func @extra_constants(%arg0: memref<?xf32>) {
  %cst = arith.constant 8 : index
  %cst2 = arith.constant 2 : index
  %c0 = arith.constant 0 : index
  %cst3 = memref.dim %arg0, %c0 : memref<?xf32>
  // CHECK: gpu.launch blocks
  gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %cst, %grid_y = %cst,
                                       %grid_z = %cst)
             threads(%tx, %ty, %tz) in (%block_x = %cst, %block_y = %cst,
                                        %block_z = %cst) {
    // CHECK-NOT: arith.constant 8
    // CHECK: %[[CST2:.*]] = arith.constant 2
    // CHECK-NEXT: %[[CST0:.*]] = arith.constant 0
    // CHECK-NEXT: %[[DIM:.*]] = memref.dim %[[ARG0]], %[[CST0]]
    // CHECK-NEXT: "use"(%[[CST2]], %[[ARG0]], %[[DIM]]) : (index, memref<?xf32>, index) -> ()
    // CHECK-NEXT: gpu.terminator
    "use"(%cst2, %arg0, %cst3) : (index, memref<?xf32>, index) -> ()
    gpu.terminator
  }
  return
}

// -----

// CHECK-LABEL: @extra_constants_not_inlined
// CHECK-SAME: %[[ARG0:.*]]: memref<?xf32>
func @extra_constants_not_inlined(%arg0: memref<?xf32>) {
  %cst = arith.constant 8 : index
  %cst2 = arith.constant 2 : index
  %c0 = arith.constant 0 : index
  // CHECK: %[[CST_X:.*]] = "secret_constant"()
  %cst3 = "secret_constant"() : () -> index
  // CHECK: gpu.launch blocks
  gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %cst, %grid_y = %cst,
                                       %grid_z = %cst)
             threads(%tx, %ty, %tz) in (%block_x = %cst, %block_y = %cst,
                                        %block_z = %cst) {
    // CHECK-NOT: arith.constant 8
    // CHECK-NOT: "secret_constant"()
    // CHECK: %[[CST2:.*]] = arith.constant 2
    // CHECK-NEXT: "use"(%[[CST2]], %[[ARG0]], %[[CST_X]]) : (index, memref<?xf32>, index) -> ()
    // CHECK-NEXT: gpu.terminator
    "use"(%cst2, %arg0, %cst3) : (index, memref<?xf32>, index) -> ()
    gpu.terminator
  }
  return
}

// -----

// CHECK-LABEL: @multiple_uses
func @multiple_uses(%arg0 : memref<?xf32>) {
  %c1 = arith.constant 1 : index
  %c2 = arith.constant 2 : index
  // CHECK: gpu.launch blocks
  gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c1, %grid_y = %c1,
                                       %grid_z = %c1)
             threads(%tx, %ty, %tz) in (%block_x = %c1, %block_y = %c1,
                                        %block_z = %c1) {
    // CHECK: %[[C2:.*]] = arith.constant 2
    // CHECK-NEXT: "use1"(%[[C2]], %[[C2]])
    // CHECK-NEXT: "use2"(%[[C2]])
    // CHECK-NEXT: gpu.terminator
    "use1"(%c2, %c2) : (index, index) -> ()
    "use2"(%c2) : (index) -> ()
    gpu.terminator
  }
  return
}

// -----

// CHECK-LABEL: @multiple_uses2
func @multiple_uses2(%arg0 : memref<*xf32>) {
  %c1 = arith.constant 1 : index
  %c2 = arith.constant 2 : index
  %d = memref.dim %arg0, %c2 : memref<*xf32>
  // CHECK: gpu.launch blocks
  gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c1, %grid_y = %c1,
                                       %grid_z = %c1)
             threads(%tx, %ty, %tz) in (%block_x = %c1, %block_y = %c1,
                                        %block_z = %c1) {
    // CHECK: %[[C2:.*]] = arith.constant 2 : index
    // CHECK: %[[D:.*]] = memref.dim %[[ARG:.*]], %[[C2]]
    // CHECK: "use1"(%[[D]])
    // CHECK: "use2"(%[[C2]], %[[C2]])
    // CHECK: "use3"(%[[ARG]])
    // CHECK: gpu.terminator
    "use1"(%d) : (index) -> ()
    "use2"(%c2, %c2) : (index, index) -> ()
    "use3"(%arg0) : (memref<*xf32>) -> ()
    gpu.terminator
  }
  return
}