mirror of
https://github.com/intel/llvm.git
synced 2026-02-05 13:21:04 +08:00
Introduce analysis to check memref accesses (in MLFunctions) for out of bound ones. It works as follows: $ mlir-opt -memref-bound-check test/Transforms/memref-bound-check.mlir /tmp/single.mlir:10:12: error: 'load' op memref out of upper bound access along dimension tensorflow/mlir#1 %x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32> ^ /tmp/single.mlir:10:12: error: 'load' op memref out of lower bound access along dimension tensorflow/mlir#1 %x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32> ^ /tmp/single.mlir:10:12: error: 'load' op memref out of upper bound access along dimension tensorflow/mlir#2 %x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32> ^ /tmp/single.mlir:10:12: error: 'load' op memref out of lower bound access along dimension tensorflow/mlir#2 %x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32> ^ /tmp/single.mlir:12:12: error: 'load' op memref out of upper bound access along dimension tensorflow/mlir#1 %y = load %B[%idy] : memref<128 x i32> ^ /tmp/single.mlir:12:12: error: 'load' op memref out of lower bound access along dimension tensorflow/mlir#1 %y = load %B[%idy] : memref<128 x i32> ^ #map0 = (d0, d1) -> (d0, d1) #map1 = (d0, d1) -> (d0 * 128 - d1) mlfunc @test() { %0 = alloc() : memref<9x9xi32> %1 = alloc() : memref<128xi32> for %i0 = -1 to 9 { for %i1 = -1 to 9 { %2 = affine_apply #map0(%i0, %i1) %3 = load %0[%2tensorflow/mlir#0, %2tensorflow/mlir#1] : memref<9x9xi32> %4 = affine_apply #map1(%i0, %i1) %5 = load %1[%4] : memref<128xi32> } } return } - Improves productivity while manually / semi-automatically developing MLIR for testing / prototyping; also provides an indirect way to catch errors in transformations. - This pass is an easy way to test the underlying affine analysis machinery including low level routines. Some code (in getMemoryRegion()) borrowed from @andydavis cl/218263256. 
While on this: - create mlir/Analysis/Passes.h; move Pass.h up from mlir/Transforms/ to mlir/ - fix a bug in AffineAnalysis.cpp::toAffineExpr TODO: extend to non-constant loop bounds (straightforward). Will transparently work for all accesses once floordiv, mod, ceildiv are supported in the AffineMap -> FlatAffineConstraints conversion. PiperOrigin-RevId: 219397961
95 lines
3.2 KiB
C++
95 lines
3.2 KiB
C++
//===- ComposeAffineMaps.cpp - MLIR Affine Transform Class-----*- C++ -*-===//
|
|
//
|
|
// Copyright 2019 The MLIR Authors.
|
|
//
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
// you may not use this file except in compliance with the License.
|
|
// You may obtain a copy of the License at
|
|
//
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
//
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
// See the License for the specific language governing permissions and
|
|
// limitations under the License.
|
|
// =============================================================================
|
|
//
|
|
// This file implements a testing pass which composes affine maps from
|
|
// AffineApplyOps in an MLFunction, by forward substituting results from an
|
|
// AffineApplyOp into any of its users which are also AffineApplyOps.
|
|
//
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
#include "mlir/IR/AffineMap.h"
|
|
#include "mlir/IR/Attributes.h"
|
|
#include "mlir/IR/Builders.h"
|
|
#include "mlir/IR/BuiltinOps.h"
|
|
#include "mlir/IR/StmtVisitor.h"
|
|
#include "mlir/Pass.h"
|
|
#include "mlir/StandardOps/StandardOps.h"
|
|
#include "mlir/Transforms/Passes.h"
|
|
#include "mlir/Transforms/Utils.h"
|
|
#include "llvm/Support/CommandLine.h"
|
|
|
|
using namespace mlir;
|
|
|
|
namespace {
|
|
|
|
// ComposeAffineMaps walks stmt blocks in an MLFunction, and for each
|
|
// AffineApplyOp, forward substitutes its results into any users which are
|
|
// also AffineApplyOps. After forward substituting its results, AffineApplyOps
|
|
// with no remaining uses are collected and erased after the walk.
|
|
// TODO(andydavis) Remove this when Chris adds instruction combiner pass.
|
|
struct ComposeAffineMaps : public FunctionPass, StmtWalker<ComposeAffineMaps> {
  using StmtListType = llvm::iplist<Statement>;
  // Inherit the generic statement-walking entry points alongside the
  // iterator-range overload declared below.
  using StmtWalker<ComposeAffineMaps>::walk;

  // AffineApplyOps whose results were fully forward substituted during the
  // walk; they are collected here and erased in bulk afterwards, since the
  // statement list cannot be safely mutated mid-walk.
  std::vector<OperationStmt *> affineApplyOpsToErase;

  explicit ComposeAffineMaps() {}

  // Walks the half-open statement range [Start, End).
  void walk(StmtListType::iterator Start, StmtListType::iterator End);
  // Visits a single operation statement, forward substituting if it is an
  // AffineApplyOp.
  void visitOperationStmt(OperationStmt *stmt);
  // Pass entry point: runs the walk over 'f' and erases dead AffineApplyOps.
  PassResult runOnMLFunction(MLFunction *f) override;
};
|
|
|
|
} // end anonymous namespace
|
|
|
|
/// Factory for the affine-map composition testing pass; the caller takes
/// ownership of the returned pass.
FunctionPass *mlir::createComposeAffineMapsPass() {
  auto *pass = new ComposeAffineMaps();
  return pass;
}
|
|
|
|
// Walks the half-open statement range [Start, End), dispatching each
// statement to the StmtWalker visitor.
void ComposeAffineMaps::walk(StmtListType::iterator Start,
                             StmtListType::iterator End) {
  // The iterator is advanced only after the statement has been visited,
  // because the visit function may mutate the statement list ahead of
  // 'Start'.
  for (; Start != End; ++Start)
    walk(&(*Start));
}
|
|
|
|
// If 'opStmt' is an AffineApplyOp, forward substitutes its results into any
// AffineApplyOp users. When that leaves every result unused, the op is
// queued on 'affineApplyOpsToErase' for deletion after the walk.
void ComposeAffineMaps::visitOperationStmt(OperationStmt *opStmt) {
  auto affineApplyOp = opStmt->dyn_cast<AffineApplyOp>();
  if (!affineApplyOp)
    return;

  forwardSubstitute(affineApplyOp);

  // Bail out if any result still has uses; the op must stay alive.
  for (auto *result : affineApplyOp->getOperation()->getResults())
    if (!result->use_empty())
      return;

  // All results are dead: safe to erase once the walk finishes.
  affineApplyOpsToErase.push_back(opStmt);
}
|
|
|
|
// Pass entry point: walks 'f' composing affine maps, then erases every
// AffineApplyOp that was left without uses.
PassResult ComposeAffineMaps::runOnMLFunction(MLFunction *f) {
  // Drop any state left over from a previous function.
  affineApplyOpsToErase.clear();

  walk(f);

  // Deletion is deferred to here so the walk never invalidates statements
  // it has yet to visit.
  for (auto *deadOpStmt : affineApplyOpsToErase)
    deadOpStmt->erase();

  return success();
}
|