Refactor dependencies to expose Vector transformations as patterns - NFC
This CL refactors some of the MLIR vector dependencies to allow decoupling VectorOps, vector analysis, vector transformations and vector conversions from each other. This makes the system more modular and allows extracting VectorToVector into VectorTransforms that do not depend on vector conversions. The refactoring exposed a number of cyclic library dependencies, which have been cleaned up.

PiperOrigin-RevId: 283660308
Committed by: A. Unique TensorFlower
Parent: 50b2b26e70
Commit: 5c0c51a997
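Illustrative sketch (not part of this commit): after the split, a pass can mix the decoupled pattern sets by including only the headers it needs, without pulling in the LLVM conversion library. The populate* entry points, FunctionPass, and OwningRewritePatternList usage are taken from the headers and test passes in this diff; the pass name and the applyPatternsGreedily driver call are assumptions based on the MLIR API of that era.

#include "mlir/Conversion/VectorToLoops/ConvertVectorToLoops.h"
#include "mlir/Dialect/VectorOps/VectorTransforms.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"

using namespace mlir;

namespace {
// Hypothetical pass combining the now-separate Vector->Vector and
// Vector->loops pattern sets.
struct ExampleVectorLoweringPass
    : public FunctionPass<ExampleVectorLoweringPass> {
  void runOnFunction() override {
    OwningRewritePatternList patterns;
    auto *context = &getContext();
    // From mlir/Dialect/VectorOps/VectorTransforms.h (no conversion deps).
    populateVectorToVectorConversionPatterns(context, patterns);
    // From mlir/Conversion/VectorToLoops/ConvertVectorToLoops.h.
    populateVectorToAffineLoopsConversionPatterns(context, patterns);
    // Assumed greedy rewrite driver entry point of the time.
    applyPatternsGreedily(getFunction(), patterns);
  }
};
} // end anonymous namespace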
@@ -31,8 +31,9 @@ namespace mlir {
class AffineExpr;
class AffineForOp;
class AffineMap;
class Operation;
class MemRefType;
class NestedPattern;
class Operation;
class Value;

/// Returns the trip count of the loop as an affine map with its corresponding
@@ -91,14 +92,16 @@ using VectorizableLoopFun = std::function<bool(AffineForOp)>;
/// 1. no conditionals are nested under the loop;
/// 2. all nested load/stores are to scalar MemRefs.
/// TODO(ntv): relax the no-conditionals restriction.
bool isVectorizableLoopBody(AffineForOp loop);
bool isVectorizableLoopBody(AffineForOp loop,
                            NestedPattern &vectorTransferMatcher);

/// Checks whether the loop is structurally vectorizable and that all the LoadOp
/// and StoreOp matched have access indexing functions that are either:
/// 1. invariant along the loop induction variable created by 'loop';
/// 2. varying along at most one memory dimension. If such a unique dimension
///    is found, it is written into `memRefDim`.
bool isVectorizableLoopBody(AffineForOp loop, int *memRefDim);
bool isVectorizableLoopBody(AffineForOp loop, int *memRefDim,
                            NestedPattern &vectorTransferMatcher);

/// Checks whether SSA dominance would be violated if a for op's body
/// operations are shifted by the specified shifts. This method checks if a
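Illustrative sketch (not part of the diff): callers of the new isVectorizableLoopBody overloads now supply the vector-transfer matcher explicitly, which is what lets the loop analysis drop its dependence on the VectorOps dialect. The matcher body mirrors the vectorTransferPattern() helper added to Vectorize.cpp later in this commit; the header paths are assumptions, since the diff does not name the files.

#include "mlir/Analysis/LoopAnalysis.h"   // assumed location of the declarations above
#include "mlir/Analysis/NestedMatcher.h"
#include "mlir/Dialect/VectorOps/VectorOps.h"

using namespace mlir;

// Returns true if `loop` is vectorizable; if a unique varying memory dimension
// exists, it is written into `memRefDim`.
static bool canVectorize(AffineForOp loop, int *memRefDim) {
  // The caller now owns the matcher for vector.transfer_read/write ops.
  auto pattern = matcher::Op([](Operation &op) {
    return isa<vector::TransferReadOp>(op) || isa<vector::TransferWriteOp>(op);
  });
  return isVectorizableLoopBody(loop, memRefDim, pattern);
}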
@@ -1,4 +1,4 @@
//===- VectorConversions.h - Utils to convert from the vector dialect -----===//
//===- ConvertVectorToLLVM.h - Utils to convert from the vector dialect ---===//
//
// Copyright 2019 The MLIR Authors.
//
@@ -14,31 +14,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
#ifndef MLIR_CONVERSION_VECTORCONVERSIONS_VECTORCONVERSIONS_H_
#define MLIR_CONVERSION_VECTORCONVERSIONS_VECTORCONVERSIONS_H_
#ifndef MLIR_CONVERSION_VECTORTOLLVM_CONVERTVECTORTOLLVM_H_
#define MLIR_CONVERSION_VECTORTOLLVM_CONVERTVECTORTOLLVM_H_

#include "mlir/Transforms/DialectConversion.h"

namespace mlir {
class LLVMTypeConverter;
class MLIRContext;
class ModuleOp;
template <typename T> class OpPassBase;

/// Collect a set of patterns to convert from the Vector dialect to affine loops
/// surrounding ops in different dialects (vector, std etc).
/// This is the general place where we want to implement Vector -> Vector and
/// Vector -> Std legalizations.
void populateVectorToAffineLoopsConversionPatterns(
    MLIRContext *context, OwningRewritePatternList &patterns);

/// Collect a set of patterns to convert from the Vector dialect to itself.
/// Should be merged with populateVectorToAffineLoopsConversionPatterns.
void populateVectorToVectorConversionPatterns(
    MLIRContext *context, OwningRewritePatternList &patterns,
    ArrayRef<int64_t> coarseVectorShape = {},
    ArrayRef<int64_t> fineVectorShape = {});

/// Collect a set of patterns to convert from the Vector dialect to LLVM.
void populateVectorToLLVMConversionPatterns(LLVMTypeConverter &converter,
                                            OwningRewritePatternList &patterns);
@@ -48,4 +33,4 @@ OpPassBase<ModuleOp> *createLowerVectorToLLVMPass();

} // namespace mlir

#endif // MLIR_CONVERSION_VECTORCONVERSIONS_VECTORCONVERSIONS_H_
#endif // MLIR_CONVERSION_VECTORTOLLVM_CONVERTVECTORTOLLVM_H_
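Illustrative sketch (not part of the diff): wiring the renamed Vector-to-LLVM entry point into a dialect conversion. Only populateVectorToLLVMConversionPatterns and the forward-declared LLVMTypeConverter come from the header above; the std-to-LLVM populate helper, ConversionTarget setup, and applyPartialConversion signature are assumptions about the surrounding MLIR API of that era.

#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/IR/Module.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;

static LogicalResult lowerVectorModuleToLLVM(ModuleOp module) {
  LLVMTypeConverter converter(module.getContext());
  OwningRewritePatternList patterns;
  // Entry point declared in ConvertVectorToLLVM.h above.
  populateVectorToLLVMConversionPatterns(converter, patterns);
  // Assumed std->LLVM helper so the example forms a complete lowering.
  populateStdToLLVMConversionPatterns(converter, patterns);

  ConversionTarget target(*module.getContext());
  target.addLegalDialect<LLVM::LLVMDialect>();
  return applyPartialConversion(module.getOperation(), target, patterns,
                                &converter);
}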
@@ -0,0 +1,36 @@
//===- ConvertVectorToLoops.h - Utils to convert from the vector dialect --===//
//
// Copyright 2019 The MLIR Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
#ifndef MLIR_CONVERSION_VECTORTOLLVM_CONVERTVECTORTOLOOPS_H_
#define MLIR_CONVERSION_VECTORTOLLVM_CONVERTVECTORTOLOOPS_H_

#include "mlir/Transforms/DialectConversion.h"

namespace mlir {
class MLIRContext;
class ModuleOp;
template <typename T> class OpPassBase;

/// Collect a set of patterns to convert from the Vector dialect to loops + std.
void populateVectorToAffineLoopsConversionPatterns(
    MLIRContext *context, OwningRewritePatternList &patterns);

/// Create a pass to convert vector operations to affine loops + std dialect.
OpPassBase<ModuleOp> *createLowerVectorToLoopsPass();

} // namespace mlir

#endif // MLIR_CONVERSION_VECTORTOLLVM_CONVERTVECTORTOLOOPS_H_
@@ -1,4 +1,4 @@
//===- VectorAnalysis.h - Analysis for Vectorization -------*- C++ -*-=======//
//===- Utils.h - VectorOps Utils ----------------------------*- C++ -*-=======//
//
// Copyright 2019 The MLIR Authors.
//
@@ -15,8 +15,8 @@
// limitations under the License.
// =============================================================================

#ifndef MLIR_ANALYSIS_VECTORANALYSIS_H_
#define MLIR_ANALYSIS_VECTORANALYSIS_H_
#ifndef MLIR_DIALECT_VECTOROPS_UTILS_H_
#define MLIR_DIALECT_VECTOROPS_UTILS_H_

#include "mlir/Support/LLVM.h"
@@ -140,4 +140,4 @@ bool operatesOnSuperVectorsOf(Operation &op, VectorType subVectorType);
} // end namespace matcher
} // end namespace mlir

#endif // MLIR_ANALYSIS_VECTORANALYSIS_H_
#endif // MLIR_DIALECT_VECTOROPS_UTILS_H_
mlir/include/mlir/Dialect/VectorOps/VectorTransforms.h (new file, 82 lines)
@@ -0,0 +1,82 @@
//===- VectorTransforms.h - Vector transformations as patterns --*- C++ -*-===//
//
// Copyright 2019 The MLIR Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================

#ifndef DIALECT_VECTOROPS_VECTORTRANSFORMS_H_
#define DIALECT_VECTOROPS_VECTORTRANSFORMS_H_

#include "mlir/IR/PatternMatch.h"

namespace mlir {
class MLIRContext;
class OwningRewritePatternList;

/// Collect a set of patterns to convert from the Vector dialect to itself.
/// Should be merged with populateVectorToAffineLoopsConversionPatterns.
void populateVectorToVectorConversionPatterns(
    MLIRContext *context, OwningRewritePatternList &patterns,
    ArrayRef<int64_t> coarseVectorShape = {},
    ArrayRef<int64_t> fineVectorShape = {});

////////////////////////////////////////////////////////////////////////////////
// The following Declarative Rewrite Rule (DRR) helpers are used in rewrite
// patterns. As such, they must not call into `rewriter.erase/replace` APIs and
// it is the responsibility of the enclosing PatternRewriter to erase on
// success.
////////////////////////////////////////////////////////////////////////////////

namespace vector {

// Entry point for unrolling declarative pattern rewrites.
// `op` is unrolled to the `targetShape` as follows, for each of its operands:
//   1. the unrolled type `unrolledVectorType` and number of unrolled instances
//      `numUnrolledInstances` are computed from the `targetShape`. For now it is
//      assumed the unrolling factors divide the vector sizes.
//   2. a fakeFork cast op is inserted that takes the operand and returns
//      `numUnrolledInstances` results of type `unrolledVectorType`.
//   3. the original op is cloned `numUnrolledInstances` times, once for each
//      result of the fakeFork cast op.
//   4. a fakeJoin cast op takes all these results and merges them into a single
//      aggregate vector result whose size matches the original non-unrolled op
//      operand types.
//
// Example:
//
//    opA(operand0, operand1)  // numUnrolledInstances = 3
//
//            operand0                   operand1
//               |                          |
//             fork                       fork
//        <----------gather all fork ops --------->
//              /|\                        /|\
//          f00 f01 f02               f10 f11 f12
//        <---------- clone op 3 times --------->
//          opA0(f00, f10), opA1(f01, f11), opA2(f02, f12)
//                 \            |            /
//      <-------------------- join ------------------------->
//
// Other local patterns then kick in iteratively (including DCE) and compose
// until all the fakeFork and fakeJoin ops are removed.
//
// This will be extended in the future to support more advanced use cases than
// simple pointwise ops.
Value *unrollSingleResultOpMatchingType(PatternRewriter &builder, Operation *op,
                                        ArrayRef<int64_t> targetShape);

} // namespace vector
} // namespace mlir

#endif // DIALECT_VECTOROPS_VECTORTRANSFORMS_H_
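Illustrative sketch (not part of the diff): the DRR helper above is meant to be called from inside an enclosing rewrite, which then performs the actual replacement. The op name and the 2x2 target shape below are hypothetical, and the PatternMatchResult/matchSuccess API reflects the rewrite-pattern interface assumed for MLIR at the time.

#include "mlir/Dialect/VectorOps/VectorTransforms.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir;

// Hypothetical pattern that unrolls a single-result pointwise op to 2x2 tiles.
struct UnrollToTargetShape : public RewritePattern {
  explicit UnrollToTargetShape(MLIRContext *context)
      : RewritePattern(/*rootName=*/"test.pointwise_op", /*benefit=*/1,
                       context) {}

  PatternMatchResult matchAndRewrite(Operation *op,
                                     PatternRewriter &rewriter) const override {
    // The helper only builds the fakeFork/fakeJoin structure and returns the
    // joined value (assumed null on failure); erasing `op` is this pattern's
    // responsibility, per the DRR note above.
    Value *unrolled = vector::unrollSingleResultOpMatchingType(
        rewriter, op, /*targetShape=*/{2, 2});
    if (!unrolled)
      return matchFailure();
    rewriter.replaceOp(op, ArrayRef<Value *>{unrolled});
    return matchSuccess();
  }
};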
@@ -26,7 +26,6 @@
#include "mlir/Dialect/AffineOps/AffineOps.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/StandardOps/Ops.h"
#include "mlir/Dialect/VectorOps/VectorOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/Transforms/FoldUtils.h"
@@ -215,7 +215,6 @@ using select = ValueBuilder<SelectOp>;
using std_load = ValueBuilder<LoadOp>;
using std_store = OperationBuilder<StoreOp>;
using subi = ValueBuilder<SubIOp>;
using vector_type_cast = ValueBuilder<vector::TypeCastOp>;
using view = ValueBuilder<ViewOp>;

/// Branches into the mlir::Block* captured by BlockHandle `b` with `operands`.
@@ -25,7 +25,6 @@
#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/NestedMatcher.h"
#include "mlir/Dialect/AffineOps/AffineOps.h"
#include "mlir/Dialect/VectorOps/VectorOps.h"
#include "mlir/Support/MathExtras.h"

#include "llvm/ADT/DenseSet.h"
@@ -273,15 +272,12 @@ static bool isVectorElement(LoadOrStoreOpPointer memoryOp) {
  return memRefType.getElementType().template isa<VectorType>();
}

static bool isVectorTransferReadOrWrite(Operation &op) {
  return isa<vector::TransferReadOp>(op) || isa<vector::TransferWriteOp>(op);
}

using VectorizableOpFun = std::function<bool(AffineForOp, Operation &)>;

static bool
isVectorizableLoopBodyWithOpCond(AffineForOp loop,
                                 VectorizableOpFun isVectorizableOp) {
                                 VectorizableOpFun isVectorizableOp,
                                 NestedPattern &vectorTransferMatcher) {
  auto *forOp = loop.getOperation();

  // No vectorization across conditionals for now.
@@ -303,9 +299,8 @@ isVectorizableLoopBodyWithOpCond(AffineForOp loop,
    return false;
  }

  auto vectorTransfers = matcher::Op(isVectorTransferReadOrWrite);
  SmallVector<NestedMatch, 8> vectorTransfersMatched;
  vectorTransfers.match(forOp, &vectorTransfersMatched);
  vectorTransferMatcher.match(forOp, &vectorTransfersMatched);
  if (!vectorTransfersMatched.empty()) {
    return false;
  }
@@ -331,18 +326,20 @@ isVectorizableLoopBodyWithOpCond(AffineForOp loop,
  return true;
}

bool mlir::isVectorizableLoopBody(AffineForOp loop, int *memRefDim) {
bool mlir::isVectorizableLoopBody(AffineForOp loop, int *memRefDim,
                                  NestedPattern &vectorTransferMatcher) {
  VectorizableOpFun fun([memRefDim](AffineForOp loop, Operation &op) {
    auto load = dyn_cast<AffineLoadOp>(op);
    auto store = dyn_cast<AffineStoreOp>(op);
    return load ? isContiguousAccess(loop.getInductionVar(), load, memRefDim)
                : isContiguousAccess(loop.getInductionVar(), store, memRefDim);
  });
  return isVectorizableLoopBodyWithOpCond(loop, fun);
  return isVectorizableLoopBodyWithOpCond(loop, fun, vectorTransferMatcher);
}

bool mlir::isVectorizableLoopBody(AffineForOp loop) {
  return isVectorizableLoopBodyWithOpCond(loop, nullptr);
bool mlir::isVectorizableLoopBody(AffineForOp loop,
                                  NestedPattern &vectorTransferMatcher) {
  return isVectorizableLoopBodyWithOpCond(loop, nullptr, vectorTransferMatcher);
}

/// Checks whether SSA dominance would be violated if a for op's body
@@ -20,7 +20,6 @@
//===----------------------------------------------------------------------===//

#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Analysis/VectorAnalysis.h"
#include "mlir/Dialect/AffineOps/AffineOps.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/IR/Function.h"
@@ -15,11 +15,11 @@
// limitations under the License.
// =============================================================================

#include "mlir/Analysis/VectorAnalysis.h"
#include "mlir/Analysis/AffineAnalysis.h"
#include "mlir/Analysis/LoopAnalysis.h"
#include "mlir/Dialect/AffineOps/AffineOps.h"
#include "mlir/Dialect/StandardOps/Ops.h"
#include "mlir/Dialect/VectorOps/Utils.h"
#include "mlir/Dialect/VectorOps/VectorOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/IntegerSet.h"
@@ -8,4 +8,5 @@ add_subdirectory(LoopsToGPU)
add_subdirectory(LoopToStandard)
add_subdirectory(StandardToLLVM)
add_subdirectory(StandardToSPIRV)
add_subdirectory(VectorConversions)
add_subdirectory(VectorToLLVM)
add_subdirectory(VectorToLoops)
@@ -20,7 +20,7 @@
#include "mlir/Conversion/LoopToStandard/ConvertLoopToStandard.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Conversion/VectorConversions/VectorConversions.h"
#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
@@ -1,18 +0,0 @@
add_llvm_library(MLIRVectorConversions
  VectorToLLVM.cpp
  VectorToLoops.cpp
  VectorToVector.cpp

  ADDITIONAL_HEADER_DIRS
  ${MLIR_MAIN_INCLUDE_DIR}/mlir/Conversion/VectorConversions
  )
set(LIBS
  MLIRLLVMIR
  MLIRTransforms
  LLVMCore
  LLVMSupport
  )

add_dependencies(MLIRVectorConversions ${LIBS})
add_dependencies(MLIRVectorConversions MLIRVectorTransformPatternsIncGen)
target_link_libraries(MLIRVectorConversions ${LIBS})
mlir/lib/Conversion/VectorToLLVM/CMakeLists.txt (new file, 15 lines)
@@ -0,0 +1,15 @@
add_llvm_library(MLIRVectorToLLVM
  ConvertVectorToLLVM.cpp

  ADDITIONAL_HEADER_DIRS
  ${MLIR_MAIN_INCLUDE_DIR}/mlir/Conversion/VectorToLLVM
  )
set(LIBS
  MLIRLLVMIR
  MLIRTransforms
  LLVMCore
  LLVMSupport
  )

add_dependencies(MLIRVectorToLLVM ${LIBS})
target_link_libraries(MLIRVectorToLLVM ${LIBS})
@@ -17,7 +17,7 @@

#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Conversion/VectorConversions/VectorConversions.h"
#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/VectorOps/VectorOps.h"
#include "mlir/IR/Attributes.h"
mlir/lib/Conversion/VectorToLoops/CMakeLists.txt (new file, 15 lines)
@@ -0,0 +1,15 @@
add_llvm_library(MLIRVectorToLoops
  ConvertVectorToLoops.cpp

  ADDITIONAL_HEADER_DIRS
  ${MLIR_MAIN_INCLUDE_DIR}/mlir/Conversion/VectorToLoops
  )
set(LIBS
  MLIRLLVMIR
  MLIRTransforms
  LLVMCore
  LLVMSupport
  )

add_dependencies(MLIRVectorToLoops ${LIBS})
target_link_libraries(MLIRVectorToLoops ${LIBS})
@@ -21,8 +21,8 @@

#include <type_traits>

#include "mlir/Conversion/VectorConversions/VectorConversions.h"
#include "mlir/Dialect/VectorOps/VectorOps.h"
#include "mlir/Conversion/VectorToLoops/ConvertVectorToLoops.h"
#include "mlir/EDSC/Builders.h"
#include "mlir/EDSC/Helpers.h"
#include "mlir/IR/AffineExpr.h"
@@ -41,6 +41,8 @@ using vector::TransferWriteOp;

namespace {

using vector_type_cast = edsc::intrinsics::ValueBuilder<vector::TypeCastOp>;

/// Implements lowering of TransferReadOp and TransferWriteOp to a
/// proper abstraction for the hardware.
///
@@ -356,7 +358,6 @@ PatternMatchResult VectorTransferRewriter<TransferWriteOp>::matchAndRewrite(
}
} // namespace

/// Populate the given list with patterns that convert from Vector to LLVM.
void mlir::populateVectorToAffineLoopsConversionPatterns(
    MLIRContext *context, OwningRewritePatternList &patterns) {
  patterns.insert<VectorTransferRewriter<vector::TransferReadOp>,
@@ -1,11 +1,13 @@
add_llvm_library(MLIRVectorOps
  DialectRegistration.cpp
  VectorOps.cpp
  VectorToVector.cpp

  ADDITIONAL_HEADER_DIRS
  ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/VectorOps
  )

add_dependencies(MLIRVectorOps MLIRVectorOpsIncGen)
add_dependencies(MLIRVectorOps MLIRVectorTransformPatternsIncGen)

target_link_libraries(MLIRVectorOps MLIRIR)
@@ -21,10 +21,9 @@

#include <type_traits>

#include "mlir/Analysis/VectorAnalysis.h"
#include "mlir/Conversion/VectorConversions/VectorConversions.h"
#include "mlir/Dialect/StandardOps/Ops.h"
#include "mlir/Dialect/VectorOps/Utils.h"
#include "mlir/Dialect/VectorOps/VectorOps.h"
#include "mlir/Dialect/VectorOps/VectorTransforms.h"
#include "mlir/EDSC/Builders.h"
#include "mlir/EDSC/Helpers.h"
#include "mlir/IR/AffineExpr.h"
@@ -198,7 +197,7 @@ static bool hasShape(Value *v, ArrayRef<int64_t> shape) {
//
// This will be extended in the future to support more advanced use cases than
// simple pointwise ops.
static Value *unrollSingleResultOpMatchingType(PatternRewriter &builder,
Value *mlir::vector::unrollSingleResultOpMatchingType(PatternRewriter &builder,
                                                      Operation *op,
                                                      ArrayRef<int64_t> targetShape) {
  LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE
@@ -16,7 +16,6 @@
// =============================================================================

#include "mlir/EDSC/Intrinsics.h"
#include "mlir/Dialect/VectorOps/VectorOps.h"
#include "mlir/EDSC/Builders.h"
#include "mlir/IR/AffineExpr.h"
@@ -26,9 +26,9 @@
#include "mlir/Analysis/NestedMatcher.h"
#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Analysis/Utils.h"
#include "mlir/Analysis/VectorAnalysis.h"
#include "mlir/Dialect/AffineOps/AffineOps.h"
#include "mlir/Dialect/StandardOps/Ops.h"
#include "mlir/Dialect/VectorOps/Utils.h"
#include "mlir/Dialect/VectorOps/VectorOps.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
@@ -24,9 +24,9 @@
#include "mlir/Analysis/NestedMatcher.h"
#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Analysis/Utils.h"
#include "mlir/Analysis/VectorAnalysis.h"
#include "mlir/Dialect/AffineOps/AffineOps.h"
#include "mlir/Dialect/StandardOps/Ops.h"
#include "mlir/Dialect/VectorOps/Utils.h"
#include "mlir/Dialect/VectorOps/VectorOps.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/Builders.h"
@@ -589,6 +589,13 @@ makePatterns(const llvm::DenseSet<Operation *> &parallelLoops, int vectorRank,
  }
}

static NestedPattern &vectorTransferPattern() {
  static auto pattern = matcher::Op([](Operation &op) {
    return isa<vector::TransferReadOp>(op) || isa<vector::TransferWriteOp>(op);
  });
  return pattern;
}

namespace {

/// Base state for the vectorize pass.
@@ -893,7 +900,8 @@ isVectorizableLoopPtrFactory(const llvm::DenseSet<Operation *> &parallelLoops,
    if (parallelIt == parallelLoops.end())
      return false;
    int memRefDim = -1;
    auto vectorizableBody = isVectorizableLoopBody(loop, &memRefDim);
    auto vectorizableBody =
        isVectorizableLoopBody(loop, &memRefDim, vectorTransferPattern());
    if (!vectorizableBody)
      return false;
    return memRefDim == -1 || fastestVaryingMemRefDimension == -1 ||
@@ -1172,7 +1180,7 @@ static LogicalResult vectorizeRootMatch(NestedMatch m,
  // vectorizable. If a pattern is not vectorizable anymore, we just skip it.
  // TODO(ntv): implement a non-greedy profitability analysis that keeps only
  // non-intersecting patterns.
  if (!isVectorizableLoopBody(loop)) {
  if (!isVectorizableLoopBody(loop, vectorTransferPattern())) {
    LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ loop is not vectorizable");
    return failure();
  }
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -test-affine-lower-vector-transfers | FileCheck %s
// RUN: mlir-opt %s -test-convert-vector-to-loops | FileCheck %s

// CHECK: #[[ADD:map[0-9]+]] = (d0, d1) -> (d0 + d1)
// CHECK: #[[SUB:map[0-9]+]] = ()[s0] -> (s0 - 1)
@@ -6,9 +6,9 @@ add_llvm_library(MLIRTestTransforms
  TestLinalgTransforms.cpp
  TestLoopMapping.cpp
  TestLoopParametricTiling.cpp
  TestLowerVectorTransfers.cpp
  TestOpaqueLoc.cpp
  TestMemRefStrideCalculation.cpp
  TestVectorToLoopsConversion.cpp
  TestVectorToVectorConversion.cpp
  TestVectorizationUtils.cpp
@@ -1,4 +1,4 @@
//===- TestLowerVectorTransfers.cpp - Test VectorTransfers lowering -------===//
//===- TestVectorToLoopsConversion.cpp - Test VectorTransfers lowering ----===//
//
// Copyright 2019 The MLIR Authors.
//
@@ -17,7 +17,7 @@

#include <type_traits>

#include "mlir/Conversion/VectorConversions/VectorConversions.h"
#include "mlir/Conversion/VectorToLoops/ConvertVectorToLoops.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/Passes.h"
@@ -26,8 +26,8 @@ using namespace mlir;

namespace {

struct TestLowerVectorTransfersPass
    : public FunctionPass<TestLowerVectorTransfersPass> {
struct TestVectorToLoopsPass
    : public FunctionPass<TestVectorToLoopsPass> {
  void runOnFunction() override {
    OwningRewritePatternList patterns;
    auto *context = &getContext();
@@ -38,7 +38,6 @@ struct TestLowerVectorTransfersPass

} // end anonymous namespace

static PassRegistration<TestLowerVectorTransfersPass>
    pass("test-affine-lower-vector-transfers",
         "Materializes vector transfer ops to a "
         "proper abstraction for the hardware");
static PassRegistration<TestVectorToLoopsPass>
    pass("test-convert-vector-to-loops",
         "Converts vector transfer ops to loops over scalars and vector casts");
@@ -18,7 +18,7 @@

#include <type_traits>

#include "mlir/Conversion/VectorConversions/VectorConversions.h"
#include "mlir/Dialect/VectorOps/VectorTransforms.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/Passes.h"
@@ -22,8 +22,8 @@
#include "mlir/Analysis/AffineAnalysis.h"
#include "mlir/Analysis/NestedMatcher.h"
#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Analysis/VectorAnalysis.h"
#include "mlir/Dialect/AffineOps/AffineOps.h"
#include "mlir/Dialect/VectorOps/Utils.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/StandardTypes.h"
@@ -21,7 +21,7 @@ set(LIBS
  MLIRAffineToStandard
  MLIRLoopsToGPU
  MLIRLinalgToLLVM

  MLIRLoopToStandard
  MLIREDSC
  MLIRFxpMathOps
@@ -51,7 +51,8 @@ set(LIBS
  MLIRTestTransforms
  MLIRSupport
  MLIRVectorOps
  MLIRVectorConversions
  MLIRVectorToLLVM
  MLIRVectorToLoops
  )
if(MLIR_CUDA_CONVERSIONS_ENABLED)
  list(APPEND LIBS