Mirror of https://github.com/intel/llvm.git (synced 2026-01-24 00:20:25 +08:00)
[mlir][vector] Use inferRankReducedResultType for subview type inference. (#84395)
Fixes https://github.com/openxla/iree/issues/16475
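In short: the DropInnerMostUnitDimsTransferRead/Write patterns previously computed the rank-reduced memref type by hand in getMemRefTypeWithDroppingInnerDims, which could disagree with the type memref.subview itself infers (notably for dynamic shapes, where the helper returned an identity-layout type with no strided layout). They now ask memref::SubViewOp::inferRankReducedResultType for the subview result type. A minimal sketch of the effect, assuming an input like the new test below; the function and argument names here are illustrative, while the shapes and the resulting subview type are taken from the updated CHECK lines:

    // Read with two trailing contiguous dims (8 and a unit dim) on a dynamically
    // shaped source; mirrors the new @contiguous_outer_dyn_inner_most_view test.
    func.func @example(%a: index, %b: index, %src: memref<?x?x8x1xf32>) -> vector<8x1xf32> {
      %c0 = arith.constant 0 : index
      %pad = arith.constant 0.0 : f32
      %v = vector.transfer_read %src[%a, %b, %c0, %c0], %pad {in_bounds = [true, true]}
          : memref<?x?x8x1xf32>, vector<8x1xf32>
      return %v : vector<8x1xf32>
    }
    // With inferRankReducedResultType, the rewrite now goes through a subview typed
    //   memref<?x?x8x1xf32> to memref<?x?x8xf32, strided<[?, 8, 1], offset: ?>>
    // followed by a vector.transfer_read of vector<8xf32> and a vector.shape_cast
    // back to vector<8x1xf32>; the dynamic strides/offset are carried through
    // instead of being replaced by a hand-built identity layout.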
@@ -1255,42 +1255,6 @@ getTransferFoldableInnerUnitDims(MemRefType srcType, VectorType vectorType) {
   return result;
 }
 
-/// Returns a MemRef type that drops inner `dimsToDrop` dimensions from
-/// `srcType`. E.g., if `srcType` is memref<512x16x1x1xf32> and `dimsToDrop` is
-/// two, it returns memref<512x16x16> type.
-static MemRefType getMemRefTypeWithDroppingInnerDims(OpBuilder &builder,
-                                                      MemRefType srcType,
-                                                      size_t dimsToDrop) {
-  MemRefLayoutAttrInterface layout = srcType.getLayout();
-  if (isa<AffineMapAttr>(layout) && layout.isIdentity()) {
-    return MemRefType::get(srcType.getShape().drop_back(dimsToDrop),
-                           srcType.getElementType(), nullptr,
-                           srcType.getMemorySpace());
-  }
-  MemRefLayoutAttrInterface updatedLayout;
-  if (auto strided = dyn_cast<StridedLayoutAttr>(layout)) {
-    auto strides = llvm::to_vector(strided.getStrides().drop_back(dimsToDrop));
-    updatedLayout = StridedLayoutAttr::get(strided.getContext(),
-                                           strided.getOffset(), strides);
-    return MemRefType::get(srcType.getShape().drop_back(dimsToDrop),
-                           srcType.getElementType(), updatedLayout,
-                           srcType.getMemorySpace());
-  }
-
-  // Non-strided layout case.
-  AffineMap map = srcType.getLayout().getAffineMap();
-  int numSymbols = map.getNumSymbols();
-  for (size_t i = 0; i < dimsToDrop; ++i) {
-    int dim = srcType.getRank() - i - 1;
-    map = map.replace(builder.getAffineDimExpr(dim),
-                      builder.getAffineConstantExpr(0), map.getNumDims() - 1,
-                      numSymbols);
-  }
-  return MemRefType::get(srcType.getShape().drop_back(dimsToDrop),
-                         srcType.getElementType(), updatedLayout,
-                         srcType.getMemorySpace());
-}
-
 /// Drop inner most contiguous unit dimensions from transfer_read operand.
 class DropInnerMostUnitDimsTransferRead
     : public OpRewritePattern<vector::TransferReadOp> {
@@ -1337,8 +1301,10 @@ class DropInnerMostUnitDimsTransferRead
                                        rewriter.getIndexAttr(0));
     SmallVector<OpFoldResult> strides(srcType.getRank(),
                                       rewriter.getIndexAttr(1));
-    MemRefType resultMemrefType =
-        getMemRefTypeWithDroppingInnerDims(rewriter, srcType, dimsToDrop);
+    auto resultMemrefType =
+        cast<MemRefType>(memref::SubViewOp::inferRankReducedResultType(
+            srcType.getShape().drop_back(dimsToDrop), srcType, offsets, sizes,
+            strides));
     ArrayAttr inBoundsAttr =
         readOp.getInBounds()
             ? rewriter.getArrayAttr(
@@ -1421,8 +1387,10 @@ class DropInnerMostUnitDimsTransferWrite
                                        rewriter.getIndexAttr(0));
     SmallVector<OpFoldResult> strides(srcType.getRank(),
                                       rewriter.getIndexAttr(1));
-    MemRefType resultMemrefType =
-        getMemRefTypeWithDroppingInnerDims(rewriter, srcType, dimsToDrop);
+    auto resultMemrefType =
+        cast<MemRefType>(memref::SubViewOp::inferRankReducedResultType(
+            srcType.getShape().drop_back(dimsToDrop), srcType, offsets, sizes,
+            strides));
     ArrayAttr inBoundsAttr =
         writeOp.getInBounds()
             ? rewriter.getArrayAttr(
@@ -16,22 +16,27 @@ func.func @contiguous_inner_most_view(%in: memref<1x1x8x1xf32, strided<[3072, 8,
 
 // -----
 
-func.func @contiguous_outer_dyn_inner_most_view(%in: memref<?x1x8x1xf32, strided<[3072, 8, 1, 1], offset: ?>>) -> vector<1x8x1xf32>{
+func.func @contiguous_outer_dyn_inner_most_view(%a: index, %b: index, %memref: memref<?x?x8x1xf32>) -> vector<8x1xf32> {
   %c0 = arith.constant 0 : index
-  %cst = arith.constant 0.0 : f32
-  %0 = vector.transfer_read %in[%c0, %c0, %c0, %c0], %cst {in_bounds = [true, true, true]} : memref<?x1x8x1xf32, strided<[3072, 8, 1, 1], offset: ?>>, vector<1x8x1xf32>
-  return %0 : vector<1x8x1xf32>
+  %pad = arith.constant 0.0 : f32
+  %v = vector.transfer_read %memref[%a, %b, %c0, %c0], %pad {in_bounds = [true, true]} : memref<?x?x8x1xf32>, vector<8x1xf32>
+  return %v : vector<8x1xf32>
 }
-// CHECK: func @contiguous_outer_dyn_inner_most_view(
+// CHECK: func.func @contiguous_outer_dyn_inner_most_view(
+// CHECK-SAME: %[[IDX0:[a-zA-Z0-9]+]]
+// CHECK-SAME: %[[IDX1:[a-zA-Z0-9]+]]
 // CHECK-SAME: %[[SRC:[a-zA-Z0-9]+]]
-// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
-// CHECK-DAG: %[[D0:.+]] = memref.dim %[[SRC]], %[[C0]]
-// CHECK: %[[SRC_0:.+]] = memref.subview %[[SRC]][0, 0, 0, 0] [%[[D0]], 1, 8, 1] [1, 1, 1, 1]
-// CHECK-SAME: memref<?x1x8x1xf32, strided<[3072, 8, 1, 1], offset: ?>> to memref<?x1x8xf32, strided<[3072, 8, 1], offset: ?>>
-// CHECK: %[[VEC:.+]] = vector.transfer_read %[[SRC_0]]
-// CHECK-SAME: memref<?x1x8xf32, strided<[3072, 8, 1], offset: ?>>, vector<1x8xf32>
-// CHECK: %[[RESULT:.+]] = vector.shape_cast %[[VEC]]
-// CHECK: return %[[RESULT]]
+// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
+// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
+// CHECK-DAG: %[[PAD:.+]] = arith.constant 0.000000e+00 : f32
+// CHECK: %[[D0:.+]] = memref.dim %[[SRC]], %[[C0]]
+// CHECK: %[[D1:.+]] = memref.dim %[[SRC]], %[[C1]]
+// CHECK: %[[VIEW:.+]] = memref.subview %[[SRC]][0, 0, 0, 0] [%[[D0]], %[[D1]], 8, 1] [1, 1, 1, 1]
+// CHECK-SAME: memref<?x?x8x1xf32> to memref<?x?x8xf32, strided<[?, 8, 1], offset: ?>>
+// CHECK: %[[VEC:.+]] = vector.transfer_read %[[VIEW]]
+// CHECK-SAME: memref<?x?x8xf32, strided<[?, 8, 1], offset: ?>>, vector<8xf32>
+// CHECK: %[[RESULT:.+]] = vector.shape_cast %[[VEC]]
+// CHECK: return %[[RESULT]]
 
 // -----
 
@@ -43,7 +48,7 @@ func.func @contiguous_inner_most_dim(%A: memref<16x1xf32>, %i:index, %j:index) -
 }
 // CHECK: func @contiguous_inner_most_dim(%[[SRC:.+]]: memref<16x1xf32>, %[[I:.+]]: index, %[[J:.+]]: index) -> vector<8x1xf32>
 // CHECK: %[[SRC_0:.+]] = memref.subview %[[SRC]]
-// CHECK-SAME: memref<16x1xf32> to memref<16xf32>
+// CHECK-SAME: memref<16x1xf32> to memref<16xf32, strided<[1]>>
 // CHECK: %[[V:.+]] = vector.transfer_read %[[SRC_0]]
 // CHECK: %[[RESULT]] = vector.shape_cast %[[V]] : vector<8xf32> to vector<8x1xf32>
 // CHECK: return %[[RESULT]]
@@ -111,7 +116,7 @@ func.func @drop_two_inner_most_dim_for_transfer_write(%arg0: memref<1x512x16x1x1
 // CHECK-SAME: %[[IDX:[a-zA-Z0-9]+]]
 // CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
 // CHECK: %[[SUBVIEW:.+]] = memref.subview %[[DEST]]
-// CHECK-SAME: memref<1x512x16x1x1xf32> to memref<1x512x16xf32>
+// CHECK-SAME: memref<1x512x16x1x1xf32> to memref<1x512x16xf32, strided<[8192, 16, 1]>>
 // CHECK: %[[CAST:.+]] = vector.shape_cast %[[VEC]] : vector<1x16x16x1x1xf32> to vector<1x16x16xf32>
 // CHECK: vector.transfer_write %[[CAST]], %[[SUBVIEW]]
 // CHECK-SAME: [%[[C0]], %[[IDX]], %[[C0]]]
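For completeness, a sketch of the write-side case exercised by this last hunk. The original test body is not shown in this excerpt, so the function and argument names below are illustrative; the shapes and the expected subview type come from the CHECK lines above:

    // 5-D write whose two innermost dims are contiguous unit/contiguous dims.
    func.func @write_example(%dest: memref<1x512x16x1x1xf32>,
                             %vec: vector<1x16x16x1x1xf32>, %idx: index) {
      %c0 = arith.constant 0 : index
      vector.transfer_write %vec, %dest[%c0, %idx, %c0, %c0, %c0]
          {in_bounds = [true, true, true, true, true]}
          : vector<1x16x16x1x1xf32>, memref<1x512x16x1x1xf32>
      return
    }
    // DropInnerMostUnitDimsTransferWrite now rewrites this through a subview typed
    //   memref<1x512x16x1x1xf32> to memref<1x512x16xf32, strided<[8192, 16, 1]>>
    // (the CHECK line previously expected the identity-layout memref<1x512x16xf32>),
    // plus a vector.shape_cast of the value to vector<1x16x16xf32> before the write.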