Revert "[mlir][linalg] Relax tensor.extract vectorization" (#102232)
Reverts llvm/llvm-project#99299 because it breaks the lowering. To repro: `mlir-opt -transform-interpreter ~/repro.mlir`

```mlir
#map = affine_map<(d0, d1) -> (d0)>
#map1 = affine_map<(d0, d1) -> (d1)>
#map2 = affine_map<(d0, d1) -> (d0, d1)>
#map3 = affine_map<(d0, d1) -> (d0 + d1)>
module {
  func.func @foo(%arg0: index, %arg1: tensor<2xf32>, %arg2: tensor<4xf32>, %arg3: tensor<1xf32>) -> tensor<4x1xf32> {
    %c0 = arith.constant 0 : index
    %cst = arith.constant 1.000000e+00 : f32
    %cst_0 = arith.constant 0.000000e+00 : f32
    %0 = tensor.empty() : tensor<4x1xf32>
    %1 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel"]} ins(%arg2, %arg3 : tensor<4xf32>, tensor<1xf32>) outs(%0 : tensor<4x1xf32>) {
    ^bb0(%in: f32, %in_1: f32, %out: f32):
      %2 = linalg.index 0 : index
      %3 = linalg.index 1 : index
      %4 = affine.apply #map3(%3, %arg0)
      %extracted = tensor.extract %arg1[%c0] : tensor<2xf32>
      %5 = arith.cmpi eq, %2, %c0 : index
      %6 = arith.cmpi ult, %2, %c0 : index
      %7 = arith.select %5, %cst, %in : f32
      %8 = arith.select %6, %cst_0, %7 : f32
      %9 = arith.cmpi eq, %4, %c0 : index
      %10 = arith.cmpi ult, %4, %c0 : index
      %11 = arith.select %9, %cst, %in_1 : f32
      %12 = arith.select %10, %cst_0, %11 : f32
      %13 = arith.mulf %8, %12 : f32
      %14 = arith.mulf %13, %extracted : f32
      %15 = arith.cmpi eq, %2, %4 : index
      %16 = arith.select %15, %cst, %cst_0 : f32
      %17 = arith.subf %16, %14 : f32
      linalg.yield %17 : f32
    } -> tensor<4x1xf32>
    return %1 : tensor<4x1xf32>
  }
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %0 : !transform.any_op
    transform.yield
  }
}
```
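Note the output shape in the repro: the `linalg.generic` writes into a `tensor<4x1xf32>`, i.e. a "vector" with a trailing unit dimension. As the first hunk below shows, the restored pre-#99299 check (`targetShape.back() == 1`) classifies exactly this shape as a gather load, whereas the relaxed check from #99299 treated it as effectively 1-D and let the contiguous/scalar-broadcast analysis proceed; that appears to be the lowering path this revert is guarding against.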
```diff
@@ -946,22 +946,27 @@ getTensorExtractMemoryAccessPattern(tensor::ExtractOp extractOp,
   if (linalgOp.hasDynamicShape())
     return VectorMemoryAccessKind::Gather;
 
-  // True for vectors that are effectively 1D, e.g. `vector<1x4x1xi32>`, false
-  // otherwise.
-  bool isOutput1DVector = (llvm::count_if(targetShape, [](int64_t dimSize) {
-                             return dimSize > 1;
-                           }) == 1);
+  // 1. Assume that it's a gather load when reading _into_:
+  //    * an n-D "vector", like `tensor<1x2x4xi32` or `tensor<2x1x4xi32>`, or
+  //    * a 1-D "vector" with the trailing dim equal 1, e.g. `tensor<1x4x1xi32`.
+  // TODO: Relax these conditions.
+  // FIXME: This condition assumes non-dynamic sizes.
+  if ((llvm::count_if(targetShape,
+                      [](int64_t dimSize) { return dimSize > 1; }) != 1) ||
+      targetShape.back() == 1)
+    return VectorMemoryAccessKind::Gather;
 
-  // 1. Assume that it's a gather load when reading non-1D vector.
-  if (!isOutput1DVector)
+  // 2. Assume that it's a gather load when reading _from_ a tensor for which
+  // the trailing dimension is 1, e.g. `tensor<1x4x1xi32>`.
+  // TODO: Relax this condition.
+  if (inputShape.getShape().back() == 1)
     return VectorMemoryAccessKind::Gather;
 
   bool leadingIdxsLoopInvariant = true;
 
-  // 2. Analyze the leading indices of `extractOp`.
+  // 3. Analyze the leading indices of `extractOp`.
   // Look at the way each index is calculated and decide whether it is suitable
-  // for a contiguous load, i.e. whether it's loop invariant. If not, it's a
-  // gather load.
+  // for a contiguous load, i.e. whether it's loop invariant.
   auto indices = extractOp.getIndices();
   auto leadIndices = indices.drop_back(1);
```
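To see why the repro trips the relaxed check, consider the two shape predicates side by side. The following is a standalone sketch (hypothetical names, plain `std::vector` in place of MLIR's `ArrayRef` and `llvm::count_if`), evaluating both on the repro's output shape `4x1`:

```cpp
// Sketch only: compares the restored and the reverted shape predicates.
// Build with any C++17 compiler, e.g. `c++ -std=c++17 predicate_sketch.cpp`.
#include <cstdint>
#include <iostream>
#include <vector>

// Restored (pre-#99299) predicate: gather unless the target vector has
// exactly one non-unit dim AND its trailing dim is not 1.
static bool isGatherRestored(const std::vector<int64_t> &targetShape) {
  int64_t nonUnitDims = 0;
  for (int64_t dimSize : targetShape)
    if (dimSize > 1)
      ++nonUnitDims;
  return nonUnitDims != 1 || targetShape.back() == 1;
}

// Reverted (#99299) predicate: gather only when the target vector is not
// effectively 1-D; the trailing dim is left unconstrained.
static bool isGatherRelaxed(const std::vector<int64_t> &targetShape) {
  int64_t nonUnitDims = 0;
  for (int64_t dimSize : targetShape)
    if (dimSize > 1)
      ++nonUnitDims;
  return nonUnitDims != 1;
}

int main() {
  // The repro's output is tensor<4x1xf32>: one non-unit dim, trailing dim 1.
  std::vector<int64_t> repro = {4, 1};
  std::cout << "restored -> "
            << (isGatherRestored(repro) ? "gather" : "further analysis") << '\n';
  std::cout << "relaxed  -> "
            << (isGatherRelaxed(repro) ? "gather" : "further analysis") << '\n';
}
```

Under the restored check the `tensor<4x1xf32>` access is classified as a gather up front; under the relaxed check it falls through to the contiguous/scalar-broadcast analysis below, which is where the reported mis-lowering would surface.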
```diff
@@ -977,13 +982,13 @@ getTensorExtractMemoryAccessPattern(tensor::ExtractOp extractOp,
     return VectorMemoryAccessKind::Gather;
   }
 
-  // 3. Analyze the trailing index for `extractOp`.
+  // 4. Analyze the trailing index for `extractOp`.
   // At this point we know that the leading indices are loop invariant. This
   // means that is potentially a scalar or a contiguous load. We can decide
   // based on the trailing idx.
   auto extractOpTrailingIdx = indices.back();
 
-  // 3a. Scalar broadcast load
+  // 4a. Scalar broadcast load
   // If the trailing index is loop invariant then this is a scalar load.
   if (leadingIdxsLoopInvariant &&
       isLoopInvariantIdx(linalgOp, extractOpTrailingIdx)) {
@@ -992,7 +997,7 @@ getTensorExtractMemoryAccessPattern(tensor::ExtractOp extractOp,
     return VectorMemoryAccessKind::ScalarBroadcast;
   }
 
-  // 3b. Contiguous loads
+  // 4b. Contiguous loads
   // The trailing `extractOp` index should increment with every loop iteration.
   // This effectively means that it must be based on the trailing loop index.
   // This is what the following bool captures.
@@ -1006,7 +1011,7 @@ getTensorExtractMemoryAccessPattern(tensor::ExtractOp extractOp,
     return VectorMemoryAccessKind::Contiguous;
   }
 
-  // 4. Fallback case - gather load.
+  // 5. Fallback case - gather load.
   LDBG("Found gather load: " << extractOp);
   return VectorMemoryAccessKind::Gather;
 }
```
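The three hunks above only renumber the step comments (3/3a/3b/4 become 4/4a/4b/5, making room for the reinstated second shape check), but read together they describe a five-step decision ladder. A schematic paraphrase, with the IR analyses abstracted into boolean inputs (all names here are hypothetical, not the actual MLIR implementation):

```cpp
// Schematic paraphrase of the restored five-step classification. The real
// code derives these booleans by inspecting the linalg op's indices.
#include <iostream>

enum class VectorMemoryAccessKind { ScalarBroadcast, Contiguous, Gather };

VectorMemoryAccessKind
classifyExtract(bool gatherByShape,            // steps 1-2: shape bailouts
                bool leadingIdxsLoopInvariant, // step 3: leading indices
                bool trailingIdxLoopInvariant, // step 4a: trailing index
                bool trailingIdxIsInnermost) { // step 4b: trailing loop index
  if (gatherByShape)
    return VectorMemoryAccessKind::Gather;          // 1.-2. gather shapes
  if (!leadingIdxsLoopInvariant)
    return VectorMemoryAccessKind::Gather;          // 3. varying leading idxs
  if (trailingIdxLoopInvariant)
    return VectorMemoryAccessKind::ScalarBroadcast; // 4a. scalar load + bcast
  if (trailingIdxIsInnermost)
    return VectorMemoryAccessKind::Contiguous;      // 4b. contiguous load
  return VectorMemoryAccessKind::Gather;            // 5. fallback
}

int main() {
  // E.g. invariant leading and trailing indices -> scalar broadcast.
  bool ok = classifyExtract(false, true, true, false) ==
            VectorMemoryAccessKind::ScalarBroadcast;
  std::cout << (ok ? "scalar broadcast" : "other") << '\n';
}
```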
```diff
@@ -595,59 +595,3 @@ module attributes {transform.with_named_sequence} {
     transform.yield
   }
 }
-
-
-// -----
-
-func.func @vectorize_scalar_broadcast_column_tensor(%in: tensor<1x1x4xi32>) -> tensor<1x1x4xi32> {
-  %c4 = arith.constant 4 : index
-  %c0 = arith.constant 0 : index
-  %cst = arith.constant dense<[[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13], [14]]> : tensor<15x1xi32>
-
-  %out = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>], iterator_types = ["parallel", "parallel", "parallel"]} outs(%in : tensor<1x1x4xi32>) {
-  ^bb0(%out: i32):
-    %8 = linalg.index 0 : index
-    %idx_0 = linalg.index 0 : index
-    %extracted = tensor.extract %cst[%idx_0, %c0] : tensor<15x1xi32>
-    linalg.yield %extracted : i32
-  } -> tensor<1x1x4xi32>
-
-  return %out : tensor<1x1x4xi32>
-}
-
-// CHECK: #[[$ATTR_1:.+]] = affine_map<(d0, d1) -> (0, 0, 0)>
-// CHECK-LABEL:   func.func @vectorize_scalar_broadcast_column_tensor(
-// CHECK-SAME:      %[[VAL_0:.*]]: tensor<1x1x4xi32>) -> tensor<1x1x4xi32> {
-// CHECK:           %[[VAL_1:.*]] = arith.constant 4 : index
-// CHECK:           %[[VAL_2:.*]] = arith.constant 0 : index
-// CHECK:           %[[VAL_3:.*]] = arith.constant dense<{{\[\[}}0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13], [14]]> : tensor<15x1xi32>
-// CHECK:           %[[VAL_4:.*]] = arith.constant 1 : index
-// CHECK:           %[[VAL_5:.*]] = arith.constant 1 : index
-// CHECK:           %[[VAL_6:.*]] = arith.constant 4 : index
-// CHECK:           %[[VAL_7:.*]] = arith.constant 0 : index
-// CHECK:           %[[VAL_8:.*]] = arith.constant 0 : i32
-// CHECK:           %[[VAL_9:.*]] = vector.transfer_read %[[VAL_0]]{{\[}}%[[VAL_7]], %[[VAL_7]], %[[VAL_7]]], %[[VAL_8]] : tensor<1x1x4xi32>, vector<1x1x4xi32>
-// CHECK:           %[[VAL_10:.*]] = vector.step : vector<1xindex>
-// CHECK:           %[[VAL_11:.*]] = vector.broadcast %[[VAL_10]] : vector<1xindex> to vector<4x1x1xindex>
-// CHECK:           %[[VAL_12:.*]] = vector.transpose %[[VAL_11]], [2, 1, 0] : vector<4x1x1xindex> to vector<1x1x4xindex>
-// CHECK:           %[[VAL_13:.*]] = vector.step : vector<1xindex>
-// CHECK:           %[[VAL_14:.*]] = vector.broadcast %[[VAL_13]] : vector<1xindex> to vector<4x1x1xindex>
-// CHECK:           %[[VAL_15:.*]] = vector.transpose %[[VAL_14]], [2, 1, 0] : vector<4x1x1xindex> to vector<1x1x4xindex>
-// CHECK:           %[[VAL_16:.*]] = arith.constant dense<true> : vector<1x1x4xi1>
-// CHECK:           %[[VAL_17:.*]] = arith.constant dense<0> : vector<1x1x4xi32>
-// CHECK:           %[[VAL_18:.*]] = arith.constant 0 : index
-// CHECK:           %[[VAL_19:.*]] = arith.constant 0 : i32
-// CHECK:           %[[VAL_20:.*]] = vector.shape_cast %[[VAL_15]] : vector<1x1x4xindex> to vector<4xindex>
-// CHECK:           %[[VAL_21:.*]] = vector.extractelement %[[VAL_20]]{{\[}}%[[VAL_19]] : i32] : vector<4xindex>
-// CHECK:           %[[VAL_22:.*]] = arith.constant 0 : i32
-// CHECK:           %[[VAL_23:.*]] = vector.transfer_read %[[VAL_3]]{{\[}}%[[VAL_21]], %[[VAL_2]]], %[[VAL_22]] {in_bounds = [true, true, true], permutation_map = #[[$ATTR_1]]} : tensor<15x1xi32>, vector<1x1x4xi32>
-// CHECK:           %[[VAL_24:.*]] = arith.constant 0 : index
-// CHECK:           %[[VAL_25:.*]] = vector.transfer_write %[[VAL_23]], %[[VAL_0]]{{\[}}%[[VAL_24]], %[[VAL_24]], %[[VAL_24]]] : vector<1x1x4xi32>, tensor<1x1x4xi32>
-
-module attributes {transform.with_named_sequence} {
-  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
-    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    transform.structured.vectorize %0 vector_sizes [1, 1, 4] {vectorize_nd_extract} : !transform.any_op
-    transform.yield
-  }
-}
```
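The removed test covered scalar-broadcast loads from a column tensor under masked vectorization (`vector_sizes [1, 1, 4]`) with `vectorize_nd_extract`; like the repro in the commit message, it is driven through the transform interpreter (`mlir-opt -transform-interpreter`).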