diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
index 74c0909ce58e..0fca130a58e3 100644
--- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
@@ -1947,37 +1947,39 @@ def TileToForallOp :
 }
 
 //===----------------------------------------------------------------------===//
-// VectorizeOp
+// VectorizeChildrenAndApplyPatternsOp
 //===----------------------------------------------------------------------===//
 
-def VectorizeOp : Op<Transform_Dialect, "structured.vectorize",
+def VectorizeChildrenAndApplyPatternsOp :
+    Op<Transform_Dialect, "structured.vectorize_children_and_apply_patterns",
     [FunctionalStyleTransformOpTrait, MemoryEffectsOpInterface,
      TransformEachOpTrait, TransformOpInterface,
      ReportTrackingListenerFailuresOpTrait]> {
   let description = [{
-    Indicates that the given `target` op all the ops it contains should be
-    vectorized with the configuration specified by the attributes of this op.
-    This vectorization only handles structured ops that operate on shaped types
-    and does not vectorize loops or straight-line. Internally, it applies a
-    set of rewrite patterns, some of which enable vectorization and some of
-    which clean up the results. Therefore, it can only be applied to an op with
-    the "isolated from above property". If finer granularity is required, it can
-    be achieved by outlining the target part of the payload IR into, e.g., a
-    function, performing the transformation, and inlining it back. This
-    transformation only fails if the entire pattern rewriting failed, i.e., it
-    does **not** fail when no ops were vectorized.
+    Vectorizes all children contained in the given `target` using the
+    configuration specified by the attributes of this op. This only vectorizes
+    structured ops that operate on shaped types and does not vectorize loops or
+    straight-line code. Internally, it applies a set of rewrite patterns, some
+    of which enable vectorization and some of which clean up the results.
+    Therefore, it can only be applied to an op with the "isolated from above"
+    property. This transformation only fails if the entire pattern rewriting
+    failed, i.e., it does **not** fail when no ops were vectorized.
 
-    Note that this transformation is invalidating the handles to any payload IR
+    Finer granularity can be achieved either with the `VectorizeOp` for
+    individual ops or by outlining the target part of the payload IR into,
+    e.g., a function, performing this transformation, and inlining it back.
+
+    Note that this transformation invalidates the handles to any payload IR
     operation that is contained inside the vectorization target.
 
     This transformation supports the following attributes:
-    - `vectorize_padding`: a UnitAttr to activate the vectorization of
+    - `vectorize_padding`: a `UnitAttr` to activate the vectorization of
       `tensor.pad` ops. Different pipelines may prefer to lower such ops to
       loops.
-    - `disable_multi_reduction_to_contract_patterns`: a UnitAttr to deactivate
+    - `disable_multi_reduction_to_contract_patterns`: a `UnitAttr` to deactivate
       the rewrite of `vector.multi_reduction` to `vector.contract`. This is
       intended to be used in tests only.
-    - `disable_transfer_permutation_map_lowering_patterns`: a UnitAttr to
+    - `disable_transfer_permutation_map_lowering_patterns`: a `UnitAttr` to
       deactivate the rewrite of `vector.transfer` with permutation maps into
       explicit `vector.transpose` operations. This is intended to be used in
       tests only but may be promoted to a first class attribute in the future.
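// For illustration only, a minimal sketch of the new spelling (handle and
// payload names are hypothetical; the exact syntax is exercised by the tests
// updated further below):
//
//   %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
//     : (!transform.any_op) -> !transform.any_op
//   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
//   %2 = transform.structured.vectorize_children_and_apply_patterns %1
//     {vectorize_padding} : (!transform.any_op) -> !transform.any_op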
@@ -2015,7 +2017,7 @@ def VectorizeOp : Op<Transform_Dialect, "structured.vectorize",
 //===----------------------------------------------------------------------===//
 
-def MaskedVectorizeOp : Op<Transform_Dialect, "structured.masked_vectorize",
+def VectorizeOp : Op<Transform_Dialect, "structured.vectorize",
     [DeclareOpInterfaceMethods<MemoryEffectsOpInterface>,
      TransformOpInterface, ReportTrackingListenerFailuresOpTrait]> {
   let description = [{
@@ -2029,9 +2031,9 @@ def MaskedVectorizeOp : Op<Transform_Dialect, "structured.masked_vectorize",
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
   if (!target->hasTrait<OpTrait::IsIsolatedFromAbove>()) {
     auto diag = this->emitOpError("requires isolated-from-above targets");
    diag.attachNote(target->getLoc()) << "non-isolated target";
@@ -2992,9 +2996,9 @@ transform::VectorizeOp::applyToOne(transform::TransformRewriter &rewriter,
 }
 
 //===----------------------------------------------------------------------===//
-// MaskedVectorizeOp
+// VectorizeOp
 //===----------------------------------------------------------------------===//
 
-DiagnosedSilenceableFailure transform::MaskedVectorizeOp::apply(
+DiagnosedSilenceableFailure transform::VectorizeOp::apply(
     transform::TransformRewriter &rewriter,
     mlir::transform::TransformResults &transformResults,
     mlir::transform::TransformState &state) {
@@ -3058,19 +3062,19 @@ DiagnosedSilenceableFailure transform::MaskedVectorizeOp::apply(
   return DiagnosedSilenceableFailure::success();
 }
 
-void transform::MaskedVectorizeOp::getEffects(
+void transform::VectorizeOp::getEffects(
     SmallVectorImpl<MemoryEffects::EffectInstance> &effects) {
   consumesHandle(getTarget(), effects);
   onlyReadsHandle(getVectorSizes(), effects);
   modifiesPayload(effects);
 }
 
-SmallVector<OpFoldResult> MaskedVectorizeOp::getMixedVectorSizes() {
+SmallVector<OpFoldResult> VectorizeOp::getMixedVectorSizes() {
   OpBuilder b(getContext());
   return getMixedValues(getStaticVectorSizes(), getVectorSizes(), b);
 }
 
-LogicalResult transform::MaskedVectorizeOp::verify() {
+LogicalResult transform::VectorizeOp::verify() {
   if (getStaticVectorSizes().size() != getScalableSizes().size())
     return emitOpError("expected same number of vector sizes (")
            << getStaticVectorSizes().size() << ") and scalable sizes ("
diff --git a/mlir/python/mlir/dialects/_structured_transform_ops_ext.py b/mlir/python/mlir/dialects/_structured_transform_ops_ext.py
index fd3dbca7c5a6..6273452c0648 100644
--- a/mlir/python/mlir/dialects/_structured_transform_ops_ext.py
+++ b/mlir/python/mlir/dialects/_structured_transform_ops_ext.py
@@ -360,8 +360,8 @@ class MapCopyToThreadsOp:
     )
 
 
-class MaskedVectorizeOp:
-    """Specialization for MaskedVectorizeOp class."""
+class VectorizeOp:
+    """Specialization for VectorizeOp class."""
 
     def __init__(
         self,
@@ -730,8 +730,8 @@ class TileToForallOp:
     )
 
 
-class VectorizeOp:
-    """Specialization for VectorizeOp class."""
+class VectorizeChildrenAndApplyPatternsOp:
+    """Specialization for VectorizeChildrenAndApplyPatternsOp class."""
 
     def __init__(
         self,
diff --git a/mlir/test/Dialect/LLVM/transform-e2e.mlir b/mlir/test/Dialect/LLVM/transform-e2e.mlir
index 2cb753a3d7fb..54a17940d8c0 100644
--- a/mlir/test/Dialect/LLVM/transform-e2e.mlir
+++ b/mlir/test/Dialect/LLVM/transform-e2e.mlir
@@ -17,7 +17,7 @@ transform.sequence failures(propagate) {
   %0 = transform.structured.match ops{["linalg.matmul"]} in %module_op : (!transform.any_op) -> !transform.any_op
   %1, %loops:3 = transform.structured.tile %0 [2, 2, 2] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
   %2 = get_parent_op %1 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %2 : (!transform.any_op) -> !transform.any_op
+  transform.structured.vectorize_children_and_apply_patterns %2 : (!transform.any_op) -> !transform.any_op
   %b = transform.bufferization.one_shot_bufferize
       layout{IdentityLayoutMap} %module_op
       {bufferize_function_boundaries = true}
       : (!transform.any_op) -> !transform.any_op
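// For reference, the op formerly called `masked_vectorize` keeps its
// semantics under the new `structured.vectorize` name: `vector_sizes` fixes
// the vector shape, and masks are generated where those sizes do not match
// the payload's (possibly dynamic) iteration space. A minimal sketch, with
// hypothetical handle names:
//
//   %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
//     : (!transform.any_op) -> !transform.any_op
//   transform.structured.vectorize %matmul vector_sizes [8, 16, 4]
//     : !transform.any_op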
diff --git a/mlir/test/Dialect/Linalg/matmul-shared-memory-padding.mlir b/mlir/test/Dialect/Linalg/matmul-shared-memory-padding.mlir
index da6ebdbd24de..37a925cf0df9 100644
--- a/mlir/test/Dialect/Linalg/matmul-shared-memory-padding.mlir
+++ b/mlir/test/Dialect/Linalg/matmul-shared-memory-padding.mlir
@@ -80,7 +80,7 @@ transform.sequence failures(propagate) {
     : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
 
   // Apply masked vectorization to padding ops.
-  transform.structured.masked_vectorize %tiled_pad_op vector_sizes [128, 4]
+  transform.structured.vectorize %tiled_pad_op vector_sizes [128, 4]
     : !transform.any_op
 
   // Assign shared memory buffer to padding.
@@ -105,7 +105,7 @@ transform.sequence failures(propagate) {
     : (!transform.any_op) -> !transform.any_op
   %bufferized_copy_back = transform.structured.match ops{["linalg.copy"]}
     in %func_op_2 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize
+  transform.structured.vectorize
     %bufferized_copy_back vector_sizes [128, 4] : !transform.any_op
 
   // Canonicalize, cleanup and vector lowering. This step also removes buffer
@@ -192,7 +192,7 @@ transform.sequence failures(propagate) {
 }
 
   // Apply masked vectorization to padding ops.
-  transform.structured.masked_vectorize %tiled_pad_op vector_sizes [128, 4]
+  transform.structured.vectorize %tiled_pad_op vector_sizes [128, 4]
    : !transform.any_op
 
   // Assign shared memory buffer to padding.
@@ -217,7 +217,7 @@ transform.sequence failures(propagate) {
    : (!transform.any_op) -> !transform.any_op
   %bufferized_copy_back = transform.structured.match ops{["linalg.copy"]}
     in %func_op_2 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize
+  transform.structured.vectorize
     %bufferized_copy_back vector_sizes [128, 4] : !transform.any_op
 
   // Canonicalize, cleanup and vector lowering. This step also removes buffer
diff --git a/mlir/test/Dialect/Linalg/pad-to-specific-memory-space.mlir b/mlir/test/Dialect/Linalg/pad-to-specific-memory-space.mlir
index 45c2eb5dfdf5..be807a9d5691 100644
--- a/mlir/test/Dialect/Linalg/pad-to-specific-memory-space.mlir
+++ b/mlir/test/Dialect/Linalg/pad-to-specific-memory-space.mlir
@@ -111,7 +111,7 @@ transform.sequence failures(propagate) {
     padding_dimensions=[0, 1, 2],
     pack_paddings=[1, 1, 1]
   } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
-  transform.structured.masked_vectorize %pad vector_sizes [10, 12] : !transform.any_op
+  transform.structured.vectorize %pad vector_sizes [10, 12] : !transform.any_op
   %vector_write = transform.structured.match ops{["vector.transfer_write"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   %mask_op = transform.get_parent_op %vector_write {op_name = "vector.mask"} : (!transform.any_op) -> !transform.any_op
   %buffer, %new_ops = transform.structured.bufferize_to_allocation %mask_op {memory_space = 3, emit_dealloc} : !transform.any_op
diff --git a/mlir/test/Dialect/Linalg/transform-op-compose-masked-vectorize-and-cleanups.mlir b/mlir/test/Dialect/Linalg/transform-op-compose-masked-vectorize-and-cleanups.mlir
index 8797d847c43a..07cb79fdba2d 100644
--- a/mlir/test/Dialect/Linalg/transform-op-compose-masked-vectorize-and-cleanups.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-compose-masked-vectorize-and-cleanups.mlir
@@ -26,7 +26,7 @@ transform.sequence failures(propagate) {
     : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
   %tiled_linalg_op_0, %loops_1:3 = transform.structured.tile %tiled_linalg_op[8, 8, 8]
     : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
-  transform.structured.masked_vectorize %tiled_linalg_op_0 vector_sizes [8, 8, 8]
+  transform.structured.vectorize %tiled_linalg_op_0 vector_sizes [8, 8, 8]
     : !transform.any_op
 
   %func = transform.structured.match ops{["func.func"]} in %module
diff --git a/mlir/test/Dialect/Linalg/transform-op-matmul-to-outerproduct.mlir b/mlir/test/Dialect/Linalg/transform-op-matmul-to-outerproduct.mlir
index ea84b6b75876..8a06a55a1b57 100644
--- a/mlir/test/Dialect/Linalg/transform-op-matmul-to-outerproduct.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-matmul-to-outerproduct.mlir
@@ -31,7 +31,7 @@ transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
   transform.apply_patterns to %2 {
     transform.apply_patterns.vector.lower_contraction lowering_strategy = "outerproduct"
   } : !transform.any_op
diff --git a/mlir/test/Dialect/Linalg/transform-op-vectorize.mlir b/mlir/test/Dialect/Linalg/transform-op-vectorize.mlir
index b335a65250d9..43fea65ed7f3 100644
--- a/mlir/test/Dialect/Linalg/transform-op-vectorize.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-vectorize.mlir
@@ -20,7 +20,7 @@ transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
 }
 
 // -----
 
@@ -45,7 +45,7 @@ transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
 }
 
 // -----
 
@@ -65,7 +65,7 @@ transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
 }
 
 // -----
 
@@ -111,7 +111,7 @@ transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
 }
 
 // -----
 
@@ -159,7 +159,7 @@ transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 {vectorize_padding} : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 {vectorize_padding} : (!transform.any_op) -> !transform.any_op
 }
 
 // -----
 
@@ -176,5 +176,5 @@ transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   // expected-error @below {{op requires isolated-from-above targets}}
-  %2 = transform.structured.vectorize %0 : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %0 : (!transform.any_op) -> !transform.any_op
 }
diff --git a/mlir/test/Dialect/Linalg/vectorization-masked.mlir b/mlir/test/Dialect/Linalg/vectorization-masked.mlir
deleted file mode 100644
index 82e8dfe37f79..000000000000
--- a/mlir/test/Dialect/Linalg/vectorization-masked.mlir
+++ /dev/null
@@ -1,514 +0,0 @@
-// RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file | FileCheck %s
-
-func.func @vectorize_dynamic_identity(%arg0: tensor<?xf32>,
-                                      %arg1: tensor<?xf32>,
-                                      %arg2: tensor<?xf32>) -> tensor<?xf32> {
-  %0 = linalg.generic { indexing_maps = [affine_map<(d0) -> (d0)>,
-                                         affine_map<(d0) -> (d0)>,
-                                         affine_map<(d0) -> (d0)>],
-                        iterator_types = ["parallel"] }
-    ins(%arg0, %arg1 : tensor<?xf32>, tensor<?xf32>)
-    outs(%arg2 : tensor<?xf32>) {
-    ^bb(%in0: f32, %in1: f32, %out: f32) :
-      %0 = arith.addf %in0, %in1 : f32
-      linalg.yield %0 : f32
-    } -> tensor<?xf32>
-  return %0 : tensor<?xf32>
-}
-
-// CHECK-LABEL: @vectorize_dynamic_identity
-// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
-// CHECK: %[[VAL_4:.*]] = tensor.dim %{{.*}}, %[[VAL_3]] : tensor<?xf32>
-// CHECK: %[[VAL_7:.*]] = vector.create_mask %[[VAL_4]] : vector<4xi1>
-// CHECK: %[[VAL_8:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
-// CHECK: %[[VAL_10:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
-// CHECK: %[[VAL_12:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
-// CHECK: %[[VAL_13:.*]] = arith.addf %[[VAL_8]], %[[VAL_10]] : vector<4xf32>
-// CHECK: %[[VAL_14:.*]] = vector.mask %[[VAL_7]] { vector.transfer_write %{{.*}} {in_bounds = [true]} : vector<4xf32>, tensor<?xf32> } : vector<4xi1> -> tensor<?xf32>
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [4] : !transform.any_op
-}
-
-// -----
-
-func.func @vectorize_dynamic_1d_broadcast(%arg0: tensor<?xf32>,
-                                          %arg1: tensor<?xf32>,
-                                          %arg2: tensor<?xf32>) -> tensor<?xf32> {
-  %0 = linalg.generic { indexing_maps = [affine_map<(d0) -> (0)>,
-                                         affine_map<(d0) -> (d0)>,
-                                         affine_map<(d0) -> (d0)>],
-                        iterator_types = ["parallel"] }
-    ins(%arg0, %arg1 : tensor<?xf32>, tensor<?xf32>)
-    outs(%arg2 : tensor<?xf32>) {
-    ^bb(%in0: f32, %in1: f32, %out: f32) :
-      %0 = arith.addf %in0, %in1 : f32
-      linalg.yield %0 : f32
-    } -> tensor<?xf32>
-  return %0 : tensor<?xf32>
-}
-
-// CHECK-LABEL: @vectorize_dynamic_1d_broadcast
-// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
-// CHECK: %[[VAL_4:.*]] = tensor.dim %{{.*}}, %[[VAL_3]] : tensor<?xf32>
-// CHECK: %[[VAL_7:.*]] = vector.transfer_read %{{.*}} {permutation_map = #{{.*}}} : tensor<?xf32>, vector<4xf32>
-// CHECK: %[[VAL_9:.*]] = vector.create_mask %[[VAL_4]] : vector<4xi1>
-// CHECK: %[[VAL_10:.*]] = vector.mask %[[VAL_9]] { vector.transfer_read %{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
-// CHECK: %[[VAL_12:.*]] = vector.mask %[[VAL_9]] { vector.transfer_read %{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
-// CHECK: %[[VAL_13:.*]] = arith.addf %[[VAL_7]], %[[VAL_10]] : vector<4xf32>
-// CHECK: %[[VAL_14:.*]] = vector.mask %{{.*}} { vector.transfer_write %[[VAL_13]], {{.*}} {in_bounds = [true]} : vector<4xf32>, tensor<?xf32> } : vector<4xi1> -> tensor<?xf32>
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [4] : !transform.any_op
-}
-
-// -----
-
-func.func @vectorize_dynamic_2d_transpose(%arg0: tensor<?x?xf32>,
-                                          %arg1: tensor<?x?xf32>,
-                                          %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
-  %0 = linalg.generic { indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>,
-                                         affine_map<(d0, d1) -> (d0, d1)>,
-                                         affine_map<(d0, d1) -> (d0, d1)>],
-                        iterator_types = ["parallel", "parallel"] }
-    ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
-    outs(%arg2 : tensor<?x?xf32>) {
-    ^bb(%in0: f32, %in1: f32, %out: f32) :
-      %0 = arith.addf %in0, %in1 : f32
-      linalg.yield %0 : f32
-    } -> tensor<?x?xf32>
-  return %0 : tensor<?x?xf32>
-}
-
-// CHECK-LABEL: @vectorize_dynamic_2d_transpose
-// CHECK: %[[VAL_3:.*]] = arith.constant 1 : index
-// CHECK: %[[VAL_4:.*]] = tensor.dim %{{.*}}, %[[VAL_3]] : tensor<?x?xf32>
-// CHECK: %[[VAL_5:.*]] = arith.constant 0 : index
-// CHECK: %[[VAL_6:.*]] = tensor.dim %{{.*}}, %[[VAL_5]] : tensor<?x?xf32>
-// CHECK: %[[VAL_9:.*]] = vector.create_mask %[[VAL_6]], %[[VAL_4]] : vector<8x4xi1>
-// CHECK: %[[VAL_10:.*]] = vector.mask %[[VAL_9]] { vector.transfer_read %{{.*}} {in_bounds = [true, true], permutation_map = #{{.*}}} : tensor<?x?xf32>, vector<4x8xf32> } : vector<8x4xi1> -> vector<4x8xf32>
-// CHECK: %[[VAL_12:.*]] = vector.create_mask %[[VAL_4]], %[[VAL_6]] : vector<4x8xi1>
-// CHECK: %[[VAL_13:.*]] = vector.mask %[[VAL_12]] { vector.transfer_read %{{.*}} {in_bounds = [true, true]} : tensor<?x?xf32>, vector<4x8xf32> } : vector<4x8xi1> -> vector<4x8xf32>
-// CHECK: %[[VAL_14:.*]] = arith.constant 0.000000e+00 : f32
-// CHECK: %[[VAL_15:.*]] = vector.mask %[[VAL_12]] { vector.transfer_read %{{.*}} {in_bounds = [true, true]} : tensor<?x?xf32>, vector<4x8xf32> } : vector<4x8xi1> -> vector<4x8xf32>
-// CHECK: %[[VAL_16:.*]] = arith.addf %[[VAL_10]], %[[VAL_13]] : vector<4x8xf32>
-// CHECK: %[[VAL_17:.*]] = vector.mask %[[VAL_12]] { vector.transfer_write %[[VAL_16]], %{{.*}} {in_bounds = [true, true]} : vector<4x8xf32>, tensor<?x?xf32> } : vector<4x8xi1> -> tensor<?x?xf32>
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [4, 8] : !transform.any_op
-}
-
-// -----
-
-func.func @vectorize_dynamic_generic_2d_broadcast(%arg0: tensor<?x?xf32>,
-                                                  %arg1: tensor<?x?xf32>,
-                                                  %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
-  %0 = linalg.generic { indexing_maps = [affine_map<(d0, d1) -> (0, d1)>,
-                                         affine_map<(d0, d1) -> (d0, d1)>,
-                                         affine_map<(d0, d1) -> (d0, d1)>],
-                        iterator_types = ["parallel", "parallel"] }
-    ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
-    outs(%arg2 : tensor<?x?xf32>) {
-    ^bb(%in0: f32, %in1: f32, %out: f32) :
-      %0 = arith.addf %in0, %in1 : f32
-      linalg.yield %0 : f32
-    } -> tensor<?x?xf32>
-  return %0 : tensor<?x?xf32>
-}
-
-// CHECK-LABEL: @vectorize_dynamic_generic_2d_broadcast
-// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
-// CHECK: %[[VAL_4:.*]] = tensor.dim %{{.*}}, %[[VAL_3]] : tensor<?x?xf32>
-// CHECK: %[[VAL_5:.*]] = arith.constant 1 : index
-// CHECK: %[[VAL_6:.*]] = tensor.dim %{{.*}}, %[[VAL_5]] : tensor<?x?xf32>
-// CHECK: %[[VAL_9:.*]] = vector.create_mask %[[VAL_6]] : vector<8xi1>
-// CHECK: %[[VAL_10:.*]] = vector.mask %[[VAL_9]] { vector.transfer_read %{{.*}} {in_bounds = [true, true], permutation_map = #{{.*}}} : tensor<?x?xf32>, vector<4x8xf32> } : vector<8xi1> -> vector<4x8xf32>
-// CHECK: %[[VAL_12:.*]] = vector.create_mask %[[VAL_4]], %[[VAL_6]] : vector<4x8xi1>
-// CHECK: %[[VAL_13:.*]] = vector.mask %[[VAL_12]] { vector.transfer_read %{{.*}} {in_bounds = [true, true]} : tensor<?x?xf32>, vector<4x8xf32> } : vector<4x8xi1> -> vector<4x8xf32>
-// CHECK: %[[VAL_15:.*]] = vector.mask %[[VAL_12]] { vector.transfer_read %{{.*}} {in_bounds = [true, true]} : tensor<?x?xf32>, vector<4x8xf32> } : vector<4x8xi1> -> vector<4x8xf32>
-// CHECK: %[[VAL_16:.*]] = arith.addf %[[VAL_10]], %[[VAL_13]] : vector<4x8xf32>
-// CHECK: %[[VAL_18:.*]] = vector.mask %[[VAL_12]] { vector.transfer_write %{{.*}} {in_bounds = [true, true]} : vector<4x8xf32>, tensor<?x?xf32> } : vector<4x8xi1> -> tensor<?x?xf32>
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [4, 8] : !transform.any_op
-}
-
-// -----
-
-func.func @vectorize_dynamic_reduction(%arg0: tensor<?x?xf32>,
-                                       %arg1: tensor<?xf32>) -> tensor<?xf32> {
-  %0 = linalg.generic { indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
-                                         affine_map<(d0, d1) -> (d0)>],
-                        iterator_types = ["parallel", "reduction"] }
-    ins(%arg0 : tensor<?x?xf32>)
-    outs(%arg1 : tensor<?xf32>) {
-    ^bb(%in: f32, %out: f32) :
-      %0 = arith.addf %in, %out : f32
-      linalg.yield %0 : f32
-    } -> tensor<?xf32>
-  return %0 : tensor<?xf32>
-}
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [4, 8] : !transform.any_op
-}
-
-// CHECK-LABEL: @vectorize_dynamic_reduction(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf32>,
-// CHECK-SAME: %[[VAL_1:.*]]: tensor<?xf32>) -> tensor<?xf32> {
-// CHECK: %[[VAL_2:.*]] = arith.constant 0 : index
-// CHECK: %[[VAL_3:.*]] = tensor.dim %[[VAL_0]], %[[VAL_2]] : tensor<?x?xf32>
-// CHECK: %[[VAL_4:.*]] = arith.constant 1 : index
-// CHECK: %[[VAL_5:.*]] = tensor.dim %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf32>
-// CHECK: %[[VAL_8:.*]] = vector.create_mask %[[VAL_3]], %[[VAL_5]] : vector<4x8xi1>
-// CHECK: %[[VAL_9:.*]] = vector.mask %[[VAL_8]] { vector.transfer_read %[[VAL_0]]{{.*}} {in_bounds = [true, true]} : tensor<?x?xf32>, vector<4x8xf32> } : vector<4x8xi1> -> vector<4x8xf32>
-// CHECK: %[[VAL_11:.*]] = vector.create_mask %[[VAL_3]] : vector<4xi1>
-// CHECK: %[[VAL_12:.*]] = vector.mask %[[VAL_11]] { vector.transfer_read %[[VAL_1]]{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
-// CHECK: %[[VAL_13:.*]] = vector.mask %[[VAL_8]] { vector.multi_reduction <add>, %[[VAL_9]], %[[VAL_12]] [1] : vector<4x8xf32> to vector<4xf32> } : vector<4x8xi1> -> vector<4xf32>
-// CHECK: %[[VAL_15:.*]] = vector.mask %[[VAL_11]] { vector.transfer_write %[[VAL_13]], %[[VAL_1]]{{.*}} {in_bounds = [true]} : vector<4xf32>, tensor<?xf32> } : vector<4xi1> -> tensor<?xf32>
-// CHECK: return %[[VAL_15]] : tensor<?xf32>
-// CHECK: }
-
-// -----
-
-func.func @vectorize_dynamic_transpose_reduction(%arg0: tensor<?x?x?xf32>,
-                                                 %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
-  %0 = linalg.generic { indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
-                                         affine_map<(d0, d1, d2) -> (d2, d1)>],
-                        iterator_types = ["reduction", "parallel", "parallel"] }
-    ins(%arg0 : tensor<?x?x?xf32>)
-    outs(%arg1 : tensor<?x?xf32>) {
-    ^bb(%in: f32, %out: f32) :
-      %0 = arith.addf %in, %out : f32
-      linalg.yield %0 : f32
-    } -> tensor<?x?xf32>
-  return %0 : tensor<?x?xf32>
-}
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [4, 8, 16] : !transform.any_op
-}
-
-// CHECK-LABEL: @vectorize_dynamic_transpose_reduction(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?x?xf32>,
-// CHECK-SAME: %[[VAL_1:.*]]: tensor<?x?xf32>) -> tensor<?x?xf32> {
-// CHECK: %[[VAL_2:.*]] = arith.constant 0 : index
-// CHECK: %[[VAL_3:.*]] = tensor.dim %[[VAL_0]], %[[VAL_2]] : tensor<?x?x?xf32>
-// CHECK: %[[VAL_4:.*]] = arith.constant 1 : index
-// CHECK: %[[VAL_5:.*]] = tensor.dim %[[VAL_0]], %[[VAL_4]] : tensor<?x?x?xf32>
-// CHECK: %[[VAL_6:.*]] = arith.constant 2 : index
-// CHECK: %[[VAL_7:.*]] = tensor.dim %[[VAL_0]], %[[VAL_6]] : tensor<?x?x?xf32>
-// CHECK: %[[VAL_10:.*]] = vector.create_mask %[[VAL_3]], %[[VAL_5]], %[[VAL_7]] : vector<4x8x16xi1>
-// CHECK: %[[VAL_11:.*]] = vector.mask %[[VAL_10]] { vector.transfer_read %[[VAL_0]]{{.*}} {in_bounds = [true, true, true]} : tensor<?x?x?xf32>, vector<4x8x16xf32> } : vector<4x8x16xi1> -> vector<4x8x16xf32>
-// CHECK: %[[VAL_13:.*]] = vector.create_mask %[[VAL_7]], %[[VAL_5]] : vector<16x8xi1>
-// CHECK: %[[VAL_14:.*]] = vector.mask %[[VAL_13]] { vector.transfer_read %[[VAL_1]]{{.*}} {in_bounds = [true, true], permutation_map = #{{.*}}} : tensor<?x?xf32>, vector<8x16xf32> } : vector<16x8xi1> -> vector<8x16xf32>
-// CHECK: %[[VAL_15:.*]] = vector.mask %[[VAL_10]] { vector.multi_reduction <add>, %[[VAL_11]], %[[VAL_14]] [0] : vector<4x8x16xf32> to vector<8x16xf32> } : vector<4x8x16xi1> -> vector<8x16xf32>
-// CHECK: %[[VAL_17:.*]] = vector.mask %[[VAL_13]] { vector.transfer_write %[[VAL_15]], %{{.*}} {in_bounds = [true, true], permutation_map = #{{.*}}} : vector<8x16xf32>, tensor<?x?xf32> } : vector<16x8xi1> -> tensor<?x?xf32>
-
-// -----
-
-func.func @vectorize_partial_dynamic_identity(%arg0: tensor<8x?xf32>,
-                                              %arg1: tensor<8x?xf32>,
-                                              %arg2: tensor<8x?xf32>) -> tensor<8x?xf32> {
-  %0 = linalg.generic { indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
-                                         affine_map<(d0, d1) -> (d0, d1)>,
-                                         affine_map<(d0, d1) -> (d0, d1)>],
-                        iterator_types = ["parallel", "parallel"] }
-    ins(%arg0, %arg1 : tensor<8x?xf32>, tensor<8x?xf32>)
-    outs(%arg2 : tensor<8x?xf32>) {
-    ^bb(%in0: f32, %in1: f32, %out: f32) :
-      %0 = arith.addf %in0, %in1 : f32
-      linalg.yield %0 : f32
-    } -> tensor<8x?xf32>
-  return %0 : tensor<8x?xf32>
-}
-
-// CHECK-LABEL: func.func @vectorize_partial_dynamic_identity(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<8x?xf32>, %[[VAL_1:.*]]: tensor<8x?xf32>, %[[VAL_2:.*]]: tensor<8x?xf32>) -> tensor<8x?xf32> {
-// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
-// CHECK-DAG: %[[VAL_4:.*]] = tensor.dim %[[VAL_0]], %[[VAL_3]] : tensor<8x?xf32>
-// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
-// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0.000000e+00 : f32
-// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 8 : index
-// CHECK: %[[VAL_8:.*]] = vector.create_mask %[[VAL_7]], %[[VAL_4]] : vector<8x32xi1>
-// CHECK: %[[VAL_9:.*]] = vector.mask %[[VAL_8]] { vector.transfer_read %[[VAL_0]][%[[VAL_5]], %[[VAL_5]]], %[[VAL_6]] {in_bounds = [true, true]} : tensor<8x?xf32>, vector<8x32xf32> } : vector<8x32xi1> -> vector<8x32xf32>
-// CHECK: %[[VAL_10:.*]] = arith.constant 0.000000e+00 : f32
-// CHECK: %[[VAL_11:.*]] = vector.mask %[[VAL_8]] { vector.transfer_read %[[VAL_1]][%[[VAL_5]], %[[VAL_5]]], %[[VAL_10]] {in_bounds = [true, true]} : tensor<8x?xf32>, vector<8x32xf32> } : vector<8x32xi1> -> vector<8x32xf32>
-// CHECK: %[[VAL_12:.*]] = arith.constant 0.000000e+00 : f32
-// CHECK: %[[VAL_13:.*]] = vector.mask %[[VAL_8]] { vector.transfer_read %[[VAL_2]][%[[VAL_5]], %[[VAL_5]]], %[[VAL_12]] {in_bounds = [true, true]} : tensor<8x?xf32>, vector<8x32xf32> } : vector<8x32xi1> -> vector<8x32xf32>
-// CHECK: %[[VAL_14:.*]] = arith.addf %[[VAL_9]], %[[VAL_11]] : vector<8x32xf32>
-// CHECK: %[[VAL_15:.*]] = arith.constant 0 : index
-// CHECK: %[[VAL_16:.*]] = vector.mask %[[VAL_8]] { vector.transfer_write %[[VAL_14]], %[[VAL_2]][%[[VAL_15]], %[[VAL_15]]] {in_bounds = [true, true]} : vector<8x32xf32>, tensor<8x?xf32> } : vector<8x32xi1> -> tensor<8x?xf32>
-
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [8, 32] : !transform.any_op
-}
-
-// -----
-
-func.func @do_not_generate_masks(%arg0: tensor<8x32xf32>,
-                                 %arg1: tensor<8x32xf32>,
-                                 %arg2: tensor<8x32xf32>) -> tensor<8x32xf32> {
-  %0 = linalg.generic { indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
-                                         affine_map<(d0, d1) -> (d0, d1)>,
-                                         affine_map<(d0, d1) -> (d0, d1)>],
-                        iterator_types = ["parallel", "parallel"] }
-    ins(%arg0, %arg1 : tensor<8x32xf32>, tensor<8x32xf32>)
-    outs(%arg2 : tensor<8x32xf32>) {
-    ^bb(%in0: f32, %in1: f32, %out: f32) :
-      %0 = arith.addf %in0, %in1 : f32
-      linalg.yield %0 : f32
-    } -> tensor<8x32xf32>
-  return %0 : tensor<8x32xf32>
-}
-
-// CHECK-LABEL: func.func @do_not_generate_masks
-// CHECK-NOT: vector.mask
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [8, 32] : !transform.any_op
-}
-
-// -----
-
-func.func @vectorize_static_shape_with_mask(%arg0: tensor<8x30xf32>,
-                                            %arg1: tensor<8x30xf32>,
-                                            %arg2: tensor<8x30xf32>) -> tensor<8x30xf32> {
-  %0 = linalg.generic { indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
-                                         affine_map<(d0, d1) -> (d0, d1)>,
-                                         affine_map<(d0, d1) -> (d0, d1)>],
-                        iterator_types = ["parallel", "parallel"] }
-    ins(%arg0, %arg1 : tensor<8x30xf32>, tensor<8x30xf32>)
-    outs(%arg2 : tensor<8x30xf32>) {
-    ^bb(%in0: f32, %in1: f32, %out: f32) :
-      %0 = arith.addf %in0, %in1 : f32
-      linalg.yield %0 : f32
-    } -> tensor<8x30xf32>
-  return %0 : tensor<8x30xf32>
-}
-
-// CHECK-LABEL: func.func @vectorize_static_shape_with_mask(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<8x30xf32>, %[[VAL_1:.*]]: tensor<8x30xf32>, %[[VAL_2:.*]]: tensor<8x30xf32>) -> tensor<8x30xf32> {
-// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
-// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0.000000e+00 : f32
-// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 8 : index
-// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 30 : index
-// CHECK: %[[VAL_7:.*]] = vector.create_mask %[[VAL_5]], %[[VAL_6]] : vector<8x32xi1>
-// CHECK: %[[VAL_8:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %[[VAL_0]][%[[VAL_3]], %[[VAL_3]]], %[[VAL_4]] {in_bounds = [true, true]} : tensor<8x30xf32>, vector<8x32xf32> } : vector<8x32xi1> -> vector<8x32xf32>
-// CHECK: %[[VAL_9:.*]] = arith.constant 0.000000e+00 : f32
-// CHECK: %[[VAL_10:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %[[VAL_1]][%[[VAL_3]], %[[VAL_3]]], %[[VAL_9]] {in_bounds = [true, true]} : tensor<8x30xf32>, vector<8x32xf32> } : vector<8x32xi1> -> vector<8x32xf32>
-// CHECK: %[[VAL_11:.*]] = arith.constant 0.000000e+00 : f32
-// CHECK: %[[VAL_12:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %[[VAL_2]][%[[VAL_3]], %[[VAL_3]]], %[[VAL_11]] {in_bounds = [true, true]} : tensor<8x30xf32>, vector<8x32xf32> } : vector<8x32xi1> -> vector<8x32xf32>
-// CHECK: %[[VAL_13:.*]] = arith.addf %[[VAL_8]], %[[VAL_10]] : vector<8x32xf32>
-// CHECK: %[[VAL_14:.*]] = arith.constant 0 : index
-// CHECK: %[[VAL_15:.*]] = vector.mask %[[VAL_7]] { vector.transfer_write %[[VAL_13]], %[[VAL_2]][%[[VAL_14]], %[[VAL_14]]] {in_bounds = [true, true]} : vector<8x32xf32>, tensor<8x30xf32> } : vector<8x32xi1> -> tensor<8x30xf32>
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [8, 32] : !transform.any_op
-}
-
-// -----
-
-func.func @vectorize_dynamic_fill(%A : tensor<?x?xf32>, %arg0 : f32) -> tensor<?x?xf32> {
-  %0 = linalg.fill ins(%arg0 : f32) outs(%A : tensor<?x?xf32>) -> tensor<?x?xf32>
-  return %0 : tensor<?x?xf32>
-}
-
-// CHECK-LABEL: func.func @vectorize_dynamic_fill
-// CHECK: %[[DIM0:.*]] = tensor.dim
-// CHECK: %[[DIM1:.*]] = tensor.dim
-// CHECK: %[[MASK:.*]] = vector.create_mask %[[DIM0]], %[[DIM1]] : vector<8x16xi1>
-// CHECK: %[[BCAST:.*]] = vector.broadcast %{{.*}} : f32 to vector<8x16xf32>
-// CHECK: vector.mask %[[MASK]] { vector.transfer_write %[[BCAST]], {{.*}} {in_bounds = [true, true]} : vector<8x16xf32>, tensor<?x?xf32> } : vector<8x16xi1>
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [8, 16] : !transform.any_op
-}
-
-// -----
-
-// CHECK-LABEL: func @test_masked_vectorize_linalg_copy
-func.func @test_masked_vectorize_linalg_copy(%A : memref<?x?xf32>, %B : memref<?x?xf32>) {
-  // CHECK: %[[c0:.*]] = arith.constant 0 : index
-  // CHECK: %[[d0:.*]] = memref.dim %{{.*}}, %[[c0]] : memref<?x?xf32>
-  // CHECK: %[[c1:.*]] = arith.constant 1 : index
-  // CHECK: %[[d1:.*]] = memref.dim %{{.*}}, %[[c1]] : memref<?x?xf32>
-  // CHECK: %[[mask:.*]] = vector.create_mask %[[d0]], %[[d1]] : vector<2x4xi1>
-  // CHECK: vector.mask %[[mask]] {{.*}} vector.transfer_read %{{.*}} {in_bounds = [true, true]} : memref<?x?xf32>, vector<2x4xf32> } : vector<2x4xi1> -> vector<2x4xf32>
-  // CHECK: vector.mask %[[mask]] {{.*}} vector.transfer_write %{{.*}} {in_bounds = [true, true]} : vector<2x4xf32>, memref<?x?xf32> } : vector<2x4xi1>
-  linalg.copy ins(%A : memref<?x?xf32>) outs(%B : memref<?x?xf32>)
-  return
-}
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [2, 4] : !transform.any_op
-}
-
-// -----
-
-// CHECK-LABEL: func @test_masked_vectorize_pad
-func.func @test_masked_vectorize_pad(
-  %0 : tensor<?x?xf32>, %h0 : index, %h1 : index)
-    -> tensor<2x4xf32>
-{
-  // CHECK-DAG: %[[c42:.*]] = arith.constant 4.243000e+01 : f32
-  // CHECK-DAG: %[[c0:.*]] = arith.constant 0 : index
-  // CHECK-DAG: %[[empty:.*]] = tensor.empty() : tensor<2x4xf32>
-  // CHECK: %[[d0:.*]] = tensor.dim {{.*}} : tensor<?x?xf32>
-  // CHECK: %[[d1:.*]] = tensor.dim {{.*}} : tensor<?x?xf32>
-  // CHECK: %[[mask:.*]] = vector.create_mask %[[d0]], %[[d1]] : vector<2x4xi1>
-  // CHECK-DAG: %[[c0_2:.*]] = arith.constant 0 : index
-  // CHECK: %[[masked_read:.*]] = vector.mask %[[mask]] {
-  // CHECK-SAME:   vector.transfer_read %{{.*}}[%[[c0_2]], %[[c0_2]]], %[[c42]]
-  // CHECK-SAME:   {in_bounds = [true, true]} : tensor<?x?xf32>, vector<2x4xf32>
-  // CHECK-SAME: } : vector<2x4xi1> -> vector<2x4xf32>
-  // CHECK: vector.transfer_write %[[masked_read]], %[[empty]][%[[c0_2]], %[[c0_2]]]
-  // CHECK-SAME:   {in_bounds = [true, true]} : vector<2x4xf32>, tensor<2x4xf32>
-  %cst = arith.constant 42.43 : f32
-  %c0 = arith.constant 0 : index
-  %1 = tensor.pad %0 low[0, %c0] high[%h0, %h1] {
-    ^bb0(%hh1: index, %hh2: index):
-      tensor.yield %cst : f32
-  } : tensor<?x?xf32> to tensor<2x4xf32>
-  return %1: tensor<2x4xf32>
-}
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [2, 4] : !transform.any_op
-}
-
-// -----
-
-// CHECK: #[[MAP:.+]] = affine_map<()[s0, s1] -> (s0 + s1)>
-// CHECK: func @test_masked_vectorize_dynamic_pad
-func.func @test_masked_vectorize_dynamic_pad(
-  %0 : tensor<?x?xf32>, %h0 : index, %h1 : index)
-    -> tensor<?x?xf32>
-{
-  // CHECK-DAG: %[[c42:.*]] = arith.constant 4.243000e+01 : f32
-  // CHECK-DAG: %[[c0:.*]] = arith.constant 0 : index
-  // CHECK-DAG: %[[res_d0:.+]] = affine.apply #[[MAP]]()
-  // CHECK-DAG: %[[res_d1:.+]] = affine.apply #[[MAP]]()
-  // CHECK-DAG: %[[empty:.*]] = tensor.empty(%[[res_d0]], %[[res_d1]]) : tensor<?x?xf32>
-  // CHECK: %[[d0:.*]] = tensor.dim {{.*}} : tensor<?x?xf32>
-  // CHECK: %[[d1:.*]] = tensor.dim {{.*}} : tensor<?x?xf32>
-  // CHECK: %[[mask:.*]] = vector.create_mask %[[d0]], %[[d1]] : vector<2x4xi1>
-  // CHECK-DAG: %[[c0_2:.*]] = arith.constant 0 : index
-  // CHECK: %[[masked_read:.*]] = vector.mask %[[mask]] {
-  // CHECK-SAME:   vector.transfer_read %{{.*}}[%[[c0_2]], %[[c0_2]]], %[[c42]]
-  // CHECK-SAME:   {in_bounds = [true, true]} : tensor<?x?xf32>, vector<2x4xf32>
-  // CHECK-SAME: } : vector<2x4xi1> -> vector<2x4xf32>
-  // CHECK: %[[mask_2:.*]] = vector.create_mask %[[res_d0]], %[[res_d1]] : vector<2x4xi1>
-  // CHECK: %[[masked_write:.*]] = vector.mask %[[mask_2]] {
-  // CHECK-SAME:   vector.transfer_write %[[masked_read]], %[[empty]][%[[c0_2]], %[[c0_2]]]
-  // CHECK-SAME:   {in_bounds = [true, true]} : vector<2x4xf32>, tensor<?x?xf32>
-  // CHECK: return %[[masked_write]] : tensor<?x?xf32>
-  %cst = arith.constant 42.43 : f32
-  %c0 = arith.constant 0 : index
-  %1 = tensor.pad %0 low[0, %c0] high[%h0, %h1] {
-    ^bb0(%hh1: index, %hh2: index):
-      tensor.yield %cst : f32
-  } : tensor<?x?xf32> to tensor<?x?xf32>
-  return %1: tensor<?x?xf32>
-}
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
-    : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [2, 4] : !transform.any_op
-}
-
-// -----
-
-func.func @matmul(%A: memref<?x?xf32>, %B: memref<?x?xf32>, %C: memref<?x?xf32>) {
-  linalg.matmul ins(%A, %B: memref<?x?xf32>, memref<?x?xf32>)
-                outs(%C: memref<?x?xf32>)
-  return
-}
-
-// CHECK-LABEL: func.func @matmul(
-// CHECK-SAME: %[[A:.*]]: memref<?x?xf32>, %[[B:.*]]: memref<?x?xf32>, %[[C:.*]]: memref<?x?xf32>) {
-// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
-// CHECK-DAG: %[[VAL_4:.*]] = memref.dim %[[A]], %[[VAL_3]] : memref<?x?xf32>
-// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
-// CHECK-DAG: %[[VAL_6:.*]] = memref.dim %[[B]], %[[VAL_5]] : memref<?x?xf32>
-// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index
-// CHECK-DAG: %[[VAL_8:.*]] = memref.dim %[[A]], %[[VAL_7]] : memref<?x?xf32>
-// CHECK: %[[MASK_A:.*]] = vector.create_mask %[[VAL_4]], %[[VAL_8]] : vector<8x4xi1>
-// CHECK: %[[LOAD_A:.*]] = vector.mask %[[MASK_A]] { vector.transfer_read %[[A]]{{\[}}%{{.*}}, %{{.*}}], %{{.*}} {in_bounds = [true, true, true], permutation_map = #{{.*}}} : memref<?x?xf32>, vector<8x16x4xf32> } : vector<8x4xi1> -> vector<8x16x4xf32>
-// CHECK: %[[MASK_B:.*]] = vector.create_mask %[[VAL_8]], %[[VAL_6]] : vector<4x16xi1>
-// CHECK: %[[LOAD_B:.*]] = vector.mask %[[MASK_B]] { vector.transfer_read %[[B]]{{\[}}%{{.*}}, %{{.*}}], %{{.*}} {in_bounds = [true, true, true], permutation_map = #{{.*}}} : memref<?x?xf32>, vector<8x16x4xf32> } : vector<4x16xi1> -> vector<8x16x4xf32>
-// CHECK: %[[MASK_C:.*]] = vector.create_mask %[[VAL_4]], %[[VAL_6]] : vector<8x16xi1>
-// CHECK: %[[LOAD_C:.*]] = vector.mask %[[MASK_C]] { vector.transfer_read %[[C]]{{\[}}%{{.*}}, %{{.*}}], %{{.*}} {in_bounds = [true, true]} : memref<?x?xf32>, vector<8x16xf32> } : vector<8x16xi1> -> vector<8x16xf32>
-// CHECK: %[[MULF:.*]] = arith.mulf %[[LOAD_A]], %[[LOAD_B]] : vector<8x16x4xf32>
-// CHECK: %[[MASK_MULIT_RED:.*]] = vector.create_mask %[[VAL_4]], %[[VAL_6]], %[[VAL_8]] : vector<8x16x4xi1>
-// CHECK: %[[MULTI_RED:.*]] = vector.mask %[[MASK_MULIT_RED]] { vector.multi_reduction <add>, %[[MULF]], %[[LOAD_C]] [2] : vector<8x16x4xf32> to vector<8x16xf32> } : vector<8x16x4xi1> -> vector<8x16xf32>
-// CHECK: %[[C2:.*]] = arith.constant 0 : index
-// CHECK: vector.mask %[[MASK_C]] { vector.transfer_write %[[MULTI_RED]], %[[C]]{{\[}}%[[C2]], %[[C2]]] {in_bounds = [true, true]} : vector<8x16xf32>, memref<?x?xf32> } : vector<8x16xi1>
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %matmul vector_sizes [8, 16, 4] : !transform.any_op
-}
-
-// -----
-
-func.func @matmul_scalable(%A: memref<?x?xf32>, %B: memref<?x?xf32>, %C: memref<?x?xf32>) {
-  linalg.matmul ins(%A, %B: memref<?x?xf32>, memref<?x?xf32>)
-                outs(%C: memref<?x?xf32>)
-  return
-}
-
-// CHECK-LABEL: func.func @matmul_scalable(
-// CHECK-SAME: %[[A:.*]]: memref<?x?xf32>, %[[B:.*]]: memref<?x?xf32>, %[[C:.*]]: memref<?x?xf32>) {
-// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
-// CHECK-DAG: %[[VAL_4:.*]] = memref.dim %[[A]], %[[VAL_3]] : memref<?x?xf32>
-// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
-// CHECK-DAG: %[[VAL_6:.*]] = memref.dim %[[B]], %[[VAL_5]] : memref<?x?xf32>
-// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index
-// CHECK-DAG: %[[VAL_8:.*]] = memref.dim %[[A]], %[[VAL_7]] : memref<?x?xf32>
-// CHECK: %[[MASK_A:.*]] = vector.create_mask %[[VAL_4]], %[[VAL_8]] : vector<8x4xi1>
-// CHECK: %[[LOAD_A:.*]] = vector.mask %[[MASK_A]] { vector.transfer_read %[[A]]{{\[}}%{{.*}}, %{{.*}}], %{{.*}} {in_bounds = [true, true, true], permutation_map = #{{.*}}} : memref<?x?xf32>, vector<8x[16]x4xf32> } : vector<8x4xi1> -> vector<8x[16]x4xf32>
-// CHECK: %[[MASK_B:.*]] = vector.create_mask %[[VAL_8]], %[[VAL_6]] : vector<4x[16]xi1>
-// CHECK: %[[LOAD_B:.*]] = vector.mask %[[MASK_B]] { vector.transfer_read %[[B]]{{\[}}%{{.*}}, %{{.*}}], %{{.*}} {in_bounds = [true, true, true], permutation_map = #{{.*}}} : memref<?x?xf32>, vector<8x[16]x4xf32> } : vector<4x[16]xi1> -> vector<8x[16]x4xf32>
-// CHECK: %[[MASK_C:.*]] = vector.create_mask %[[VAL_4]], %[[VAL_6]] : vector<8x[16]xi1>
-// CHECK: %[[LOAD_C:.*]] = vector.mask %[[MASK_C]] { vector.transfer_read %[[C]]{{\[}}%{{.*}}, %{{.*}}], %{{.*}} {in_bounds = [true, true]} : memref<?x?xf32>, vector<8x[16]xf32> } : vector<8x[16]xi1> -> vector<8x[16]xf32>
-// CHECK: %[[MULF:.*]] = arith.mulf %[[LOAD_A]], %[[LOAD_B]] : vector<8x[16]x4xf32>
-// CHECK: %[[MASK_MULIT_RED:.*]] = vector.create_mask %[[VAL_4]], %[[VAL_6]], %[[VAL_8]] : vector<8x[16]x4xi1>
-// CHECK: %[[MULTI_RED:.*]] = vector.mask %[[MASK_MULIT_RED]] { vector.multi_reduction <add>, %[[MULF]], %[[LOAD_C]] [2] : vector<8x[16]x4xf32> to vector<8x[16]xf32> } : vector<8x[16]x4xi1> -> vector<8x[16]xf32>
-// CHECK: %[[C2:.*]] = arith.constant 0 : index
-// CHECK: vector.mask %[[MASK_C]] { vector.transfer_write %[[MULTI_RED]], %[[C]]{{\[}}%[[C2]], %[[C2]]] {in_bounds = [true, true]} : vector<8x[16]xf32>, memref<?x?xf32> } : vector<8x[16]xi1>
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %matmul vector_sizes [8, [16], 4] : !transform.any_op
-}
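// In the `vector_sizes` list, a size wrapped in brackets marks that dimension
// as scalable, as the renamed tests below exercise. A minimal sketch, with a
// hypothetical handle %0:
//
//   transform.structured.vectorize %0 vector_sizes [8, [16]] : !transform.any_op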
diff --git a/mlir/test/Dialect/Linalg/vectorization-scalable.mlir b/mlir/test/Dialect/Linalg/vectorization-scalable.mlir
index 957313b43d4b..641b626f576e 100644
--- a/mlir/test/Dialect/Linalg/vectorization-scalable.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization-scalable.mlir
@@ -29,7 +29,7 @@ func.func @vectorize_dynamic_identity(%arg0: tensor<?xf32>,
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [[4]] : !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [[4]] : !transform.any_op
 }
 
 // -----
 
@@ -71,7 +71,7 @@ func.func @vectorize_partial_dynamic_identity(%arg0: tensor<8x?xf32>,
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [8, [32]] : !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [8, [32]] : !transform.any_op
 }
 
 // -----
 
@@ -111,7 +111,7 @@ func.func @vectorize_static_shape_with_mask(%arg0: tensor<8x30xf32>,
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [8, [32]] : !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [8, [32]] : !transform.any_op
 }
 
 // -----
 
@@ -131,6 +131,6 @@ func.func @vectorize_dynamic_fill(%A : tensor<?x?xf32>, %arg0 : f32) -> tensor<
   %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [8, [16]] : !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [8, [16]] : !transform.any_op
 }
diff --git a/mlir/test/Dialect/Linalg/vectorization-with-patterns.mlir b/mlir/test/Dialect/Linalg/vectorization-with-patterns.mlir
new file mode 100644
index 000000000000..ecba1f324680
--- /dev/null
+++ b/mlir/test/Dialect/Linalg/vectorization-with-patterns.mlir
@@ -0,0 +1,1787 @@
+// RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file | FileCheck %s
+
+// CHECK-LABEL: contraction_dot
+func.func @contraction_dot(%A: memref<1584xf32>, %B: memref<1584xf32>, %C: memref<f32>) {
+
+// CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<1584xf32>
+// CHECK: vector.multi_reduction <add>, %{{.*}}, {{.*}} [0] : vector<1584xf32> to f32
+  linalg.dot ins(%A, %B: memref<1584xf32>, memref<1584xf32>)
+            outs(%C: memref<f32>)
+  return
+}
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %0 = transform.structured.match ops{["linalg.dot"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  transform.structured.vectorize %0 : !transform.any_op
+}
+
+// -----
+
+// CHECK-LABEL: contraction_matvec
+func.func @contraction_matvec(%A: memref<1584x1584xf32>, %B: memref<1584xf32>, %C: memref<1584xf32>) {
+
+// CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<1584x1584xf32>
+// CHECK: vector.multi_reduction <add>, %{{.*}}, {{.*}} [1] : vector<1584x1584xf32> to vector<1584xf32>
+  linalg.matvec ins(%A, %B: memref<1584x1584xf32>, memref<1584xf32>)
+               outs(%C: memref<1584xf32>)
+  return
+}
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %0 = transform.structured.match ops{["linalg.matvec"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op
+}
+
+// -----
+
+// CHECK-LABEL: contraction_matmul
+func.func @contraction_matmul(%A: memref<1584x1584xf32>, %B: memref<1584x1584xf32>, %C: memref<1584x1584xf32>) {
+// CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<1584x1584x1584xf32>
+// CHECK: vector.multi_reduction <add>, %{{.*}}, {{.*}} [2] : vector<1584x1584x1584xf32> to vector<1584x1584xf32>
+  linalg.matmul ins(%A, %B: memref<1584x1584xf32>, memref<1584x1584xf32>)
+                outs(%C: memref<1584x1584xf32>)
+  return
+}
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op
+}
+
+// -----
+
+// CHECK-LABEL: contraction_batch_matmul
+func.func @contraction_batch_matmul(%A: memref<1584x1584x1584xf32>, %B: memref<1584x1584x1584xf32>, %C: memref<1584x1584x1584xf32>) {
+// CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<1584x1584x1584x1584xf32>
+// CHECK: vector.multi_reduction <add>, %{{.*}}, {{.*}} [3] : vector<1584x1584x1584x1584xf32> to vector<1584x1584x1584xf32>
+  linalg.batch_matmul
+    ins(%A, %B: memref<1584x1584x1584xf32>, memref<1584x1584x1584xf32>)
+    outs(%C: memref<1584x1584x1584xf32>)
+  return
+}
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %0 = transform.structured.match ops{["linalg.batch_matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op
+}
+
+// -----
+
+#matmul_trait = {
+  args_in = 2,
+  args_out = 1,
+  indexing_maps = [
+    affine_map<(m, n, k) -> (m, k)>,
+    affine_map<(m, n, k) -> (k, n)>,
+    affine_map<(m, n, k) -> (m, n)>
+  ],
+  iterator_types = ["parallel", "parallel", "reduction"]
+}
+
+// CHECK-LABEL: func @vectorization_test
+func.func @vectorization_test(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
+                              %C: memref<8x32xf32>) {
+  // CHECK: vector.transfer_read %{{.*}} : memref<8x16xf32>, vector<8x32x16xf32>
+  // CHECK: vector.transfer_read %{{.*}} : memref<16x32xf32>, vector<8x32x16xf32>
+  // CHECK: %[[ACC:.*]] = vector.transfer_read %{{.*}} : memref<8x32xf32>, vector<8x32xf32>
+  // CHECK: %[[MUL:.*]] = arith.mulf %{{.*}}, %{{.*}} : vector<8x32x16xf32>
+  // CHECK: %[[R:.*]] = vector.multi_reduction <add>, %[[MUL]], %[[ACC]] [2] : vector<8x32x16xf32> to vector<8x32xf32>
+  // CHECK: vector.transfer_write %{{.*}}, %{{.*}} : vector<8x32xf32>, memref<8x32xf32>
+  linalg.generic #matmul_trait
+    ins(%A, %B : memref<8x16xf32>, memref<16x32xf32>)
+    outs(%C : memref<8x32xf32>) {
+    ^bb(%a: f32, %b: f32, %c: f32) :
+      %d = arith.mulf %a, %b: f32
+      %e = arith.addf %c, %d: f32
+      linalg.yield %e : f32
+  }
+  return
+}
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+}
+
+// -----
+
+#matmul_transpose_out_trait = {
+  args_in = 2,
+  args_out = 1,
+  indexing_maps = [
+    affine_map<(m, n, k) -> (m, k)>,
+    affine_map<(m, n, k) -> (k, n)>,
+    affine_map<(m, n, k) -> (n, m)>
+  ],
+  iterator_types = ["parallel", "parallel", "reduction"]
+}
+
+// CHECK-LABEL: func @generic_output_transpose
+func.func @generic_output_transpose(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
+                                    %C: memref<32x8xf32>) {
+  // CHECK: vector.transfer_read %{{.*}} : memref<8x16xf32>, vector<8x32x16xf32>
+  // CHECK: vector.transfer_read %{{.*}} : memref<16x32xf32>, vector<8x32x16xf32>
+  // CHECK: %[[ACC:.*]] = vector.transfer_read %{{.*}} : memref<32x8xf32>, vector<8x32xf32>
+  // CHECK: %[[MUL:.*]] = arith.mulf %{{.*}}, %{{.*}} : vector<8x32x16xf32>
+  // CHECK: %[[R:.*]] = vector.multi_reduction <add>, %[[MUL]], %[[ACC]] [2] : vector<8x32x16xf32> to vector<8x32xf32>
+  // CHECK: vector.transfer_write %{{.*}}, %{{.*}} : vector<8x32xf32>, memref<32x8xf32>
+  linalg.generic #matmul_transpose_out_trait
+    ins(%A, %B : memref<8x16xf32>, memref<16x32xf32>)
+    outs(%C : memref<32x8xf32>) {
+    ^bb(%a: f32, %b: f32, %c: f32) :
+      %d = arith.mulf %a, %b: f32
+      %e = arith.addf %c, %d: f32
+      linalg.yield %e : f32
+  }
+  return
+}
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+}
+
+// -----
+
+#map0 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+#map1 = affine_map<(d0, d1, d2) -> (d1, d0, d2)>
+// CHECK: #[[MAP:.+]] = affine_map<(d0, d1, d2) -> (d1, d0, d2)>
+// CHECK: func @generic_interchanged_transpose
+func.func @generic_interchanged_transpose(%arg0: tensor<12x128x32xf32>) -> tensor<128x12x32xf32> {
+  // CHECK: %[[IN:.+]] = vector.transfer_read
+  // CHECK: vector.transfer_write %[[IN]], {{.+}} permutation_map = #[[MAP]]
+  %0 = tensor.empty() : tensor<128x12x32xf32>
+  %1 = linalg.generic {indexing_maps = [#map0, #map1],
+                       iterator_types = ["parallel", "parallel", "parallel"]}
+    ins(%arg0 : tensor<12x128x32xf32>)
+    outs(%0 : tensor<128x12x32xf32>) {
+    ^bb0(%arg1: f32, %arg2: f32):
+      linalg.yield %arg1 : f32
+  } -> tensor<128x12x32xf32>
+  return %1 : tensor<128x12x32xf32>
+}
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+}
+
+// -----
+
+#matmul_trait = {
+  args_in = 2,
+  args_out = 1,
+  indexing_maps = [
+    affine_map<(m, n, k) -> (m, k)>,
+    affine_map<(m, n, k) -> (k, n)>,
+    affine_map<(m, n, k) -> (m, n)>
+  ],
+  iterator_types = ["parallel", "parallel", "reduction"]
+}
+
+// CHECK-LABEL: func @vectorization_test_integer
+func.func @vectorization_test_integer(%A: memref<8x16xi32>, %B: memref<16x32xi32>,
+                                      %C: memref<8x32xi32>) {
+  // CHECK: vector.transfer_read %{{.*}} : memref<8x16xi32>, vector<8x32x16xi32>
+  // CHECK: vector.transfer_read %{{.*}} : memref<16x32xi32>, vector<8x32x16xi32>
+  // CHECK: %[[ACC:.*]] = vector.transfer_read %{{.*}} : memref<8x32xi32>, vector<8x32xi32>
+  // CHECK: %[[MUL:.*]] = arith.muli %{{.*}}, %{{.*}} : vector<8x32x16xi32>
+  // CHECK: vector.multi_reduction <add>, %[[MUL]], %[[ACC]] [2] : vector<8x32x16xi32> to vector<8x32xi32>
+  // CHECK: vector.transfer_write %{{.*}}, %{{.*}} : vector<8x32xi32>, memref<8x32xi32>
+  linalg.generic #matmul_trait
+    ins(%A, %B : memref<8x16xi32>, memref<16x32xi32>)
+    outs(%C : memref<8x32xi32>) {
+    ^bb(%a: i32, %b: i32, %c: i32) :
+      %d = arith.muli %a, %b: i32
+      %e = arith.addi %c, %d: i32
+      linalg.yield %e : i32
+  }
+  return
+}
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
+}
+
+// -----
+
+// CHECK-LABEL: func @vectorization_test_2
+func.func @vectorization_test_2(%A: memref<8x16xf32>, %B: memref<16x32xf32>,
+                                %C: memref<8x32xf32>) {
+  // CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<8x32x16xf32>
+  // CHECK: vector.multi_reduction <add>, %{{.*}}, {{.*}} [2] : vector<8x32x16xf32> to vector<8x32xf32>
+  linalg.matmul
+    ins(%A, %B: memref<8x16xf32>, memref<16x32xf32>)
+    outs(%C: memref<8x32xf32>)
+  return
+}
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op
+}
+
+// -----
+
+// CHECK-LABEL: func @test_vectorize_scalar_input
+func.func @test_vectorize_scalar_input(%A : memref<8x16xf32>, %arg0 : f32) {
+  // CHECK: %[[V:.*]] = vector.broadcast {{.*}} : f32 to vector<8x16xf32>
+  // CHECK: vector.transfer_write %[[V]], {{.*}} : vector<8x16xf32>, memref<8x16xf32>
+  linalg.generic {
+    indexing_maps = [affine_map<(m, n) -> ()>, affine_map<(m, n) -> (m, n)>],
+    iterator_types = ["parallel", "parallel"]}
+   ins(%arg0 : f32)
+   outs(%A: memref<8x16xf32>) {
+    ^bb(%0: f32, %1: f32) :
+      linalg.yield %0 : f32
+  }
+  return
+}
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+}
+
+// -----
+
+// CHECK-LABEL: func @test_do_not_vectorize_unsupported_element_types
+func.func @test_do_not_vectorize_unsupported_element_types(%A : memref<8x16xcomplex<f32>>, %arg0 : complex<f32>) {
+  // CHECK-NOT: vector.broadcast
+  // CHECK-NOT: vector.transfer_write
+  linalg.generic {
+    indexing_maps = [affine_map<(m, n) -> ()>, affine_map<(m, n) -> (m, n)>],
+    iterator_types = ["parallel", "parallel"]}
+   ins(%arg0 : complex<f32>)
+   outs(%A: memref<8x16xcomplex<f32>>) {
+    ^bb(%0: complex<f32>, %1: complex<f32>) :
+      linalg.yield %0 : complex<f32>
+  }
+  return
+}
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+}
+
+// -----
+
+#map0 = affine_map<(d0) -> (d0)> + +func.func @vectorize_affine_apply(%arg0: tensor<5xf32>, %arg3: index) -> tensor<5xi32> { + %0 = tensor.empty() : tensor<5xi32> + %1 = linalg.generic {indexing_maps = [#map0, #map0], + iterator_types = ["parallel"]} + ins(%arg0 : tensor<5xf32>) + outs(%0 : tensor<5xi32>) { + ^bb0(%arg1: f32, %arg2: i32): + %2 = linalg.index 0 : index + %11 = affine.apply affine_map<() -> (123)>() + %12 = affine.apply affine_map<(d0, d1) -> (d0 + d1)>(%2, %11) + %13 = affine.apply affine_map<(d0)[s0] -> (d0 + s0)>(%12)[%arg3] + %14 = affine.apply affine_map<(d0) -> (d0 + 1)>(%13) + %15 = affine.apply affine_map<(d0, d1, d2) -> (d0 + d1 + d2)>(%13, %14, %12) + %3 = arith.index_cast %15 : index to i32 + linalg.yield %3 : i32 + } -> tensor<5xi32> + return %1 : tensor<5xi32> +} + +// CHECK-LABEL: func.func @vectorize_affine_apply +// CHECK-SAME: %arg0: tensor<5xf32> +// CHECK-SAME: %[[ARG1:.*]]: index +// CHECK: %[[CST:.*]] = arith.constant dense<[123, 124, 125, 126, 127]> : vector<5xindex> +// CHECK: %[[CST_0:.*]] = arith.constant dense<1> : vector<5xindex> +// CHECK: %[[C0:.*]] = arith.constant 0 : index +// CHECK: %[[EMPTY:.*]] = tensor.empty() : tensor<5xi32> +// CHECK: %[[BCAST:.*]] = vector.broadcast %[[ARG1]] : index to vector<5xindex> +// CHECK: %[[ADDI_1:.*]] = arith.addi %[[BCAST]], %[[CST]] : vector<5xindex> +// CHECK: %[[ADDI_2:.*]] = arith.addi %[[ADDI_1]], %[[CST_0]] : vector<5xindex> +// CHECK: %[[ADDI_3:.*]] = arith.addi %[[ADDI_1]], %[[ADDI_2]] : vector<5xindex> +// CHECK: %[[ADDI_4:.*]] = arith.addi %[[ADDI_3]], %[[CST]] : vector<5xindex> +// CHECK: %[[CAST:.*]] = arith.index_cast %[[ADDI_4]] : vector<5xindex> to vector<5xi32> +// CHECK: vector.transfer_write %[[CAST]], %[[EMPTY]][%[[C0:.*]]] {in_bounds = [true]} : vector<5xi32>, tensor<5xi32> + +transform.sequence failures(propagate) { + ^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-LABEL: func @test_vectorize_fill +func.func @test_vectorize_fill(%A : memref<8x16xf32>, %arg0 : f32) { + // CHECK: %[[V:.*]] = vector.broadcast {{.*}} : f32 to vector<8x16xf32> + // CHECK: vector.transfer_write %[[V]], {{.*}} : vector<8x16xf32>, memref<8x16xf32> + linalg.fill ins(%arg0 : f32) outs(%A : memref<8x16xf32>) + return +} + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-LABEL: func @test_vectorize_fill +func.func @test_vectorize_fill_scalar(%A : memref<f32>, %arg0 : f32) { + // CHECK-SAME: (%[[M:.*]]: memref<f32>, %[[val:.*]]: f32) + // CHECK: %[[VEC:.*]] = vector.broadcast %[[val]] : f32 to vector<f32> + // CHECK: vector.transfer_write %[[VEC]], %[[M]][] : vector<f32>, memref<f32> + linalg.fill ins(%arg0 : f32) outs(%A : memref<f32>) + return +} + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 
{isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-LABEL: func @test_vectorize_copy +func.func @test_vectorize_copy(%A : memref<8x16xf32>, %B : memref<8x16xf32>) { + // CHECK: %[[V:.*]] = vector.transfer_read {{.*}} : memref<8x16xf32>, vector<8x16xf32> + // CHECK: vector.transfer_write %[[V]], {{.*}} : vector<8x16xf32>, memref<8x16xf32> + memref.copy %A, %B : memref<8x16xf32> to memref<8x16xf32> + return +} + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["memref.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-LABEL: func @test_vectorize_copy_scalar +func.func @test_vectorize_copy_scalar(%A : memref<f32>, %B : memref<f32>) { + // CHECK-SAME: (%[[A:.*]]: memref<f32>, %[[B:.*]]: memref<f32>) + // CHECK: %[[V:.*]] = vector.transfer_read %[[A]][]{{.*}} : memref<f32>, vector<f32> + // CHECK: %[[val:.*]] = vector.extractelement %[[V]][] : vector<f32> + // CHECK: %[[VV:.*]] = vector.broadcast %[[val]] : f32 to vector<f32> + // CHECK: vector.transfer_write %[[VV]], %[[B]][] : vector<f32>, memref<f32> + memref.copy %A, %B : memref<f32> to memref<f32> + return +} + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["memref.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-LABEL: func @test_vectorize_copy_complex +// CHECK-NOT: vector< +func.func @test_vectorize_copy_complex(%A : memref<8x16xcomplex<f32>>, %B : memref<8x16xcomplex<f32>>) { + memref.copy %A, %B : memref<8x16xcomplex<f32>> to memref<8x16xcomplex<f32>> + return +} + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["memref.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-LABEL: func @test_vectorize_trailing_index + // CHECK-SAME: (%[[ARG0:.*]]: memref<1x2x4x8xindex>) +func.func @test_vectorize_trailing_index(%arg0: memref<1x2x4x8xindex>) { + // CHECK-DAG: %[[CST0:.*]] = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7]> : vector<8xindex> + // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index + linalg.generic { + indexing_maps = [ + affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], + iterator_types = ["parallel", "parallel", "parallel", "parallel"]} + outs(%arg0: memref<1x2x4x8xindex>) { + ^bb0(%arg1: index): + // CHECK: %[[BCST:.*]] = vector.broadcast %[[CST0]] : vector<8xindex> to vector<1x2x4x8xindex> + // CHECK: vector.transfer_write %[[BCST]], %[[ARG0]][%[[C0]], %[[C0]], %[[C0]], %[[C0]]] {{.*}} : vector<1x2x4x8xindex>, memref<1x2x4x8xindex> + %0 = linalg.index 3 : index + linalg.yield %0 : index + } + return +} + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : 
(!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-LABEL: func @test_vectorize_inner_index + // CHECK-SAME: (%[[ARG0:.*]]: memref<1x2x4x8xindex>) +func.func @test_vectorize_inner_index(%arg0: memref<1x2x4x8xindex>) { + // CHECK-DAG: %[[CST0:.*]] = arith.constant dense<[0, 1]> : vector<2xindex> + // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index + linalg.generic { + indexing_maps = [ + affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], + iterator_types = ["parallel", "parallel", "parallel", "parallel"]} + outs(%arg0: memref<1x2x4x8xindex>) { + ^bb0(%arg1: index): + // CHECK: %[[BCST:.*]] = vector.broadcast %[[CST0]] : vector<2xindex> to vector<1x8x4x2xindex> + // CHECK: %[[TRAN:.*]] = vector.transpose %[[BCST]], [0, 3, 2, 1] : vector<1x8x4x2xindex> to vector<1x2x4x8xindex> + // CHECK: vector.transfer_write %[[TRAN]], %[[ARG0]][%[[C0]], %[[C0]], %[[C0]], %[[C0]]] {{.*}} : vector<1x2x4x8xindex>, memref<1x2x4x8xindex> + %0 = linalg.index 1 : index + linalg.yield %0 : index + } + return +} + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-LABEL: func @generic_vectorize + // CHECK-SAME: (%[[ARG0:.*]]: memref<4x256xf32>, %[[ARG1:.*]]: memref<4x256xf32>, + // CHECK-SAME: %[[ARG2:.*]]: memref<256xf32>, %[[ARG3:.*]]: f32) +func.func @generic_vectorize(%arg0: memref<4x256xf32>, + %arg1: memref<4x256xf32>, + %arg2: memref<256xf32>, %i: f32) { + // CHECK-DAG: %[[CST0:.*]] = arith.constant dense<2.000000e+00> : vector<4x256xf32> + // CHECK-DAG: %[[CST1:.*]] = arith.constant dense<1.000000e+00> : vector<4x256xf32> + // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index + %c1_f32 = arith.constant 1.0 : f32 + linalg.generic { + args_in = 0 : i64, + args_out = 10 : i64, + indexing_maps = [ + affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d1)>, + affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0, d1)>], + iterator_types = ["parallel", "parallel"]} + ins(%arg1, %arg2: memref<4x256xf32>, memref<256xf32>) + outs( + %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0 : + memref<4x256xf32>, memref<4x256xf32>, memref<4x256xf32>, memref<4x256xf32>, + memref<4x256xf32>, memref<4x256xf32>, memref<4x256xf32>, memref<4x256xf32>, + memref<4x256xf32>, memref<4x256xf32>) { + ^bb0(%arg3 : f32, %arg4 : f32, %arg5: f32, %arg6: f32, %arg7: f32, %arg8: f32, + // CHECK: %[[V2:.*]] = vector.transfer_read %[[ARG1]][%[[C0]], %[[C0]]], {{.*}} : memref<4x256xf32>, vector<4x256xf32> + // CHECK: %[[V0:.*]] = vector.transfer_read %[[ARG2]][%[[C0]]], {{.*}} : memref<256xf32>, vector<4x256xf32> + // CHECK: %[[V3:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], {{.*}} : memref<4x256xf32>, vector<4x256xf32> + // CHECK: %[[V1:.*]] = 
vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], {{.*}} : memref<4x256xf32>, vector<4x256xf32> + %arg9 : f32, %arg10 : f32, %arg11 : f32, %arg12 : f32, %arg13 : f32, + %arg14 : f32): + // CHECK: %[[ADD:.*]] = arith.addf %[[V0]], %[[V1]] : vector<4x256xf32> + %6 = arith.addf %arg4, %arg6 : f32 + // CHECK: %[[CMP:.*]] = arith.cmpf ogt, %[[V2]], %[[V1]] : vector<4x256xf32> + %7 = arith.cmpf ogt, %arg3, %arg6 : f32 + // CHECK: %[[ARG3B:.*]] = vector.broadcast %[[ARG3]] : f32 to vector<4x256xf32> + %8 = arith.constant 2.0 : f32 + // CHECK: %[[DIV:.*]] = arith.divf %[[V3]], %[[ARG3B]] : vector<4x256xf32> + %9 = arith.divf %arg5, %i : f32 + // CHECK: %[[EXP:.*]] = math.exp2 %[[V3]] : vector<4x256xf32> + %10 = math.exp2 %arg5 : f32 + // CHECK: %[[MUL:.*]] = arith.mulf %[[V3]], %[[CST0]] : vector<4x256xf32> + %11 = arith.mulf %arg5, %8 : f32 + // CHECK: %[[RSQRT:.*]] = math.rsqrt %[[V3]] : vector<4x256xf32> + %12 = math.rsqrt %arg5 : f32 + // CHECK: %[[SEL:.*]] = arith.select %[[CMP]], %[[V3]], %[[V1]] : vector<4x256xi1>, vector<4x256xf32> + %13 = arith.select %7, %arg5, %arg6 : f32 + // CHECK: %[[SUB:.*]] = arith.subf %[[V3]], %[[V0]] : vector<4x256xf32> + %14 = arith.subf %arg5, %arg4 : f32 + // CHECK: %[[TAN:.*]] = math.tanh %[[V3]] : vector<4x256xf32> + %15 = math.tanh %arg5 : f32 + // CHECK: vector.transfer_write %[[ADD]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32> + // CHECK: vector.transfer_write %[[CST0]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32> + // CHECK: vector.transfer_write %[[CST1]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32> + // CHECK: vector.transfer_write %[[DIV]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32> + // CHECK: vector.transfer_write %[[EXP]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32> + // CHECK: vector.transfer_write %[[MUL]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32> + // CHECK: vector.transfer_write %[[RSQRT]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32> + // CHECK: vector.transfer_write %[[SEL]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32> + // CHECK: vector.transfer_write %[[SUB]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32> + // CHECK: vector.transfer_write %[[TAN]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32> + linalg.yield %6, %8, %c1_f32, %9, %10, %11, %12, %13, %14, %15 : f32, f32, + f32, f32, f32, f32, f32, f32, f32, f32 + } + return +} + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-LABEL: func @generic_vectorize_tensor +// CHECK-SAME: (%[[ARG0:.*]]: tensor<4x256xf32>, %[[ARG1:.*]]: tensor<4x256xf32>, +// CHECK-SAME: %[[ARG2:.*]]: tensor<256xf32>, %[[ARG3:.*]]: f32) +func.func @generic_vectorize_tensor(%arg0: tensor<4x256xf32>, + %arg1: tensor<4x256xf32>, %arg2: tensor<256xf32>, + %i: f32) -> (tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, + tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, + 
tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>) { + %c1_f32 = arith.constant 1.0 : f32 + %r:10 = linalg.generic { + indexing_maps = [ + affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d1)>, + affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0, d1)>], + iterator_types = ["parallel", "parallel"]} + ins(%arg1, %arg2: tensor<4x256xf32>, tensor<256xf32>) + outs( + %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0 : + tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, + tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, + tensor<4x256xf32>, tensor<4x256xf32>) { + ^bb0(%arg3 : f32, %arg4 : f32, %arg5: f32, %arg6: f32, %arg7: f32, %arg8: f32, + %arg9 : f32, %arg10 : f32, %arg11 : f32, %arg12 : f32, %arg13 : f32, + %arg14 : f32): + // CHECK-DAG: %[[CST0:.*]] = arith.constant dense<2.000000e+00> : vector<4x256xf32> + // CHECK-DAG: %[[CST1:.*]] = arith.constant dense<1.000000e+00> : vector<4x256xf32> + // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index + // CHECK: %[[V2:.*]] = vector.transfer_read %[[ARG1]][%[[C0]], %[[C0]]], {{.*}} : tensor<4x256xf32>, vector<4x256xf32> + // CHECK: %[[V0:.*]] = vector.transfer_read %[[ARG2]][%[[C0]]], {{.*}} : tensor<256xf32>, vector<4x256xf32> + // CHECK: %[[V3:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], {{.*}} : tensor<4x256xf32>, vector<4x256xf32> + // CHECK: %[[V1:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], {{.*}} : tensor<4x256xf32>, vector<4x256xf32> + // CHECK: %[[ADD:.*]] = arith.addf %[[V0]], %[[V1]] : vector<4x256xf32> + %6 = arith.addf %arg4, %arg6 : f32 + // CHECK: %[[CMP:.*]] = arith.cmpf ogt, %[[V2]], %[[V1]] : vector<4x256xf32> + %7 = arith.cmpf ogt, %arg3, %arg6 : f32 + // CHECK: %[[ARG3B:.*]] = vector.broadcast %[[ARG3]] : f32 to vector<4x256xf32> + %8 = arith.constant 2.0 : f32 + // CHECK: %[[DIV:.*]] = arith.divf %[[V3]], %[[ARG3B]] : vector<4x256xf32> + %9 = arith.divf %arg5, %i : f32 + // CHECK: %[[EXP:.*]] = math.exp2 %[[V3]] : vector<4x256xf32> + %10 = math.exp2 %arg5 : f32 + // CHECK: %[[MUL:.*]] = arith.mulf %[[V3]], %[[CST0]] : vector<4x256xf32> + %11 = arith.mulf %arg5, %8 : f32 + // CHECK: %[[RSQRT:.*]] = math.rsqrt %[[V3]] : vector<4x256xf32> + %12 = math.rsqrt %arg5 : f32 + // CHECK: %[[SEL:.*]] = arith.select %[[CMP]], %[[V3]], %[[V1]] : vector<4x256xi1>, vector<4x256xf32> + %13 = arith.select %7, %arg5, %arg6 : f32 + // CHECK: %[[SUB:.*]] = arith.subf %[[V3]], %[[V0]] : vector<4x256xf32> + %14 = arith.subf %arg5, %arg4 : f32 + // CHECK: %[[TAN:.*]] = math.tanh %[[V3]] : vector<4x256xf32> + %15 = math.tanh %arg5 : f32 + // CHECK: %[[R0:.*]] = vector.transfer_write %[[ADD]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32> + // CHECK: %[[R1:.*]] = vector.transfer_write %[[CST0]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32> + // CHECK: %[[R2:.*]] = vector.transfer_write %[[CST1]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32> + // CHECK: %[[R3:.*]] = vector.transfer_write %[[DIV]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32> + // CHECK: %[[R4:.*]] = vector.transfer_write %[[EXP]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : 
vector<4x256xf32>, tensor<4x256xf32> + // CHECK: %[[R5:.*]] = vector.transfer_write %[[MUL]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32> + // CHECK: %[[R6:.*]] = vector.transfer_write %[[RSQRT]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32> + // CHECK: %[[R7:.*]] = vector.transfer_write %[[SEL]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32> + // CHECK: %[[R8:.*]] = vector.transfer_write %[[SUB]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32> + // CHECK: %[[R9:.*]] = vector.transfer_write %[[TAN]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32> + linalg.yield %6, %8, %c1_f32, %9, %10, %11, %12, %13, %14, %15 : f32, f32, + f32, f32, f32, f32, f32, f32, f32, f32 + } -> (tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, + tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, + tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>) + // CHECK: return %[[R0]], %[[R1]], %[[R2]], %[[R3]], %[[R4]], %[[R5]], %[[R6]], %[[R7]], %[[R8]], %[[R9]] : tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32> + return %r#0, %r#1, %r#2, %r#3, %r#4, %r#5, %r#6, %r#7, %r#8, %r#9: + tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, + tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, + tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32> +} + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, 0, 0, d1)> +// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0) -> (d0, 0, 0, 0)> +// CHECK-DAG: #[[$MAP2:.*]] = affine_map<(d0) -> (0, 0, d0, 0)> +// CHECK-DAG: #[[$MAP3:.*]] = affine_map<(d0, d1) -> (d1, 0, d0, 0)> +// CHECK: func @generic_vectorize_broadcast_transpose +// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index +// CHECK-DAG: %[[CF:.*]] = arith.constant 0.000000e+00 : f32 +// CHECK: %[[V0:.*]] = vector.transfer_read %{{.*}}[%[[C0]], %[[C0]]], %[[CF]] {in_bounds = [true, true, true, true], permutation_map = #[[$MAP0]]} : memref<4x4xf32>, vector<4x4x4x4xf32> +// CHECK: %[[V1:.*]] = vector.transfer_read %{{.*}}[%[[C0]]], %[[CF]] {in_bounds = [true, true, true, true], permutation_map = #[[$MAP1]]} : memref<4xf32>, vector<4x4x4x4xf32> +// CHECK: %[[V2:.*]] = vector.transfer_read %{{.*}}[%[[C0]]], %[[CF]] {in_bounds = [true, true, true, true], permutation_map = #[[$MAP2]]} : memref<4xf32>, vector<4x4x4x4xf32> +// CHECK: %[[V3:.*]] = vector.transfer_read %{{.*}}[%[[C0]], %[[C0]]], %[[CF]] {in_bounds = [true, true, true, true], permutation_map = #[[$MAP3]]} : memref<4x4xf32>, vector<4x4x4x4xf32> +// CHECK: %[[SUB:.*]] = arith.subf %[[V0]], %[[V1]] : vector<4x4x4x4xf32> +// CHECK: %[[ADD0:.*]] = arith.addf %[[V2]], %[[SUB]] : vector<4x4x4x4xf32> +// CHECK: %[[ADD1:.*]] = arith.addf %[[V3]], %[[ADD0]] : vector<4x4x4x4xf32> +// CHECK: vector.transfer_write %[[ADD1]], {{.*}} : vector<4x4x4x4xf32>, memref<4x4x4x4xf32> +func.func 
@generic_vectorize_broadcast_transpose( + %A: memref<4xf32>, %B: memref<4x4xf32>, %C: memref<4x4x4x4xf32>) { + linalg.generic { + indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d3)>, + affine_map<(d0, d1, d2, d3) -> (d0)>, + affine_map<(d0, d1, d2, d3) -> (d2)>, + affine_map<(d0, d1, d2, d3) -> (d2, d0)>, + affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], + iterator_types = ["parallel", "parallel", "parallel", "parallel"]} + ins(%B, %A, %A, %B: memref<4x4xf32>, memref<4xf32>, memref<4xf32>, memref<4x4xf32>) + outs(%C : memref<4x4x4x4xf32>) { + ^bb0(%arg0: f32, %arg1: f32, %arg2: f32, %arg3: f32, %arg4: f32): + %s = arith.subf %arg0, %arg1 : f32 + %a = arith.addf %arg2, %s : f32 + %b = arith.addf %arg3, %a : f32 + linalg.yield %b : f32 + } + return +} + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// Test different input maps. +#matmul_trait = { + indexing_maps = [ + affine_map<(d0, d1, d2, d3) -> (d1, d0)>, + affine_map<(d0, d1, d2, d3) -> (d3, d1)>, + affine_map<(d0, d1, d2, d3) -> (d3, d1, d0, d2)>, + affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)> + ], + iterator_types = ["parallel", "parallel", "parallel", "parallel"] +} + +// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d1, d0, 0, 0)> +// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1) -> (0, d1, 0, d0)> +// CHECK-DAG: #[[MAP2:.*]] = affine_map<(d0, d1, d2, d3) -> (d2, d1, d3, d0)> +// CHECK: func @vectorization_transpose +// CHECK: vector.transfer_read {{.*}}{in_bounds = [true, true, true, true], permutation_map = #[[MAP0]]} : memref<14x7xf32>, vector<7x14x8x16xf32> +// CHECK: vector.transfer_read {{.*}}{in_bounds = [true, true, true, true], permutation_map = #[[MAP1]]} : memref<16x14xf32>, vector<7x14x8x16xf32> +// CHECK: vector.transfer_read {{.*}}{in_bounds = [true, true, true, true], permutation_map = #[[MAP2]]} : memref<16x14x7x8xf32>, vector<7x14x8x16xf32> +// CHECK: arith.addf {{.*}} : vector<7x14x8x16xf32> +// CHECK: arith.addf {{.*}} : vector<7x14x8x16xf32> +// CHECK: vector.transfer_write {{.*}} : vector<7x14x8x16xf32>, memref<7x14x8x16xf32> +func.func @vectorization_transpose(%A: memref<14x7xf32>, %B: memref<16x14xf32>, + %C: memref<16x14x7x8xf32>, %D: memref<7x14x8x16xf32>) { + linalg.generic #matmul_trait + ins(%A, %B, %C : memref<14x7xf32>, memref<16x14xf32>, memref<16x14x7x8xf32>) + outs(%D : memref<7x14x8x16xf32>) { + ^bb(%a: f32, %b: f32, %c: f32, %d: f32) : + %e = arith.addf %a, %b: f32 + %f = arith.addf %e, %c: f32 + linalg.yield %f : f32 + } + return +} + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-LABEL: func @matmul_tensors +// CHECK-SAME: (%[[ARG0:.*]]: tensor<8x4xf32>, %[[ARG1:.*]]: tensor<4x12xf32>, +// CHECK-SAME: %[[ARG2:.*]]: tensor<8x12xf32>) -> tensor<8x12xf32> 
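+// A minimal sketch of the expected lowering for the matmul below (illustration
+// only; the CHECK lines are authoritative). With the contraction patterns
+// disabled, `linalg.matmul` vectorizes to element-wise ops over the full
+// 8x12x4 iteration space plus a reduction over the contraction dimension:
+//
+//   %mul = arith.mulf %lhs, %rhs : vector<8x12x4xf32>
+//   %red = vector.multi_reduction <add>, %mul, %acc [2]
+//          : vector<8x12x4xf32> to vector<8x12xf32>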
+func.func @matmul_tensors( + %arg0: tensor<8x4xf32>, %arg1: tensor<4x12xf32>, %arg2: tensor<8x12xf32>) + -> tensor<8x12xf32> { + // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index + // CHECK-DAG: %[[V0:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], {{.*}} : tensor<8x4xf32>, vector<8x12x4xf32> + // CHECK-DAG: %[[V1:.*]] = vector.transfer_read %[[ARG1]][%[[C0]], %[[C0]]], {{.*}} : tensor<4x12xf32>, vector<8x12x4xf32> + // CHECK-DAG: %[[V2:.*]] = vector.transfer_read %[[ARG2]][%[[C0]], %[[C0]]], {{.*}} : tensor<8x12xf32>, vector<8x12xf32> + // + // The linalg.matmul lowering gets expanded to a 3D reduction; canonicalization + // later converts it to a 2D contract. + // CHECK: %[[MUL:.*]] = arith.mulf %[[V0]], %[[V1]] : vector<8x12x4xf32> + // CHECK: %[[R:.*]] = vector.multi_reduction <add>, %[[MUL]], %[[V2]] [2] : vector<8x12x4xf32> to vector<8x12xf32> + // CHECK: %[[W:.*]] = vector.transfer_write %[[R]], %[[ARG2]][%[[C0]], %[[C0]]] {in_bounds = [true, true]} : vector<8x12xf32>, tensor<8x12xf32> + %0 = linalg.matmul ins(%arg0, %arg1: tensor<8x4xf32>, tensor<4x12xf32>) + outs(%arg2: tensor<8x12xf32>) + -> tensor<8x12xf32> + // CHECK: return %[[W]] : tensor<8x12xf32> + return %0 : tensor<8x12xf32> +} + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-LABEL: func @pad_static( +// CHECK-SAME: %[[ARG0:.*]]: tensor<2x?x2xf32>, %[[PAD:.*]]: f32 +// CHECK-NOT: tensor.pad +// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index +// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index +// CHECK-DAG: %[[INIT:.*]] = tensor.empty() : tensor<2x3x4xf32> +// CHECK-DAG: %[[VEC:.*]] = vector.broadcast %[[PAD]] : f32 to vector<2x3x4xf32> +// CHECK: %[[FILL:.*]] = vector.transfer_write %[[VEC]], %[[INIT]]{{.*}} : vector<2x3x4xf32>, tensor<2x3x4xf32> +// CHECK: %[[READ:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]], %[[C0]]], %[[PAD]] {in_bounds = [true, false, true]} : tensor<2x?x2xf32>, vector<2x3x2xf32> +// CHECK: %[[RESULT:.*]] = vector.transfer_write %[[READ]], %[[FILL]][%[[C0]], %[[C0]], %[[C2]]] {in_bounds = [true, true, true]} : vector<2x3x2xf32>, tensor<2x3x4xf32> +// CHECK: return %[[RESULT]] +func.func @pad_static(%arg0: tensor<2x?x2xf32>, %pad_value: f32) -> tensor<2x3x4xf32> { + %0 = tensor.pad %arg0 low[0, 0, 2] high[0, 1, 0] { + ^bb0(%arg1: index, %arg2: index, %arg3: index): + tensor.yield %pad_value : f32 + } : tensor<2x?x2xf32> to tensor<2x3x4xf32> + return %0 : tensor<2x3x4xf32> +} + + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_padding } : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-LABEL: func @pad_static_source( +// CHECK-SAME: %[[ARG0:.*]]: tensor<2x5x2xf32>, %[[PAD:.*]]: f32 +// CHECK-NOT: tensor.pad +// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index +// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index +// 
CHECK: %[[INIT:.*]] = tensor.empty() : tensor<2x6x4xf32> +// CHECK: %[[VEC:.*]] = vector.broadcast %[[PAD]] : f32 to vector<2x6x4xf32> +// CHECK: %[[FILL:.*]] = vector.transfer_write %[[VEC]], %[[INIT]][%[[C0]], %[[C0]], %[[C0]]] {in_bounds = [true, true, true]} : vector<2x6x4xf32>, tensor<2x6x4xf32> +// CHECK: %[[READ:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]], %[[C0]]], %{{.*}} {in_bounds = [true, true, true]} : tensor<2x5x2xf32>, vector<2x5x2xf32> +// CHECK: %[[WRITE:.*]] = vector.transfer_write %[[READ]], %[[FILL]][%[[C0]], %[[C0]], %[[C2]]] {in_bounds = [true, true, true]} : vector<2x5x2xf32>, tensor<2x6x4xf32> +// CHECK: return %[[WRITE]] +func.func @pad_static_source(%arg0: tensor<2x5x2xf32>, %pad_value: f32) -> tensor<2x6x4xf32> { + %0 = tensor.pad %arg0 low[0, 0, 2] high[0, 1, 0] { + ^bb0(%arg1: index, %arg2: index, %arg3: index): + tensor.yield %pad_value : f32 + } : tensor<2x5x2xf32> to tensor<2x6x4xf32> + return %0 : tensor<2x6x4xf32> +} + + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_padding } : (!transform.any_op) -> !transform.any_op +} + + +// ----- + +// CHECK-LABEL: func @pad_static_dynamic( +// CHECK-SAME: %[[SRC:.*]]: tensor<1x2x2x?xf32>, %[[LOW:.*]]: index, %[[HIGH:.*]]: index +// CHECK-NOT: tensor.pad +// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index +// CHECK-DAG: %[[C3:.*]] = arith.constant 3 : index +// CHECK-DAG: %[[C5:.*]] = arith.constant 5 : index +// CHECK: %[[V0:.*]] = arith.addi %[[LOW]], %[[C2]] : index +// CHECK: %[[V1:.*]] = arith.addi %[[V0]], %[[C3]] : index +// CHECK: %[[V2:.*]] = arith.addi %[[HIGH]], %[[C5]] : index +// CHECK: %[[DIM3:.*]] = tensor.dim %[[SRC]], %[[C3]] : tensor<1x2x2x?xf32> +// CHECK: %[[V4:.*]] = arith.addi %[[DIM3]], %[[C3]] : index +// CHECK: %[[V5:.*]] = arith.addi %[[V4]], %[[C2]] : index +// CHECK: %[[INIT:.*]] = tensor.empty(%[[V1]], %[[V2]], %[[V5]]) : tensor<6x?x?x?xf32> +// CHECK: %[[FILL:.*]] = linalg.fill ins(%{{.*}} : f32) outs(%[[INIT]] : tensor<6x?x?x?xf32>) -> tensor<6x?x?x?xf32> +// CHECK: %[[SRCDIM:.*]] = tensor.dim %[[SRC]], %[[C3]] : tensor<1x2x2x?xf32> +// CHECK: %[[RESULT:.*]] = tensor.insert_slice %[[SRC]] into %[[FILL]][2, %[[LOW]], 3, 3] [1, 2, 2, %[[SRCDIM]]] [1, 1, 1, 1] : tensor<1x2x2x?xf32> into tensor<6x?x?x?xf32> +// CHECK: return %[[RESULT]] +func.func @pad_static_dynamic(%arg0: tensor<1x2x2x?xf32>, %low: index, %high: index, + %pad_value: f32) -> tensor<6x?x?x?xf32> { + %0 = tensor.pad %arg0 low[2, %low, 3, 3] high[3, 3, %high, 2] { + ^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index): + tensor.yield %pad_value : f32 + } : tensor<1x2x2x?xf32> to tensor<6x?x?x?xf32> + return %0 : tensor<6x?x?x?xf32> +} + + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_padding } : (!transform.any_op) -> !transform.any_op +} + +// ----- + 
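+// Summary of the `vectorize_padding` cases above (sketch, not checked output):
+// a statically shaped `tensor.pad` becomes a broadcast of the pad value plus
+// in-bounds transfers, roughly
+//
+//   %v = vector.broadcast %pad : f32 to vector<2x6x4xf32>
+//   %fill = vector.transfer_write %v, %init ...
+//   %r = vector.transfer_read %src ..., %pad ...
+//   %res = vector.transfer_write %r, %fill ...
+//
+// whereas fully dynamic pad sizes fall back to `linalg.fill` plus
+// `tensor.insert_slice` (see @pad_static_dynamic above). Unsupported element
+// types such as `complex<f32>` are left untouched, as the next test checks.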
+// CHECK-LABEL: func @pad_static_complex( +// CHECK-NOT: vector< +func.func @pad_static_complex(%arg0: tensor<2x5x2xcomplex<f32>>, %pad_value: complex<f32>) -> tensor<2x6x4xcomplex<f32>> { + %0 = tensor.pad %arg0 low[0, 0, 2] high[0, 1, 0] { + ^bb0(%arg1: index, %arg2: index, %arg3: index): + tensor.yield %pad_value : complex<f32> + } : tensor<2x5x2xcomplex<f32>> to tensor<2x6x4xcomplex<f32>> + return %0 : tensor<2x6x4xcomplex<f32>> +} + + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_padding } : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-LABEL: func @pad_and_transfer_read +// CHECK-SAME: %[[ARG0:.*]]: tensor<5x6xf32> +// CHECK-NOT: tensor.pad +// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index +// CHECK-DAG: %[[C5:.*]] = arith.constant 5.0 +// CHECK: %[[RESULT:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], %[[C5]] : tensor<5x6xf32>, vector<7x9xf32> +// CHECK: return %[[RESULT]] +func.func @pad_and_transfer_read(%arg0: tensor<5x6xf32>) -> vector<7x9xf32> { + %c0 = arith.constant 0 : index + %c5 = arith.constant 5.0 : f32 + %c6 = arith.constant 6.0 : f32 + %0 = tensor.pad %arg0 low[0, 0] high[5, 7] { + ^bb0(%arg1: index, %arg2: index): + tensor.yield %c5 : f32 + } : tensor<5x6xf32> to tensor<10x13xf32> + %1 = vector.transfer_read %0[%c0, %c0], %c6 + : tensor<10x13xf32>, vector<7x9xf32> + return %1 : vector<7x9xf32> +} + + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_padding } : (!transform.any_op) -> !transform.any_op +} + +// ----- + +func.func private @make_vector() -> vector<7x9xf32> + +// CHECK-LABEL: func @pad_and_transfer_write_static +// CHECK-SAME: %[[ARG0:.*]]: tensor<5x6xf32> +// CHECK-NOT: tensor.pad +// CHECK: %[[C0:.*]] = arith.constant 0 : index +// CHECK: %[[VEC0:.*]] = call @make_vector() : () -> vector<7x9xf32> +// CHECK: %[[RESULT:.*]] = vector.transfer_write %[[VEC0]], %[[ARG0]][%[[C0]], %[[C0]]] : vector<7x9xf32>, tensor<5x6xf32> +// CHECK: return %[[RESULT]] +func.func @pad_and_transfer_write_static( + %arg0: tensor<5x6xf32>) -> tensor<5x6xf32> { + %c0 = arith.constant 0 : index + %c5 = arith.constant 5.0 : f32 + %0 = tensor.pad %arg0 low[0, 0] high[5, 7] { + ^bb0(%arg2: index, %arg3: index): + tensor.yield %c5 : f32 + } : tensor<5x6xf32> to tensor<10x13xf32> + %1 = call @make_vector() : () -> vector<7x9xf32> + %2 = vector.transfer_write %1, %0[%c0, %c0] + : vector<7x9xf32>, tensor<10x13xf32> + %3 = tensor.extract_slice %2[0, 0] [5, 6] [1, 1] : tensor<10x13xf32> to tensor<5x6xf32> + return %3 : tensor<5x6xf32> +} + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %3 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %5 = transform.structured.vectorize_children_and_apply_patterns %4 { vectorize_padding } : (!transform.any_op) -> !transform.any_op +} + + +// ----- + +func.func private @make_vector() -> vector<7x9xf32> + +// CHECK-LABEL: func @pad_and_transfer_write_dynamic_static +// CHECK-SAME: %[[ARG0:.*]]: tensor<?x?xf32>, %[[SIZE:.*]]: index, 
%[[PADDING:.*]]: index +// CHECK-NOT: tensor.pad +// CHECK: %[[C0:.*]] = arith.constant 0 : index +// CHECK: %[[SUB:.*]] = tensor.extract_slice %[[ARG0]][0, 0] [%[[SIZE]], 6] [1, 1] : tensor<?x?xf32> to tensor<?x6xf32> +// CHECK: %[[VEC0:.*]] = call @make_vector() : () -> vector<7x9xf32> +// CHECK: %[[RESULT:.*]] = vector.transfer_write %[[VEC0]], %[[SUB]][%[[C0]], %[[C0]]] : vector<7x9xf32>, tensor<?x6xf32> +// CHECK: return %[[RESULT]] +func.func @pad_and_transfer_write_dynamic_static( + %arg0: tensor<?x?xf32>, %size: index, %padding: index) -> tensor<?x6xf32> { + %c0 = arith.constant 0 : index + %c5 = arith.constant 5.0 : f32 + %s = tensor.extract_slice %arg0[0, 0] [%size, 6] [1, 1] + : tensor<?x?xf32> to tensor<?x6xf32> + %0 = tensor.pad %s low[0, 0] high[%padding, 7] { + ^bb0(%arg2: index, %arg3: index): + tensor.yield %c5 : f32 + } : tensor<?x6xf32> to tensor<?x13xf32> + %1 = call @make_vector() : () -> vector<7x9xf32> + %2 = vector.transfer_write %1, %0[%c0, %c0] + : vector<7x9xf32>, tensor<?x13xf32> + %3 = tensor.extract_slice %2[0, 0] [%size, 6] [1, 1] : tensor<?x13xf32> to tensor<?x6xf32> + return %3 : tensor<?x6xf32> +} + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %3 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %5 = transform.structured.vectorize_children_and_apply_patterns %4 { vectorize_padding } : (!transform.any_op) -> !transform.any_op +} + + +// ----- + +func.func private @make_vector() -> tensor<12x13xf32> + +// CHECK-LABEL: func @pad_and_insert_slice_source +// CHECK-SAME: %[[ARG0:.*]]: tensor<5x6xf32> +// CHECK-NOT: tensor.pad +// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index +// CHECK-DAG: %[[C5:.*]] = arith.constant 5.0 +// CHECK: %[[VEC0:.*]] = call @make_vector() : () -> tensor<12x13xf32> +// CHECK: %[[READ:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], %[[C5]] : tensor<5x6xf32>, vector<7x9xf32> +// CHECK: %[[WRITE:.*]] = vector.transfer_write %[[READ]], %[[VEC0]][%[[C0]], %[[C0]]] {in_bounds = [true, true]} : vector<7x9xf32>, tensor<12x13xf32> +// CHECK: return %[[WRITE]] +func.func @pad_and_insert_slice_source( + %arg0: tensor<5x6xf32>) -> tensor<12x13xf32> { + %c0 = arith.constant 0 : index + %c5 = arith.constant 5.0 : f32 + %0 = tensor.pad %arg0 low[0, 0] high[2, 3] { + ^bb0(%arg2: index, %arg3: index): + tensor.yield %c5 : f32 + } : tensor<5x6xf32> to tensor<7x9xf32> + %1 = call @make_vector() : () -> tensor<12x13xf32> + %r = tensor.insert_slice %0 into %1[0, 0][7, 9][1, 1] : tensor<7x9xf32> into tensor<12x13xf32> + return %r : tensor<12x13xf32> +} + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %3 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %5 = transform.structured.vectorize_children_and_apply_patterns %4 { vectorize_padding } : (!transform.any_op) -> !transform.any_op +} + + +// ----- + +func.func private @make_vector() -> tensor<12x13xf32> + +// CHECK-LABEL: func @pad_and_insert_slice_dest +// Check the insert slice is not rewritten if the padded result is used by the destination operand. 
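+// Concretely, the pad result below feeds the *destination* operand of the
+// insert_slice, so neither op may be rewritten into vector transfers:
+//
+//   %0 = tensor.pad %arg0 ... : tensor<1x5x6xf32> to tensor<1x12x13xf32>
+//   %r = tensor.insert_slice %1 into %0 ...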
+// CHECK: %[[T1:.*]] = call @make_vector() : () -> tensor<12x13xf32> +// CHECK: = tensor.insert_slice %[[T1]] into +func.func @pad_and_insert_slice_dest( + %arg0: tensor<1x5x6xf32>) -> tensor<1x12x13xf32> { + %c5 = arith.constant 5.0 : f32 + %0 = tensor.pad %arg0 low[0, 0, 0] high[0, 7, 7] { + ^bb0(%arg2: index, %arg3: index, %arg4: index): + tensor.yield %c5 : f32 + } : tensor<1x5x6xf32> to tensor<1x12x13xf32> + %1 = call @make_vector() : () -> tensor<12x13xf32> + %r = tensor.insert_slice %1 into %0[0, 0, 0][1, 12, 13][1, 1, 1] : tensor<12x13xf32> into tensor<1x12x13xf32> + return %r : tensor<1x12x13xf32> +} + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %3 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-LABEL: func @pad_tensor_non_const_pad_value +// CHECK-SAME: %[[ARG0:.*]]: tensor<5x6xf32> +// CHECK-NOT: tensor.pad +// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index +// CHECK-DAG: %[[C3:.*]] = arith.constant 3 : index +// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : index +// CHECK: %[[FILL:.*]] = tensor.generate +// CHECK: %[[RES:.*]] = arith.mulf +// CHECK: tensor.yield %[[RES]] : f32 +// CHECK: %[[READ:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], %{{.*}} {in_bounds = [true, true]} : tensor<5x6xf32>, vector<5x6xf32> +// CHECK: %[[WRITE:.*]] = vector.transfer_write %[[READ]], %[[FILL]][%[[C3]], %[[C4]]] {in_bounds = [true, true]} : vector<5x6xf32>, tensor<12x13xf32> +// CHECK: return %[[WRITE]] +func.func @pad_tensor_non_const_pad_value(%arg0: tensor<5x6xf32>) -> tensor<12x13xf32> { + %c0 = arith.constant 0 : index + %c5 = arith.constant 5.0 : f32 + %0 = tensor.pad %arg0 low[3, 4] high[4, 3] { + ^bb0(%arg1: index, %arg2: index): + %i1 = arith.index_cast %arg1 : index to i32 + %i2 = arith.index_cast %arg2 : index to i32 + %f1 = arith.sitofp %i1 : i32 to f32 + %f2 = arith.sitofp %i2 : i32 to f32 + %m = arith.mulf %f1, %f2 : f32 + tensor.yield %m : f32 + } : tensor<5x6xf32> to tensor<12x13xf32> + return %0 : tensor<12x13xf32> +} + + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %3 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %5 = transform.structured.vectorize_children_and_apply_patterns %4 { vectorize_padding } : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-LABEL: func @sum_exp +func.func @sum_exp(%input: tensor<4x16x8xf32>, %output: tensor<4x16xf32>) + -> tensor<4x16xf32> +{ + // CHECK: vector.transfer_read {{.*}} : tensor<4x16x8xf32>, vector<4x16x8xf32> + // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true]} : tensor<4x16xf32>, vector<4x16xf32> + // CHECK: math.exp {{.*}} : vector<4x16x8xf32> + // CHECK: vector.multi_reduction <add>, %{{.*}}, %{{.*}} [2] : vector<4x16x8xf32> to vector<4x16xf32> + // CHECK: vector.transfer_write {{.*}} : vector<4x16xf32>, tensor<4x16xf32> + // CHECK: return {{.*}} : tensor<4x16xf32> + %0 = linalg.generic { + indexing_maps = [ + affine_map<(d0, d1, d2) -> (d0, d1, d2)>, + affine_map<(d0, d1, d2) -> (d0, d1)> + ], + iterator_types = ["parallel", "parallel", "reduction"] + } ins(%input : tensor<4x16x8xf32>) outs(%output : 
tensor<4x16xf32>) { + ^bb0(%arg0: f32, %arg1: f32): + %1 = math.exp %arg0 : f32 + %2 = arith.addf %1, %arg1 : f32 + linalg.yield %2 : f32 + } -> tensor<4x16xf32> + return %0 : tensor<4x16xf32> +} + + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-DAG: #[[$M1:.*]] = affine_map<(d0, d1) -> (d1, d0, 0, 0)> +// CHECK-DAG: #[[$M2:.*]] = affine_map<(d0, d1) -> (0, 0, d1, d0)> +// CHECK-DAG: #[[$M3:.*]] = affine_map<(d0, d1) -> (d1, d0)> + +// CHECK-LABEL: func @sum_exp_2 +func.func @sum_exp_2(%input: tensor<3x2xf32>, %input_2: tensor<5x4xf32>, %output: tensor<5x2xf32>) + -> tensor<5x2xf32> +{ + // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true, true, true], permutation_map = #[[$M1]]} : tensor<3x2xf32>, vector<2x3x4x5xf32> + // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true, true, true], permutation_map = #[[$M2]]} : tensor<5x4xf32>, vector<2x3x4x5xf32> + // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true], permutation_map = #[[$M3]]} : tensor<5x2xf32>, vector<2x5xf32> + // CHECK: math.exp {{.*}} : vector<2x3x4x5xf32> + // CHECK: math.exp {{.*}} : vector<2x3x4x5xf32> + // CHECK: addf {{.*}} : vector<2x3x4x5xf32> + // CHECK: vector.multi_reduction <add>, {{.*}}, %{{.*}} [1, 2] : vector<2x3x4x5xf32> to vector<2x5xf32> + // CHECK: vector.transfer_write {{.*}} {in_bounds = [true, true], permutation_map = #[[$M3]]} : vector<2x5xf32>, tensor<5x2xf32> + // CHECK: return {{.*}} : tensor<5x2xf32> + %0 = linalg.generic { + indexing_maps = [ + affine_map<(d0, d1, d2, d3) -> (d1, d0)>, + affine_map<(d0, d1, d2, d3) -> (d3, d2)>, + affine_map<(d0, d1, d2, d3) -> (d3, d0)> + ], + iterator_types = ["parallel", "reduction", "reduction", "parallel"] + } ins(%input, %input_2 : tensor<3x2xf32>, tensor<5x4xf32>) outs(%output : tensor<5x2xf32>) { + ^bb0(%arg0: f32, %arg1: f32, %arg2: f32): + %1 = math.exp %arg0 : f32 + %2 = math.exp %arg1 : f32 + %3 = arith.addf %1, %2 : f32 + %4 = arith.addf %3, %arg2 : f32 + linalg.yield %4 : f32 + } -> tensor<5x2xf32> + return %0 : tensor<5x2xf32> +} + + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %5 = transform.structured.vectorize_children_and_apply_patterns %4 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op +} + +// ----- + 
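+// The reduction tests below check that the combiner in the `linalg.generic`
+// body selects the matching `vector.multi_reduction` kind, e.g. (sketch):
+//
+//   %max = arith.maximumf %in0, %out0 : f32
+//   // vectorizes to:
+//   vector.multi_reduction <maximumf>, %v, %acc [1] : vector<4x4xf32> to vector<4xf32>
+//
+// and likewise <minimumf>, <mul>, <or>, <and> and <xor> for the other bodies.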
+// CHECK-LABEL: func @red_max_2d( +func.func @red_max_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> { + // CHECK: %[[CMINF:.+]] = arith.constant dense<-3.402820e+38> : vector<4xf32> + // CHECK: tensor.empty() : tensor<4xf32> + // CHECK: vector.multi_reduction <maximumf>, {{.*}}, %[[CMINF]] [1] : vector<4x4xf32> to vector<4xf32> + // CHECK: vector.transfer_write {{.*}} : vector<4xf32>, tensor<4xf32> + %ident = arith.constant -3.40282e+38 : f32 + %init = tensor.empty() : tensor<4xf32> + %fill = linalg.fill ins(%ident : f32) outs(%init : tensor<4xf32>) -> tensor<4xf32> + %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0)>], + iterator_types = ["parallel", "reduction"]} + ins(%arg0 : tensor<4x4xf32>) outs(%fill : tensor<4xf32>) { + ^bb0(%in0: f32, %out0: f32): + %max = arith.maximumf %in0, %out0 : f32 + linalg.yield %max : f32 + } -> tensor<4xf32> + return %red : tensor<4xf32> +} + + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %5 = transform.structured.vectorize_children_and_apply_patterns %4 { vectorize_padding } : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-LABEL: func @red_min_2d( +func.func @red_min_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> { + // CHECK: %[[CMAXF:.+]] = arith.constant dense<3.402820e+38> : vector<4xf32> + // CHECK: tensor.empty() : tensor<4xf32> + // CHECK: vector.transfer_read {{.*}} : tensor<4x4xf32>, vector<4x4xf32> + // CHECK: vector.multi_reduction <minimumf>, {{.*}}, %[[CMAXF]] [1] : vector<4x4xf32> to vector<4xf32> + // CHECK: vector.transfer_write {{.*}} : vector<4xf32>, tensor<4xf32> + %maxf32 = arith.constant 3.40282e+38 : f32 + %init = tensor.empty() : tensor<4xf32> + %fill = linalg.fill ins(%maxf32 : f32) outs(%init : tensor<4xf32>) -> tensor<4xf32> + %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0)>], + iterator_types = ["parallel", "reduction"]} + ins(%arg0 : tensor<4x4xf32>) outs(%fill : tensor<4xf32>) { + ^bb0(%in0: f32, %out0: f32): + %min = arith.minimumf %out0, %in0 : f32 + linalg.yield %min : f32 + } -> tensor<4xf32> + return %red : tensor<4xf32> +} + + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-LABEL: func @red_mul_2d( +func.func @red_mul_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> { + // CHECK: tensor.empty() : tensor<4xf32> + // CHECK: vector.transfer_read {{.*}} : tensor<4x4xf32>, vector<4x4xf32> + // CHECK: vector.multi_reduction <mul>, {{.*}}, {{.*}} [1] : vector<4x4xf32> to vector<4xf32> + // CHECK: vector.transfer_write {{.*}} : vector<4xf32>, tensor<4xf32> + %ident = arith.constant 1.0 : f32 + %init = tensor.empty() : tensor<4xf32> + %fill = linalg.fill ins(%ident : f32) outs(%init : tensor<4xf32>) -> tensor<4xf32> + %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0)>], + iterator_types = ["parallel", "reduction"]} + ins(%arg0 : tensor<4x4xf32>) outs(%fill : tensor<4xf32>) { + ^bb0(%in0: f32, %out0: f32): + %mul = arith.mulf %in0, %out0 : f32 + linalg.yield %mul : f32 + } -> tensor<4xf32> + return %red : tensor<4xf32> +} + + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-LABEL: func @red_or_2d( +func.func @red_or_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> { + // 
CHECK: tensor.empty() : tensor<4xi1> + // CHECK: vector.transfer_read {{.*}} : tensor<4x4xi1>, vector<4x4xi1> + // CHECK: vector.multi_reduction <or>, {{.*}}, {{.*}} [1] : vector<4x4xi1> to vector<4xi1> + // CHECK: vector.transfer_write {{.*}} : vector<4xi1>, tensor<4xi1> + %ident = arith.constant false + %init = tensor.empty() : tensor<4xi1> + %fill = linalg.fill ins(%ident : i1) outs(%init : tensor<4xi1>) -> tensor<4xi1> + %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0)>], + iterator_types = ["parallel", "reduction"]} + ins(%arg0 : tensor<4x4xi1>) outs(%fill : tensor<4xi1>) { + ^bb0(%in0: i1, %out0: i1): + %or = arith.ori %in0, %out0 : i1 + linalg.yield %or : i1 + } -> tensor<4xi1> + return %red : tensor<4xi1> +} + + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-LABEL: func @red_and_2d( +func.func @red_and_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> { + // CHECK: tensor.empty() : tensor<4xi1> + // CHECK: vector.transfer_read {{.*}} : tensor<4x4xi1>, vector<4x4xi1> + // CHECK: vector.multi_reduction <and>, {{.*}}, {{.*}} [1] : vector<4x4xi1> to vector<4xi1> + // CHECK: vector.transfer_write {{.*}} : vector<4xi1>, tensor<4xi1> + %ident = arith.constant true + %init = tensor.empty() : tensor<4xi1> + %fill = linalg.fill ins(%ident : i1) outs(%init : tensor<4xi1>) -> tensor<4xi1> + %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0)>], + iterator_types = ["parallel", "reduction"]} + ins(%arg0 : tensor<4x4xi1>) outs(%fill : tensor<4xi1>) { + ^bb0(%in0: i1, %out0: i1): + %and = arith.andi %in0, %out0 : i1 + linalg.yield %and : i1 + } -> tensor<4xi1> + return %red : tensor<4xi1> +} + + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-LABEL: func @red_xor_2d( +func.func @red_xor_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> { + // CHECK: tensor.empty() : tensor<4xi1> + // CHECK: vector.transfer_read {{.*}} : tensor<4x4xi1>, vector<4x4xi1> + // CHECK: vector.multi_reduction <xor>, {{.*}}, {{.*}} [1] : vector<4x4xi1> to vector<4xi1> + // CHECK: vector.transfer_write {{.*}} : vector<4xi1>, tensor<4xi1> + %ident = arith.constant false + %init = tensor.empty() : tensor<4xi1> + %fill = linalg.fill ins(%ident : i1) outs(%init : tensor<4xi1>) -> tensor<4xi1> + %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0)>], + iterator_types = ["parallel", "reduction"]} + ins(%arg0 : tensor<4x4xi1>) outs(%fill : tensor<4xi1>) { + ^bb0(%in0: i1, %out0: i1): + %xor = arith.xori %in0, %out0 : i1 + linalg.yield %xor : i1 + } -> tensor<4xi1> + return %red : tensor<4xi1> +} + + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %4 = 
get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-DAG: #[[$M5:.*]] = affine_map<(d0, d1) -> (d0, 0)> + +// CHECK-LABEL: func @explicit_broadcast( +func.func @explicit_broadcast(%arg0: tensor<4x4xf32>, %arg1: tensor<4x1xf32>) -> tensor<4x4xf32> { + // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true]} : tensor<4x4xf32>, vector<4x4xf32> + // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true], permutation_map = #[[$M5]]} : tensor<4x1xf32>, vector<4x4xf32> + // CHECK: subf {{.*}} : vector<4x4xf32> + // CHECK: vector.transfer_write {{.*}} {in_bounds = [true, true]} : vector<4x4xf32>, tensor<4x4xf32> + %c0 = arith.constant 0.0 : f32 + %init = tensor.empty() : tensor<4x4xf32> + %fill = linalg.fill ins(%c0 : f32) outs(%init : tensor<4x4xf32>) -> tensor<4x4xf32> + %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0, 0)>, + affine_map<(d0, d1) -> (d0, d1)>], + iterator_types = ["parallel", "parallel"]} + ins(%arg0, %arg1 : tensor<4x4xf32>, tensor<4x1xf32>) + outs(%fill : tensor<4x4xf32>) { + ^bb0(%arg7: f32, %arg8: f32, %arg9: f32): + %40 = arith.subf %arg7, %arg8 : f32 + linalg.yield %40 : f32 + } -> tensor<4x4xf32> + return %red : tensor<4x4xf32> +} + + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-DAG: #[[$M6:.*]] = affine_map<(d0, d1) -> (d0, 0)> + +// CHECK-LABEL: func @fused_broadcast_red_2d +func.func @fused_broadcast_red_2d(%arg0: tensor<4x4xf32>, %arg1: tensor<4x1xf32>) -> tensor<4xf32> { + // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true]} : tensor<4x4xf32>, vector<4x4xf32> + // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true], permutation_map = #[[$M6]]} : tensor<4x1xf32>, vector<4x4xf32> + // CHECK: subf {{.*}} : vector<4x4xf32> + // CHECK: math.exp {{.*}} : vector<4x4xf32> + // CHECK: vector.multi_reduction <add>, {{.*}}, {{.*}} : vector<4x4xf32> to vector<4xf32> + // CHECK: vector.transfer_write {{.*}} {in_bounds = [true]} : vector<4xf32>, tensor<4xf32> + %c0 = arith.constant 0.0 : f32 + %init = tensor.empty() : tensor<4xf32> + %fill = linalg.fill ins(%c0 : f32) outs(%init : tensor<4xf32>) -> tensor<4xf32> + %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, + affine_map<(d0, d1) -> (d0, 0)>, + affine_map<(d0, d1) -> (d0)>], + iterator_types = ["parallel", "reduction"]} + ins(%arg0, %arg1 : tensor<4x4xf32>, tensor<4x1xf32>) + outs(%fill : tensor<4xf32>) { + ^bb0(%arg7: f32, %arg8: f32, %arg9: f32): + %40 = arith.subf %arg7, %arg8 : f32 + %41 = math.exp %40 : f32 + %42 = arith.addf %41, %arg9 : f32 + linalg.yield %42 : f32 + } -> tensor<4xf32> + return %red : tensor<4xf32> +} + + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> 
!transform.any_op + + %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-LABEL: func @reduce_1d( +// CHECK-SAME: %[[A:.*]]: tensor<32xf32> +func.func @reduce_1d(%arg0: tensor<32xf32>) -> tensor<f32> { + // CHECK-DAG: %[[vF0:.*]] = arith.constant dense<0.000000e+00> : vector<f32> + // CHECK-DAG: %[[F0:.*]] = arith.constant 0.000000e+00 : f32 + // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index + %f0 = arith.constant 0.000000e+00 : f32 + + // CHECK: %[[init:.*]] = tensor.empty() : tensor<f32> + %0 = tensor.empty() : tensor<f32> + + %1 = linalg.fill ins(%f0 : f32) outs(%0 : tensor<f32>) -> tensor<f32> + // CHECK: %[[r:.*]] = vector.transfer_read %[[A]][%[[C0]]] + // CHECK-SAME: : tensor<32xf32>, vector<32xf32> + // CHECK: %[[f0:.*]] = vector.extractelement %[[vF0]][] : vector<f32> + // CHECK: %[[red:.*]] = vector.multi_reduction <add>, %[[r]], %[[f0]] [0] + // CHECK-SAME: : vector<32xf32> to f32 + // CHECK: %[[red_v1:.*]] = vector.broadcast %[[red]] : f32 to vector<f32> + // CHECK: %[[res:.*]] = vector.transfer_write %[[red_v1]], %[[init]][] + // CHECK-SAME: : vector<f32>, tensor<f32> + %2 = linalg.generic { + indexing_maps = [affine_map<(d0) -> (d0)>, + affine_map<(d0) -> ()>], + iterator_types = ["reduction"]} + ins(%arg0 : tensor<32xf32>) + outs(%1 : tensor<f32>) { + ^bb0(%a: f32, %b: f32): + %3 = arith.addf %a, %b : f32 + linalg.yield %3 : f32 + } -> tensor<f32> + + return %2 : tensor<f32> +} + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op +} + + +// ----- + +// This test checks that vectorization does not occur when an input indexing map +// is not a projected permutation. In the future, this can be converted to a +// positive test when support is added. + +// CHECK-LABEL: func @not_projected_permutation +func.func @not_projected_permutation(%arg0: tensor<8x8xf32>) -> tensor<6x6x3x3xf32> { + %c0 = arith.constant 0.0 : f32 + %init = tensor.empty() : tensor<6x6x3x3xf32> + %fill = linalg.fill ins(%c0 : f32) outs(%init : tensor<6x6x3x3xf32>) -> tensor<6x6x3x3xf32> + // CHECK: linalg.generic + %result = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0 + d2, d1 + d3)>, + affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], + iterator_types = ["parallel", "parallel", "parallel", "parallel"]} + ins(%arg0 : tensor<8x8xf32>) + outs(%fill : tensor<6x6x3x3xf32>) { + ^bb0(%arg7: f32, %arg9: f32): + linalg.yield %arg7 : f32 + } -> tensor<6x6x3x3xf32> + return %result : tensor<6x6x3x3xf32> +} + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// Check vectorization can handle cases where outputs are a mix of reduced and non-reduced values. 
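+// In the sketch below (illustration only), the first yielded value is stored
+// along all parallel dimensions while the second is accumulated over the
+// reduction dimension d2:
+//
+//   vector.transfer_write %mul, %arg2 ...                 // parallel result
+//   %red = vector.multi_reduction <add>, %mul, %acc [2]   // reduced result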
+func.func @mixed_parallel_reduced_results(%arg0 : tensor<2x4x8xf32>, + %arg1 : tensor<2x4xf32>, %arg2 : tensor<2x4x8xf32>, %arg3 : tensor<2x4xf32>) -> + (tensor<2x4x8xf32>, tensor<2x4xf32>) { + %0:2 = linalg.generic { + indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1)>, + affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1)>], + iterator_types = ["parallel", "parallel", "reduction"]} + ins(%arg0, %arg1 : tensor<2x4x8xf32>, tensor<2x4xf32>) + outs(%arg2, %arg3 : tensor<2x4x8xf32>, tensor<2x4xf32>) { + ^bb0(%b0 : f32, %b1 : f32, %b2 : f32, %b3 : f32): + %1 = arith.mulf %b0, %b1 : f32 + %2 = arith.addf %1, %b3 : f32 + linalg.yield %1, %2 : f32, f32 + } -> (tensor<2x4x8xf32>, tensor<2x4xf32>) + return %0#0, %0#1 : tensor<2x4x8xf32>, tensor<2x4xf32> +} +// CHECK-LABEL: func @mixed_parallel_reduced_results( +// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: tensor<2x4x8xf32> +// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: tensor<2x4xf32> +// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]]: tensor<2x4x8xf32> +// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]+]]: tensor<2x4xf32> +// CHECK-DAG: %[[V0:.+]] = vector.transfer_read %[[ARG0]] +// CHECK-DAG: %[[V1:.+]] = vector.transfer_read %[[ARG1]] +// CHECK-DAG: %[[V2:.+]] = vector.transfer_read %[[ARG3]] +// CHECK-DAG: %[[MUL:.+]] = arith.mulf %[[V0]], %[[V1]] +// CHECK-DAG: %[[ADD:.+]] = vector.multi_reduction <add>, %[[MUL]], %[[V2]] +// CHECK-DAG: vector.transfer_write %[[MUL]], %[[ARG2]] +// CHECK-DAG: vector.transfer_write %[[ADD]], %[[ARG3]] + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op +} + +// ----- + +func.func @vectorize_map(%arg0: memref<64xf32>, + %arg1: memref<64xf32>, %arg2: memref<64xf32>) { + linalg.map ins(%arg0, %arg1 : memref<64xf32>, memref<64xf32>) + outs(%arg2 : memref<64xf32>) + (%in: f32, %in_0: f32) { + %0 = arith.addf %in, %in_0 : f32 + linalg.yield %0 : f32 + } + return +} +// CHECK-LABEL: func @vectorize_map +// CHECK: %[[LHS:.*]] = vector.transfer_read +// CHECK-NEXT: %[[RHS:.*]] = vector.transfer_read +// CHECK-NEXT: arith.addf %[[LHS]], %[[RHS]] : vector<64xf32> + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["linalg.map"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op +} + +// ----- + +func.func @vectorize_transpose(%arg0: memref<16x32x64xf32>, + %arg1: memref<32x64x16xf32>) { + linalg.transpose ins(%arg0 : memref<16x32x64xf32>) + outs(%arg1 : memref<32x64x16xf32>) permutation = [1, 2, 0] + return +} +// CHECK-LABEL: func @vectorize_transpose +// CHECK: vector.transpose +// CHECK-SAME: [1, 2, 0] : vector<16x32x64xf32> to vector<32x64x16xf32> + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["linalg.transpose"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) 
-> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op +} + +// ----- + +func.func @vectorize_reduce(%arg0: memref<16x32x64xf32>, + %arg1: memref<16x64xf32>) { + linalg.reduce ins(%arg0 : memref<16x32x64xf32>) + outs(%arg1 : memref<16x64xf32>) dimensions = [1] + (%in: f32, %init: f32) { + %0 = arith.addf %in, %init : f32 + linalg.yield %0 : f32 + } + return +} +// CHECK-LABEL: func @vectorize_reduce +// CHECK: vector.multi_reduction <add> +// CHECK-SAME: : vector<16x32x64xf32> to vector<16x64xf32> + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["linalg.reduce"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// This is a regression test. This IR cannot be vectorized, but +// structured.vectorize_children_and_apply_patterns should nevertheless succeed. + +#map = affine_map<(d0) -> (d0)> +// CHECK-LABEL: @not_vectorizable +func.func @not_vectorizable(%arg0: tensor<1x?xf32>, %arg1: index, %arg2: index, %arg3: index) -> tensor<1x128xf32> { + %0 = tensor.empty() : tensor<1x128xf32> + %1 = scf.for %arg5 = %arg2 to %arg1 step %arg3 iter_args(%arg6 = %0) -> (tensor<1x128xf32>) { + %extracted_slice = tensor.extract_slice %arg6[0, 0] [1, %arg1] [1, 1] : tensor<1x128xf32> to tensor<?xf32> + %expanded = tensor.expand_shape %extracted_slice [[0, 1]] : tensor<?xf32> into tensor<1x?xf32> + %extracted_slice_0 = tensor.extract_slice %arg0[0, %arg3] [1, %arg2] [1, 1] : tensor<1x?xf32> to tensor<?xf32> + %extracted_slice_1 = tensor.extract_slice %expanded[0, %arg3] [1, %arg2] [1, 1] : tensor<1x?xf32> to tensor<?xf32> + %2 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel"]} ins(%extracted_slice_0 : tensor<?xf32>) outs(%extracted_slice_1 : tensor<?xf32>) { + ^bb0(%in: f32, %out: f32): + %3 = arith.addf %in, %out : f32 + linalg.yield %3 : f32 + } -> tensor<?xf32> + %inserted_slice = tensor.insert_slice %2 into %expanded[0, %arg3] [1, %arg2] [1, 1] : tensor<?xf32> into tensor<1x?xf32> + %collapsed = tensor.collapse_shape %inserted_slice [[0, 1]] : tensor<1x?xf32> into tensor<?xf32> + %inserted_slice_2 = tensor.insert_slice %collapsed into %arg6[0, 0] [1, %arg1] [1, 1] : tensor<?xf32> into tensor<1x128xf32> + scf.yield %inserted_slice_2 : tensor<1x128xf32> + } + return %1 : tensor<1x128xf32> +} +transform.sequence failures(propagate) { +^bb0(%arg0: !transform.any_op): + %0 = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op + %1 = transform.structured.vectorize_children_and_apply_patterns %0 : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// Regression test: %13 was incorrectly detected as a reduction and +// vectorization failed. 
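For context (a sketch of why this used to trip the reduction detector): the yielded value in the test below is derived from pure index arithmetic,

  %12 = linalg.index 0 : index
  %13 = arith.addi %arg0, %12 : index

and never accumulates into the output block argument, so a correct analysis has to vectorize it as broadcast index computation rather than as a vector.multi_reduction.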
+ +func.func @wrong_reduction_detection(%input: tensor<120x64xf32>) -> tensor<120x64xf32> { + %c0 = arith.constant 0 : index + %c4 = arith.constant 4 : index + %c64 = arith.constant 64 : index + %cst_6 = arith.constant 4.000000e+00 : f32 + %1 = scf.for %arg0 = %c0 to %c64 step %c4 iter_args(%arg1 = %input) -> (tensor<120x64xf32>) { + %extracted_slice = tensor.extract_slice %arg1[%c0, %arg0] [1, 4] [1, 1] : tensor<120x64xf32> to tensor<1x4xf32> + %10 = linalg.fill {__internal_linalg_transform__ = "1"} ins(%cst_6 : f32) outs(%extracted_slice : tensor<1x4xf32>) -> tensor<1x4xf32> + %11 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} outs(%10 : tensor<1x4xf32>) { + ^bb0(%out: f32): + %12 = linalg.index 0 : index + %13 = arith.addi %arg0, %12 : index + %18 = arith.index_cast %13 : index to i32 + %20 = arith.uitofp %18 : i32 to f32 + %67 = arith.mulf %out, %20 : f32 + linalg.yield %67 : f32 + } -> tensor<1x4xf32> + %inserted_slice = tensor.insert_slice %11 into %arg1[%c0, %arg0] [1, 4] [1, 1] : tensor<1x4xf32> into tensor<120x64xf32> + scf.yield %inserted_slice : tensor<120x64xf32> + } + return %1 : tensor<120x64xf32> +} + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op +} + +// CHECK-LABEL: @wrong_reduction_detection +// CHECK: vector.broadcast +// CHECK: vector.transfer_write + +// ----- + +// Don't vectorize tensor<0xf32> +// CHECK-LABEL: @tensor_size0 +// CHECK: linalg.generic +func.func @tensor_size0(%arg0: tensor<0xf32>, + %arg1: tensor<f32>) -> tensor<f32> { + %0 = linalg.generic + {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> ()>], + iterator_types = ["reduction"]} + ins(%arg0 : tensor<0xf32>) outs(%arg1 : tensor<f32>) { + ^bb0(%in: f32, %out: f32): + %12 = arith.addf %out, %in : f32 + linalg.yield %12 : f32 + } -> tensor<f32> + return %0 : tensor<f32> +} + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op +} + +// ----- + +// CHECK-LABEL: func @test_masked_pad_static_dynamic +func.func @test_masked_pad_static_dynamic(%arg0: tensor<1x2x2x?xf32>, %low: index, %high: index, + %pad_value: f32) -> tensor<6x?x?x?xf32> { + // CHECK: tensor.pad + %0 = tensor.pad %arg0 low[2, %low, 3, 3] high[3, 3, %high, 2] { + ^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index): + tensor.yield %pad_value : f32 + } : tensor<1x2x2x?xf32> to tensor<6x?x?x?xf32> + return %0 : tensor<6x?x?x?xf32> +} + + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_padding } : (!transform.any_op) -> !transform.any_op +} + +// ----- + +func.func 
@zero_dim_tensor(%input: tensor<f32>, %output: tensor<f32>) -> tensor<f32> +{ + %0 = linalg.generic { indexing_maps = [ affine_map<() -> ()>, affine_map<() -> ()> ], + iterator_types = [] } + ins(%input : tensor<f32>) + outs(%output : tensor<f32>) { + ^bb0(%arg0: f32, %arg1: f32): + %2 = arith.addf %arg0, %arg1 : f32 + linalg.yield %2 : f32 + } -> tensor<f32> + return %0 : tensor<f32> +} + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op +} + +// CHECK-LABEL: func @zero_dim_tensor +// CHECK: vector.transfer_read {{.*}} : tensor<f32>, vector<f32> +// CHECK: vector.extractelement +// CHECK: vector.transfer_read {{.*}} : tensor<f32>, vector<f32> +// CHECK: vector.extractelement +// CHECK: arith.addf {{.*}} : f32 +// CHECK: vector.broadcast %{{.*}} : f32 to vector<f32> +// CHECK: vector.transfer_write {{.*}} : vector<f32>, tensor<f32> + +// ----- + +// Make sure we generate the right transfer writes for multi-output generic ops +// with different permutation maps. + +func.func @multi_output_generic_different_perm_maps(%in0: tensor<4x1xf32>, + %out0: tensor<4x1xf32>, + %out1: tensor<1x4xf32>) -> (tensor<4x1xf32>, tensor<1x4xf32>) { + %13:2 = linalg.generic {indexing_maps = [ affine_map<(d0, d1) -> (d1, d0)>, + affine_map<(d0, d1) -> (d1, d0)>, + affine_map<(d0, d1) -> (d0, d1)> ], + iterator_types = ["parallel", "parallel"]} + ins(%in0 : tensor<4x1xf32>) + outs(%out0, %out1 : tensor<4x1xf32>, tensor<1x4xf32>) { + ^bb0(%in: f32, %out: f32, %out_2: f32): + %16 = arith.addf %in, %in : f32 + linalg.yield %16, %16 : f32, f32 + } -> (tensor<4x1xf32>, tensor<1x4xf32>) + return %13#0, %13#1 : tensor<4x1xf32>, tensor<1x4xf32> +} + +transform.sequence failures(propagate) { +^bb1(%arg1: !transform.any_op): + %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op + %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op + %5 = transform.structured.vectorize_children_and_apply_patterns %4 : (!transform.any_op) -> !transform.any_op +} + +// CHECK-LABEL: func @multi_output_generic_different_perm_maps +// CHECK: %[[VAL_5:.*]] = vector.transfer_read %{{.*}} {in_bounds = [true, true]} : tensor<4x1xf32>, vector<4x1xf32> +// CHECK: %[[VAL_6:.*]] = arith.addf %[[VAL_5]], %[[VAL_5]] : vector<4x1xf32> +// CHECK: %[[VAL_7:.*]] = vector.transpose %[[VAL_6]], [1, 0] : vector<4x1xf32> to vector<1x4xf32> +// CHECK: %[[VAL_8:.*]] = vector.transpose %[[VAL_7]], [1, 0] : vector<1x4xf32> to vector<4x1xf32> +// CHECK: vector.transfer_write %[[VAL_8]], %{{.*}} {in_bounds = [true, true]} : vector<4x1xf32>, tensor<4x1xf32> +// CHECK: vector.transfer_write %[[VAL_7]], %{{.*}} {in_bounds = [true, true]} : vector<1x4xf32>, tensor<1x4xf32> diff --git a/mlir/test/Dialect/Linalg/vectorization.mlir b/mlir/test/Dialect/Linalg/vectorization.mlir index a5ec058b6e02..ddeaff76a04d 100644 --- a/mlir/test/Dialect/Linalg/vectorization.mlir +++ b/mlir/test/Dialect/Linalg/vectorization.mlir @@ -1,1787 +1,514 @@ // RUN: mlir-opt %s -test-transform-dialect-interpreter -split-input-file | FileCheck %s -// CHECK-LABEL: contraction_dot -func.func @contraction_dot(%A: memref<1584xf32>, %B: memref<1584xf32>, %C: memref<f32>) { - -// CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<1584xf32> -// CHECK: 
vector.multi_reduction <add>, %{{.*}}, {{.*}} [0] : vector<1584xf32> to f32 - linalg.dot ins(%A, %B: memref<1584xf32>, memref<1584xf32>) - outs(%C: memref<f32>) - return -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["linalg.dot"]} in %arg1 : (!transform.any_op) -> !transform.any_op - transform.structured.masked_vectorize %0 : !transform.any_op -} - -// ----- - -// CHECK-LABEL: contraction_matvec -func.func @contraction_matvec(%A: memref<1584x1584xf32>, %B: memref<1584xf32>, %C: memref<1584xf32>) { - -// CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<1584x1584xf32> -// CHECK: vector.multi_reduction <add>, %{{.*}}, {{.*}} [1] : vector<1584x1584xf32> to vector<1584xf32> - linalg.matvec ins(%A, %B: memref<1584x1584xf32>, memref<1584xf32>) - outs(%C: memref<1584xf32>) - return -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["linalg.matvec"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op -} - -// ----- - -// CHECK-LABEL: contraction_matmul -func.func @contraction_matmul(%A: memref<1584x1584xf32>, %B: memref<1584x1584xf32>, %C: memref<1584x1584xf32>) { -// CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<1584x1584x1584xf32> -// CHECK: vector.multi_reduction <add>, %{{.*}}, {{.*}} [2] : vector<1584x1584x1584xf32> to vector<1584x1584xf32> - linalg.matmul ins(%A, %B: memref<1584x1584xf32>, memref<1584x1584xf32>) - outs(%C: memref<1584x1584xf32>) - return -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op -} - -// ----- - -// CHECK-LABEL: contraction_batch_matmul -func.func @contraction_batch_matmul(%A: memref<1584x1584x1584xf32>, %B: memref<1584x1584x1584xf32>, %C: memref<1584x1584x1584xf32>) { -// CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<1584x1584x1584x1584xf32> -// CHECK: vector.multi_reduction <add>, %{{.*}}, {{.*}} [3] : vector<1584x1584x1584x1584xf32> to vector<1584x1584x1584xf32> - linalg.batch_matmul - ins(%A, %B: memref<1584x1584x1584xf32>, memref<1584x1584x1584xf32>) - outs(%C: memref<1584x1584x1584xf32>) - return -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["linalg.batch_matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op -} - -// ----- - -#matmul_trait = { - args_in = 2, - args_out = 1, - indexing_maps = [ - affine_map<(m, n, k) -> (m, k)>, - affine_map<(m, n, k) -> (k, n)>, - affine_map<(m, n, k) -> (m, n)> - ], - iterator_types = ["parallel", "parallel", "reduction"] -} - -// CHECK-LABEL: func @vectorization_test -func.func @vectorization_test(%A: memref<8x16xf32>, %B: memref<16x32xf32>, - %C: memref<8x32xf32>) { - // CHECK: vector.transfer_read %{{.*}} : memref<8x16xf32>, vector<8x32x16xf32> - // 
CHECK: vector.transfer_read %{{.*}} : memref<16x32xf32>, vector<8x32x16xf32> - // CHECK: %[[ACC:.*]] = vector.transfer_read %{{.*}} : memref<8x32xf32>, vector<8x32xf32> - // CHECK: %[[MUL:.*]] = arith.mulf %{{.*}}, %{{.*}} : vector<8x32x16xf32> - // CHECK: %[[R:.*]] = vector.multi_reduction <add>, %[[MUL]], %[[ACC]] [2] : vector<8x32x16xf32> to vector<8x32xf32> - // CHECK: vector.transfer_write %{{.*}}, %{{.*}} : vector<8x32xf32>, memref<8x32xf32> - linalg.generic #matmul_trait - ins(%A, %B : memref<8x16xf32>, memref<16x32xf32>) - outs(%C : memref<8x32xf32>) { - ^bb(%a: f32, %b: f32, %c: f32) : - %d = arith.mulf %a, %b: f32 - %e = arith.addf %c, %d: f32 - linalg.yield %e : f32 - } - return -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op -} - -// ----- - -#matmul_transpose_out_trait = { - args_in = 2, - args_out = 1, - indexing_maps = [ - affine_map<(m, n, k) -> (m, k)>, - affine_map<(m, n, k) -> (k, n)>, - affine_map<(m, n, k) -> (n, m)> - ], - iterator_types = ["parallel", "parallel", "reduction"] -} - -// CHECK-LABEL: func @generic_output_transpose -func.func @generic_output_transpose(%A: memref<8x16xf32>, %B: memref<16x32xf32>, - %C: memref<32x8xf32>) { - // CHECK: vector.transfer_read %{{.*}} : memref<8x16xf32>, vector<8x32x16xf32> - // CHECK: vector.transfer_read %{{.*}} : memref<16x32xf32>, vector<8x32x16xf32> - // CHECK: %[[ACC:.*]] = vector.transfer_read %{{.*}} : memref<32x8xf32>, vector<8x32xf32> - // CHECK: %[[MUL:.*]] = arith.mulf %{{.*}}, %{{.*}} : vector<8x32x16xf32> - // CHECK: %[[R:.*]] = vector.multi_reduction <add>, %[[MUL]], %[[ACC]] [2] : vector<8x32x16xf32> to vector<8x32xf32> - // CHECK: vector.transfer_write %{{.*}}, %{{.*}} : vector<8x32xf32>, memref<32x8xf32> - linalg.generic #matmul_transpose_out_trait - ins(%A, %B : memref<8x16xf32>, memref<16x32xf32>) - outs(%C : memref<32x8xf32>) { - ^bb(%a: f32, %b: f32, %c: f32) : - %d = arith.mulf %a, %b: f32 - %e = arith.addf %c, %d: f32 - linalg.yield %e : f32 - } - return -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op -} - -// ----- - -#map0 = affine_map<(d0, d1, d2) -> (d0, d1, d2)> -#map1 = affine_map<(d0, d1, d2) -> (d1, d0, d2)> -// CHECK: #[[MAP:.+]] = affine_map<(d0, d1, d2) -> (d1, d0, d2)> -// CHECK: func @generic_interchanged_transpose -func.func @generic_interchanged_transpose(%arg0: tensor<12x128x32xf32>) -> tensor<128x12x32xf32> { - // CHECK: %[[IN:.+]] = vector.transfer_read - // CHECK: vector.transfer_write %[[IN]], {{.+}} permutation_map = #[[MAP]] - %0 = tensor.empty() : tensor<128x12x32xf32> - %1 = linalg.generic {indexing_maps = [#map0, #map1], - iterator_types = ["parallel", "parallel", "parallel"]} - ins(%arg0 : tensor<12x128x32xf32>) - outs(%0 : tensor<128x12x32xf32>) { - ^bb0(%arg1: f32, 
%arg2: f32): - linalg.yield %arg1 : f32 - } -> tensor<128x12x32xf32> - return %1 : tensor<128x12x32xf32> -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op -} - -// ----- - -#matmul_trait = { - args_in = 2, - args_out = 1, - indexing_maps = [ - affine_map<(m, n, k) -> (m, k)>, - affine_map<(m, n, k) -> (k, n)>, - affine_map<(m, n, k) -> (m, n)> - ], - iterator_types = ["parallel", "parallel", "reduction"] -} - -// CHECK-LABEL: func @vectorization_test_integer -func.func @vectorization_test_integer(%A: memref<8x16xi32>, %B: memref<16x32xi32>, - %C: memref<8x32xi32>) { - // CHECK: vector.transfer_read %{{.*}} : memref<8x16xi32>, vector<8x32x16xi32> - // CHECK: vector.transfer_read %{{.*}} : memref<16x32xi32>, vector<8x32x16xi32> - // CHECK: %[[ACC:.*]] = vector.transfer_read %{{.*}} : memref<8x32xi32>, vector<8x32xi32> - // CHECK: %[[MUL:.*]] = arith.muli %{{.*}}, %{{.*}} : vector<8x32x16xi32> - // CHECK: vector.multi_reduction <add>, %[[MUL]], %[[ACC]] [2] : vector<8x32x16xi32> to vector<8x32xi32> - // CHECK: vector.transfer_write %{{.*}}, %{{.*}} : vector<8x32xi32>, memref<8x32xi32> - linalg.generic #matmul_trait - ins(%A, %B : memref<8x16xi32>, memref<16x32xi32>) - outs(%C : memref<8x32xi32>) { - ^bb(%a: i32, %b: i32, %c: i32) : - %d = arith.muli %a, %b: i32 - %e = arith.addi %c, %d: i32 - linalg.yield %e : i32 - } - return -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op -} - -// ----- - -// CHECK-LABEL: func @vectorization_test_2 -func.func @vectorization_test_2(%A: memref<8x16xf32>, %B: memref<16x32xf32>, - %C: memref<8x32xf32>) { - // CHECK: arith.mulf %{{.*}}, %{{.*}} : vector<8x32x16xf32> - // CHECK: vector.multi_reduction <add>, %{{.*}}, {{.*}} [2] : vector<8x32x16xf32> to vector<8x32xf32> - linalg.matmul - ins(%A, %B: memref<8x16xf32>, memref<16x32xf32>) - outs(%C: memref<8x32xf32>) - return -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns } : (!transform.any_op) -> !transform.any_op -} - -// ----- - -// CHECK-LABEL: func @test_vectorize_scalar_input -func.func @test_vectorize_scalar_input(%A : memref<8x16xf32>, %arg0 : f32) { - // CHECK: %[[V:.*]] = vector.broadcast {{.*}} : f32 to vector<8x16xf32> - // CHECK: vector.transfer_write %[[V]], {{.*}} : vector<8x16xf32>, memref<8x16xf32> - linalg.generic { - indexing_maps = [affine_map<(m, n) -> ()>, affine_map<(m, n) -> (m, n)>], - iterator_types = ["parallel", "parallel"]} - ins(%arg0 : f32) - outs(%A: memref<8x16xf32>) 
{ - ^bb(%0: f32, %1: f32) : +func.func @vectorize_dynamic_identity(%arg0: tensor<?xf32>, + %arg1: tensor<?xf32>, + %arg2: tensor<?xf32>) -> tensor<?xf32> { + %0 = linalg.generic { indexing_maps = [affine_map<(d0) -> (d0)>, + affine_map<(d0) -> (d0)>, + affine_map<(d0) -> (d0)>], + iterator_types = ["parallel"] } + ins(%arg0, %arg1 : tensor<?xf32>, tensor<?xf32>) + outs(%arg2 : tensor<?xf32>) { + ^bb(%in0: f32, %in1: f32, %out: f32) : + %0 = arith.addf %in0, %in1 : f32 linalg.yield %0 : f32 - } - return -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op -} - -// ----- - -// CHECK-LABEL: func @test_do_not_vectorize_unsupported_element_types -func.func @test_do_not_vectorize_unsupported_element_types(%A : memref<8x16xcomplex<f32>>, %arg0 : complex<f32>) { - // CHECK-NOT: vector.broadcast - // CHECK-NOT: vector.transfer_write - linalg.generic { - indexing_maps = [affine_map<(m, n) -> ()>, affine_map<(m, n) -> (m, n)>], - iterator_types = ["parallel", "parallel"]} - ins(%arg0 : complex<f32>) - outs(%A: memref<8x16xcomplex<f32>>) { - ^bb(%0: complex<f32>, %1: complex<f32>) : - linalg.yield %0 : complex<f32> - } - return -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op -} - -// ----- - -#map0 = affine_map<(d0) -> (d0)> - -func.func @vectorize_affine_apply(%arg0: tensor<5xf32>, %arg3: index) -> tensor<5xi32> { - %0 = tensor.empty() : tensor<5xi32> - %1 = linalg.generic {indexing_maps = [#map0, #map0], - iterator_types = ["parallel"]} - ins(%arg0 : tensor<5xf32>) - outs(%0 : tensor<5xi32>) { - ^bb0(%arg1: f32, %arg2: i32): - %2 = linalg.index 0 : index - %11 = affine.apply affine_map<() -> (123)>() - %12 = affine.apply affine_map<(d0, d1) -> (d0 + d1)>(%2, %11) - %13 = affine.apply affine_map<(d0)[s0] -> (d0 + s0)>(%12)[%arg3] - %14 = affine.apply affine_map<(d0) -> (d0 + 1)>(%13) - %15 = affine.apply affine_map<(d0, d1, d2) -> (d0 + d1 + d2)>(%13, %14, %12) - %3 = arith.index_cast %15 : index to i32 - linalg.yield %3 : i32 - } -> tensor<5xi32> - return %1 : tensor<5xi32> -} - -// CHECK-LABEL: func.func @vectorize_affine_apply -// CHECK-SAME: %arg0: tensor<5xf32> -// CHECK-SAME: %[[ARG1:.*]]: index -// CHECK: %[[CST:.*]] = arith.constant dense<[123, 124, 125, 126, 127]> : vector<5xindex> -// CHECK: %[[CST_0:.*]] = arith.constant dense<1> : vector<5xindex> -// CHECK: %[[C0:.*]] = arith.constant 0 : index -// CHECK: %[[EMPTY:.*]] = tensor.empty() : tensor<5xi32> -// CHECK: %[[BCAST:.*]] = vector.broadcast %[[ARG1]] : index to vector<5xindex> -// CHECK: %[[ADDI_1:.*]] = arith.addi %[[BCAST]], %[[CST]] : vector<5xindex> -// CHECK: %[[ADDI_2:.*]] = arith.addi %[[ADDI_1]], %[[CST_0]] : vector<5xindex> -// CHECK: %[[ADDI_3:.*]] = arith.addi %[[ADDI_1]], %[[ADDI_2]] : vector<5xindex> -// CHECK: %[[ADDI_4:.*]] = arith.addi %[[ADDI_3]], %[[CST]] : vector<5xindex> -// CHECK: %[[CAST:.*]] = arith.index_cast %[[ADDI_4]] : vector<5xindex> to vector<5xi32> -// CHECK: vector.transfer_write %[[CAST]], %[[EMPTY]][%[[C0:.*]]] {in_bounds = [true]} : vector<5xi32>, tensor<5xi32> - 
-transform.sequence failures(propagate) { - ^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op -} - -// ----- - -// CHECK-LABEL: func @test_vectorize_fill -func.func @test_vectorize_fill(%A : memref<8x16xf32>, %arg0 : f32) { - // CHECK: %[[V:.*]] = vector.broadcast {{.*}} : f32 to vector<8x16xf32> - // CHECK: vector.transfer_write %[[V]], {{.*}} : vector<8x16xf32>, memref<8x16xf32> - linalg.fill ins(%arg0 : f32) outs(%A : memref<8x16xf32>) - return -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op -} - -// ----- - -// CHECK-LABEL: func @test_vectorize_fill -func.func @test_vectorize_fill_scalar(%A : memref<f32>, %arg0 : f32) { - // CHECK-SAME: (%[[M:.*]]: memref<f32>, %[[val:.*]]: f32) - // CHECK: %[[VEC:.*]] = vector.broadcast %[[val]] : f32 to vector<f32> - // CHECK: vector.transfer_write %[[VEC]], %[[M]][] : vector<f32>, memref<f32> - linalg.fill ins(%arg0 : f32) outs(%A : memref<f32>) - return -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op -} - -// ----- - -// CHECK-LABEL: func @test_vectorize_copy -func.func @test_vectorize_copy(%A : memref<8x16xf32>, %B : memref<8x16xf32>) { - // CHECK: %[[V:.*]] = vector.transfer_read {{.*}} : memref<8x16xf32>, vector<8x16xf32> - // CHECK: vector.transfer_write %[[V]], {{.*}} : vector<8x16xf32>, memref<8x16xf32> - memref.copy %A, %B : memref<8x16xf32> to memref<8x16xf32> - return -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["memref.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op -} - -// ----- - -// CHECK-LABEL: func @test_vectorize_copy_scalar -func.func @test_vectorize_copy_scalar(%A : memref<f32>, %B : memref<f32>) { - // CHECK-SAME: (%[[A:.*]]: memref<f32>, %[[B:.*]]: memref<f32>) - // CHECK: %[[V:.*]] = vector.transfer_read %[[A]][]{{.*}} : memref<f32>, vector<f32> - // CHECK: %[[val:.*]] = vector.extractelement %[[V]][] : vector<f32> - // CHECK: %[[VV:.*]] = vector.broadcast %[[val]] : f32 to vector<f32> - // CHECK: vector.transfer_write %[[VV]], %[[B]][] : vector<f32>, memref<f32> - memref.copy %A, %B : memref<f32> to memref<f32> - return -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["memref.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op -} - -// ----- - -// CHECK-LABEL: func @test_vectorize_copy_complex -// CHECK-NOT: vector< 
-func.func @test_vectorize_copy_complex(%A : memref<8x16xcomplex<f32>>, %B : memref<8x16xcomplex<f32>>) { - memref.copy %A, %B : memref<8x16xcomplex<f32>> to memref<8x16xcomplex<f32>> - return -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["memref.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op -} - -// ----- - -// CHECK-LABEL: func @test_vectorize_trailing_index - // CHECK-SAME: (%[[ARG0:.*]]: memref<1x2x4x8xindex>) -func.func @test_vectorize_trailing_index(%arg0: memref<1x2x4x8xindex>) { - // CHECK-DAG: %[[CST0:.*]] = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7]> : vector<8xindex> - // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index - linalg.generic { - indexing_maps = [ - affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], - iterator_types = ["parallel", "parallel", "parallel", "parallel"]} - outs(%arg0: memref<1x2x4x8xindex>) { - ^bb0(%arg1: index): - // CHECK: %[[BCST:.*]] = vector.broadcast %[[CST0]] : vector<8xindex> to vector<1x2x4x8xindex> - // CHECK: vector.transfer_write %[[BCST]], %[[ARG0]][%[[C0]], %[[C0]], %[[C0]], %[[C0]]] {{.*}} : vector<1x2x4x8xindex>, memref<1x2x4x8xindex> - %0 = linalg.index 3 : index - linalg.yield %0 : index - } - return -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op -} - -// ----- - -// CHECK-LABEL: func @test_vectorize_inner_index - // CHECK-SAME: (%[[ARG0:.*]]: memref<1x2x4x8xindex>) -func.func @test_vectorize_inner_index(%arg0: memref<1x2x4x8xindex>) { - // CHECK-DAG: %[[CST0:.*]] = arith.constant dense<[0, 1]> : vector<2xindex> - // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index - linalg.generic { - indexing_maps = [ - affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], - iterator_types = ["parallel", "parallel", "parallel", "parallel"]} - outs(%arg0: memref<1x2x4x8xindex>) { - ^bb0(%arg1: index): - // CHECK: %[[BCST:.*]] = vector.broadcast %[[CST0]] : vector<2xindex> to vector<1x8x4x2xindex> - // CHECK: %[[TRAN:.*]] = vector.transpose %[[BCST]], [0, 3, 2, 1] : vector<1x8x4x2xindex> to vector<1x2x4x8xindex> - // CHECK: vector.transfer_write %[[TRAN]], %[[ARG0]][%[[C0]], %[[C0]], %[[C0]], %[[C0]]] {{.*}} : vector<1x2x4x8xindex>, memref<1x2x4x8xindex> - %0 = linalg.index 1 : index - linalg.yield %0 : index - } - return -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op -} - -// ----- - -// CHECK-LABEL: func @generic_vectorize - // CHECK-SAME: (%[[ARG0:.*]]: memref<4x256xf32>, %[[ARG1:.*]]: memref<4x256xf32>, - // CHECK-SAME: %[[ARG2:.*]]: memref<256xf32>, %[[ARG3:.*]]: f32) -func.func @generic_vectorize(%arg0: memref<4x256xf32>, - %arg1: memref<4x256xf32>, - %arg2: memref<256xf32>, %i: f32) { - // CHECK-DAG: %[[CST0:.*]] = arith.constant dense<2.000000e+00> : vector<4x256xf32> - // CHECK-DAG: 
%[[CST1:.*]] = arith.constant dense<1.000000e+00> : vector<4x256xf32> - // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index - %c1_f32 = arith.constant 1.0 : f32 - linalg.generic { - args_in = 0 : i64, - args_out = 10 : i64, - indexing_maps = [ - affine_map<(d0, d1) -> (d0, d1)>, - affine_map<(d0, d1) -> (d1)>, - affine_map<(d0, d1) -> (d0, d1)>, - affine_map<(d0, d1) -> (d0, d1)>, - affine_map<(d0, d1) -> (d0, d1)>, - affine_map<(d0, d1) -> (d0, d1)>, - affine_map<(d0, d1) -> (d0, d1)>, - affine_map<(d0, d1) -> (d0, d1)>, - affine_map<(d0, d1) -> (d0, d1)>, - affine_map<(d0, d1) -> (d0, d1)>, - affine_map<(d0, d1) -> (d0, d1)>, - affine_map<(d0, d1) -> (d0, d1)>], - iterator_types = ["parallel", "parallel"]} - ins(%arg1, %arg2: memref<4x256xf32>, memref<256xf32>) - outs( - %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0 : - memref<4x256xf32>, memref<4x256xf32>, memref<4x256xf32>, memref<4x256xf32>, - memref<4x256xf32>, memref<4x256xf32>, memref<4x256xf32>, memref<4x256xf32>, - memref<4x256xf32>, memref<4x256xf32>) { - ^bb0(%arg3 : f32, %arg4 : f32, %arg5: f32, %arg6: f32, %arg7: f32, %arg8: f32, - // CHECK: %[[V2:.*]] = vector.transfer_read %[[ARG1]][%[[C0]], %[[C0]]], {{.*}} : memref<4x256xf32>, vector<4x256xf32> - // CHECK: %[[V0:.*]] = vector.transfer_read %[[ARG2]][%[[C0]]], {{.*}} : memref<256xf32>, vector<4x256xf32> - // CHECK: %[[V3:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], {{.*}} : memref<4x256xf32>, vector<4x256xf32> - // CHECK: %[[V1:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], {{.*}} : memref<4x256xf32>, vector<4x256xf32> - %arg9 : f32, %arg10 : f32, %arg11 : f32, %arg12 : f32, %arg13 : f32, - %arg14 : f32): - // CHECK: %[[ADD:.*]] = arith.addf %[[V0]], %[[V1]] : vector<4x256xf32> - %6 = arith.addf %arg4, %arg6 : f32 - // CHECK: %[[CMP:.*]] = arith.cmpf ogt, %[[V2]], %[[V1]] : vector<4x256xf32> - %7 = arith.cmpf ogt, %arg3, %arg6 : f32 - // CHECK: %[[ARG3B:.*]] = vector.broadcast %[[ARG3]] : f32 to vector<4x256xf32> - %8 = arith.constant 2.0 : f32 - // CHECK: %[[DIV:.*]] = arith.divf %[[V3]], %[[ARG3B]] : vector<4x256xf32> - %9 = arith.divf %arg5, %i : f32 - // CHECK: %[[EXP:.*]] = math.exp2 %[[V3]] : vector<4x256xf32> - %10 = math.exp2 %arg5 : f32 - // CHECK: %[[MUL:.*]] = arith.mulf %[[V3]], %[[CST0]] : vector<4x256xf32> - %11 = arith.mulf %arg5, %8 : f32 - // CHECK: %[[RSQRT:.*]] = math.rsqrt %[[V3]] : vector<4x256xf32> - %12 = math.rsqrt %arg5 : f32 - // CHECK: %[[SEL:.*]] = arith.select %[[CMP]], %[[V3]], %[[V1]] : vector<4x256xi1>, vector<4x256xf32> - %13 = arith.select %7, %arg5, %arg6 : f32 - // CHECK: %[[SUB:.*]] = arith.subf %[[V3]], %[[V0]] : vector<4x256xf32> - %14 = arith.subf %arg5, %arg4 : f32 - // CHECK: %[[TAN:.*]] = math.tanh %[[V3]] : vector<4x256xf32> - %15 = math.tanh %arg5 : f32 - // CHECK: vector.transfer_write %[[ADD]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32> - // CHECK: vector.transfer_write %[[CST0]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32> - // CHECK: vector.transfer_write %[[CST1]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32> - // CHECK: vector.transfer_write %[[DIV]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32> - // CHECK: vector.transfer_write %[[EXP]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32> - // CHECK: vector.transfer_write %[[MUL]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32> - // CHECK: vector.transfer_write 
%[[RSQRT]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32> - // CHECK: vector.transfer_write %[[SEL]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32> - // CHECK: vector.transfer_write %[[SUB]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32> - // CHECK: vector.transfer_write %[[TAN]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, memref<4x256xf32> - linalg.yield %6, %8, %c1_f32, %9, %10, %11, %12, %13, %14, %15 : f32, f32, - f32, f32, f32, f32, f32, f32, f32, f32 - } - return -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 { disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op -} - -// ----- - -// CHECK-LABEL: func @generic_vectorize_tensor -// CHECK-SAME: (%[[ARG0:.*]]: tensor<4x256xf32>, %[[ARG1:.*]]: tensor<4x256xf32>, -// CHECK-SAME: %[[ARG2:.*]]: tensor<256xf32>, %[[ARG3:.*]]: f32) -func.func @generic_vectorize_tensor(%arg0: tensor<4x256xf32>, - %arg1: tensor<4x256xf32>, %arg2: tensor<256xf32>, - %i: f32) -> (tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, - tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, - tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>) { - %c1_f32 = arith.constant 1.0 : f32 - %r:10 = linalg.generic { - indexing_maps = [ - affine_map<(d0, d1) -> (d0, d1)>, - affine_map<(d0, d1) -> (d1)>, - affine_map<(d0, d1) -> (d0, d1)>, - affine_map<(d0, d1) -> (d0, d1)>, - affine_map<(d0, d1) -> (d0, d1)>, - affine_map<(d0, d1) -> (d0, d1)>, - affine_map<(d0, d1) -> (d0, d1)>, - affine_map<(d0, d1) -> (d0, d1)>, - affine_map<(d0, d1) -> (d0, d1)>, - affine_map<(d0, d1) -> (d0, d1)>, - affine_map<(d0, d1) -> (d0, d1)>, - affine_map<(d0, d1) -> (d0, d1)>], - iterator_types = ["parallel", "parallel"]} - ins(%arg1, %arg2: tensor<4x256xf32>, tensor<256xf32>) - outs( - %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0, %arg0 : - tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, - tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, - tensor<4x256xf32>, tensor<4x256xf32>) { - ^bb0(%arg3 : f32, %arg4 : f32, %arg5: f32, %arg6: f32, %arg7: f32, %arg8: f32, - %arg9 : f32, %arg10 : f32, %arg11 : f32, %arg12 : f32, %arg13 : f32, - %arg14 : f32): - // CHECK-DAG: %[[CST0:.*]] = arith.constant dense<2.000000e+00> : vector<4x256xf32> - // CHECK-DAG: %[[CST1:.*]] = arith.constant dense<1.000000e+00> : vector<4x256xf32> - // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index - // CHECK: %[[V2:.*]] = vector.transfer_read %[[ARG1]][%[[C0]], %[[C0]]], {{.*}} : tensor<4x256xf32>, vector<4x256xf32> - // CHECK: %[[V0:.*]] = vector.transfer_read %[[ARG2]][%[[C0]]], {{.*}} : tensor<256xf32>, vector<4x256xf32> - // CHECK: %[[V3:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], {{.*}} : tensor<4x256xf32>, vector<4x256xf32> - // CHECK: %[[V1:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], {{.*}} : tensor<4x256xf32>, vector<4x256xf32> - // CHECK: %[[ADD:.*]] = arith.addf %[[V0]], %[[V1]] : vector<4x256xf32> - %6 = arith.addf %arg4, %arg6 : f32 - // CHECK: %[[CMP:.*]] = arith.cmpf ogt, %[[V2]], %[[V1]] : vector<4x256xf32> - %7 = arith.cmpf ogt, %arg3, %arg6 : f32 - // CHECK: %[[ARG3B:.*]] = 
vector.broadcast %[[ARG3]] : f32 to vector<4x256xf32> - %8 = arith.constant 2.0 : f32 - // CHECK: %[[DIV:.*]] = arith.divf %[[V3]], %[[ARG3B]] : vector<4x256xf32> - %9 = arith.divf %arg5, %i : f32 - // CHECK: %[[EXP:.*]] = math.exp2 %[[V3]] : vector<4x256xf32> - %10 = math.exp2 %arg5 : f32 - // CHECK: %[[MUL:.*]] = arith.mulf %[[V3]], %[[CST0]] : vector<4x256xf32> - %11 = arith.mulf %arg5, %8 : f32 - // CHECK: %[[RSQRT:.*]] = math.rsqrt %[[V3]] : vector<4x256xf32> - %12 = math.rsqrt %arg5 : f32 - // CHECK: %[[SEL:.*]] = arith.select %[[CMP]], %[[V3]], %[[V1]] : vector<4x256xi1>, vector<4x256xf32> - %13 = arith.select %7, %arg5, %arg6 : f32 - // CHECK: %[[SUB:.*]] = arith.subf %[[V3]], %[[V0]] : vector<4x256xf32> - %14 = arith.subf %arg5, %arg4 : f32 - // CHECK: %[[TAN:.*]] = math.tanh %[[V3]] : vector<4x256xf32> - %15 = math.tanh %arg5 : f32 - // CHECK: %[[R0:.*]] = vector.transfer_write %[[ADD]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32> - // CHECK: %[[R1:.*]] = vector.transfer_write %[[CST0]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32> - // CHECK: %[[R2:.*]] = vector.transfer_write %[[CST1]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32> - // CHECK: %[[R3:.*]] = vector.transfer_write %[[DIV]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32> - // CHECK: %[[R4:.*]] = vector.transfer_write %[[EXP]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32> - // CHECK: %[[R5:.*]] = vector.transfer_write %[[MUL]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32> - // CHECK: %[[R6:.*]] = vector.transfer_write %[[RSQRT]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32> - // CHECK: %[[R7:.*]] = vector.transfer_write %[[SEL]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32> - // CHECK: %[[R8:.*]] = vector.transfer_write %[[SUB]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32> - // CHECK: %[[R9:.*]] = vector.transfer_write %[[TAN]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32> - linalg.yield %6, %8, %c1_f32, %9, %10, %11, %12, %13, %14, %15 : f32, f32, - f32, f32, f32, f32, f32, f32, f32, f32 - } -> (tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, - tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, - tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>) - // CHECK: return %[[R0]], %[[R1]], %[[R2]], %[[R3]], %[[R4]], %[[R5]], %[[R6]], %[[R7]], %[[R8]], %[[R9]] : tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32> - return %r#0, %r#1, %r#2, %r#3, %r#4, %r#5, %r#6, %r#7, %r#8, %r#9: - tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, - tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, - tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32> -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 { disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op -} - -// ----- - -// CHECK-DAG: #[[$MAP0:.*]] = affine_map<(d0, d1) -> (d0, 0, 0, 
d1)> -// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0) -> (d0, 0, 0, 0)> -// CHECK-DAG: #[[$MAP2:.*]] = affine_map<(d0) -> (0, 0, d0, 0)> -// CHECK-DAG: #[[$MAP3:.*]] = affine_map<(d0, d1) -> (d1, 0, d0, 0)> -// CHECK: func @generic_vectorize_broadcast_transpose -// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index -// CHECK-DAG: %[[CF:.*]] = arith.constant 0.000000e+00 : f32 -// CHECK: %[[V0:.*]] = vector.transfer_read %{{.*}}[%[[C0]], %[[C0]]], %[[CF]] {in_bounds = [true, true, true, true], permutation_map = #[[$MAP0]]} : memref<4x4xf32>, vector<4x4x4x4xf32> -// CHECK: %[[V1:.*]] = vector.transfer_read %{{.*}}[%[[C0]]], %[[CF]] {in_bounds = [true, true, true, true], permutation_map = #[[$MAP1]]} : memref<4xf32>, vector<4x4x4x4xf32> -// CHECK: %[[V2:.*]] = vector.transfer_read %{{.*}}[%[[C0]]], %[[CF]] {in_bounds = [true, true, true, true], permutation_map = #[[$MAP2]]} : memref<4xf32>, vector<4x4x4x4xf32> -// CHECK: %[[V3:.*]] = vector.transfer_read %{{.*}}[%[[C0]], %[[C0]]], %[[CF]] {in_bounds = [true, true, true, true], permutation_map = #[[$MAP3]]} : memref<4x4xf32>, vector<4x4x4x4xf32> -// CHECK: %[[SUB:.*]] = arith.subf %[[V0]], %[[V1]] : vector<4x4x4x4xf32> -// CHECK: %[[ADD0:.*]] = arith.addf %[[V2]], %[[SUB]] : vector<4x4x4x4xf32> -// CHECK: %[[ADD1:.*]] = arith.addf %[[V3]], %[[ADD0]] : vector<4x4x4x4xf32> -// CHECK: vector.transfer_write %[[ADD1]], {{.*}} : vector<4x4x4x4xf32>, memref<4x4x4x4xf32> -func.func @generic_vectorize_broadcast_transpose( - %A: memref<4xf32>, %B: memref<4x4xf32>, %C: memref<4x4x4x4xf32>) { - linalg.generic { - indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d3)>, - affine_map<(d0, d1, d2, d3) -> (d0)>, - affine_map<(d0, d1, d2, d3) -> (d2)>, - affine_map<(d0, d1, d2, d3) -> (d2, d0)>, - affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], - iterator_types = ["parallel", "parallel", "parallel", "parallel"]} - ins(%B, %A, %A, %B: memref<4x4xf32>, memref<4xf32>, memref<4xf32>, memref<4x4xf32>) - outs(%C : memref<4x4x4x4xf32>) { - ^bb0(%arg0: f32, %arg1: f32, %arg2: f32, %arg3: f32, %arg4: f32): - %s = arith.subf %arg0, %arg1 : f32 - %a = arith.addf %arg2, %s : f32 - %b = arith.addf %arg3, %a : f32 - linalg.yield %b : f32 - } - return -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 { disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op -} - -// ----- - -// Test different input maps. 
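As a sketch of what "different input maps" exercises here (hedged: this just restates the CHECK lines of the test below, it adds no new behavior): with the permutation-map lowering patterns disabled, a non-identity input map such as affine_map<(d0, d1, d2, d3) -> (d1, d0)> stays on the read itself as a combined broadcast/transpose permutation_map,

  vector.transfer_read %A[%c0, %c0], %cst
    {in_bounds = [true, true, true, true],
     permutation_map = affine_map<(d0, d1) -> (d1, d0, 0, 0)>}
    : memref<14x7xf32>, vector<7x14x8x16xf32>

instead of being materialized as separate vector.transpose / vector.broadcast ops.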
-#matmul_trait = { - indexing_maps = [ - affine_map<(d0, d1, d2, d3) -> (d1, d0)>, - affine_map<(d0, d1, d2, d3) -> (d3, d1)>, - affine_map<(d0, d1, d2, d3) -> (d3, d1, d0, d2)>, - affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)> - ], - iterator_types = ["parallel", "parallel", "parallel", "parallel"] -} - -// CHECK-DAG: #[[MAP0:.*]] = affine_map<(d0, d1) -> (d1, d0, 0, 0)> -// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1) -> (0, d1, 0, d0)> -// CHECK-DAG: #[[MAP2:.*]] = affine_map<(d0, d1, d2, d3) -> (d2, d1, d3, d0)> -// CHECK: func @vectorization_transpose -// CHECK: vector.transfer_read {{.*}}{in_bounds = [true, true, true, true], permutation_map = #[[MAP0]]} : memref<14x7xf32>, vector<7x14x8x16xf32> -// CHECK: vector.transfer_read {{.*}}{in_bounds = [true, true, true, true], permutation_map = #[[MAP1]]} : memref<16x14xf32>, vector<7x14x8x16xf32> -// CHECK: vector.transfer_read {{.*}}{in_bounds = [true, true, true, true], permutation_map = #[[MAP2]]} : memref<16x14x7x8xf32>, vector<7x14x8x16xf32> -// CHECK: arith.addf {{.*}} : vector<7x14x8x16xf32> -// CHECK: arith.addf {{.*}} : vector<7x14x8x16xf32> -// CHECK: vector.transfer_write {{.*}} : vector<7x14x8x16xf32>, memref<7x14x8x16xf32> -func.func @vectorization_transpose(%A: memref<14x7xf32>, %B: memref<16x14xf32>, - %C: memref<16x14x7x8xf32>, %D: memref<7x14x8x16xf32>) { - linalg.generic #matmul_trait - ins(%A, %B, %C : memref<14x7xf32>, memref<16x14xf32>, memref<16x14x7x8xf32>) - outs(%D : memref<7x14x8x16xf32>) { - ^bb(%a: f32, %b: f32, %c: f32, %d: f32) : - %e = arith.addf %a, %b: f32 - %f = arith.addf %e, %c: f32 - linalg.yield %f : f32 - } - return -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 { disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op -} - -// ----- - -// CHECK-LABEL: func @matmul_tensors -// CHECK-SAME: (%[[ARG0:.*]]: tensor<8x4xf32>, %[[ARG1:.*]]: tensor<4x12xf32>, -// CHECK-SAME: %[[ARG2:.*]]: tensor<8x12xf32>) -> tensor<8x12xf32> -func.func @matmul_tensors( - %arg0: tensor<8x4xf32>, %arg1: tensor<4x12xf32>, %arg2: tensor<8x12xf32>) - -> tensor<8x12xf32> { - // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index - // CHECK-DAG: %[[V0:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], {{.*}} : tensor<8x4xf32>, vector<8x12x4xf32> - // CHECK-DAG: %[[V1:.*]] = vector.transfer_read %[[ARG1]][%[[C0]], %[[C0]]], {{.*}} : tensor<4x12xf32>, vector<8x12x4xf32> - // CHECK-DAG: %[[V2:.*]] = vector.transfer_read %[[ARG2]][%[[C0]], %[[C0]]], {{.*}} : tensor<8x12xf32>, vector<8x12xf32> - // - // linalg matmul lowers gets expanded to a 3D reduction, canonicalization later - // convert it to a 2D contract. 
- // CHECK: %[[MUL:.*]] = arith.mulf %[[V0]], %[[V1]] : vector<8x12x4xf32> - // CHECK: %[[R:.*]] = vector.multi_reduction <add>, %[[MUL]], %[[V2]] [2] : vector<8x12x4xf32> to vector<8x12xf32> - // CHECK: %[[W:.*]] = vector.transfer_write %[[R]], %[[ARG2]][%[[C0]], %[[C0]]] {in_bounds = [true, true]} : vector<8x12xf32>, tensor<8x12xf32> - %0 = linalg.matmul ins(%arg0, %arg1: tensor<8x4xf32>, tensor<4x12xf32>) - outs(%arg2: tensor<8x12xf32>) - -> tensor<8x12xf32> - // CHECK: return %[[W]] : tensor<8x12xf32> - return %0 : tensor<8x12xf32> -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op -} - -// ----- - -// CHECK-LABEL: func @pad_static( -// CHECK-SAME: %[[ARG0:.*]]: tensor<2x?x2xf32>, %[[PAD:.*]]: f32 -// CHECK-NOT: tensor.pad -// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index -// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index -// CHECK-DAG: %[[INIT:.*]] = tensor.empty() : tensor<2x3x4xf32> -// CHECK-DAG: %[[VEC:.*]] = vector.broadcast %[[PAD]] : f32 to vector<2x3x4xf32> -// CHECK: %[[FILL:.*]] = vector.transfer_write %[[VEC]], %[[INIT]]{{.*}} : vector<2x3x4xf32>, tensor<2x3x4xf32> -// CHECK: %[[READ:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]], %[[C0]]], %[[PAD]] {in_bounds = [true, false, true]} : tensor<2x?x2xf32>, vector<2x3x2xf32> -// CHECK: %[[RESULT:.*]] = vector.transfer_write %[[READ]], %[[FILL]][%[[C0]], %[[C0]], %[[C2]]] {in_bounds = [true, true, true]} : vector<2x3x2xf32>, tensor<2x3x4xf32> -// CHECK: return %[[RESULT]] -func.func @pad_static(%arg0: tensor<2x?x2xf32>, %pad_value: f32) -> tensor<2x3x4xf32> { - %0 = tensor.pad %arg0 low[0, 0, 2] high[0, 1, 0] { - ^bb0(%arg1: index, %arg2: index, %arg3: index): - tensor.yield %pad_value : f32 - } : tensor<2x?x2xf32> to tensor<2x3x4xf32> - return %0 : tensor<2x3x4xf32> -} - - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 { vectorize_padding } : (!transform.any_op) -> !transform.any_op -} - -// ----- - -// CHECK-LABEL: func @pad_static_source( -// CHECK-SAME: %[[ARG0:.*]]: tensor<2x5x2xf32>, %[[PAD:.*]]: f32 -// CHECK-NOT: tensor.pad -// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index -// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index -// CHECK: %[[INIT:.*]] = tensor.empty() : tensor<2x6x4xf32> -// CHECK: %[[VEC:.*]] = vector.broadcast %[[PAD]] : f32 to vector<2x6x4xf32> -// CHECK: %[[FILL:.*]] = vector.transfer_write %[[VEC]], %[[INIT]][%[[C0]], %[[C0]], %[[C0]]] {in_bounds = [true, true, true]} : vector<2x6x4xf32>, tensor<2x6x4xf32> -// CHECK: %[[READ:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]], %[[C0]]], %{{.*}} {in_bounds = [true, true, true]} : tensor<2x5x2xf32>, vector<2x5x2xf32> -// CHECK: %[[WRITE:.*]] = vector.transfer_write %[[READ]], %[[FILL]][%[[C0]], %[[C0]], %[[C2]]] {in_bounds = [true, true, true]} : vector<2x5x2xf32>, tensor<2x6x4xf32> -// CHECK: return %[[WRITE]] -func.func @pad_static_source(%arg0: tensor<2x5x2xf32>, %pad_value: 
f32) -> tensor<2x6x4xf32> { - %0 = tensor.pad %arg0 low[0, 0, 2] high[0, 1, 0] { - ^bb0(%arg1: index, %arg2: index, %arg3: index): - tensor.yield %pad_value : f32 - } : tensor<2x5x2xf32> to tensor<2x6x4xf32> - return %0 : tensor<2x6x4xf32> -} - - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 { vectorize_padding } : (!transform.any_op) -> !transform.any_op -} - - -// ----- - -// CHECK-LABEL: func @pad_static_dynamic( -// CHECK-SAME: %[[SRC:.*]]: tensor<1x2x2x?xf32>, %[[LOW:.*]]: index, %[[HIGH:.*]]: index -// CHECK-NOT: tensor.pad -// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index -// CHECK-DAG: %[[C3:.*]] = arith.constant 3 : index -// CHECK-DAG: %[[C5:.*]] = arith.constant 5 : index -// CHECK: %[[V0:.*]] = arith.addi %[[LOW]], %[[C2]] : index -// CHECK: %[[V1:.*]] = arith.addi %[[V0]], %[[C3]] : index -// CHECK: %[[V2:.*]] = arith.addi %[[HIGH]], %[[C5]] : index -// CHECK: %[[DIM3:.*]] = tensor.dim %[[SRC]], %[[C3]] : tensor<1x2x2x?xf32> -// CHECK: %[[V4:.*]] = arith.addi %[[DIM3]], %[[C3]] : index -// CHECK: %[[V5:.*]] = arith.addi %[[V4]], %[[C2]] : index -// CHECK: %[[INIT:.*]] = tensor.empty(%[[V1]], %[[V2]], %[[V5]]) : tensor<6x?x?x?xf32> -// CHECK: %[[FILL:.*]] = linalg.fill ins(%{{.*}} : f32) outs(%[[INIT]] : tensor<6x?x?x?xf32>) -> tensor<6x?x?x?xf32> -// CHECK: %[[SRCDIM:.*]] = tensor.dim %[[SRC]], %[[C3]] : tensor<1x2x2x?xf32> -// CHECK: %[[RESULT:.*]] = tensor.insert_slice %[[SRC]] into %[[FILL]][2, %[[LOW]], 3, 3] [1, 2, 2, %[[SRCDIM]]] [1, 1, 1, 1] : tensor<1x2x2x?xf32> into tensor<6x?x?x?xf32> -// CHECK: return %[[RESULT]] -func.func @pad_static_dynamic(%arg0: tensor<1x2x2x?xf32>, %low: index, %high: index, - %pad_value: f32) -> tensor<6x?x?x?xf32> { - %0 = tensor.pad %arg0 low[2, %low, 3, 3] high[3, 3, %high, 2] { - ^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index): - tensor.yield %pad_value : f32 - } : tensor<1x2x2x?xf32> to tensor<6x?x?x?xf32> - return %0 : tensor<6x?x?x?xf32> -} - - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 { vectorize_padding } : (!transform.any_op) -> !transform.any_op -} - -// ----- - -// CHECK-LABEL: func @pad_static_complex( -// CHECK-NOT: vector< -func.func @pad_static_complex(%arg0: tensor<2x5x2xcomplex<f32>>, %pad_value: complex<f32>) -> tensor<2x6x4xcomplex<f32>> { - %0 = tensor.pad %arg0 low[0, 0, 2] high[0, 1, 0] { - ^bb0(%arg1: index, %arg2: index, %arg3: index): - tensor.yield %pad_value : complex<f32> - } : tensor<2x5x2xcomplex<f32>> to tensor<2x6x4xcomplex<f32>> - return %0 : tensor<2x6x4xcomplex<f32>> -} - - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 { vectorize_padding } : (!transform.any_op) -> !transform.any_op -} - -// ----- - -// CHECK-LABEL: func @pad_and_transfer_read -// CHECK-SAME: %[[ARG0:.*]]: tensor<5x6xf32> -// CHECK-NOT: tensor.pad -// CHECK-DAG: %[[C0:.*]] = 
arith.constant 0 : index -// CHECK-DAG: %[[C5:.*]] = arith.constant 5.0 -// CHECK: %[[RESULT:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], %[[C5]] : tensor<5x6xf32>, vector<7x9xf32> -// CHECK: return %[[RESULT]] -func.func @pad_and_transfer_read(%arg0: tensor<5x6xf32>) -> vector<7x9xf32> { - %c0 = arith.constant 0 : index - %c5 = arith.constant 5.0 : f32 - %c6 = arith.constant 6.0 : f32 - %0 = tensor.pad %arg0 low[0, 0] high[5, 7] { - ^bb0(%arg1: index, %arg2: index): - tensor.yield %c5 : f32 - } : tensor<5x6xf32> to tensor<10x13xf32> - %1 = vector.transfer_read %0[%c0, %c0], %c6 - : tensor<10x13xf32>, vector<7x9xf32> - return %1 : vector<7x9xf32> -} - - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %2 = transform.structured.vectorize %1 { vectorize_padding } : (!transform.any_op) -> !transform.any_op -} - -// ----- - -func.func private @make_vector() -> vector<7x9xf32> - -// CHECK-LABEL: func @pad_and_transfer_write_static -// CHECK-SAME: %[[ARG0:.*]]: tensor<5x6xf32> -// CHECK-NOT: tensor.pad -// CHECK: %[[C0:.*]] = arith.constant 0 : index -// CHECK: %[[VEC0:.*]] = call @make_vector() : () -> vector<7x9xf32> -// CHECK: %[[RESULT:.*]] = vector.transfer_write %[[VEC0]], %[[ARG0]][%[[C0]], %[[C0]]] : vector<7x9xf32>, tensor<5x6xf32> -// CHECK: return %[[RESULT]] -func.func @pad_and_transfer_write_static( - %arg0: tensor<5x6xf32>) -> tensor<5x6xf32> { - %c0 = arith.constant 0 : index - %c5 = arith.constant 5.0 : f32 - %0 = tensor.pad %arg0 low[0, 0] high[5, 7] { - ^bb0(%arg2: index, %arg3: index): - tensor.yield %c5 : f32 - } : tensor<5x6xf32> to tensor<10x13xf32> - %1 = call @make_vector() : () -> vector<7x9xf32> - %2 = vector.transfer_write %1, %0[%c0, %c0] - : vector<7x9xf32>, tensor<10x13xf32> - %3 = tensor.extract_slice %2[0, 0] [5, 6] [1, 1] : tensor<10x13xf32> to tensor<5x6xf32> - return %3 : tensor<5x6xf32> -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %3 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %5 = transform.structured.vectorize %4 { vectorize_padding } : (!transform.any_op) -> !transform.any_op -} - - -// ----- - -func.func private @make_vector() -> vector<7x9xf32> - -// CHECK-LABEL: func @pad_and_transfer_write_dynamic_static -// CHECK-SAME: %[[ARG0:.*]]: tensor<?x?xf32>, %[[SIZE:.*]]: index, %[[PADDING:.*]]: index -// CHECK-NOT: tensor.pad -// CHECK: %[[C0:.*]] = arith.constant 0 : index -// CHECK: %[[SUB:.*]] = tensor.extract_slice %[[ARG0]][0, 0] [%[[SIZE]], 6] [1, 1] : tensor<?x?xf32> to tensor<?x6xf32> -// CHECK: %[[VEC0:.*]] = call @make_vector() : () -> vector<7x9xf32> -// CHECK: %[[RESULT:.*]] = vector.transfer_write %[[VEC0]], %[[SUB]][%[[C0]], %[[C0]]] : vector<7x9xf32>, tensor<?x6xf32> -// CHECK: return %[[RESULT]] -func.func @pad_and_transfer_write_dynamic_static( - %arg0: tensor<?x?xf32>, %size: index, %padding: index) -> tensor<?x6xf32> { - %c0 = arith.constant 0 : index - %c5 = arith.constant 5.0 : f32 - %s = tensor.extract_slice %arg0[0, 0] [%size, 6] [1, 1] - : tensor<?x?xf32> to tensor<?x6xf32> - %0 = tensor.pad %s low[0, 0] high[%padding, 7] { - ^bb0(%arg2: index, %arg3: index): - tensor.yield %c5 : f32 - } : tensor<?x6xf32> to tensor<?x13xf32> - %1 = call @make_vector() : () -> vector<7x9xf32> - %2 = vector.transfer_write %1, 
%0[%c0, %c0] - : vector<7x9xf32>, tensor<?x13xf32> - %3 = tensor.extract_slice %2[0, 0] [%size, 6] [1, 1] : tensor<?x13xf32> to tensor<?x6xf32> - return %3 : tensor<?x6xf32> -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %3 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %5 = transform.structured.vectorize %4 { vectorize_padding } : (!transform.any_op) -> !transform.any_op -} - - -// ----- - -func.func private @make_vector() -> tensor<12x13xf32> - -// CHECK-LABEL: func @pad_and_insert_slice_source -// CHECK-SAME: %[[ARG0:.*]]: tensor<5x6xf32> -// CHECK-NOT: tensor.pad -// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index -// CHECK-DAG: %[[C5:.*]] = arith.constant 5.0 -// CHECK: %[[VEC0:.*]] = call @make_vector() : () -> tensor<12x13xf32> -// CHECK: %[[READ:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], %[[C5]] : tensor<5x6xf32>, vector<7x9xf32> -// CHECK: %[[WRITE:.*]] = vector.transfer_write %[[READ]], %[[VEC0]][%[[C0]], %[[C0]]] {in_bounds = [true, true]} : vector<7x9xf32>, tensor<12x13xf32> -// CHECK: return %[[WRITE]] -func.func @pad_and_insert_slice_source( - %arg0: tensor<5x6xf32>) -> tensor<12x13xf32> { - %c0 = arith.constant 0 : index - %c5 = arith.constant 5.0 : f32 - %0 = tensor.pad %arg0 low[0, 0] high[2, 3] { - ^bb0(%arg2: index, %arg3: index): - tensor.yield %c5 : f32 - } : tensor<5x6xf32> to tensor<7x9xf32> - %1 = call @make_vector() : () -> tensor<12x13xf32> - %r = tensor.insert_slice %0 into %1[0, 0][7, 9][1, 1] : tensor<7x9xf32> into tensor<12x13xf32> - return %r : tensor<12x13xf32> -} - -transform.sequence failures(propagate) { -^bb1(%arg1: !transform.any_op): - %3 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op - %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op - %5 = transform.structured.vectorize %4 { vectorize_padding } : (!transform.any_op) -> !transform.any_op -} - - -// ----- - -func.func private @make_vector() -> tensor<12x13xf32> - -// CHECK-LABEL: func @pad_and_insert_slice_dest -// Check the insert slice is not rewritten if the padded result is used by the destination operand. 
-// CHECK: %[[T1:.*]] = call @make_vector() : () -> tensor<12x13xf32>
-// CHECK: = tensor.insert_slice %[[T1]] into
-func.func @pad_and_insert_slice_dest(
-    %arg0: tensor<1x5x6xf32>) -> tensor<1x12x13xf32> {
-  %c5 = arith.constant 5.0 : f32
-  %0 = tensor.pad %arg0 low[0, 0, 0] high[0, 7, 7] {
-    ^bb0(%arg2: index, %arg3: index, %arg4: index):
-      tensor.yield %c5 : f32
-  } : tensor<1x5x6xf32> to tensor<1x12x13xf32>
-  %1 = call @make_vector() : () -> tensor<12x13xf32>
-  %r = tensor.insert_slice %1 into %0[0, 0, 0][1, 12, 13][1, 1, 1] : tensor<12x13xf32> into tensor<1x12x13xf32>
-  return %r : tensor<1x12x13xf32>
-}
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize %4 : (!transform.any_op) -> !transform.any_op
-}
-
-// -----
-
-// CHECK-LABEL: func @pad_tensor_non_const_pad_value
-// CHECK-SAME: %[[ARG0:.*]]: tensor<5x6xf32>
-// CHECK-NOT: tensor.pad
-// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
-// CHECK-DAG: %[[C3:.*]] = arith.constant 3 : index
-// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : index
-// CHECK: %[[FILL:.*]] = tensor.generate
-// CHECK: %[[RES:.*]] = arith.mulf
-// CHECK: tensor.yield %[[RES]] : f32
-// CHECK: %[[READ:.*]] = vector.transfer_read %[[ARG0]][%[[C0]], %[[C0]]], %{{.*}} {in_bounds = [true, true]} : tensor<5x6xf32>, vector<5x6xf32>
-// CHECK: %[[WRITE:.*]] = vector.transfer_write %[[READ]], %[[FILL]][%[[C3]], %[[C4]]] {in_bounds = [true, true]} : vector<5x6xf32>, tensor<12x13xf32>
-// CHECK: return %[[WRITE]]
-func.func @pad_tensor_non_const_pad_value(%arg0: tensor<5x6xf32>) -> tensor<12x13xf32> {
-  %c0 = arith.constant 0 : index
-  %c5 = arith.constant 5.0 : f32
-  %0 = tensor.pad %arg0 low[3, 4] high[4, 3] {
-    ^bb0(%arg1: index, %arg2: index):
-      %i1 = arith.index_cast %arg1 : index to i32
-      %i2 = arith.index_cast %arg2 : index to i32
-      %f1 = arith.sitofp %i1 : i32 to f32
-      %f2 = arith.sitofp %i2 : i32 to f32
-      %m = arith.mulf %f1, %f2 : f32
-      tensor.yield %m : f32
-  } : tensor<5x6xf32> to tensor<12x13xf32>
-  return %0 : tensor<12x13xf32>
-}
-
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize %4 { vectorize_padding } : (!transform.any_op) -> !transform.any_op
-}
-
-// -----
-
-// CHECK-LABEL: func @sum_exp
-func.func @sum_exp(%input: tensor<4x16x8xf32>, %output: tensor<4x16xf32>)
-  -> tensor<4x16xf32>
-{
-  // CHECK: vector.transfer_read {{.*}} : tensor<4x16x8xf32>, vector<4x16x8xf32>
-  // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true]} : tensor<4x16xf32>, vector<4x16xf32>
-  // CHECK: math.exp {{.*}} : vector<4x16x8xf32>
-  // CHECK: vector.multi_reduction <add>, %{{.*}}, %{{.*}} [2] : vector<4x16x8xf32> to vector<4x16xf32>
-  // CHECK: vector.transfer_write {{.*}} : vector<4x16xf32>, tensor<4x16xf32>
-  // CHECK: return {{.*}} : tensor<4x16xf32>
-  %0 = linalg.generic {
-      indexing_maps = [
-        affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
-        affine_map<(d0, d1, d2) -> (d0, d1)>
-      ],
-      iterator_types = ["parallel", "parallel", "reduction"]
-    } ins(%input : tensor<4x16x8xf32>) outs(%output : tensor<4x16xf32>) {
-    ^bb0(%arg0: f32, %arg1: f32):
-      %1 = math.exp %arg0 : f32
-      %2 = arith.addf %1, %arg1 : f32
-      linalg.yield %2 : f32
-  } -> tensor<4x16xf32>
-  return %0 : tensor<4x16xf32>
-}
-
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize %4 : (!transform.any_op) -> !transform.any_op
-}
-
-// -----
-
-// CHECK-DAG: #[[$M1:.*]] = affine_map<(d0, d1) -> (d1, d0, 0, 0)>
-// CHECK-DAG: #[[$M2:.*]] = affine_map<(d0, d1) -> (0, 0, d1, d0)>
-// CHECK-DAG: #[[$M3:.*]] = affine_map<(d0, d1) -> (d1, d0)>
-
-// CHECK-LABEL: func @sum_exp_2
-func.func @sum_exp_2(%input: tensor<3x2xf32>, %input_2: tensor<5x4xf32>, %output: tensor<5x2xf32>)
-  -> tensor<5x2xf32>
-{
-  // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true, true, true], permutation_map = #[[$M1]]} : tensor<3x2xf32>, vector<2x3x4x5xf32>
-  // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true, true, true], permutation_map = #[[$M2]]} : tensor<5x4xf32>, vector<2x3x4x5xf32>
-  // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true], permutation_map = #[[$M3]]} : tensor<5x2xf32>, vector<2x5xf32>
-  // CHECK: math.exp {{.*}} : vector<2x3x4x5xf32>
-  // CHECK: math.exp {{.*}} : vector<2x3x4x5xf32>
-  // CHECK: addf {{.*}} : vector<2x3x4x5xf32>
-  // CHECK: vector.multi_reduction <add>, {{.*}}, %{{.*}} [1, 2] : vector<2x3x4x5xf32> to vector<2x5xf32>
-  // CHECK: vector.transfer_write {{.*}} {in_bounds = [true, true], permutation_map = #[[$M3]]} : vector<2x5xf32>, tensor<5x2xf32>
-  // CHECK: return {{.*}} : tensor<5x2xf32>
-  %0 = linalg.generic {
-      indexing_maps = [
-        affine_map<(d0, d1, d2, d3) -> (d1, d0)>,
-        affine_map<(d0, d1, d2, d3) -> (d3, d2)>,
-        affine_map<(d0, d1, d2, d3) -> (d3, d0)>
-      ],
-      iterator_types = ["parallel", "reduction", "reduction", "parallel"]
-    } ins(%input, %input_2 : tensor<3x2xf32>, tensor<5x4xf32>) outs(%output : tensor<5x2xf32>) {
-    ^bb0(%arg0: f32, %arg1: f32, %arg2: f32):
-      %1 = math.exp %arg0 : f32
-      %2 = math.exp %arg1 : f32
-      %3 = arith.addf %1, %2 : f32
-      %4 = arith.addf %3, %arg2 : f32
-      linalg.yield %4 : f32
-  } -> tensor<5x2xf32>
-  return %0 : tensor<5x2xf32>
-}
-
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize %4 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
-}
-
-// -----
-
-// CHECK-LABEL: func @red_max_2d(
-func.func @red_max_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
-  // CHECK: %[[CMINF:.+]] = arith.constant dense<-3.402820e+38> : vector<4xf32>
-  // CHECK: tensor.empty() : tensor<4xf32>
-  // CHECK: vector.multi_reduction <maximumf>, {{.*}}, %[[CMINF]] [1] : vector<4x4xf32> to vector<4xf32>
-  // CHECK: vector.transfer_write {{.*}} : vector<4xf32>, tensor<4xf32>
-  %ident = arith.constant -3.40282e+38 : f32
-  %init = tensor.empty() : tensor<4xf32>
-  %fill = linalg.fill ins(%ident : f32) outs(%init : tensor<4xf32>) -> tensor<4xf32>
-  %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
-                                          affine_map<(d0, d1) -> (d0)>],
-                         iterator_types = ["parallel", "reduction"]}
-                         ins(%arg0 : tensor<4x4xf32>) outs(%fill : tensor<4xf32>) {
-   ^bb0(%in0: f32, %out0: f32):
-    %max = arith.maximumf %in0, %out0 : f32
-    linalg.yield %max : f32
-  } -> tensor<4xf32>
-  return %red : tensor<4xf32>
-}
-
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize %4 { vectorize_padding } : (!transform.any_op) -> !transform.any_op
-}
-
-// -----
-
-// CHECK-LABEL: func @red_min_2d(
-func.func @red_min_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
-  // CHECK: %[[CMAXF:.+]] = arith.constant dense<3.402820e+38> : vector<4xf32>
-  // CHECK: tensor.empty() : tensor<4xf32>
-  // CHECK: vector.transfer_read {{.*}} : tensor<4x4xf32>, vector<4x4xf32>
-  // CHECK: vector.multi_reduction <minimumf>, {{.*}}, %[[CMAXF]] [1] : vector<4x4xf32> to vector<4xf32>
-  // CHECK: vector.transfer_write {{.*}} : vector<4xf32>, tensor<4xf32>
-  %maxf32 = arith.constant 3.40282e+38 : f32
-  %init = tensor.empty() : tensor<4xf32>
-  %fill = linalg.fill ins(%maxf32 : f32) outs(%init : tensor<4xf32>) -> tensor<4xf32>
-  %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
-                                          affine_map<(d0, d1) -> (d0)>],
-                         iterator_types = ["parallel", "reduction"]}
-                         ins(%arg0 : tensor<4x4xf32>) outs(%fill : tensor<4xf32>) {
-   ^bb0(%in0: f32, %out0: f32):
-    %min = arith.minimumf %out0, %in0 : f32
-    linalg.yield %min : f32
-  } -> tensor<4xf32>
-  return %red : tensor<4xf32>
-}
-
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize %4 : (!transform.any_op) -> !transform.any_op
-}
-
-// -----
-
-// CHECK-LABEL: func @red_mul_2d(
-func.func @red_mul_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
-  // CHECK: tensor.empty() : tensor<4xf32>
-  // CHECK: vector.transfer_read {{.*}} : tensor<4x4xf32>, vector<4x4xf32>
-  // CHECK: vector.multi_reduction <mul>, {{.*}}, {{.*}} [1] : vector<4x4xf32> to vector<4xf32>
-  // CHECK: vector.transfer_write {{.*}} : vector<4xf32>, tensor<4xf32>
-  %ident = arith.constant 1.0 : f32
-  %init = tensor.empty() : tensor<4xf32>
-  %fill = linalg.fill ins(%ident : f32) outs(%init : tensor<4xf32>) -> tensor<4xf32>
-  %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
-                                          affine_map<(d0, d1) -> (d0)>],
-                         iterator_types = ["parallel", "reduction"]}
-                         ins(%arg0 : tensor<4x4xf32>) outs(%fill : tensor<4xf32>) {
-   ^bb0(%in0: f32, %out0: f32):
-    %mul = arith.mulf %in0, %out0 : f32
-    linalg.yield %mul : f32
-  } -> tensor<4xf32>
-  return %red : tensor<4xf32>
-}
-
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize %4 : (!transform.any_op) -> !transform.any_op
-}
-
-// -----
-
-// CHECK-LABEL: func @red_or_2d(
-func.func @red_or_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
-  // CHECK: tensor.empty() : tensor<4xi1>
-  // CHECK: vector.transfer_read {{.*}} : tensor<4x4xi1>, vector<4x4xi1>
-  // CHECK: vector.multi_reduction <or>, {{.*}}, {{.*}} [1] : vector<4x4xi1> to vector<4xi1>
-  // CHECK: vector.transfer_write {{.*}} : vector<4xi1>, tensor<4xi1>
-  %ident = arith.constant false
-  %init = tensor.empty() : tensor<4xi1>
-  %fill = linalg.fill ins(%ident : i1) outs(%init : tensor<4xi1>) -> tensor<4xi1>
-  %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
-                                          affine_map<(d0, d1) -> (d0)>],
-                         iterator_types = ["parallel", "reduction"]}
-                         ins(%arg0 : tensor<4x4xi1>) outs(%fill : tensor<4xi1>) {
-   ^bb0(%in0: i1, %out0: i1):
-    %or = arith.ori %in0, %out0 : i1
-    linalg.yield %or : i1
-  } -> tensor<4xi1>
-  return %red : tensor<4xi1>
-}
-
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize %4 : (!transform.any_op) -> !transform.any_op
-}
-
-// -----
-
-// CHECK-LABEL: func @red_and_2d(
-func.func @red_and_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
-  // CHECK: tensor.empty() : tensor<4xi1>
-  // CHECK: vector.transfer_read {{.*}} : tensor<4x4xi1>, vector<4x4xi1>
-  // CHECK: vector.multi_reduction <and>, {{.*}}, {{.*}} [1] : vector<4x4xi1> to vector<4xi1>
-  // CHECK: vector.transfer_write {{.*}} : vector<4xi1>, tensor<4xi1>
-  %ident = arith.constant true
-  %init = tensor.empty() : tensor<4xi1>
-  %fill = linalg.fill ins(%ident : i1) outs(%init : tensor<4xi1>) -> tensor<4xi1>
-  %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
-                                          affine_map<(d0, d1) -> (d0)>],
-                         iterator_types = ["parallel", "reduction"]}
-                         ins(%arg0 : tensor<4x4xi1>) outs(%fill : tensor<4xi1>) {
-   ^bb0(%in0: i1, %out0: i1):
-    %and = arith.andi %in0, %out0 : i1
-    linalg.yield %and : i1
-  } -> tensor<4xi1>
-  return %red : tensor<4xi1>
-}
-
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize %4 : (!transform.any_op) -> !transform.any_op
-}
-
-// -----
-
-// CHECK-LABEL: func @red_xor_2d(
-func.func @red_xor_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
-  // CHECK: tensor.empty() : tensor<4xi1>
-  // CHECK: vector.transfer_read {{.*}} : tensor<4x4xi1>, vector<4x4xi1>
-  // CHECK: vector.multi_reduction <xor>, {{.*}}, {{.*}} [1] : vector<4x4xi1> to vector<4xi1>
-  // CHECK: vector.transfer_write {{.*}} : vector<4xi1>, tensor<4xi1>
-  %ident = arith.constant false
-  %init = tensor.empty() : tensor<4xi1>
-  %fill = linalg.fill ins(%ident : i1) outs(%init : tensor<4xi1>) -> tensor<4xi1>
-  %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
-                                          affine_map<(d0, d1) -> (d0)>],
-                         iterator_types = ["parallel", "reduction"]}
-                         ins(%arg0 : tensor<4x4xi1>) outs(%fill : tensor<4xi1>) {
-   ^bb0(%in0: i1, %out0: i1):
-    %xor = arith.xori %in0, %out0 : i1
-    linalg.yield %xor : i1
-  } -> tensor<4xi1>
-  return %red : tensor<4xi1>
-}
-
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize %4 : (!transform.any_op) -> !transform.any_op
-}
-
-// -----
-
-// CHECK-DAG: #[[$M5:.*]] = affine_map<(d0, d1) -> (d0, 0)>
-
-// CHECK-LABEL: func @explicit_broadcast(
-func.func @explicit_broadcast(%arg0: tensor<4x4xf32>, %arg1: tensor<4x1xf32>) -> tensor<4x4xf32> {
-  // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true]} : tensor<4x4xf32>, vector<4x4xf32>
-  // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true], permutation_map = #[[$M5]]} : tensor<4x1xf32>, vector<4x4xf32>
-  // CHECK: subf {{.*}} : vector<4x4xf32>
-  // CHECK: vector.transfer_write {{.*}} {in_bounds = [true, true]} : vector<4x4xf32>, tensor<4x4xf32>
-  %c0 = arith.constant 0.0 : f32
-  %init = tensor.empty() : tensor<4x4xf32>
-  %fill = linalg.fill ins(%c0 : f32) outs(%init : tensor<4x4xf32>) -> tensor<4x4xf32>
-  %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
-                                          affine_map<(d0, d1) -> (d0, 0)>,
-                                          affine_map<(d0, d1) -> (d0, d1)>],
-                         iterator_types = ["parallel", "parallel"]}
-    ins(%arg0, %arg1 : tensor<4x4xf32>, tensor<4x1xf32>)
-    outs(%fill : tensor<4x4xf32>) {
-    ^bb0(%arg7: f32, %arg8: f32, %arg9: f32):
-      %40 = arith.subf %arg7, %arg8 : f32
-      linalg.yield %40 : f32
-  } -> tensor<4x4xf32>
-  return %red : tensor<4x4xf32>
-}
-
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize %4 : (!transform.any_op) -> !transform.any_op
-}
-
-// -----
-
-// CHECK-DAG: #[[$M6:.*]] = affine_map<(d0, d1) -> (d0, 0)>
-
-// CHECK-LABEL: func @fused_broadcast_red_2d
-func.func @fused_broadcast_red_2d(%arg0: tensor<4x4xf32>, %arg1: tensor<4x1xf32>) -> tensor<4xf32> {
-  // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true]} : tensor<4x4xf32>, vector<4x4xf32>
-  // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true], permutation_map = #[[$M6]]} : tensor<4x1xf32>, vector<4x4xf32>
-  // CHECK: subf {{.*}} : vector<4x4xf32>
-  // CHECK: math.exp {{.*}} : vector<4x4xf32>
-  // CHECK: vector.multi_reduction <add>, {{.*}}, {{.*}} : vector<4x4xf32> to vector<4xf32>
-  // CHECK: vector.transfer_write {{.*}} {in_bounds = [true]} : vector<4xf32>, tensor<4xf32>
-  %c0 = arith.constant 0.0 : f32
-  %init = tensor.empty() : tensor<4xf32>
-  %fill = linalg.fill ins(%c0 : f32) outs(%init : tensor<4xf32>) -> tensor<4xf32>
-  %red = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
-                                          affine_map<(d0, d1) -> (d0, 0)>,
-                                          affine_map<(d0, d1) -> (d0)>],
-                         iterator_types = ["parallel", "reduction"]}
-    ins(%arg0, %arg1 : tensor<4x4xf32>, tensor<4x1xf32>)
-    outs(%fill : tensor<4xf32>) {
-    ^bb0(%arg7: f32, %arg8: f32, %arg9: f32):
-      %40 = arith.subf %arg7, %arg8 : f32
-      %41 = math.exp %40 : f32
-      %42 = arith.addf %41, %arg9 : f32
-      linalg.yield %42 : f32
-  } -> tensor<4xf32>
-  return %red : tensor<4xf32>
-}
-
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op
-
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize %4 : (!transform.any_op) -> !transform.any_op
-}
-
-// -----
-
-// CHECK-LABEL: func @reduce_1d(
-// CHECK-SAME: %[[A:.*]]: tensor<32xf32>
-func.func @reduce_1d(%arg0: tensor<32xf32>) -> tensor<f32> {
-  // CHECK-DAG: %[[vF0:.*]] = arith.constant dense<0.000000e+00> : vector<f32>
-  // CHECK-DAG: %[[F0:.*]] = arith.constant 0.000000e+00 : f32
-  // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
-  %f0 = arith.constant 0.000000e+00 : f32
-
-  // CHECK: %[[init:.*]] = tensor.empty() : tensor<f32>
-  %0 = tensor.empty() : tensor<f32>
-
-  %1 = linalg.fill ins(%f0 : f32) outs(%0 : tensor<f32>) -> tensor<f32>
-  // CHECK: %[[r:.*]] = vector.transfer_read %[[A]][%[[C0]]]
-  // CHECK-SAME: : tensor<32xf32>, vector<32xf32>
-  // CHECK: %[[f0:.*]] = vector.extractelement %[[vF0]][] : vector<f32>
-  // CHECK: %[[red:.*]] = vector.multi_reduction <add>, %[[r]], %[[f0]] [0]
-  // CHECK-SAME: : vector<32xf32> to f32
-  // CHECK: %[[red_v1:.*]] = vector.broadcast %[[red]] : f32 to vector<f32>
-  // CHECK: %[[res:.*]] = vector.transfer_write %[[red_v1]], %[[init]][]
-  // CHECK-SAME: : vector<f32>, tensor<f32>
-  %2 = linalg.generic {
-    indexing_maps = [affine_map<(d0) -> (d0)>,
-                     affine_map<(d0) -> ()>],
-    iterator_types = ["reduction"]}
-    ins(%arg0 : tensor<32xf32>)
-    outs(%1 : tensor<f32>) {
-    ^bb0(%a: f32, %b: f32):
-      %3 = arith.addf %a, %b : f32
-      linalg.yield %3 : f32
-  } -> tensor<f32>
-
-  return %2 : tensor<f32>
-}
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op
-}
-
-
-// -----
-
-// This test checks that vectorization does not occur when an input indexing map
-// is not a projected permutation. In the future, this can be converted to a
-// positive test when support is added.
-
-// CHECK-LABEL: func @not_projected_permutation
-func.func @not_projected_permutation(%arg0: tensor<8x8xf32>) -> tensor<6x6x3x3xf32> {
-  %c0 = arith.constant 0.0 : f32
-  %init = tensor.empty() : tensor<6x6x3x3xf32>
-  %fill = linalg.fill ins(%c0 : f32) outs(%init : tensor<6x6x3x3xf32>) -> tensor<6x6x3x3xf32>
-  // CHECK: linalg.generic
-  %result = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0 + d2, d1 + d3)>,
-                                             affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>],
-                            iterator_types = ["parallel", "parallel", "parallel", "parallel"]}
-    ins(%arg0 : tensor<8x8xf32>)
-    outs(%fill : tensor<6x6x3x3xf32>) {
-    ^bb0(%arg7: f32, %arg9: f32):
-      linalg.yield %arg7 : f32
-  } -> tensor<6x6x3x3xf32>
-  return %result : tensor<6x6x3x3xf32>
-}
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op
-}
-
-// -----
-
-// Check vectorization can handle cases where outputs are a mix of reduced and non-reduced values.
-func.func @mixed_parallel_reduced_results(%arg0 : tensor<2x4x8xf32>,
-    %arg1 : tensor<2x4xf32>, %arg2 : tensor<2x4x8xf32>, %arg3 : tensor<2x4xf32>) ->
-    (tensor<2x4x8xf32>, tensor<2x4xf32>) {
-  %0:2 = linalg.generic {
-      indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1)>,
-                       affine_map<(d0, d1, d2) -> (d0, d1, d2)>, affine_map<(d0, d1, d2) -> (d0, d1)>],
-      iterator_types = ["parallel", "parallel", "reduction"]}
-      ins(%arg0, %arg1 : tensor<2x4x8xf32>, tensor<2x4xf32>)
-      outs(%arg2, %arg3 : tensor<2x4x8xf32>, tensor<2x4xf32>) {
-    ^bb0(%b0 : f32, %b1 : f32, %b2 : f32, %b3 : f32):
-      %1 = arith.mulf %b0, %b1 : f32
-      %2 = arith.addf %1, %b3 : f32
-      linalg.yield %1, %2 : f32, f32
-  } -> (tensor<2x4x8xf32>, tensor<2x4xf32>)
-  return %0#0, %0#1 : tensor<2x4x8xf32>, tensor<2x4xf32>
-}
-// CHECK-LABEL: func @mixed_parallel_reduced_results(
-// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: tensor<2x4x8xf32>
-// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: tensor<2x4xf32>
-// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]]: tensor<2x4x8xf32>
-// CHECK-SAME: %[[ARG3:[a-zA-Z0-9]+]]: tensor<2x4xf32>
-// CHECK-DAG: %[[V0:.+]] = vector.transfer_read %[[ARG0]]
-// CHECK-DAG: %[[V1:.+]] = vector.transfer_read %[[ARG1]]
-// CHECK-DAG: %[[V2:.+]] = vector.transfer_read %[[ARG3]]
-// CHECK-DAG: %[[MUL:.+]] = arith.mulf %[[V0]], %[[V1]]
-// CHECK-DAG: %[[ADD:.+]] = vector.multi_reduction <add>, %[[MUL]], %[[V2]]
-// CHECK-DAG: vector.transfer_write %[[MUL]], %[[ARG2]]
-// CHECK-DAG: vector.transfer_write %[[ADD]], %[[ARG3]]
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 { disable_multi_reduction_to_contract_patterns, disable_transfer_permutation_map_lowering_patterns } : (!transform.any_op) -> !transform.any_op
-}
-
-// -----
-
-func.func @vectorize_map(%arg0: memref<64xf32>,
-    %arg1: memref<64xf32>, %arg2: memref<64xf32>) {
-  linalg.map ins(%arg0, %arg1 : memref<64xf32>, memref<64xf32>)
-             outs(%arg2 : memref<64xf32>)
-    (%in: f32, %in_0: f32) {
-      %0 = arith.addf %in, %in_0 : f32
-      linalg.yield %0 : f32
-    }
-  return
-}
-// CHECK-LABEL: func @vectorize_map
-// CHECK: %[[LHS:.*]] = vector.transfer_read
-// CHECK-NEXT: %[[RHS:.*]] = vector.transfer_read
-// CHECK-NEXT: arith.addf %[[LHS]], %[[RHS]] : vector<64xf32>
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.map"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op
-}
-
-// -----
-
-func.func @vectorize_transpose(%arg0: memref<16x32x64xf32>,
-    %arg1: memref<32x64x16xf32>) {
-  linalg.transpose ins(%arg0 : memref<16x32x64xf32>)
-    outs(%arg1 : memref<32x64x16xf32>) permutation = [1, 2, 0]
-  return
-}
-// CHECK-LABEL: func @vectorize_transpose
-// CHECK: vector.transpose
-// CHECK-SAME: [1, 2, 0] : vector<16x32x64xf32> to vector<32x64x16xf32>
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.transpose"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op
-}
-
-// -----
-
-func.func @vectorize_reduce(%arg0: memref<16x32x64xf32>,
-    %arg1: memref<16x64xf32>) {
-  linalg.reduce ins(%arg0 : memref<16x32x64xf32>)
-    outs(%arg1 : memref<16x64xf32>) dimensions = [1]
-    (%in: f32, %init: f32) {
-      %0 = arith.addf %in, %init : f32
-      linalg.yield %0 : f32
-    }
-  return
-}
-// CHECK-LABEL: func @vectorize_reduce
-// CHECK: vector.multi_reduction <add>
-// CHECK-SAME: : vector<16x32x64xf32> to vector<16x64xf32>
-
-transform.sequence failures(propagate) {
-^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["linalg.reduce"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op
-}
-
-// -----
-
-// This is a regression test. This IR cannot be vectorized, but
-// structured.vectorize should nevertheless succeed. : (!transform.any_op) -> !transform.any_op
-
-#map = affine_map<(d0) -> (d0)>
-// CHECK-LABEL: @not_vectorizable
-func.func @not_vectorizable(%arg0: tensor<1x?xf32>, %arg1: index, %arg2: index, %arg3: index) -> tensor<1x128xf32> {
-  %0 = tensor.empty() : tensor<1x128xf32>
-  %1 = scf.for %arg5 = %arg2 to %arg1 step %arg3 iter_args(%arg6 = %0) -> (tensor<1x128xf32>) {
-    %extracted_slice = tensor.extract_slice %arg6[0, 0] [1, %arg1] [1, 1] : tensor<1x128xf32> to tensor<?xf32>
-    %expanded = tensor.expand_shape %extracted_slice [[0, 1]] : tensor<?xf32> into tensor<1x?xf32>
-    %extracted_slice_0 = tensor.extract_slice %arg0[0, %arg3] [1, %arg2] [1, 1] : tensor<1x?xf32> to tensor<?xf32>
-    %extracted_slice_1 = tensor.extract_slice %expanded[0, %arg3] [1, %arg2] [1, 1] : tensor<1x?xf32> to tensor<?xf32>
-    %2 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel"]} ins(%extracted_slice_0 : tensor<?xf32>) outs(%extracted_slice_1 : tensor<?xf32>) {
-    ^bb0(%in: f32, %out: f32):
-      %3 = arith.addf %in, %out : f32
-      linalg.yield %3 : f32 } -> tensor<?xf32>
-    %inserted_slice = tensor.insert_slice %2 into %expanded[0, %arg3] [1, %arg2] [1, 1] : tensor<?xf32> into tensor<1x?xf32>
-    %collapsed = tensor.collapse_shape %inserted_slice [[0, 1]] : tensor<1x?xf32> into tensor<?xf32>
-    %inserted_slice_2 = tensor.insert_slice %collapsed into %arg6[0, 0] [1, %arg1] [1, 1] : tensor<?xf32> into tensor<1x128xf32>
-    scf.yield %inserted_slice_2 : tensor<1x128xf32>
-  }
-  return %1 : tensor<1x128xf32>
+  return %0 : tensor<?xf32>
 }
+
+// CHECK-LABEL: @vectorize_dynamic_identity
+// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_4:.*]] = tensor.dim %{{.*}}, %[[VAL_3]] : tensor<?xf32>
+// CHECK: %[[VAL_7:.*]] = vector.create_mask %[[VAL_4]] : vector<4xi1>
+// CHECK: %[[VAL_8:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
+// CHECK: %[[VAL_10:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
+// CHECK: %[[VAL_12:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
+// CHECK: %[[VAL_13:.*]] = arith.addf %[[VAL_8]], %[[VAL_10]] : vector<4xf32>
+// CHECK: %[[VAL_14:.*]] = vector.mask %[[VAL_7]] { vector.transfer_write %{{.*}} {in_bounds = [true]} : vector<4xf32>, tensor<?xf32> } : vector<4xi1> -> tensor<?xf32>
+
 transform.sequence failures(propagate) {
-^bb0(%arg0: !transform.any_op):
-  %0 = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-  %1 = transform.structured.vectorize %0 : (!transform.any_op) -> !transform.any_op
+^bb1(%arg1: !transform.any_op):
+  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [4] : !transform.any_op
 }
 
 // -----
 
-// Regression test: %13 was incorrectly detected as a reduction and
-// vectorization failed.
+func.func @vectorize_dynamic_1d_broadcast(%arg0: tensor<?xf32>,
+                                          %arg1: tensor<?xf32>,
+                                          %arg2: tensor<?xf32>) -> tensor<?xf32> {
+  %0 = linalg.generic { indexing_maps = [affine_map<(d0) -> (0)>,
+                                         affine_map<(d0) -> (d0)>,
+                                         affine_map<(d0) -> (d0)>],
+                        iterator_types = ["parallel"] }
+    ins(%arg0, %arg1 : tensor<?xf32>, tensor<?xf32>)
+    outs(%arg2 : tensor<?xf32>) {
+    ^bb(%in0: f32, %in1: f32, %out: f32) :
+      %0 = arith.addf %in0, %in1 : f32
+      linalg.yield %0 : f32
+    } -> tensor<?xf32>
+  return %0 : tensor<?xf32>
+}
-func.func @wrong_reduction_detection(%input: tensor<120x64xf32>) -> tensor<120x64xf32> {
-  %c0 = arith.constant 0 : index
-  %c4 = arith.constant 4 : index
-  %c64 = arith.constant 64 : index
-  %cst_6 = arith.constant 4.000000e+00 : f32
-  %1 = scf.for %arg0 = %c0 to %c64 step %c4 iter_args(%arg1 = %input) -> (tensor<120x64xf32>) {
-    %extracted_slice = tensor.extract_slice %arg1[%c0, %arg0] [1, 4] [1, 1] : tensor<120x64xf32> to tensor<1x4xf32>
-    %10 = linalg.fill {__internal_linalg_transform__ = "1"} ins(%cst_6 : f32) outs(%extracted_slice : tensor<1x4xf32>) -> tensor<1x4xf32>
-    %11 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>], iterator_types = ["parallel", "parallel"]} outs(%10 : tensor<1x4xf32>) {
-    ^bb0(%out: f32):
-      %12 = linalg.index 0 : index
-      %13 = arith.addi %arg0, %12 : index
-      %18 = arith.index_cast %13 : index to i32
-      %20 = arith.uitofp %18 : i32 to f32
-      %67 = arith.mulf %out, %20 : f32
-      linalg.yield %67 : f32
-    } -> tensor<1x4xf32>
-    %inserted_slice = tensor.insert_slice %11 into %arg1[%c0, %arg0] [1, 4] [1, 1] : tensor<1x4xf32> into tensor<120x64xf32>
-    scf.yield %inserted_slice : tensor<120x64xf32>
-  }
-  return %1 : tensor<120x64xf32>
+// CHECK-LABEL: @vectorize_dynamic_1d_broadcast
+// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_4:.*]] = tensor.dim %{{.*}}, %[[VAL_3]] : tensor<?xf32>
+// CHECK: %[[VAL_7:.*]] = vector.transfer_read %{{.*}} {permutation_map = #{{.*}}} : tensor<?xf32>, vector<4xf32>
+// CHECK: %[[VAL_9:.*]] = vector.create_mask %[[VAL_4]] : vector<4xi1>
+// CHECK: %[[VAL_10:.*]] = vector.mask %[[VAL_9]] { vector.transfer_read %{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
+// CHECK: %[[VAL_12:.*]] = vector.mask %[[VAL_9]] { vector.transfer_read %{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
+// CHECK: %[[VAL_13:.*]] = arith.addf %[[VAL_7]], %[[VAL_10]] : vector<4xf32>
+// CHECK: %[[VAL_14:.*]] = vector.mask %{{.*}} { vector.transfer_write %[[VAL_13]], {{.*}} {in_bounds = [true]} : vector<4xf32>, tensor<?xf32> } : vector<4xi1> -> tensor<?xf32>
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [4] : !transform.any_op
+}
+
+// -----
+
+func.func @vectorize_dynamic_2d_transpose(%arg0: tensor<?x?xf32>,
+                                          %arg1: tensor<?x?xf32>,
+                                          %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
+  %0 = linalg.generic { indexing_maps = [affine_map<(d0, d1) -> (d1, d0)>,
+                                         affine_map<(d0, d1) -> (d0, d1)>,
+                                         affine_map<(d0, d1) -> (d0, d1)>],
+                        iterator_types = ["parallel", "parallel"] }
+    ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
+    outs(%arg2 : tensor<?x?xf32>) {
+    ^bb(%in0: f32, %in1: f32, %out: f32) :
+      %0 = arith.addf %in0, %in1 : f32
+      linalg.yield %0 : f32
+    } -> tensor<?x?xf32>
+  return %0 : tensor<?x?xf32>
+}
+
+// CHECK-LABEL: @vectorize_dynamic_2d_transpose
+// CHECK: %[[VAL_3:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_4:.*]] = tensor.dim %{{.*}}, %[[VAL_3]] : tensor<?x?xf32>
+// CHECK: %[[VAL_5:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_6:.*]] = tensor.dim %{{.*}}, %[[VAL_5]] : tensor<?x?xf32>
+// CHECK: %[[VAL_9:.*]] = vector.create_mask %[[VAL_6]], %[[VAL_4]] : vector<8x4xi1>
+// CHECK: %[[VAL_10:.*]] = vector.mask %[[VAL_9]] { vector.transfer_read %{{.*}} {in_bounds = [true, true], permutation_map = #{{.*}}} : tensor<?x?xf32>, vector<4x8xf32> } : vector<8x4xi1> -> vector<4x8xf32>
+// CHECK: %[[VAL_12:.*]] = vector.create_mask %[[VAL_4]], %[[VAL_6]] : vector<4x8xi1>
+// CHECK: %[[VAL_13:.*]] = vector.mask %[[VAL_12]] { vector.transfer_read %{{.*}} {in_bounds = [true, true]} : tensor<?x?xf32>, vector<4x8xf32> } : vector<4x8xi1> -> vector<4x8xf32>
+// CHECK: %[[VAL_14:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK: %[[VAL_15:.*]] = vector.mask %[[VAL_12]] { vector.transfer_read %{{.*}} {in_bounds = [true, true]} : tensor<?x?xf32>, vector<4x8xf32> } : vector<4x8xi1> -> vector<4x8xf32>
+// CHECK: %[[VAL_16:.*]] = arith.addf %[[VAL_10]], %[[VAL_13]] : vector<4x8xf32>
+// CHECK: %[[VAL_17:.*]] = vector.mask %[[VAL_12]] { vector.transfer_write %[[VAL_16]], %{{.*}} {in_bounds = [true, true]} : vector<4x8xf32>, tensor<?x?xf32> } : vector<4x8xi1> -> tensor<?x?xf32>
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [4, 8] : !transform.any_op
+}
+
+// -----
+
+func.func @vectorize_dynamic_generic_2d_broadcast(%arg0: tensor<?x?xf32>,
+                                                  %arg1: tensor<?x?xf32>,
+                                                  %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
+  %0 = linalg.generic { indexing_maps = [affine_map<(d0, d1) -> (0, d1)>,
+                                         affine_map<(d0, d1) -> (d0, d1)>,
+                                         affine_map<(d0, d1) -> (d0, d1)>],
+                        iterator_types = ["parallel", "parallel"] }
+    ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
+    outs(%arg2 : tensor<?x?xf32>) {
+    ^bb(%in0: f32, %in1: f32, %out: f32) :
+      %0 = arith.addf %in0, %in1 : f32
+      linalg.yield %0 : f32
+    } -> tensor<?x?xf32>
+  return %0 : tensor<?x?xf32>
+}
+
+// CHECK-LABEL: @vectorize_dynamic_generic_2d_broadcast
+// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_4:.*]] = tensor.dim %{{.*}}, %[[VAL_3]] : tensor<?x?xf32>
+// CHECK: %[[VAL_5:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_6:.*]] = tensor.dim %{{.*}}, %[[VAL_5]] : tensor<?x?xf32>
+// CHECK: %[[VAL_9:.*]] = vector.create_mask %[[VAL_6]] : vector<8xi1>
+// CHECK: %[[VAL_10:.*]] = vector.mask %[[VAL_9]] { vector.transfer_read %{{.*}} {in_bounds = [true, true], permutation_map = #{{.*}}} : tensor<?x?xf32>, vector<4x8xf32> } : vector<8xi1> -> vector<4x8xf32>
+// CHECK: %[[VAL_12:.*]] = vector.create_mask %[[VAL_4]], %[[VAL_6]] : vector<4x8xi1>
+// CHECK: %[[VAL_13:.*]] = vector.mask %[[VAL_12]] { vector.transfer_read %{{.*}} {in_bounds = [true, true]} : tensor<?x?xf32>, vector<4x8xf32> } : vector<4x8xi1> -> vector<4x8xf32>
+// CHECK: %[[VAL_15:.*]] = vector.mask %[[VAL_12]] { vector.transfer_read %{{.*}} {in_bounds = [true, true]} : tensor<?x?xf32>, vector<4x8xf32> } : vector<4x8xi1> -> vector<4x8xf32>
+// CHECK: %[[VAL_16:.*]] = arith.addf %[[VAL_10]], %[[VAL_13]] : vector<4x8xf32>
+// CHECK: %[[VAL_18:.*]] = vector.mask %[[VAL_12]] { vector.transfer_write %{{.*}} {in_bounds = [true, true]} : vector<4x8xf32>, tensor<?x?xf32> } : vector<4x8xi1> -> tensor<?x?xf32>
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [4, 8] : !transform.any_op
+}
+
+// -----
+
+func.func @vectorize_dynamic_reduction(%arg0: tensor<?x?xf32>,
+                                       %arg1: tensor<?xf32>) -> tensor<?xf32> {
+  %0 = linalg.generic { indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
+                                         affine_map<(d0, d1) -> (d0)>],
+                        iterator_types = ["parallel", "reduction"] }
+    ins(%arg0 : tensor<?x?xf32>)
+    outs(%arg1 : tensor<?xf32>) {
+    ^bb(%in: f32, %out: f32) :
+      %0 = arith.addf %in, %out : f32
+      linalg.yield %0 : f32
+    } -> tensor<?xf32>
+  return %0 : tensor<?xf32>
 }
 
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [4, 8] : !transform.any_op
 }
 
-// CHECK-LABEL: @wrong_reduction_detection
-// CHECK: vector.broadcast
-// CHECK: vector.transfer_write
+// CHECK-LABEL: @vectorize_dynamic_reduction(
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<?xf32>) -> tensor<?xf32> {
+// CHECK: %[[VAL_2:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_3:.*]] = tensor.dim %[[VAL_0]], %[[VAL_2]] : tensor<?x?xf32>
+// CHECK: %[[VAL_4:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_5:.*]] = tensor.dim %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf32>
+// CHECK: %[[VAL_8:.*]] = vector.create_mask %[[VAL_3]], %[[VAL_5]] : vector<4x8xi1>
+// CHECK: %[[VAL_9:.*]] = vector.mask %[[VAL_8]] { vector.transfer_read %[[VAL_0]]{{.*}} {in_bounds = [true, true]} : tensor<?x?xf32>, vector<4x8xf32> } : vector<4x8xi1> -> vector<4x8xf32>
+// CHECK: %[[VAL_11:.*]] = vector.create_mask %[[VAL_3]] : vector<4xi1>
+// CHECK: %[[VAL_12:.*]] = vector.mask %[[VAL_11]] { vector.transfer_read %[[VAL_1]]{{.*}} {in_bounds = [true]} : tensor<?xf32>, vector<4xf32> } : vector<4xi1> -> vector<4xf32>
+// CHECK: %[[VAL_13:.*]] = vector.mask %[[VAL_8]] { vector.multi_reduction <add>, %[[VAL_9]], %[[VAL_12]] [1] : vector<4x8xf32> to vector<4xf32> } : vector<4x8xi1> -> vector<4xf32>
+// CHECK: %[[VAL_15:.*]] = vector.mask %[[VAL_11]] { vector.transfer_write %[[VAL_13]], %[[VAL_1]]{{.*}} {in_bounds = [true]} : vector<4xf32>, tensor<?xf32> } : vector<4xi1> -> tensor<?xf32>
+// CHECK: return %[[VAL_15]] : tensor<?xf32>
+// CHECK: }
 
 // -----
 
-// Don't vectorize tensor<0xf32> : (!transform.any_op) -> !transform.any_op
-// CHECK-LABEL: @tensor_size0
-// CHECK: linalg.generic
-func.func @tensor_size0(%arg0: tensor<0xf32>,
-                        %arg1: tensor<f32>) -> tensor<f32> {
-  %0 = linalg.generic
-    {indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> ()>],
-     iterator_types = ["reduction"]}
-    ins(%arg0 : tensor<0xf32>) outs(%arg1 : tensor<f32>) {
-    ^bb0(%in: f32, %out: f32):
-      %12 = arith.addf %out, %in : f32
-      linalg.yield %12 : f32
-  } -> tensor<f32>
-  return %0 : tensor<f32>
+func.func @vectorize_dynamic_transpose_reduction(%arg0: tensor<?x?x?xf32>,
+                                                 %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
+  %0 = linalg.generic { indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
+                                         affine_map<(d0, d1, d2) -> (d2, d1)>],
+                        iterator_types = ["reduction", "parallel", "parallel"] }
+    ins(%arg0 : tensor<?x?x?xf32>)
+    outs(%arg1 : tensor<?x?xf32>) {
+    ^bb(%in: f32, %out: f32) :
+      %0 = arith.addf %in, %out : f32
+      linalg.yield %0 : f32
+    } -> tensor<?x?xf32>
+  return %0 : tensor<?x?xf32>
 }
 
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [4, 8, 16] : !transform.any_op
 }
 
+// CHECK-LABEL: @vectorize_dynamic_transpose_reduction(
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?x?xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<?x?xf32>) -> tensor<?x?xf32> {
+// CHECK: %[[VAL_2:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_3:.*]] = tensor.dim %[[VAL_0]], %[[VAL_2]] : tensor<?x?x?xf32>
+// CHECK: %[[VAL_4:.*]] = arith.constant 1 : index
+// CHECK: %[[VAL_5:.*]] = tensor.dim %[[VAL_0]], %[[VAL_4]] : tensor<?x?x?xf32>
+// CHECK: %[[VAL_6:.*]] = arith.constant 2 : index
+// CHECK: %[[VAL_7:.*]] = tensor.dim %[[VAL_0]], %[[VAL_6]] : tensor<?x?x?xf32>
+// CHECK: %[[VAL_10:.*]] = vector.create_mask %[[VAL_3]], %[[VAL_5]], %[[VAL_7]] : vector<4x8x16xi1>
+// CHECK: %[[VAL_11:.*]] = vector.mask %[[VAL_10]] { vector.transfer_read %[[VAL_0]]{{.*}} {in_bounds = [true, true, true]} : tensor<?x?x?xf32>, vector<4x8x16xf32> } : vector<4x8x16xi1> -> vector<4x8x16xf32>
+// CHECK: %[[VAL_13:.*]] = vector.create_mask %[[VAL_7]], %[[VAL_5]] : vector<16x8xi1>
+// CHECK: %[[VAL_14:.*]] = vector.mask %[[VAL_13]] { vector.transfer_read %[[VAL_1]]{{.*}} {in_bounds = [true, true], permutation_map = #{{.*}}} : tensor<?x?xf32>, vector<8x16xf32> } : vector<16x8xi1> -> vector<8x16xf32>
+// CHECK: %[[VAL_15:.*]] = vector.mask %[[VAL_10]] { vector.multi_reduction <add>, %[[VAL_11]], %[[VAL_14]] [0] : vector<4x8x16xf32> to vector<8x16xf32> } : vector<4x8x16xi1> -> vector<8x16xf32>
+// CHECK: %[[VAL_17:.*]] = vector.mask %[[VAL_13]] { vector.transfer_write %[[VAL_15]], %{{.*}} {in_bounds = [true, true], permutation_map = #{{.*}}} : vector<8x16xf32>, tensor<?x?xf32> } : vector<16x8xi1> -> tensor<?x?xf32>
+
 // -----
 
-// CHECK-LABEL: func @test_masked_pad_static_dynamic
-func.func @test_masked_pad_static_dynamic(%arg0: tensor<1x2x2x?xf32>, %low: index, %high: index,
-                  %pad_value: f32) -> tensor<6x?x?x?xf32> {
-  // CHECK: tensor.pad
-  %0 = tensor.pad %arg0 low[2, %low, 3, 3] high[3, 3, %high, 2] {
-    ^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index):
-      tensor.yield %pad_value : f32
-    } : tensor<1x2x2x?xf32> to tensor<6x?x?x?xf32>
-  return %0 : tensor<6x?x?x?xf32>
+func.func @vectorize_partial_dynamic_identity(%arg0: tensor<8x?xf32>,
+                                              %arg1: tensor<8x?xf32>,
+                                              %arg2: tensor<8x?xf32>) -> tensor<8x?xf32> {
+  %0 = linalg.generic { indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
+                                         affine_map<(d0, d1) -> (d0, d1)>,
+                                         affine_map<(d0, d1) -> (d0, d1)>],
+                        iterator_types = ["parallel", "parallel"] }
+    ins(%arg0, %arg1 : tensor<8x?xf32>, tensor<8x?xf32>)
+    outs(%arg2 : tensor<8x?xf32>) {
+    ^bb(%in0: f32, %in1: f32, %out: f32) :
+      %0 = arith.addf %in0, %in1 : f32
+      linalg.yield %0 : f32
+    } -> tensor<8x?xf32>
+  return %0 : tensor<8x?xf32>
 }
+// CHECK-LABEL: func.func @vectorize_partial_dynamic_identity(
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<8x?xf32>, %[[VAL_1:.*]]: tensor<8x?xf32>, %[[VAL_2:.*]]: tensor<8x?xf32>) -> tensor<8x?xf32> {
+// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[VAL_4:.*]] = tensor.dim %[[VAL_0]], %[[VAL_3]] : tensor<8x?xf32>
+// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 8 : index
+// CHECK: %[[VAL_8:.*]] = vector.create_mask %[[VAL_7]], %[[VAL_4]] : vector<8x32xi1>
+// CHECK: %[[VAL_9:.*]] = vector.mask %[[VAL_8]] { vector.transfer_read %[[VAL_0]][%[[VAL_5]], %[[VAL_5]]], %[[VAL_6]] {in_bounds = [true, true]} : tensor<8x?xf32>, vector<8x32xf32> } : vector<8x32xi1> -> vector<8x32xf32>
+// CHECK: %[[VAL_10:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK: %[[VAL_11:.*]] = vector.mask %[[VAL_8]] { vector.transfer_read %[[VAL_1]][%[[VAL_5]], %[[VAL_5]]], %[[VAL_10]] {in_bounds = [true, true]} : tensor<8x?xf32>, vector<8x32xf32> } : vector<8x32xi1> -> vector<8x32xf32>
+// CHECK: %[[VAL_12:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK: %[[VAL_13:.*]] = vector.mask %[[VAL_8]] { vector.transfer_read %[[VAL_2]][%[[VAL_5]], %[[VAL_5]]], %[[VAL_12]] {in_bounds = [true, true]} : tensor<8x?xf32>, vector<8x32xf32> } : vector<8x32xi1> -> vector<8x32xf32>
+// CHECK: %[[VAL_14:.*]] = arith.addf %[[VAL_9]], %[[VAL_11]] : vector<8x32xf32>
+// CHECK: %[[VAL_15:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_16:.*]] = vector.mask %[[VAL_8]] { vector.transfer_write %[[VAL_14]], %[[VAL_2]][%[[VAL_15]], %[[VAL_15]]] {in_bounds = [true, true]} : vector<8x32xf32>, tensor<8x?xf32> } : vector<8x32xi1> -> tensor<8x?xf32>
+
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
-  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 { vectorize_padding } : (!transform.any_op) -> !transform.any_op
+  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [8, 32] : !transform.any_op
}
 
 // -----
 
-func.func @zero_dim_tensor(%input: tensor<f32>, %output: tensor<f32>) -> tensor<f32>
+func.func @do_not_generate_masks(%arg0: tensor<8x32xf32>,
+                                 %arg1: tensor<8x32xf32>,
+                                 %arg2: tensor<8x32xf32>) -> tensor<8x32xf32> {
+  %0 = linalg.generic { indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
+                                         affine_map<(d0, d1) -> (d0, d1)>,
+                                         affine_map<(d0, d1) -> (d0, d1)>],
+                        iterator_types = ["parallel", "parallel"] }
+    ins(%arg0, %arg1 : tensor<8x32xf32>, tensor<8x32xf32>)
+    outs(%arg2 : tensor<8x32xf32>) {
+    ^bb(%in0: f32, %in1: f32, %out: f32) :
+      %0 = arith.addf %in0, %in1 : f32
+      linalg.yield %0 : f32
+    } -> tensor<8x32xf32>
+  return %0 : tensor<8x32xf32>
+}
+
+// CHECK-LABEL: func.func @do_not_generate_masks
+// CHECK-NOT: vector.mask
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [8, 32] : !transform.any_op
+}
+
+// -----
+
+func.func @vectorize_static_shape_with_mask(%arg0: tensor<8x30xf32>,
+                                            %arg1: tensor<8x30xf32>,
+                                            %arg2: tensor<8x30xf32>) -> tensor<8x30xf32> {
+  %0 = linalg.generic { indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
+                                         affine_map<(d0, d1) -> (d0, d1)>,
+                                         affine_map<(d0, d1) -> (d0, d1)>],
+                        iterator_types = ["parallel", "parallel"] }
+    ins(%arg0, %arg1 : tensor<8x30xf32>, tensor<8x30xf32>)
+    outs(%arg2 : tensor<8x30xf32>) {
+    ^bb(%in0: f32, %in1: f32, %out: f32) :
+      %0 = arith.addf %in0, %in1 : f32
+      linalg.yield %0 : f32
+    } -> tensor<8x30xf32>
+  return %0 : tensor<8x30xf32>
+}
+
+// CHECK-LABEL: func.func @vectorize_static_shape_with_mask(
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<8x30xf32>, %[[VAL_1:.*]]: tensor<8x30xf32>, %[[VAL_2:.*]]: tensor<8x30xf32>) -> tensor<8x30xf32> {
+// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 8 : index
+// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 30 : index
+// CHECK: %[[VAL_7:.*]] = vector.create_mask %[[VAL_5]], %[[VAL_6]] : vector<8x32xi1>
+// CHECK: %[[VAL_8:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %[[VAL_0]][%[[VAL_3]], %[[VAL_3]]], %[[VAL_4]] {in_bounds = [true, true]} : tensor<8x30xf32>, vector<8x32xf32> } : vector<8x32xi1> -> vector<8x32xf32>
+// CHECK: %[[VAL_9:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK: %[[VAL_10:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %[[VAL_1]][%[[VAL_3]], %[[VAL_3]]], %[[VAL_9]] {in_bounds = [true, true]} : tensor<8x30xf32>, vector<8x32xf32> } : vector<8x32xi1> -> vector<8x32xf32>
+// CHECK: %[[VAL_11:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK: %[[VAL_12:.*]] = vector.mask %[[VAL_7]] { vector.transfer_read %[[VAL_2]][%[[VAL_3]], %[[VAL_3]]], %[[VAL_11]] {in_bounds = [true, true]} : tensor<8x30xf32>, vector<8x32xf32> } : vector<8x32xi1> -> vector<8x32xf32>
+// CHECK: %[[VAL_13:.*]] = arith.addf %[[VAL_8]], %[[VAL_10]] : vector<8x32xf32>
+// CHECK: %[[VAL_14:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_15:.*]] = vector.mask %[[VAL_7]] { vector.transfer_write %[[VAL_13]], %[[VAL_2]][%[[VAL_14]], %[[VAL_14]]] {in_bounds = [true, true]} : vector<8x32xf32>, tensor<8x30xf32> } : vector<8x32xi1> -> tensor<8x30xf32>
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [8, 32] : !transform.any_op
+}
+
+// -----
+
+func.func @vectorize_dynamic_fill(%A : tensor<?x?xf32>, %arg0 : f32) -> tensor<?x?xf32> {
+  %0 = linalg.fill ins(%arg0 : f32) outs(%A : tensor<?x?xf32>) -> tensor<?x?xf32>
+  return %0 : tensor<?x?xf32>
+}
+
+// CHECK-LABEL: func.func @vectorize_dynamic_fill
+// CHECK: %[[DIM0:.*]] = tensor.dim
+// CHECK: %[[DIM1:.*]] = tensor.dim
+// CHECK: %[[MASK:.*]] = vector.create_mask %[[DIM0]], %[[DIM1]] : vector<8x16xi1>
+// CHECK: %[[BCAST:.*]] = vector.broadcast %{{.*}} : f32 to vector<8x16xf32>
+// CHECK: vector.mask %[[MASK]] { vector.transfer_write %[[BCAST]], {{.*}} {in_bounds = [true, true]} : vector<8x16xf32>, tensor<?x?xf32> } : vector<8x16xi1>
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [8, 16] : !transform.any_op
+}
+
+// -----
+
+// CHECK-LABEL: func @test_masked_vectorize_linalg_copy
+func.func @test_masked_vectorize_linalg_copy(%A : memref<?x?xf32>, %B : memref<?x?xf32>) {
+  // CHECK: %[[c0:.*]] = arith.constant 0 : index
+  // CHECK: %[[d0:.*]] = memref.dim %{{.*}}, %[[c0]] : memref<?x?xf32>
+  // CHECK: %[[c1:.*]] = arith.constant 1 : index
+  // CHECK: %[[d1:.*]] = memref.dim %{{.*}}, %[[c1]] : memref<?x?xf32>
+  // CHECK: %[[mask:.*]] = vector.create_mask %[[d0]], %[[d1]] : vector<2x4xi1>
+  // CHECK: vector.mask %[[mask]] {{.*}} vector.transfer_read %{{.*}} {in_bounds = [true, true]} : memref<?x?xf32>, vector<2x4xf32> } : vector<2x4xi1> -> vector<2x4xf32>
+  // CHECK: vector.mask %[[mask]] {{.*}} vector.transfer_write %{{.*}} {in_bounds = [true, true]} : vector<2x4xf32>, memref<?x?xf32> } : vector<2x4xi1>
+  linalg.copy ins(%A : memref<?x?xf32>) outs(%B : memref<?x?xf32>)
+  return
+}
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %0 = transform.structured.match ops{["linalg.copy"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [2, 4] : !transform.any_op
+}
+
+// -----
+
+// CHECK-LABEL: func @test_masked_vectorize_pad
+func.func @test_masked_vectorize_pad(
+  %0 : tensor<?x?xf32>, %h0 : index, %h1 : index)
+    -> tensor<2x4xf32>
 {
-  %0 = linalg.generic { indexing_maps = [ affine_map<() -> ()>, affine_map<() -> ()> ],
-                        iterator_types = [] }
-    ins(%input : tensor<f32>)
-    outs(%output : tensor<f32>) {
-    ^bb0(%arg0: f32, %arg1: f32):
-      %2 = arith.addf %arg0, %arg1 : f32
-      linalg.yield %2 : f32
-  } -> tensor<f32>
-  return %0 : tensor<f32>
+  // CHECK-DAG: %[[c42:.*]] = arith.constant 4.243000e+01 : f32
+  // CHECK-DAG: %[[c0:.*]] = arith.constant 0 : index
+  // CHECK-DAG: %[[empty:.*]] = tensor.empty() : tensor<2x4xf32>
+  // CHECK: %[[d0:.*]] = tensor.dim {{.*}} : tensor<?x?xf32>
+  // CHECK: %[[d1:.*]] = tensor.dim {{.*}} : tensor<?x?xf32>
+  // CHECK: %[[mask:.*]] = vector.create_mask %[[d0]], %[[d1]] : vector<2x4xi1>
+  // CHECK-DAG: %[[c0_2:.*]] = arith.constant 0 : index
+  // CHECK: %[[masked_read:.*]] = vector.mask %[[mask]] {
+  // CHECK-SAME:   vector.transfer_read %{{.*}}[%[[c0_2]], %[[c0_2]]], %[[c42]]
+  // CHECK-SAME:   {in_bounds = [true, true]} : tensor<?x?xf32>, vector<2x4xf32>
+  // CHECK-SAME: } : vector<2x4xi1> -> vector<2x4xf32>
+  // CHECK: vector.transfer_write %[[masked_read]], %[[empty]][%[[c0_2]], %[[c0_2]]]
+  // CHECK-SAME:   {in_bounds = [true, true]} : vector<2x4xf32>, tensor<2x4xf32>
+  %cst = arith.constant 42.43 : f32
+  %c0 = arith.constant 0 : index
+  %1 = tensor.pad %0 low[0, %c0] high[%h0, %h1] {
+    ^bb0(%hh1: index, %hh2: index):
+      tensor.yield %cst : f32
+    } : tensor<?x?xf32> to tensor<2x4xf32>
+  return %1: tensor<2x4xf32>
 }
 
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize %4 : (!transform.any_op) -> !transform.any_op
+  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
+    : (!transform.any_op) -> !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [2, 4] : !transform.any_op
 }
 
-// CHECK-LABEL: func @zero_dim_tensor
-// CHECK: vector.transfer_read {{.*}} : tensor<f32>, vector<f32>
-// CHECK: vector.extractelement
-// CHECK: vector.transfer_read {{.*}} : tensor<f32>, vector<f32>
-// CHECK: vector.extractelement
-// CHECK: arith.addf {{.*}} : f32
-// CHECK: vector.broadcast %{{.*}} : f32 to vector<f32>
-// CHECK: vector.transfer_write {{.*}} : vector<f32>, tensor<f32>
-
 // -----
 
-// Make sure we generate the right transfer writes for multi-output generic ops
-// with different permutation maps.
-
-func.func @multi_output_generic_different_perm_maps(%in0: tensor<4x1xf32>,
-                                                    %out0: tensor<4x1xf32>,
-                                                    %out1: tensor<1x4xf32>) -> (tensor<4x1xf32>, tensor<1x4xf32>) {
-  %13:2 = linalg.generic {indexing_maps = [ affine_map<(d0, d1) -> (d1, d0)>,
-                                            affine_map<(d0, d1) -> (d1, d0)>,
-                                            affine_map<(d0, d1) -> (d0, d1)> ],
-                          iterator_types = ["parallel", "parallel"]}
-  ins(%in0 : tensor<4x1xf32>)
-  outs(%out0, %out1 : tensor<4x1xf32>, tensor<1x4xf32>) {
-  ^bb0(%in: f32, %out: f32, %out_2: f32):
-    %16 = arith.addf %in, %in : f32
-    linalg.yield %16, %16 : f32, f32
-  } -> (tensor<4x1xf32>, tensor<1x4xf32>)
-  return %13#0, %13#1 : tensor<4x1xf32>, tensor<1x4xf32>
+// CHECK: #[[MAP:.+]] = affine_map<()[s0, s1] -> (s0 + s1)>
+// CHECK: func @test_masked_vectorize_dynamic_pad
+func.func @test_masked_vectorize_dynamic_pad(
+  %0 : tensor<?x?xf32>, %h0 : index, %h1 : index)
+    -> tensor<?x?xf32>
+{
+  // CHECK-DAG: %[[c42:.*]] = arith.constant 4.243000e+01 : f32
+  // CHECK-DAG: %[[c0:.*]] = arith.constant 0 : index
+  // CHECK-DAG: %[[res_d0:.+]] = affine.apply #[[MAP]]()
+  // CHECK-DAG: %[[res_d1:.+]] = affine.apply #[[MAP]]()
+  // CHECK-DAG: %[[empty:.*]] = tensor.empty(%[[res_d0]], %[[res_d1]]) : tensor<?x?xf32>
+  // CHECK: %[[d0:.*]] = tensor.dim {{.*}} : tensor<?x?xf32>
+  // CHECK: %[[d1:.*]] = tensor.dim {{.*}} : tensor<?x?xf32>
+  // CHECK: %[[mask:.*]] = vector.create_mask %[[d0]], %[[d1]] : vector<2x4xi1>
+  // CHECK-DAG: %[[c0_2:.*]] = arith.constant 0 : index
+  // CHECK: %[[masked_read:.*]] = vector.mask %[[mask]] {
+  // CHECK-SAME:   vector.transfer_read %{{.*}}[%[[c0_2]], %[[c0_2]]], %[[c42]]
+  // CHECK-SAME:   {in_bounds = [true, true]} : tensor<?x?xf32>, vector<2x4xf32>
+  // CHECK-SAME: } : vector<2x4xi1> -> vector<2x4xf32>
+  // CHECK: %[[mask_2:.*]] = vector.create_mask %[[res_d0]], %[[res_d1]] : vector<2x4xi1>
+  // CHECK: %[[masked_write:.*]] = vector.mask %[[mask_2]] {
+  // CHECK-SAME:   vector.transfer_write %[[masked_read]], %[[empty]][%[[c0_2]], %[[c0_2]]]
+  // CHECK-SAME:   {in_bounds = [true, true]} : vector<2x4xf32>, tensor<?x?xf32>
+  // CHECK: return %[[masked_write]] : tensor<?x?xf32>
+  %cst = arith.constant 42.43 : f32
+  %c0 = arith.constant 0 : index
+  %1 = tensor.pad %0 low[0, %c0] high[%h0, %h1] {
+    ^bb0(%hh1: index, %hh2: index):
+      tensor.yield %cst : f32
+  } : tensor<?x?xf32> to tensor<?x?xf32>
+  return %1: tensor<?x?xf32>
 }
 
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
-  %3 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  %4 = get_parent_op %3 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %5 = transform.structured.vectorize %4 : (!transform.any_op) -> !transform.any_op
+  %0 = transform.structured.match ops{["tensor.pad"]} in %arg1
+    : (!transform.any_op) -> !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [2, 4] : !transform.any_op
 }
 
-// CHECK-LABEL: func @multi_output_generic_different_perm_maps
-// CHECK: %[[VAL_5:.*]] = vector.transfer_read %{{.*}} {in_bounds = [true, true]} : tensor<4x1xf32>, vector<4x1xf32>
-// CHECK: %[[VAL_6:.*]] = arith.addf %[[VAL_5]], %[[VAL_5]] : vector<4x1xf32>
-// CHECK: %[[VAL_7:.*]] = vector.transpose %[[VAL_6]], [1, 0] : vector<4x1xf32> to vector<1x4xf32>
-// CHECK: %[[VAL_8:.*]] = vector.transpose %[[VAL_7]], [1, 0] : vector<1x4xf32> to vector<4x1xf32>
-// CHECK: vector.transfer_write %[[VAL_8]], %{{.*}} {in_bounds = [true, true]} : vector<4x1xf32>, tensor<4x1xf32>
-// CHECK: vector.transfer_write %[[VAL_7]], %{{.*}} {in_bounds = [true, true]} : vector<1x4xf32>, tensor<1x4xf32>
+// -----
+
+func.func @matmul(%A: memref<?x?xf32>, %B: memref<?x?xf32>, %C: memref<?x?xf32>) {
+  linalg.matmul ins(%A, %B: memref<?x?xf32>, memref<?x?xf32>)
+                outs(%C: memref<?x?xf32>)
+  return
+}
+
+// CHECK-LABEL: func.func @matmul(
+// CHECK-SAME: %[[A:.*]]: memref<?x?xf32>, %[[B:.*]]: memref<?x?xf32>, %[[C:.*]]: memref<?x?xf32>) {
+// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[VAL_4:.*]] = memref.dim %[[A]], %[[VAL_3]] : memref<?x?xf32>
+// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[VAL_6:.*]] = memref.dim %[[B]], %[[VAL_5]] : memref<?x?xf32>
+// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[VAL_8:.*]] = memref.dim %[[A]], %[[VAL_7]] : memref<?x?xf32>
+// CHECK: %[[MASK_A:.*]] = vector.create_mask %[[VAL_4]], %[[VAL_8]] : vector<8x4xi1>
+// CHECK: %[[LOAD_A:.*]] = vector.mask %[[MASK_A]] { vector.transfer_read %[[A]]{{\[}}%{{.*}}, %{{.*}}], %{{.*}} {in_bounds = [true, true, true], permutation_map = #{{.*}}} : memref<?x?xf32>, vector<8x16x4xf32> } : vector<8x4xi1> -> vector<8x16x4xf32>
+// CHECK: %[[MASK_B:.*]] = vector.create_mask %[[VAL_8]], %[[VAL_6]] : vector<4x16xi1>
+// CHECK: %[[LOAD_B:.*]] = vector.mask %[[MASK_B]] { vector.transfer_read %[[B]]{{\[}}%{{.*}}, %{{.*}}], %{{.*}} {in_bounds = [true, true, true], permutation_map = #{{.*}}} : memref<?x?xf32>, vector<8x16x4xf32> } : vector<4x16xi1> -> vector<8x16x4xf32>
+// CHECK: %[[MASK_C:.*]] = vector.create_mask %[[VAL_4]], %[[VAL_6]] : vector<8x16xi1>
+// CHECK: %[[LOAD_C:.*]] = vector.mask %[[MASK_C]] { vector.transfer_read %[[C]]{{\[}}%{{.*}}, %{{.*}}], %{{.*}} {in_bounds = [true, true]} : memref<?x?xf32>, vector<8x16xf32> } : vector<8x16xi1> -> vector<8x16xf32>
+// CHECK: %[[MULF:.*]] = arith.mulf %[[LOAD_A]], %[[LOAD_B]] : vector<8x16x4xf32>
+// CHECK: %[[MASK_MULIT_RED:.*]] = vector.create_mask %[[VAL_4]], %[[VAL_6]], %[[VAL_8]] : vector<8x16x4xi1>
+// CHECK: %[[MULTI_RED:.*]] = vector.mask %[[MASK_MULIT_RED]] { vector.multi_reduction <add>, %[[MULF]], %[[LOAD_C]] [2] : vector<8x16x4xf32> to vector<8x16xf32> } : vector<8x16x4xi1> -> vector<8x16xf32>
+// CHECK: %[[C2:.*]] = arith.constant 0 : index
+// CHECK: vector.mask %[[MASK_C]] { vector.transfer_write %[[MULTI_RED]], %[[C]]{{\[}}%[[C2]], %[[C2]]] {in_bounds = [true, true]} : vector<8x16xf32>, memref<?x?xf32> } : vector<8x16xi1>
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  transform.structured.vectorize %matmul vector_sizes [8, 16, 4] : !transform.any_op
+}
+
+// -----
+
+func.func @matmul_scalable(%A: memref<?x?xf32>, %B: memref<?x?xf32>, %C: memref<?x?xf32>) {
+  linalg.matmul ins(%A, %B: memref<?x?xf32>, memref<?x?xf32>)
+                outs(%C: memref<?x?xf32>)
+  return
+}
+
+// CHECK-LABEL: func.func @matmul_scalable(
+// CHECK-SAME: %[[A:.*]]: memref<?x?xf32>, %[[B:.*]]: memref<?x?xf32>, %[[C:.*]]: memref<?x?xf32>) {
+// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[VAL_4:.*]] = memref.dim %[[A]], %[[VAL_3]] : memref<?x?xf32>
+// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[VAL_6:.*]] = memref.dim %[[B]], %[[VAL_5]] : memref<?x?xf32>
+// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[VAL_8:.*]] = memref.dim %[[A]], %[[VAL_7]] : memref<?x?xf32>
+// CHECK: %[[MASK_A:.*]] = vector.create_mask %[[VAL_4]], %[[VAL_8]] : vector<8x4xi1>
+// CHECK: %[[LOAD_A:.*]] = vector.mask %[[MASK_A]] { vector.transfer_read %[[A]]{{\[}}%{{.*}}, %{{.*}}], %{{.*}} {in_bounds = [true, true, true], permutation_map = #{{.*}}} : memref<?x?xf32>, vector<8x[16]x4xf32> } : vector<8x4xi1> -> vector<8x[16]x4xf32>
+// CHECK: %[[MASK_B:.*]] = vector.create_mask %[[VAL_8]], %[[VAL_6]] : vector<4x[16]xi1>
+// CHECK: %[[LOAD_B:.*]] = vector.mask %[[MASK_B]] { vector.transfer_read %[[B]]{{\[}}%{{.*}}, %{{.*}}], %{{.*}} {in_bounds = [true, true, true], permutation_map = #{{.*}}} : memref<?x?xf32>, vector<8x[16]x4xf32> } : vector<4x[16]xi1> -> vector<8x[16]x4xf32>
+// CHECK: %[[MASK_C:.*]] = vector.create_mask %[[VAL_4]], %[[VAL_6]] : vector<8x[16]xi1>
+// CHECK: %[[LOAD_C:.*]] = vector.mask %[[MASK_C]] { vector.transfer_read %[[C]]{{\[}}%{{.*}}, %{{.*}}], %{{.*}} {in_bounds = [true, true]} : memref<?x?xf32>, vector<8x[16]xf32> } : vector<8x[16]xi1> -> vector<8x[16]xf32>
+// CHECK: %[[MULF:.*]] = arith.mulf %[[LOAD_A]], %[[LOAD_B]] : vector<8x[16]x4xf32>
+// CHECK: %[[MASK_MULIT_RED:.*]] = vector.create_mask %[[VAL_4]], %[[VAL_6]], %[[VAL_8]] : vector<8x[16]x4xi1>
+// CHECK: %[[MULTI_RED:.*]] = vector.mask %[[MASK_MULIT_RED]] { vector.multi_reduction <add>, %[[MULF]], %[[LOAD_C]] [2] : vector<8x[16]x4xf32> to vector<8x[16]xf32> } : vector<8x[16]x4xi1> -> vector<8x[16]xf32>
+// CHECK: %[[C2:.*]] = arith.constant 0 : index
+// CHECK: vector.mask %[[MASK_C]] { vector.transfer_write %[[MULTI_RED]], %[[C]]{{\[}}%[[C2]], %[[C2]]] {in_bounds = [true, true]} : vector<8x[16]xf32>, memref<?x?xf32> } : vector<8x[16]xi1>
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !transform.any_op):
+  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+  transform.structured.vectorize %matmul vector_sizes [8, [16], 4] : !transform.any_op
+}
diff --git a/mlir/test/Dialect/Linalg/vectorize-tensor-extract-masked.mlir b/mlir/test/Dialect/Linalg/vectorize-tensor-extract-masked.mlir
index da861942cc3e..3187385b5398 100644
--- a/mlir/test/Dialect/Linalg/vectorize-tensor-extract-masked.mlir
+++ b/mlir/test/Dialect/Linalg/vectorize-tensor-extract-masked.mlir
@@ -28,7 +28,7 @@ func.func @masked_static_vectorize_nd_tensor_extract_with_affine_apply_contiguou
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [1, 4] vectorize_nd_extract : !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [1, 4] vectorize_nd_extract : !transform.any_op
 }
 
 // -----
 
@@ -83,7 +83,7 @@ func.func @masked_dynamic_vectorize_nd_tensor_extract_with_affine_apply_contiguo
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [1, 4] vectorize_nd_extract : !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [1, 4] vectorize_nd_extract : !transform.any_op
 }
 
 // -----
 
@@ -121,7 +121,7 @@ func.func @masked_vectorize_nd_tensor_extract_with_affine_apply_gather(%6: tenso
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [1, 4] vectorize_nd_extract : !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [1, 4] vectorize_nd_extract : !transform.any_op
 }
 
 // -----
 
@@ -176,7 +176,7 @@ func.func @masked_dynamic_vectorize_nd_tensor_extract_with_affine_apply_gather(%
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [1, 4] vectorize_nd_extract : !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [1, 4] vectorize_nd_extract : !transform.any_op
 }
 
 // -----
 
@@ -226,7 +226,7 @@ func.func @extract_masked_vectorize(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [3, 3] vectorize_nd_extract : !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [3, 3] vectorize_nd_extract : !transform.any_op
 }
 
 // -----
 
@@ -269,5 +269,5 @@ func.func @tensor_extract_dynamic_shape(%arg1: tensor<123x321xf32>, %arg2: tenso
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [1, 3, 8] vectorize_nd_extract : !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [1, 3, 8] vectorize_nd_extract : !transform.any_op
 }
diff --git a/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir b/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir
index 84e36c8912c6..5cf9c81dff69 100644
--- a/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir
+++ b/mlir/test/Dialect/Linalg/vectorize-tensor-extract.mlir
@@ -31,7 +31,7 @@ transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
 }
 
 // -----
 
@@ -65,7 +65,7 @@ func.func @vectorize_nd_tensor_extract_constant_idx(%arg0: tensor<3x3xf32>, %arg
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 { vectorize_nd_extract } : !transform.any_op
+  transform.structured.vectorize %0 { vectorize_nd_extract } : !transform.any_op
 }
 
 // -----
 
@@ -104,7 +104,7 @@ transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
 }
 
 // -----
 
@@ -156,7 +156,7 @@ transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
 }
 
 // -----
 
@@ -204,7 +204,7 @@ transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
 }
 
 // -----
 
@@ -248,7 +248,7 @@ transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
 }
 
 // -----
 
@@ -290,7 +290,7 @@ transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
 }
 
 // -----
 
@@ -332,7 +332,7 @@ transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
 }
 
 // -----
 
@@ -376,7 +376,7 @@ transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
 }
 
 // -----
 
@@ -416,7 +416,7 @@ transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
 }
 
 // -----
 
@@ -456,7 +456,7 @@ transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
 }
 
 // -----
 
@@ -495,7 +495,7 @@ transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
 }
 
 // -----
 
@@ -522,5 +522,5 @@ transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  %2 = transform.structured.vectorize %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
+  %2 = transform.structured.vectorize_children_and_apply_patterns %1 { vectorize_nd_extract } : (!transform.any_op) -> !transform.any_op
 }
diff --git a/mlir/test/Dialect/Transform/selective-targeting.mlir b/mlir/test/Dialect/Transform/selective-targeting.mlir
index 5bf47fd75d3d..139842cb447e 100644
--- a/mlir/test/Dialect/Transform/selective-targeting.mlir
+++ b/mlir/test/Dialect/Transform/selective-targeting.mlir
@@ -80,7 +80,7 @@ transform.with_pdl_patterns {
     transform.structured.tile %0 [4, 4, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
     %1 = pdl_match @pdl_target_attrC in %arg1 : (!transform.any_op) -> !transform.any_op
     %2 = get_parent_op %1 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-    transform.structured.vectorize %2 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize_children_and_apply_patterns %2 : (!transform.any_op) -> !transform.any_op
   }
 }
 
@@ -125,7 +125,7 @@ transform.with_pdl_patterns {
   ^bb1(%arg1: !transform.any_op):
     %0 = pdl_match @pdl_target in %arg1 : (!transform.any_op) -> !transform.any_op
     %1 = get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-    transform.structured.vectorize %1 : (!transform.any_op) -> !transform.any_op
+    transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
   }
 }
 
@@ -150,5 +150,5 @@ func.func @vectorize_all(
 
 transform.sequence failures(propagate) {
 ^bb0(%arg0: !transform.any_op):
-  transform.structured.vectorize %arg0 : (!transform.any_op) -> !transform.any_op
+  transform.structured.vectorize_children_and_apply_patterns %arg0 : (!transform.any_op) -> !transform.any_op
 }
diff --git a/mlir/test/Dialect/Vector/transform-vector.mlir b/mlir/test/Dialect/Vector/transform-vector.mlir
index 3e62a8fbf718..de6c022e2059 100644
--- a/mlir/test/Dialect/Vector/transform-vector.mlir
+++ b/mlir/test/Dialect/Vector/transform-vector.mlir
@@ -19,7 +19,7 @@ transform.sequence failures(propagate) {
   %1, %loops:3 = transform.structured.tile %0 [8, 4, 2] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
   %2 = get_parent_op %1 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
-  transform.structured.vectorize %2 : (!transform.any_op) -> !transform.any_op
+  transform.structured.vectorize_children_and_apply_patterns %2 : (!transform.any_op) -> !transform.any_op
   %b = transform.bufferization.one_shot_bufferize layout{IdentityLayoutMap} %module_op
     {bufferize_function_boundaries = true, allow_return_allocs = true}
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir
index dabf0dac4680..08f14dfae324 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir
@@ -112,7 +112,7 @@ func.func @entry() {
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [[4], [4]] : !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [[4], [4]] : !transform.any_op
 }
 
 llvm.func @printCString(!llvm.ptr)
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/fill-1d.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/fill-1d.mlir
index 34b3835c4077..c3f49b2f39cf 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/fill-1d.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/fill-1d.mlir
@@ -49,7 +49,7 @@ func.func @entry() {
 transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-  transform.structured.masked_vectorize %0 vector_sizes [[4]] : !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [[4]] : !transform.any_op
 }
 
 llvm.func @printCString(!llvm.ptr)
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-matmul-masked-vec.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-matmul-masked-vec.mlir
index 8a95d1c864d2..64954098aa03 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/test-matmul-masked-vec.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-matmul-masked-vec.mlir
@@ -51,7 +51,7 @@ transform.sequence failures(propagate) {
 ^bb1(%arg1: !transform.any_op):
   %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
   %func_op = get_parent_op %0 : (!transform.any_op) -> !transform.op<"func.func">
-  transform.structured.masked_vectorize %0 vector_sizes [4, 4, 2] : !transform.any_op
+  transform.structured.vectorize %0 vector_sizes [4, 4, 2] : !transform.any_op
   transform.apply_patterns to %func_op {
     transform.apply_patterns.vector.lower_multi_reduction lowering_strategy = "innerreduction"
   } : !transform.op<"func.func">
diff --git a/mlir/test/python/dialects/transform_structured_ext.py b/mlir/test/python/dialects/transform_structured_ext.py
index 69181160d548..f2b73ca8bb76 100644
--- a/mlir/test/python/dialects/transform_structured_ext.py
+++ b/mlir/test/python/dialects/transform_structured_ext.py
@@ -171,68 +171,66 @@ def testMatchOpNamesList(target):
 @run
 @create_sequence
-def testMaskedVectorizeNoArgs(target):
-    structured.MaskedVectorizeOp(target)
-    # CHECK-LABEL: TEST: testMaskedVectorizeNoArgs
+def testVectorizeNoArgs(target):
+    structured.VectorizeOp(target)
+    # CHECK-LABEL: TEST: testVectorizeNoArgs
     # CHECK: transform.sequence
-    # CHECK: transform.structured.masked_vectorize
+    # CHECK: transform.structured.vectorize
     # CHECK-NOT: vector_sizes
 
 
-@run
-@create_sequence
-def testMaskedVectorizeStatic(target):
-    structured.MaskedVectorizeOp(target, [16, 4])
-    # CHECK-LABEL: TEST: testMaskedVectorizeStatic
+def testVectorizeStatic(target):
+    structured.VectorizeOp(target, [16, 4])
+    # CHECK-LABEL: TEST: testVectorizeStatic
     # CHECK: transform.sequence
-    # CHECK: transform.structured.masked_vectorize
+    # CHECK: transform.structured.vectorize
+    # CHECK-SAME: vector_sizes [16, 4]
 
 
 @run
 @create_sequence
-def testMaskedVectorizeArray(target):
+def testVectorizeArray(target):
     sizes = Attribute.parse("[16, 4]")
-    structured.MaskedVectorizeOp(target, sizes)
-    # CHECK-LABEL: TEST: testMaskedVectorizeArray
+    structured.VectorizeOp(target, sizes)
+    # CHECK-LABEL: TEST: testVectorizeArray
     # CHECK: transform.sequence
-    # CHECK: transform.structured.masked_vectorize
+    # CHECK: transform.structured.vectorize
     # CHECK-SAME: vector_sizes [16, 4]
 
 
 @run
 @create_sequence
-def testMaskedVectorizeMixed(target):
+def testVectorizeMixed(target):
     sz1 = structured.MatchOp.match_op_names(target, ["arith.constant"])
     sz2 = Attribute.parse("4")
-    structured.MaskedVectorizeOp(target, [sz1, sz2])
-    # CHECK-LABEL: TEST: testMaskedVectorizeMixed
+    structured.VectorizeOp(target, [sz1, sz2])
+    # CHECK-LABEL: TEST: testVectorizeMixed
    # CHECK: transform.sequence
     # CHECK: %[[V0:.*]] = transform.structured.match
-    # CHECK: transform.structured.masked_vectorize
+    # CHECK: transform.structured.vectorize
     # CHECK-SAME: vector_sizes [%[[V0]] : !transform.any_op, 4]
 
 
 @run
 @create_sequence
-def testMaskedVectorizeScalable(target):
+def testVectorizeScalable(target):
     sz1 = structured.MatchOp.match_op_names(target, ["arith.constant"])
     sz2 = Attribute.parse("4")
-    structured.MaskedVectorizeOp(target, [16, [sz1], [sz2], [8]])
-    # CHECK-LABEL: TEST: testMaskedVectorizeScalable
+    structured.VectorizeOp(target, [16, [sz1], [sz2], [8]])
+    # CHECK-LABEL: TEST: testVectorizeScalable
     # CHECK: transform.sequence
     # CHECK-DAG: %[[V0:.*]] = transform.structured.match
-    # CHECK-DAG: transform.structured.masked_vectorize
+    # CHECK-DAG: transform.structured.vectorize
     # CHECK-SAME: vector_sizes [16, [%[[V0]] : !transform.any_op], [4], [8]]
 
 
 @run
 @create_sequence
-def testMaskedVectorizeArgs(target):
-    structured.MaskedVectorizeOp(target, [16, 4], vectorize_nd_extract=True)
-    # CHECK-LABEL: TEST: testMaskedVectorizeArgs
+def testVectorizeArgs(target):
+    structured.VectorizeOp(target, [16, 4], vectorize_nd_extract=True)
+    # CHECK-LABEL: TEST: testVectorizeArgs
     # CHECK: transform.sequence
-    # CHECK: transform.structured.masked_vectorize
+    # CHECK: transform.structured.vectorize
     # CHECK-SAME: vectorize_nd_extract
@@ -497,15 +495,15 @@ def testTileToForallMapping(target):
 @run
 @create_sequence
-def testVectorizeAllAttrs(target):
-    structured.VectorizeOp(
+def testVectorizeChildrenAndApplyPatternsAllAttrs(target):
+    structured.VectorizeChildrenAndApplyPatternsOp(
         target,
         disable_multi_reduction_to_contract_patterns=True,
         disable_transfer_permutation_map_lowering_patterns=True,
         vectorize_nd_extract=True,
         vectorize_padding=True,
     )
-    # CHECK-LABEL: TEST: testVectorizeAllAttrs
+    # CHECK-LABEL: TEST: testVectorizeChildrenAndApplyPatternsAllAttrs
     # CHECK: transform.sequence
     # CHECK: = transform.structured.vectorize
     # CHECK-SAME: disable_multi_reduction_to_contract_patterns
@@ -516,15 +514,15 @@ def testVectorizeAllAttrs(target):
 @run
 @create_sequence
-def testVectorizeNoAttrs(target):
-    structured.VectorizeOp(
+def testVectorizeChildrenAndApplyPatternsNoAttrs(target):
+    structured.VectorizeChildrenAndApplyPatternsOp(
         target,
         disable_multi_reduction_to_contract_patterns=False,
         disable_transfer_permutation_map_lowering_patterns=False,
         vectorize_nd_extract=False,
         vectorize_padding=False,
     )
-    # CHECK-LABEL: TEST: testVectorizeNoAttrs
+    # CHECK-LABEL: TEST: testVectorizeChildrenAndApplyPatternsNoAttrs
     # CHECK: transform.sequence
     # CHECK: = transform.structured.vectorize
     # CHECK-NOT: disable_multi_reduction_to_contract_patterns