Adopt Properties to store operations' inherent Attributes in the LLVM dialect

This is part of an ongoing migration to adopt Properties inside MLIR. With usePropertiesForAttributes set on the dialect, the inherent attributes of LLVM dialect operations are stored as Properties, and the generic assembly form prints them inside <{...}> rather than in the attribute dictionary {...}.
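
For example, a minimal before/after sketch assembled from the updated tests below (the SSA value names %c, %v, %r are illustrative only):

  // Before: the inherent attribute is carried in the trailing attribute dictionary.
  %r = "llvm.intr.vector.reduce.fadd"(%c, %v) {reassoc = false} : (f32, vector<16xf32>) -> f32
  // After: the inherent attribute is stored as a property and prints inside <{...}>.
  %r = "llvm.intr.vector.reduce.fadd"(%c, %v) <{reassoc = false}> : (f32, vector<16xf32>) -> f32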

Differential Revision: https://reviews.llvm.org/D148300
Mehdi Amini
2023-04-13 22:40:24 -06:00
parent 14f0776550
commit 7151b94ce2
6 changed files with 25 additions and 23 deletions

View File

@@ -19,6 +19,7 @@ def LLVM_Dialect : Dialect {
let hasRegionArgAttrVerify = 1;
let hasRegionResultAttrVerify = 1;
let hasOperationAttrVerify = 1;
+let usePropertiesForAttributes = 1;
let extraClassDeclaration = [{
/// Name of the data layout attributes.

View File

@@ -5,14 +5,14 @@
// CHECK-SAME: %[[A:.*]]: vector<16xf32>)
// CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f32) : f32
// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]])
-// CHECK-SAME: {reassoc = false} : (f32, vector<16xf32>) -> f32
+// CHECK-SAME: <{reassoc = false}> : (f32, vector<16xf32>) -> f32
// CHECK: return %[[V]] : f32
//
// REASSOC-LABEL: @reduce_add_f32(
// REASSOC-SAME: %[[A:.*]]: vector<16xf32>)
// REASSOC: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f32) : f32
// REASSOC: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]])
-// REASSOC-SAME: {reassoc = true} : (f32, vector<16xf32>) -> f32
+// REASSOC-SAME: <{reassoc = true}> : (f32, vector<16xf32>) -> f32
// REASSOC: return %[[V]] : f32
//
func.func @reduce_add_f32(%arg0: vector<16xf32>) -> f32 {
@@ -26,14 +26,14 @@ func.func @reduce_add_f32(%arg0: vector<16xf32>) -> f32 {
// CHECK-SAME: %[[A:.*]]: vector<16xf32>)
// CHECK: %[[C:.*]] = llvm.mlir.constant(1.000000e+00 : f32) : f32
// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fmul"(%[[C]], %[[A]])
-// CHECK-SAME: {reassoc = false} : (f32, vector<16xf32>) -> f32
+// CHECK-SAME: <{reassoc = false}> : (f32, vector<16xf32>) -> f32
// CHECK: return %[[V]] : f32
//
// REASSOC-LABEL: @reduce_mul_f32(
// REASSOC-SAME: %[[A:.*]]: vector<16xf32>)
// REASSOC: %[[C:.*]] = llvm.mlir.constant(1.000000e+00 : f32) : f32
// REASSOC: %[[V:.*]] = "llvm.intr.vector.reduce.fmul"(%[[C]], %[[A]])
-// REASSOC-SAME: {reassoc = true} : (f32, vector<16xf32>) -> f32
+// REASSOC-SAME: <{reassoc = true}> : (f32, vector<16xf32>) -> f32
// REASSOC: return %[[V]] : f32
//
func.func @reduce_mul_f32(%arg0: vector<16xf32>) -> f32 {

View File

@@ -1267,7 +1267,7 @@ func.func @reduce_0d_f32(%arg0: vector<f32>) -> f32 {
// CHECK: %[[CA:.*]] = builtin.unrealized_conversion_cast %[[A]] : vector<f32> to vector<1xf32>
// CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f32) : f32
// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[CA]])
-// CHECK-SAME: {reassoc = false} : (f32, vector<1xf32>) -> f32
+// CHECK-SAME: <{reassoc = false}> : (f32, vector<1xf32>) -> f32
// CHECK: return %[[V]] : f32
// -----
@@ -1280,7 +1280,7 @@ func.func @reduce_f16(%arg0: vector<16xf16>) -> f16 {
// CHECK-SAME: %[[A:.*]]: vector<16xf16>)
// CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f16) : f16
// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]])
-// CHECK-SAME: {reassoc = false} : (f16, vector<16xf16>) -> f16
+// CHECK-SAME: <{reassoc = false}> : (f16, vector<16xf16>) -> f16
// CHECK: return %[[V]] : f16
// -----
@@ -1293,7 +1293,7 @@ func.func @reduce_f32(%arg0: vector<16xf32>) -> f32 {
// CHECK-SAME: %[[A:.*]]: vector<16xf32>)
// CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f32) : f32
// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]])
-// CHECK-SAME: {reassoc = false} : (f32, vector<16xf32>) -> f32
+// CHECK-SAME: <{reassoc = false}> : (f32, vector<16xf32>) -> f32
// CHECK: return %[[V]] : f32
// -----
@@ -1306,7 +1306,7 @@ func.func @reduce_f64(%arg0: vector<16xf64>) -> f64 {
// CHECK-SAME: %[[A:.*]]: vector<16xf64>)
// CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f64) : f64
// CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]])
-// CHECK-SAME: {reassoc = false} : (f64, vector<16xf64>) -> f64
+// CHECK-SAME: <{reassoc = false}> : (f64, vector<16xf64>) -> f64
// CHECK: return %[[V]] : f64
// -----

View File

@@ -5,33 +5,33 @@
module {
// GENERIC: "llvm.func"
-// GENERIC: function_type = !llvm.func<void ()>
+// GENERIC-SAME: function_type = !llvm.func<void ()>
// GENERIC-SAME: sym_name = "foo"
-// GENERIC-SAME: () -> ()
+// GENERIC: () -> ()
// CHECK: llvm.func @foo()
"llvm.func" () ({
}) {sym_name = "foo", function_type = !llvm.func<void ()>} : () -> ()
// GENERIC: "llvm.func"
-// GENERIC: function_type = !llvm.func<i64 (i64, i64)>
+// GENERIC-SAME: function_type = !llvm.func<i64 (i64, i64)>
// GENERIC-SAME: sym_name = "bar"
-// GENERIC-SAME: () -> ()
+// GENERIC: () -> ()
// CHECK: llvm.func @bar(i64, i64) -> i64
"llvm.func"() ({
}) {sym_name = "bar", function_type = !llvm.func<i64 (i64, i64)>} : () -> ()
// GENERIC: "llvm.func"
+// GENERIC-SAME: function_type = !llvm.func<i64 (i64)>
+// GENERIC-SAME: sym_name = "baz"
// CHECK: llvm.func @baz(%{{.*}}: i64) -> i64
"llvm.func"() ({
"llvm.func"() <{sym_name = "baz", function_type = !llvm.func<i64 (i64)>}> ({
// GENERIC: ^bb0
^bb0(%arg0: i64):
// GENERIC: llvm.return
llvm.return %arg0 : i64
-// GENERIC: function_type = !llvm.func<i64 (i64)>
-// GENERIC-SAME: sym_name = "baz"
-// GENERIC-SAME: () -> ()
-}) {sym_name = "baz", function_type = !llvm.func<i64 (i64)>} : () -> ()
+// GENERIC: () -> ()
+}) : () -> ()
// CHECK: llvm.func @qux(!llvm.ptr {llvm.noalias}, i64)
// CHECK: attributes {xxx = {yyy = 42 : i64}}
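
For reference, a consolidated sketch of the @baz case above, assembled from the test input: the inherent attributes now ride on the op as properties ahead of the region, and only the trailing function type remains after it.

  "llvm.func"() <{sym_name = "baz", function_type = !llvm.func<i64 (i64)>}> ({
  ^bb0(%arg0: i64):
    llvm.return %arg0 : i64
  }) : () -> ()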

View File

@@ -100,7 +100,7 @@ llvm.mlir.global internal protected unnamed_addr @protected(42 : i32) : i32
// -----
// expected-error @+1 {{op requires attribute 'sym_name'}}
"llvm.mlir.global"() ({}) {linkage = "private", type = i64, constant, global_type = i64, value = 42 : i64} : () -> ()
"llvm.mlir.global"() ({}) {linkage = #llvm.linkage<private>, type = i64, constant, global_type = i64, value = 42 : i64} : () -> ()
// -----

View File

@@ -273,13 +273,13 @@ define void @vector_reductions(float %0, <8 x float> %1, <8 x i32> %2) {
%12 = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> %2)
; CHECK: "llvm.intr.vector.reduce.umin"(%{{.*}}) : (vector<8xi32>) -> i32
%13 = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> %2)
; CHECK: "llvm.intr.vector.reduce.fadd"(%{{.*}}, %{{.*}}) {reassoc = false} : (f32, vector<8xf32>) -> f32
; CHECK: "llvm.intr.vector.reduce.fadd"(%{{.*}}, %{{.*}}) <{reassoc = false}> : (f32, vector<8xf32>) -> f32
%14 = call float @llvm.vector.reduce.fadd.v8f32(float %0, <8 x float> %1)
; CHECK: "llvm.intr.vector.reduce.fmul"(%{{.*}}, %{{.*}}) {reassoc = false} : (f32, vector<8xf32>) -> f32
; CHECK: "llvm.intr.vector.reduce.fmul"(%{{.*}}, %{{.*}}) <{reassoc = false}> : (f32, vector<8xf32>) -> f32
%15 = call float @llvm.vector.reduce.fmul.v8f32(float %0, <8 x float> %1)
; CHECK: "llvm.intr.vector.reduce.fadd"(%{{.*}}, %{{.*}}) {reassoc = true} : (f32, vector<8xf32>) -> f32
; CHECK: "llvm.intr.vector.reduce.fadd"(%{{.*}}, %{{.*}}) <{reassoc = true}> : (f32, vector<8xf32>) -> f32
%16 = call reassoc float @llvm.vector.reduce.fadd.v8f32(float %0, <8 x float> %1)
; CHECK: "llvm.intr.vector.reduce.fmul"(%{{.*}}, %{{.*}}) {reassoc = true} : (f32, vector<8xf32>) -> f32
; CHECK: "llvm.intr.vector.reduce.fmul"(%{{.*}}, %{{.*}}) <{reassoc = true}> : (f32, vector<8xf32>) -> f32
%17 = call reassoc float @llvm.vector.reduce.fmul.v8f32(float %0, <8 x float> %1)
; CHECK: "llvm.intr.vector.reduce.xor"(%{{.*}}) : (vector<8xi32>) -> i32
%18 = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> %2)
@@ -361,7 +361,8 @@ define void @trap_intrinsics() {
call void @llvm.trap()
; CHECK: "llvm.intr.debugtrap"() : () -> ()
call void @llvm.debugtrap()
; CHECK: "llvm.intr.ubsantrap"() {failureKind = 1 : i8} : () -> ()
; CHECK: "llvm.intr.ubsantrap"()
; CHECK-SAME: failureKind = 1
call void @llvm.ubsantrap(i8 1)
ret void
}