[MLIR][LLVM] Remove typed pointer remnants from integration tests (#71208)
This commit removes all LLVM dialect typed pointers from the integration tests. Typed pointers have been deprecated for a while, and they are scheduled to be removed from the LLVM dialect soon. Related PSA: https://discourse.llvm.org/t/psa-removal-of-typed-pointers-from-the-llvm-dialect/74502
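The pattern repeated throughout the diff is the same: the pointee type is dropped from the pointer type, and any operation that still needs an element type now states it explicitly. A minimal MLIR sketch of the migration (illustrative only; %base and %i are hypothetical values, not taken from any single test):

// Before: the pointer type carries its pointee type.
!Filename = !llvm.ptr<i8>

// After: pointers are opaque; the pointee type is gone.
!Filename = !llvm.ptr

// Ops that used to read the element type off the pointer spell it out
// instead, e.g. getelementptr gains a trailing explicit element type:
%p = llvm.getelementptr %base[%i] : (!llvm.ptr, i64) -> !llvm.ptr, i8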
@@ -25,7 +25,7 @@
 // REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false
 // R_UN: %{compile} | env %{env} %{run} | FileCheck %s
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #BSR = #sparse_tensor.encoding<{
   map = (i, j) ->
@@ -31,7 +31,7 @@
 // Do the same run, but now with direct IR generation and VLA vectorization.
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #DenseMatrix = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : dense, d1 : dense)
@@ -25,7 +25,7 @@
 // REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false
 // R_UN: %{compile} | env %{env} %{run} | FileCheck %s
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #CSR = #sparse_tensor.encoding<{
   map = (i, j) -> ( i : dense, j : compressed)
@@ -31,7 +31,7 @@
 // Do the same run, but now with direct IR generation and VLA vectorization.
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #SparseTensor = #sparse_tensor.encoding<{
   // Note that any dimToLvl permutation should give the same results
@@ -40,7 +40,7 @@
 // vectorization.
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #SparseMatrix = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : dense, d1 : compressed),
@@ -32,7 +32,7 @@
 // vectorization.
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #SparseTensor = #sparse_tensor.encoding<{
   map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed)
@@ -28,7 +28,7 @@
 // vectorization.
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #DCSR = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : compressed, d1 : compressed)
@@ -32,7 +32,7 @@
 // vectorization.
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #SparseMatrix = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : compressed, d1 : compressed),
@@ -32,7 +32,7 @@
 // Do the same run, but now with VLA vectorization.
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #SortedCOO = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
@@ -31,7 +31,7 @@
 // Do the same run, but now with VLA vectorization.
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #SparseMatrix = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : dense, d1 : compressed)
@@ -34,7 +34,7 @@
 // TODO: The test currently only operates on the triangular part of the
 // symmetric matrix.
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #SparseMatrix = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : compressed, d1 : compressed)
@@ -31,7 +31,7 @@
 
 // UNSUPPORTED: target=aarch64{{.*}}
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #SparseMatrix = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : compressed, d1 : compressed)
@@ -34,7 +34,7 @@
 // TODO: The test currently only operates on the triangular part of the
 // symmetric matrix.
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #SparseMatrix = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : compressed, d1 : compressed)
@@ -30,7 +30,7 @@
 // Do the same run, but now with VLA vectorization.
 // RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #SparseMatrix = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : compressed, d1 : compressed)
@@ -26,7 +26,7 @@
 // RUNNOT: %{compile} enable-runtime-library=false gpu-data-transfer-strategy=zero-copy" | %{run}
 //
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #CSR = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : dense, d1 : compressed)
@@ -21,7 +21,7 @@
 // R_UN: %{compile} enable-runtime-library=false" | %{run}
 //
 
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
 
 #CSR = #sparse_tensor.encoding<{
   map = (d0, d1) -> (d0 : dense, d1 : compressed)
@@ -19,11 +19,11 @@ from tools import sparse_compiler
 def boilerplate(attr: st.EncodingAttr):
     """Returns boilerplate main method."""
     return f"""
-func.func @main(%p : !llvm.ptr<i8>) -> () attributes {{ llvm.emit_c_interface }} {{
+func.func @main(%p : !llvm.ptr) -> () attributes {{ llvm.emit_c_interface }} {{
   %d = arith.constant sparse<[[0, 0], [1, 1], [0, 9], [9, 0], [4, 4]],
                              [1.0, 2.0, 3.0, 4.0, 5.0]> : tensor<10x10xf64>
   %a = sparse_tensor.convert %d : tensor<10x10xf64> to tensor<10x10xf64, {attr}>
-  sparse_tensor.out %a, %p : tensor<10x10xf64, {attr}>, !llvm.ptr<i8>
+  sparse_tensor.out %a, %p : tensor<10x10xf64, {attr}>, !llvm.ptr
   return
 }}
 """
@@ -37,10 +37,10 @@ func.func @vector_copy_i128(%src: memref<?x?xi128>, %dst: memref<?x?xi128>) {
 }
 
 func.func @test_load_store_zaq0() {
-  %init_a_str = llvm.mlir.addressof @init_tile_a : !llvm.ptr<array<17 x i8>>
-  %init_b_str = llvm.mlir.addressof @init_tile_b : !llvm.ptr<array<17 x i8>>
-  %final_a_str = llvm.mlir.addressof @final_tile_a : !llvm.ptr<array<17 x i8>>
-  %final_b_str = llvm.mlir.addressof @final_tile_b : !llvm.ptr<array<17 x i8>>
+  %init_a_str = llvm.mlir.addressof @init_tile_a : !llvm.ptr
+  %init_b_str = llvm.mlir.addressof @init_tile_b : !llvm.ptr
+  %final_a_str = llvm.mlir.addressof @final_tile_a : !llvm.ptr
+  %final_b_str = llvm.mlir.addressof @final_tile_b : !llvm.ptr
 
   %c0 = arith.constant 0 : index
   %min_elts_q = arith.constant 1 : index
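A side effect visible in this hunk: the result of llvm.mlir.addressof no longer encodes the global's type, which now lives only on the global definition. A minimal sketch under opaque pointers (the @msg global is hypothetical, modeled on the @init_tile_a string used by this test):

llvm.mlir.global internal constant @msg("hello, zaq0 tile\00") : !llvm.array<17 x i8>

llvm.func @use_msg() {
  // The address is just an opaque !llvm.ptr; any consumer that needs the
  // element type (e.g. a getelementptr into the array) must name it itself.
  %p = llvm.mlir.addressof @msg : !llvm.ptr
  llvm.return
}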
@@ -13,18 +13,17 @@ module {
 llvm.func @entry() -> i32 {
   %c0 = llvm.mlir.constant(0 : index) : i64
 
-  %1 = llvm.mlir.addressof @const16 : !llvm.ptr<array<16 x i32>>
+  %1 = llvm.mlir.addressof @const16 : !llvm.ptr
   %ptr = llvm.getelementptr %1[%c0, %c0]
-    : (!llvm.ptr<array<16 x i32>>, i64, i64) -> !llvm.ptr<i32>
-  %ptr2 = llvm.bitcast %ptr : !llvm.ptr<i32> to !llvm.ptr<vector<16xi32>>
+    : (!llvm.ptr, i64, i64) -> !llvm.ptr, !llvm.array<16 x i32>
 
   // operand_attrs of *m operands need to be piped through to LLVM for
   // verification to pass.
   %v = llvm.inline_asm
       asm_dialect = intel
       operand_attrs = [{ elementtype = vector<16xi32> }]
-      "vmovdqu32 $0, $1", "=x,*m" %ptr2
-      : (!llvm.ptr<vector<16xi32>>) -> vector<16xi32>
+      "vmovdqu32 $0, $1", "=x,*m" %ptr
+      : (!llvm.ptr) -> vector<16xi32>
 
   // CHECK: 0
   %v0 = vector.extract %v[0]: i32 from vector<16xi32>
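Note that the llvm.bitcast is deleted outright rather than updated: with a single opaque pointer type, pointer-to-pointer casts are meaningless, so the inline asm consumes %ptr directly and the verifier gets the type of the *m memory operand from the elementtype operand attribute instead of from a pointee type. A minimal sketch of that pattern (hypothetical %buf value):

%v = llvm.inline_asm
    asm_dialect = intel
    operand_attrs = [{ elementtype = vector<16xi32> }]
    "vmovdqu32 $0, $1", "=x,*m" %buf
    : (!llvm.ptr) -> vector<16xi32>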
@@ -31,7 +31,7 @@
 module @mymod {
   func.func private @printMemrefF32(memref<*xf32>)
   memref.global "private" @bufferLhsGlobal : !shmemlhs
-  llvm.func @printf(!llvm.ptr<i8>, ...) -> i32
+  llvm.func @printf(!llvm.ptr, ...) -> i32
   func.func @main() {
     %c8192 = arith.constant 8192 : index
     %c-1_i32 = arith.constant -1 : i32
@@ -41,7 +41,7 @@ module @mymod {
   func.func private @printMemrefF32(memref<*xf32>)
   memref.global "private" @bufferLhsGlobal : !shmemlhs
   memref.global "private" @bufferRhsGlobal : !shmemrhs
-  llvm.func @printf(!llvm.ptr<i8>, ...) -> i32
+  llvm.func @printf(!llvm.ptr, ...) -> i32
   func.func @main() {
     %c32768 = arith.constant 32768 : index
     %c-1_i32 = arith.constant -1 : i32