[mlir][bufferization] Fix OneShotBufferize when defaultMemorySpaceFn is used (#91524)
As described in issue llvm/llvm-project#91518, a previous PR llvm/llvm-project#78484 introduced the `defaultMemorySpaceFn` into bufferization options, allowing one to inform OneShotBufferize that it should use a specified function to derive the memory space attribute from the encoding attribute attached to tensor types.

However, introducing this feature exposed unhandled edge cases, examples of which are introduced by this change in the new test under `test/Dialect/Bufferization/Transforms/one-shot-bufferize-encodings.mlir`.

Fixing the inconsistencies introduced by `defaultMemorySpaceFn` is pretty simple. This change:

- Updates the `bufferization.to_memref` and `bufferization.to_tensor` operations to explicitly include operand and destination types, whereas previously they relied on type inference to deduce the tensor types. Since the type inference cannot recover the correct tensor encoding/memory space, the operand and result types must be explicitly included. This is a small assembly format change, but it touches a large number of test files.
- Makes minor updates to other bufferization functions to handle the changes in building the above ops.
- Updates bufferization of `tensor.from_elements` to handle memory space.

Integration/upgrade guide: in downstream projects, if you have tests or MLIR files that explicitly use `bufferization.to_tensor` or `bufferization.to_memref`, then update them to the new assembly format as follows:

```
%1 = bufferization.to_memref %0 : memref<10xf32>
%2 = bufferization.to_tensor %1 : memref<10xf32>
```

becomes

```
%1 = bufferization.to_memref %0 : tensor<10xf32> to memref<10xf32>
%2 = bufferization.to_tensor %1 : memref<10xf32> to tensor<10xf32>
```
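For illustration, the sketch below is distilled from the new test file added by this change; the function name is illustrative, not part of the change itself. It shows how an integer encoding on a tensor type flows into the memory space of the bufferized memref when running `mlir-opt -one-shot-bufferize="use-encoding-for-memory-space"`:

```mlir
// Input: the `1 : i64` encoding on the tensor type stands in for memory space 1.
func.func @encoding_as_memory_space(%arg0: tensor<128xf32, 1>) -> tensor<128xf32, 1> {
  %0 = bufferization.alloc_tensor() copy(%arg0) {memory_space = 1 : i64} : tensor<128xf32, 1>
  return %0 : tensor<128xf32, 1>
}
```

After bufferization, the encoding is preserved on both sides of the explicit conversions, which the old inferred syntax could not express:

```mlir
// to_memref now spells out both the tensor type (with its encoding) and the
// memref type (with memory space 1); to_tensor does the same in reverse.
%m = bufferization.to_memref %arg0 : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1>
%t = bufferization.to_tensor %alloc : memref<128xf32, 1> to tensor<128xf32, 1 : i64>
```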
@@ -223,8 +223,8 @@ func.func @test_matmul(%A: memref<1x17x19xf32>,
                        %B: memref<1x19x29xf32>,
                        %C: memref<1x17x29xf32>) {
 
-  %A_tensor = bufferization.to_tensor %A restrict : memref<1x17x19xf32>
-  %B_tensor = bufferization.to_tensor %B restrict : memref<1x19x29xf32>
+  %A_tensor = bufferization.to_tensor %A restrict : memref<1x17x19xf32> to tensor<1x17x19xf32>
+  %B_tensor = bufferization.to_tensor %B restrict : memref<1x19x29xf32> to tensor<1x19x29xf32>
 
   %0 = tosa.matmul %A_tensor, %B_tensor
       : (tensor<1x17x19xf32>, tensor<1x19x29xf32>) ->
@@ -387,9 +387,7 @@ def Bufferization_ToTensorOp : Bufferization_Op<"to_tensor", [
     BufferizableOpInterface,
     SameOperandsAndResultShape,
     SameOperandsAndResultElementType,
-    TypesMatchWith<"result type matches tensor equivalent of 'memref'",
-                   "memref", "result",
-                   "memref::getTensorTypeFromMemRefType($_self)">
+    AllElementTypesMatch<["memref", "result"]>
   ]> {
   let summary = "create a tensor from a `memref`";
   let description = [{
@@ -404,7 +402,7 @@ def Bufferization_ToTensorOp : Bufferization_Op<"to_tensor", [
 
     ```mlir
     // Produces a value of tensor<4x?xf32> type.
-    %t = bufferization.to_tensor %m : memref<4x?xf32, #layout, 0>
+    %t = bufferization.to_tensor %m : memref<4x?xf32, #layout, 0> to tensor<4x?xf32>
     ```
 
     If the `writable` unit attribute is set, the produced tensor is considered
@@ -427,7 +425,7 @@ def Bufferization_ToTensorOp : Bufferization_Op<"to_tensor", [
     Example:
 
     ```
-    %t = bufferization.to_tensor %m restrict writable : memref<4xf32>
+    %t = bufferization.to_tensor %m restrict writable : memref<4xf32> to tensor<4xf32>
 
     // %t is writable, so the tensor.insert may bufferize in-place in the
     // absence of other conflicts.
@@ -476,9 +474,16 @@ def Bufferization_ToTensorOp : Bufferization_Op<"to_tensor", [
 
   let assemblyFormat = [{
     $memref (`restrict` $restrict^)? (`writable` $writable^)? attr-dict
-      `:` type($memref)
+      `:` type($memref) `to` type($result)
   }];
 
+  let builders = [
+    OpBuilder<(ins "Value":$memref, CArg<"bool", "false">:$restrict, CArg<"bool", "false">:$writeable), [{
+      auto rtt = memref::getTensorTypeFromMemRefType(memref.getType());
+      build($_builder, $_state, rtt, memref, restrict, writeable);
+    }]>
+  ];
+
   let hasCanonicalizer = 1;
   let hasFolder = 1;
 }
@@ -493,9 +498,8 @@ def Bufferization_ToMemrefOp : Bufferization_Op<"to_memref", [
     SameOperandsAndResultShape,
     SameOperandsAndResultElementType,
     Pure,
-    TypesMatchWith<"type of 'tensor' is the tensor equivalent of 'memref'",
-                   "memref", "tensor",
-                   "memref::getTensorTypeFromMemRefType($_self)">
+    AllShapesMatch<["memref", "tensor"]>,
+    AllElementTypesMatch<["memref", "tensor"]>
   ]> {
   let summary = "cast a tensor to memref";
   let description = [{
@@ -503,7 +507,7 @@ def Bufferization_ToMemrefOp : Bufferization_Op<"to_memref", [
 
     ```mlir
     // Result type is memref<4x?xf32, #layout, 0>
-    %m = bufferization.to_memref %t : memref<4x?xf32, #layout, 0>
+    %m = bufferization.to_memref %t : tensor<4x?xf32> to memref<4x?xf32, #layout, 0>
     ```
 
    This operation is a specialized variant of the built-in
@@ -550,7 +554,7 @@ def Bufferization_ToMemrefOp : Bufferization_Op<"to_memref", [
   }];
 
   let assemblyFormat = [{
-    $tensor (`read_only` $read_only^)? attr-dict `:` type($memref)
+    $tensor (`read_only` $read_only^)? attr-dict `:` type($tensor) `to` type($memref)
   }];
 
   let hasFolder = 1;
@@ -250,8 +250,8 @@ def OptimizeAllocationLiveness
   let summary = "This pass optimizes the liveness of temp allocations in the "
                 "input function";
   let description =
-      [{This pass will find all operations that have a memory allocation effect.
-        It will search for the corresponding deallocation and move it right after
+      [{This pass will find all operations that have a memory allocation effect.
+      It will search for the corresponding deallocation and move it right after
       the last user of the allocation.
       This will optimize the liveness of the allocations.
 
@@ -510,6 +510,10 @@ def OneShotBufferize : Pass<"one-shot-bufferize", "ModuleOp"> {
            /*default=*/"false",
            "The memory space of an memref types must always be inferred. If "
            "unset, a default memory space of 0 is used otherwise.">,
+    Option<"useEncodingForMemorySpace", "use-encoding-for-memory-space", "bool",
+           /*default=*/"false",
+           "Use the Tensor encoding attribute for the memory space. Exclusive to"
+           " the 'must-infer-memory-space' option">,
     Option<"testAnalysisOnly", "test-analysis-only", "bool",
            /*default=*/"false",
            "Test only: Only run inplaceability analysis and annotate IR">,
@@ -718,7 +718,7 @@ void bufferization::replaceOpWithBufferizedValues(RewriterBase &rewriter,
       // loose all of its users and eventually DCE away.
       rewriter.setInsertionPointAfter(op);
       replacement = rewriter.create<bufferization::ToTensorOp>(
-          replacement.getLoc(), replacement);
+          replacement.getLoc(), opResult.getType(), replacement);
     }
     replacements.push_back(replacement);
   }
@@ -69,7 +69,7 @@ BufferizeTypeConverter::BufferizeTypeConverter() {
     if (auto inputType = dyn_cast<MemRefType>(inputs[0].getType())) {
       // MemRef to MemRef cast.
      assert(inputType != type && "expected different types");
-      // Unranked to ranked and ranked to unranked casts must be explicit.
+      // Ranked to unranked casts must be explicit.
      auto rankedDestType = dyn_cast<MemRefType>(type);
      if (!rankedDestType)
        return nullptr;
@@ -147,12 +147,31 @@ struct OneShotBufferizePass
       opt.dumpAliasSets = dumpAliasSets;
       opt.setFunctionBoundaryTypeConversion(
           parseLayoutMapOption(functionBoundaryTypeConversion));
 
+      if (mustInferMemorySpace && useEncodingForMemorySpace) {
+        emitError(getOperation()->getLoc())
+            << "only one of 'must-infer-memory-space' and "
+               "'use-encoding-for-memory-space' are allowed in "
+            << getArgument();
+        return signalPassFailure();
+      }
+
       if (mustInferMemorySpace) {
         opt.defaultMemorySpaceFn =
             [](TensorType t) -> std::optional<Attribute> {
           return std::nullopt;
         };
       }
+
+      if (useEncodingForMemorySpace) {
+        opt.defaultMemorySpaceFn =
+            [](TensorType t) -> std::optional<Attribute> {
+          if (auto rtt = dyn_cast<RankedTensorType>(t))
+            return rtt.getEncoding();
+          return std::nullopt;
+        };
+      }
+
       opt.printConflicts = printConflicts;
       opt.bufferAlignment = bufferAlignment;
       opt.testAnalysisOnly = testAnalysisOnly;
@@ -203,7 +203,8 @@ struct ExecuteRegionOpInterface
     for (const auto &it : llvm::enumerate(executeRegionOp->getResultTypes())) {
       if (isa<TensorType>(it.value())) {
         newResults.push_back(rewriter.create<bufferization::ToTensorOp>(
-            executeRegionOp.getLoc(), newOp->getResult(it.index())));
+            executeRegionOp.getLoc(), it.value(),
+            newOp->getResult(it.index())));
       } else {
         newResults.push_back(newOp->getResult(it.index()));
       }
@@ -485,15 +486,17 @@ getBuffers(RewriterBase &rewriter, const MutableOperandRange &operands,
 /// ToTensorOps, so that the block body can be moved over to the new op.
 static SmallVector<Value>
 getBbArgReplacements(RewriterBase &rewriter, Block::BlockArgListType bbArgs,
+                     Block::BlockArgListType oldBbArgs,
                      const DenseSet<int64_t> &tensorIndices) {
   SmallVector<Value> result;
   for (const auto &it : llvm::enumerate(bbArgs)) {
     size_t idx = it.index();
     Value val = it.value();
     if (tensorIndices.contains(idx)) {
-      result.push_back(
-          rewriter.create<bufferization::ToTensorOp>(val.getLoc(), val)
-              .getResult());
+      result.push_back(rewriter
+                           .create<bufferization::ToTensorOp>(
+                               val.getLoc(), oldBbArgs[idx].getType(), val)
+                           .getResult());
     } else {
       result.push_back(val);
     }
@@ -763,7 +766,8 @@ struct ForOpInterface
     // iter_args of the new loop in ToTensorOps.
     rewriter.setInsertionPointToStart(loopBody);
     SmallVector<Value> iterArgs =
-        getBbArgReplacements(rewriter, newForOp.getRegionIterArgs(), indices);
+        getBbArgReplacements(rewriter, newForOp.getRegionIterArgs(),
+                             forOp.getRegionIterArgs(), indices);
     iterArgs.insert(iterArgs.begin(), newForOp.getInductionVar());
 
     // Move loop body to new loop.
@@ -1000,16 +1004,18 @@ struct WhileOpInterface
     // The old block uses tensors, so wrap the (memref) bbArgs of the new block
     // in ToTensorOps.
     rewriter.setInsertionPointToStart(newBeforeBody);
-    SmallVector<Value> newBeforeArgs = getBbArgReplacements(
-        rewriter, newWhileOp.getBeforeArguments(), indicesBefore);
+    SmallVector<Value> newBeforeArgs =
+        getBbArgReplacements(rewriter, newWhileOp.getBeforeArguments(),
+                             whileOp.getBeforeArguments(), indicesBefore);
     rewriter.mergeBlocks(whileOp.getBeforeBody(), newBeforeBody, newBeforeArgs);
 
     // Set up new iter_args and move the loop body block to the new op.
     // The old block uses tensors, so wrap the (memref) bbArgs of the new block
     // in ToTensorOps.
     rewriter.setInsertionPointToStart(newAfterBody);
-    SmallVector<Value> newAfterArgs = getBbArgReplacements(
-        rewriter, newWhileOp.getAfterArguments(), indicesAfter);
+    SmallVector<Value> newAfterArgs =
+        getBbArgReplacements(rewriter, newWhileOp.getAfterArguments(),
+                             whileOp.getAfterArguments(), indicesAfter);
     rewriter.mergeBlocks(whileOp.getAfterBody(), newAfterBody, newAfterArgs);
 
     // Replace loop results.
@@ -1255,8 +1261,8 @@ struct ForallOpInterface
          forallOp.getBody()->getArguments().drop_front(rank), buffers)) {
       BlockArgument bbArg = std::get<0>(it);
       Value buffer = std::get<1>(it);
-      Value bufferAsTensor =
-          rewriter.create<ToTensorOp>(forallOp.getLoc(), buffer);
+      Value bufferAsTensor = rewriter.create<ToTensorOp>(
+          forallOp.getLoc(), bbArg.getType(), buffer);
       bbArg.replaceAllUsesWith(bufferAsTensor);
     }
 
@@ -480,10 +480,6 @@ struct FromElementsOpInterface
     auto fromElementsOp = cast<tensor::FromElementsOp>(op);
     auto tensorType = cast<RankedTensorType>(fromElementsOp.getType());
 
-    // TODO: Implement memory space for this op.
-    if (options.defaultMemorySpaceFn(tensorType) != Attribute())
-      return op->emitError("memory space not implemented yet");
-
     // Allocate a buffer for the result.
     Location loc = op->getLoc();
     auto shape = tensorType.getShape();
@@ -493,10 +489,12 @@ struct FromElementsOpInterface
                                /*copy=*/false);
     if (failed(tensorAlloc))
       return failure();
-    auto memrefType =
-        MemRefType::get(tensorType.getShape(), tensorType.getElementType());
+    FailureOr<BaseMemRefType> memrefType =
+        bufferization::getBufferType(*tensorAlloc, options);
+    if (failed(memrefType))
+      return failure();
     Value buffer = rewriter.create<bufferization::ToMemrefOp>(
-        op->getLoc(), memrefType, *tensorAlloc);
+        op->getLoc(), *memrefType, *tensorAlloc);
 
     // Case: tensor<0xelem_type>.
     if (fromElementsOp.getElements().empty()) {
@@ -242,7 +242,7 @@ module {
     ^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index):
       tensor.yield %cst_f32 : f32
     } : tensor<1x32x32x8xf32> to tensor<1x40x8229x8xf32>
-    %1 = bufferization.to_memref %padded : memref<1x40x8229x8xf32>
+    %1 = bufferization.to_memref %padded : tensor<1x40x8229x8xf32> to memref<1x40x8229x8xf32>
    %alloc_0 = memref.alloc() {alignment = 64 : i64} : memref<1x32x32x8xf32>
    affine.for %arg1 = 0 to 1 {
      affine.for %arg2 = 0 to 32 {
@@ -280,7 +280,7 @@ module {
 // SPIRV-NOT: affine.for %{{.*}}
 
 // SPIRV: ReturnValue
-    %2 = bufferization.to_tensor %alloc_1 : memref<1x32x32x8xf32>
+    %2 = bufferization.to_tensor %alloc_1 : memref<1x32x32x8xf32> to tensor<1x32x32x8xf32>
    %3 = builtin.unrealized_conversion_cast %2 : tensor<1x32x32x8xf32> to !spirv.array<8192 x f32>
    spirv.ReturnValue %3 : !spirv.array<8192 x f32>
  }
@@ -7,7 +7,7 @@ func.func @index_cast(%tensor: tensor<i32>, %scalar: i32) -> (tensor<index>, ind
   %index_scalar = arith.index_cast %scalar : i32 to index
   return %index_tensor, %index_scalar : tensor<index>, index
 }
-// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<i32>
+// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor<i32>
 // CHECK-NEXT: %[[INDEX_MEMREF:.*]] = arith.index_cast %[[MEMREF]]
 // CHECK-SAME: memref<i32> to memref<index>
 // CHECK-NEXT: %[[INDEX_TENSOR:.*]] = bufferization.to_tensor %[[INDEX_MEMREF]]
@@ -83,8 +83,8 @@ func.func @non_tensor() {
 // CHECK-SAME: %[[PRED:.*]]: i1,
 // CHECK-SAME: %[[TRUE_VAL:.*]]: tensor<f32>,
 // CHECK-SAME: %[[FALSE_VAL:.*]]: tensor<f32>) -> tensor<f32> {
-// CHECK-DAG: %[[TRUE_VAL_MEMREF:.*]] = bufferization.to_memref %[[TRUE_VAL]] : memref<f32>
-// CHECK-DAG: %[[FALSE_VAL_MEMREF:.*]] = bufferization.to_memref %[[FALSE_VAL]] : memref<f32>
+// CHECK-DAG: %[[TRUE_VAL_MEMREF:.*]] = bufferization.to_memref %[[TRUE_VAL]] : tensor<f32>
+// CHECK-DAG: %[[FALSE_VAL_MEMREF:.*]] = bufferization.to_memref %[[FALSE_VAL]] : tensor<f32>
 // CHECK: %[[RET_MEMREF:.*]] = arith.select %[[PRED]], %[[TRUE_VAL_MEMREF]], %[[FALSE_VAL_MEMREF]] : memref<f32>
 // CHECK: %[[RET:.*]] = bufferization.to_tensor %[[RET_MEMREF]] : memref<f32>
 // CHECK: return %[[RET]] : tensor<f32>
@@ -9,7 +9,7 @@
 // CHECK-NEXT: %[[clone:.*]] = bufferization.clone %[[m]]
 // CHECK-NEXT: return %[[clone]]
 func.func private @no_interface_no_operands(%t : tensor<?x?x?xf16>) -> memref<?x?x?xf16> {
-  %0 = bufferization.to_memref %t : memref<?x?x?xf16>
+  %0 = bufferization.to_memref %t : tensor<?x?x?xf16> to memref<?x?x?xf16>
   return %0 : memref<?x?x?xf16>
 }
 
@@ -96,7 +96,7 @@ func.func @to_memref_not_read_only(%idx : index, %f: f32) -> f32 {
   // Some op may write into the result of to_memref later.
   // CHECK: bufferization.to_memref
   // CHECK-SAME: {__inplace_operands_attr__ = ["false"]}
-  %m = bufferization.to_memref %t : memref<5xf32>
+  %m = bufferization.to_memref %t : tensor<5xf32> to memref<5xf32>
   %2 = tensor.extract %t[%idx] : tensor<5xf32>
   return %2 : f32
 }
@@ -112,7 +112,7 @@ func.func @to_memref_read_only(%idx : index, %f: f32) -> f32 {
   // Some op may write into the result of to_memref later.
   // CHECK: bufferization.to_memref
   // CHECK-SAME: {__inplace_operands_attr__ = ["true"]}
-  %m = bufferization.to_memref %t {read_only} : memref<5xf32>
+  %m = bufferization.to_memref %t {read_only} : tensor<5xf32> to memref<5xf32>
   %2 = tensor.extract %t[%idx] : tensor<5xf32>
   return %2 : f32
 }
@@ -0,0 +1,111 @@
+// RUN: mlir-opt %s -one-shot-bufferize="use-encoding-for-memory-space" -split-input-file | FileCheck %s
+
+func.func @alloc_tesor_with_space_no_encoding() -> tensor<128xf32> {
+  %0 = bufferization.alloc_tensor() {memory_space = 1 : i64} : tensor<128xf32>
+  return %0 : tensor<128xf32>
+}
+
+// CHECK-LABEL: @alloc_tesor_with_space_no_encoding
+// CHECK-SAME: () -> tensor<128xf32> {
+// CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 1>
+// CHECK: %[[v0:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 1> to tensor<128xf32>
+// CHECK: return %[[v0]] : tensor<128xf32>
+
+// -----
+
+func.func @alloc_tesor_with_space_and_cast() -> tensor<128xf32, 1> {
+  %0 = bufferization.alloc_tensor() {memory_space = 1 : i64} : tensor<128xf32>
+  %1 = tensor.cast %0 : tensor<128xf32> to tensor<128xf32, 1>
+  return %1 : tensor<128xf32, 1>
+}
+
+// CHECK-LABEL: @alloc_tesor_with_space_and_cast
+// CHECK-SAME: () -> tensor<128xf32, 1 : i64> {
+// CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 1>
+// CHECK: %[[v0:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 1> to tensor<128xf32, 1 : i64>
+// CHECK: return %[[v0]] : tensor<128xf32, 1 : i64>
+
+// -----
+
+func.func @alloc_tesor_with_space_with_encoding() -> tensor<128xf32, 1 : i64> {
+  %0 = bufferization.alloc_tensor() {memory_space = 1 : i64} : tensor<128xf32, 1 : i64>
+  return %0 : tensor<128xf32, 1 : i64>
+}
+
+// CHECK-LABEL: @alloc_tesor_with_space_with_encoding
+// CHECK-SAME: () -> tensor<128xf32, 1 : i64> {
+// CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 1>
+// CHECK: %[[v0:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 1> to tensor<128xf32, 1 : i64>
+// CHECK: return %[[v0]] : tensor<128xf32, 1 : i64>
+
+// -----
+
+func.func @alloc_tesor_copy_from_default_space(%arg0: tensor<128xf32>) -> tensor<128xf32> {
+  %0 = bufferization.alloc_tensor() copy(%arg0) {memory_space = 1 : i64} : tensor<128xf32>
+  return %0 : tensor<128xf32>
+}
+
+// CHECK-LABEL: @alloc_tesor_copy_from_default_space
+// CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32>) -> tensor<128xf32> {
+// CHECK: %[[v0:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32> to memref<128xf32, strided<[?], offset: ?>>
+// CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 1>
+// CHECK: memref.copy %[[v0]], %[[alloc]] : memref<128xf32, strided<[?], offset: ?>> to memref<128xf32, 1>
+// CHECK: %[[v1:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 1> to tensor<128xf32>
+// CHECK: return %[[v1]] : tensor<128xf32>
+
+// -----
+
+func.func @alloc_tesor_copy_from_non_default_space(%arg0: tensor<128xf32, 1>) -> tensor<128xf32, 2> {
+  %0 = bufferization.alloc_tensor() copy(%arg0) {memory_space = 2 : i64} : tensor<128xf32, 1>
+  %1 = tensor.cast %0 : tensor<128xf32, 1> to tensor<128xf32, 2>
+  return %1 : tensor<128xf32, 2>
+}
+
+// CHECK-LABEL: @alloc_tesor_copy_from_non_default_space
+// CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32, 1 : i64>) -> tensor<128xf32, 2 : i64> {
+// CHECK: %[[v0:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1>
+// CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 2>
+// CHECK: memref.copy %[[v0]], %[[alloc]] : memref<128xf32, strided<[?], offset: ?>, 1> to memref<128xf32, 2>
+// CHECK: %[[v1:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 2> to tensor<128xf32, 2 : i64>
+// CHECK: return %[[v1]] : tensor<128xf32, 2 : i64>
+
+// -----
+
+// TODO: this should be illegal since ultimately we can not eliminate the `bufferization.to_tensor` when we
+// bufferize function boundaries.
+func.func @alloc_tesor_copy_from_non_default_space_no_cast(%arg0: tensor<128xf32, 1>,
+                                                           %arg1: tensor<4xf32, 1>) -> tensor<128xf32, 1> {
+  %0 = bufferization.alloc_tensor() copy(%arg0) {memory_space = 2 : i64} : tensor<128xf32, 1>
+  %1 = tensor.insert_slice %arg1 into %arg0 [0][4][1] : tensor<4xf32, 1> into tensor<128xf32, 1>
+  return %0 : tensor<128xf32, 1>
+}
+
+// CHECK-LABEL: @alloc_tesor_copy_from_non_default_space_no_cast
+// CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32, 1 : i64>, %[[arg1:.+]]: tensor<4xf32, 1 : i64>) -> tensor<128xf32, 1 : i64> {
+// CHECK: %[[v0:.+]] = bufferization.to_memref %[[arg1]] : tensor<4xf32, 1 : i64> to memref<4xf32, strided<[?], offset: ?>, 1>
+// CHECK: %[[v1:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1>
+// CHECK: %[[v2:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1>
+// CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 2>
+// CHECK: memref.copy %[[v2]], %[[alloc]] : memref<128xf32, strided<[?], offset: ?>, 1> to memref<128xf32, 2>
+// CHECK: %[[v3:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 2> to tensor<128xf32, 1 : i64>
+// CHECK: %[[alloc_0:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 1>
+// CHECK: memref.copy %[[v1]], %[[alloc_0]] : memref<128xf32, strided<[?], offset: ?>, 1> to memref<128xf32, 1>
+// CHECK: %[[subview:.+]] = memref.subview %[[alloc_0]][0] [4] [1] : memref<128xf32, 1> to memref<4xf32, strided<[1]>, 1>
+// CHECK: memref.copy %[[v0]], %[[subview]] : memref<4xf32, strided<[?], offset: ?>, 1> to memref<4xf32, strided<[1]>, 1>
+// CHECK: return %[[v3]] : tensor<128xf32, 1 : i64>
+
+// -----
+
+func.func @materialize_in_destination(%arg0: tensor<128xf32, 1>) -> tensor<128xf32, 2> {
+  %0 = bufferization.alloc_tensor () {memory_space = 2 : i64} : tensor<128xf32, 2>
+  %1 = bufferization.materialize_in_destination %arg0 in %0 : (tensor<128xf32, 1>, tensor<128xf32, 2>) -> tensor<128xf32, 2>
+  return %1 : tensor<128xf32, 2>
+}
+
+// CHECK-LABEL: @materialize_in_destination
+// CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32, 1 : i64>) -> tensor<128xf32, 2 : i64> {
+// CHECK: %[[v0:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1>
+// CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 2>
+// CHECK: memref.copy %[[v0]], %[[alloc]] : memref<128xf32, strided<[?], offset: ?>, 1> to memref<128xf32, 2>
+// CHECK: %[[v1:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 2> to tensor<128xf32, 2 : i64>
+// CHECK: return %[[v1]] : tensor<128xf32, 2 : i64>
@@ -25,9 +25,9 @@ func.func @use_of_unknown_op_1(%t1: tensor<?xf32>)
 
   %idx = arith.constant 0 : index
   %cst = arith.constant 0.0 : f32
-  // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] : memref<?xf32, strided<[?], offset: ?>>
+  // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] : tensor<?xf32> to memref<?xf32, strided<[?], offset: ?>>
   // CHECK: vector.transfer_read %[[dummy_memref]][%{{.*}}], %{{.*}} : memref<?xf32, strided<[?], offset: ?>>
-  // CHECK-NO-LAYOUT-MAP: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] : memref<?xf32>
+  // CHECK-NO-LAYOUT-MAP: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] : tensor<?xf32> to memref<?xf32>
   // CHECK-NO-LAYOUT-MAP: vector.transfer_read %[[dummy_memref]][%{{.*}}], %{{.*}} : memref<?xf32>
   %1 = vector.transfer_read %0[%idx], %cst : tensor<?xf32>, vector<5xf32>
   return %1 : vector<5xf32>
@@ -61,7 +61,7 @@ func.func @use_of_unknown_op_3(%t1: tensor<?xf32>)
 
   // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[t1]])
   %0 = "test.dummy_op"(%t1) : (tensor<?xf32>) -> tensor<?xf32>
-  // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] : memref<?xf32, strided<[?], offset: ?>>
+  // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] : tensor<?xf32> to memref<?xf32, strided<[?], offset: ?>>
   // CHECK: %[[v2:.*]] = vector.transfer_read %[[dummy_memref]]
   %2 = vector.transfer_read %0[%idx], %cst : tensor<?xf32>, vector<5xf32>
 
@@ -134,7 +134,7 @@ func.func @copy_deallocated() -> tensor<10xf32> {
 // CHECK-LABEL: func @select_different_tensors(
 // CHECK-SAME: %[[t:.*]]: tensor<?xf32>
 func.func @select_different_tensors(%t: tensor<?xf32>, %sz: index, %pos: index, %c: i1) -> f32 {
-  // CHECK-DAG: %[[m:.*]] = bufferization.to_memref %[[t]] : memref<?xf32, strided{{.*}}>
+  // CHECK-DAG: %[[m:.*]] = bufferization.to_memref %[[t]] : tensor<?xf32> to memref<?xf32, strided{{.*}}>
   // CHECK-DAG: %[[alloc:.*]] = memref.alloc(%{{.*}}) {{.*}} : memref<?xf32>
   %0 = bufferization.alloc_tensor(%sz) : tensor<?xf32>
 
@@ -200,7 +200,7 @@ func.func @read_of_alias(%t: tensor<100xf32>, %pos1: index, %pos2: index,
 // CHECK-LABEL: func @from_unranked_to_unranked(
 // CHECK-SAME: %[[arg0:.*]]: tensor<*xi32>
 func.func @from_unranked_to_unranked(%arg0: tensor<*xi32>) -> tensor<*xi32> {
-  // CHECK: %[[m:.*]] = bufferization.to_memref %[[arg0]] : memref<*xi32>
+  // CHECK: %[[m:.*]] = bufferization.to_memref %[[arg0]] : tensor<*xi32> to memref<*xi32>
   // CHECK: %[[t:.*]] = bufferization.to_tensor %[[m]]
   // CHECK: return %[[t]] : tensor<*xi32>
   %0 = tensor.cast %arg0 : tensor<*xi32> to tensor<*xi32>
@@ -227,7 +227,7 @@ func.func @tensor_copy(%arg0: tensor<5xf32>) -> tensor<5xf32> {
 
 // CHECK-LABEL: func @materialize_in_destination_buffer(
 // CHECK-SAME: %[[t:.*]]: tensor<5xf32>, %[[m:.*]]: memref<5xf32>)
-// CHECK: %[[b:.*]] = bufferization.to_memref %[[t]] : memref<5xf32, strided<[?], offset: ?>>
+// CHECK: %[[b:.*]] = bufferization.to_memref %[[t]] : tensor<5xf32> to memref<5xf32, strided<[?], offset: ?>>
 // CHECK: memref.copy %[[b]], %[[m]]
 func.func @materialize_in_destination_buffer(%t: tensor<5xf32>, %m: memref<5xf32>) {
   bufferization.materialize_in_destination %t in restrict writable %m
@@ -1064,7 +1064,7 @@ func.func @main_func(%A : tensor<?xf32> {bufferization.writable = true},
 func.func @to_tensor_op_not_writable(%m: memref<?xf32>, %v: vector<5xf32>,
                                      %idx1: index, %idx2: index)
     -> vector<10xf32> {
-  %0 = bufferization.to_tensor %m restrict : memref<?xf32>
+  %0 = bufferization.to_tensor %m restrict : memref<?xf32> to tensor<?xf32>
 
   // Write to the tensor. Cannot be inplace due to tensor_load.
   // CHECK: vector.transfer_write
@@ -28,9 +28,9 @@ module {
   // CHECK_COPY: memref.copy
 
   func.func @contains_to_memref_op(%arg0: tensor<?xf32> {bufferization.writable = true}, %arg1: index) -> vector<5xf32> {
-    %0 = bufferization.to_memref %arg0 : memref<?xf32>
+    %0 = bufferization.to_memref %arg0 : tensor<?xf32> to memref<?xf32>
     %cst = arith.constant 0.000000e+00 : f32
     %1 = vector.transfer_read %0[%arg1], %cst : memref<?xf32>, vector<5xf32>
     return %1 : vector<5xf32>
   }
 }
}
@@ -76,7 +76,7 @@ func.func @scf_while_non_equiv_yield(%arg0: tensor<5xi1>,
 
 func.func @to_tensor_op_unsupported(%m: memref<?xf32>, %idx: index) -> (f32) {
   // expected-error @+1 {{to_tensor ops without `restrict` are not supported by One-Shot Analysis}}
-  %0 = bufferization.to_tensor %m : memref<?xf32>
+  %0 = bufferization.to_tensor %m : memref<?xf32> to tensor<?xf32>
 
   %1 = tensor.extract %0[%idx] : tensor<?xf32>
   return %1 : f32
@@ -679,7 +679,7 @@ func.func @to_memref_op_unsupported(
   // to_memref op.
   // CHECK: %[[alloc:.*]] = memref.alloc
   // CHECK: memref.copy %[[arg0]], %[[alloc]]
-  %0 = bufferization.to_memref %t1 : memref<?xf32>
+  %0 = bufferization.to_memref %t1 : tensor<?xf32> to memref<?xf32>
   // CHECK: "test.foo"(%[[alloc]])
   "test.foo"(%0) : (memref<?xf32>) -> ()
 
@@ -6,8 +6,8 @@
 // Basic folding of to_tensor(to_memref(t)) -> t
 // CHECK-LABEL: func @tensor_load_of_buffer_cast(
 func.func @tensor_load_of_buffer_cast(%arg0: tensor<?xf32>) -> tensor<?xf32> {
-  %0 = bufferization.to_memref %arg0 : memref<?xf32>
-  %1 = bufferization.to_tensor %0 : memref<?xf32>
+  %0 = bufferization.to_memref %arg0 : tensor<?xf32> to memref<?xf32>
+  %1 = bufferization.to_tensor %0 : memref<?xf32> to tensor<?xf32>
   return %1 : tensor<?xf32>
 }
 // CHECK-SAME: %[[TENSOR:.*]]: tensor<?xf32>) -> tensor<?xf32> {
@@ -18,8 +18,8 @@ func.func @tensor_load_of_buffer_cast(%arg0: tensor<?xf32>) -> tensor<?xf32> {
 // Basic folding of to_memref(to_tensor(m)) -> m
 // CHECK-LABEL: func @buffer_cast_of_tensor_load(
 func.func @buffer_cast_of_tensor_load(%arg0: memref<?xf32>) -> memref<?xf32> {
-  %0 = bufferization.to_tensor %arg0 : memref<?xf32>
-  %1 = bufferization.to_memref %0 : memref<?xf32>
+  %0 = bufferization.to_tensor %arg0 : memref<?xf32> to tensor<?xf32>
+  %1 = bufferization.to_memref %0 : tensor<?xf32> to memref<?xf32>
   return %1 : memref<?xf32>
 }
 // CHECK-SAME: %[[MEMREF:.*]]: memref<?xf32>) -> memref<?xf32> {
@@ -34,14 +34,14 @@ func.func @buffer_cast_of_tensor_load(%arg0: memref<?xf32>) -> memref<?xf32> {
 // CHECK-SAME: %[[MEMREF_ADDRSPACE2:.*]]: memref<?xf32, 2>)
 // CHECK-SAME: -> memref<?xf32, 7> {
 // CHECK: %[[TENSOR:.*]] = bufferization.to_tensor
-// CHECK-SAME: %[[MEMREF_ADDRSPACE2]] : memref<?xf32, 2>
+// CHECK-SAME: %[[MEMREF_ADDRSPACE2]] : memref<?xf32, 2> to tensor<?xf32, 7 : i64>
 // CHECK: %[[MEMREF_ADDRSPACE7:.*]] = bufferization.to_memref
-// CHECK-SAME: %[[TENSOR]] : memref<?xf32, 7>
+// CHECK-SAME: %[[TENSOR]] : tensor<?xf32, 7 : i64> to memref<?xf32, 7>
 // CHECK: return %[[MEMREF_ADDRSPACE7]]
 func.func @no_fold_buffer_cast_of_tensor_load(%arg0: memref<?xf32, 2>)
     -> memref<?xf32, 7> {
-  %0 = bufferization.to_tensor %arg0 : memref<?xf32, 2>
-  %1 = bufferization.to_memref %0 : memref<?xf32, 7>
+  %0 = bufferization.to_tensor %arg0 : memref<?xf32, 2> to tensor<?xf32, 7>
+  %1 = bufferization.to_memref %0 : tensor<?xf32, 7> to memref<?xf32, 7>
   return %1 : memref<?xf32, 7>
 }
 
@@ -61,8 +61,8 @@ func.func @canonicalize_buffer_cast_of_tensor_load(
     %arg0: memref<?xf32, strided<[1], offset: 3>>)
     -> memref<?xf32, strided<[1], offset: ?>>
 {
-  %0 = bufferization.to_tensor %arg0 : memref<?xf32, strided<[1], offset: 3>>
-  %1 = bufferization.to_memref %0 : memref<?xf32, strided<[1], offset: ?>>
+  %0 = bufferization.to_tensor %arg0 : memref<?xf32, strided<[1], offset: 3>> to tensor<?xf32>
+  %1 = bufferization.to_memref %0 : tensor<?xf32> to memref<?xf32, strided<[1], offset: ?>>
   return %1 : memref<?xf32, strided<[1], offset: ?>>
 }
 
@@ -74,8 +74,8 @@ func.func @canonicalize_buffer_cast_of_tensor_load(
 func.func @canonicalize_buffer_cast_of_tensor_load_to_copy(
     %arg0: memref<?xf32, strided<[1], offset: ?>>)
     -> memref<?xf32, strided<[1], offset: 3>> {
-  %0 = bufferization.to_tensor %arg0 : memref<?xf32, strided<[1], offset: ?>>
-  %1 = bufferization.to_memref %0 : memref<?xf32, strided<[1], offset: 3>>
+  %0 = bufferization.to_tensor %arg0 : memref<?xf32, strided<[1], offset: ?>> to tensor<?xf32>
+  %1 = bufferization.to_memref %0 : tensor<?xf32> to memref<?xf32, strided<[1], offset: 3>>
   return %1 : memref<?xf32, strided<[1], offset: 3>>
 }
 // CHECK-SAME: %[[M:.*]]: memref<?xf32, strided<[1], offset: ?>>)
@@ -100,7 +100,7 @@ func.func @canonicalize_buffer_cast_of_tensor_load_to_copy(
 // CHECK: return %[[D]] : index
 func.func @dim_of_tensor_load(%arg0: memref<?xf32>) -> index {
   %c0 = arith.constant 0 : index
-  %0 = bufferization.to_tensor %arg0 : memref<?xf32>
+  %0 = bufferization.to_tensor %arg0 : memref<?xf32> to tensor<?xf32>
   %1 = tensor.dim %0, %c0 : tensor<?xf32>
   return %1 : index
 }
@@ -252,10 +252,10 @@ func.func @clone_and_preceding_dealloc(%arg0: memref<?xf32>) -> memref<32xf32> {
 func.func @tensor_cast_to_memref(%arg0 : tensor<4x6x16x32xi8>) ->
   memref<?x?x16x32xi8> {
   %0 = tensor.cast %arg0 : tensor<4x6x16x32xi8> to tensor<?x?x16x32xi8>
-  %1 = bufferization.to_memref %0 : memref<?x?x16x32xi8>
+  %1 = bufferization.to_memref %0 : tensor<?x?x16x32xi8> to memref<?x?x16x32xi8>
   return %1 : memref<?x?x16x32xi8>
 }
-// CHECK: %[[M:.+]] = bufferization.to_memref %[[ARG0]] : memref<4x6x16x32xi8>
+// CHECK: %[[M:.+]] = bufferization.to_memref %[[ARG0]] : tensor<4x6x16x32xi8>
 // CHECK: %[[M1:.+]] = memref.cast %[[M]]
 // CHECK-SAME: memref<4x6x16x32xi8> to memref<?x?x16x32xi8>
 // CHECK: return %[[M1]] : memref<?x?x16x32xi8>
@@ -266,7 +266,7 @@ func.func @tensor_cast_to_memref(%arg0 : tensor<4x6x16x32xi8>) ->
 // CHECK-LABEL: func @load_from_buffer_cast(
 func.func @load_from_buffer_cast(%arg0: index, %arg1: index,
                                  %arg2: tensor<?x?xf32>) -> f32 {
-  %0 = bufferization.to_memref %arg2 : memref<?x?xf32>
+  %0 = bufferization.to_memref %arg2 : tensor<?x?xf32> to memref<?x?xf32>
   %1 = memref.load %0[%arg0, %arg1] : memref<?x?xf32>
   return %1 : f32
 }
@@ -15,15 +15,15 @@ func.func @test_clone(%buf : memref<*xf32>) -> memref<*xf32> {
 func.func @test_to_memref(%arg0: tensor<?xi64>, %arg1: tensor<*xi64>)
     -> (memref<?xi64, affine_map<(d0) -> (d0 + 7)>>, memref<*xi64, 1>) {
   %0 = bufferization.to_memref %arg0
-    : memref<?xi64, affine_map<(d0) -> (d0 + 7)>>
+    : tensor<?xi64> to memref<?xi64, affine_map<(d0) -> (d0 + 7)>>
   %1 = bufferization.to_memref %arg1
-    : memref<*xi64, 1>
+    : tensor<*xi64> to memref<*xi64, 1>
   return %0, %1 : memref<?xi64, affine_map<(d0) -> (d0 + 7)>>, memref<*xi64, 1>
 }
 
 // CHECK-LABEL: func @test_to_tensor
 func.func @test_to_tensor(%buf : memref<2xf32>) -> tensor<2xf32> {
-  %tensor = bufferization.to_tensor %buf restrict writable : memref<2xf32>
+  %tensor = bufferization.to_tensor %buf restrict writable : memref<2xf32> to tensor<2xf32>
   return %tensor : tensor<2xf32>
 }
 
@@ -3,7 +3,7 @@
 
 // CHECK-NO-FUNC-LABEL: func @br(
 // CHECK-NO-FUNC-SAME: %[[t:.*]]: tensor<5xf32>)
-// CHECK-NO-FUNC: %[[m:.*]] = bufferization.to_memref %[[t]] : memref<5xf32, strided<[?], offset: ?>>
+// CHECK-NO-FUNC: %[[m:.*]] = bufferization.to_memref %[[t]] : tensor<5xf32> to memref<5xf32, strided<[?], offset: ?>>
 // CHECK-NO-FUNC: %[[r:.*]] = scf.execute_region -> memref<5xf32, strided<[?], offset: ?>> {
 // CHECK-NO-FUNC: cf.br ^[[block:.*]](%[[m]]
 // CHECK-NO-FUNC: ^[[block]](%[[arg1:.*]]: memref<5xf32, strided<[?], offset: ?>>):
@@ -23,7 +23,7 @@ func.func @br(%t: tensor<5xf32>) {
 
 // CHECK-NO-FUNC-LABEL: func @cond_br(
 // CHECK-NO-FUNC-SAME: %[[t1:.*]]: tensor<5xf32>,
-// CHECK-NO-FUNC: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<5xf32, strided<[?], offset: ?>>
+// CHECK-NO-FUNC: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor<5xf32> to memref<5xf32, strided<[?], offset: ?>>
 // CHECK-NO-FUNC: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<5xf32>
 // CHECK-NO-FUNC: %[[r:.*]] = scf.execute_region -> memref<5xf32, strided<[?], offset: ?>> {
 // CHECK-NO-FUNC: cf.cond_br %{{.*}}, ^[[block1:.*]](%[[m1]] : {{.*}}), ^[[block2:.*]](%[[alloc]] : {{.*}})
@@ -12,7 +12,7 @@
 // CHECK: #map = affine_map<(d0) -> (d0)>
 // CHECK-LABEL: func @basic(
 // CHECK-SAME: %[[TENSOR:.*]]: tensor<4xf32>) -> tensor<4xf32> {
-// CHECK-DAG: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<4xf32>
+// CHECK-DAG: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor<4xf32> to memref<4xf32>
 // CHECK-DAG: %[[RESULT_MEMREF:.*]] = memref.alloc() {{.*}} : memref<4xf32>
 // CHECK: linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel"]}
 // CHECK-SAME: ins(%[[MEMREF]] : memref<4xf32>)
@@ -46,7 +46,7 @@ func.func @basic(%arg0: tensor<4xf32>) -> tensor<4xf32> {
 // CHECK: #map = affine_map<(d0) -> (d0)>
 // CHECK-LABEL: func @empty_tensor(
 // CHECK-SAME: %[[IN:.*]]: tensor<?xf32>, %[[SIZE:.*]]: index)
-// CHECK-DAG: %[[MEMREF:.*]] = bufferization.to_memref %[[IN]] : memref<?xf32>
+// CHECK-DAG: %[[MEMREF:.*]] = bufferization.to_memref %[[IN]] : tensor<?xf32> to memref<?xf32>
 // CHECK-DAG: %[[OUT_BUF:.*]] = memref.alloc(%[[SIZE]]) {{.*}} : memref<?xf32>
 // CHECK: linalg.generic
 // CHECK-SAME: ins(%[[MEMREF]] : memref<?xf32>)
@@ -105,7 +105,7 @@ func.func @multiple_results(%arg0: tensor<4xf32>) -> (tensor<4xf32>, tensor<4xf3
 // CHECK-DAG: %[[DIM1:.*]] = tensor.dim %[[ARG]], %[[C1]] : tensor<?x?xf32>
 // CHECK-DAG: %[[RESULT0:.*]] = memref.alloc(%[[DIM0]], %[[DIM1]]) {{.*}} : memref<?x?xf32>
 // CHECK-DAG: %[[RESULT1:.*]] = memref.alloc(%[[DIM0]], %[[DIM1]]) {{.*}} : memref<?x?xf32>
-// CHECK-DAG: %[[MEMREF_ARG:.*]] = bufferization.to_memref %[[ARG]] : memref<?x?xf32>
+// CHECK-DAG: %[[MEMREF_ARG:.*]] = bufferization.to_memref %[[ARG]] : tensor<?x?xf32> to memref<?x?xf32>
 // CHECK: linalg.generic
 // CHECK-SAME: ins(%[[MEMREF_ARG]] : memref<?x?xf32>)
 // CHECK-SAME: outs(%[[RESULT0]], %[[RESULT1]] : memref<?x?xf32>, memref<?x?xf32>)
@@ -141,8 +141,8 @@ func.func @dynamic_results(%arg0: tensor<?x?xf32>)
 // CHECK-SAME: %[[ARG0_TENSOR:.*]]: tensor<2x3x4xvector<3x4xi4>>,
 // CHECK-SAME: %[[ARG1_TENSOR:.*]]: tensor<3x2xf32>) -> tensor<3x2xf32> {
 // CHECK-DAG: %[[INIT_BUFFER:.*]] = memref.alloc() {{.*}} : memref<3x2xf32>
-// CHECK-DAG: %[[ARG0_MEMREF:.*]] = bufferization.to_memref %[[ARG0_TENSOR]] : memref<2x3x4xvector<3x4xi4>>
-// CHECK-DAG: %[[ARG1_MEMREF:.*]] = bufferization.to_memref %[[ARG1_TENSOR]] : memref<3x2xf32>
+// CHECK-DAG: %[[ARG0_MEMREF:.*]] = bufferization.to_memref %[[ARG0_TENSOR]] : tensor<2x3x4xvector<3x4xi4>>
+// CHECK-DAG: %[[ARG1_MEMREF:.*]] = bufferization.to_memref %[[ARG1_TENSOR]] : tensor<3x2xf32>
 // CHECK: memref.copy %[[ARG1_MEMREF]], %[[INIT_BUFFER]] : memref<3x2xf32> to memref<3x2xf32>
 // CHECK: linalg.generic
 // CHECK-SAME: ins(%[[ARG0_MEMREF]] : memref<2x3x4xvector<3x4xi4>>)
@@ -373,7 +373,7 @@ func.func @fill_pack_general() -> tensor<1x1x8x4x4x8xi32>{
   %9 = tensor.empty() : tensor<1x1x16x64xi32>
   %extracted_slice_15 = tensor.extract_slice %9[0, 0, 0, 0] [1, 1, 16, 64] [1, 1, 1, 1] : tensor<1x1x16x64xi32> to tensor<1x1x16x64xi32>
   %16 = linalg.fill ins(%c0_i32 : i32) outs(%extracted_slice_15 : tensor<1x1x16x64xi32>) -> tensor<1x1x16x64xi32>
-  %0 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x4x8xi32>
+  %0 = bufferization.to_tensor %alloc restrict writable : memref<1x1x8x4x4x8xi32> to tensor<1x1x8x4x4x8xi32>
   %pack_18 = tensor.pack %16 outer_dims_perm = [0, 1, 3, 2] inner_dims_pos = [2, 3] inner_tiles = [4, 8] into %0 : tensor<1x1x16x64xi32> -> tensor<1x1x8x4x4x8xi32>
   return %pack_18 : tensor<1x1x8x4x4x8xi32>
 }
@@ -921,7 +921,7 @@ func.func @erase_non_identity_noop(%arg0 : tensor<?x?xf32>, %arg1: tensor<?x?xf3
   ^bb0(%in: f32, %out: f32):
     linalg.yield %in: f32
   } -> tensor<?x?xf32>
-  return %0 : tensor<?x?xf32>
+  return %0 : tensor<?x?xf32>
 }
 
 // Do not erase ops with buffer semantics.
@@ -1073,8 +1073,8 @@ func.func @transpose_identity_perm(%input: tensor<16x32x64xf32>,
 
 // -----
 
-func.func @transpose_transpose_cancel(%input: tensor<5x4x3xf32>,
-                                      %init1: tensor<4x3x5xf32>,
+func.func @transpose_transpose_cancel(%input: tensor<5x4x3xf32>,
+                                      %init1: tensor<4x3x5xf32>,
                                       %init2: tensor<5x4x3xf32>) -> tensor<5x4x3xf32> {
   // CHECK-LABEL: @transpose_transpose_cancel
   // CHECK-SAME: %[[INPUT:[a-zA-Z0-9]+]]: tensor<5x4x3xf32>
@@ -360,11 +360,11 @@ func.func @neg_map() -> memref<2x3xf32, #neg> {
 // CHECK-LABEL: func @memref_with_strided_offset
 func.func @memref_with_strided_offset(%arg0: tensor<128x512xf32>, %arg1: index, %arg2: index) -> tensor<16x512xf32> {
   %c0 = arith.constant 0 : index
-  %0 = bufferization.to_memref %arg0 : memref<128x512xf32, strided<[?, ?], offset: ?>>
+  %0 = bufferization.to_memref %arg0 : tensor<128x512xf32> to memref<128x512xf32, strided<[?, ?], offset: ?>>
   %subview = memref.subview %0[%arg2, 0] [%arg1, 512] [1, 1] : memref<128x512xf32, strided<[?, ?], offset: ?>> to memref<?x512xf32, strided<[?, ?], offset: ?>>
   // CHECK: %{{.*}} = memref.cast %{{.*}} : memref<?x512xf32, strided<[?, ?], offset: ?>> to memref<16x512xf32, strided<[?, ?], offset: ?>>
   %cast = memref.cast %subview : memref<?x512xf32, strided<[?, ?], offset: ?>> to memref<16x512xf32, strided<[?, ?], offset: ?>>
-  %1 = bufferization.to_tensor %cast : memref<16x512xf32, strided<[?, ?], offset: ?>>
+  %1 = bufferization.to_tensor %cast : memref<16x512xf32, strided<[?, ?], offset: ?>> to tensor<16x512xf32>
   return %1 : tensor<16x512xf32>
 }
 
@@ -4,8 +4,8 @@
 // CHECK-SAME: %[[PRED:.*]]: i1,
 // CHECK-SAME: %[[TRUE_TENSOR:.*]]: tensor<?xf32>,
 // CHECK-SAME: %[[FALSE_TENSOR:.*]]: tensor<?xf32>) -> tensor<?xf32> {
-// CHECK-DAG: %[[TRUE_MEMREF:.*]] = bufferization.to_memref %[[TRUE_TENSOR]] : memref<?xf32>
-// CHECK-DAG: %[[FALSE_MEMREF:.*]] = bufferization.to_memref %[[FALSE_TENSOR]] : memref<?xf32>
+// CHECK-DAG: %[[TRUE_MEMREF:.*]] = bufferization.to_memref %[[TRUE_TENSOR]] : tensor<?xf32> to memref<?xf32>
+// CHECK-DAG: %[[FALSE_MEMREF:.*]] = bufferization.to_memref %[[FALSE_TENSOR]] : tensor<?xf32> to memref<?xf32>
 // CHECK: %[[RESULT_MEMREF:.*]] = scf.if %[[PRED]] -> (memref<?xf32>) {
 // CHECK: scf.yield %[[TRUE_MEMREF]] : memref<?xf32>
 // CHECK: } else {
@@ -29,7 +29,7 @@ func.func @if(%pred: i1, %true_val: tensor<?xf32>, %false_val: tensor<?xf32>) ->
 // CHECK-SAME: %[[TENSOR:.*]]: tensor<f32>,
 // CHECK-SAME: %[[LB:.*]]: index, %[[UB:.*]]: index,
 // CHECK-SAME: %[[STEP:.*]]: index) -> tensor<f32> {
-// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<f32>
+// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor<f32> to memref<f32>
 // Note: scf.for iter_args always bufferize to a memory write. This could be
 // optimized by analyzing the loop body.
 // CHECK: %[[MEMREF_COPY:.*]] = memref.alloc()
@@ -70,7 +70,7 @@ func.func @if_correct_recursive_legalization_behavior(%pred: i1, %tensor: tensor
 // CHECK-LABEL: func @for_correct_recursive_legalization_behavior(
 // CHECK-SAME: %[[TENSOR:.*]]: tensor<f32>,
 // CHECK-SAME: %[[INDEX:.*]]: index) -> tensor<f32> {
-// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<f32>
+// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor<f32> to memref<f32>
 // Note: scf.for iter_args always bufferize to a memory write. This could be
 // optimized by analyzing the loop body.
 // CHECK: %[[MEMREF_COPY:.*]] = memref.alloc()
@@ -78,7 +78,7 @@ func.func @if_correct_recursive_legalization_behavior(%pred: i1, %tensor: tensor
 // CHECK: %[[RESULT:.*]] = scf.for %{{.*}} = %[[INDEX]] to %[[INDEX]] step %[[INDEX]] iter_args(%[[MEMREF_ITER:.*]] = %[[MEMREF_COPY]]) -> (memref<f32>) {
 // CHECK: %[[TENSOR_ITER:.*]] = bufferization.to_tensor %[[MEMREF_ITER]] : memref<f32>
 // CHECK: %[[TENSOR_MUNGED:.*]] = "test.munge_tensor"(%[[TENSOR_ITER]]) : (tensor<f32>) -> tensor<f32>
-// CHECK: %[[MEMREF_MUNGED:.*]] = bufferization.to_memref %[[TENSOR_MUNGED]] : memref<f32>
+// CHECK: %[[MEMREF_MUNGED:.*]] = bufferization.to_memref %[[TENSOR_MUNGED]] : tensor<f32> to memref<f32>
 // CHECK: scf.yield %[[MEMREF_MUNGED]] : memref<f32>
 // CHECK: }
 // CHECK: %[[TENSOR:.*]] = bufferization.to_tensor %[[RESULT]] : memref<f32>
@@ -96,7 +96,7 @@ func.func @for_correct_recursive_legalization_behavior(%arg0: tensor<f32>, %inde
 
 // CHECK-LABEL: func @bufferize_while(
 // CHECK-SAME: %[[ARG0:.*]]: i64, %[[ARG1:.*]]: i64, %[[ARG2:.*]]: tensor<f32>
-// CHECK: %[[M:.*]] = bufferization.to_memref %[[ARG2]] : memref<f32>
+// CHECK: %[[M:.*]] = bufferization.to_memref %[[ARG2]] : tensor<f32> to memref<f32>
 // Note: scf.while iter_args always bufferize to a memory write. This could be
 // optimized by analyzing the loop body.
 // CHECK: %[[MEMREF_COPY:.*]] = memref.alloc()
mlir/test/Dialect/SCF/one-shot-bufferize-encodings.mlir (new file, 73 lines)
@@ -0,0 +1,73 @@
+// RUN: mlir-opt %s -one-shot-bufferize="use-encoding-for-memory-space allow-return-allocs-from-loops allow-unknown-ops" -allow-unregistered-dialect -split-input-file | FileCheck %s
+
+// Here and below, unknown op 'some.use' will force 'bufferization.to_tensor' operations to remain in the body,
+// allowing us to check that the encoding on the '%iter' tensor is correctly preserved.
+
+func.func @scf_for_iter_arg(%arg0: tensor<128xf32, 1>, %arg1: index, %arg2: index, %arg3: index) -> tensor<128xf32, 1> {
+  %0 = scf.for %i = %arg1 to %arg2 step %arg3 iter_args(%iter = %arg0) -> tensor<128xf32, 1> {
+    %0 = "some.use"(%iter) : (tensor<128xf32, 1>) -> tensor<128xf32, 1>
+    scf.yield %0 : tensor<128xf32, 1>
+  }
+  return %0 : tensor<128xf32, 1>
+}
+
+// CHECK-LABEL: func.func @scf_for_iter_arg
+// CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32, 1 : i64>, %[[arg1:.+]]: index, %[[arg2:.+]]: index, %[[arg3:.+]]: index)
+// CHECK: %[[v0:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1>
+// CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 1>
+// CHECK: memref.copy %[[v0]], %[[alloc]] : memref<128xf32, strided<[?], offset: ?>, 1> to memref<128xf32, 1>
+// CHECK: %[[cast:.+]] = memref.cast %[[alloc]] : memref<128xf32, 1> to memref<128xf32, strided<[?], offset: ?>, 1>
+// CHECK: %[[v1:.+]] = scf.for %{{.+}} = %[[arg1]] to %[[arg2]] step %[[arg3]] iter_args(%[[arg6:.+]] = %[[cast]]) -> (memref<128xf32, strided<[?], offset: ?>, 1>)
+// CHECK-NEXT: %[[v3:.+]] = bufferization.to_tensor %[[arg6]] : memref<128xf32, strided<[?], offset: ?>, 1> to tensor<128xf32, 1 : i64>
+// CHECK-NEXT: %[[v4:.+]] = "some.use"(%[[v3]]) : (tensor<128xf32, 1 : i64>) -> tensor<128xf32, 1 : i64>
+// CHECK-NEXT: %[[v5:.+]] = bufferization.to_memref %[[v4]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1>
+// CHECK-NEXT: scf.yield %[[v5]] : memref<128xf32, strided<[?], offset: ?>, 1>
+// CHECK: %[[v2:.+]] = bufferization.to_tensor %[[v1]] : memref<128xf32, strided<[?], offset: ?>, 1> to tensor<128xf32, 1 : i64>
+// CHECK: return %[[v2]] : tensor<128xf32, 1 : i64>
+
+// -----
+
+func.func @scf_forall(
+    %idx: index,
+    %idx2: index,
+    %arg1: tensor<?xf32, 1>,
+    %arg2: tensor<?xf32, 1>) -> (tensor<?xf32, 1>) {
+  %cst = arith.constant 4.200000e+01 : f32
+  %c0 = arith.constant 0 : index
+  %c1 = arith.constant 1 : index
+  %2 = scf.forall (%arg3) in (%idx2) shared_outs(%o = %arg2) -> (tensor<?xf32, 1>) {
+    %8 = "some.use"(%o) : (tensor<?xf32, 1>) -> tensor<?xf32, 1>
+    scf.forall.in_parallel {
+      tensor.parallel_insert_slice %8 into %o[5] [%idx] [%c1] :
+        tensor<?xf32, 1> into tensor<?xf32, 1>
+    }
+  }
+  return %2 : tensor<?xf32, 1>
+}
+
+// CHECK-LABEL: func.func @scf_forall
+// CHECK: scf.forall
+// CHECK: %[[v2:.+]] = bufferization.to_tensor %{{.+}} : memref<?xf32, 1> to tensor<?xf32, 1 : i64>
+// CHECK: %[[v3:.+]] = "some.use"(%[[v2]]) : (tensor<?xf32, 1 : i64>) -> tensor<?xf32, 1 : i64>
+// CHECK: bufferization.to_memref %[[v3]] : tensor<?xf32, 1 : i64> to memref<?xf32, strided<[?], offset: ?>, 1>
+// CHECK: %[[v1:.+]] = bufferization.to_tensor %{{.+}} : memref<?xf32, 1> to tensor<?xf32, 1 : i64>
+// CHECK: return %[[v1]] : tensor<?xf32, 1 : i64>
+
+// -----
+
+func.func @scf_execute_region(%arg0: tensor<128xf32, 1>) -> tensor<128xf32, 1> {
+  %0 = scf.execute_region -> tensor<128xf32, 1> {
+    scf.yield %arg0 : tensor<128xf32, 1>
+  }
+  %1 = "some.use"(%0) : (tensor<128xf32, 1>) -> tensor<128xf32, 1>
+  return %1 : tensor<128xf32, 1>
+}
+
+// CHECK-LABEL: func.func @scf_execute_region
+// CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32, 1 : i64>)
+// CHECK: %[[v0:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1>
+// CHECK: %[[v1:.+]] = scf.execute_region -> memref<128xf32, strided<[?], offset: ?>, 1>
+// CHECK: scf.yield %[[v0]] : memref<128xf32, strided<[?], offset: ?>, 1>
+// CHECK: %[[v2:.+]] = bufferization.to_tensor %[[v1]] : memref<128xf32, strided<[?], offset: ?>, 1> to tensor<128xf32, 1 : i64>
+// CHECK: %[[v3:.+]] = "some.use"(%[[v2]]) : (tensor<128xf32, 1 : i64>) -> tensor<128xf32, 1 : i64>
+// CHECK: return %[[v3]] : tensor<128xf32, 1 : i64>
@@ -6,7 +6,7 @@
 // CHECK: %[[WTRUE:.*]] = shape.const_witness true
 // CHECK: %[[MEMREF:.*]] = shape.assuming %[[WTRUE]] -> (memref<2xf16>) {
 // CHECK: %[[TENSOR_VAL:.*]] = "test.source"() : () -> tensor<2xf16>
-// CHECK: %[[YIELDED_MEMREF:.*]] = bufferization.to_memref %[[TENSOR_VAL]] : memref<2xf16>
+// CHECK: %[[YIELDED_MEMREF:.*]] = bufferization.to_memref %[[TENSOR_VAL]] : tensor<2xf16> to memref<2xf16>
 // CHECK: shape.assuming_yield %[[YIELDED_MEMREF]] : memref<2xf16>
 // CHECK: }
 // CHECK: %[[TENSOR:.*]] = bufferization.to_tensor %[[MEMREF:.*]] : memref<2xf16>
@@ -14,19 +14,19 @@
 // CHECK-SAME: %[[VAL_2:.*2]]: tensor<?x?xf16>) -> tensor<?x?xf16> {
 // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
-// CHECK: %[[VAL_5:.*]] = bufferization.to_memref %[[VAL_0]] : memref<?x?xf16>
+// CHECK: %[[VAL_5:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<?x?xf16> to memref<?x?xf16>
 // CHECK: %[[VAL_6:.*]] = gpu.wait async
 // CHECK: %[[VAL_7:.*]] = memref.dim %[[VAL_5]], %[[VAL_3]] : memref<?x?xf16>
 // CHECK: %[[VAL_8:.*]] = memref.dim %[[VAL_5]], %[[VAL_4]] : memref<?x?xf16>
 // CHECK: %[[VAL_9:.*]], %[[VAL_10:.*]] = gpu.alloc async {{\[}}%[[VAL_6]]] (%[[VAL_7]], %[[VAL_8]]) : memref<?x?xf16>
 // CHECK: %[[VAL_11:.*]] = gpu.memcpy async {{\[}}%[[VAL_10]]] %[[VAL_9]], %[[VAL_5]] : memref<?x?xf16>, memref<?x?xf16>
-// CHECK: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : memref<?x?xf16>
+// CHECK: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<?x?xf16> to memref<?x?xf16>
 // CHECK: %[[VAL_13:.*]] = gpu.wait async
 // CHECK: %[[VAL_14:.*]] = memref.dim %[[VAL_12]], %[[VAL_3]] : memref<?x?xf16>
 // CHECK: %[[VAL_15:.*]] = memref.dim %[[VAL_12]], %[[VAL_4]] : memref<?x?xf16>
 // CHECK: %[[VAL_16:.*]], %[[VAL_17:.*]] = gpu.alloc async {{\[}}%[[VAL_13]]] (%[[VAL_14]], %[[VAL_15]]) : memref<?x?xf16>
 // CHECK: %[[VAL_18:.*]] = gpu.memcpy async {{\[}}%[[VAL_17]]] %[[VAL_16]], %[[VAL_12]] : memref<?x?xf16>, memref<?x?xf16>
-// CHECK: %[[VAL_19:.*]] = bufferization.to_memref %[[VAL_2]] : memref<?x?xf16>
+// CHECK: %[[VAL_19:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<?x?xf16> to memref<?x?xf16>
 // CHECK: %[[VAL_20:.*]] = gpu.wait async
 // CHECK: %[[VAL_21:.*]] = memref.dim %[[VAL_19]], %[[VAL_3]] : memref<?x?xf16>
 // CHECK: %[[VAL_22:.*]] = memref.dim %[[VAL_19]], %[[VAL_4]] : memref<?x?xf16>
@@ -30,13 +30,13 @@
 // CHECK: %[[VAL_23:.*]] = memref.dim %[[VAL_11]], %[[VAL_3]] : memref<?xf64>
 // CHECK: %[[VAL_24:.*]], %[[VAL_25:.*]] = gpu.alloc async {{\[}}%[[VAL_22]]] (%[[VAL_23]]) : memref<?xf64>
 // CHECK: %[[VAL_26:.*]] = gpu.memcpy async {{\[}}%[[VAL_25]]] %[[VAL_24]], %[[VAL_11]] : memref<?xf64>, memref<?xf64>
-// CHECK: %[[VAL_27:.*]] = bufferization.to_memref %[[VAL_1]] : memref<?x?xf64>
+// CHECK: %[[VAL_27:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<?x?xf64> to memref<?x?xf64>
 // CHECK: %[[VAL_28:.*]] = gpu.wait async
 // CHECK: %[[VAL_29:.*]] = memref.dim %[[VAL_27]], %[[VAL_3]] : memref<?x?xf64>
 // CHECK: %[[VAL_30:.*]] = memref.dim %[[VAL_27]], %[[VAL_4]] : memref<?x?xf64>
 // CHECK: %[[VAL_31:.*]], %[[VAL_32:.*]] = gpu.alloc async {{\[}}%[[VAL_28]]] (%[[VAL_29]], %[[VAL_30]]) : memref<?x?xf64>
 // CHECK: %[[VAL_33:.*]] = gpu.memcpy async {{\[}}%[[VAL_32]]] %[[VAL_31]], %[[VAL_27]] : memref<?x?xf64>, memref<?x?xf64>
-// CHECK: %[[VAL_34:.*]] = bufferization.to_memref %[[VAL_2]] : memref<?x?xf64>
+// CHECK: %[[VAL_34:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<?x?xf64> to memref<?x?xf64>
 // CHECK: %[[VAL_35:.*]] = gpu.wait async
 // CHECK: %[[VAL_36:.*]] = memref.dim %[[VAL_34]], %[[VAL_3]] : memref<?x?xf64>
 // CHECK: %[[VAL_37:.*]] = memref.dim %[[VAL_34]], %[[VAL_4]] : memref<?x?xf64>
@@ -30,12 +30,12 @@ module {
 // CHECK: %[[VAL_22:.*]] = memref.dim %[[VAL_10]], %[[VAL_3]] : memref<?xf64>
 // CHECK: %[[VAL_23:.*]], %[[VAL_24:.*]] = gpu.alloc async {{\[}}%[[VAL_21]]] (%[[VAL_22]]) : memref<?xf64>
 // CHECK: %[[VAL_25:.*]] = gpu.memcpy async {{\[}}%[[VAL_24]]] %[[VAL_23]], %[[VAL_10]] : memref<?xf64>, memref<?xf64>
-// CHECK: %[[VAL_26:.*]] = bufferization.to_memref %[[VAL_1]] : memref<?xf64>
+// CHECK: %[[VAL_26:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<?xf64> to memref<?xf64>
 // CHECK: %[[VAL_27:.*]] = gpu.wait async
 // CHECK: %[[VAL_28:.*]] = memref.dim %[[VAL_26]], %[[VAL_3]] : memref<?xf64>
 // CHECK: %[[VAL_29:.*]], %[[VAL_30:.*]] = gpu.alloc async {{\[}}%[[VAL_27]]] (%[[VAL_28]]) : memref<?xf64>
 // CHECK: %[[VAL_31:.*]] = gpu.memcpy async {{\[}}%[[VAL_30]]] %[[VAL_29]], %[[VAL_26]] : memref<?xf64>, memref<?xf64>
-// CHECK: %[[VAL_32:.*]] = bufferization.to_memref %[[VAL_2]] : memref<?xf64>
+// CHECK: %[[VAL_32:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<?xf64> to memref<?xf64>
 // CHECK: %[[VAL_33:.*]] = gpu.wait async
 // CHECK: %[[VAL_34:.*]] = memref.dim %[[VAL_32]], %[[VAL_3]] : memref<?xf64>
 // CHECK: %[[VAL_35:.*]], %[[VAL_36:.*]] = gpu.alloc async {{\[}}%[[VAL_33]]] (%[[VAL_34]]) : memref<?xf64>
@@ -28,11 +28,11 @@
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 8 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_5:.*]] = sparse_tensor.number_of_entries %[[VAL_0]] : tensor<8x8xf64, #sparse{{[0-9]*}}>
// CHECK: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_1]] : memref<8x8xf64>
// CHECK: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64>
// CHECK: %[[VAL_7:.*]] = gpu.wait async
// CHECK: %[[VAL_8:.*]], %[[VAL_9:.*]] = gpu.alloc async {{\[}}%[[VAL_7]]] () : memref<8x8xf64>
// CHECK: %[[VAL_10:.*]] = gpu.memcpy async {{\[}}%[[VAL_9]]] %[[VAL_8]], %[[VAL_6]] : memref<8x8xf64>, memref<8x8xf64>
// CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<8x8xf64>
// CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<8x8xf64> to memref<8x8xf64>
// CHECK: %[[VAL_12:.*]] = gpu.wait async
// CHECK: %[[VAL_13:.*]], %[[VAL_14:.*]] = gpu.alloc async {{\[}}%[[VAL_12]]] () : memref<8x8xf64>
// CHECK: %[[VAL_15:.*]] = gpu.memcpy async {{\[}}%[[VAL_14]]] %[[VAL_13]], %[[VAL_11]] : memref<8x8xf64>, memref<8x8xf64>

@@ -30,13 +30,13 @@
// CHECK: %[[VAL_8:.*]] = tensor.dim %[[VAL_1]], %[[VAL_3]] : tensor<?x?xf32>
// CHECK: %[[VAL_9:.*]] = tensor.dim %[[VAL_1]], %[[VAL_4]] : tensor<?x?xf32>
// CHECK: %[[VAL_10:.*]] = tensor.dim %[[VAL_2]], %[[VAL_4]] : tensor<?x?xf32>
// CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<?x?xf32>
// CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<?x?xf32> to memref<?x?xf32>
// CHECK: %[[VAL_12:.*]] = gpu.wait async
// CHECK: %[[VAL_13:.*]] = memref.dim %[[VAL_11]], %[[VAL_3]] : memref<?x?xf32>
// CHECK: %[[VAL_14:.*]] = memref.dim %[[VAL_11]], %[[VAL_4]] : memref<?x?xf32>
// CHECK: %[[VAL_15:.*]], %[[VAL_16:.*]] = gpu.alloc async {{\[}}%[[VAL_12]]] (%[[VAL_13]], %[[VAL_14]]) : memref<?x?xf32>
// CHECK: %[[VAL_17:.*]] = gpu.memcpy async {{\[}}%[[VAL_16]]] %[[VAL_15]], %[[VAL_11]] : memref<?x?xf32>, memref<?x?xf32>
// CHECK: %[[VAL_18:.*]] = bufferization.to_memref %[[VAL_2]] : memref<?x?xf32>
// CHECK: %[[VAL_18:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<?x?xf32> to memref<?x?xf32>
// CHECK: %[[VAL_19:.*]] = gpu.wait async
// CHECK: %[[VAL_20:.*]] = memref.dim %[[VAL_18]], %[[VAL_3]] : memref<?x?xf32>
// CHECK: %[[VAL_21:.*]] = memref.dim %[[VAL_18]], %[[VAL_4]] : memref<?x?xf32>

@@ -14,8 +14,8 @@
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_5:.*]] = tensor.empty() : tensor<77xi1, #{{.*}}>
// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<1x77xi1>
// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<1x77xi1>
// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<1x77xi1>
// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<1x77xi1>
// CHECK: %[[VAL_8:.*]] = scf.for %[[VAL_9:.*]] = %[[VAL_3]] to %[[VAL_2]] step %[[VAL_4]] iter_args(%[[VAL_10:.*]] = %[[VAL_5]]) -> (tensor<77xi1, #{{.*}}>) {
// CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]], %[[VAL_9]]] : memref<1x77xi1>
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_3]], %[[VAL_9]]] : memref<1x77xi1>

@@ -40,7 +40,7 @@
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32>
// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
// CHECK: %[[VAL_11:.*]] = arith.muli %[[VAL_9]], %[[VAL_4]] : index
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
@@ -79,7 +79,7 @@ func.func @dense1(%arga: tensor<32x16xf32, #DenseMatrix>,
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 16 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : memref<32x16xf32>
// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
// CHECK: %[[VAL_11:.*]] = arith.muli %[[VAL_9]], %[[VAL_4]] : index
@@ -122,7 +122,7 @@ func.func @dense2(%arga: tensor<32x16xf32>,
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 16 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : memref<32x16x8xf32>
// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
// CHECK: %[[VAL_11:.*]] = arith.muli %[[VAL_9]], %[[VAL_4]] : index

@@ -30,7 +30,7 @@
// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<4x4xf32, #sparse> to memref<?xindex>
// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<4x4xf32, #sparse> to memref<?xindex>
// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<4x4xf32, #sparse> to memref<?xf32>
// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_10]] : memref<8x8xf32>
// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_10]] :
// CHECK-DAG: linalg.fill ins(%[[VAL_8]] : f32) outs(%[[VAL_14]] : memref<8x8xf32>)
// CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_5]] {
// CHECK: %[[VAL_16:.*]] = arith.subi %[[VAL_15]], %[[VAL_7]] : index
@@ -54,7 +54,7 @@
// CHECK: memref.store %[[VAL_30]], %[[VAL_14]]{{\[}}%[[VAL_15]], %[[VAL_27]]] : memref<8x8xf32>
// CHECK: } {"Emitted from" = "linalg.generic"}
// CHECK: } {"Emitted from" = "linalg.generic"}
// CHECK: %[[VAL_31:.*]] = bufferization.to_tensor %[[VAL_14]] : memref<8x8xf32>
// CHECK: %[[VAL_31:.*]] = bufferization.to_tensor %[[VAL_14]] :
// CHECK: return %[[VAL_31]] : tensor<8x8xf32>
// CHECK: }
func.func @padded_mul(%arg0: tensor<4x4xf32, #CSR>, %arg1: tensor<8x8xf32>) -> tensor<8x8xf32> {

@@ -101,7 +101,7 @@ func.func @sparse_scale(%argx: tensor<?x?xf32, #SortedCOO>) -> tensor<?x?xf32, #
// C_HECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> to memref<?xindex, strided<[?], offset: ?>>
// C_HECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> to memref<?xindex, strided<[?], offset: ?>>
// C_HECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// C_HECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// C_HECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64>
// C_HECK: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// C_HECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
// C_HECK: %[[VAL_13:.*]] = scf.while (%[[VAL_14:.*]] = %[[VAL_11]]) : (index) -> index {
@@ -170,7 +170,7 @@ func.func @matvec(%arga: tensor<32x64xf64, #SortedCOO>,
// C_HECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> to memref<?xindex, strided<[?], offset: ?>>
// C_HECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> to memref<?xindex, strided<[?], offset: ?>>
// C_HECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x64xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// C_HECK: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x64xf64>
// C_HECK: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x64xf64> to memref<32x64xf64>
// C_HECK: linalg.fill ins(%[[VAL_4]] : f64) outs(%[[VAL_15]] : memref<32x64xf64>)
// C_HECK: %[[VAL_16:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref<?xindex>
// C_HECK: %[[VAL_17:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>

@@ -51,7 +51,7 @@ func.func @add_d(%arga: tensor<32xf32, #DV>, %argb: f32, %argx: tensor<32xf32>)
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_INITTENSOR:.*]] = tensor.empty() : tensor<32xf32>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_INITTENSOR]] : memref<32xf32>
// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_INITTENSOR]] : tensor<32xf32> to memref<32xf32>
// CHECK: linalg.fill ins(%[[VAL_3]] : f32) outs(%[[VAL_7]] : memref<32xf32>)
// CHECK: scf.for %[[VAL_8:.*]] = %[[VAL_4]] to %[[VAL_2]] step %[[VAL_5]] {
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_8]]] : memref<?xf32>
@@ -247,7 +247,7 @@ func.func @mul_s(%arga: tensor<32xf32, #SV>, %argb: f32, %argx: tensor<32xf32>)
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf32>
// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf32> to memref<32xf32>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]]
// CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_9]] : memref<32xf32>)
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
@@ -278,7 +278,7 @@ func.func @add_dd(%arga: tensor<32xf32, #DV>, %argb: tensor<32xf32>, %argx: tens
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf32>
// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf32> to memref<32xf32>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]]
// CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_9]] : memref<32xf32>)
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
@@ -309,7 +309,7 @@ func.func @mul_dd(%arga: tensor<32xf32, #DV>, %argb: tensor<32xf32>, %argx: tens
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant true
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : memref<32xf32>
// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<32xf32> to memref<32xf32>
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref<?xf32>
@@ -366,7 +366,7 @@ func.func @add_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32, #SV>, %argx: tens
// CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_5:.*]] = bufferization.to_memref %[[VAL_0]] : memref<32xf32>
// CHECK-DAG: %[[VAL_5:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<32xf32> to memref<32xf32>
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref<?xf32>
@@ -406,7 +406,7 @@ func.func @mul_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32, #SV>, %argx: tens
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf32>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf32> to memref<32xf32>
// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]]
// CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_12]] : memref<32xf32>)
// CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
@@ -463,7 +463,7 @@ func.func @add_sd(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32>, %argx: tens
// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf32>
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf32> to memref<32xf32>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]]
// CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_10]] : memref<32xf32>)
// CHECK-DAG: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
@@ -830,7 +830,7 @@ func.func @two_way_inv_alt(%arga: tensor<16xf32, #SV>,
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<?xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_1]] : memref<f32>
// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<f32> to memref<f32>
// CHECK-DAG: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK-DAG: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK-DAG: %[[VAL_10:.*]] = memref.load %[[VAL_6]][] : memref<f32>
@@ -875,7 +875,7 @@ func.func @sum_reduction(%arga: tensor<?xf32, #SV>, %argx: tensor<f32>) -> tenso
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<f32>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<f32> to memref<f32>
// CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_11]][] : memref<f32>
// CHECK-DAG: %[[VAL_14:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK-DAG: %[[VAL_15:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
@@ -977,11 +977,11 @@ func.func @sum_reduction_ss(%arga: tensor<16xf32, #SV>,
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<f32>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<f32> to memref<f32>
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_3]] : memref<f32>
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_3]] : tensor<f32> to memref<f32>
// CHECK-DAG: %[[VAL_15:.*]] = memref.load %[[VAL_13]][] : memref<f32>
// CHECK-DAG: %[[VAL_16:.*]] = memref.load %[[VAL_9]][] : memref<f32>
// CHECK-DAG: %[[VAL_17:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
@@ -1089,11 +1089,11 @@ func.func @sum_reduction_inv(%arga: tensor<16xf32, #SV>,
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant true
// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref<?xf64>
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<?xf64> to memref<?xf64>
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<?xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<?xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<?xf64>
// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<?xf64> to memref<?xf64>
// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_3]] {level = 0 : index} : tensor<?xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_3]] {level = 0 : index} : tensor<?xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.values %[[VAL_3]] : tensor<?xf64, #sparse{{[0-9]*}}> to memref<?xf64>
@@ -1272,7 +1272,7 @@ func.func @four_tensors_op(%arga: tensor<?xf64>,
// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 0 : index} : tensor<?xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 0 : index} : tensor<?xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<?xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_3]] : memref<f64>
// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_3]] : tensor<f64> to memref<f64>
// CHECK-DAG: %[[VAL_17:.*]] = memref.load %[[VAL_15]][] : memref<f64>
// CHECK-DAG: %[[VAL_18:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK-DAG: %[[VAL_19:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>

@@ -25,8 +25,8 @@
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_10]] : memref<32x16xf32>)
// CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
// CHECK: %[[VAL_13:.*]] = arith.muli %[[VAL_11]], %[[VAL_4]] : index
@@ -62,8 +62,8 @@ func.func @add_dd(%arga: tensor<32x16xf32, #Tdd>, %argb: tensor<32x16xf32>, %arg
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xi1>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1>
// CHECK: linalg.fill ins(%[[VAL_5]] : i1) outs(%[[VAL_10]] : memref<32x16xi1>)
// CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_6]] to %[[VAL_3]] step %[[VAL_7]] {
// CHECK: %[[VAL_13:.*]] = arith.muli %[[VAL_11]], %[[VAL_4]] : index
@@ -98,8 +98,8 @@ func.func @cmp_dd(%arga: tensor<32x16xf32, #Tdd>, %argb: tensor<32x16xf32>, %arg
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_10]] : memref<32x16xf32>)
// CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
// CHECK: %[[VAL_13:.*]] = arith.muli %[[VAL_11]], %[[VAL_4]] : index
@@ -137,8 +137,8 @@ func.func @mul_dd(%arga: tensor<32x16xf32, #Tdd>, %argb: tensor<32x16xf32>, %arg
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_13]] : memref<32x16xf32>)
// CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_7]] {
// CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_14]]] : memref<?xindex>
@@ -202,8 +202,8 @@ func.func @add_ds(%arga: tensor<32x16xf32, #Tds>, %argb: tensor<32x16xf32>, %arg
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xi1>
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1>
// CHECK-DAG: linalg.fill ins(%[[VAL_5]] : i1) outs(%[[VAL_14]] : memref<32x16xi1>)
// CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_6]] to %[[VAL_3]] step %[[VAL_7]] {
// CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_15]]] : memref<?xindex>
@@ -265,8 +265,8 @@ func.func @cmp_ds(%arga: tensor<32x16xf32, #Tds>, %argb: tensor<32x16xf32>, %arg
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_11]] : memref<32x16xf32>)
// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref<?xindex>
@@ -306,8 +306,8 @@ func.func @mul_ds(%arga: tensor<32x16xf32, #Tds>, %argb: tensor<32x16xf32>, %arg
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_13]] : memref<32x16xf32>)
// CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_7]]] : memref<?xindex>
@@ -376,9 +376,9 @@ func.func @add_sd(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32>, %arg
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xi1>
// CHECK-DAG: linalg.fill ins(%[[VAL_5]] : i1) outs(%[[VAL_14]] : memref<32x16xi1>)
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1>
// CHECK-DAG: linalg.fill ins(%[[VAL_5]] : i1) outs(%[[VAL_14]] : memref<32x16xi1>)
// CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_7]]] : memref<?xindex>
// CHECK: %[[VAL_17:.*]]:2 = scf.while (%[[VAL_18:.*]] = %[[VAL_15]], %[[VAL_19:.*]] = %[[VAL_6]]) : (index, index) -> (index, index) {
@@ -444,9 +444,9 @@ func.func @cmp_sd(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32>, %arg
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32>
// CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_11]] : memref<32x16xf32>)
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_11]] : memref<32x16xf32>)
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_12]] to %[[VAL_13]] step %[[VAL_5]] {
@@ -488,8 +488,8 @@ func.func @mul_sd(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32>, %arg
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_15]] : memref<32x16xf32>)
// CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_7]]] : memref<?xindex>
@@ -584,8 +584,8 @@ func.func @add_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %arg
// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xi1>
// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1>
// CHECK-DAG: linalg.fill ins(%[[VAL_5]] : i1) outs(%[[VAL_16]] : memref<32x16xi1>)
// CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_7]]] : memref<?xindex>
@@ -679,8 +679,8 @@ func.func @cmp_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %arg
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_12]] : memref<32x16xf32>)
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
@@ -726,7 +726,7 @@ func.func @mul_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %arg
// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_16]] : memref<32x16xf32>)
// CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
@@ -891,7 +891,7 @@ func.func @add_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #T
// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xi1>
// CHECK-DAG: %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1>
// CHECK-DAG: linalg.fill ins(%[[VAL_3]] : i1) outs(%[[VAL_17]] : memref<32x16xi1>)
// CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_19:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref<?xindex>
@@ -1166,7 +1166,7 @@ func.func @sub_ss_batched(%0: tensor<2x3xf64, #BatchedVector>, %1: tensor<2x3xf6
// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_16]] : memref<32x16xf32>)
// CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
@@ -1260,7 +1260,7 @@ func.func @mul_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #T
// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_15]] : memref<32x16xf32>)
// CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_5]]] : memref<?xindex>
// CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_7]]] : memref<?xindex>
@@ -1362,7 +1362,7 @@ func.func @add_sd_ds(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32, #T
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_13]] : memref<32x16xf32>)
// CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
@@ -1415,8 +1415,8 @@ func.func @mul_sd_ds(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32, #T
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<16x32xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<16x32xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16x32xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf32>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<16xf32>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf32> to memref<32xf32>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<16xf32> to memref<16xf32>
// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
// CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref<?xindex>
// CHECK-DAG: %[[VAL_14:.*]] = arith.addi %[[VAL_12]], %[[VAL_5]] : index
@@ -1464,7 +1464,7 @@ func.func @matvec(%argA: tensor<16x32xf32, #Tds>, %argb: tensor<32xf32>, %argx:
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<f32>
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<f32> to memref<f32>
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref<f32>
// CHECK: %[[VAL_10:.*]] = scf.for %[[VAL_11:.*]] = %[[VAL_4]] to %[[VAL_2]] step %[[VAL_3]] iter_args(%[[VAL_12:.*]] = %[[VAL_9]]) -> (f32) {
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_11]]] : memref<?xindex>
@@ -1511,7 +1511,7 @@ func.func @sum_reduction(%arga: tensor<10x20xf32, #Tds>, %argx: tensor<f32>) ->
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<?x?xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.lvl %[[VAL_0]], %[[VAL_3]] : tensor<?x?xf64, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<?x?xf64>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<?x?xf64> to memref<?x?xf64>
// CHECK-DAG: linalg.fill ins(%{{.*}} : f64) outs(%[[VAL_11]] : memref<?x?xf64>)
// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_3]] to %[[VAL_8]] step %[[VAL_4]] {
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_12]]] : memref<?xindex>
@@ -1563,9 +1563,9 @@ func.func @scale(%arga: tensor<?x?xf64, #Tds>, %argx: tensor<?x?xf64>) -> tensor
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<?x?xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_11:.*]] = tensor.dim %[[VAL_1]], %[[VAL_4]] : tensor<?x?xf32>
// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : memref<?x?xf32>
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<?x?xf32>
// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_3]] : memref<?x?xf32>
// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<?x?xf32> to memref<?x?xf32>
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<?x?xf32> to memref<?x?xf32>
// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_3]] : tensor<?x?xf32> to memref<?x?xf32>
// CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
// CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_17:.*]] = %[[VAL_15]] to %[[VAL_16]] step %[[VAL_4]] {
@@ -1638,10 +1638,10 @@ func.func @sampled_dense_dense(%args: tensor<?x?xf32, #Tss>,
// CHECK-DAG: %[[VAL_17:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 1 : index} : tensor<?x?xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_18:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 1 : index} : tensor<?x?xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_19:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<?x?xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_20:.*]] = bufferization.to_memref %[[VAL_3]] : memref<?xf32>
// CHECK-DAG: %[[VAL_21:.*]] = bufferization.to_memref %[[VAL_4]] : memref<f32>
// CHECK-DAG: %[[VAL_20:.*]] = bufferization.to_memref %[[VAL_3]] : tensor<?xf32> to memref<?xf32>
// CHECK-DAG: %[[VAL_21:.*]] = bufferization.to_memref %[[VAL_4]] : tensor<f32> to memref<f32>
// CHECK-DAG: %[[VAL_22:.*]] = sparse_tensor.lvl %[[VAL_2]], %[[VAL_6]] : tensor<?x?xf32,
// CHECK-DAG: %[[VAL_24:.*]] = bufferization.to_memref %[[VAL_5]] : memref<?xf32>
// CHECK-DAG: %[[VAL_24:.*]] = bufferization.to_memref %[[VAL_5]] : tensor<?xf32> to memref<?xf32>
// CHECK: %[[VAL_25:.*]] = memref.load %[[VAL_21]][] : memref<f32>
// CHECK: %[[VAL_26:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK: %[[VAL_27:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref<?xindex>

@@ -33,9 +33,9 @@
|
||||
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index
|
||||
// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index
|
||||
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xf32>
|
||||
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32>
|
||||
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32>
|
||||
// CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_11]] : memref<32x16x8xf32>)
|
||||
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
|
||||
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
|
||||
// CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_11]] : memref<32x16x8xf32>)
|
||||
// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_6]] to %[[VAL_3]] step %[[VAL_7]] {
|
||||
// CHECK: %[[VAL_14:.*]] = arith.muli %[[VAL_12]], %[[VAL_4]] : index
|
||||
// CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] {
|
||||
@@ -75,9 +75,9 @@ func.func @add_ddd(%arga: tensor<32x16x8xf32, #Tddd>, %argb: tensor<32x16x8xf32>
|
||||
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index
|
||||
// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index
|
||||
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xf32>
|
||||
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32>
|
||||
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32>
|
||||
// CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_11]] : memref<32x16x8xf32>)
|
||||
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
|
||||
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
|
||||
// CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_11]] : memref<32x16x8xf32>)
|
||||
// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_6]] to %[[VAL_3]] step %[[VAL_7]] {
|
||||
// CHECK: %[[VAL_14:.*]] = arith.muli %[[VAL_12]], %[[VAL_4]] : index
|
||||
// CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] {
|
||||
@@ -120,8 +120,8 @@ func.func @mul_ddd(%arga: tensor<32x16x8xf32, #Tddd>, %argb: tensor<32x16x8xf32>
|
||||
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
|
||||
// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
|
||||
// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xf32>
|
||||
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32>
|
||||
// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32>
|
||||
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
|
||||
// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
|
||||
// CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_15]] : memref<32x16x8xf32>)
|
||||
// CHECK: scf.for %[[VAL_16:.*]] = %[[VAL_7]] to %[[VAL_4]] step %[[VAL_9]] {
|
||||
// CHECK: %[[VAL_18:.*]] = arith.muli %[[VAL_16]], %[[VAL_5]] : index
|
||||
@@ -187,9 +187,9 @@ func.func @add_dds(%arga: tensor<32x16x8xf32, #Tdds>, %argb: tensor<32x16x8xf32>
|
||||
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
|
||||
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
|
||||
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xf32>
|
||||
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32>
|
||||
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32>
|
||||
// CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_13]] : memref<32x16x8xf32>)
|
||||
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
|
||||
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
|
||||
// CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_13]] : memref<32x16x8xf32>)
|
||||
// CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] {
|
||||
// CHECK: %[[VAL_16:.*]] = arith.muli %[[VAL_14]], %[[VAL_5]] : index
|
||||
// CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_6]] to %[[VAL_5]] step %[[VAL_7]] {
|
||||
@@ -234,9 +234,9 @@ func.func @mul_dds(%arga: tensor<32x16x8xf32, #Tdds>, %argb: tensor<32x16x8xf32>
|
||||
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
|
||||
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
|
||||
// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xf32>
|
||||
// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32>
|
||||
// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32>
|
||||
// CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_14]] : memref<32x16x8xf32>)
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_14]] : memref<32x16x8xf32>)
// CHECK:           scf.for %[[VAL_15:.*]] = %[[VAL_7]] to %[[VAL_3]] step %[[VAL_8]] {
// CHECK:             %[[VAL_16:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_15]]] : memref<?xindex>
// CHECK:             %[[VAL_17:.*]] = arith.addi %[[VAL_15]], %[[VAL_8]] : index
@@ -305,9 +305,9 @@ func.func @add_dsd(%arga: tensor<32x16x8xf32, #Tdsd>, %argb: tensor<32x16x8xf32>
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG:       %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_12]] : memref<32x16x8xf32>)
// CHECK-DAG:       %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_12]] : memref<32x16x8xf32>)
// CHECK:           scf.for %[[VAL_13:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
// CHECK:             %[[VAL_14:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_13]]] : memref<?xindex>
// CHECK:             %[[VAL_15:.*]] = arith.addi %[[VAL_13]], %[[VAL_6]] : index
@@ -354,9 +354,9 @@ func.func @mul_dsd(%arga: tensor<32x16x8xf32, #Tdsd>, %argb: tensor<32x16x8xf32>
// CHECK-DAG:       %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG:       %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_17]] : memref<32x16x8xf32>)
// CHECK-DAG:       %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_17]] : memref<32x16x8xf32>)
// CHECK:           scf.for %[[VAL_18:.*]] = %[[VAL_8]] to %[[VAL_4]] step %[[VAL_9]] {
// CHECK:             %[[VAL_19:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_18]]] : memref<?xindex>
// CHECK:             %[[VAL_20:.*]] = arith.addi %[[VAL_18]], %[[VAL_9]] : index
@@ -450,9 +450,9 @@ func.func @add_dss(%arga: tensor<32x16x8xf32, #Tdss>, %argb: tensor<32x16x8xf32>
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_14]] : memref<32x16x8xf32>)
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_14]] : memref<32x16x8xf32>)
// CHECK:           scf.for %[[VAL_15:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
// CHECK:             %[[VAL_16:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_15]]] : memref<?xindex>
// CHECK:             %[[VAL_17:.*]] = arith.addi %[[VAL_15]], %[[VAL_6]] : index
@@ -499,9 +499,9 @@ func.func @mul_dss(%arga: tensor<32x16x8xf32, #Tdss>, %argb: tensor<32x16x8xf32>
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_14]] : memref<32x16x8xf32>)
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_14]] : memref<32x16x8xf32>)
// CHECK:           %[[VAL_15:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref<?xindex>
// CHECK:           %[[VAL_16:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_8]]] : memref<?xindex>
// CHECK:           %[[VAL_17:.*]]:2 = scf.while (%[[VAL_18:.*]] = %[[VAL_15]], %[[VAL_19:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
@@ -575,9 +575,9 @@ func.func @add_sdd(%arga: tensor<32x16x8xf32, #Tsdd>, %argb: tensor<32x16x8xf32>
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG:       %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_12]] : memref<32x16x8xf32>)
// CHECK-DAG:       %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_12]] : memref<32x16x8xf32>)
// CHECK:           %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref<?xindex>
// CHECK:           %[[VAL_14:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK:           scf.for %[[VAL_15:.*]] = %[[VAL_13]] to %[[VAL_14]] step %[[VAL_6]] {
@@ -625,9 +625,9 @@ func.func @mul_sdd(%arga: tensor<32x16x8xf32, #Tsdd>, %argb: tensor<32x16x8xf32>
// CHECK-DAG:       %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG:       %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_17]] : memref<32x16x8xf32>)
// CHECK-DAG:       %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_17]] : memref<32x16x8xf32>)
// CHECK:           %[[VAL_18:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_8]]] : memref<?xindex>
// CHECK:           %[[VAL_19:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_9]]] : memref<?xindex>
// CHECK:           %[[VAL_20:.*]]:2 = scf.while (%[[VAL_21:.*]] = %[[VAL_18]], %[[VAL_22:.*]] = %[[VAL_8]]) : (index, index) -> (index, index) {
@@ -726,9 +726,9 @@ func.func @add_sds(%arga: tensor<32x16x8xf32, #Tsds>, %argb: tensor<32x16x8xf32>
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_14]] : memref<32x16x8xf32>)
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_14]] : memref<32x16x8xf32>)
// CHECK:           %[[VAL_15:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref<?xindex>
// CHECK:           %[[VAL_16:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK:           scf.for %[[VAL_17:.*]] = %[[VAL_15]] to %[[VAL_16]] step %[[VAL_6]] {
@@ -778,9 +778,9 @@ func.func @mul_sds(%arga: tensor<32x16x8xf32, #Tsds>, %argb: tensor<32x16x8xf32>
// CHECK-DAG:       %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG:       %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_16]] : memref<32x16x8xf32>)
// CHECK-DAG:       %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_16]] : memref<32x16x8xf32>)
// CHECK:           %[[VAL_17:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref<?xindex>
// CHECK:           %[[VAL_18:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_8]]] : memref<?xindex>
// CHECK:           %[[VAL_19:.*]]:2 = scf.while (%[[VAL_20:.*]] = %[[VAL_17]], %[[VAL_21:.*]] = %[[VAL_7]]) : (index, index) -> (index, index) {
@@ -883,9 +883,9 @@ func.func @add_ssd(%arga: tensor<32x16x8xf32, #Tssd>, %argb: tensor<32x16x8xf32>
// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_13]] : memref<32x16x8xf32>)
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_13]] : memref<32x16x8xf32>)
// CHECK:           %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:           %[[VAL_15:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
// CHECK:           scf.for %[[VAL_16:.*]] = %[[VAL_14]] to %[[VAL_15]] step %[[VAL_5]] {
@@ -937,9 +937,9 @@ func.func @mul_ssd(%arga: tensor<32x16x8xf32, #Tssd>, %argb: tensor<32x16x8xf32>
// CHECK-DAG:       %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG:       %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_19:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_19]] : memref<32x16x8xf32>)
// CHECK-DAG:       %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_19:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_19]] : memref<32x16x8xf32>)
// CHECK:           %[[VAL_20:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_8]]] : memref<?xindex>
// CHECK:           %[[VAL_21:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_9]]] : memref<?xindex>
// CHECK:           %[[VAL_22:.*]]:2 = scf.while (%[[VAL_23:.*]] = %[[VAL_20]], %[[VAL_24:.*]] = %[[VAL_8]]) : (index, index) -> (index, index) {
@@ -1067,9 +1067,9 @@ func.func @add_sss(%arga: tensor<32x16x8xf32, #Tsss>, %argb: tensor<32x16x8xf32>
// CHECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG:       %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_15]] : memref<32x16x8xf32>)
// CHECK-DAG:       %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_15]] : memref<32x16x8xf32>)
// CHECK:           %[[VAL_16:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:           %[[VAL_17:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
// CHECK:           scf.for %[[VAL_18:.*]] = %[[VAL_16]] to %[[VAL_17]] step %[[VAL_5]] {
@@ -1127,11 +1127,11 @@ func.func @mul_sss(%arga: tensor<32x16x8xf32, #Tsss>, %argb: tensor<32x16x8xf32>
// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 2 : index} : tensor<?x?x?xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?x?x?xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.lvl %[[VAL_1]], %[[VAL_6]] : tensor<?x?x?xf32, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<?x?xf32>
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_3]] : memref<?x?xf32>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<?x?xf32> to memref<?x?xf32>
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_3]] : tensor<?x?xf32> to memref<?x?xf32>
// CHECK-DAG:       %[[VAL_13:.*]] = sparse_tensor.lvl %[[VAL_1]], %[[VAL_5]] : tensor<?x?x?xf32, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_14:.*]] = tensor.dim %[[VAL_2]], %[[VAL_6]] : tensor<?x?xf32>
// CHECK-DAG:       %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_0]] : memref<?x?xf32>
// CHECK-DAG:       %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<?x?xf32> to memref<?x?xf32>
// CHECK:           scf.for %[[VAL_17:.*]] = %[[VAL_5]] to %[[VAL_13]] step %[[VAL_6]] {
// CHECK:             %[[VAL_19:.*]] = arith.muli %[[VAL_17]], %[[VAL_10]] : index
// CHECK:             scf.for %[[VAL_18:.*]] = %[[VAL_5]] to %[[VAL_10]] step %[[VAL_6]] {
@@ -1191,7 +1191,7 @@ func.func @kernel_3d(%arga: tensor<?x?xf32>,
// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<10x20x30xf32, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<10x20x30xf32, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20x30xf32, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<f32>
// CHECK-DAG:       %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<f32> to memref<f32>
// CHECK:           %[[VAL_11:.*]] = memref.load %[[VAL_10]][] : memref<f32>
// CHECK:           %[[VAL_12:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK:           %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
@@ -1246,10 +1246,10 @@ func.func @sum_reduction(%arga: tensor<10x20x30xf32, #Tsss>, %argx: tensor<f32>)
// CHECK-DAG:       %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-DAG:       %[[VAL_6:.*]] = tensor.dim %[[VAL_0]], %[[VAL_3]] : tensor<?x?x?xf32>
// CHECK-DAG:       %[[VAL_7:.*]] = tensor.dim %[[VAL_0]], %[[VAL_4]] : tensor<?x?x?xf32>
// CHECK-DAG:       %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref<?x?x?xf32>
// CHECK-DAG:       %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<?x?x?xf32> to memref<?x?x?xf32>
// CHECK-DAG:       %[[VAL_9:.*]] = tensor.dim %[[VAL_0]], %[[VAL_5]] : tensor<?x?x?xf32>
// CHECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xf32, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<f32>
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<f32> to memref<f32>
// CHECK:           %[[VAL_13:.*]] = memref.load %[[VAL_12]][] : memref<f32>
// CHECK:           %[[VAL_14:.*]] = scf.for %[[VAL_15:.*]] = %[[VAL_5]] to %[[VAL_9]] step %[[VAL_3]] iter_args(%[[VAL_16:.*]] = %[[VAL_13]]) -> (f32) {
// CHECK:             %[[VAL_17:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_15]]] : memref<?xf32>
@@ -1305,10 +1305,10 @@ func.func @sum_reduction_inv(%arga: tensor<?x?x?xf32>,
// CHECK-DAG:       %[[VAL_7:.*]] = arith.constant 0 : index
// CHECK-DAG:       %[[VAL_8:.*]] = arith.constant 1 : index
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG:       %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<20xf32>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<30xf32>
// CHECK-DAG:       %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_3]] : memref<10x20x30xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_13]] : memref<10x20x30xf32>)
// CHECK-DAG:       %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<20xf32> to memref<20xf32>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<30xf32> to memref<30xf32>
// CHECK-DAG:       %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_3]] : tensor<10x20x30xf32> to memref<10x20x30xf32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_13]] : memref<10x20x30xf32>)
// CHECK:           scf.for %[[VAL_14:.*]] = %[[VAL_7]] to %[[VAL_4]] step %[[VAL_8]] {
// CHECK:             %[[VAL_15:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_14]]] : memref<?xf32>
// CHECK:             scf.for %[[VAL_16:.*]] = %[[VAL_7]] to %[[VAL_5]] step %[[VAL_8]] {

@@ -25,8 +25,8 @@
// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<4xf32>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf32>
// CHECK-DAG:       %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<4xf32> to memref<4xf32>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf32> to memref<32xf32>
// CHECK:           %[[VAL_12:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_4]]] : memref<4xf32>
// CHECK:           %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK:           %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
@@ -112,8 +112,8 @@ func.func @mul_inv_enc_dense1d(%arga: tensor<32xf32, #EncDenseVec>,
// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi32, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi32, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi32, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<34xi32>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xi32>
// CHECK-DAG:       %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<34xi32> to memref<34xi32>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xi32> to memref<32xi32>
// CHECK-DAG:       linalg.fill ins(%[[ZERO]] : i32) outs(%[[VAL_11]] : memref<32xi32>)
// CHECK:           %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK:           %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
@@ -163,8 +163,8 @@ func.func @and_affine_dense1d(%arga: tensor<32xi32, #SpVec>,
// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<34x19xf64>
// CHECK-DAG:       %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf64>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<34x19xf64> to memref<34x19xf64>
// CHECK-DAG:       %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf64> to memref<32x16xf64>
// CHECK:           scf.for %[[VAL_14:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_3]] {
// CHECK:             %[[VAL_15:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_14]]] : memref<?xindex>
// CHECK:             %[[VAL_16:.*]] = arith.addi %[[VAL_14]], %[[VAL_3]] : index
@@ -223,7 +223,7 @@ func.func @mul_affine_dense2d(%arga: tensor<32x16xf64, #CSR>,
// CHECK-DAG:       %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG:       %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf64>
// CHECK-DAG:       %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf64> to memref<32x16xf64>
// CHECK:           %[[VAL_15:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:           %[[VAL_16:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_5]]] : memref<?xindex>
// CHECK:           scf.for %[[VAL_17:.*]] = %[[VAL_15]] to %[[VAL_16]] step %[[VAL_5]] {
@@ -287,7 +287,7 @@ func.func @mul_affine_dense_dim_2d(%arga: tensor<34x16xf64, #CSR>,
// CHECK-DAG:       %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG:       %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x16xf64>
// CHECK-DAG:       %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf64> to memref<32x16xf64>
// CHECK:           %[[VAL_15:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_5]]] : memref<?xindex>
// CHECK:           %[[VAL_16:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK:           scf.for %[[VAL_17:.*]] = %[[VAL_15]] to %[[VAL_16]] step %[[VAL_6]] {

@@ -14,7 +14,7 @@
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<8x4x2xf32, #sparse{{[0-9]*}}> to memref<8x?xindex>
// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<8x4x2xf32, #sparse{{[0-9]*}}> to memref<8x?xindex>
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x4x2xf32, #sparse{{[0-9]*}}> to memref<8x?xf32>
// CHECK-DAG:       %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_6]] : memref<8x4x2xf32>
// CHECK-DAG:       %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_6]] : tensor<8x4x2xf32>
// CHECK-DAG:       linalg.fill ins(%[[VAL_3]] : f32) outs(%[[VAL_10]] : memref<8x4x2xf32>)
// CHECK:           scf.for %[[VAL_11:.*]] = %[[VAL_2]] to %[[VAL_5]] step %[[VAL_1]] {
// CHECK:             scf.for %[[VAL_12:.*]] = %[[VAL_2]] to %[[VAL_4]] step %[[VAL_1]] {

@@ -38,7 +38,7 @@
// CHECK-DAG:       %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG:       %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK-DAG:       %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64>
// CHECK:           %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK:           %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK:           scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] {
@@ -70,7 +70,7 @@ func.func @abs(%arga: tensor<32xf64, #SV>,
// CHECK-DAG:       %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG:       %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK-DAG:       %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64>
// CHECK:           %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK:           %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK:           scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] {
@@ -99,10 +99,10 @@ func.func @ceil(%arga: tensor<32xf64, #SV>,
// CHECK-SAME:      %[[VAL_1:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG:       %[[VAL_2:.*]] = arith.constant 0 : index
// CHECK-DAG:       %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK-DAG:       %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG:       %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK-DAG:       %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG:       %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64>
// CHECK:           %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK:           %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK:           scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] {
@@ -131,10 +131,10 @@ func.func @floor(%arga: tensor<32xf64, #SV>,
// CHECK-SAME:      %[[VAL_1:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG:       %[[VAL_2:.*]] = arith.constant 0 : index
// CHECK-DAG:       %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK-DAG:       %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG:       %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK-DAG:       %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG:       %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64>
// CHECK:           %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK:           %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK:           scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] {
@@ -169,8 +169,8 @@ func.func @neg(%arga: tensor<32xf64, #SV>,
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK-DAG:       %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64>
// CHECK:           %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:           %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK:           %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) {
@@ -229,8 +229,8 @@ func.func @add(%arga: tensor<32xf64, #SV>,
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG:       %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK-DAG:       %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64>
// CHECK:           %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:           %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK:           %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) {
@@ -289,8 +289,8 @@ func.func @sub(%arga: tensor<32xf64, #SV>,
// CHECK-DAG:       %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK-DAG:       %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK-DAG:       %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64>
// CHECK-DAG:       %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64>
// CHECK:           %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK:           %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:           scf.for %[[VAL_12:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_4]] {
@@ -325,7 +325,7 @@ func.func @mul(%arga: tensor<32xf64, #SV>,
// CHECK-DAG:       %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xf64>
// CHECK-DAG:       %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64>
// CHECK:           %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK:           %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:           scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_4]] {

@@ -25,10 +25,10 @@
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<100xf64, #sparse> to memref<?xindex>
// CHECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<100xf64, #sparse> to memref<?xindex>
// CHECK-DAG:       %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<100xf64, #sparse> to memref<?xf64>
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_8]] : memref<100xf64>
// CHECK-DAG:       linalg.fill ins(%[[VAL_4]] : f64) outs(%[[VAL_12]] : memref<100xf64>)
// CHECK-DAG:       %[[VAL_13:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK-DAG:       %[[VAL_14:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_8]] :
// CHECK-DAG:       linalg.fill ins(%[[VAL_4]] : f64) outs(%[[VAL_12]] : memref<100xf64>)
// CHECK-DAG:       %[[VAL_13:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK-DAG:       %[[VAL_14:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK:           %[[VAL_15:.*]]:2 = scf.while (%[[VAL_16:.*]] = %[[VAL_13]], %[[VAL_17:.*]] = %[[VAL_3]]) : (index, index) -> (index, index) {
// CHECK:             %[[VAL_18:.*]] = arith.cmpi ult, %[[VAL_16]], %[[VAL_14]] : index
// CHECK:             scf.condition(%[[VAL_18]]) %[[VAL_16]], %[[VAL_17]] : index, index
@@ -57,7 +57,7 @@
// CHECK:           scf.for %[[VAL_31:.*]] = %[[VAL_32:.*]]#1 to %[[VAL_5]] step %[[VAL_2]] {
// CHECK:             memref.store %[[VAL_7]], %[[VAL_12]]{{\[}}%[[VAL_31]]] : memref<100xf64>
// CHECK:           }
// CHECK:           %[[VAL_33:.*]] = bufferization.to_tensor %[[VAL_12]] : memref<100xf64>
// CHECK:           %[[VAL_33:.*]] = bufferization.to_tensor %[[VAL_12]] :
// CHECK:           return %[[VAL_33]] : tensor<100xf64>
// CHECK:         }
func.func @sparse_fusion(%argA: tensor<100xf64, #SV>) -> tensor<100xf64> {

@@ -33,8 +33,8 @@
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xi64>
// CHECK-DAG:       %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xi64> to memref<32xi64>
// CHECK:           %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:           %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK:           %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) {
@@ -94,8 +94,8 @@ func.func @add(%arga: tensor<32xi64, #SV>,
// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64>
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xi64>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64>
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xi64> to memref<32xi64>
// CHECK:           %[[VAL_13:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:           %[[VAL_14:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK:           %[[VAL_15:.*]]:2 = scf.while (%[[VAL_16:.*]] = %[[VAL_13]], %[[VAL_17:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) {
@@ -154,8 +154,8 @@ func.func @sub(%arga: tensor<32xi64, #SV>,
// CHECK-DAG:       %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64>
// CHECK-DAG:       %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xi64>
// CHECK-DAG:       %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64>
// CHECK-DAG:       %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xi64> to memref<32xi64>
// CHECK:           %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK:           %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:           scf.for %[[VAL_12:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_4]] {
@@ -190,7 +190,7 @@ func.func @mul(%arga: tensor<32xi64, #SV>,
// CHECK-DAG:       %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64>
// CHECK-DAG:       %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64>
// CHECK:           %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK:           %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:           scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_4]] {
@@ -224,7 +224,7 @@ func.func @divsbyc(%arga: tensor<32xi64, #SV>,
// CHECK-DAG:       %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}>
// CHECK-DAG:       %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64>
// CHECK-DAG:       %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64>
// CHECK:           %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK:           %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:           scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_4]] {
@@ -258,8 +258,8 @@ func.func @divubyc(%arga: tensor<32xi64, #SV>,
// CHECK-DAG:       %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> to memref<?xi64>
// CHECK-DAG:       %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64>
// CHECK-DAG:       %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xi64>
// CHECK-DAG:       %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64>
// CHECK-DAG:       %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xi64> to memref<32xi64>
// CHECK:           %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK:           %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:           scf.for %[[VAL_12:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_4]] {
@@ -296,8 +296,8 @@ func.func @and(%arga: tensor<32xi64, #SV>,
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> to memref<?xi64>
// CHECK-DAG:       %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xi64>
// CHECK-DAG:       %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xi64> to memref<32xi64>
// CHECK:           %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:           %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK:           %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) {
@@ -356,8 +356,8 @@ func.func @or(%arga: tensor<32xi64, #SV>,
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> to memref<?xi64>
// CHECK-DAG:       %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xi64>
// CHECK-DAG:       %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xi64> to memref<32xi64>
// CHECK:           %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:           %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK:           %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) {
@@ -414,7 +414,7 @@ func.func @xor(%arga: tensor<32xi64, #SV>,
// CHECK-DAG:       %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> to memref<?xi64>
// CHECK-DAG:       %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64>
// CHECK-DAG:       %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64>
// CHECK:           %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK:           %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:           scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_4]] {
@@ -448,7 +448,7 @@ func.func @ashrbyc(%arga: tensor<32xi64, #SV>,
// CHECK-DAG:       %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> to memref<?xi64>
// CHECK-DAG:       %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64>
// CHECK-DAG:       %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64>
// CHECK:           %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK:           %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:           scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_4]] {
@@ -482,7 +482,7 @@ func.func @lsrbyc(%arga: tensor<32xi64, #SV>,
// CHECK-DAG:       %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> to memref<?xi64>
// CHECK-DAG:       %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<32xi64>
// CHECK-DAG:       %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64>
// CHECK:           %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK:           %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:           scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_4]] {

@@ -18,8 +18,8 @@
// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<20x30xf32>
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<10x30xf32>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<20x30xf32> to memref<20x30xf32>
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<10x30xf32> to memref<10x30xf32>
// CHECK:           %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:           %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
// CHECK:           scf.for %[[VAL_15:.*]] = %[[VAL_13]] to %[[VAL_14]] step %[[VAL_5]] {
@@ -58,13 +58,13 @@ func.func @matmul1(%a: tensor<10x20xf32, #DCSR>,
// CHECK-DAG:       %[[VAL_3:.*]] = arith.constant 10 : index
// CHECK-DAG:       %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG:       %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-DAG:       %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<10x20xf32>
// CHECK-DAG:       %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<10x20xf32> to memref<10x20xf32>
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index}
// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index}
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index}
// CHECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index}
// CHECK-DAG:       %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]]
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<10x30xf32>
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<10x30xf32> to memref<10x30xf32>
// CHECK:           scf.for %[[VAL_13:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
// CHECK:             %[[VAL_14:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:             %[[VAL_15:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref<?xindex>
@@ -203,13 +203,13 @@ func.func @matmul2(%A: tensor<4x8xf64, #DCSR>,
// CHECK-DAG:       %[[VAL_3:.*]] = arith.constant 6 : index
// CHECK-DAG:       %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG:       %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-DAG:       %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<8x8xi32>
// CHECK-DAG:       %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<8x8xi32> to memref<8x8xi32>
// CHECK-DAG:       %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<3x3xi32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<3x3xi32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<3x3xi32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<3x3xi32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<3x3xi32, #sparse{{[0-9]*}}> to memref<?xi32>
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<6x6xi32>
// CHECK-DAG:       %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<6x6xi32> to memref<6x6xi32>
// CHECK:           scf.for %[[VAL_13:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
// CHECK:             scf.for %[[VAL_14:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
// CHECK:               %[[VAL_15:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_13]], %[[VAL_14]]] : memref<6x6xi32>
@@ -255,13 +255,13 @@ func.func @conv2d(%input: tensor<8x8xi32>,
// CHECK-DAG:       %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG:       %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-DAG:       %[[VAL_6:.*]] = arith.constant 2 : i64
// CHECK-DAG:       %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : memref<5x3xi8>
// CHECK-DAG:       %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<5x3xi8> to memref<5x3xi8>
// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<3x6xi8, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<3x6xi8, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<3x6xi8, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<3x6xi8, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<3x6xi8, #sparse{{[0-9]*}}> to memref<?xi8>
// CHECK-DAG:       %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : memref<5x6xi64>
// CHECK-DAG:       %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<5x6xi64> to memref<5x6xi64>
// CHECK:           scf.for %[[VAL_14:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
// CHECK:             %[[VAL_15:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK:             %[[VAL_16:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_5]]] : memref<?xindex>
@@ -309,7 +309,7 @@ func.func @quantized_matmul(%input1: tensor<5x3xi8>,
// CHECK-DAG:       %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<1024xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<1024xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG:       %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<1024xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<f32>
// CHECK-DAG:       %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<f32> to memref<f32>
// CHECK:           %[[VAL_12:.*]] = memref.load %[[VAL_11]][] : memref<f32>
// CHECK:           %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK:           %[[VAL_14:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>

@@ -85,7 +85,7 @@ func.func @sqsum(%arg0: tensor<?x?x?x?xi32, #COO>) -> tensor<i32> {
// CHECK:           %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK:           %[[VAL_4:.*]] = arith.constant 0 : i32
// CHECK:           %[[VAL_5:.*]] = arith.constant dense<0> : tensor<10xi32>
// CHECK:           %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_5]] : memref<10xi32>
// CHECK:           %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_5]] : tensor<10xi32> to memref<10xi32>
// CHECK:           linalg.fill ins(%[[VAL_4]] : i32) outs(%[[VAL_6]] : memref<10xi32>)
// CHECK:           %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #sparse{{.*}}> to memref<?xindex>
// CHECK:           %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #sparse{{.*}}> to memref<?xindex>

@@ -29,8 +29,8 @@
// CHECK-HIR-DAG:     %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}>
// CHECK-HIR-DAG:     %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}>
// CHECK-HIR-DAG:     %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse{{[0-9]*}}>
// CHECK-HIR-DAG:     %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64>
// CHECK-HIR-DAG:     %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK-HIR-DAG:     %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<64xf64> to memref<64xf64>
// CHECK-HIR-DAG:     %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64>
// CHECK-HIR:         scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
// CHECK-HIR-DAG:       %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref<?xindex>
// CHECK-HIR-DAG:       %[[VAL_14:.*]] = arith.addi %[[VAL_12]], %[[VAL_5]] : index
@@ -60,8 +60,8 @@
// CHECK-MIR-DAG:     %[[VAL_6:.*]] = call @sparsePositions0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref<?xindex>
// CHECK-MIR-DAG:     %[[VAL_7:.*]] = call @sparseCoordinates0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref<?xindex>
// CHECK-MIR-DAG:     %[[VAL_8:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr) -> memref<?xf64>
// CHECK-MIR-DAG:     %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64>
// CHECK-MIR-DAG:     %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK-MIR-DAG:     %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<64xf64> to memref<64xf64>
// CHECK-MIR-DAG:     %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64>
// CHECK-MIR:         scf.for %[[VAL_14:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
// CHECK-MIR-DAG:       %[[VAL_15:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_14]]] : memref<?xindex>
// CHECK-MIR-DAG:       %[[VAL_16:.*]] = arith.addi %[[VAL_14]], %[[VAL_5]] : index

@@ -32,8 +32,8 @@
// CHECK-HIR-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[DEMAP]] {level = 1 : index}
// CHECK-HIR-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[DEMAP]] {level = 1 : index}
// CHECK-HIR-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[DEMAP]]
// CHECK-HIR-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64>
// CHECK-HIR-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK-HIR-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<64xf64> to memref<64xf64>
// CHECK-HIR-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64>
// CHECK-HIR: scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
// CHECK-HIR-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_12]]] : memref<64xf64>
// CHECK-HIR-DAG: %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref<?xindex>
@@ -62,8 +62,8 @@
// CHECK-MIR-DAG: %[[VAL_7:.*]] = call @sparsePositions0(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr, index) -> memref<?xindex>
// CHECK-MIR-DAG: %[[VAL_8:.*]] = call @sparseCoordinates0(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr, index) -> memref<?xindex>
// CHECK-MIR-DAG: %[[VAL_9:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr) -> memref<?xf64>
// CHECK-MIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64>
// CHECK-MIR-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK-MIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<64xf64> to memref<64xf64>
// CHECK-MIR-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64>
// CHECK-MIR: scf.for %[[VAL_15:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
// CHECK-MIR: %[[VAL_16:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_15]]] : memref<64xf64>
// CHECK-MIR: %[[VAL_17:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_15]]] : memref<?xindex>

@@ -29,8 +29,8 @@
// CHECK-HIR-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}>
// CHECK-HIR-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}>
// CHECK-HIR-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse{{[0-9]*}}>
// CHECK-HIR-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64>
// CHECK-HIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK-HIR-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<64xf64> to memref<64xf64>
// CHECK-HIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64>
// CHECK-HIR: scf.for %[[VAL_11:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
// CHECK-HIR-DAG: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref<?xindex>
// CHECK-HIR-DAG: %[[VAL_13:.*]] = arith.addi %[[VAL_11]], %[[VAL_5]] : index
@@ -60,8 +60,8 @@
// CHECK-MIR-DAG: %[[VAL_6:.*]] = call @sparsePositions0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref<?xindex>
// CHECK-MIR-DAG: %[[VAL_7:.*]] = call @sparseCoordinates0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref<?xindex>
// CHECK-MIR-DAG: %[[VAL_8:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr) -> memref<?xf64>
// CHECK-MIR-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<64xf64>
// CHECK-MIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32xf64>
// CHECK-MIR-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<64xf64> to memref<64xf64>
// CHECK-MIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64>
// CHECK-MIR: scf.for %[[VAL_11:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
// CHECK-MIR-DAG: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref<?xindex>
// CHECK-MIR-DAG: %[[VAL_13:.*]] = arith.addi %[[VAL_11]], %[[VAL_5]] : index

@@ -35,13 +35,13 @@
// CHECK-DAG: %[[VAL_10:.*]] = arith.constant 80 : index
// CHECK-DAG: %[[VAL_11:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_12:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_0]] : memref<10x20x30x40x50x60x70x80xf32>
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<10x20x30x40x50x60x70x80xf32> to memref<10x20x30x40x50x60x70x80xf32>
// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 3 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 3 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 4 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_17:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 4 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_18:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<80x70x60x50x40x30x20x10xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_20:.*]] = bufferization.to_memref %[[VAL_2]] : memref<10x20x30x40x50x60x70x80xf32>
// CHECK-DAG: %[[VAL_20:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<10x20x30x40x50x60x70x80xf32> to memref<10x20x30x40x50x60x70x80xf32>
// CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_20]] : memref<10x20x30x40x50x60x70x80xf32>
// CHECK: scf.for %[[VAL_21:.*]] = %[[VAL_11]] to %[[VAL_10]] step %[[VAL_12]] {
// CHECK: %[[VAL_23:.*]] = arith.muli %[[VAL_21]], %[[VAL_9]] : index

@@ -19,7 +19,7 @@
// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #{{.*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #{{.*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10xi32, #{{.*}}> to memref<?xi32>
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : memref<10xf32>
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<10xf32> to memref<10xf32>
// CHECK-DAG: linalg.fill ins(%[[VAL_3]] : f32) outs(%[[VAL_8]] : memref<10xf32>)
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
@@ -53,7 +53,7 @@ func.func @allout_inplace(%arga: tensor<10xi32, #SV>,
// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #{{.*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #{{.*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10xi32, #{{.*}}> to memref<?xi32>
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_4]] : memref<10xf32>
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_4]] : tensor<10xf32> to memref<10xf32>
// CHECK-DAG: linalg.fill ins(%[[VAL_2]] : f32) outs(%[[VAL_8]] : memref<10xf32>)
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_1]]] : memref<?xindex>
// CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
@@ -86,7 +86,7 @@ func.func @allout_materialize(%arga: tensor<10xi32, #SV>) -> tensor<10xf32> {
// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<10xf32, #{{.*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<10xf32, #{{.*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10xf32, #{{.*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<10xf32>
// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<10xf32> to memref<10xf32>
// CHECK-DAG: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK-DAG: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] {

@@ -12,12 +12,12 @@
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 2 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 100 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_1]] : memref<2xindex>
// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<2xindex> to memref<2xindex>
// CHECK-DAG: %[[VAL_7:.*]] = memref.cast %[[VAL_6]] : memref<2xindex> to memref<?xindex>
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_2]] : memref<6x2xi32>
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<6x2xi32> to memref<6x2xi32>
// CHECK-DAG: %[[VAL_9:.*]] = memref.collapse_shape %[[VAL_8]] {{\[\[}}0, 1]] : memref<6x2xi32> into memref<12xi32>
// CHECK-DAG: %[[VAL_10:.*]] = memref.cast %[[VAL_9]] : memref<12xi32> to memref<?xi32>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_0]] : memref<6xf64>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<6xf64> to memref<6xf64>
// CHECK-DAG: %[[VAL_12:.*]] = memref.cast %[[VAL_11]] : memref<6xf64> to memref<?xf64>
// CHECK: %[[VAL_13:.*]] = sparse_tensor.storage_specifier.init
// CHECK: %[[VAL_14:.*]] = sparse_tensor.storage_specifier.set %[[VAL_13]] lvl_sz at 0 with %[[VAL_4]]
@@ -45,18 +45,18 @@ func.func @sparse_pack(%values: tensor<6xf64>, %pos:tensor<2xindex>, %coordinate
// CHECK-SAME: %[[VAL_5:.*]]: tensor<2xindex>,
// CHECK-SAME: %[[VAL_6:.*]]: tensor<6x2xi32>) -> (tensor<6xf64>, tensor<2xindex>, tensor<6x2xi32>) {
// CHECK: %[[VAL_7:.*]] = sparse_tensor.storage_specifier.get %[[VAL_3]] pos_mem_sz at 0
// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_5]] : memref<2xindex>
// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_5]] : tensor<2xindex> to memref<2xindex>
// CHECK: %[[VAL_9:.*]] = memref.subview %[[VAL_8]][0] {{\[}}%[[VAL_7]]] [1] : memref<2xindex> to memref<?xindex>
// CHECK: %[[VAL_10:.*]] = memref.subview %[[VAL_0]][0] {{\[}}%[[VAL_7]]] [1] : memref<?xindex> to memref<?xindex>
// CHECK: memref.copy %[[VAL_10]], %[[VAL_9]] : memref<?xindex> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = sparse_tensor.storage_specifier.get %[[VAL_3]] crd_mem_sz at 0
// CHECK: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_6]] : memref<6x2xi32>
// CHECK: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_6]] : tensor<6x2xi32> to memref<6x2xi32>
// CHECK: %[[VAL_13:.*]] = memref.collapse_shape %[[VAL_12]] {{\[\[}}0, 1]] : memref<6x2xi32> into memref<12xi32>
// CHECK: %[[VAL_14:.*]] = memref.subview %[[VAL_13]][0] {{\[}}%[[VAL_11]]] [1] : memref<12xi32> to memref<?xi32>
// CHECK: %[[VAL_15:.*]] = memref.subview %[[VAL_1]][0] {{\[}}%[[VAL_11]]] [1] : memref<?xi32> to memref<?xi32>
// CHECK: memref.copy %[[VAL_15]], %[[VAL_14]] : memref<?xi32> to memref<?xi32>
// CHECK: %[[VAL_16:.*]] = sparse_tensor.storage_specifier.get %[[VAL_3]] val_mem_sz
// CHECK: %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_4]] : memref<6xf64>
// CHECK: %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_4]] : tensor<6xf64> to memref<6xf64>
// CHECK: %[[VAL_18:.*]] = memref.subview %[[VAL_17]][0] {{\[}}%[[VAL_16]]] [1] : memref<6xf64> to memref<?xf64>
// CHECK: %[[VAL_19:.*]] = memref.subview %[[VAL_2]][0] {{\[}}%[[VAL_16]]] [1] : memref<?xf64> to memref<?xf64>
// CHECK: memref.copy %[[VAL_19]], %[[VAL_18]] : memref<?xf64> to memref<?xf64>

@@ -24,8 +24,8 @@
// CHECK-DAG: %[[TMP_0:.*]] = sparse_tensor.positions %[[TMP_arg0]] {level = 1 : index}
// CHECK-DAG: %[[TMP_1:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 1 : index}
// CHECK-DAG: %[[TMP_2:.*]] = sparse_tensor.values %[[TMP_arg0]]
// CHECK-DAG: %[[TMP_3:.*]] = bufferization.to_memref %[[TMP_arg1]] : memref<32xf32>
// CHECK-DAG: %[[TMP_4:.*]] = bufferization.to_memref %[[TMP_arg2]] : memref<16xf32>
// CHECK-DAG: %[[TMP_3:.*]] = bufferization.to_memref %[[TMP_arg1]] : tensor<32xf32> to memref<32xf32>
// CHECK-DAG: %[[TMP_4:.*]] = bufferization.to_memref %[[TMP_arg2]] : tensor<16xf32> to memref<16xf32>
// CHECK: scf.parallel (%[[TMP_arg3:.*]]) = (%[[TMP_c0]]) to (%[[TMP_c16]]) step (%[[TMP_c1]]) {
// CHECK: %[[TMP_6:.*]] = memref.load %[[TMP_4]][%[[TMP_arg3]]] : memref<16xf32>
// CHECK: %[[TMP_7:.*]] = memref.load %[[TMP_0]][%[[TMP_arg3]]] : memref<?xindex>

@@ -24,7 +24,7 @@
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK: %[[DEMAP:.*]] = sparse_tensor.reinterpret_map %[[VAL_0]]
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[DEMAP]] : tensor<30x10x20xf32, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<20x30x10xf32>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<20x30x10xf32> to memref<20x30x10xf32>
// CHECK: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_9]] : memref<20x30x10xf32>)
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
// CHECK: %[[VAL_12:.*]] = arith.muli %[[VAL_10]], %[[VAL_4]] : index
@@ -64,7 +64,7 @@ func.func @sparse_static_dims(%arga: tensor<10x20x30xf32, #X>,
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.lvl %[[DEMAP]], %[[VAL_2]] : tensor<?x?x?xf32, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.lvl %[[DEMAP]], %[[VAL_3]] : tensor<?x?x?xf32, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.lvl %[[DEMAP]], %[[VAL_4]] : tensor<?x?x?xf32, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<?x?x?xf32>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<?x?x?xf32> to memref<?x?x?xf32>
// CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_10]] : memref<?x?x?xf32>)
// CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_3]] to %[[VAL_7]] step %[[VAL_4]] {
// CHECK: %[[VAL_13:.*]] = arith.muli %[[VAL_11]], %[[VAL_8]] : index

@@ -26,7 +26,7 @@
// CHECK-HIR-DAG: %[[VAL_6:.*]] = sparse_tensor.lvl %[[DEMAP]], %[[VAL_2]] : tensor<?x?x?xf32, #sparse{{[0-9]*}}>
// CHECK-HIR-DAG: %[[VAL_7:.*]] = sparse_tensor.lvl %[[DEMAP]], %[[VAL_4]] : tensor<?x?x?xf32, #sparse{{[0-9]*}}>
// CHECK-HIR-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[DEMAP]] : tensor<?x?x?xf32, #sparse{{[0-9]*}}>
// CHECK-HIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : memref<f32>
// CHECK-HIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<f32> to memref<f32>
// CHECK-HIR: %[[VAL_11:.*]] = tensor.extract %[[VAL_1]][] : tensor<f32>
// CHECK-HIR: %[[VAL_12:.*]] = scf.for %[[VAL_13:.*]] = %[[VAL_3]] to %[[VAL_5]] step %[[VAL_2]] iter_args(%[[VAL_14:.*]] = %[[VAL_11]]) -> (f32) {
// CHECK-HIR: %[[VAL_18:.*]] = arith.muli %[[VAL_13]], %[[VAL_6]] : index
@@ -58,7 +58,7 @@
// CHECK-MIR-DAG: %[[DimSize1:.*]] = call @sparseLvlSize(%[[ARGA]], %[[I1]])
// CHECK-MIR-DAG: %[[DimSize2:.*]] = call @sparseLvlSize(%[[ARGA]], %[[I2]])
// CHECK-MIR-DAG: %[[VAL_8:.*]] = call @sparseValuesF32(%[[ARGA]]) : (!llvm.ptr) -> memref<?xf32>
// CHECK-MIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[ARGX]] : memref<f32>
// CHECK-MIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[ARGX]] : tensor<f32> to memref<f32>
// CHECK-MIR: %[[VAL_11:.*]] = tensor.extract %[[ARGX]][] : tensor<f32>
// CHECK-MIR: %[[VAL_12:.*]] = scf.for %[[D2:.*]] = %[[I0]] to %[[DimSize0]] step %[[I1]] iter_args(%[[VAL_14:.*]] = %[[VAL_11]]) -> (f32) {
// CHECK-MIR: %[[VAL_18:.*]] = arith.muli %[[D2]], %[[DimSize1]] : index

@@ -33,8 +33,8 @@
// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_1]] : memref<f32>
// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_4]] : memref<32x16xf32>
// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<f32> to memref<f32>
// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_4]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK-DAG: %[[VAL_16:.*]] = memref.load %[[VAL_14]][] : memref<f32>
// CHECK-DAG: %[[VAL_17:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK-DAG: %[[VAL_18:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref<?xindex>

@@ -64,14 +64,14 @@ func.func @fold_yield_direct_zero() -> tensor<32xf64> {
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant dense<0.000000e+00> : tensor<8x8xf64>
// CHECK-DAG: %[[VAL_7:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) : tensor<8x8xf64>
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) : tensor<8x8xf64>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<8x8xf64>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<8x8xf64>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<8x8xf64> to memref<8x8xf64>
// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_8]] : memref<8x8xf64>
// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_8]] : tensor<8x8xf64> to memref<8x8xf64>
// CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_5]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_19:.*]] = %[[VAL_17]] to %[[VAL_18]] step %[[VAL_5]] {
@@ -132,8 +132,8 @@ func.func @sampled_dd_unfused(%args: tensor<8x8xf64, #SM>,
// CHECK-DAG: %[[VAL_8:.*]] = arith.constant dense<0.000000e+00> : tensor<8x8xf64>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.alloc_tensor() copy(%[[VAL_8]]) : tensor<8x8xf64>
// CHECK-DAG: %[[VAL_10:.*]] = tensor.empty() : tensor<8x8xf64, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<8x8xf64>
// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<8x8xf64>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64>
// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<8x8xf64> to memref<8x8xf64>
// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref<?xindex>

@@ -30,8 +30,8 @@
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant false
// CHECK-DAG: %[[VAL_7:.*]] = arith.constant true
// CHECK-DAG: %[[VAL_8:.*]] = tensor.empty() : tensor<8x8xf64, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<8x8xf64>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<8x8xf64>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<8x8xf64> to memref<8x8xf64>
// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref<?xindex>

@@ -31,7 +31,7 @@
// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 1 : index} : tensor<64x32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 1 : index} : tensor<64x32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<64x32xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_0]] : memref<f64>
// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<f64> to memref<f64>
// CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_14]][] : memref<f64>
// CHECK: %[[VAL_16:.*]] = scf.for %[[VAL_17:.*]] = %[[VAL_6]] to %[[VAL_5]] step %[[VAL_7]] iter_args(%[[VAL_18:.*]] = %[[VAL_15]]) -> (f64) {
// CHECK: %[[VAL_19:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_17]]] : memref<?xindex>

@@ -28,7 +28,7 @@
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8xi64, #sparse{{[0-9]*}}> to memref<?xi64>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_7]] : memref<8xi64>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_7]] : tensor<8xi64> to memref<8xi64>
// CHECK-DAG: linalg.fill ins(%[[VAL_4]] : i64) outs(%[[VAL_11]] : memref<8xi64>)
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_5]]] : memref<?xindex>
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_6]]] : memref<?xindex>
@@ -70,7 +70,7 @@ func.func @sparse_index_1d_conj(%arga: tensor<8xi64, #SparseVector>) -> tensor<8
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8xi64, #sparse{{[0-9]*}}> to memref<?xi64>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_7]] : memref<8xi64>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_7]] : tensor<8xi64> to memref<8xi64>
// CHECK-DAG: linalg.fill ins(%[[VAL_3]] : i64) outs(%[[VAL_11]] : memref<8xi64>)
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_5]]] : memref<?xindex>

@@ -24,8 +24,8 @@
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 8 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<8x8xf64>
// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : memref<8x8xf64>
// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<8x8xf64> to memref<8x8xf64>
// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64>
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 1 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 1 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref<?xf64>

@@ -37,8 +37,8 @@
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0.000000e+00 : f32
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.reinterpret_map %[[VAL_0]] : tensor<?x?xf32, #[[$BSR]]> to tensor<?x?x2x2xf32, #[[$MAP]]>
// CHECK-DAG: %[[VAL_8:.*]] = tensor.dim %[[VAL_1]], %[[VAL_3]] : tensor<?x?xf32>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : memref<?x?xf32>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : memref<?x?xf32>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<?x?xf32> to memref<?x?xf32>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<?x?xf32> to memref<?x?xf32>
// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.lvl %[[VAL_7]], %[[VAL_4]] : tensor<?x?x2x2xf32, #[[$MAP]]>
// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_7]] {level = 1 : index} : tensor<?x?x2x2xf32, #[[$MAP]]> to memref<?xindex>
// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_7]] {level = 1 : index} : tensor<?x?x2x2xf32, #[[$MAP]]> to memref<?xindex>

@@ -28,8 +28,8 @@
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 4 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref<2x4xf64>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : memref<2x4xf64>
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<2x4xf64>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<2x4xf64>
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] {
// CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_6]] to %[[VAL_3]] step %[[VAL_7]] {
// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_6]] to %[[VAL_5]] step %[[VAL_7]] {

@@ -16,7 +16,7 @@
// CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-ON-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<?xi13, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-ON-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi13, #sparse{{[0-9]*}}> to memref<?xi13>
// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i13>
// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<i13> to memref<i13>
// CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref<i13>
// CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
@@ -42,7 +42,7 @@
// CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK-OFF-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<?xi13, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-OFF-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi13, #sparse{{[0-9]*}}> to memref<?xi13>
// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i13>
// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<i13> to memref<i13>
// CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref<i13>
// CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
@@ -93,7 +93,7 @@ func.func @sparse_reduction_ori(%argx: tensor<i13>,
// CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-ON-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<?xi13, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-ON-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi13, #sparse{{[0-9]*}}> to memref<?xi13>
// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i13>
// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<i13> to memref<i13>
// CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref<i13>
// CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
@@ -119,7 +119,7 @@ func.func @sparse_reduction_ori(%argx: tensor<i13>,
// CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK-OFF-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<?xi13, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-OFF-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi13, #sparse{{[0-9]*}}> to memref<?xi13>
// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i13>
// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<i13> to memref<i13>
// CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref<i13>
// CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
@@ -168,7 +168,7 @@ func.func @sparse_reduction_ori_accumulator_on_rhs(%argx: tensor<i13>,
// CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-ON-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<?xi32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-ON-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi32, #sparse{{[0-9]*}}> to memref<?xi32>
// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i32>
// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<i32> to memref<i32>
// CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref<i32>
// CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
@@ -194,7 +194,7 @@ func.func @sparse_reduction_ori_accumulator_on_rhs(%argx: tensor<i13>,
// CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK-OFF-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<?xi32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-OFF-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi32, #sparse{{[0-9]*}}> to memref<?xi32>
// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i32>
// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<i32> to memref<i32>
// CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref<i32>
// CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
@@ -243,7 +243,7 @@ func.func @sparse_reduction_subi(%argx: tensor<i32>,
// CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-ON-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<?xi32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-ON-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi32, #sparse{{[0-9]*}}> to memref<?xi32>
// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i32>
// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<i32> to memref<i32>
// CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref<i32>
// CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
@@ -269,7 +269,7 @@ func.func @sparse_reduction_subi(%argx: tensor<i32>,
// CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK-OFF-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<?xi32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-OFF-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi32, #sparse{{[0-9]*}}> to memref<?xi32>
// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i32>
// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<i32> to memref<i32>
// CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref<i32>
// CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
@@ -319,7 +319,7 @@ func.func @sparse_reduction_xor(%argx: tensor<i32>,
// CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-ON-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<?xi32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-ON-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi32, #sparse{{[0-9]*}}> to memref<?xi32>
// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i32>
// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<i32> to memref<i32>
// CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref<i32>
// CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
@@ -345,7 +345,7 @@ func.func @sparse_reduction_xor(%argx: tensor<i32>,
// CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK-OFF-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<?xi32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-OFF-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi32, #sparse{{[0-9]*}}> to memref<?xi32>
// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i32>
// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<i32> to memref<i32>
// CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref<i32>
// CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
@@ -395,7 +395,7 @@ func.func @sparse_reduction_addi(%argx: tensor<i32>,
// CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-ON-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<?xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-ON-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref<f32>
// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<f32> to memref<f32>
// CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref<f32>
// CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
@@ -421,7 +421,7 @@ func.func @sparse_reduction_addi(%argx: tensor<i32>,
// CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK-OFF-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<?xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-OFF-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<f32>
// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<f32> to memref<f32>
// CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref<f32>
// CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
@@ -471,7 +471,7 @@ func.func @sparse_reduction_subf(%argx: tensor<f32>,
// CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-ON-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<?xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-ON-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref<f32>
// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<f32> to memref<f32>
// CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref<f32>
// CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
@@ -497,7 +497,7 @@ func.func @sparse_reduction_subf(%argx: tensor<f32>,
// CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK-OFF-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<?xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-OFF-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<f32>
// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<f32> to memref<f32>
// CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref<f32>
// CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>

@@ -3,7 +3,7 @@
// CHECK-LABEL: func @dim(
// CHECK-SAME: %[[TENSOR:.*]]: tensor<*xf32>,
// CHECK-SAME: %[[INDEX:.*]]: index) -> index {
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<*xf32>
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor<*xf32> to memref<*xf32>
// CHECK: %[[EXTENT:.*]] = memref.dim %[[MEMREF]], %[[INDEX]] : memref<*xf32>
// CHECK: return %[[EXTENT]] : index
func.func @dim(%arg0: tensor<*xf32>, %arg1: index) -> index {
@@ -39,7 +39,7 @@ func.func @tensor.cast(%arg0: tensor<?xindex>) -> tensor<2xindex> {

// CHECK-LABEL: func @tensor.cast_from_unranked(
// CHECK-SAME: %[[TENSOR:.*]]: tensor<*xf32>) -> tensor<2xf32> {
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<*xf32>
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor<*xf32> to memref<*xf32>
// CHECK: %[[CASTED_MEMREF:.*]] = memref.cast %[[MEMREF]] : memref<*xf32> to memref<2xf32, strided<[?], offset: ?>>
// CHECK: %[[RET:.*]] = bufferization.to_tensor %[[CASTED_MEMREF]] : memref<2xf32, strided<[?], offset: ?>>
// CHECK: return %[[RET]] : tensor<2xf32>
@@ -52,7 +52,7 @@ func.func @tensor.cast_from_unranked(%arg0: tensor<*xf32>) -> tensor<2xf32> {

// CHECK-LABEL: func @tensor.cast_to_unranked(
// CHECK-SAME: %[[TENSOR:.*]]: tensor<2xf32>) -> tensor<*xf32> {
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<2xf32>
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor<2xf32> to memref<2xf32>
// CHECK: %[[CASTED_MEMREF:.*]] = memref.cast %[[MEMREF]] : memref<2xf32> to memref<*xf32>
// CHECK: %[[RET:.*]] = bufferization.to_tensor %[[CASTED_MEMREF]] : memref<*xf32>
// CHECK: return %[[RET]] : tensor<*xf32>
@@ -77,7 +77,7 @@ func.func @tensor.empty() -> tensor<5xf32> {
// CHECK-LABEL: func @tensor.extract(
// CHECK-SAME: %[[TENSOR:.*]]: tensor<?xf32>,
// CHECK-SAME: %[[IDX:.*]]: index) -> f32 {
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<?xf32>
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor<?xf32> to memref<?xf32>
// CHECK: %[[RET:.*]] = memref.load %[[MEMREF]][%[[IDX]]] : memref<?xf32>
// CHECK: return %[[RET]] : f32
// CHECK: }
@@ -199,7 +199,7 @@ func.func @tensor.from_elements_3d(%f0 : f32) -> tensor<3x2x2xf32> {
// CHECK-LABEL: func @tensor.generate(
// CHECK-SAME: %[[ARG:.*]]: tensor<*xf32>,
// CHECK-SAME: %[[DYNAMIC_EXTENT:.*]]: index) -> tensor<?xindex> {
// CHECK-DAG: %[[ARG_M:.*]] = bufferization.to_memref %[[ARG]] : memref<*xf32>
// CHECK-DAG: %[[ARG_M:.*]] = bufferization.to_memref %[[ARG]] : tensor<*xf32> to memref<*xf32>
// CHECK-DAG: %[[ALLOC:.*]] = memref.alloc(%[[DYNAMIC_EXTENT]]) {{.*}} : memref<?xindex>
// CHECK: %[[ALLOC_T:.*]] = bufferization.to_tensor %[[ALLOC]]
// CHECK: %[[MAPPED:.*]] = linalg.map
@@ -266,7 +266,7 @@ func.func @tensor.generate_unknown_ops_in_body(%arg0: index) -> tensor<?xindex>
// CHECK-SAME: %[[t1:.*]]: tensor<?x?xf32>, %[[idx1:.*]]: index, %[[idx2:.*]]: index
func.func @tensor.extract_slice(
%t1: tensor<?x?xf32>, %idx1: index, %idx2: index) -> tensor<?x10xf32> {
// CHECK: %[[m:.*]] = bufferization.to_memref %[[t1]] : memref<?x?xf32>
// CHECK: %[[m:.*]] = bufferization.to_memref %[[t1]] : tensor<?x?xf32> to memref<?x?xf32>
// CHECK: %[[r:.*]] = memref.subview %[[m]][5, %[[idx2]]] [%[[idx1]], 10] [1, 1] : memref<?x?xf32> to memref<?x10xf32, strided<[?, 1], offset: ?>>
%0 = tensor.extract_slice %t1[5, %idx2][%idx1, 10][1, 1]
: tensor<?x?xf32> to tensor<?x10xf32>
@@ -282,7 +282,7 @@ func.func @tensor.extract_slice(
// CHECK-SAME: %[[idx2:.*]]: index
func.func @tensor.extract_slice_rank_reducing(
%t1: tensor<?x10x?xf32>, %idx1: index, %idx2: index) -> tensor<?x15xf32> {
// CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<?x10x?xf32>
// CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor<?x10x?xf32> to memref<?x10x?xf32>
// CHECK: %[[r:.*]] = memref.subview %[[m1]][5, %[[idx1]], 10] [%[[idx2]], 1, 15] [1, 1, 1] : memref<?x10x?xf32> to memref<?x15xf32, strided<[?, 1], offset: ?>>
%0 = tensor.extract_slice %t1[5, %idx1, 10][%idx2, 1, 15][1, 1, 1]
: tensor<?x10x?xf32> to tensor<?x15xf32>
@@ -300,8 +300,8 @@ func.func @tensor.insert_slice(%t1: tensor<?x?xf32>, %t2: tensor<?x10xf32>,
%idx1: index, %idx2: index) -> tensor<?x?xf32> {
// CHECK-DAG: %[[c0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[c1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<?x?xf32>
// CHECK-DAG: %[[m2:.*]] = bufferization.to_memref %[[t2]] : memref<?x10xf32>
// CHECK-DAG: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor<?x?xf32> to memref<?x?xf32>
// CHECK-DAG: %[[m2:.*]] = bufferization.to_memref %[[t2]] : tensor<?x10xf32> to memref<?x10xf32>
// CHECK-DAG: %[[dim0:.*]] = memref.dim %[[m1]], %[[c0]]
// CHECK-DAG: %[[dim1:.*]] = memref.dim %[[m1]], %[[c1]]
// CHECK: %[[alloc:.*]] = memref.alloc(%[[dim0]], %[[dim1]])
@@ -353,7 +353,7 @@ func.func @tensor.insert_slice_rank_reducing_2(
// CHECK-SAME: %[[f:.*]]: f32
func.func @tensor.insert(%t1: tensor<5xf32>, %idx1: index, %f: f32) -> tensor<5xf32> {
// CHECK-DAG: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<5xf32>
// CHECK-DAG: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<5xf32>
// CHECK-DAG: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor<5xf32> to memref<5xf32>
// CHECK: memref.copy %[[m1]], %[[alloc]]
// CHECK: memref.store %[[f]], %[[alloc]][%[[idx1]]]
%0 = tensor.insert %f into %t1[%idx1] : tensor<5xf32>
@@ -368,7 +368,7 @@ func.func @tensor.insert(%t1: tensor<5xf32>, %idx1: index, %f: f32) -> tensor<5x
// CHECK-LABEL: func @tensor.expand_shape(
// CHECK-SAME: %[[t1:.*]]: tensor<?x10xf32>
func.func @tensor.expand_shape(%t1: tensor<?x10xf32>, %sz0: index) -> tensor<2x?x10xf32> {
// CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<?x10xf32>
// CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]]
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = memref.dim %[[m1]], %[[C0]] : memref<?x10xf32>
// CHECK: %[[C2:.*]] = arith.constant 2 : index
@@ -388,7 +388,7 @@ func.func @tensor.expand_shape(%t1: tensor<?x10xf32>, %sz0: index) -> tensor<2x?
// CHECK-SAME: %[[t1:.*]]: tensor<?x20xf32>
func.func @tensor.expand_shape_of_slice(
%t1: tensor<?x20xf32>, %o1: index, %s1: index, %sz0: index) -> tensor<?x7x2x5xf32> {
// CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<?x20xf32>
// CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] :
// CHECK: %[[subview:.*]] = memref.subview %[[m1]][%{{.*}}, 5] [%{{.*}}, 10] [1, 1] : memref<?x20xf32> to memref<?x10xf32, strided<[20, 1], offset: ?>>
%0 = tensor.extract_slice %t1[%o1, 5][%s1, 10][1, 1] :
tensor<?x20xf32> to tensor<?x10xf32>
@@ -408,7 +408,7 @@ func.func @tensor.expand_shape_of_slice(
// CHECK-SAME: %[[t1:.*]]: tensor<?xf32>
func.func @tensor.expand_shape_of_scalar_slice(
%t1: tensor<?xf32>, %o1: index, %s1: index) -> tensor<1xf32> {
// CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<?xf32>
// CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor<?xf32> to memref<?xf32>
// CHECK: %[[subview:.*]] = memref.subview %[[m1]][%{{.*}}] [1] [1] : memref<?xf32> to memref<f32, strided<[], offset: ?>>
%0 = tensor.extract_slice %t1[%o1][1][1] : tensor<?xf32> to tensor<f32>
// CHECK: %[[expanded:.*]] = memref.expand_shape %[[subview]] [] output_shape [1] : memref<f32, strided{{.*}}> into memref<1xf32, strided<[1], offset: ?>>
@@ -423,7 +423,7 @@ func.func @tensor.expand_shape_of_scalar_slice(
// CHECK-LABEL: func @tensor.collapse_shape(
// CHECK-SAME: %[[t1:.*]]: tensor<2x?x?xf32>
func.func @tensor.collapse_shape(%t1: tensor<2x?x?xf32>) -> tensor<?x?xf32> {
// CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<2x?x?xf32>
// CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor<2x?x?xf32> to memref<2x?x?xf32>
// CHECK: %[[collapsed:.*]] = memref.collapse_shape %[[m1]] [
// CHECK-SAME: [0, 1], [2]] : memref<2x?x?xf32> into memref<?x?xf32>
%0 = tensor.collapse_shape %t1 [[0, 1], [2]]
@@ -439,7 +439,7 @@ func.func @tensor.collapse_shape(%t1: tensor<2x?x?xf32>) -> tensor<?x?xf32> {
// CHECK-LABEL: func @tensor.collapse_shape_to_scalar(
// CHECK-SAME: %[[t1:.*]]: tensor<1x1x1xf32>
func.func @tensor.collapse_shape_to_scalar(%t1: tensor<1x1x1xf32>) -> tensor<f32> {
// CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<1x1x1xf32>
// CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor<1x1x1xf32> to memref<1x1x1xf32>
// CHECK: %[[collapsed:.*]] = memref.collapse_shape %[[m1]] [] : memref<1x1x1xf32> into memref<f32>
%0 = tensor.collapse_shape %t1 []
: tensor<1x1x1xf32> into tensor<f32>
@@ -528,7 +528,7 @@ func.func @tensor.collapse_shape_of_slice5(%arg0: tensor<2x2x2xi64>) -> tensor<4
// CHECK-LABEL: func @tensor.reshape(
// CHECK-SAME: %[[t1:.*]]: tensor<?x10xf32>
func.func @tensor.reshape(%t1: tensor<?x10xf32>) -> tensor<2x2x5xf32> {
// CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<?x10xf32>
// CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor<?x10xf32> to memref<?x10xf32>

// CHECK: %[[two:.*]] = arith.constant 2 : i64
%two = arith.constant 2 : i64
@@ -560,7 +560,7 @@ func.func @tensor.reshape(%t1: tensor<?x10xf32>) -> tensor<2x2x5xf32> {
// CHECK-SAME: %[[t1:.*]]: tensor<?x10xindex>, %[[l2:.*]]: index, %[[h1:.*]]: index, %[[h2:.*]]: index
func.func @tensor.pad(%t1: tensor<?x10xindex>, %l2: index, %h1: index,
%h2: index) -> tensor<?x?xindex> {
// CHECK-DAG: %[[m1:.*]] = bufferization.to_memref %[[t1]] : memref<?x10xindex>
// CHECK-DAG: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor<?x10xindex> to memref<?x10xindex>
// CHECK-DAG: %[[c0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[c1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[dim0:.*]] = memref.dim %[[m1]], %[[c0]]

mlir/test/Dialect/Tensor/one-shot-bufferize-encodings.mlir (new file)
@@ -0,0 +1,20 @@
// RUN: mlir-opt %s -one-shot-bufferize="use-encoding-for-memory-space" -split-input-file | FileCheck %s

func.func @from_elements(%fill: f32, %f: f32, %idx: index) -> tensor<3xf32, 1> {
%t = tensor.from_elements %fill, %fill, %fill : tensor<3xf32, 1>
%i = tensor.insert %f into %t[%idx] : tensor<3xf32, 1>
return %i : tensor<3xf32, 1>
}

// CHECK-LABEL: @from_elements
// CHECK-SAME: (%[[arg0:.+]]: f32, %[[arg1:.+]]: f32, %[[arg2:.+]]: index) -> tensor<3xf32, 1 : i64>
// CHECK: %[[alloc:.+]] = memref.alloc() {{.*}} : memref<3xf32, 1>
// CHECK-DAG: %[[c0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[c1:.+]] = arith.constant 1 : index
// CHECK-DAG: %[[c2:.+]] = arith.constant 2 : index
// CHECK: memref.store %[[arg0]], %[[alloc]][%[[c0]]] : memref<3xf32, 1>
// CHECK: memref.store %[[arg0]], %[[alloc]][%[[c1]]] : memref<3xf32, 1>
// CHECK: memref.store %[[arg0]], %[[alloc]][%[[c2]]] : memref<3xf32, 1>
// CHECK: memref.store %[[arg1]], %[[alloc]][%[[arg2]]] : memref<3xf32, 1>
// CHECK: %[[v0:.+]] = bufferization.to_tensor %[[alloc]] : memref<3xf32, 1> to tensor<3xf32, 1 : i64>
// CHECK: return %[[v0]] : tensor<3xf32, 1 : i64>
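
A side note on the new test above: the explicit source and result types are what make this case work under `use-encoding-for-memory-space`, since the pass maps the integer encoding on the tensor type to the memref memory space, and that encoding cannot be inferred back from the memref type alone. Below is a minimal sketch, not taken from this commit (the function name is illustrative), of the round-trip the new syntax enables:

```mlir
// Hypothetical example: the buffer lives in memory space 1, and the tensor's
// encoding (1 : i64) records that fact explicitly in the result type.
func.func @roundtrip(%m: memref<4xf32, 1>) -> tensor<4xf32, 1 : i64> {
  // Without the explicit result type, type inference would produce a tensor
  // with no encoding, and the memory space information would be lost.
  %t = bufferization.to_tensor %m restrict : memref<4xf32, 1> to tensor<4xf32, 1 : i64>
  return %t : tensor<4xf32, 1 : i64>
}
```
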
@@ -402,7 +402,7 @@ func.func @tensor.reshape() -> tensor<2x2x5xf32> {
// CHECK-SAME: %[[INPUT:[a-zA-Z0-9]*]]: memref<2x2xf32, strided<[?, ?], offset: ?>, 3>,
// CHECK-SAME: %[[LAYOUT:[a-zA-Z0-9]*]]: memref<2xi32, strided<[?], offset: ?>>,
func.func @reshape_with_non_identity_layout(%arg0: memref<2x2xf32, strided<[?, ?], offset: ?>, 3>, %arg1: tensor<2xi32>, %idx: index) -> f32 {
%t = bufferization.to_tensor %arg0 restrict : memref<2x2xf32, strided<[?, ?], offset: ?>, 3>
%t = bufferization.to_tensor %arg0 restrict : memref<2x2xf32, strided<[?, ?], offset: ?>, 3> to tensor<2x2xf32>

// CHECK: %[[SUBVIEW:.+]] = memref.subview %[[INPUT]][1, 0] [1, 2] [1, 1] : memref<2x2xf32, strided<[?, ?], offset: ?>, 3> to memref<2xf32, strided<[?], offset: ?>, 3>
%extracted_slice = tensor.extract_slice %t[1, 0] [1, 2] [1, 1] : tensor<2x2xf32> to tensor<2xf32>

@@ -2,7 +2,7 @@

// CHECK-LABEL: func @transfer_read(
// CHECK-SAME: %[[t:.*]]: tensor<?x?xf32>, %[[o1:.*]]: index, %[[o2:.*]]: index, %[[pad:.*]]: f32)
// CHECK: %[[m:.*]] = bufferization.to_memref %[[t]] : memref<?x?xf32>
// CHECK: %[[m:.*]] = bufferization.to_memref %[[t]] : tensor<?x?xf32> to memref<?x?xf32>
// CHECK: %[[r:.*]] = vector.transfer_read %[[m]][%[[o1]], %[[o2]]], %[[pad]] {in_bounds = [true, false]} : memref<?x?xf32>, vector<5x6xf32>
// CHECK: return %[[r]]
func.func @transfer_read(%t: tensor<?x?xf32>, %o1: index,
@@ -16,7 +16,7 @@ func.func @transfer_read(%t: tensor<?x?xf32>, %o1: index,

// CHECK-LABEL: func @transfer_write(
// CHECK-SAME: %[[t:.*]]: tensor<?x?xf32>, %[[o1:.*]]: index, %[[o2:.*]]: index, %[[vec:.*]]: vector<5x6xf32>, %[[mask:.*]]: vector<5x6xi1>)
// CHECK: %[[m:.*]] = bufferization.to_memref %[[t]] : memref<?x?xf32>
// CHECK: %[[m:.*]] = bufferization.to_memref %[[t]] : tensor<?x?xf32> to memref<?x?xf32>
// CHECK: %[[alloc:.*]] = memref.alloc(%{{.*}}, %{{.*}}) {{.*}} : memref<?x?xf32>
// CHECK: memref.copy %[[m]], %[[alloc]]
// CHECK: vector.transfer_write %[[vec]], %[[alloc]][%[[o1]], %[[o2]]], %[[mask]] {in_bounds = [true, false]} : vector<5x6xf32>, memref<?x?xf32>
@@ -35,7 +35,7 @@ func.func @transfer_write(%t: tensor<?x?xf32>, %o1: index,
// CHECK-LABEL: func @gather(
// CHECK-SAME: %[[base:.*]]: tensor<?x?xf32>, %[[v:.*]]: vector<16xi32>,
// CHECK-SAME: %[[mask:.*]]: vector<16xi1>, %[[pass_thru:.*]]: vector<16xf32>)
// CHECK: %[[m:.*]] = bufferization.to_memref %[[base]] : memref<?x?xf32>
// CHECK: %[[m:.*]] = bufferization.to_memref %[[base]] : tensor<?x?xf32> to memref<?x?xf32>
// CHECK: %[[c0:.*]] = arith.constant 0 : index
// CHECK: %[[out:.*]] = vector.gather %[[m]][%[[c0]], %[[c0]]] [%[[v]]], %[[mask]], %[[pass_thru]] : memref<?x?xf32>, vector<16xi32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
func.func @gather(%base: tensor<?x?xf32>, %v: vector<16xi32>, %mask: vector<16xi1>, %pass_thru: vector<16xf32>) -> vector<16xf32> {

@@ -47,7 +47,7 @@ module {

// Call the kernel with a vector taken from global memory.
%xbuf = memref.get_global @__constant_64xf64 : memref<64xf64>
%x = bufferization.to_tensor %xbuf restrict : memref<64xf64>
%x = bufferization.to_tensor %xbuf restrict : memref<64xf64> to tensor<64xf64>
%0 = call @matvec(%A, %x, %y) : (tensor<1024x64xf64, #CSR>, tensor<64xf64>, tensor<1024xf64>) -> tensor<1024xf64>

//

@@ -2,7 +2,7 @@
// NOTE: this test requires gpu-sm80
//
// RUN: mlir-opt \
// RUN: --pass-pipeline="builtin.module(gpu.module(strip-debuginfo,convert-gpu-to-nvvm,convert-nvgpu-to-nvvm,affine-expand-index-ops,lower-affine,convert-arith-to-llvm),convert-vector-to-llvm,canonicalize,cse)" \
// RUN: --pass-pipeline="builtin.module(gpu.module(strip-debuginfo,convert-gpu-to-nvvm,convert-nvgpu-to-nvvm,affine-expand-index-ops,lower-affine,arith-expand,convert-arith-to-llvm),convert-vector-to-llvm,canonicalize,cse)" \
// RUN: %s \
// RUN: | mlir-opt --gpu-lower-to-nvvm-pipeline="cubin-chip=sm_80 cubin-features=+ptx71 cubin-format=%gpu_compilation_format" \
// RUN: | mlir-cpu-runner \

@@ -54,7 +54,7 @@ func.func @main() {
%result_static = func.call @max_pool_static(%A) : (!tensor_type) -> !tensor_type
%result_dynamic = func.call @max_pool_dynamic(%A_dynamic) : (tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32>

%static_buffer = bufferization.to_memref %result_static : !memref_type
%static_buffer = bufferization.to_memref %result_static : !tensor_type to !memref_type
%unranked_static_buffer = memref.cast %static_buffer : !memref_type to memref<*xf32>

// CHECK: Unranked Memref base@ = {{.*}} rank = 4 offset = 0 sizes = [1, 4, 4, 1] strides = [16, 4, 1, 1] data =
@@ -81,7 +81,7 @@ func.func @main() {

func.call @printMemrefF32(%unranked_static_buffer) : (memref<*xf32>) -> ()

%dynamic_buffer = bufferization.to_memref %result_dynamic : memref<?x?x?x?xf32>
%dynamic_buffer = bufferization.to_memref %result_dynamic : tensor<?x?x?x?xf32> to memref<?x?x?x?xf32>
%unranked_dynamic_buffer = memref.cast %dynamic_buffer : memref<?x?x?x?xf32> to memref<*xf32>

// CHECK: Unranked Memref base@ = {{.*}} rank = 4 offset = 0 sizes = [1, 4, 4, 1] strides = [16, 4, 1, 1] data =

@@ -100,8 +100,8 @@ func.func @entry() -> i32 {
]> : tensor<16x32xbf16>

// Set up memory.
%a = bufferization.to_memref %0 : memref<16x32xbf16>
%b = bufferization.to_memref %1 : memref<16x32xbf16>
%a = bufferization.to_memref %0 : tensor<16x32xbf16> to memref<16x32xbf16>
%b = bufferization.to_memref %1 : tensor<16x32xbf16> to memref<16x32xbf16>
%c = memref.alloc() : memref<16x16xf32>

// Call kernel.

@@ -100,8 +100,8 @@ func.func @entry() -> i32 {
]> : tensor<16x64xi8>

// Set up memory.
%a = bufferization.to_memref %0 : memref<16x64xi8>
%b = bufferization.to_memref %1 : memref<16x64xi8>
%a = bufferization.to_memref %0 : tensor<16x64xi8> to memref<16x64xi8>
%b = bufferization.to_memref %1 : tensor<16x64xi8> to memref<16x64xi8>
%c = memref.alloc() : memref<16x16xi32>

// Call kernel.

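The integration tests above all follow the same pattern when printing results: bufferize the tensor explicitly, cast the ranked memref to an unranked one, and hand it to the runtime printer. A minimal standalone sketch of that pattern with the updated syntax (the `@dump` wrapper is illustrative and not part of the commit; `@printMemrefF32` is the runtime helper these tests already use):

```mlir
func.func private @printMemrefF32(memref<*xf32>)

// Illustrative only: materialize a tensor's buffer and print its contents.
func.func @dump(%t: tensor<4xf32>) {
  %buf = bufferization.to_memref %t : tensor<4xf32> to memref<4xf32>
  %unranked = memref.cast %buf : memref<4xf32> to memref<*xf32>
  func.call @printMemrefF32(%unranked) : (memref<*xf32>) -> ()
  return
}
```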