Add memory-backed buffer allocation for L0 ray tracing.

This allocates the buffer on a per-device basis and enables ray
tracing on devices that support it when a kernel containing ray
tracing calls is encountered.
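
As a rough illustration of the flow described above (a minimal sketch with placeholder types and names such as enableRayTracingIfNeeded and rtMemoryBackedBuffer; the actual driver classes and entry points are not shown in this commit message):

#include <cstddef>
#include <memory>

// Hypothetical, simplified stand-ins for the driver's device and kernel
// metadata; the real NEO classes are considerably richer.
struct KernelInfo {
    bool hasRayTracingCalls = false;
};

struct Device {
    bool supportsRayTracing = false;              // from the HW capability table
    std::unique_ptr<char[]> rtMemoryBackedBuffer; // one buffer per device
    size_t rtBufferSize = 0;

    // Allocate the memory-backed buffer lazily, at most once per device.
    void initializeRayTracing(size_t size) {
        if (!rtMemoryBackedBuffer) {
            rtMemoryBackedBuffer = std::make_unique<char[]>(size);
            rtBufferSize = size;
        }
    }
};

// Enable ray tracing only when the device supports it and the kernel
// being handled actually contains ray tracing calls.
void enableRayTracingIfNeeded(Device &device, const KernelInfo &kernelInfo, size_t fifoSize) {
    if (device.supportsRayTracing && kernelInfo.hasRayTracingCalls) {
        device.initializeRayTracing(fifoSize);
    }
}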

Signed-off-by: Jim Snow <jim.m.snow@intel.com>
Author: Jim Snow
Date: 2021-02-26 22:02:57 +00:00
Committed by: Compute-Runtime-Automation
Parent: 135ac74c2c
Commit: 2acc0fb3f6
35 changed files with 308 additions and 17 deletions

@@ -0,0 +1,24 @@
/*
 * Copyright (C) 2020-2021 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 *
 */

#include "shared/source/helpers/constants.h"
#include "shared/source/helpers/ray_tracing_helper.h"
#include "shared/test/common/mocks/mock_device.h"

#include "opencl/test/unit_test/mocks/mock_cl_device.h"
#include "opencl/test/unit_test/mocks/mock_context.h"

#include "test.h"

using namespace NEO;

TEST(RayTracingHelperTests, whenMemoryBackedFifoSizeIsRequestedThenCorrectValueIsReturned) {
    MockDevice device;
    size_t size = RayTracingHelper::getTotalMemoryBackedFifoSize(device);
    size_t expectedSize = device.getHardwareInfo().gtSystemInfo.DualSubSliceCount * RayTracingHelper::memoryBackedFifoSizePerDss;
    EXPECT_EQ(expectedSize, size);
}
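
For reference, the expected-value computation in the test above implies a sizing rule along these lines (a sketch; memoryBackedFifoSizePerDss is a constant defined in RayTracingHelper, and the value used here is a placeholder, not the driver's):

#include <cstddef>

// Placeholder constant; the real per-DSS FIFO size is defined in
// RayTracingHelper and is not shown in this diff.
constexpr size_t memoryBackedFifoSizePerDss = 4 * 1024;

// Total memory-backed FIFO size scales linearly with the number of
// dual subslices (DSS) reported by gtSystemInfo.
size_t getTotalMemoryBackedFifoSize(size_t dualSubSliceCount) {
    return dualSubSliceCount * memoryBackedFifoSizePerDss;
}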