Mirror of https://github.com/intel/compute-runtime.git, synced 2025-12-25 13:33:02 +08:00
Revert "fix: use condition variables instead of busy waits in worker threads"
This reverts commit 452475a0b9.
Signed-off-by: Compute-Runtime-Validation <compute-runtime-validation@intel.com>
Committed by: Compute-Runtime-Automation
Parent: cf21cbc910
Commit: 6736378c4d
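
Background for the diff below: the reverted change had the UnifiedMemoryReuseCleaner worker thread (and the SVM allocation caches feeding it) coordinate through a mutex plus std::condition_variable instead of waking up on a fixed period to poll for work. The sketch below only illustrates the two wake-up strategies and is not code from this repository; every name in it (WorkerSync, busyWaitWorker, condVarWorker, workPending) is made up for the example.

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>

struct WorkerSync {
    std::mutex mutex;
    std::condition_variable condVar;
    std::atomic_bool keepRunning{true};
    std::atomic_bool workPending{false};
};

// Busy-wait style (what this revert restores): wake on a fixed period and poll,
// even when there is nothing to do.
void busyWaitWorker(WorkerSync &sync) {
    while (sync.keepRunning.load()) {
        if (sync.workPending.exchange(false)) {
            // process work here
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(15));
    }
}

// Condition-variable style (what the reverted commit introduced): sleep until a
// producer calls notify_one(), so an idle worker consumes no CPU between jobs.
void condVarWorker(WorkerSync &sync) {
    while (true) {
        std::unique_lock<std::mutex> lock(sync.mutex);
        sync.condVar.wait(lock, [&] { return sync.workPending.load() || !sync.keepRunning.load(); });
        if (!sync.keepRunning.load()) {
            return;
        }
        sync.workPending.store(false);
        lock.unlock();
        // process work here
    }
}

A producer sets workPending and calls condVar.notify_one() under the mutex; to stop either worker, clear keepRunning (and, for the condition-variable version, notify) before joining the thread.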
@@ -67,10 +67,6 @@ bool SVMAllocsManager::SvmAllocationCache::insert(size_t size, void *ptr, SvmAll
         return false;
     }
 
-    std::unique_lock<std::mutex> cleanerLock;
-    if (cleanerSyncData) {
-        cleanerLock = std::unique_lock<std::mutex>(cleanerSyncData->mutex);
-    }
     std::lock_guard<std::mutex> lock(this->mtx);
     if (svmData->device ? svmData->device->shouldLimitAllocationsReuse() : memoryManager->shouldLimitAllocationsReuse()) {
         return false;
@@ -111,9 +107,6 @@ bool SVMAllocsManager::SvmAllocationCache::insert(size_t size, void *ptr, SvmAll
                                .operationType = CacheOperationType::insert,
                                .isSuccess = isSuccess});
     }
-    if (cleanerSyncData) {
-        cleanerSyncData->condVar.notify_one();
-    }
     return isSuccess;
 }
 
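
The two hunks above are the producer side of the reverted scheme: SvmAllocationCache::insert() locked the cleaner's shared sync object before publishing a new cache entry and notified its condition variable afterwards, so an idle cleaner thread would wake as soon as there was something to trim. ConditionVarSyncData itself is not defined anywhere in this diff; judging from the members the removed lines touch (mutex, condVar), it presumably looks roughly like the hypothetical reconstruction below. The notifyCleaner helper is likewise invented for illustration.

#include <condition_variable>
#include <mutex>

// Hypothetical reconstruction: only the two members referenced by the removed
// code are shown; the real definition may contain more.
struct ConditionVarSyncData {
    std::mutex mutex;
    std::condition_variable condVar;
};

// Producer-side pattern mirrored (in simplified form) by the removed insert()
// code: take the shared mutex while publishing, then wake the sleeping cleaner.
inline void notifyCleaner(ConditionVarSyncData *cleanerSyncData) {
    if (cleanerSyncData == nullptr) {
        return;
    }
    std::lock_guard<std::mutex> cleanerLock(cleanerSyncData->mutex);
    cleanerSyncData->condVar.notify_one();
}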
@@ -22,7 +22,6 @@
 
 #include <atomic>
 #include <chrono>
-#include <condition_variable>
 #include <cstdint>
 #include <map>
 #include <memory>
@@ -35,7 +34,6 @@ class GraphicsAllocation;
 class MemoryManager;
 class Device;
 struct VirtualMemoryReservation;
-struct ConditionVarSyncData;
 
 struct SvmAllocationData : NEO::NonCopyableAndNonMovableClass {
     SvmAllocationData(uint32_t maxRootDeviceIndex) : gpuAllocations(maxRootDeviceIndex), maxRootDeviceIndex(maxRootDeviceIndex){};
@@ -199,9 +197,6 @@ class SVMAllocsManager {
             bool isSuccess;
         };
 
-        ConditionVarSyncData *cleanerSyncData = nullptr;
-        void setCleanerSyncData(ConditionVarSyncData *syncData) { cleanerSyncData = syncData; }
-
         static constexpr size_t maxServicedSize = 256 * MemoryConstants::megaByte;
         static constexpr size_t minimalSizeToCheckUtilization = 4 * MemoryConstants::pageSize64k;
         static constexpr double minimalAllocUtilization = 0.5;
@@ -213,7 +208,6 @@ class SVMAllocsManager {
         static bool allocUtilizationAllows(size_t requestedSize, size_t reuseCandidateSize);
         static bool alignmentAllows(void *ptr, size_t alignment);
         bool isInUse(SvmCacheAllocationInfo &cacheAllocInfo);
-        bool isEmpty() { return allocations.empty(); }
         void *get(size_t size, const UnifiedMemoryProperties &unifiedMemoryProperties);
         void trim();
         void trimOldAllocs(std::chrono::high_resolution_clock::time_point trimTimePoint, bool trimAll);
@@ -25,10 +25,6 @@ UnifiedMemoryReuseCleaner::~UnifiedMemoryReuseCleaner() {
 void UnifiedMemoryReuseCleaner::stopThread() {
     keepCleaning.store(false);
     runCleaning.store(false);
-    {
-        std::lock_guard<std::mutex> lock(syncData.mutex);
-        syncData.condVar.notify_one();
-    }
     if (unifiedMemoryReuseCleanerThread) {
         unifiedMemoryReuseCleanerThread->join();
         unifiedMemoryReuseCleanerThread.reset();
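
The stopThread() hunk above is the shutdown path: the removed block takes the sync mutex and notifies the condition variable so a cleaner blocked in wait() observes keepCleaning == false and exits promptly; after the revert, flipping the atomics is enough because the polling loop re-checks them every sleep period. A generic sketch of that stop sequence follows (illustration only, hypothetical names, not NEO code):

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>

// Generic stop sequence for a condition-variable based worker: flip the flag,
// notify under the same mutex the worker waits on, then join the thread.
void stopWorker(std::thread &worker, std::mutex &mutex,
                std::condition_variable &condVar, std::atomic_bool &keepRunning) {
    keepRunning.store(false);
    {
        std::lock_guard<std::mutex> lock(mutex);
        condVar.notify_one();
    }
    if (worker.joinable()) {
        worker.join();
    }
}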
@@ -41,28 +37,21 @@ void *UnifiedMemoryReuseCleaner::cleanUnifiedMemoryReuse(void *self) {
         if (!cleaner->keepCleaning.load()) {
             return nullptr;
         }
         std::this_thread::yield();
         NEO::sleep(sleepTime);
     }
 
     while (true) {
         if (!cleaner->keepCleaning.load()) {
             return nullptr;
         }
-        std::unique_lock lock(cleaner->syncData.mutex);
-        if (cleaner->isEmpty()) {
-            cleaner->syncData.condVar.wait(lock);
-        }
-
         NEO::sleep(sleepTime);
         cleaner->trimOldInCaches();
     }
 }
 
 void UnifiedMemoryReuseCleaner::registerSvmAllocationCache(SvmAllocationCache *cache) {
-    {
-        std::lock_guard<std::mutex> lockSvmAllocationCaches(this->svmAllocationCachesMutex);
-        this->svmAllocationCaches.push_back(cache);
-    }
-    cache->setCleanerSyncData(&syncData);
+    std::lock_guard<std::mutex> lockSvmAllocationCaches(this->svmAllocationCachesMutex);
+    this->svmAllocationCaches.push_back(cache);
     this->startCleaning();
 }
 
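
On the consumer side, the hunk above shows the removed cleaner loop taking the sync mutex and blocking on the condition variable whenever every registered cache is empty, instead of unconditionally sleeping between passes. The generic idiom it approximates is a predicate wait, sketched below (illustration only, hypothetical names, not NEO code); the predicate form re-checks the condition on each wake-up, which also covers spurious wakeups.

#include <condition_variable>
#include <mutex>

// Generic consumer-side sketch: block until there is work to do or shutdown is
// requested. wait() with a predicate re-evaluates the condition every time the
// thread wakes, so a spurious wakeup simply goes back to sleep.
template <typename HasWorkFn, typename KeepRunningFn>
void waitForWork(std::mutex &mutex, std::condition_variable &condVar,
                 HasWorkFn hasWork, KeepRunningFn keepRunning) {
    std::unique_lock<std::mutex> lock(mutex);
    condVar.wait(lock, [&] { return hasWork() || !keepRunning(); });
}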
@@ -94,4 +83,4 @@ void UnifiedMemoryReuseCleaner::startThread() {
     this->unifiedMemoryReuseCleanerThread = Thread::createFunc(cleanUnifiedMemoryReuse, reinterpret_cast<void *>(this));
 }
 
-} // namespace NEO
+} // namespace NEO
@@ -8,10 +8,8 @@
 #pragma once
 
 #include "shared/source/helpers/non_copyable_or_moveable.h"
 #include "shared/source/helpers/sleep.h"
 #include "shared/source/memory_manager/unified_memory_manager.h"
 
-#include <algorithm>
 #include <chrono>
 #include <memory>
 #include <mutex>
@@ -35,10 +33,6 @@ class UnifiedMemoryReuseCleaner : NEO::NonCopyableAndNonMovableClass {
 
     void registerSvmAllocationCache(SvmAllocationCache *cache);
     void unregisterSvmAllocationCache(SvmAllocationCache *cache);
-    MOCKABLE_VIRTUAL bool isEmpty() {
-        std::unique_lock<std::mutex> lock(svmAllocationCachesMutex);
-        return std::all_of(svmAllocationCaches.begin(), svmAllocationCaches.end(), [](const auto &it) { return it->isEmpty(); });
-    }
 
   protected:
     void startCleaning() { runCleaning.store(true); };
@@ -48,7 +42,6 @@ class UnifiedMemoryReuseCleaner : NEO::NonCopyableAndNonMovableClass {
 
     std::vector<SvmAllocationCache *> svmAllocationCaches;
    std::mutex svmAllocationCachesMutex;
-    ConditionVarSyncData syncData;
 
     std::atomic_bool runCleaning = false;
     std::atomic_bool keepCleaning = true;