Merge "Don't hold lock during hotplug callback"
diff --git a/system/hwc3/Layer.h b/system/hwc3/Layer.h
index 7d6a9ed..4d100a1 100644
--- a/system/hwc3/Layer.h
+++ b/system/hwc3/Layer.h
@@ -31,8 +31,8 @@
Layer(const Layer&) = delete;
Layer& operator=(const Layer&) = delete;
- Layer(Layer&&) = default;
- Layer& operator=(Layer&&) = default;
+ Layer(Layer&&) = delete;
+ Layer& operator=(Layer&&) = delete;
int64_t getId() const { return mId; }
@@ -113,4 +113,4 @@
} // namespace aidl::android::hardware::graphics::composer3::impl
-#endif
\ No newline at end of file
+#endif
diff --git a/system/hwc3/VsyncThread.h b/system/hwc3/VsyncThread.h
index 5738b5d..ffa859f 100644
--- a/system/hwc3/VsyncThread.h
+++ b/system/hwc3/VsyncThread.h
@@ -36,11 +36,11 @@
VsyncThread(int64_t id);
virtual ~VsyncThread();
- VsyncThread(const VsyncThread&) = default;
- VsyncThread& operator=(const VsyncThread&) = default;
+ VsyncThread(const VsyncThread&) = delete;
+ VsyncThread& operator=(const VsyncThread&) = delete;
- VsyncThread(VsyncThread&&) = default;
- VsyncThread& operator=(VsyncThread&&) = default;
+ VsyncThread(VsyncThread&&) = delete;
+ VsyncThread& operator=(VsyncThread&&) = delete;
HWC3::Error start(int32_t periodNanos);
@@ -84,4 +84,4 @@
} // namespace aidl::android::hardware::graphics::composer3::impl
-#endif
\ No newline at end of file
+#endif
diff --git a/system/vulkan_enc/CommandBufferStagingStream.cpp b/system/vulkan_enc/CommandBufferStagingStream.cpp
index 0890185..8487bcd 100644
--- a/system/vulkan_enc/CommandBufferStagingStream.cpp
+++ b/system/vulkan_enc/CommandBufferStagingStream.cpp
@@ -33,127 +33,130 @@
static const size_t kReadSize = 512 * 1024;
static const size_t kWriteOffset = kReadSize;
-CommandBufferStagingStream::CommandBufferStagingStream(Alloc&& allocFn, Free&& freeFn)
- : IOStream(1048576),
- m_buf(nullptr),
- m_size(0),
- m_writePos(0),
- m_customAlloc(allocFn),
- m_customFree(freeFn) {
- // custom allocator/free
- if (allocFn && freeFn) {
- m_usingCustomAlloc = true;
- // for custom allocation, allocate metadata memory at the beginning.
- // m_alloc, m_free and m_realloc wraps sync data logic
-
- // \param size to allocate
- // \return ptr starting at data
- m_alloc = [this](size_t size) -> void* {
- // allocation requested size + sync data size
-
- // <---sync bytes--><----Data--->
- // |———————————————|————————————|
- // |0|1|2|3|4|5|6|7|............|
- // |———————————————|————————————|
- // ꜛ ꜛ
- // allocated ptr ptr to data [dataPtr]
- const size_t totalSize = size + kSyncDataSize;
-
- unsigned char* dataPtr = static_cast<unsigned char*>(m_customAlloc(totalSize));
- if (!dataPtr) {
- ALOGE("Custom allocation (%zu bytes) failed\n", size);
- return nullptr;
- }
-
- // set DWORD sync data to 0
- *(reinterpret_cast<uint32_t*>(dataPtr)) = kSyncDataReadComplete;
-
- // pointer for data starts after sync data
- dataPtr += kSyncDataSize;
-
- return dataPtr;
+CommandBufferStagingStream::CommandBufferStagingStream()
+ : IOStream(1048576), m_size(0), m_writePos(0) {
+ // use default allocators
+ m_alloc = [](size_t size) -> Memory {
+ return {
+ .deviceMemory = VK_NULL_HANDLE, // no device memory for malloc
+ .ptr = malloc(size),
};
+ };
+ m_free = [](const Memory& mem) { free(mem.ptr); };
+ m_realloc = [](const Memory& mem, size_t size) -> Memory {
+ return {.deviceMemory = VK_NULL_HANDLE, .ptr = realloc(mem.ptr, size)};
+ };
+}
- // Free freeMemory(freeFn);
- // \param dataPtr to free
- m_free = [this](void* dataPtr) {
- // for custom allocation/free, memory holding metadata must be freed
- // <---sync byte---><----Data--->
- // |———————————————|————————————|
- // |0|1|2|3|4|5|6|7|............|
- // |———————————————|————————————|
- // ꜛ ꜛ
- // ptr to free ptr to data [dataPtr]
- unsigned char* toFreePtr = static_cast<unsigned char*>(dataPtr);
- toFreePtr -= kSyncDataSize;
- m_customFree(toFreePtr);
- };
+CommandBufferStagingStream::CommandBufferStagingStream(const Alloc& allocFn, const Free& freeFn)
+ : CommandBufferStagingStream() {
+ m_usingCustomAlloc = true;
+ // for custom allocation, allocate metadata memory at the beginning.
+ // m_alloc, m_free and m_realloc wraps sync data logic
- // \param ptr is the data pointer currently allocated
- // \return dataPtr starting at data
- m_realloc = [this](void* ptr, size_t size) -> void* {
- // realloc requires freeing previously allocated memory
- // read sync DWORD to ensure host is done reading this memory
- // before releasing it.
+ // \param size to allocate
+ // \return ptr starting at data
+ m_alloc = [&allocFn, this](size_t size) -> Memory {
+ // allocation requested size + sync data size
- size_t hostWaits = 0;
- unsigned char* syncDataStart = static_cast<unsigned char*>(ptr) - kSyncDataSize;
- uint32_t* syncDWordPtr = reinterpret_cast<uint32_t*>(syncDataStart);
+ // <---sync bytes--><----Data--->
+ // |———————————————|————————————|
+ // |0|1|2|3|4|5|6|7|............|
+ // |———————————————|————————————|
+ // ꜛ ꜛ
+ // allocated ptr ptr to data [dataPtr]
- while (__atomic_load_n(syncDWordPtr, __ATOMIC_ACQUIRE) != kSyncDataReadComplete) {
- hostWaits++;
- usleep(10);
- if (hostWaits > 1000) {
- ALOGD("%s: warning, stalled on host decoding on this command buffer stream\n",
- __func__);
- }
+ Memory memory;
+ if (!allocFn) {
+ ALOGE("Custom allocation (%zu bytes) failed\n", size);
+ return memory;
+ }
+
+ // custom allocation/free requires metadata for sync between host/guest
+ const size_t totalSize = size + kSyncDataSize;
+ memory = allocFn(totalSize);
+ if (!memory.ptr) {
+ ALOGE("Custom allocation (%zu bytes) failed\n", size);
+ return memory;
+ }
+
+ // set sync data to read complete
+ uint32_t* syncDWordPtr = reinterpret_cast<uint32_t*>(memory.ptr);
+ __atomic_store_n(syncDWordPtr, kSyncDataReadComplete, __ATOMIC_RELEASE);
+ return memory;
+ };
+
+ m_free = [&freeFn](const Memory& mem) {
+ if (!freeFn) {
+ ALOGE("Custom free for memory(%p) failed\n", mem.ptr);
+ return;
+ }
+ freeFn(mem);
+ };
+
+ // \param ptr is the data pointer currently allocated
+ // \return dataPtr starting at data
+ m_realloc = [this](const Memory& mem, size_t size) -> Memory {
+ // realloc requires freeing previously allocated memory
+ // read sync DWORD to ensure host is done reading this memory
+ // before releasing it.
+
+ size_t hostWaits = 0;
+
+ uint32_t* syncDWordPtr = reinterpret_cast<uint32_t*>(mem.ptr);
+ while (__atomic_load_n(syncDWordPtr, __ATOMIC_ACQUIRE) != kSyncDataReadComplete) {
+ hostWaits++;
+ usleep(10);
+ if (hostWaits > 1000) {
+ ALOGD("%s: warning, stalled on host decoding on this command buffer stream\n",
+ __func__);
}
+ }
- // for custom allocation/free, memory holding metadata must be copied
- // along with stream data
- // <---sync byte---><----Data--->
- // |———————————————|————————————|
- // |0|1|2|3|4|5|6|7|............|
- // |———————————————|————————————|
- // ꜛ ꜛ
- // [copyLocation] ptr to data [ptr]
+ // for custom allocation/free, memory holding metadata must be copied
+ // along with stream data
+ // <---sync bytes--><----Data--->
+ // |———————————————|————————————|
+ // |0|1|2|3|4|5|6|7|............|
+ // |———————————————|————————————|
+ // ꜛ ꜛ
+ // [copyLocation] ptr to data [ptr]
- const size_t toCopySize = m_writePos + kSyncDataSize;
- unsigned char* copyLocation = static_cast<unsigned char*>(ptr) - kSyncDataSize;
- std::vector<uint8_t> tmp(copyLocation, copyLocation + toCopySize);
- m_free(ptr);
+ const size_t toCopySize = m_writePos + kSyncDataSize;
+ unsigned char* copyLocation = static_cast<unsigned char*>(mem.ptr);
+ std::vector<uint8_t> tmp(copyLocation, copyLocation + toCopySize);
+ m_free(mem);
- // get new buffer and copy previous stream data to it
- unsigned char* newBuf = static_cast<unsigned char*>(m_alloc(size));
- if (!newBuf) {
- ALOGE("Custom allocation (%zu bytes) failed\n", size);
- return nullptr;
- }
- // custom allocator will allocate space for metadata too
- // copy previous metadata too
- memcpy(newBuf - kSyncDataSize, tmp.data(), toCopySize);
+ // get new buffer and copy previous stream data to it
+ Memory newMemory = m_alloc(size);
+ unsigned char* newBuf = static_cast<unsigned char*>(newMemory.ptr);
+ if (!newBuf) {
+ ALOGE("Custom allocation (%zu bytes) failed\n", size);
+ return newMemory;
+ }
+ // copy previous data
+ memcpy(newBuf, tmp.data(), toCopySize);
- return newBuf;
- };
- } else {
- // use default allocators
- m_alloc = [](size_t size) { return malloc(size); };
- m_free = [](void* ptr) { free(ptr); };
- m_realloc = [](void* ptr, size_t size) { return realloc(ptr, size); };
- }
+ return newMemory;
+ };
}
CommandBufferStagingStream::~CommandBufferStagingStream() {
flush();
- if (m_buf) m_free(m_buf);
+ if (m_mem.ptr) m_free(m_mem);
+}
+
+unsigned char* CommandBufferStagingStream::getDataPtr() {
+ if (!m_mem.ptr) return nullptr;
+ const size_t metadataSize = m_usingCustomAlloc ? kSyncDataSize : 0;
+ return static_cast<unsigned char*>(m_mem.ptr) + metadataSize;
}
void CommandBufferStagingStream::markFlushing() {
if (!m_usingCustomAlloc) {
return;
}
- // mark read of stream buffer as pending
- uint32_t* syncDWordPtr = reinterpret_cast<uint32_t*>(m_buf - kSyncDataSize);
+ uint32_t* syncDWordPtr = reinterpret_cast<uint32_t*>(m_mem.ptr);
__atomic_store_n(syncDWordPtr, kSyncDataReadPending, __ATOMIC_RELEASE);
}
@@ -165,10 +168,10 @@
void* CommandBufferStagingStream::allocBuffer(size_t minSize) {
size_t allocSize = (1048576 < minSize ? minSize : 1048576);
// Initial case: blank
- if (!m_buf) {
- m_buf = (unsigned char*)m_alloc(allocSize);
+ if (!m_mem.ptr) {
+ m_mem = m_alloc(allocSize);
m_size = allocSize;
- return (void*)m_buf;
+ return getDataPtr();
}
// Calculate remaining
@@ -177,13 +180,25 @@
// if not, reallocate a buffer of big enough size
if (remaining < minSize) {
size_t newAllocSize = m_size * 2 + allocSize;
- m_buf = (unsigned char*)m_realloc(m_buf, newAllocSize);
+ m_mem = m_realloc(m_mem, newAllocSize);
m_size = newAllocSize;
- return (void*)(m_buf + m_writePos);
+ return (void*)(getDataPtr() + m_writePos);
}
- return (void*)(m_buf + m_writePos);
+ // for custom allocations, host should have finished reading
+ // data from command buffer since command buffers are flushed
+ // on queue submit.
+ // allocBuffer should not be called on command buffers that are currently
+ // being read by the host
+ if (m_usingCustomAlloc) {
+ uint32_t* syncDWordPtr = reinterpret_cast<uint32_t*>(m_mem.ptr);
+ LOG_ALWAYS_FATAL_IF(
+ __atomic_load_n(syncDWordPtr, __ATOMIC_ACQUIRE) != kSyncDataReadComplete,
+ "FATAL: allocBuffer() called but previous read not complete");
+ }
+
+ return (void*)(getDataPtr() + m_writePos);
}
int CommandBufferStagingStream::commitBuffer(size_t size)
@@ -224,7 +239,7 @@
}
void CommandBufferStagingStream::getWritten(unsigned char** bufOut, size_t* sizeOut) {
- *bufOut = m_buf;
+ *bufOut = getDataPtr();
*sizeOut = m_writePos;
}
@@ -232,3 +247,5 @@
m_writePos = 0;
IOStream::rewind();
}
+
+VkDeviceMemory CommandBufferStagingStream::getDeviceMemory() { return m_mem.deviceMemory; }
\ No newline at end of file
diff --git a/system/vulkan_enc/CommandBufferStagingStream.h b/system/vulkan_enc/CommandBufferStagingStream.h
index 9513b45..2742e20 100644
--- a/system/vulkan_enc/CommandBufferStagingStream.h
+++ b/system/vulkan_enc/CommandBufferStagingStream.h
@@ -16,9 +16,12 @@
#ifndef __COMMAND_BUFFER_STAGING_STREAM_H
#define __COMMAND_BUFFER_STAGING_STREAM_H
-#include "IOStream.h"
+#include <vulkan/vulkan_core.h>
+
#include <functional>
+#include "IOStream.h"
+
class CommandBufferStagingStream : public IOStream {
public:
// host will write kSyncDataReadComplete to the sync bytes to indicate memory is no longer being
@@ -31,18 +34,29 @@
// indicates read is pending
static constexpr uint32_t kSyncDataReadPending = 0X1;
+ // \struct backing memory structure
+ struct Memory {
+ VkDeviceMemory deviceMemory =
+ VK_NULL_HANDLE; // device memory associated with allocated memory
+ void* ptr = nullptr; // pointer to allocated memory
+ bool operator==(const Memory& rhs) const {
+ return (deviceMemory == rhs.deviceMemory) && (ptr == rhs.ptr);
+ }
+ };
+
// allocator
// param size to allocate
- // return pointer to allocated memory
- using Alloc = std::function<void*(size_t)>;
+ // return allocated memory
+ using Alloc = std::function<Memory(size_t)>;
// free function
- // param pointer to free
- using Free = std::function<void(void*)>;
+ // param memory to free
+ using Free = std::function<void(const Memory&)>;
// constructor
- // \param allocFn is the allocation function provided. Default allocation function used if nullptr
- // \param freeFn is the free function provided. Default free function used if nullptr
- // freeFn must be provided if allocFn is provided and vice versa
- explicit CommandBufferStagingStream(Alloc&& allocFn = nullptr, Free&& freeFn = nullptr);
+ // \param allocFn is the allocation function provided.
+ // \param freeFn is the free function provided
+ explicit CommandBufferStagingStream(const Alloc& allocFn, const Free& freeFn);
+ // constructor
+ explicit CommandBufferStagingStream();
~CommandBufferStagingStream();
virtual size_t idealAllocSize(size_t len);
@@ -63,38 +77,37 @@
// when not using custom allocators
void markFlushing();
+ // gets the device memory associated with the stream. This is VK_NULL_HANDLE for default allocation
+ // \return device memory
+ VkDeviceMemory getDeviceMemory();
+
private:
- // underlying buffer for data
- unsigned char* m_buf;
- // size of portion of m_buf available for data.
+ // underlying memory for data
+ Memory m_mem;
+ // size of portion of memory available for data.
// for custom allocation, this size excludes size of sync data.
size_t m_size;
- // current write position in m_buf
+ // current write position in data buffer
uint32_t m_writePos;
+ // alloc function
Alloc m_alloc;
+ // free function
Free m_free;
- // underlying custom alloc. default is null
- Alloc m_customAlloc = nullptr;
- // underlying free alloc. default is null
- Free m_customFree = nullptr;
-
// realloc function
// \param size of memory to be allocated
// \ param reference size to update with actual size allocated. This size can be < requested size
// for custom allocation to account for sync data
- using Realloc = std::function<void*(void*, size_t)>;
+ using Realloc = std::function<Memory(const Memory&, size_t)>;
Realloc m_realloc;
// flag tracking use of custom allocation/free
bool m_usingCustomAlloc = false;
- // calculates actual allocation size for data
- // \param requestedSize is the size requested for allocation
- // \return actual data size allocated for requested size. For
- // custom allocation the data size < requested size to account for sync data word
- size_t getDataAllocationSize(const size_t requestedSize);
+ // adjusted memory location to point to start of data after accounting for metadata
+ // \return pointer to data start
+ unsigned char* getDataPtr();
};
#endif
diff --git a/system/vulkan_enc/ResourceTracker.cpp b/system/vulkan_enc/ResourceTracker.cpp
index aebcdc0..c5f1985 100644
--- a/system/vulkan_enc/ResourceTracker.cpp
+++ b/system/vulkan_enc/ResourceTracker.cpp
@@ -15,18 +15,15 @@
#include "ResourceTracker.h"
-#include "Resources.h"
-#include "CommandBufferStagingStream.h"
-#include "DescriptorSetVirtualization.h"
-
-#include "aemu/base/Optional.h"
-#include "aemu/base/threads/AndroidWorkPool.h"
-#include "aemu/base/Tracing.h"
-
-#include "goldfish_vk_private_defs.h"
-
#include "../OpenglSystemCommon/EmulatorFeatureInfo.h"
#include "../OpenglSystemCommon/HostConnection.h"
+#include "CommandBufferStagingStream.h"
+#include "DescriptorSetVirtualization.h"
+#include "Resources.h"
+#include "aemu/base/Optional.h"
+#include "aemu/base/Tracing.h"
+#include "aemu/base/threads/AndroidWorkPool.h"
+#include "goldfish_vk_private_defs.h"
#include "vulkan/vulkan_core.h"
/// Use installed headers or locally defined Fuchsia-specific bits
@@ -205,6 +202,14 @@
Lock mLock;
std::vector<CommandBufferStagingStream*> streams;
std::vector<VkEncoder*> encoders;
+ /// \brief sets alloc and free callbacks for memory allocation for CommandBufferStagingStream(s)
+ /// \param allocFn is the callback to allocate memory
+ /// \param freeFn is the callback to free memory
+ void setAllocFree(CommandBufferStagingStream::Alloc&& allocFn,
+ CommandBufferStagingStream::Free&& freeFn) {
+ mAlloc = allocFn;
+ mFree = freeFn;
+ }
~StagingInfo() {
for (auto stream : streams) {
@@ -228,7 +233,12 @@
CommandBufferStagingStream* stream;
VkEncoder* encoder;
if (streams.empty()) {
- stream = new CommandBufferStagingStream;
+ if (mAlloc && mFree) {
+ // if custom allocators are provided, forward them to CommandBufferStagingStream
+ stream = new CommandBufferStagingStream(mAlloc, mFree);
+ } else {
+ stream = new CommandBufferStagingStream;
+ }
encoder = new VkEncoder(stream);
} else {
stream = streams.back();
@@ -239,6 +249,10 @@
*streamOut = stream;
*encoderOut = encoder;
}
+
+ private:
+ CommandBufferStagingStream::Alloc mAlloc = nullptr;
+ CommandBufferStagingStream::Free mFree = nullptr;
};
static StagingInfo sStaging;
@@ -2933,8 +2947,8 @@
return coherentMemory;
}
- VkResult allocateCoherentMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
- VkEncoder *enc, VkDeviceMemory* pMemory) {
+ VkResult allocateCoherentMemory(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo,
+ VkEncoder* enc, VkDeviceMemory* pMemory) {
uint64_t offset = 0;
uint8_t *ptr = nullptr;
VkMemoryAllocateFlagsInfo allocFlagsInfo;
@@ -3019,7 +3033,6 @@
VkResult getCoherentMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkEncoder* enc,
VkDevice device, VkDeviceMemory* pMemory) {
-
VkMemoryAllocateFlagsInfo allocFlagsInfo;
VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
@@ -3061,6 +3074,9 @@
info.coherentMemory = coherentMemory;
info.device = device;
+ // for suballocated memory, create an alias VkDeviceMemory handle for application
+ // memory used for suballocations will still be VkDeviceMemory associated with
+ // CoherentMemory
auto mem = new_from_host_VkDeviceMemory(VK_NULL_HANDLE);
info_VkDeviceMemory[mem] = info;
*pMemory = mem;
@@ -3731,6 +3747,23 @@
_RETURN_SCUCCESS_WITH_DEVICE_MEMORY_REPORT;
}
+ CoherentMemoryPtr freeCoherentMemoryLocked(VkDeviceMemory memory, VkDeviceMemory_Info& info) {
+ if (info.coherentMemory && info.ptr) {
+ if (info.coherentMemory->getDeviceMemory() != memory) {
+ delete_goldfish_VkDeviceMemory(memory);
+ }
+
+ if (info.ptr) {
+ info.coherentMemory->release(info.ptr);
+ info.ptr = nullptr;
+ }
+
+ return std::move(info.coherentMemory);
+ }
+
+ return nullptr;
+ }
+
void on_vkFreeMemory(
void* context,
VkDevice device,
@@ -3748,15 +3781,12 @@
memoryObjectId = getAHardwareBufferId(info.ahw);
}
#endif
- emitDeviceMemoryReport(
- info_VkDevice[device],
- info.imported ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT
- : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT,
- memoryObjectId,
- 0 /* size */,
- VK_OBJECT_TYPE_DEVICE_MEMORY,
- (uint64_t)(void*)memory
- );
+
+ emitDeviceMemoryReport(info_VkDevice[device],
+ info.imported ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT
+ : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT,
+ memoryObjectId, 0 /* size */, VK_OBJECT_TYPE_DEVICE_MEMORY,
+ (uint64_t)(void*)memory);
#ifdef VK_USE_PLATFORM_FUCHSIA
if (info.vmoHandle && info.ptr) {
@@ -3776,23 +3806,13 @@
return;
}
- if (info.coherentMemory && info.ptr) {
- if (info.coherentMemory->getDeviceMemory() != memory) {
- delete_goldfish_VkDeviceMemory(memory);
- }
+ auto coherentMemory = freeCoherentMemoryLocked(memory, info);
- if (info.ptr) {
- info.coherentMemory->release(info.ptr);
- info.ptr = nullptr;
- }
-
- auto coherentMemory = std::move(info.coherentMemory);
- // We have to release the lock before we could possibly free a
- // CoherentMemory, because that will call into VkEncoder, which
- // shouldn't be called when the lock is held.
- lock.unlock();
- coherentMemory = nullptr;
- }
+ // We have to release the lock before we could possibly free a
+ // CoherentMemory, because that will call into VkEncoder, which
+ // shouldn't be called when the lock is held.
+ lock.unlock();
+ coherentMemory = nullptr;
}
VkResult on_vkMapMemory(
@@ -5716,17 +5736,42 @@
unsigned char* writtenPtr = 0;
size_t written = 0;
- ((CommandBufferStagingStream*)cb->privateStream)->getWritten(&writtenPtr, &written);
+ CommandBufferStagingStream* cmdBufStream =
+ static_cast<CommandBufferStagingStream*>(cb->privateStream);
+ cmdBufStream->getWritten(&writtenPtr, &written);
// There's no pending commands here, skip. (case 2, stream created but no new recordings)
if (!written) continue;
// There are pending commands to flush.
VkEncoder* enc = (VkEncoder*)context;
- enc->vkQueueFlushCommandsGOOGLE(queue, cmdbuf, written, (const void*)writtenPtr, true /* do lock */);
+ VkDeviceMemory deviceMemory = cmdBufStream->getDeviceMemory();
+ VkDeviceSize dataOffset = 0;
+ if (mFeatureInfo->hasVulkanAuxCommandMemory) {
+ // for suballocations, deviceMemory is an alias VkDeviceMemory
+                // get underlying VkDeviceMemory for given alias
+ deviceMemoryTransform_tohost(&deviceMemory, 1 /*memoryCount*/, &dataOffset,
+ 1 /*offsetCount*/, nullptr /*size*/, 0 /*sizeCount*/,
+ nullptr /*typeIndex*/, 0 /*typeIndexCount*/,
+ nullptr /*typeBits*/, 0 /*typeBitCounts*/);
+ // mark stream as flushing before flushing commands
+ cmdBufStream->markFlushing();
+ enc->vkQueueFlushCommandsFromAuxMemoryGOOGLE(queue, cmdbuf, deviceMemory,
+ dataOffset, written, true /*do lock*/);
+ } else {
+ enc->vkQueueFlushCommandsGOOGLE(queue, cmdbuf, written, (const void*)writtenPtr,
+ true /* do lock */);
+ }
// Reset this stream.
- ((CommandBufferStagingStream*)cb->privateStream)->reset();
+ // flushing happens on vkQueueSubmit
+ // vulkan api states that on queue submit,
+ // applications MUST not attempt to modify the command buffer in any way
+ // -as the device may be processing the commands recorded to it.
+ // It is safe to call reset() here for this reason.
+ // Command Buffer associated with this stream will only leave pending state
+ // after queue submit is complete and host has read the data
+ cmdBufStream->reset();
}
}
@@ -6603,6 +6648,69 @@
return 0;
}
+ CommandBufferStagingStream::Alloc getAlloc() {
+ if (mFeatureInfo->hasVulkanAuxCommandMemory) {
+ return [this](size_t size) -> CommandBufferStagingStream::Memory {
+ VkMemoryAllocateInfo info{
+ .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+ .pNext = nullptr,
+ .allocationSize = size,
+ .memoryTypeIndex = VK_MAX_MEMORY_TYPES // indicates auxiliary memory
+ };
+
+ auto enc = ResourceTracker::getThreadLocalEncoder();
+ VkDevice device = VK_NULL_HANDLE;
+ VkDeviceMemory vkDeviceMem = VK_NULL_HANDLE;
+ VkResult result = getCoherentMemory(&info, enc, device, &vkDeviceMem);
+ if (result != VK_SUCCESS) {
+ ALOGE("Failed to get coherent memory %u", result);
+ return {.deviceMemory = VK_NULL_HANDLE, .ptr = nullptr};
+ }
+
+ // getCoherentMemory() uses suballocations.
+ // To retrieve the suballocated memory address, look up
+ // VkDeviceMemory filled in by getCoherentMemory()
+ // scope of mLock
+ {
+ AutoLock<RecursiveLock> lock(mLock);
+ const auto it = info_VkDeviceMemory.find(vkDeviceMem);
+ if (it == info_VkDeviceMemory.end()) {
+ ALOGE("Coherent memory allocated %u not found", result);
+ return {.deviceMemory = VK_NULL_HANDLE, .ptr = nullptr};
+ };
+
+ const auto& info = it->second;
+ return {.deviceMemory = vkDeviceMem, .ptr = info.ptr};
+ }
+ };
+ }
+ return nullptr;
+ }
+
+ CommandBufferStagingStream::Free getFree() {
+ if (mFeatureInfo->hasVulkanAuxCommandMemory) {
+ return [this](const CommandBufferStagingStream::Memory& memory) {
+ // deviceMemory may not be the actual backing auxiliary VkDeviceMemory
+            // for suballocations, deviceMemory is an alias VkDeviceMemory handle;
+ // freeCoherentMemoryLocked maps the alias to the backing VkDeviceMemory
+ VkDeviceMemory deviceMemory = memory.deviceMemory;
+ AutoLock<RecursiveLock> lock(mLock);
+ auto it = info_VkDeviceMemory.find(deviceMemory);
+ if (it == info_VkDeviceMemory.end()) {
+ ALOGE("Device memory to free not found");
+ return;
+ }
+ auto coherentMemory = freeCoherentMemoryLocked(deviceMemory, it->second);
+ // We have to release the lock before we could possibly free a
+ // CoherentMemory, because that will call into VkEncoder, which
+ // shouldn't be called when the lock is held.
+ lock.unlock();
+ coherentMemory = nullptr;
+ };
+ }
+ return nullptr;
+ }
+
VkResult on_vkBeginCommandBuffer(
void* context, VkResult input_result,
VkCommandBuffer commandBuffer,
@@ -7103,7 +7211,8 @@
// Resets staging stream for this command buffer and primary command buffers
// where this command buffer has been recorded. If requested, also clears the pending
// descriptor sets.
- void resetCommandBufferStagingInfo(VkCommandBuffer commandBuffer, bool alsoResetPrimaries, bool alsoClearPendingDescriptorSets) {
+ void resetCommandBufferStagingInfo(VkCommandBuffer commandBuffer, bool alsoResetPrimaries,
+ bool alsoClearPendingDescriptorSets) {
struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
if (!cb) {
return;
@@ -7299,6 +7408,8 @@
struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
if (!cb->privateEncoder) {
+ sStaging.setAllocFree(ResourceTracker::get()->getAlloc(),
+ ResourceTracker::get()->getFree());
sStaging.popStaging((CommandBufferStagingStream**)&cb->privateStream, &cb->privateEncoder);
}
uint8_t* writtenPtr; size_t written;
@@ -8114,6 +8225,9 @@
return mImpl->syncEncodersForQueue(queue, current);
}
+CommandBufferStagingStream::Alloc ResourceTracker::getAlloc() { return mImpl->getAlloc(); }
+
+CommandBufferStagingStream::Free ResourceTracker::getFree() { return mImpl->getFree(); }
VkResult ResourceTracker::on_vkBeginCommandBuffer(
void* context, VkResult input_result,
diff --git a/system/vulkan_enc/ResourceTracker.h b/system/vulkan_enc/ResourceTracker.h
index 391eac5..2cb5b2a 100644
--- a/system/vulkan_enc/ResourceTracker.h
+++ b/system/vulkan_enc/ResourceTracker.h
@@ -14,15 +14,15 @@
// limitations under the License.
#pragma once
-#include "aemu/base/Tracing.h"
-
#include <vulkan/vulkan.h>
-#include "VulkanHandleMapping.h"
-#include "VulkanHandles.h"
#include <functional>
#include <memory>
+#include "CommandBufferStagingStream.h"
+#include "VulkanHandleMapping.h"
+#include "VulkanHandles.h"
+#include "aemu/base/Tracing.h"
#include "goldfish_vk_transform_guest.h"
struct EmulatorFeatureInfo;
@@ -528,6 +528,9 @@
uint32_t syncEncodersForCommandBuffer(VkCommandBuffer commandBuffer, VkEncoder* current);
uint32_t syncEncodersForQueue(VkQueue queue, VkEncoder* current);
+ CommandBufferStagingStream::Alloc getAlloc();
+ CommandBufferStagingStream::Free getFree();
+
VkResult on_vkBeginCommandBuffer(
void* context, VkResult input_result,
VkCommandBuffer commandBuffer,
diff --git a/system/vulkan_enc_unit_tests/Android.mk b/system/vulkan_enc_unit_tests/Android.mk
index 162c95d..e506bc9 100644
--- a/system/vulkan_enc_unit_tests/Android.mk
+++ b/system/vulkan_enc_unit_tests/Android.mk
@@ -6,7 +6,7 @@
LOCAL_C_INCLUDES += \
device/generic/goldfish-opengl/host/include/libOpenglRender \
-
+ external/gfxstream-protocols/include/vulkan/include/
LOCAL_SRC_FILES:= \
CommandBufferStagingStream_test.cpp \
diff --git a/system/vulkan_enc_unit_tests/CommandBufferStagingStream_test.cpp b/system/vulkan_enc_unit_tests/CommandBufferStagingStream_test.cpp
index b1df082..375bc6e 100644
--- a/system/vulkan_enc_unit_tests/CommandBufferStagingStream_test.cpp
+++ b/system/vulkan_enc_unit_tests/CommandBufferStagingStream_test.cpp
@@ -201,30 +201,38 @@
<< "commitBufferAndReadFully should not be supported";
}
-// CommandBufferStagingStreamCustomAllocationTest tests tests behavior of CommandBufferStagingStream
+using MockAlloc = MockFunction<CommandBufferStagingStream::Memory(size_t)>;
+using MockFree = MockFunction<void(const CommandBufferStagingStream::Memory&)>;
+// default empty implementation of free
+static std::function<void(const CommandBufferStagingStream::Memory&)> EmptyFree =
+ [](const CommandBufferStagingStream::Memory&) {};
+// CommandBufferStagingStreamCustomAllocationTest tests behavior of CommandBufferStagingStream
// when initialized with custom allocator/free function.
// These tests test the same outcome as CommandBufferStagingStreamTest tests
// tests allocBuffer can successfully allocate a buffer of given size
-
TEST(CommandBufferStagingStreamCustomAllocationTest, AllocateBufferTest) {
// memory source
std::vector<uint8_t> memorySrc(kTestBufferSize * 2);
- MockFunction<void*(size_t)> allocFn;
+ CommandBufferStagingStream::Memory memory{
+ .deviceMemory = VK_NULL_HANDLE, // not needed for this test
+ .ptr = memorySrc.data()};
+
+ MockAlloc allocFn;
// alloc function should be called once
- EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize)))
- .Times(1)
- .WillRepeatedly(Return(memorySrc.data()));
+ EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize))).Times(1).WillRepeatedly(Return(memory));
// free function should be called once
- MockFunction<void(void*)> freeFn;
- EXPECT_CALL(freeFn, Call(Eq(static_cast<void*>(memorySrc.data())))).Times(1);
+ MockFree freeFn;
+ EXPECT_CALL(freeFn, Call(Eq(memory))).Times(1);
// scope: CommandBufferStagingStream_Creation
{
- CommandBufferStagingStream stream(allocFn.AsStdFunction(), freeFn.AsStdFunction());
+ auto allocStdFn = allocFn.AsStdFunction();
+ auto freeStdFn = freeFn.AsStdFunction();
+ CommandBufferStagingStream stream(allocStdFn, freeStdFn);
uint8_t* buffer = static_cast<uint8_t*>(stream.allocBuffer(kTestBufferSize));
EXPECT_THAT(buffer, NotNull());
}
@@ -235,24 +243,84 @@
// memory source for initial allocation
std::vector<uint8_t> memorySrc(kTestBufferSize * 2);
- MockFunction<void*(size_t)> allocFn;
+ CommandBufferStagingStream::Memory memory{
+ .deviceMemory = VK_NULL_HANDLE, // not needed for this test
+ .ptr = nullptr, // to test alloc call failing
+ };
+
+ MockAlloc allocFn;
// alloc function should be called once
- // return nullptr to test alloc call failing
- EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize))).Times(1).WillRepeatedly(Return(nullptr));
+ EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize))).Times(1).WillRepeatedly(Return(memory));
// free function should not be called if allocation fails
- MockFunction<void(void*)> freeFn;
+ MockFree freeFn;
EXPECT_CALL(freeFn, Call).Times(0);
// scope: CommandBufferStagingStream_Creation
{
- CommandBufferStagingStream stream(allocFn.AsStdFunction(), freeFn.AsStdFunction());
+ auto allocStdFn = allocFn.AsStdFunction();
+ auto freeStdFn = freeFn.AsStdFunction();
+ CommandBufferStagingStream stream(allocStdFn, freeStdFn);
void* buffer = stream.allocBuffer(kTestBufferSize);
EXPECT_THAT(buffer, IsNull());
}
}
+TEST(CommandBufferStagingStreamCustomAllocationTest, DeviceMemoryPointerIsPassedDuringFree) {
+ // memory source
+ std::vector<uint8_t> memorySrc(kTestBufferSize * 2);
+
+ // device memory for test purposes. The test just needs a pointer
+ uint64_t deviceMem = 0;
+ VkDeviceMemory deviceMemPtr = (VkDeviceMemory)(&deviceMem);
+
+ CommandBufferStagingStream::Memory memory{.deviceMemory = deviceMemPtr,
+ .ptr = memorySrc.data()};
+
+ MockAlloc allocFn;
+
+ // alloc function should be called once
+ EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize))).Times(1).WillRepeatedly(Return(memory));
+
+ // free function should be called once
+ MockFree freeFn;
+ EXPECT_CALL(freeFn, Call(Eq(memory))).Times(1);
+
+ // scope: CommandBufferStagingStream_Creation
+ {
+ auto allocStdFn = allocFn.AsStdFunction();
+ auto freeStdFn = freeFn.AsStdFunction();
+ CommandBufferStagingStream stream(allocStdFn, freeStdFn);
+ uint8_t* buffer = static_cast<uint8_t*>(stream.allocBuffer(kTestBufferSize));
+ EXPECT_THAT(buffer, NotNull());
+ }
+}
+
+// test verifies that there are no crashes if alloc/free function reference becomes null
+TEST(CommandBufferStagingStreamCustomAllocationTest, AllocFreeInvalidReference) {
+ MockAlloc allocFn;
+ // alloc shouldn't be called if reference is invalidated
+ EXPECT_CALL(allocFn, Call).Times(0);
+
+ MockFree freeFn;
+ // free shouldn't be called if reference is invalidated
+ EXPECT_CALL(freeFn, Call).Times(0);
+
+ auto allocStdFn = allocFn.AsStdFunction();
+ auto freeStdFn = freeFn.AsStdFunction();
+ // scope: CommandBufferStagingStream_Creation
+ {
+ CommandBufferStagingStream stream(allocStdFn, freeStdFn);
+ // invalidate alloc/free functions
+ allocStdFn = nullptr;
+ freeStdFn = nullptr;
+ stream.allocBuffer(kTestBufferSize);
+ uint8_t* buffer = static_cast<uint8_t*>(stream.allocBuffer(kTestBufferSize));
+ EXPECT_THAT(buffer, IsNull());
+ }
+}
+
// test reallocate buffer remembers previously committed buffers
TEST(CommandBufferStagingStreamCustomAllocationTest, ReallocateBuffer) {
// memory source for initial allocation
@@ -260,35 +328,43 @@
// memory source after reallocation
std::vector<uint8_t> reallocatedMemorySrc(kTestBufferSize * 3);
- MockFunction<void*(size_t)> allocFn;
+ CommandBufferStagingStream::Memory memory{
+ .deviceMemory = VK_NULL_HANDLE, // not needed for this test
+ .ptr = memorySrc.data()};
+
+ CommandBufferStagingStream::Memory reallocatedMemory{
+ .deviceMemory = VK_NULL_HANDLE, // not needed for this test
+ .ptr = reallocatedMemorySrc.data()};
+
+ MockAlloc allocFn;
// alloc function should be called twice
{
InSequence seq;
// expect initial allocation call with allocation size == kTestBufferSize;
- EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize)))
- .Times(1)
- .WillRepeatedly(Return(memorySrc.data()));
+ EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize))).Times(1).WillRepeatedly(Return(memory));
// expect reallocation call with allocation size > kTestBufferSize.
EXPECT_CALL(allocFn, Call(testing::Ge(kTestBufferSize)))
.Times(1)
- .WillRepeatedly(Return(reallocatedMemorySrc.data()));
+ .WillRepeatedly(Return(reallocatedMemory));
}
- MockFunction<void(void*)> freeFn;
+ MockFree freeFn;
{
InSequence seq;
// free function should be called when reallocation happens
- EXPECT_CALL(freeFn, Call(Eq(memorySrc.data()))).Times(1);
+ EXPECT_CALL(freeFn, Call(Eq(memory))).Times(1);
// free function should be called when stream goes out of scope
- EXPECT_CALL(freeFn, Call(Eq(reallocatedMemorySrc.data()))).Times(1);
+ EXPECT_CALL(freeFn, Call(Eq(reallocatedMemory))).Times(1);
}
// scope: CommandBufferStagingStream_Creation
{
- CommandBufferStagingStream stream(allocFn.AsStdFunction(), freeFn.AsStdFunction());
+ auto allocStdFn = allocFn.AsStdFunction();
+ auto freeStdFn = freeFn.AsStdFunction();
+ CommandBufferStagingStream stream(allocStdFn, freeStdFn);
uint8_t* buffer = static_cast<uint8_t*>(stream.allocBuffer(kTestBufferSize));
EXPECT_THAT(buffer, NotNull());
@@ -318,14 +394,17 @@
TEST(CommandBufferStagingStreamCustomAllocationTest, CommitBuffer) {
// memory source
std::vector<uint8_t> memorySrc(kTestBufferSize * 2);
- MockFunction<void*(size_t)> allocFn;
+
+ CommandBufferStagingStream::Memory memory{
+ .deviceMemory = VK_NULL_HANDLE, // not needed for this test
+ .ptr = memorySrc.data()};
+
+ MockAlloc allocFn;
// alloc function should be called once
- EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize)))
- .Times(1)
- .WillRepeatedly(Return(memorySrc.data()));
-
- CommandBufferStagingStream stream(allocFn.AsStdFunction(), [](void*) {});
+ EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize))).Times(1).WillRepeatedly(Return(memory));
+ auto allocStdFn = allocFn.AsStdFunction();
+ CommandBufferStagingStream stream(allocStdFn, EmptyFree);
uint8_t* buffer = static_cast<uint8_t*>(stream.allocBuffer(kTestBufferSize));
EXPECT_THAT(buffer, NotNull());
@@ -351,17 +430,16 @@
// memory source
std::vector<uint8_t> memorySrc(kTestBufferSize * 2);
- MockFunction<void*(size_t)> allocFn;
+ CommandBufferStagingStream::Memory memory{
+ .deviceMemory = VK_NULL_HANDLE, // not needed for this test
+ .ptr = memorySrc.data()};
+
+ MockAlloc allocFn;
// alloc function should be called once
- EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize)))
- .Times(1)
- .WillRepeatedly(Return(memorySrc.data()));
-
- // free function expectation not needed in this test
- MockFunction<void(void*)> freeFn;
-
- CommandBufferStagingStream stream(allocFn.AsStdFunction(), freeFn.AsStdFunction());
+ EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize))).Times(1).WillRepeatedly(Return(memory));
+ auto allocStdFn = allocFn.AsStdFunction();
+ CommandBufferStagingStream stream(allocStdFn, EmptyFree);
uint8_t* buffer = static_cast<uint8_t*>(stream.allocBuffer(kTestBufferSize));
// write some arbitrary data
@@ -389,18 +467,17 @@
// memory source
std::vector<uint8_t> memorySrc(kTestBufferSize * 2);
- MockFunction<void*(size_t)> allocFn;
+ CommandBufferStagingStream::Memory memory{
+ .deviceMemory = VK_NULL_HANDLE, // not needed for this test
+ .ptr = memorySrc.data()};
+
+ MockAlloc allocFn;
// alloc function should be called once, no reallocation
- EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize)))
- .Times(1)
- .WillRepeatedly(Return(memorySrc.data()));
+ EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize))).Times(1).WillRepeatedly(Return(memory));
- // free function expectation not needed in this test
- MockFunction<void(void*)> freeFn;
-
- CommandBufferStagingStream stream(allocFn.AsStdFunction(), freeFn.AsStdFunction());
-
+ auto allocStdFn = allocFn.AsStdFunction();
+ CommandBufferStagingStream stream(allocStdFn, EmptyFree);
uint8_t* buffer = static_cast<uint8_t*>(stream.allocBuffer(kTestBufferSize));
EXPECT_THAT(buffer, NotNull());
@@ -413,31 +490,33 @@
TEST(CommandBufferStagingStreamCustomAllocationTest, ReallocationBoundary) {
// memory source
std::vector<uint8_t> memorySrc(kTestBufferSize * 3);
+ CommandBufferStagingStream::Memory memory{
+ .deviceMemory = VK_NULL_HANDLE, // not needed for this test
+ .ptr = memorySrc.data()};
- MockFunction<void*(size_t)> allocFn;
+ MockAlloc allocFn;
// alloc function should be called twice
{
InSequence seq;
// expect initial allocation call with allocation size >= kTestBufferSize;
- EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize)))
- .Times(1)
- .WillRepeatedly(Return(memorySrc.data()));
+ EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize))).Times(1).WillRepeatedly(Return(memory));
// expect reallocation call with allocation size > kTestBufferSize.
EXPECT_CALL(allocFn, Call(testing::Ge(kTestBufferSize)))
.Times(1)
- .WillRepeatedly(Return(memorySrc.data()));
+ .WillRepeatedly(Return(memory));
}
// free function should be called once during reallocation,
// once when stream goes out of scope
- MockFunction<void(void*)> freeFn;
+ MockFree freeFn;
- EXPECT_CALL(freeFn, Call(Eq(memorySrc.data()))).Times(2);
-
- CommandBufferStagingStream stream(allocFn.AsStdFunction(), freeFn.AsStdFunction());
+ EXPECT_CALL(freeFn, Call(Eq(memory))).Times(2);
+ auto allocStdFn = allocFn.AsStdFunction();
+ auto freeStdFn = freeFn.AsStdFunction();
+ CommandBufferStagingStream stream(allocStdFn, freeStdFn);
uint8_t* buffer = static_cast<uint8_t*>(stream.allocBuffer(kTestBufferSize));
EXPECT_THAT(buffer, NotNull());
@@ -473,12 +552,15 @@
// memory source
std::vector<uint8_t> memorySrc(kTestBufferSize * 3);
- MockFunction<void*(size_t)> allocFn;
- EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize)))
- .Times(1)
- .WillRepeatedly(Return(memorySrc.data()));
+ CommandBufferStagingStream::Memory memory{
+ .deviceMemory = VK_NULL_HANDLE, // not needed for this test
+ .ptr = memorySrc.data()};
- CommandBufferStagingStream stream(allocFn.AsStdFunction(), [](void*) {});
+ MockAlloc allocFn;
+ EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize))).Times(1).WillRepeatedly(Return(memory));
+
+ auto allocStdFn = allocFn.AsStdFunction();
+ CommandBufferStagingStream stream(allocStdFn, EmptyFree);
uint8_t* buffer = static_cast<uint8_t*>(stream.allocBuffer(kTestBufferSize));
EXPECT_THAT(buffer, NotNull());
@@ -495,30 +577,38 @@
TEST(CommandBufferStagingStreamCustomAllocationTest, MetadataCheck) {
// memory source
std::vector<uint8_t> memorySrc(kTestBufferSize * 2);
+ CommandBufferStagingStream::Memory memory{
+ .deviceMemory = VK_NULL_HANDLE, // not needed for this test
+ .ptr = memorySrc.data()};
+
// CommandBufferStagingStream allocates metadata when using custom allocators
static const size_t expectedMetadataSize = 8;
- MockFunction<void*(size_t)> allocFn;
+ MockAlloc allocFn;
- EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize)))
- .Times(1)
- .WillRepeatedly(Return(memorySrc.data()));
+ EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize))).Times(1).WillRepeatedly(Return(memory));
- CommandBufferStagingStream stream(allocFn.AsStdFunction(), [](void*) {});
+ auto allocStdFn = allocFn.AsStdFunction();
+ CommandBufferStagingStream stream(allocStdFn, EmptyFree);
uint8_t* buffer = static_cast<uint8_t*>(stream.allocBuffer(kTestBufferSize));
- EXPECT_THAT(buffer, NotNull());
+ // data should start after metadata
+ EXPECT_THAT(buffer, Eq(memorySrc.data() + expectedMetadataSize));
+ // metadata should be initialized to read complete
+ uint32_t* metadataPtr = reinterpret_cast<uint32_t*>(memorySrc.data());
+ EXPECT_THAT(*metadataPtr, Eq(CommandBufferStagingStream::kSyncDataReadComplete));
}
-TEST(CommandBufferStagingStreamCustomAllocationTest, MarkFlushingTest) {
+TEST(CommandBufferStagingStreamCustomAllocationTest, MarkFlushing) {
// memory source for allocation
std::vector<uint8_t> memorySrc(kTestBufferSize * 2);
+ CommandBufferStagingStream::Memory memory{
+ .deviceMemory = VK_NULL_HANDLE, // not needed for this test
+ .ptr = memorySrc.data()};
+ MockAlloc allocFn;
- MockFunction<void*(size_t)> allocFn;
+ EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize))).Times(1).WillRepeatedly(Return(memory));
- EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize)))
- .Times(1)
- .WillRepeatedly(Return(memorySrc.data()));
-
- CommandBufferStagingStream stream(allocFn.AsStdFunction(), [](void*) {});
+ auto allocStdFn = allocFn.AsStdFunction();
+ CommandBufferStagingStream stream(allocStdFn, EmptyFree);
uint8_t* buffer = static_cast<uint8_t*>(stream.allocBuffer(kTestBufferSize));
// write some data
@@ -536,47 +626,19 @@
EXPECT_EQ(*readPtr, CommandBufferStagingStream::kSyncDataReadPending);
}
-TEST(CommandBufferStagingStreamCustomAllocationTest, MarkFlushing) {
- // memory source for allocation
- std::vector<uint8_t> memorySrc(kTestBufferSize * 2);
-
- MockFunction<void*(size_t)> allocFn;
-
- EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize)))
- .Times(1)
- .WillRepeatedly(Return(memorySrc.data()));
-
- CommandBufferStagingStream stream(allocFn.AsStdFunction(), [](void*) {});
- uint8_t* buffer = static_cast<uint8_t*>(stream.allocBuffer(kTestBufferSize));
-
- // write some data
- const std::string_view commandData{"some command"};
- const size_t dataSize = commandData.size();
- memcpy(buffer, commandData.data(), dataSize);
-
- // commit data
- stream.commitBuffer(dataSize);
-
- // will set metadata of the stream buffer to pending read
- stream.markFlushing();
-
- uint32_t* readPtr = reinterpret_cast<uint32_t*>(memorySrc.data());
- EXPECT_EQ(*readPtr, 0x01);
-}
-
// this test verifies that realloc waits till consumer of memory has completed read
TEST(CommandBufferStagingStreamCustomAllocationTest, ReallocNotCalledTillBufferIsRead) {
// memory source for allocation
// allocate a big enough buffer to avoid resizes in test
std::vector<uint8_t> memorySrc(kTestBufferSize * 3);
- auto ptr = memorySrc.data();
+ CommandBufferStagingStream::Memory memory{
+ .deviceMemory = VK_NULL_HANDLE, // not needed for this test
+ .ptr = memorySrc.data()};
std::condition_variable memoryFlushedCondition;
std::mutex mutex;
- // track the number of times allocFn is called
-
- MockFunction<void*(size_t)> allocFn;
+ MockAlloc allocFn;
// mock function to notify read is complete
// this will be used to set up the expectation that realloc should
@@ -589,7 +651,8 @@
std::unique_lock readLock(mutex);
memoryFlushedCondition.wait(readLock, [&]() {
// wait till memorySrc is ready for read
- return *ptr == CommandBufferStagingStream::kSyncDataReadPending;
+ uint32_t* syncData = static_cast<uint32_t*>(memory.ptr);
+ return *syncData == CommandBufferStagingStream::kSyncDataReadPending;
});
readLock.unlock();
@@ -601,14 +664,12 @@
fn();
});
- CommandBufferStagingStream stream(allocFn.AsStdFunction(), [](void*) {});
- // scope for writeLock
+ auto allocStdFn = allocFn.AsStdFunction();
+ CommandBufferStagingStream stream(allocStdFn, EmptyFree); // scope for writeLock
{
std::lock_guard writeLock(mutex);
- EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize)))
- .Times(1)
- .WillRepeatedly(Return(memorySrc.data()));
+ EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize))).Times(1).WillRepeatedly(Return(memory));
uint8_t* buffer = static_cast<uint8_t*>(stream.allocBuffer(kTestBufferSize));
@@ -630,11 +691,34 @@
EXPECT_CALL(allocFn, Call(testing::Ge(kTestBufferSize)))
.Times(1)
.After(readCompleteExpectation)
- .WillRepeatedly(Return(memorySrc.data()));
+ .WillRepeatedly(Return(memory));
// realloc will be blocked till buffer read is complete by reader
(void)stream.allocBuffer(kTestBufferSize);
// wait for read thread to finish
consumer.join();
+}
+
+// this test verifies that allocBuffer() cannot be called on a stream
+// that is currently being read by the host
+TEST(CommandBufferStagingStreamCustomAllocationTest, AllocBufferFailsIfReadPending) {
+ // memory source for allocation
+ std::vector<uint8_t> memorySrc(kTestBufferSize * 2);
+ CommandBufferStagingStream::Memory memory{
+ .deviceMemory = VK_NULL_HANDLE, // not needed for this test
+ .ptr = memorySrc.data()};
+ MockAlloc allocFn;
+
+ EXPECT_CALL(allocFn, Call(Ge(kTestBufferSize))).Times(1).WillRepeatedly(Return(memory));
+
+ auto allocStdFn = allocFn.AsStdFunction();
+ CommandBufferStagingStream stream(allocStdFn, EmptyFree);
+ (void)stream.allocBuffer(kTestBufferSize);
+
+ // will set metadata of the stream buffer to pending read
+ stream.markFlushing();
+
+ EXPECT_DEATH({ (void)stream.allocBuffer(kTestBufferSize); }, "")
+ << "allocBuffer() should not be called while previous data is being flushed";
}
\ No newline at end of file