Pyrogenesis trunk
vk_mem_alloc.h
Go to the documentation of this file.
1//
2// Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved.
3//
4// Permission is hereby granted, free of charge, to any person obtaining a copy
5// of this software and associated documentation files (the "Software"), to deal
6// in the Software without restriction, including without limitation the rights
7// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8// copies of the Software, and to permit persons to whom the Software is
9// furnished to do so, subject to the following conditions:
10//
11// The above copyright notice and this permission notice shall be included in
12// all copies or substantial portions of the Software.
13//
14// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20// THE SOFTWARE.
21//
22
23#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24#define AMD_VULKAN_MEMORY_ALLOCATOR_H
25
26/** \mainpage Vulkan Memory Allocator
27
28<b>Version 3.0.1 (2022-05-26)</b>
29
30Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved. \n
31License: MIT
32
33<b>API documentation divided into groups:</b> [Modules](modules.html)
34
35\section main_table_of_contents Table of contents
36
37- <b>User guide</b>
38 - \subpage quick_start
39 - [Project setup](@ref quick_start_project_setup)
40 - [Initialization](@ref quick_start_initialization)
41 - [Resource allocation](@ref quick_start_resource_allocation)
42 - \subpage choosing_memory_type
43 - [Usage](@ref choosing_memory_type_usage)
44 - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags)
45 - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types)
46 - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools)
47 - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations)
48 - \subpage memory_mapping
49 - [Mapping functions](@ref memory_mapping_mapping_functions)
50 - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory)
51 - [Cache flush and invalidate](@ref memory_mapping_cache_control)
52 - \subpage staying_within_budget
53 - [Querying for budget](@ref staying_within_budget_querying_for_budget)
54 - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage)
55 - \subpage resource_aliasing
56 - \subpage custom_memory_pools
57 - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex)
58 - [Linear allocation algorithm](@ref linear_algorithm)
59 - [Free-at-once](@ref linear_algorithm_free_at_once)
60 - [Stack](@ref linear_algorithm_stack)
61 - [Double stack](@ref linear_algorithm_double_stack)
62 - [Ring buffer](@ref linear_algorithm_ring_buffer)
63 - \subpage defragmentation
64 - \subpage statistics
65 - [Numeric statistics](@ref statistics_numeric_statistics)
66 - [JSON dump](@ref statistics_json_dump)
67 - \subpage allocation_annotation
68 - [Allocation user data](@ref allocation_user_data)
69 - [Allocation names](@ref allocation_names)
70 - \subpage virtual_allocator
71 - \subpage debugging_memory_usage
72 - [Memory initialization](@ref debugging_memory_usage_initialization)
73 - [Margins](@ref debugging_memory_usage_margins)
74 - [Corruption detection](@ref debugging_memory_usage_corruption_detection)
75 - \subpage opengl_interop
76- \subpage usage_patterns
77 - [GPU-only resource](@ref usage_patterns_gpu_only)
78 - [Staging copy for upload](@ref usage_patterns_staging_copy_upload)
79 - [Readback](@ref usage_patterns_readback)
80 - [Advanced data uploading](@ref usage_patterns_advanced_data_uploading)
81 - [Other use cases](@ref usage_patterns_other_use_cases)
82- \subpage configuration
83 - [Pointers to Vulkan functions](@ref config_Vulkan_functions)
84 - [Custom host memory allocator](@ref custom_memory_allocator)
85 - [Device memory allocation callbacks](@ref allocation_callbacks)
86 - [Device heap memory limit](@ref heap_memory_limit)
87- <b>Extension support</b>
88 - \subpage vk_khr_dedicated_allocation
89 - \subpage enabling_buffer_device_address
90 - \subpage vk_ext_memory_priority
91 - \subpage vk_amd_device_coherent_memory
92- \subpage general_considerations
93 - [Thread safety](@ref general_considerations_thread_safety)
94 - [Versioning and compatibility](@ref general_considerations_versioning_and_compatibility)
95 - [Validation layer warnings](@ref general_considerations_validation_layer_warnings)
96 - [Allocation algorithm](@ref general_considerations_allocation_algorithm)
97 - [Features not supported](@ref general_considerations_features_not_supported)
98
99\section main_see_also See also
100
101- [**Product page on GPUOpen**](https://gpuopen.com/gaming-product/vulkan-memory-allocator/)
102- [**Source repository on GitHub**](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator)
103
104\defgroup group_init Library initialization
105
106\brief API elements related to the initialization and management of the entire library, especially #VmaAllocator object.
107
108\defgroup group_alloc Memory allocation
109
110\brief API elements related to the allocation, deallocation, and management of Vulkan memory, buffers, images.
111Most basic ones being: vmaCreateBuffer(), vmaCreateImage().
112
113\defgroup group_virtual Virtual allocator
114
115\brief API elements related to the mechanism of \ref virtual_allocator - using the core allocation algorithm
116for user-defined purpose without allocating any real GPU memory.
117
118\defgroup group_stats Statistics
119
120\brief API elements that query current status of the allocator, from memory usage, budget, to full dump of the internal state in JSON format.
121See documentation chapter: \ref statistics.
122*/
123
124
125#ifdef __cplusplus
126extern "C" {
127#endif
128
129#ifndef VULKAN_H_
130 #include <vulkan/vulkan.h>
131#endif
132
133// Define this macro to declare maximum supported Vulkan version in format AAABBBCCC,
134// where AAA = major, BBB = minor, CCC = patch.
135// If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
136#if !defined(VMA_VULKAN_VERSION)
137 #if defined(VK_VERSION_1_3)
138 #define VMA_VULKAN_VERSION 1003000
139 #elif defined(VK_VERSION_1_2)
140 #define VMA_VULKAN_VERSION 1002000
141 #elif defined(VK_VERSION_1_1)
142 #define VMA_VULKAN_VERSION 1001000
143 #else
144 #define VMA_VULKAN_VERSION 1000000
145 #endif
146#endif
147
148#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
168 #if VMA_VULKAN_VERSION >= 1001000
174 #endif // #if VMA_VULKAN_VERSION >= 1001000
175#endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES
176
177#if !defined(VMA_DEDICATED_ALLOCATION)
178 #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
179 #define VMA_DEDICATED_ALLOCATION 1
180 #else
181 #define VMA_DEDICATED_ALLOCATION 0
182 #endif
183#endif
184
185#if !defined(VMA_BIND_MEMORY2)
186 #if VK_KHR_bind_memory2
187 #define VMA_BIND_MEMORY2 1
188 #else
189 #define VMA_BIND_MEMORY2 0
190 #endif
191#endif
192
193#if !defined(VMA_MEMORY_BUDGET)
194 #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
195 #define VMA_MEMORY_BUDGET 1
196 #else
197 #define VMA_MEMORY_BUDGET 0
198 #endif
199#endif
200
201// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers.
202#if !defined(VMA_BUFFER_DEVICE_ADDRESS)
203 #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
204 #define VMA_BUFFER_DEVICE_ADDRESS 1
205 #else
206 #define VMA_BUFFER_DEVICE_ADDRESS 0
207 #endif
208#endif
209
210// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers.
211#if !defined(VMA_MEMORY_PRIORITY)
212 #if VK_EXT_memory_priority
213 #define VMA_MEMORY_PRIORITY 1
214 #else
215 #define VMA_MEMORY_PRIORITY 0
216 #endif
217#endif
218
219// Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers.
220#if !defined(VMA_EXTERNAL_MEMORY)
221 #if VK_KHR_external_memory
222 #define VMA_EXTERNAL_MEMORY 1
223 #else
224 #define VMA_EXTERNAL_MEMORY 0
225 #endif
226#endif
227
228// Define these macros to decorate all public functions with additional code,
229// before and after returned type, appropriately. This may be useful for
230// exporting the functions when compiling VMA as a separate library. Example:
231// #define VMA_CALL_PRE __declspec(dllexport)
232// #define VMA_CALL_POST __cdecl
233#ifndef VMA_CALL_PRE
234 #define VMA_CALL_PRE
235#endif
236#ifndef VMA_CALL_POST
237 #define VMA_CALL_POST
238#endif
239
240// Define this macro to decorate pointers with an attribute specifying the
241// length of the array they point to if they are not null.
242//
243// The length may be one of
244// - The name of another parameter in the argument list where the pointer is declared
245// - The name of another member in the struct where the pointer is declared
246// - The name of a member of a struct type, meaning the value of that member in
247// the context of the call. For example
248// VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount"),
249// this means the number of memory heaps available in the device associated
250// with the VmaAllocator being dealt with.
251#ifndef VMA_LEN_IF_NOT_NULL
252 #define VMA_LEN_IF_NOT_NULL(len)
253#endif
254
255// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
256// see: https://clang.llvm.org/docs/AttributeReference.html#nullable
257#ifndef VMA_NULLABLE
258 #ifdef __clang__
259 #define VMA_NULLABLE _Nullable
260 #else
261 #define VMA_NULLABLE
262 #endif
263#endif
264
265// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
266// see: https://clang.llvm.org/docs/AttributeReference.html#nonnull
267#ifndef VMA_NOT_NULL
268 #ifdef __clang__
269 #define VMA_NOT_NULL _Nonnull
270 #else
271 #define VMA_NOT_NULL
272 #endif
273#endif
274
275// If non-dispatchable handles are represented as pointers then we can give
276// them nullability annotations
277#ifndef VMA_NOT_NULL_NON_DISPATCHABLE
278 #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
279 #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
280 #else
281 #define VMA_NOT_NULL_NON_DISPATCHABLE
282 #endif
283#endif
284
285#ifndef VMA_NULLABLE_NON_DISPATCHABLE
286 #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
287 #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
288 #else
289 #define VMA_NULLABLE_NON_DISPATCHABLE
290 #endif
291#endif
292
293#ifndef VMA_STATS_STRING_ENABLED
294 #define VMA_STATS_STRING_ENABLED 1
295#endif
296
297////////////////////////////////////////////////////////////////////////////////
298////////////////////////////////////////////////////////////////////////////////
299//
300// INTERFACE
301//
302////////////////////////////////////////////////////////////////////////////////
303////////////////////////////////////////////////////////////////////////////////
304
305// Sections for managing code placement in file, only for development purposes e.g. for convenient folding inside an IDE.
306#ifndef _VMA_ENUM_DECLARATIONS
307
308/**
309\addtogroup group_init
310@{
311*/
312
313/// Flags for created #VmaAllocator.
315{
316 /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you.
317
318 Using this flag may increase performance because internal mutexes are not used.
319 */
321 /** \brief Enables usage of VK_KHR_dedicated_allocation extension.
322
323 The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
324 When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
325
326 Using this extension will automatically allocate dedicated blocks of memory for
327 some buffers and images instead of suballocating place for them out of bigger
328 memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
329 flag) when it is recommended by the driver. It may improve performance on some
330 GPUs.
331
332 You may set this flag only if you found out that following device extensions are
333 supported, you enabled them while creating Vulkan device passed as
334 VmaAllocatorCreateInfo::device, and you want them to be used internally by this
335 library:
336
337 - VK_KHR_get_memory_requirements2 (device extension)
338 - VK_KHR_dedicated_allocation (device extension)
339
340 When this flag is set, you can experience following warnings reported by Vulkan
341 validation layer. You can ignore them.
342
343 > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer.
344 */
346 /**
347 Enables usage of VK_KHR_bind_memory2 extension.
348
349 The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
350 When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
351
352 You may set this flag only if you found out that this device extension is supported,
353 you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
354 and you want it to be used internally by this library.
355
356 The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`,
357 which allow passing a chain of `pNext` structures while binding.
358 This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2().
359 */
361 /**
362 Enables usage of VK_EXT_memory_budget extension.
363
364 You may set this flag only if you found out that this device extension is supported,
365 you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
366 and you want it to be used internally by this library, along with another instance extension
367 VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted).
368
369 The extension provides query for current memory usage and budget, which will probably
370 be more accurate than an estimation used by the library otherwise.
371 */
373 /**
374 Enables usage of VK_AMD_device_coherent_memory extension.
375
376 You may set this flag only if you:
377
378 - found out that this device extension is supported and enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
379 - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device,
380 - want it to be used internally by this library.
381
382 The extension and accompanying device feature provide access to memory types with
383 `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags.
384 They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR.
385
386 When the extension is not enabled, such memory types are still enumerated, but their usage is illegal.
387 To protect from this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such memory type,
388 returning `VK_ERROR_FEATURE_NOT_PRESENT`.
389 */
391 /**
392 Enables usage of "buffer device address" feature, which allows you to use function
393 `vkGetBufferDeviceAddress*` to get raw GPU pointer to a buffer and pass it for usage inside a shader.
394
395 You may set this flag only if you:
396
397 1. (For Vulkan version < 1.2) Found as available and enabled device extension
398 VK_KHR_buffer_device_address.
399 This extension is promoted to core Vulkan 1.2.
400 2. Found as available and enabled device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress`.
401
402 When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA.
403 The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to
404 allocated memory blocks wherever it might be needed.
405
406 For more information, see documentation chapter \ref enabling_buffer_device_address.
407 */
409 /**
410 Enables usage of VK_EXT_memory_priority extension in the library.
411
412 You may set this flag only if you found available and enabled this device extension,
413 along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`,
414 while creating Vulkan device passed as VmaAllocatorCreateInfo::device.
415
416 When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority
417 are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored.
418
419 A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
420 Larger values are higher priority. The granularity of the priorities is implementation-dependent.
421 It is automatically passed to every call to `vkAllocateMemory` done by the library using structure `VkMemoryPriorityAllocateInfoEXT`.
422 The value to be used for default priority is 0.5.
423 For more details, see the documentation of the VK_EXT_memory_priority extension.
424 */
426
429/// See #VmaAllocatorCreateFlagBits.
431
432/** @} */
433
434/**
435\addtogroup group_alloc
436@{
437*/
438
439/// \brief Intended usage of the allocated memory.
440typedef enum VmaMemoryUsage
441{
442 /** No intended memory usage specified.
443 Use other members of VmaAllocationCreateInfo to specify your requirements.
444 */
446 /**
447 \deprecated Obsolete, preserved for backward compatibility.
448 Prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
449 */
451 /**
452 \deprecated Obsolete, preserved for backward compatibility.
453 Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`.
454 */
456 /**
457 \deprecated Obsolete, preserved for backward compatibility.
458 Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
459 */
461 /**
462 \deprecated Obsolete, preserved for backward compatibility.
463 Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
464 */
466 /**
467 \deprecated Obsolete, preserved for backward compatibility.
468 Prefers not `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
469 */
471 /**
472 Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`.
473 Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation.
474
475 Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`.
476
477 Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
478 */
480 /**
481 Selects best memory type automatically.
482 This flag is recommended for most common use cases.
483
484 When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
485 you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
486 in VmaAllocationCreateInfo::flags.
487
488 It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
489 vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
490 and not with generic memory allocation functions.
491 */
493 /**
494 Selects best memory type automatically with preference for GPU (device) memory.
495
496 When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
497 you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
498 in VmaAllocationCreateInfo::flags.
499
500 It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
501 vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
502 and not with generic memory allocation functions.
503 */
505 /**
506 Selects best memory type automatically with preference for CPU (host) memory.
507
508 When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
509 you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
510 in VmaAllocationCreateInfo::flags.
511
512 It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
513 vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
514 and not with generic memory allocation functions.
515 */
517
518 VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
520
521/// Flags to be passed as VmaAllocationCreateInfo::flags.
523{
524 /** \brief Set this flag if the allocation should have its own memory block.
525
526 Use it for special, big resources, like fullscreen images used as attachments.
527 */
529
530 /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block.
531
532 If new allocation cannot be placed in any of the existing blocks, allocation
533 fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
534
535 You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and
536 #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense.
537 */
539 /** \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
540
541 Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData.
542
543 It is valid to use this flag for allocation made from memory type that is not
544 `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is
545 useful if you need an allocation that is efficient to use on GPU
546 (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that
547 support it (e.g. Intel GPU).
548 */
550 /** \deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead.
551
552 Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a
553 null-terminated string. Instead of copying pointer value, a local copy of the
554 string is made and stored in allocation's `pName`. The string is automatically
555 freed together with the allocation. It is also used in vmaBuildStatsString().
556 */
558 /** Allocation will be created from upper stack in a double stack pool.
559
560 This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag.
561 */
563 /** Create both buffer/image and allocation, but don't bind them together.
564 It is useful when you want to bind yourself to do some more advanced binding, e.g. using some extensions.
565 The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage().
566 Otherwise it is ignored.
567
568 If you want to make sure the new buffer/image is not tied to the new memory allocation
569 through `VkMemoryDedicatedAllocateInfoKHR` structure in case the allocation ends up in its own memory block,
570 use also flag #VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT.
571 */
573 /** Create allocation only if the additional device memory required for it, if any, won't exceed
574 memory budget. Otherwise return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
575 */
577 /** \brief Set this flag if the allocated memory will have aliasing resources.
578
579 Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified.
580 Otherwise created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan Validation Layer errors.
581 */
583 /**
584 Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).
585
586 - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value,
587 you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
588 - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
589 This includes allocations created in \ref custom_memory_pools.
590
591 Declares that mapped memory will only be written sequentially, e.g. using `memcpy()` or a loop writing number-by-number,
592 never read or accessed randomly, so a memory type can be selected that is uncached and write-combined.
593
594 \warning Violating this declaration may work correctly, but will likely be very slow.
595 Watch out for implicit reads introduced by doing e.g. `pMappedData[i] += x;`
596 Better prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once.
597 */
599 /**
600 Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).
601
602 - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value,
603 you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
604 - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
605 This includes allocations created in \ref custom_memory_pools.
606
607 Declares that mapped memory can be read, written, and accessed in random order,
608 so a `HOST_CACHED` memory type is required.
609 */
611 /**
612 Together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT,
613 it says that despite request for host access, a not-`HOST_VISIBLE` memory type can be selected
614 if it may improve performance.
615
616 By using this flag, you declare that you will check if the allocation ended up in a `HOST_VISIBLE` memory type
617 (e.g. using vmaGetAllocationMemoryProperties()) and if not, you will create some "staging" buffer and
618 issue an explicit transfer to write/read your data.
619 To prepare for this possibility, don't forget to add appropriate flags like
620 `VK_BUFFER_USAGE_TRANSFER_DST_BIT`, `VK_BUFFER_USAGE_TRANSFER_SRC_BIT` to the parameters of created buffer or image.
621 */
623 /** Allocation strategy that chooses smallest possible free range for the allocation
624 to minimize memory usage and fragmentation, possibly at the expense of allocation time.
625 */
627 /** Allocation strategy that chooses first suitable free range for the allocation -
628 not necessarily in terms of the smallest offset but the one that is easiest and fastest to find
629 to minimize allocation time, possibly at the expense of allocation quality.
630 */
632 /** Allocation strategy that chooses always the lowest offset in available space.
633 This is not the most efficient strategy but achieves highly packed data.
634 Used internally by defragmentation, not recommended in typical usage.
635 */
637 /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT.
638 */
640 /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT.
641 */
643 /** A bit mask to extract only `STRATEGY` bits from entire set of flags.
644 */
649
652/// See #VmaAllocationCreateFlagBits.
654
655/// Flags to be passed as VmaPoolCreateInfo::flags.
657{
658 /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored.
659
660 This is an optional optimization flag.
661
662 If you always allocate using vmaCreateBuffer(), vmaCreateImage(),
663 vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator
664 knows exact type of your allocations so it can handle Buffer-Image Granularity
665 in the optimal way.
666
667 If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(),
668 exact type of such allocations is not known, so allocator must be conservative
669 in handling Buffer-Image Granularity, which can lead to suboptimal allocation
670 (wasted memory). In that case, if you can make sure you always allocate only
671 buffers and linear images or only optimal images out of this pool, use this flag
672 to make allocator disregard Buffer-Image Granularity and so make allocations
673 faster and more optimal.
674 */
676
677 /** \brief Enables alternative, linear allocation algorithm in this pool.
678
679 Specify this flag to enable linear allocation algorithm, which always creates
680 new allocations after last one and doesn't reuse space from allocations freed in
681 between. It trades memory consumption for simplified algorithm and data
682 structure, which has better performance and uses less memory for metadata.
683
684 By using this flag, you can achieve behavior of free-at-once, stack,
685 ring buffer, and double stack.
686 For details, see documentation chapter \ref linear_algorithm.
687 */
689
690 /** Bit mask to extract only `ALGORITHM` bits from entire set of flags.
691 */
694
697/// Flags to be passed as VmaPoolCreateInfo::flags. See #VmaPoolCreateFlagBits.
699
700/// Flags to be passed as VmaDefragmentationInfo::flags.
702{
703 /* \brief Use simple but fast algorithm for defragmentation.
704 May not achieve best results but will require least time to compute and least allocations to copy.
705 */
707 /* \brief Default defragmentation algorithm, applied also when no `ALGORITHM` flag is specified.
708 Offers a balance between defragmentation quality and the amount of allocations and bytes that need to be moved.
709 */
711 /* \brief Perform full defragmentation of memory.
712 Can result in notably more time to compute and allocations to copy, but will achieve best memory packing.
713 */
715 /** \brief Use the most robust algorithm at the cost of time to compute and number of copies to make.
716 Only available when bufferImageGranularity is greater than 1, since it aims to reduce
717 alignment issues between different types of resources.
718 Otherwise falls back to same behavior as #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT.
719 */
721
722 /// A bit mask to extract only `ALGORITHM` bits from entire set of flags.
728
731/// See #VmaDefragmentationFlagBits.
733
734/// Operation performed on single defragmentation move. See structure #VmaDefragmentationMove.
736{
737 /// Buffer/image has been recreated at `dstTmpAllocation`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass().
739 /// Set this value if you cannot move the allocation. New place reserved at `dstTmpAllocation` will be freed. `srcAllocation` will remain unchanged.
741 /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved at `dstTmpAllocation` will be freed, along with `srcAllocation`, which will be destroyed.
744
745/** @} */
746
747/**
748\addtogroup group_virtual
749@{
750*/
751
752/// Flags to be passed as VmaVirtualBlockCreateInfo::flags.
754{
755 /** \brief Enables alternative, linear allocation algorithm in this virtual block.
756
757 Specify this flag to enable linear allocation algorithm, which always creates
758 new allocations after last one and doesn't reuse space from allocations freed in
759 between. It trades memory consumption for simplified algorithm and data
760 structure, which has better performance and uses less memory for metadata.
761
762 By using this flag, you can achieve behavior of free-at-once, stack,
763 ring buffer, and double stack.
764 For details, see documentation chapter \ref linear_algorithm.
765 */
767
768 /** \brief Bit mask to extract only `ALGORITHM` bits from entire set of flags.
769 */
772
775/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. See #VmaVirtualBlockCreateFlagBits.
777
778/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags.
780{
781 /** \brief Allocation will be created from upper stack in a double stack pool.
782
783 This flag is only allowed for virtual blocks created with #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT flag.
784 */
786 /** \brief Allocation strategy that tries to minimize memory usage.
787 */
789 /** \brief Allocation strategy that tries to minimize allocation time.
790 */
792 /** Allocation strategy that always chooses the lowest offset in available space.
793 This is not the most efficient strategy but achieves highly packed data.
794 */
796 /** \brief A bit mask to extract only `STRATEGY` bits from entire set of flags.
797
798 These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits.
799 */
801
804/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See #VmaVirtualAllocationCreateFlagBits.
806
807/** @} */
808
809#endif // _VMA_ENUM_DECLARATIONS
810
811#ifndef _VMA_DATA_TYPES_DECLARATIONS
812
813/**
814\addtogroup group_init
815@{ */
816
817/** \struct VmaAllocator
818\brief Represents the main object of this library, initialized.
819
820Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it.
821Call function vmaDestroyAllocator() to destroy it.
822
823It is recommended to create just one object of this type per `VkDevice` object,
824right after Vulkan is initialized and keep it alive until before Vulkan device is destroyed.
825*/
827
828/** @} */
829
830/**
831\addtogroup group_alloc
832@{
833*/
834
835/** \struct VmaPool
836\brief Represents a custom memory pool.
837
838Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it.
839Call function vmaDestroyPool() to destroy it.
840
841For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools).
842*/
844
845/** \struct VmaAllocation
846\brief Represents single memory allocation.
847
848It may be either dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type
849plus unique offset.
850
851There are multiple ways to create such object.
852You need to fill structure VmaAllocationCreateInfo.
853For more information see [Choosing memory type](@ref choosing_memory_type).
854
855Although the library provides convenience functions that create Vulkan buffer or image,
856allocate memory for it and bind them together,
857binding of the allocation to a buffer or an image is out of scope of the allocation itself.
858Allocation object can exist without buffer/image bound,
859binding can be done manually by the user, and destruction of it can be done
860independently of destruction of the allocation.
861
862The object also remembers its size and some other information.
863To retrieve this information, use function vmaGetAllocationInfo() and inspect
864returned structure VmaAllocationInfo.
865*/
867
868/** \struct VmaDefragmentationContext
869\brief An opaque object that represents started defragmentation process.
870
871Fill structure #VmaDefragmentationInfo and call function vmaBeginDefragmentation() to create it.
872Call function vmaEndDefragmentation() to destroy it.
873*/
875
876/** @} */
877
878/**
879\addtogroup group_virtual
880@{
881*/
882
883/** \struct VmaVirtualAllocation
884\brief Represents single memory allocation done inside VmaVirtualBlock.
885
886Use it as a unique identifier to virtual allocation within the single block.
887
888Use value `VK_NULL_HANDLE` to represent a null/invalid allocation.
889*/
891
892/** @} */
893
894/**
895\addtogroup group_virtual
896@{
897*/
898
899/** \struct VmaVirtualBlock
900\brief Handle to a virtual block object that allows to use core allocation algorithm without allocating any real GPU memory.
901
902Fill in #VmaVirtualBlockCreateInfo structure and use vmaCreateVirtualBlock() to create it. Use vmaDestroyVirtualBlock() to destroy it.
903For more information, see documentation chapter \ref virtual_allocator.
904
905This object is not thread-safe - should not be used from multiple threads simultaneously, must be synchronized externally.
906*/
908
909/** @} */
910
911/**
912\addtogroup group_init
913@{
914*/
915
916/// Callback function called after successful vkAllocateMemory.
918 VmaAllocator VMA_NOT_NULL allocator,
919 uint32_t memoryType,
920 VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
921 VkDeviceSize size,
922 void* VMA_NULLABLE pUserData);
923
924/// Callback function called before vkFreeMemory.
926 VmaAllocator VMA_NOT_NULL allocator,
927 uint32_t memoryType,
928 VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
929 VkDeviceSize size,
930 void* VMA_NULLABLE pUserData);
931
932/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`.
933
934Provided for informative purpose, e.g. to gather statistics about number of
935allocations or total amount of memory allocated in Vulkan.
936
937Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
938*/
940{
941 /// Optional, can be null.
943 /// Optional, can be null.
945 /// Optional, can be null.
948
949/** \brief Pointers to some Vulkan functions - a subset used by the library.
950
951Used in VmaAllocatorCreateInfo::pVulkanFunctions.
952*/
953typedef struct VmaVulkanFunctions
954{
955 /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.
957 /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.
976#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
977 /// Fetch "vkGetBufferMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetBufferMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension.
979 /// Fetch "vkGetImageMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetImageMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension.
981#endif
982#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
983 /// Fetch "vkBindBufferMemory2" on Vulkan >= 1.1, fetch "vkBindBufferMemory2KHR" when using VK_KHR_bind_memory2 extension.
985 /// Fetch "vkBindImageMemory2" on Vulkan >= 1.1, fetch "vkBindImageMemory2KHR" when using VK_KHR_bind_memory2 extension.
987#endif
988#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
990#endif
991#if VMA_VULKAN_VERSION >= 1003000
992 /// Fetch from "vkGetDeviceBufferMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceBufferMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
993 PFN_vkGetDeviceBufferMemoryRequirements VMA_NULLABLE vkGetDeviceBufferMemoryRequirements;
994 /// Fetch from "vkGetDeviceImageMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceImageMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
995 PFN_vkGetDeviceImageMemoryRequirements VMA_NULLABLE vkGetDeviceImageMemoryRequirements;
996#endif
998
999/// Description of an Allocator to be created.
1001{
1002 /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
1004 /// Vulkan physical device.
1005 /** It must be valid throughout whole lifetime of created allocator. */
1007 /// Vulkan device.
1008 /** It must be valid throughout whole lifetime of created allocator. */
1010 /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.
1011 /** Set to 0 to use default, which is currently 256 MiB. */
1013 /// Custom CPU memory allocation callbacks. Optional.
1014 /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */
1016 /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.
1017 /** Optional, can be null. */
1019 /** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
1020
1021 If not NULL, it must be a pointer to an array of
1022 `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on
1023 maximum number of bytes that can be allocated out of particular Vulkan memory
1024 heap.
1025
1026 Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that
1027 heap. This is also the default in case of `pHeapSizeLimit` = NULL.
1028
1029 If there is a limit defined for a heap:
1030
1031 - If user tries to allocate more memory from that heap using this allocator,
1032 the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
1033 - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the
1034 value of this limit will be reported instead when using vmaGetMemoryProperties().
1035
1036 Warning! Using this feature may not be equivalent to installing a GPU with
1037 smaller amount of memory, because the graphics driver doesn't necessarily fail new
1038 allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is
1039 exceeded. It may return success and just silently migrate some device memory
1040 blocks to system RAM. This driver behavior can also be controlled using
1041 VK_AMD_memory_overallocation_behavior extension.
1042 */
1043 const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;
1044
1045 /** \brief Pointers to Vulkan functions. Can be null.
1046
1047 For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions).
1048 */
1050 /** \brief Handle to Vulkan instance object.
1051
1052 Starting from version 3.0.0 this member is no longer optional, it must be set!
1053 */
1055 /** \brief Optional. The highest version of Vulkan that the application is designed to use.
1056
1057 It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`.
1058 The patch version number specified is ignored. Only the major and minor versions are considered.
1059 It must be less than or equal (preferably equal) to the value passed to `vkCreateInstance` as `VkApplicationInfo::apiVersion`.
1060 Only versions 1.0, 1.1, 1.2, 1.3 are supported by the current implementation.
1061 Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`.
1062 */
1064#if VMA_EXTERNAL_MEMORY
1065 /** \brief Either null or a pointer to an array of external memory handle types for each Vulkan memory type.
1066
1067 If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryTypeCount`
1068 elements, defining external memory handle types of particular Vulkan memory type,
1069 to be passed using `VkExportMemoryAllocateInfoKHR`.
1070
1071 Any of the elements may be equal to 0, which means not to use `VkExportMemoryAllocateInfoKHR` on this memory type.
1072 This is also the default in case of `pTypeExternalMemoryHandleTypes` = NULL.
1073 */
1074 const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes;
1075#endif // #if VMA_EXTERNAL_MEMORY
1077
1078/// Information about existing #VmaAllocator object.
1079typedef struct VmaAllocatorInfo
1080{
1081 /** \brief Handle to Vulkan instance object.
1082
1083 This is the same value as has been passed through VmaAllocatorCreateInfo::instance.
1084 */
1086 /** \brief Handle to Vulkan physical device object.
1087
1088 This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice.
1089 */
1091 /** \brief Handle to Vulkan device object.
1092
1093 This is the same value as has been passed through VmaAllocatorCreateInfo::device.
1094 */
1097
1098/** @} */
1099
1100/**
1101\addtogroup group_stats
1102@{
1103*/
1104
1105/** \brief Calculated statistics of memory usage e.g. in a specific memory type, heap, custom pool, or total.
1106
1107These are fast to calculate.
1108See functions: vmaGetHeapBudgets(), vmaGetPoolStatistics().
1109*/
1110typedef struct VmaStatistics
1111{
1112 /** \brief Number of `VkDeviceMemory` objects - Vulkan memory blocks allocated.
1113 */
1115 /** \brief Number of #VmaAllocation objects allocated.
1116
1117 Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`.
1118 */
1120 /** \brief Number of bytes allocated in `VkDeviceMemory` blocks.
1121
1122 \note To avoid confusion, please be aware that what Vulkan calls an "allocation" - a whole `VkDeviceMemory` object
1123 (e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a "block" in VMA, while VMA calls
1124 "allocation" a #VmaAllocation object that represents a memory region sub-allocated from such block, usually for a single buffer or image.
1125 */
1127 /** \brief Total number of bytes occupied by all #VmaAllocation objects.
1128
1129 Always less than or equal to `blockBytes`.
1130 Difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan
1131 but unused by any #VmaAllocation.
1132 */
1135
1136/** \brief More detailed statistics than #VmaStatistics.
1137
1138These are slower to calculate. Use for debugging purposes.
1139See functions: vmaCalculateStatistics(), vmaCalculatePoolStatistics().
1140
1141Previous version of the statistics API provided averages, but they have been removed
1142because they can be easily calculated as:
1143
1144\code
1145VkDeviceSize allocationSizeAvg = detailedStats.statistics.allocationBytes / detailedStats.statistics.allocationCount;
1146VkDeviceSize unusedBytes = detailedStats.statistics.blockBytes - detailedStats.statistics.allocationBytes;
1147VkDeviceSize unusedRangeSizeAvg = unusedBytes / detailedStats.unusedRangeCount;
1148\endcode
1149*/
1151{
1152 /// Basic statistics.
1154 /// Number of free ranges of memory between allocations.
1156 /// Smallest allocation size. `VK_WHOLE_SIZE` if there are 0 allocations.
1158 /// Largest allocation size. 0 if there are 0 allocations.
1160 /// Smallest empty range size. `VK_WHOLE_SIZE` if there are 0 empty ranges.
1162 /// Largest empty range size. 0 if there are 0 empty ranges.
1165
1166/** \brief General statistics from current state of the Allocator -
1167total memory usage across all memory heaps and types.
1168
1169These are slower to calculate. Use for debugging purposes.
1170See function vmaCalculateStatistics().
1171*/
1173{
1178
1179/** \brief Statistics of current memory usage and available budget for a specific memory heap.
1180
1181These are fast to calculate.
1182See function vmaGetHeapBudgets().
1183*/
1184typedef struct VmaBudget
1185{
1186 /** \brief Statistics fetched from the library.
1187 */
1189 /** \brief Estimated current memory usage of the program, in bytes.
1190
1191 Fetched from system using VK_EXT_memory_budget extension if enabled.
1192
1193 It might be different than `statistics.blockBytes` (usually higher) due to additional implicit objects
1194 also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or
1195 `VkDeviceMemory` blocks allocated outside of this library, if any.
1196 */
1198 /** \brief Estimated amount of memory available to the program, in bytes.
1199
1200 Fetched from system using VK_EXT_memory_budget extension if enabled.
1201
1202 It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors
1203 external to the program, decided by the operating system.
1204 Difference `budget - usage` is the amount of additional memory that can probably
1205 be allocated without problems. Exceeding the budget may result in various problems.
1206 */
1209
1210/** @} */
1211
1212/**
1213\addtogroup group_alloc
1214@{
1215*/
1216
1217/** \brief Parameters of new #VmaAllocation.
1218
1219To be used with functions like vmaCreateBuffer(), vmaCreateImage(), and many others.
1220*/
1222{
1223 /// Use #VmaAllocationCreateFlagBits enum.
1225 /** \brief Intended usage of memory.
1226
1227 You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other way. \n
1228 If `pool` is not null, this member is ignored.
1229 */
1231 /** \brief Flags that must be set in a Memory Type chosen for an allocation.
1232
1233 Leave 0 if you specify memory requirements in other way. \n
1234 If `pool` is not null, this member is ignored.*/
1236 /** \brief Flags that preferably should be set in a memory type chosen for an allocation.
1237
1238 Set to 0 if no additional flags are preferred. \n
1239 If `pool` is not null, this member is ignored. */
1241 /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation.
1242
1243 Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if
1244 it meets other requirements specified by this structure, with no further
1245 restrictions on memory type index. \n
1246 If `pool` is not null, this member is ignored.
1247 */
1249 /** \brief Pool that this allocation should be created in.
1250
1251 Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members:
1252 `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored.
1253 */
1255 /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().
1256
1257 If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either
1258 null or pointer to a null-terminated string. The string will be then copied to
1259 internal buffer, so it doesn't need to be valid after allocation call.
1260 */
1262 /** \brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
1263
1264 It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object
1265 and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
1266 Otherwise, it has the priority of a memory block where it is placed and this variable is ignored.
1267 */
1270
1271/// Describes parameter of created #VmaPool.
1272typedef struct VmaPoolCreateInfo
1273{
1274 /** \brief Vulkan memory type index to allocate this pool from.
1275 */
1277 /** \brief Use combination of #VmaPoolCreateFlagBits.
1278 */
1280 /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional.
1281
1282 Specify nonzero to set explicit, constant size of memory blocks used by this
1283 pool.
1284
1285 Leave 0 to use default and let the library manage block sizes automatically.
1286 Sizes of particular blocks may vary.
1287 In this case, the pool will also support dedicated allocations.
1288 */
1290 /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty.
1291
1292 Set to 0 to have no preallocated blocks and allow the pool to be completely empty.
1293 */
1295 /** \brief Maximum number of blocks that can be allocated in this pool. Optional.
1296
1297 Set to 0 to use default, which is `SIZE_MAX`, which means no limit.
1298
1299 Set to same value as VmaPoolCreateInfo::minBlockCount to have fixed amount of memory allocated
1300 throughout whole lifetime of this pool.
1301 */
1303 /** \brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations.
1304
1305 It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object.
1306 Otherwise, this variable is ignored.
1307 */
1309 /** \brief Additional minimum alignment to be used for all allocations created from this pool. Can be 0.
1310
1311 Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two.
1312 It can be useful in cases where alignment returned by Vulkan by functions like `vkGetBufferMemoryRequirements` is not enough,
1313 e.g. when doing interop with OpenGL.
1314 */
1316 /** \brief Additional `pNext` chain to be attached to `VkMemoryAllocateInfo` used for every allocation made by this pool. Optional.
1317
1318 Optional, can be null. If not null, it must point to a `pNext` chain of structures that can be attached to `VkMemoryAllocateInfo`.
1319 It can be useful for special needs such as adding `VkExportMemoryAllocateInfoKHR`.
1320 Structures pointed by this member must remain alive and unchanged for the whole lifetime of the custom pool.
1321
1322 Please note that some structures, e.g. `VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`,
1323 can be attached automatically by this library when using other, more convenient features of it.
1324 */
1327
1328/** @} */
1329
1330/**
1331\addtogroup group_alloc
1332@{
1333*/
1334
1335/// Parameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
1336typedef struct VmaAllocationInfo
1337{
1338 /** \brief Memory type index that this allocation was allocated from.
1339
1340 It never changes.
1341 */
1343 /** \brief Handle to Vulkan memory object.
1344
1345 Same memory object can be shared by multiple allocations.
1346
1347 It can change after the allocation is moved during \ref defragmentation.
1348 */
1350 /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation.
1351
1352 You usually don't need to use this offset. If you create a buffer or an image together with the allocation using e.g. function
1353 vmaCreateBuffer(), vmaCreateImage(), functions that operate on these resources refer to the beginning of the buffer or image,
1354 not entire device memory block. Functions like vmaMapMemory(), vmaBindBufferMemory() also refer to the beginning of the allocation
1355 and apply this offset automatically.
1356
1357 It can change after the allocation is moved during \ref defragmentation.
1358 */
1360 /** \brief Size of this allocation, in bytes.
1361
1362 It never changes.
1363
1364 \note Allocation size returned in this variable may be greater than the size
1365 requested for the resource e.g. as `VkBufferCreateInfo::size`. Whole size of the
1366 allocation is accessible for operations on memory e.g. using a pointer after
1367 mapping with vmaMapMemory(), but operations on the resource e.g. using
1368 `vkCmdCopyBuffer` must be limited to the size of the resource.
1369 */
1371 /** \brief Pointer to the beginning of this allocation as mapped data.
1372
1373 If the allocation hasn't been mapped using vmaMapMemory() and hasn't been
1374 created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null.
1375
1376 It can change after call to vmaMapMemory(), vmaUnmapMemory().
1377 It can also change after the allocation is moved during \ref defragmentation.
1378 */
1380 /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
1381
1382 It can change after call to vmaSetAllocationUserData() for this allocation.
1383 */
1385 /** \brief Custom allocation name that was set with vmaSetAllocationName().
1386
1387 It can change after call to vmaSetAllocationName() for this allocation.
1388
1389 Another way to set custom name is to pass it in VmaAllocationCreateInfo::pUserData with
1390 additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED].
1391 */
1392 const char* VMA_NULLABLE pName;
1394
1395/** \brief Parameters for defragmentation.
1396
1397To be used with function vmaBeginDefragmentation().
1398*/
1400{
1401 /// \brief Use combination of #VmaDefragmentationFlagBits.
1403 /** \brief Custom pool to be defragmented.
1404
1405 If null then default pools will undergo defragmentation process.
1406 */
1408 /** \brief Maximum numbers of bytes that can be copied during single pass, while moving allocations to different places.
1409
1410 `0` means no limit.
1411 */
1413 /** \brief Maximum number of allocations that can be moved during single pass to a different place.
1414
1415 `0` means no limit.
1416 */
1419
1420/// Single move of an allocation to be done for defragmentation.
1422{
1423 /// Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY. You can modify it.
1425 /// Allocation that should be moved.
1427 /** \brief Temporary allocation pointing to destination memory that will replace `srcAllocation`.
1428
1429 \warning Do not store this allocation in your data structures! It exists only temporarily, for the duration of the defragmentation pass,
1430 to be used for binding new buffer/image to the destination memory using e.g. vmaBindBufferMemory().
1431 vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory.
1432 */
1435
1436/** \brief Parameters for incremental defragmentation steps.
1437
1438To be used with function vmaBeginDefragmentationPass().
1439*/
1441{
1442 /// Number of elements in the `pMoves` array.
1444 /** \brief Array of moves to be performed by the user in the current defragmentation pass.
1445
1446 Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass().
1447
1448 For each element, you should:
1449
1450 1. Create a new buffer/image in the place pointed by VmaDefragmentationMove::dstMemory + VmaDefragmentationMove::dstOffset.
1451 2. Copy data from the VmaDefragmentationMove::srcAllocation e.g. using `vkCmdCopyBuffer`, `vkCmdCopyImage`.
1452 3. Make sure these commands finished executing on the GPU.
1453 4. Destroy the old buffer/image.
1454
1455 Only then you can finish defragmentation pass by calling vmaEndDefragmentationPass().
1456 After this call, the allocation will point to the new place in memory.
1457
1458 Alternatively, if you cannot move specific allocation, you can set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
1459
1460 Alternatively, if you decide you want to completely remove the allocation:
1461
1462 1. Destroy its buffer/image.
1463 2. Set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
1464
1465 Then, after vmaEndDefragmentationPass() the allocation will be freed.
1466 */
1469
1470/// Statistics returned for defragmentation process in function vmaEndDefragmentation().
1472{
1473 /// Total number of bytes that have been copied while moving allocations to different places.
1475 /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects.
1477 /// Number of allocations that have been moved to different places.
1479 /// Number of empty `VkDeviceMemory` objects that have been released to the system.
1482
1483/** @} */
1484
1485/**
1486\addtogroup group_virtual
1487@{
1488*/
1489
1490/// Parameters of created #VmaVirtualBlock object to be passed to vmaCreateVirtualBlock().
1492{
1493 /** \brief Total size of the virtual block.
1494
1495 Sizes can be expressed in bytes or any units you want as long as you are consistent in using them.
1496 For example, if you allocate from some array of structures, 1 can mean single instance of entire structure.
1497 */
1499
1500 /** \brief Use combination of #VmaVirtualBlockCreateFlagBits.
1501 */
1503
1504 /** \brief Custom CPU memory allocation callbacks. Optional.
1505
1506 Optional, can be null. When specified, they will be used for all CPU-side memory allocations.
1507 */
1510
1511/// Parameters of created virtual allocation to be passed to vmaVirtualAllocate().
1513{
1514 /** \brief Size of the allocation.
1515
1516 Cannot be zero.
1517 */
1519 /** \brief Required alignment of the allocation. Optional.
1520
1521 Must be power of two. Special value 0 has the same meaning as 1 - means no special alignment is required, so allocation can start at any offset.
1522 */
1524 /** \brief Use combination of #VmaVirtualAllocationCreateFlagBits.
1525 */
1527 /** \brief Custom pointer to be associated with the allocation. Optional.
1528
1529 It can be any value and can be used for user-defined purposes. It can be fetched or changed later.
1530 */
1533
1534/// Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo().
1536{
1537 /** \brief Offset of the allocation.
1538
1539 Offset at which the allocation was made.
1540 */
1542 /** \brief Size of the allocation.
1543
1544 Same value as passed in VmaVirtualAllocationCreateInfo::size.
1545 */
1547 /** \brief Custom pointer associated with the allocation.
1548
1549 Same value as passed in VmaVirtualAllocationCreateInfo::pUserData or to vmaSetVirtualAllocationUserData().
1550 */
1553
1554/** @} */
1555
1556#endif // _VMA_DATA_TYPES_DECLARATIONS
1557
1558#ifndef _VMA_FUNCTION_HEADERS
1559
1560/**
1561\addtogroup group_init
1562@{
1563*/
1564
1565/// Creates #VmaAllocator object.
1567 const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
1569
1570/// Destroys allocator object.
1572 VmaAllocator VMA_NULLABLE allocator);
1573
1574/** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc.
1575
1576It might be useful if you want to keep just the #VmaAllocator handle and fetch other required handles to
1577`VkPhysicalDevice`, `VkDevice` etc. every time using this function.
1578*/
1580 VmaAllocator VMA_NOT_NULL allocator,
1581 VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);
1582
1583/**
1584PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
1585You can access it here, without fetching it again on your own.
1586*/
1588 VmaAllocator VMA_NOT_NULL allocator,
1589 const VkPhysicalDeviceProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceProperties);
1590
1591/**
1592PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
1593You can access it here, without fetching it again on your own.
1594*/
1596 VmaAllocator VMA_NOT_NULL allocator,
1597 const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);
1598
1599/**
1600\brief Given Memory Type Index, returns Property Flags of this memory type.
1601
1602This is just a convenience function. Same information can be obtained using
1603vmaGetMemoryProperties().
1604*/
1606 VmaAllocator VMA_NOT_NULL allocator,
1607 uint32_t memoryTypeIndex,
1609
1610/** \brief Sets index of the current frame.
1611*/
1613 VmaAllocator VMA_NOT_NULL allocator,
1614 uint32_t frameIndex);
1615
1616/** @} */
1617
1618/**
1619\addtogroup group_stats
1620@{
1621*/
1622
1623/** \brief Retrieves statistics from current state of the Allocator.
1624
1625This function is called "calculate" not "get" because it has to traverse all
1626internal data structures, so it may be quite slow. Use it for debugging purposes.
1627For faster but more brief statistics suitable to be called every frame or every allocation,
1628use vmaGetHeapBudgets().
1629
1630Note that when using allocator from multiple threads, returned information may immediately
1631become outdated.
1632*/
1634 VmaAllocator VMA_NOT_NULL allocator,
1636
1637/** \brief Retrieves information about current memory usage and budget for all memory heaps.
1638
1639\param allocator
1640\param[out] pBudgets Must point to array with number of elements at least equal to number of memory heaps in physical device used.
1641
1642This function is called "get" not "calculate" because it is very fast, suitable to be called
1643every frame or every allocation. For more detailed statistics use vmaCalculateStatistics().
1644
1645Note that when using allocator from multiple threads, returned information may immediately
1646become outdated.
1647*/
1649 VmaAllocator VMA_NOT_NULL allocator,
1650 VmaBudget* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pBudgets);
1651
1652/** @} */
1653
1654/**
1655\addtogroup group_alloc
1656@{
1657*/
1658
1659/**
1660\brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
1661
1662This algorithm tries to find a memory type that:
1663
1664- Is allowed by memoryTypeBits.
1665- Contains all the flags from pAllocationCreateInfo->requiredFlags.
1666- Matches intended usage.
1667- Has as many flags from pAllocationCreateInfo->preferredFlags as possible.
1668
1669\return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such result
1670from this function or any other allocating function probably means that your
1671device doesn't support any memory type with requested features for the specific
1672type of resource you want to use it for. Please check parameters of your
1673resource, like image layout (OPTIMAL versus LINEAR) or mip level count.
1674*/
1676 VmaAllocator VMA_NOT_NULL allocator,
1677 uint32_t memoryTypeBits,
1678 const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
1679 uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
1680
1681/**
1682\brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
1683
1684It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex.
1685It internally creates a temporary, dummy buffer that never has memory bound.
1686*/
1688 VmaAllocator VMA_NOT_NULL allocator,
1689 const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
1690 const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
1691 uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
1692
1693/**
1694\brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
1695
1696It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex.
1697It internally creates a temporary, dummy image that never has memory bound.
1698*/
1700 VmaAllocator VMA_NOT_NULL allocator,
1701 const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
1702 const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
1703 uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
1704
1705/** \brief Allocates Vulkan device memory and creates #VmaPool object.
1706
1707\param allocator Allocator object.
1708\param pCreateInfo Parameters of pool to create.
1709\param[out] pPool Handle to created pool.
1710*/
1712 VmaAllocator VMA_NOT_NULL allocator,
1713 const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,
1715
1716/** \brief Destroys #VmaPool object and frees Vulkan device memory.
1717*/
1719 VmaAllocator VMA_NOT_NULL allocator,
1720 VmaPool VMA_NULLABLE pool);
1721
1722/** @} */
1723
1724/**
1725\addtogroup group_stats
1726@{
1727*/
1728
1729/** \brief Retrieves statistics of existing #VmaPool object.
1730
1731\param allocator Allocator object.
1732\param pool Pool object.
1733\param[out] pPoolStats Statistics of specified pool.
1734*/
1736 VmaAllocator VMA_NOT_NULL allocator,
1737 VmaPool VMA_NOT_NULL pool,
1738 VmaStatistics* VMA_NOT_NULL pPoolStats);
1739
1740/** \brief Retrieves detailed statistics of existing #VmaPool object.
1741
1742\param allocator Allocator object.
1743\param pool Pool object.
1744\param[out] pPoolStats Statistics of specified pool.
1745*/
1747 VmaAllocator VMA_NOT_NULL allocator,
1748 VmaPool VMA_NOT_NULL pool,
1750
1751/** @} */
1752
1753/**
1754\addtogroup group_alloc
1755@{
1756*/
1757
1758/** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions.
1759
1760Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
1761`VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is
1762`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
1763
1764Possible return values:
1765
1766- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool.
1767- `VK_SUCCESS` - corruption detection has been performed and succeeded.
1768- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
1769 `VMA_ASSERT` is also fired in that case.
1770- Other value: Error returned by Vulkan, e.g. memory mapping failure.
1771*/
1773 VmaAllocator VMA_NOT_NULL allocator,
1774 VmaPool VMA_NOT_NULL pool);
1775
1776/** \brief Retrieves name of a custom pool.
1777
1778After the call `ppName` is either null or points to an internally-owned null-terminated string
1779containing name of the pool that was previously set. The pointer becomes invalid when the pool is
1780destroyed or its name is changed using vmaSetPoolName().
1781*/
1783 VmaAllocator VMA_NOT_NULL allocator,
1784 VmaPool VMA_NOT_NULL pool,
1785 const char* VMA_NULLABLE* VMA_NOT_NULL ppName);
1786
1787/** \brief Sets name of a custom pool.
1788
1789`pName` can be either null or pointer to a null-terminated string with new name for the pool.
1790Function makes internal copy of the string, so it can be changed or freed immediately after this call.
1791*/
1793 VmaAllocator VMA_NOT_NULL allocator,
1794 VmaPool VMA_NOT_NULL pool,
1795 const char* VMA_NULLABLE pName);
1796
1797/** \brief General purpose memory allocation.
1798
1799\param allocator
1800\param pVkMemoryRequirements
1801\param pCreateInfo
1802\param[out] pAllocation Handle to allocated memory.
1803\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
1804
1805You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
1806
1807It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(),
1808vmaCreateBuffer(), vmaCreateImage() instead whenever possible.
1809*/
1811 VmaAllocator VMA_NOT_NULL allocator,
1812 const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
1813 const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
1815 VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
1816
1817/** \brief General purpose memory allocation for multiple allocation objects at once.
1818
1819\param allocator Allocator object.
1820\param pVkMemoryRequirements Memory requirements for each allocation.
1821\param pCreateInfo Creation parameters for each allocation.
1822\param allocationCount Number of allocations to make.
1823\param[out] pAllocations Pointer to array that will be filled with handles to created allocations.
1824\param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations.
1825
1826You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
1827
1828Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding.
1829It is just a general purpose allocation function able to make multiple allocations at once.
1830It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times.
1831
1832All allocations are made using same parameters. All of them are created out of the same memory pool and type.
1833If any allocation fails, all allocations already made within this function call are also freed, so that when
1834returned result is not `VK_SUCCESS`, `pAllocation` array is always entirely filled with `VK_NULL_HANDLE`.
1835*/
1837 VmaAllocator VMA_NOT_NULL allocator,
1838 const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
1839 const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
1840 size_t allocationCount,
1841 VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
1842 VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);
1843
1844/** \brief Allocates memory suitable for given `VkBuffer`.
1845
1846\param allocator
1847\param buffer
1848\param pCreateInfo
1849\param[out] pAllocation Handle to allocated memory.
1850\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
1851
1852It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindBufferMemory().
1853
1854This is a special-purpose function. In most cases you should use vmaCreateBuffer().
1855
1856You must free the allocation using vmaFreeMemory() when no longer needed.
1857*/
1859 VmaAllocator VMA_NOT_NULL allocator,
1860 VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
1861 const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
1863 VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
1864
1865/** \brief Allocates memory suitable for given `VkImage`.
1866
1867\param allocator
1868\param image
1869\param pCreateInfo
1870\param[out] pAllocation Handle to allocated memory.
1871\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
1872
1873It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindImageMemory().
1874
1875This is a special-purpose function. In most cases you should use vmaCreateImage().
1876
1877You must free the allocation using vmaFreeMemory() when no longer needed.
1878*/
1880 VmaAllocator VMA_NOT_NULL allocator,
1881 VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
1882 const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
1884 VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
1885
1886/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
1887
1888Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped.
1889*/
1891 VmaAllocator VMA_NOT_NULL allocator,
1892 const VmaAllocation VMA_NULLABLE allocation);
1893
1894/** \brief Frees memory and destroys multiple allocations.
1895
1896Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding.
1897It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(),
1898vmaAllocateMemoryPages() and other functions.
1899It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times.
1900
1901Allocations in `pAllocations` array can come from any memory pools and types.
1902Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped.
1903*/
1905 VmaAllocator VMA_NOT_NULL allocator,
1906 size_t allocationCount,
1907 const VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);
1908
1909/** \brief Returns current information about specified allocation.
1910
1911Current parameters of the given allocation are returned in `pAllocationInfo`.
1912
1913Although this function doesn't lock any mutex, so it should be quite efficient,
1914you should avoid calling it too often.
1915You can retrieve same VmaAllocationInfo structure while creating your resource, from function
1916vmaCreateBuffer(), vmaCreateImage(). You can remember it if you are sure parameters don't change
1917(e.g. due to defragmentation).
1918*/
1920 VmaAllocator VMA_NOT_NULL allocator,
1921 VmaAllocation VMA_NOT_NULL allocation,
1922 VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);
1923
1924/** \brief Sets pUserData in given allocation to new value.
1925
1926The value of pointer `pUserData` is copied to allocation's `pUserData`.
1927It is opaque, so you can use it however you want - e.g.
1928as a pointer, ordinal number or some handle to you own data.
1929*/
1931 VmaAllocator VMA_NOT_NULL allocator,
1932 VmaAllocation VMA_NOT_NULL allocation,
1933 void* VMA_NULLABLE pUserData);
1934
1935/** \brief Sets pName in given allocation to new value.
1936
1937`pName` must be either null, or pointer to a null-terminated string. The function
1938makes local copy of the string and sets it as allocation's `pName`. String
1939passed as pName doesn't need to be valid for whole lifetime of the allocation -
1940you can free it after this call. String previously pointed by allocation's
1941`pName` is freed from memory.
1942*/
1944 VmaAllocator VMA_NOT_NULL allocator,
1945 VmaAllocation VMA_NOT_NULL allocation,
1946 const char* VMA_NULLABLE pName);
1947
1948/**
1949\brief Given an allocation, returns Property Flags of its memory type.
1950
1951This is just a convenience function. Same information can be obtained using
1952vmaGetAllocationInfo() + vmaGetMemoryProperties().
1953*/
1955 VmaAllocator VMA_NOT_NULL allocator,
1956 VmaAllocation VMA_NOT_NULL allocation,
1958
1959/** \brief Maps memory represented by given allocation and returns pointer to it.
1960
1961Maps memory represented by given allocation to make it accessible to CPU code.
1962When succeeded, `*ppData` contains pointer to first byte of this memory.
1963
1964\warning
1965If the allocation is part of a bigger `VkDeviceMemory` block, returned pointer is
1966correctly offsetted to the beginning of region assigned to this particular allocation.
1967Unlike the result of `vkMapMemory`, it points to the allocation, not to the beginning of the whole block.
1968You should not add VmaAllocationInfo::offset to it!
1969
1970Mapping is internally reference-counted and synchronized, so despite raw Vulkan
1971function `vkMapMemory()` cannot be used to map same block of `VkDeviceMemory`
1972multiple times simultaneously, it is safe to call this function on allocations
1973assigned to the same memory block. Actual Vulkan memory will be mapped on first
1974mapping and unmapped on last unmapping.
1975
1976If the function succeeded, you must call vmaUnmapMemory() to unmap the
1977allocation when mapping is no longer needed or before freeing the allocation, at
1978the latest.
1979
1980It is also safe to call this function multiple times on the same allocation. You
1981must call vmaUnmapMemory() same number of times as you called vmaMapMemory().
1982
1983It is also safe to call this function on allocation created with
1984#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.
1985You must still call vmaUnmapMemory() same number of times as you called
1986vmaMapMemory(). You must not call vmaUnmapMemory() additional time to free the
1987"0-th" mapping made automatically due to #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
1988
1989This function fails when used on allocation made in memory type that is not
1990`HOST_VISIBLE`.
1991
1992This function doesn't automatically flush or invalidate caches.
1993If the allocation is made from a memory type that is not `HOST_COHERENT`,
1994you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
1995*/
1997 VmaAllocator VMA_NOT_NULL allocator,
1998 VmaAllocation VMA_NOT_NULL allocation,
1999 void* VMA_NULLABLE* VMA_NOT_NULL ppData);
2000
2001/** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
2002
2003For details, see description of vmaMapMemory().
2004
2005This function doesn't automatically flush or invalidate caches.
2006If the allocation is made from a memory type that is not `HOST_COHERENT`,
2007you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
2008*/
2010 VmaAllocator VMA_NOT_NULL allocator,
2011 VmaAllocation VMA_NOT_NULL allocation);
2012
2013/** \brief Flushes memory of given allocation.
2014
2015Calls `vkFlushMappedMemoryRanges()` for memory associated with given range of given allocation.
2016It needs to be called after writing to a mapped memory for memory types that are not `HOST_COHERENT`.
2017Unmap operation doesn't do that automatically.
2018
2019- `offset` must be relative to the beginning of allocation.
2020- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of given allocation.
2021- `offset` and `size` don't have to be aligned.
2022 They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
2023- If `size` is 0, this call is ignored.
2024- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
2025 this call is ignored.
2026
2027Warning! `offset` and `size` are relative to the contents of given `allocation`.
2028If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
2029Do not pass allocation's offset as `offset`!!!
2030
2031This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
2032called, otherwise `VK_SUCCESS`.
2033*/
2035 VmaAllocator VMA_NOT_NULL allocator,
2036 VmaAllocation VMA_NOT_NULL allocation,
2037 VkDeviceSize offset,
2038 VkDeviceSize size);
2039
2040/** \brief Invalidates memory of given allocation.
2041
2042Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given range of given allocation.
2043It needs to be called before reading from a mapped memory for memory types that are not `HOST_COHERENT`.
2044Map operation doesn't do that automatically.
2045
2046- `offset` must be relative to the beginning of allocation.
2047- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of given allocation.
2048- `offset` and `size` don't have to be aligned.
2049 They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
2050- If `size` is 0, this call is ignored.
2051- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
2052 this call is ignored.
2053
2054Warning! `offset` and `size` are relative to the contents of given `allocation`.
2055If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
2056Do not pass allocation's offset as `offset`!!!
2057
2058This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if
2059it is called, otherwise `VK_SUCCESS`.
2060*/
2062 VmaAllocator VMA_NOT_NULL allocator,
2063 VmaAllocation VMA_NOT_NULL allocation,
2064 VkDeviceSize offset,
2065 VkDeviceSize size);
2066
2067/** \brief Flushes memory of given set of allocations.
2068
2069Calls `vkFlushMappedMemoryRanges()` for memory associated with given ranges of given allocations.
2070For more information, see documentation of vmaFlushAllocation().
2071
2072\param allocator
2073\param allocationCount
2074\param allocations
2075\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all offsets are zero.
2076\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
2077
2078This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
2079called, otherwise `VK_SUCCESS`.
2080*/
2082 VmaAllocator VMA_NOT_NULL allocator,
2083 uint32_t allocationCount,
2084 const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
2085 const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
2086 const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
2087
2088/** \brief Invalidates memory of given set of allocations.
2089
2090Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations.
2091For more information, see documentation of vmaInvalidateAllocation().
2092
2093\param allocator
2094\param allocationCount
2095\param allocations
2096\param offsets If not null, it must point to an array of offsets of regions to invalidate, relative to the beginning of respective allocations. Null means all offsets are zero.
2097\param sizes If not null, it must point to an array of sizes of regions to invalidate in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
2098
2099This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is
2100called, otherwise `VK_SUCCESS`.
2101*/
2103 VmaAllocator VMA_NOT_NULL allocator,
2104 uint32_t allocationCount,
2105 const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
2106 const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
2107 const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
2108
2109/** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions.
2110
2111\param allocator
2112\param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked.
2113
2114Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
2115`VMA_DEBUG_MARGIN` is defined to nonzero and only for memory types that are
2116`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
2117
2118Possible return values:
2119
2120- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types.
2121- `VK_SUCCESS` - corruption detection has been performed and succeeded.
2122- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
2123 `VMA_ASSERT` is also fired in that case.
2124- Other value: Error returned by Vulkan, e.g. memory mapping failure.
2125*/
2127 VmaAllocator VMA_NOT_NULL allocator,
2128 uint32_t memoryTypeBits);
2129
2130/** \brief Begins defragmentation process.
2131
2132\param allocator Allocator object.
2133\param pInfo Structure filled with parameters of defragmentation.
2134\param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation.
2135\returns
2136- `VK_SUCCESS` if defragmentation can begin.
2137- `VK_ERROR_FEATURE_NOT_PRESENT` if defragmentation is not supported.
2138
2139For more information about defragmentation, see documentation chapter:
2140[Defragmentation](@ref defragmentation).
2141*/
2143 VmaAllocator VMA_NOT_NULL allocator,
2146
2147/** \brief Ends defragmentation process.
2148
2149\param allocator Allocator object.
2150\param context Context object that has been created by vmaBeginDefragmentation().
2151\param[out] pStats Optional stats for the defragmentation. Can be null.
2152
2153Use this function to finish defragmentation started by vmaBeginDefragmentation().
2154*/
2156 VmaAllocator VMA_NOT_NULL allocator,
2159
2160/** \brief Starts single defragmentation pass.
2161
2162\param allocator Allocator object.
2163\param context Context object that has been created by vmaBeginDefragmentation().
2164\param[out] pPassInfo Computed information for the current pass.
2165\returns
2166- `VK_SUCCESS` if no more moves are possible. Then you can omit call to vmaEndDefragmentationPass() and simply end whole defragmentation.
2167- `VK_INCOMPLETE` if there are pending moves returned in `pPassInfo`. You need to perform them, call vmaEndDefragmentationPass(),
2168 and then preferably try another pass with vmaBeginDefragmentationPass().
2169*/
2171 VmaAllocator VMA_NOT_NULL allocator,
2174
2175/** \brief Ends single defragmentation pass.
2176
2177\param allocator Allocator object.
2178\param context Context object that has been created by vmaBeginDefragmentation().
2179\param pPassInfo Computed information for the current pass, filled by vmaBeginDefragmentationPass() and possibly modified by you.
2180
2181Returns `VK_SUCCESS` if no more moves are possible or `VK_INCOMPLETE` if more defragmentations are possible.
2182
2183Ends incremental defragmentation pass and commits all defragmentation moves from `pPassInfo`.
2184After this call:
2185
2186- Allocations at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY
2187 (which is the default) will be pointing to the new destination place.
2188- Allocation at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY
2189 will be freed.
2190
2191If no more moves are possible you can end whole defragmentation.
2192*/
2194 VmaAllocator VMA_NOT_NULL allocator,
2197
2198/** \brief Binds buffer to allocation.
2199
2200Binds specified buffer to region of memory represented by specified allocation.
2201Gets `VkDeviceMemory` handle and offset from the allocation.
2202If you want to create a buffer, allocate memory for it and bind them together separately,
2203you should use this function for binding instead of standard `vkBindBufferMemory()`,
2204because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
2205allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
2206(which is illegal in Vulkan).
2207
2208It is recommended to use function vmaCreateBuffer() instead of this one.
2209*/
2211 VmaAllocator VMA_NOT_NULL allocator,
2212 VmaAllocation VMA_NOT_NULL allocation,
2213 VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);
2214
2215/** \brief Binds buffer to allocation with additional parameters.
2216
2217\param allocator
2218\param allocation
2219\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
2220\param buffer
2221\param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null.
2222
2223This function is similar to vmaBindBufferMemory(), but it provides additional parameters.
2224
2225If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
2226or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
2227*/
2229 VmaAllocator VMA_NOT_NULL allocator,
2230 VmaAllocation VMA_NOT_NULL allocation,
2231 VkDeviceSize allocationLocalOffset,
2232 VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
2233 const void* VMA_NULLABLE pNext);
2234
2235/** \brief Binds image to allocation.
2236
2237Binds specified image to region of memory represented by specified allocation.
2238Gets `VkDeviceMemory` handle and offset from the allocation.
2239If you want to create an image, allocate memory for it and bind them together separately,
2240you should use this function for binding instead of standard `vkBindImageMemory()`,
2241because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
2242allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
2243(which is illegal in Vulkan).
2244
2245It is recommended to use function vmaCreateImage() instead of this one.
2246*/
2248 VmaAllocator VMA_NOT_NULL allocator,
2249 VmaAllocation VMA_NOT_NULL allocation,
2250 VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);
2251
2252/** \brief Binds image to allocation with additional parameters.
2253
2254\param allocator
2255\param allocation
2256\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
2257\param image
2258\param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null.
2259
2260This function is similar to vmaBindImageMemory(), but it provides additional parameters.
2261
2262If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
2263or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
2264*/
2266 VmaAllocator VMA_NOT_NULL allocator,
2267 VmaAllocation VMA_NOT_NULL allocation,
2268 VkDeviceSize allocationLocalOffset,
2269 VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
2270 const void* VMA_NULLABLE pNext);
2271
2272/** \brief Creates a new `VkBuffer`, allocates and binds memory for it.
2273
2274\param allocator
2275\param pBufferCreateInfo
2276\param pAllocationCreateInfo
2277\param[out] pBuffer Buffer that was created.
2278\param[out] pAllocation Allocation that was created.
2279\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
2280
2281This function automatically:
2282
2283-# Creates buffer.
2284-# Allocates appropriate memory for it.
2285-# Binds the buffer with the memory.
2286
2287If any of these operations fail, buffer and allocation are not created,
2288returned value is negative error code, `*pBuffer` and `*pAllocation` are null.
2289
2290If the function succeeded, you must destroy both buffer and allocation when you
2291no longer need them using either convenience function vmaDestroyBuffer() or
2292separately, using `vkDestroyBuffer()` and vmaFreeMemory().
2293
2294If #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used,
2295VK_KHR_dedicated_allocation extension is used internally to query driver whether
2296it requires or prefers the new buffer to have dedicated allocation. If yes,
2297and if dedicated allocation is possible
2298(#VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated
2299allocation for this buffer, just like when using
2300#VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
2301
2302\note This function creates a new `VkBuffer`. Sub-allocation of parts of one large buffer,
2303although recommended as a good practice, is out of scope of this library and could be implemented
2304by the user as a higher-level logic on top of VMA.
2305*/
2307 VmaAllocator VMA_NOT_NULL allocator,
2308 const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
2309 const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
2312 VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
2313
2314/** \brief Creates a buffer with additional minimum alignment.
2315
2316Similar to vmaCreateBuffer() but provides additional parameter `minAlignment` which allows to specify custom,
2317minimum alignment to be used when placing the buffer inside a larger memory block, which may be needed e.g.
2318for interop with OpenGL.
2319*/
2321 VmaAllocator VMA_NOT_NULL allocator,
2322 const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
2323 const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
2324 VkDeviceSize minAlignment,
2327 VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
2328
2329/** \brief Creates a new `VkBuffer`, binds already created memory for it.
2330
2331\param allocator
2332\param allocation Allocation that provides memory to be used for binding new buffer to it.
2333\param pBufferCreateInfo
2334\param[out] pBuffer Buffer that was created.
2335
2336This function automatically:
2337
2338-# Creates buffer.
2339-# Binds the buffer with the supplied memory.
2340
2341If any of these operations fail, buffer is not created,
2342returned value is negative error code and `*pBuffer` is null.
2343
2344If the function succeeded, you must destroy the buffer when you
2345no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding
2346allocation you can use convenience function vmaDestroyBuffer().
2347*/
2349 VmaAllocator VMA_NOT_NULL allocator,
2350 VmaAllocation VMA_NOT_NULL allocation,
2351 const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
2353
2354/** \brief Destroys Vulkan buffer and frees allocated memory.
2355
2356This is just a convenience function equivalent to:
2357
2358\code
2359vkDestroyBuffer(device, buffer, allocationCallbacks);
2360vmaFreeMemory(allocator, allocation);
2361\endcode
2362
2363It is safe to pass null as buffer and/or allocation.
2364*/
2366 VmaAllocator VMA_NOT_NULL allocator,
2367 VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
2368 VmaAllocation VMA_NULLABLE allocation);
2369
2370/// Function similar to vmaCreateBuffer().
2372 VmaAllocator VMA_NOT_NULL allocator,
2373 const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
2374 const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
2377 VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
2378
2379/// Function similar to vmaCreateAliasingBuffer().
2381 VmaAllocator VMA_NOT_NULL allocator,
2382 VmaAllocation VMA_NOT_NULL allocation,
2383 const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
2385
2386/** \brief Destroys Vulkan image and frees allocated memory.
2387
2388This is just a convenience function equivalent to:
2389
2390\code
2391vkDestroyImage(device, image, allocationCallbacks);
2392vmaFreeMemory(allocator, allocation);
2393\endcode
2394
It is safe to pass null as image and/or allocation.
2396*/
2398 VmaAllocator VMA_NOT_NULL allocator,
2399 VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
2400 VmaAllocation VMA_NULLABLE allocation);
2401
2402/** @} */
2403
2404/**
2405\addtogroup group_virtual
2406@{
2407*/
2408
2409/** \brief Creates new #VmaVirtualBlock object.
2410
2411\param pCreateInfo Parameters for creation.
2412\param[out] pVirtualBlock Returned virtual block object or `VMA_NULL` if creation failed.
2413*/
2415 const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
2417
2418/** \brief Destroys #VmaVirtualBlock object.
2419
2420Please note that you should consciously handle virtual allocations that could remain unfreed in the block.
2421You should either free them individually using vmaVirtualFree() or call vmaClearVirtualBlock()
2422if you are sure this is what you want. If you do neither, an assert is called.
2423
2424If you keep pointers to some additional metadata associated with your virtual allocations in their `pUserData`,
2425don't forget to free them.
2426*/
2428 VmaVirtualBlock VMA_NULLABLE virtualBlock);
2429
/** \brief Returns true if the #VmaVirtualBlock is empty - contains 0 virtual allocations and has all its space available for new allocations.
2431*/
2433 VmaVirtualBlock VMA_NOT_NULL virtualBlock);
2434
2435/** \brief Returns information about a specific virtual allocation within a virtual block, like its size and `pUserData` pointer.
2436*/
2438 VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2440
2441/** \brief Allocates new virtual allocation inside given #VmaVirtualBlock.
2442
If the allocation fails due to not enough free space available, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned
(even though the function doesn't ever allocate actual GPU memory).
`pAllocation` is then set to `VK_NULL_HANDLE` and `pOffset`, if not null, is set to `UINT64_MAX`.
2446
2447\param virtualBlock Virtual block
2448\param pCreateInfo Parameters for the allocation
2449\param[out] pAllocation Returned handle of the new allocation
2450\param[out] pOffset Returned offset of the new allocation. Optional, can be null.
2451*/
2453 VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2456 VkDeviceSize* VMA_NULLABLE pOffset);
2457
2458/** \brief Frees virtual allocation inside given #VmaVirtualBlock.
2459
2460It is correct to call this function with `allocation == VK_NULL_HANDLE` - it does nothing.
2461*/
2463 VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2465
2466/** \brief Frees all virtual allocations inside given #VmaVirtualBlock.
2467
2468You must either call this function or free each virtual allocation individually with vmaVirtualFree()
2469before destroying a virtual block. Otherwise, an assert is called.
2470
2471If you keep pointer to some additional metadata associated with your virtual allocation in its `pUserData`,
2472don't forget to free it as well.
2473*/
2475 VmaVirtualBlock VMA_NOT_NULL virtualBlock);
2476
2477/** \brief Changes custom pointer associated with given virtual allocation.
2478*/
2480 VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2482 void* VMA_NULLABLE pUserData);
2483
2484/** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
2485
2486This function is fast to call. For more detailed statistics, see vmaCalculateVirtualBlockStatistics().
2487*/
2489 VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2490 VmaStatistics* VMA_NOT_NULL pStats);
2491
2492/** \brief Calculates and returns detailed statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
2493
2494This function is slow to call. Use for debugging purposes.
2495For less detailed statistics, see vmaGetVirtualBlockStatistics().
2496*/
2498 VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2500
2501/** @} */
2502
2503#if VMA_STATS_STRING_ENABLED
2504/**
2505\addtogroup group_stats
2506@{
2507*/
2508
2509/** \brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock.
2510\param virtualBlock Virtual block.
2511\param[out] ppStatsString Returned string.
2512\param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStatistics(). Pass `VK_TRUE` to also obtain full list of allocations and free spaces.
2513
2514Returned string must be freed using vmaFreeVirtualBlockStatsString().
2515*/
2517 VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2518 char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
2519 VkBool32 detailedMap);
2520
2521/// Frees a string returned by vmaBuildVirtualBlockStatsString().
2523 VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2524 char* VMA_NULLABLE pStatsString);
2525
2526/** \brief Builds and returns statistics as a null-terminated string in JSON format.
2527\param allocator
2528\param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
2529\param detailedMap
2530*/
2532 VmaAllocator VMA_NOT_NULL allocator,
2533 char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
2534 VkBool32 detailedMap);
2535
2537 VmaAllocator VMA_NOT_NULL allocator,
2538 char* VMA_NULLABLE pStatsString);
2539
2540/** @} */
2541
2542#endif // VMA_STATS_STRING_ENABLED
2543
2544#endif // _VMA_FUNCTION_HEADERS
2545
2546#ifdef __cplusplus
2547}
2548#endif
2549
2550#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
2551
2552////////////////////////////////////////////////////////////////////////////////
2553////////////////////////////////////////////////////////////////////////////////
2554//
2555// IMPLEMENTATION
2556//
2557////////////////////////////////////////////////////////////////////////////////
2558////////////////////////////////////////////////////////////////////////////////
2559
2560// For Visual Studio IntelliSense.
2561#if defined(__cplusplus) && defined(__INTELLISENSE__)
2562#define VMA_IMPLEMENTATION
2563#endif
2564
2565#ifdef VMA_IMPLEMENTATION
2566#undef VMA_IMPLEMENTATION
2567
2568#include <cstdint>
2569#include <cstdlib>
2570#include <cstring>
2571#include <utility>
2572#include <type_traits>
2573
2574#ifdef _MSC_VER
2575 #include <intrin.h> // For functions like __popcnt, _BitScanForward etc.
2576#endif
2577#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
2578 #include <bit> // For std::popcount
2579#endif
2580
2581/*******************************************************************************
2582CONFIGURATION SECTION
2583
Define some of these macros before each #include of this header or change them
here if you need other than the default behavior depending on your environment.
2586*/
2587#ifndef _VMA_CONFIGURATION
2588
2589/*
2590Define this macro to 1 to make the library fetch pointers to Vulkan functions
2591internally, like:
2592
2593 vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2594*/
2595#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2596 #define VMA_STATIC_VULKAN_FUNCTIONS 1
2597#endif
2598
2599/*
2600Define this macro to 1 to make the library fetch pointers to Vulkan functions
2601internally, like:
2602
2603 vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
2604
2605To use this feature in new versions of VMA you now have to pass
2606VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as
2607VmaAllocatorCreateInfo::pVulkanFunctions. Other members can be null.
2608*/
2609#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
2610 #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
2611#endif
2612
2613#ifndef VMA_USE_STL_SHARED_MUTEX
2614 // Compiler conforms to C++17.
2615 #if __cplusplus >= 201703L
2616 #define VMA_USE_STL_SHARED_MUTEX 1
2617 // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
2618 // Otherwise it is always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2.
2619 #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
2620 #define VMA_USE_STL_SHARED_MUTEX 1
2621 #else
2622 #define VMA_USE_STL_SHARED_MUTEX 0
2623 #endif
2624#endif
2625
2626/*
2627Define this macro to include custom header files without having to edit this file directly, e.g.:
2628
2629 // Inside of "my_vma_configuration_user_includes.h":
2630
2631 #include "my_custom_assert.h" // for MY_CUSTOM_ASSERT
2632 #include "my_custom_min.h" // for my_custom_min
2633 #include <algorithm>
2634 #include <mutex>
2635
2636 // Inside a different file, which includes "vk_mem_alloc.h":
2637
2638 #define VMA_CONFIGURATION_USER_INCLUDES_H "my_vma_configuration_user_includes.h"
2639 #define VMA_ASSERT(expr) MY_CUSTOM_ASSERT(expr)
2640 #define VMA_MIN(v1, v2) (my_custom_min(v1, v2))
2641 #include "vk_mem_alloc.h"
2642 ...
2643
2644The following headers are used in this CONFIGURATION section only, so feel free to
2645remove them if not needed.
2646*/
2647#if !defined(VMA_CONFIGURATION_USER_INCLUDES_H)
2648 #include <cassert> // for assert
2649 #include <algorithm> // for min, max
2650 #include <mutex>
2651#else
2652 #include VMA_CONFIGURATION_USER_INCLUDES_H
2653#endif
2654
2655#ifndef VMA_NULL
2656 // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2657 #define VMA_NULL nullptr
2658#endif
2659
#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
// Old Android (API < 16) has no posix_memalign/aligned_alloc - use memalign.
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
#include <cstdlib>

#if defined(__APPLE__)
#include <AvailabilityMacros.h>
#endif

// Returns memory aligned to 'alignment' bytes, or VMA_NULL on failure.
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    // Unfortunately, aligned_alloc causes VMA to crash due to it returning null pointers. (At least under 11.4)
    // Therefore, for now disable this specific exception until a proper solution is found.
    //#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
    //#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
    //    // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc()) only
    //    // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
    //    // MAC_OS_X_VERSION_10_16), even though the function is marked
    //    // available for 10.15. That is why the preprocessor checks for 10.16 but
    //    // the __builtin_available checks for 10.15.
    //    // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
    //    if (__builtin_available(macOS 10.15, iOS 13, *))
    //        return aligned_alloc(alignment, size);
    //#endif
    //#endif

    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#elif defined(_WIN32)
// Note reversed parameter order: _aligned_malloc takes (size, alignment).
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    return _aligned_malloc(size, alignment);
}
#else
// C11/C++17 aligned_alloc. Size should be a multiple of alignment.
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    return aligned_alloc(alignment, size);
}
#endif
2718
2719#if defined(_WIN32)
2720static void vma_aligned_free(void* ptr)
2721{
2722 _aligned_free(ptr);
2723}
2724#else
2725static void vma_aligned_free(void* VMA_NULLABLE ptr)
2726{
2727 free(ptr);
2728}
2729#endif
2730
// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:
2733
2734//#include <malloc.h>
2735
2736// Normal assert to check for programmer's errors, especially in Debug configuration.
2737#ifndef VMA_ASSERT
2738 #ifdef NDEBUG
2739 #define VMA_ASSERT(expr)
2740 #else
2741 #define VMA_ASSERT(expr) assert(expr)
2742 #endif
2743#endif
2744
2745// Assert that will be called very often, like inside data structures e.g. operator[].
2746// Making it non-empty can make program slow.
2747#ifndef VMA_HEAVY_ASSERT
2748 #ifdef NDEBUG
2749 #define VMA_HEAVY_ASSERT(expr)
2750 #else
2751 #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2752 #endif
2753#endif
2754
2755#ifndef VMA_ALIGN_OF
2756 #define VMA_ALIGN_OF(type) (__alignof(type))
2757#endif
2758
2759#ifndef VMA_SYSTEM_ALIGNED_MALLOC
2760 #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
2761#endif
2762
2763#ifndef VMA_SYSTEM_ALIGNED_FREE
2764 // VMA_SYSTEM_FREE is the old name, but might have been defined by the user
2765 #if defined(VMA_SYSTEM_FREE)
2766 #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr)
2767 #else
2768 #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr)
2769 #endif
2770#endif
2771
2772#ifndef VMA_COUNT_BITS_SET
2773 // Returns number of bits set to 1 in (v)
2774 #define VMA_COUNT_BITS_SET(v) VmaCountBitsSet(v)
2775#endif
2776
2777#ifndef VMA_BITSCAN_LSB
2778 // Scans integer for index of first nonzero value from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX
2779 #define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask)
2780#endif
2781
2782#ifndef VMA_BITSCAN_MSB
2783 // Scans integer for index of first nonzero value from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX
2784 #define VMA_BITSCAN_MSB(mask) VmaBitScanMSB(mask)
2785#endif
2786
2787#ifndef VMA_MIN
2788 #define VMA_MIN(v1, v2) ((std::min)((v1), (v2)))
2789#endif
2790
2791#ifndef VMA_MAX
2792 #define VMA_MAX(v1, v2) ((std::max)((v1), (v2)))
2793#endif
2794
2795#ifndef VMA_SWAP
2796 #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2797#endif
2798
2799#ifndef VMA_SORT
2800 #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2801#endif
2802
2803#ifndef VMA_DEBUG_LOG
2804 #define VMA_DEBUG_LOG(format, ...)
2805 /*
2806 #define VMA_DEBUG_LOG(format, ...) do { \
2807 printf(format, __VA_ARGS__); \
2808 printf("\n"); \
2809 } while(false)
2810 */
2811#endif
2812
// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    // Writes decimal representation of num into outStr (at most strLen bytes including terminator).
    static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    // Same as VmaUint32ToStr but for 64-bit values.
    static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    // Writes the pointer value (implementation-defined "%p" format) into outStr.
    static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif
2828
#ifndef VMA_MUTEX
    // Thin wrapper over std::mutex with the naming convention used throughout VMA.
    // Define VMA_MUTEX before including this file to substitute your own implementation.
    class VmaMutex
    {
    public:
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
        bool TryLock() { return m_Mutex.try_lock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif
2841
2842// Read-write mutex, where "read" is shared access, "write" is exclusive access.
2843#ifndef VMA_RW_MUTEX
2844 #if VMA_USE_STL_SHARED_MUTEX
2845 // Use std::shared_mutex from C++17.
2846 #include <shared_mutex>
2847 class VmaRWMutex
2848 {
2849 public:
2850 void LockRead() { m_Mutex.lock_shared(); }
2851 void UnlockRead() { m_Mutex.unlock_shared(); }
2852 bool TryLockRead() { return m_Mutex.try_lock_shared(); }
2853 void LockWrite() { m_Mutex.lock(); }
2854 void UnlockWrite() { m_Mutex.unlock(); }
2855 bool TryLockWrite() { return m_Mutex.try_lock(); }
2856 private:
2857 std::shared_mutex m_Mutex;
2858 };
2859 #define VMA_RW_MUTEX VmaRWMutex
2860 #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
2861 // Use SRWLOCK from WinAPI.
2862 // Minimum supported client = Windows Vista, server = Windows Server 2008.
2863 class VmaRWMutex
2864 {
2865 public:
2866 VmaRWMutex() { InitializeSRWLock(&m_Lock); }
2867 void LockRead() { AcquireSRWLockShared(&m_Lock); }
2868 void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
2869 bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
2870 void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
2871 void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
2872 bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
2873 private:
2874 SRWLOCK m_Lock;
2875 };
2876 #define VMA_RW_MUTEX VmaRWMutex
2877 #else
2878 // Less efficient fallback: Use normal mutex.
2879 class VmaRWMutex
2880 {
2881 public:
2882 void LockRead() { m_Mutex.Lock(); }
2883 void UnlockRead() { m_Mutex.Unlock(); }
2884 bool TryLockRead() { return m_Mutex.TryLock(); }
2885 void LockWrite() { m_Mutex.Lock(); }
2886 void UnlockWrite() { m_Mutex.Unlock(); }
2887 bool TryLockWrite() { return m_Mutex.TryLock(); }
2888 private:
2889 VMA_MUTEX m_Mutex;
2890 };
2891 #define VMA_RW_MUTEX VmaRWMutex
2892 #endif // #if VMA_USE_STL_SHARED_MUTEX
2893#endif // #ifndef VMA_RW_MUTEX
2894
2895/*
2896If providing your own implementation, you need to implement a subset of std::atomic.
2897*/
2898#ifndef VMA_ATOMIC_UINT32
2899 #include <atomic>
2900 #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2901#endif
2902
2903#ifndef VMA_ATOMIC_UINT64
2904 #include <atomic>
2905 #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
2906#endif
2907
2908#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2909 /**
2910 Every allocation will have its own memory block.
2911 Define to 1 for debugging purposes only.
2912 */
2913 #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2914#endif
2915
2916#ifndef VMA_MIN_ALIGNMENT
2917 /**
2918 Minimum alignment of all allocations, in bytes.
2919 Set to more than 1 for debugging purposes. Must be power of two.
2920 */
2921 #ifdef VMA_DEBUG_ALIGNMENT // Old name
2922 #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT
2923 #else
2924 #define VMA_MIN_ALIGNMENT (1)
2925 #endif
2926#endif
2927
2928#ifndef VMA_DEBUG_MARGIN
2929 /**
2930 Minimum margin after every allocation, in bytes.
2931 Set nonzero for debugging purposes only.
2932 */
2933 #define VMA_DEBUG_MARGIN (0)
2934#endif
2935
2936#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
2937 /**
2938 Define this macro to 1 to automatically fill new allocations and destroyed
2939 allocations with some bit pattern.
2940 */
2941 #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
2942#endif
2943
2944#ifndef VMA_DEBUG_DETECT_CORRUPTION
2945 /**
2946 Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to
2947 enable writing magic value to the margin after every allocation and
2948 validating it, so that memory corruptions (out-of-bounds writes) are detected.
2949 */
2950 #define VMA_DEBUG_DETECT_CORRUPTION (0)
2951#endif
2952
2953#ifndef VMA_DEBUG_GLOBAL_MUTEX
2954 /**
2955 Set this to 1 for debugging purposes only, to enable single mutex protecting all
2956 entry calls to the library. Can be useful for debugging multithreading issues.
2957 */
2958 #define VMA_DEBUG_GLOBAL_MUTEX (0)
2959#endif
2960
2961#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2962 /**
2963 Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
2964 Set to more than 1 for debugging purposes only. Must be power of two.
2965 */
2966 #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2967#endif
2968
2969#ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
2970 /*
2971 Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount
2972 and return error instead of leaving up to Vulkan implementation what to do in such cases.
2973 */
2974 #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0)
2975#endif
2976
2977#ifndef VMA_SMALL_HEAP_MAX_SIZE
2978 /// Maximum size of a memory heap in Vulkan to consider it "small".
2979 #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2980#endif
2981
2982#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2983 /// Default size of a block allocated as single VkDeviceMemory from a "large" heap.
2984 #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2985#endif
2986
2987/*
2988Mapping hysteresis is a logic that launches when vmaMapMemory/vmaUnmapMemory is called
2989or a persistently mapped allocation is created and destroyed several times in a row.
2990It keeps additional +1 mapping of a device memory block to prevent calling actual
2991vkMapMemory/vkUnmapMemory too many times, which may improve performance and help
tools like RenderDoc.
2993*/
2994#ifndef VMA_MAPPING_HYSTERESIS_ENABLED
2995 #define VMA_MAPPING_HYSTERESIS_ENABLED 1
2996#endif
2997
2998#ifndef VMA_CLASS_NO_COPY
2999 #define VMA_CLASS_NO_COPY(className) \
3000 private: \
3001 className(const className&) = delete; \
3002 className& operator=(const className&) = delete;
3003#endif
3004
3005#define VMA_VALIDATE(cond) do { if(!(cond)) { \
3006 VMA_ASSERT(0 && "Validation failed: " #cond); \
3007 return false; \
3008 } } while(false)
3009
3010/*******************************************************************************
3011END OF CONFIGURATION
3012*/
3013#endif // _VMA_CONFIGURATION
3014
3015
// Byte patterns used by VMA_DEBUG_INITIALIZE_ALLOCATIONS to fill memory on create/destroy.
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

// Copy of some Vulkan definitions so we don't need to check their existence just to handle few constants.
static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
static const uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200;
static const int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000;
static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
static const uint32_t VMA_VENDOR_ID_AMD = 4098;

// This one is tricky. Vulkan specification defines this code as available since
// Vulkan 1.0, but doesn't actually define it in Vulkan SDK earlier than 1.2.131.
// See pull request #207.
#define VK_ERROR_UNKNOWN_COPY ((VkResult)-13)
3035
3036
3037#if VMA_STATS_STRING_ENABLED
3038// Correspond to values of enum VmaSuballocationType.
3039static const char* VMA_SUBALLOCATION_TYPE_NAMES[] =
3040{
3041 "FREE",
3042 "UNKNOWN",
3043 "BUFFER",
3044 "IMAGE_UNKNOWN",
3045 "IMAGE_LINEAR",
3046 "IMAGE_OPTIMAL",
3047};
3048#endif
3049
3050static VkAllocationCallbacks VmaEmptyAllocationCallbacks =
3051 { VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3052
3053
#ifndef _VMA_ENUM_DECLARATIONS

// Kind of content occupying a suballocation. Used to decide whether two neighboring
// suballocations must respect VkPhysicalDeviceLimits::bufferImageGranularity.
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

// Selects vkFlushMappedMemoryRanges vs vkInvalidateMappedMemoryRanges.
enum VMA_CACHE_OPERATION
{
    VMA_CACHE_FLUSH,
    VMA_CACHE_INVALIDATE
};

// Strategy of a single allocation request inside a block metadata implementation.
enum class VmaAllocationRequestType
{
    Normal,
    TLSF,
    // Used by "Linear" algorithm.
    UpperAddress,
    EndOf1st,
    EndOf2nd,
};

#endif // _VMA_ENUM_DECLARATIONS
3084
3085#ifndef _VMA_FORWARD_DECLARATIONS
3086// Opaque handle used by allocation algorithms to identify single allocation in any conforming way.
3087VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaAllocHandle);
3088
3089struct VmaMutexLock;
3090struct VmaMutexLockRead;
3091struct VmaMutexLockWrite;
3092
3093template<typename T>
3094struct AtomicTransactionalIncrement;
3095
3096template<typename T>
3097struct VmaStlAllocator;
3098
3099template<typename T, typename AllocatorT>
3100class VmaVector;
3101
3102template<typename T, typename AllocatorT, size_t N>
3103class VmaSmallVector;
3104
3105template<typename T>
3106class VmaPoolAllocator;
3107
3108template<typename T>
3109struct VmaListItem;
3110
3111template<typename T>
3112class VmaRawList;
3113
3114template<typename T, typename AllocatorT>
3115class VmaList;
3116
3117template<typename ItemTypeTraits>
3118class VmaIntrusiveLinkedList;
3119
3120// Unused in this version
3121#if 0
3122template<typename T1, typename T2>
3123struct VmaPair;
3124template<typename FirstT, typename SecondT>
3125struct VmaPairFirstLess;
3126
3127template<typename KeyT, typename ValueT>
3128class VmaMap;
3129#endif
3130
3131#if VMA_STATS_STRING_ENABLED
3132class VmaStringBuilder;
3133class VmaJsonWriter;
3134#endif
3135
3136class VmaDeviceMemoryBlock;
3137
3138struct VmaDedicatedAllocationListItemTraits;
3139class VmaDedicatedAllocationList;
3140
3141struct VmaSuballocation;
3142struct VmaSuballocationOffsetLess;
3143struct VmaSuballocationOffsetGreater;
3144struct VmaSuballocationItemSizeLess;
3145
3146typedef VmaList<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> VmaSuballocationList;
3147
3148struct VmaAllocationRequest;
3149
3150class VmaBlockMetadata;
3151class VmaBlockMetadata_Linear;
3152class VmaBlockMetadata_TLSF;
3153
3154class VmaBlockVector;
3155
3156struct VmaPoolListItemTraits;
3157
3158struct VmaCurrentBudgetData;
3159
3160class VmaAllocationObjectAllocator;
3161
3162#endif // _VMA_FORWARD_DECLARATIONS
3163
3164
3165#ifndef _VMA_FUNCTIONS
3166
/*
Returns number of bits set to 1 in (v).

On specific platforms and compilers you can use intrinsics like:

Visual Studio:
    return __popcnt(v);
GCC, Clang:
    return static_cast<uint32_t>(__builtin_popcount(v));

Define macro VMA_COUNT_BITS_SET to provide your optimized implementation.
But you need to check in runtime whether user's CPU supports these, as some old processors don't.
*/
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
    return std::popcount(v);
#else
    // Portable SWAR popcount: sum bits within 2-, 4-, 8-, 16-, then 32-bit groups.
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
#endif
}
3193
// Returns index (0..63) of the least significant set bit in mask, or UINT8_MAX if mask == 0.
static inline uint8_t VmaBitScanLSB(uint64_t mask)
{
#if defined(_MSC_VER) && defined(_WIN64)
    unsigned long pos;
    if (_BitScanForward64(&pos, mask))
        return static_cast<uint8_t>(pos);
    return UINT8_MAX;
#elif defined __GNUC__ || defined __clang__
    // __builtin_ffsll is 1-based and returns 0 for mask == 0;
    // the unsigned subtraction then wraps to UINT8_MAX after truncation.
    return static_cast<uint8_t>(__builtin_ffsll(mask)) - 1U;
#else
    // Portable fallback: test bits from LSB upwards.
    uint8_t pos = 0;
    uint64_t bit = 1;
    do
    {
        if (mask & bit)
            return pos;
        bit <<= 1;
    } while (pos++ < 63);
    return UINT8_MAX;
#endif
}
3215
// Returns index (0..31) of the least significant set bit in mask, or UINT8_MAX if mask == 0.
static inline uint8_t VmaBitScanLSB(uint32_t mask)
{
#ifdef _MSC_VER
    unsigned long pos;
    if (_BitScanForward(&pos, mask))
        return static_cast<uint8_t>(pos);
    return UINT8_MAX;
#elif defined __GNUC__ || defined __clang__
    // __builtin_ffs is 1-based and returns 0 for mask == 0;
    // the unsigned subtraction then wraps to UINT8_MAX after truncation.
    return static_cast<uint8_t>(__builtin_ffs(mask)) - 1U;
#else
    // Portable fallback: test bits from LSB upwards.
    uint8_t pos = 0;
    uint32_t bit = 1;
    do
    {
        if (mask & bit)
            return pos;
        bit <<= 1;
    } while (pos++ < 31);
    return UINT8_MAX;
#endif
}
3237
// Returns index (0..63) of the most significant set bit in mask, or UINT8_MAX if mask == 0.
static inline uint8_t VmaBitScanMSB(uint64_t mask)
{
#if defined(_MSC_VER) && defined(_WIN64)
    unsigned long pos;
    if (_BitScanReverse64(&pos, mask))
        return static_cast<uint8_t>(pos);
#elif defined __GNUC__ || defined __clang__
    // __builtin_clzll is undefined for 0, hence the explicit check.
    if (mask)
        return 63 - static_cast<uint8_t>(__builtin_clzll(mask));
#else
    // Portable fallback: test bits from MSB downwards.
    uint8_t pos = 63;
    uint64_t bit = 1ULL << 63;
    do
    {
        if (mask & bit)
            return pos;
        bit >>= 1;
    } while (pos-- > 0);
#endif
    // Shared "not found" result for all branches.
    return UINT8_MAX;
}
3259
// Returns index (0..31) of the most significant set bit in mask, or UINT8_MAX if mask == 0.
static inline uint8_t VmaBitScanMSB(uint32_t mask)
{
#ifdef _MSC_VER
    unsigned long pos;
    if (_BitScanReverse(&pos, mask))
        return static_cast<uint8_t>(pos);
#elif defined __GNUC__ || defined __clang__
    // __builtin_clz is undefined for 0, hence the explicit check.
    if (mask)
        return 31 - static_cast<uint8_t>(__builtin_clz(mask));
#else
    // Portable fallback: test bits from MSB downwards.
    uint8_t pos = 31;
    uint32_t bit = 1UL << 31;
    do
    {
        if (mask & bit)
            return pos;
        bit >>= 1;
    } while (pos-- > 0);
#endif
    // Shared "not found" result for all branches.
    return UINT8_MAX;
}
3281
/*
Returns true if given number is a power of two.
T must be unsigned integer number or signed integer but always nonnegative.
For 0 returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    // A power of two has exactly one bit set, so x & (x - 1) clears it to zero.
    return (x & (x - 1)) == 0;
}
3292
// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T. alignment must be a power of two.
template <typename T>
static inline T VmaAlignUp(T val, T alignment)
{
    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
    return (val + alignment - 1) & ~(alignment - 1);
}
3301
// Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T. alignment must be a power of two.
template <typename T>
static inline T VmaAlignDown(T val, T alignment)
{
    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
    return val & ~(alignment - 1);
}
3310
// Division with mathematical rounding to nearest number.
// Intended for nonnegative integer operands; halves round up (e.g. 7/2 -> 4).
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}
3317
// Divide by 'y' and round up to nearest integer (ceiling division for nonnegative operands).
template <typename T>
static inline T VmaDivideRoundingUp(T x, T y)
{
    return (x + y - (T)1) / y;
}
3324
// Returns smallest power of 2 greater or equal to v.
// Note: wraps to 0 for v == 0 and for v > 2^31 (no 32-bit power of 2 exists).
static inline uint32_t VmaNextPow2(uint32_t v)
{
    // Classic bit-smearing: fill all bits below the MSB, then add 1.
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}
3337
// 64-bit overload of VmaNextPow2: smallest power of 2 greater or equal to v.
static inline uint64_t VmaNextPow2(uint64_t v)
{
    // Classic bit-smearing: fill all bits below the MSB, then add 1.
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}
3350
// Returns largest power of 2 less or equal to v. Returns 0 for v == 0.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    // Smear the MSB into all lower bits, then isolate the MSB.
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v = v ^ (v >> 1);
    return v;
}
3362
// 64-bit overload of VmaPrevPow2: largest power of 2 less or equal to v.
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    // Smear the MSB into all lower bits, then isolate the MSB.
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v = v ^ (v >> 1);
    return v;
}
3374
3375static inline bool VmaStrIsEmpty(const char* pStr)
3376{
3377 return pStr == VMA_NULL || *pStr == '\0';
3378}
3379
3380/*
3381Returns true if two memory blocks occupy overlapping pages.
3382ResourceA must be in less memory offset than ResourceB.
3383
3384Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3385chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3386*/
3387static inline bool VmaBlocksOnSamePage(
3388 VkDeviceSize resourceAOffset,
3389 VkDeviceSize resourceASize,
3390 VkDeviceSize resourceBOffset,
3391 VkDeviceSize pageSize)
3392{
3393 VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3394 VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3395 VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3396 VkDeviceSize resourceBStart = resourceBOffset;
3397 VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3398 return resourceAEndPage == resourceBStartPage;
3399}
3400
3401/*
3402Returns true if given suballocation types could conflict and must respect
3403VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
3404or linear image and another one is optimal image. If type is unknown, behave
3405conservatively.
3406*/
3407static inline bool VmaIsBufferImageGranularityConflict(
3408 VmaSuballocationType suballocType1,
3409 VmaSuballocationType suballocType2)
3410{
3411 if (suballocType1 > suballocType2)
3412 {
3413 VMA_SWAP(suballocType1, suballocType2);
3414 }
3415
3416 switch (suballocType1)
3417 {
3418 case VMA_SUBALLOCATION_TYPE_FREE:
3419 return false;
3420 case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3421 return true;
3422 case VMA_SUBALLOCATION_TYPE_BUFFER:
3423 return
3424 suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3425 suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3426 case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3427 return
3428 suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3429 suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3430 suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3431 case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3432 return
3433 suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3434 case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3435 return false;
3436 default:
3437 VMA_ASSERT(0);
3438 return true;
3439 }
3440}
3441
3442static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3443{
3444#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
3445 uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3446 const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3447 for (size_t i = 0; i < numberCount; ++i, ++pDst)
3448 {
3449 *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3450 }
3451#else
3452 // no-op
3453#endif
3454}
3455
3456static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3457{
3458#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
3459 const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3460 const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3461 for (size_t i = 0; i < numberCount; ++i, ++pSrc)
3462 {
3463 if (*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3464 {
3465 return false;
3466 }
3467 }
3468#endif
3469 return true;
3470}
3471
3472/*
3473Fills structure with parameters of an example buffer to be used for transfers
3474during GPU memory defragmentation.
3475*/
3476static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
3477{
3478 memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
3481 outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
3482}
3483
3484
3485/*
3486Performs binary search and returns iterator to first element that is greater or
3487equal to (key), according to comparison (cmp).
3488
3489Cmp should return true if first argument is less than second argument.
3490
3491Returned value is the found element, if present in the collection or place where
3492new element with value (key) should be inserted.
3493*/
3494template <typename CmpLess, typename IterT, typename KeyT>
3495static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp)
3496{
3497 size_t down = 0, up = (end - beg);
3498 while (down < up)
3499 {
3500 const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation
3501 if (cmp(*(beg + mid), key))
3502 {
3503 down = mid + 1;
3504 }
3505 else
3506 {
3507 up = mid;
3508 }
3509 }
3510 return beg + down;
3511}
3512
3513template<typename CmpLess, typename IterT, typename KeyT>
3514IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
3515{
3516 IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
3517 beg, end, value, cmp);
3518 if (it == end ||
3519 (!cmp(*it, value) && !cmp(value, *it)))
3520 {
3521 return it;
3522 }
3523 return end;
3524}
3525
3526/*
3527Returns true if all pointers in the array are not-null and unique.
3528Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
3529T must be pointer type, e.g. VmaAllocation, VmaPool.
3530*/
3531template<typename T>
3532static bool VmaValidatePointerArray(uint32_t count, const T* arr)
3533{
3534 for (uint32_t i = 0; i < count; ++i)
3535 {
3536 const T iPtr = arr[i];
3537 if (iPtr == VMA_NULL)
3538 {
3539 return false;
3540 }
3541 for (uint32_t j = i + 1; j < count; ++j)
3542 {
3543 if (iPtr == arr[j])
3544 {
3545 return false;
3546 }
3547 }
3548 }
3549 return true;
3550}
3551
// Pushes newStruct onto the front of mainStruct's pNext chain.
// Both types must expose a 'pNext' member (Vulkan extension-struct convention).
template<typename MainT, typename NewT>
static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
{
    // Link the new node to the current head, then make it the new head.
    newStruct->pNext = mainStruct->pNext;
    mainStruct->pNext = newStruct;
}
3558
3559// This is the main algorithm that guides the selection of a memory type best for an allocation -
3560// converts usage to required/preferred/not preferred flags.
3561static bool FindMemoryPreferences(
3562 bool isIntegratedGPU,
3563 const VmaAllocationCreateInfo& allocCreateInfo,
3564 VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown.
3565 VkMemoryPropertyFlags& outRequiredFlags,
3566 VkMemoryPropertyFlags& outPreferredFlags,
3567 VkMemoryPropertyFlags& outNotPreferredFlags)
3568{
3569 outRequiredFlags = allocCreateInfo.requiredFlags;
3570 outPreferredFlags = allocCreateInfo.preferredFlags;
3571 outNotPreferredFlags = 0;
3572
3573 switch(allocCreateInfo.usage)
3574 {
3576 break;
3578 if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
3579 {
3580 outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3581 }
3582 break;
3585 break;
3587 outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3588 if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
3589 {
3590 outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3591 }
3592 break;
3594 outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3595 outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
3596 break;
3598 outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3599 break;
3601 outRequiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
3602 break;
3606 {
3607 if(bufImgUsage == UINT32_MAX)
3608 {
3609 VMA_ASSERT(0 && "VMA_MEMORY_USAGE_AUTO* values can only be used with functions like vmaCreateBuffer, vmaCreateImage so that the details of the created resource are known.");
3610 return false;
3611 }
3612 // This relies on values of VK_IMAGE_USAGE_TRANSFER* being the same VK_BUFFER_IMAGE_TRANSFER*.
3613 const bool deviceAccess = (bufImgUsage & ~(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0;
3614 const bool hostAccessSequentialWrite = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT) != 0;
3615 const bool hostAccessRandom = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) != 0;
3616 const bool hostAccessAllowTransferInstead = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) != 0;
3617 const bool preferDevice = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
3618 const bool preferHost = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST;
3619
3620 // CPU random access - e.g. a buffer written to or transferred from GPU to read back on CPU.
3621 if(hostAccessRandom)
3622 {
3623 if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
3624 {
3625 // Nice if it will end up in HOST_VISIBLE, but more importantly prefer DEVICE_LOCAL.
3626 // Omitting HOST_VISIBLE here is intentional.
3627 // In case there is DEVICE_LOCAL | HOST_VISIBLE | HOST_CACHED, it will pick that one.
3628 // Otherwise, this will give same weight to DEVICE_LOCAL as HOST_VISIBLE | HOST_CACHED and select the former if occurs first on the list.
3630 }
3631 else
3632 {
3633 // Always CPU memory, cached.
3635 }
3636 }
3637 // CPU sequential write - may be CPU or host-visible GPU memory, uncached and write-combined.
3638 else if(hostAccessSequentialWrite)
3639 {
3640 // Want uncached and write-combined.
3641 outNotPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
3642
3643 if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
3644 {
3646 }
3647 else
3648 {
3649 outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3650 // Direct GPU access, CPU sequential write (e.g. a dynamic uniform buffer updated every frame)
3651 if(deviceAccess)
3652 {
3653 // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose GPU memory.
3654 if(preferHost)
3655 outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3656 else
3657 outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3658 }
3659 // GPU no direct access, CPU sequential write (e.g. an upload buffer to be transferred to the GPU)
3660 else
3661 {
3662 // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose CPU memory.
3663 if(preferDevice)
3664 outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3665 else
3666 outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3667 }
3668 }
3669 }
3670 // No CPU access
3671 else
3672 {
3673 // GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory
3674 if(deviceAccess)
3675 {
3676 // ...unless there is a clear preference from the user not to do so.
3677 if(preferHost)
3678 outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3679 else
3680 outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3681 }
3682 // No direct GPU access, no CPU access, just transfers.
3683 // It may be staging copy intended for e.g. preserving image for next frame (then better GPU memory) or
3684 // a "swap file" copy to free some GPU memory (then better CPU memory).
3685 // Up to the user to decide. If no preferece, assume the former and choose GPU memory.
3686 if(preferHost)
3687 outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3688 else
3689 outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3690 }
3691 break;
3692 }
3693 default:
3694 VMA_ASSERT(0);
3695 }
3696
3697 // Avoid DEVICE_COHERENT unless explicitly requested.
3698 if(((allocCreateInfo.requiredFlags | allocCreateInfo.preferredFlags) &
3699 (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
3700 {
3701 outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY;
3702 }
3703
3704 return true;
3705}
3706
3707////////////////////////////////////////////////////////////////////////////////
3708// Memory allocation
3709
3710static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3711{
3712 void* result = VMA_NULL;
3713 if ((pAllocationCallbacks != VMA_NULL) &&
3714 (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3715 {
3716 result = (*pAllocationCallbacks->pfnAllocation)(
3717 pAllocationCallbacks->pUserData,
3718 size,
3719 alignment,
3721 }
3722 else
3723 {
3724 result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3725 }
3726 VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
3727 return result;
3728}
3729
3730static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3731{
3732 if ((pAllocationCallbacks != VMA_NULL) &&
3733 (pAllocationCallbacks->pfnFree != VMA_NULL))
3734 {
3735 (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3736 }
3737 else
3738 {
3739 VMA_SYSTEM_ALIGNED_FREE(ptr);
3740 }
3741}
3742
// Allocates raw, properly aligned storage for a single T (no constructor is run).
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
3748
// Allocates raw, properly aligned storage for 'count' objects of type T
// (no constructors are run).
// NOTE(review): sizeof(T) * count is not checked for overflow; callers are
// expected to pass trusted sizes.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
3754
// Placement-new construction of a single object in storage from VmaAllocate.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

// Placement-new construction over storage for 'count' objects from VmaAllocateArray;
// pair with vma_delete_array.
#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3758
3759template<typename T>
3760static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3761{
3762 ptr->~T();
3763 VmaFree(pAllocationCallbacks, ptr);
3764}
3765
3766template<typename T>
3767static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3768{
3769 if (ptr != VMA_NULL)
3770 {
3771 for (size_t i = count; i--; )
3772 {
3773 ptr[i].~T();
3774 }
3775 VmaFree(pAllocationCallbacks, ptr);
3776 }
3777}
3778
3779static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
3780{
3781 if (srcStr != VMA_NULL)
3782 {
3783 const size_t len = strlen(srcStr);
3784 char* const result = vma_new_array(allocs, char, len + 1);
3785 memcpy(result, srcStr, len + 1);
3786 return result;
3787 }
3788 return VMA_NULL;
3789}
3790
#if VMA_STATS_STRING_ENABLED
// Duplicates exactly strLen characters of srcStr (which need not be NUL-terminated)
// and appends a terminating NUL. Returns VMA_NULL when srcStr is VMA_NULL.
static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen)
{
    if (srcStr == VMA_NULL)
    {
        return VMA_NULL;
    }
    char* const copy = vma_new_array(allocs, char, strLen + 1);
    memcpy(copy, srcStr, strLen);
    copy[strLen] = '\0';
    return copy;
}
#endif // VMA_STATS_STRING_ENABLED
3804
3805static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
3806{
3807 if (str != VMA_NULL)
3808 {
3809 const size_t len = strlen(str);
3810 vma_delete_array(allocs, str, len + 1);
3811 }
3812}
3813
// Inserts 'value' into an already-sorted vector, keeping it sorted.
// Returns the index at which the value was inserted.
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    typename VectorT::value_type* const dataBegin = vector.data();
    // lower_bound position preserves the sort order.
    const size_t insertIndex = (size_t)(VmaBinaryFindFirstNotLess(
        dataBegin,
        dataBegin + vector.size(),
        value,
        CmpLess()) - dataBegin);
    VmaVectorInsert(vector, insertIndex, value);
    return insertIndex;
}
3825
// Removes the element equivalent to 'value' from a sorted vector.
// Returns true when an element was found and removed.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if (it == vector.end())
    {
        return false;
    }
    // Equivalence check: neither orders before the other.
    if (comparator(*it, value) || comparator(value, *it))
    {
        return false;
    }
    VmaVectorRemove(vector, (size_t)(it - vector.begin()));
    return true;
}
3843#endif // _VMA_FUNCTIONS
3844
3845#ifndef _VMA_STATISTICS_FUNCTIONS
3846
3847static void VmaClearStatistics(VmaStatistics& outStats)
3848{
3849 outStats.blockCount = 0;
3850 outStats.allocationCount = 0;
3851 outStats.blockBytes = 0;
3852 outStats.allocationBytes = 0;
3853}
3854
3855static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src)
3856{
3857 inoutStats.blockCount += src.blockCount;
3858 inoutStats.allocationCount += src.allocationCount;
3859 inoutStats.blockBytes += src.blockBytes;
3860 inoutStats.allocationBytes += src.allocationBytes;
3861}
3862
3863static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats)
3864{
3865 VmaClearStatistics(outStats.statistics);
3866 outStats.unusedRangeCount = 0;
3868 outStats.allocationSizeMax = 0;
3870 outStats.unusedRangeSizeMax = 0;
3871}
3872
3873static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
3874{
3875 inoutStats.statistics.allocationCount++;
3876 inoutStats.statistics.allocationBytes += size;
3877 inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, size);
3878 inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size);
3879}
3880
3881static void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
3882{
3883 inoutStats.unusedRangeCount++;
3884 inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size);
3885 inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size);
3886}
3887
3888static void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src)
3889{
3890 VmaAddStatistics(inoutStats.statistics, src.statistics);
3891 inoutStats.unusedRangeCount += src.unusedRangeCount;
3892 inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, src.allocationSizeMin);
3893 inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, src.allocationSizeMax);
3894 inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, src.unusedRangeSizeMin);
3895 inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, src.unusedRangeSizeMax);
3896}
3897
3898#endif // _VMA_STATISTICS_FUNCTIONS
3899
3900#ifndef _VMA_MUTEX_LOCK
3901// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3902struct VmaMutexLock
3903{
3904 VMA_CLASS_NO_COPY(VmaMutexLock)
3905public:
3906 VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
3907 m_pMutex(useMutex ? &mutex : VMA_NULL)
3908 {
3909 if (m_pMutex) { m_pMutex->Lock(); }
3910 }
3911 ~VmaMutexLock() { if (m_pMutex) { m_pMutex->Unlock(); } }
3912
3913private:
3914 VMA_MUTEX* m_pMutex;
3915};
3916
3917// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
3918struct VmaMutexLockRead
3919{
3920 VMA_CLASS_NO_COPY(VmaMutexLockRead)
3921public:
3922 VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
3923 m_pMutex(useMutex ? &mutex : VMA_NULL)
3924 {
3925 if (m_pMutex) { m_pMutex->LockRead(); }
3926 }
3927 ~VmaMutexLockRead() { if (m_pMutex) { m_pMutex->UnlockRead(); } }
3928
3929private:
3930 VMA_RW_MUTEX* m_pMutex;
3931};
3932
3933// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
3934struct VmaMutexLockWrite
3935{
3936 VMA_CLASS_NO_COPY(VmaMutexLockWrite)
3937public:
3938 VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex)
3939 : m_pMutex(useMutex ? &mutex : VMA_NULL)
3940 {
3941 if (m_pMutex) { m_pMutex->LockWrite(); }
3942 }
3943 ~VmaMutexLockWrite() { if (m_pMutex) { m_pMutex->UnlockWrite(); } }
3944
3945private:
3946 VMA_RW_MUTEX* m_pMutex;
3947};
3948
#if VMA_DEBUG_GLOBAL_MUTEX
    // Debug aid: a single global mutex taken by the macro below.
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    // Expands to nothing when the global debug mutex is disabled.
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif
3955#endif // _VMA_MUTEX_LOCK
3956
#ifndef _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
// An object that increments given atomic but decrements it back in the destructor unless Commit() is called.
template<typename T>
struct AtomicTransactionalIncrement
{
public:
    typedef std::atomic<T> AtomicT;

    // Roll the increment back unless it was committed.
    ~AtomicTransactionalIncrement()
    {
        if (m_Atomic != nullptr)
        {
            --(*m_Atomic);
        }
    }

    // Make the increment permanent; the destructor becomes a no-op.
    void Commit() { m_Atomic = nullptr; }

    // Increments '*atomic' and remembers it for potential rollback.
    // Returns the value the atomic held before the increment.
    T Increment(AtomicT* atomic)
    {
        m_Atomic = atomic;
        return m_Atomic->fetch_add(1);
    }

private:
    AtomicT* m_Atomic = nullptr;
};
#endif // _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
3982
#ifndef _VMA_STL_ALLOCATOR
// STL-compatible allocator.
// Routes element storage through the stored VkAllocationCallbacks via
// VmaAllocateArray/VmaFree, allowing standard containers to honor VMA's
// CPU allocation callbacks.
template<typename T>
struct VmaStlAllocator
{
    // Callbacks used by allocate()/deallocate(); passed straight to VmaAllocateArray/VmaFree.
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) {}
    // Rebinding constructor required by the Allocator concept.
    template<typename U>
    VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) {}
    VmaStlAllocator(const VmaStlAllocator&) = default;
    // Assignment is deleted (m_pCallbacks is const).
    VmaStlAllocator& operator=(const VmaStlAllocator&) = delete;

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Allocators compare equal exactly when they use the same callbacks pointer.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }
};
#endif // _VMA_STL_ALLOCATOR
4012
#ifndef _VMA_VECTOR
/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;
    typedef T* iterator;
    typedef const T* const_iterator;

    VmaVector(const AllocatorT& allocator);
    VmaVector(size_t count, const AllocatorT& allocator);
    // This version of the constructor is here for compatibility with pre-C++14 std::vector.
    // value is unused.
    VmaVector(size_t count, const T& value, const AllocatorT& allocator) : VmaVector(count, allocator) {}
    VmaVector(const VmaVector<T, AllocatorT>& src);
    VmaVector& operator=(const VmaVector& rhs);
    // Frees the storage only - element destructors are intentionally not run (T is POD).
    ~VmaVector() { VmaFree(m_Allocator.m_pCallbacks, m_pArray); }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }
    T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }
    const T* data() const { return m_pArray; }
    const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }
    const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }
    const_iterator cbegin() const { return m_pArray; }
    const_iterator cend() const { return m_pArray + m_Count; }
    const_iterator begin() const { return cbegin(); }
    const_iterator end() const { return cend(); }

    // Note: pop_front/push_front are O(n) - they shift all elements.
    void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }
    void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }
    void push_front(const T& src) { insert(0, src); }

    void push_back(const T& src);
    void reserve(size_t newCapacity, bool freeMemory = false);
    void resize(size_t newCount);
    void clear() { resize(0); }
    void shrink_to_fit();
    void insert(size_t index, const T& src);
    void remove(size_t index);

    T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }
    const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }

private:
    AllocatorT m_Allocator; // Provides m_pCallbacks for all (de)allocations.
    T* m_pArray;            // Heap buffer, VMA_NULL when capacity is 0.
    size_t m_Count;         // Number of live elements.
    size_t m_Capacity;      // Number of elements the buffer can hold.
};
4071
#ifndef _VMA_VECTOR_FUNCTIONS
// Constructs an empty vector that will allocate through 'allocator'.
template<typename T, typename AllocatorT>
VmaVector<T, AllocatorT>::VmaVector(const AllocatorT& allocator)
    : m_Allocator(allocator),
    m_pArray(VMA_NULL),
    m_Count(0),
    m_Capacity(0) {}
4079
// Constructs a vector holding 'count' elements. The storage is raw
// (VmaAllocateArray performs no construction), so the contents are uninitialized.
template<typename T, typename AllocatorT>
VmaVector<T, AllocatorT>::VmaVector(size_t count, const AllocatorT& allocator)
    : m_Allocator(allocator),
    m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    m_Count(count),
    m_Capacity(count) {}
4086
// Copy constructor: allocates a buffer sized exactly to src's count and
// bitwise-copies the elements (T is POD per the class contract).
template<typename T, typename AllocatorT>
VmaVector<T, AllocatorT>::VmaVector(const VmaVector& src)
    : m_Allocator(src.m_Allocator),
    m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    m_Count(src.m_Count),
    m_Capacity(src.m_Count)
{
    if (m_Count != 0)
    {
        memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    }
}
4099
4100template<typename T, typename AllocatorT>
4101VmaVector<T, AllocatorT>& VmaVector<T, AllocatorT>::operator=(const VmaVector& rhs)
4102{
4103 if (&rhs != this)
4104 {
4105 resize(rhs.m_Count);
4106 if (m_Count != 0)
4107 {
4108 memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
4109 }
4110 }
4111 return *this;
4112}
4113
4114template<typename T, typename AllocatorT>
4115void VmaVector<T, AllocatorT>::push_back(const T& src)
4116{
4117 const size_t newIndex = size();
4118 resize(newIndex + 1);
4119 m_pArray[newIndex] = src;
4120}
4121
4122template<typename T, typename AllocatorT>
4123void VmaVector<T, AllocatorT>::reserve(size_t newCapacity, bool freeMemory)
4124{
4125 newCapacity = VMA_MAX(newCapacity, m_Count);
4126
4127 if ((newCapacity < m_Capacity) && !freeMemory)
4128 {
4129 newCapacity = m_Capacity;
4130 }
4131
4132 if (newCapacity != m_Capacity)
4133 {
4134 T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
4135 if (m_Count != 0)
4136 {
4137 memcpy(newArray, m_pArray, m_Count * sizeof(T));
4138 }
4139 VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4140 m_Capacity = newCapacity;
4141 m_pArray = newArray;
4142 }
4143}
4144
// Changes the element count. Capacity only grows here (geometric growth:
// max(newCount, 1.5 * capacity, 8)); it never shrinks - see shrink_to_fit().
// New elements are uninitialized raw storage (T is POD).
template<typename T, typename AllocatorT>
void VmaVector<T, AllocatorT>::resize(size_t newCount)
{
    size_t newCapacity = m_Capacity;
    if (newCount > m_Capacity)
    {
        newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    }

    if (newCapacity != m_Capacity)
    {
        // Reallocate and bitwise-copy the surviving elements.
        T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
        const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
        if (elementsToCopy != 0)
        {
            memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
        }
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
        m_Capacity = newCapacity;
        m_pArray = newArray;
    }

    m_Count = newCount;
}
4169
4170template<typename T, typename AllocatorT>
4171void VmaVector<T, AllocatorT>::shrink_to_fit()
4172{
4173 if (m_Capacity > m_Count)
4174 {
4175 T* newArray = VMA_NULL;
4176 if (m_Count > 0)
4177 {
4178 newArray = VmaAllocateArray<T>(m_Allocator.m_pCallbacks, m_Count);
4179 memcpy(newArray, m_pArray, m_Count * sizeof(T));
4180 }
4181 VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4182 m_Capacity = m_Count;
4183 m_pArray = newArray;
4184 }
4185}
4186
4187template<typename T, typename AllocatorT>
4188void VmaVector<T, AllocatorT>::insert(size_t index, const T& src)
4189{
4190 VMA_HEAVY_ASSERT(index <= m_Count);
4191 const size_t oldCount = size();
4192 resize(oldCount + 1);
4193 if (index < oldCount)
4194 {
4195 memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4196 }
4197 m_pArray[index] = src;
4198}
4199
4200template<typename T, typename AllocatorT>
4201void VmaVector<T, AllocatorT>::remove(size_t index)
4202{
4203 VMA_HEAVY_ASSERT(index < m_Count);
4204 const size_t oldCount = size();
4205 if (index < oldCount - 1)
4206 {
4207 memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4208 }
4209 resize(oldCount - 1);
4210}
4211#endif // _VMA_VECTOR_FUNCTIONS
4212
// Free-function wrapper so generic code can insert into a VmaVector by index.
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
4218
// Free-function wrapper so generic code can remove from a VmaVector by index.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
4224#endif // _VMA_VECTOR
4225
#ifndef _VMA_SMALL_VECTOR
/*
This is a vector (a variable-sized array), optimized for the case when the array is small.

It contains some number of elements in-place, which allows it to avoid heap allocation
when the actual number of elements is below that threshold. This allows normal "small"
cases to be fast without losing generality for large inputs.
*/
template<typename T, typename AllocatorT, size_t N>
class VmaSmallVector
{
public:
    typedef T value_type;
    typedef T* iterator;

    VmaSmallVector(const AllocatorT& allocator);
    VmaSmallVector(size_t count, const AllocatorT& allocator);
    // Cross-instantiation copy/assignment is forbidden.
    template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
    VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;
    template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
    VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;
    ~VmaSmallVector() = default;

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    // Active storage: the in-place array for small counts, the heap vector otherwise.
    T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
    T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }
    T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }
    const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
    const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }
    const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }

    iterator begin() { return data(); }
    iterator end() { return data() + m_Count; }

    // Note: pop_front/push_front are O(n) - they shift all elements.
    void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }
    void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }
    void push_front(const T& src) { insert(0, src); }

    void push_back(const T& src);
    void resize(size_t newCount, bool freeMemory = false);
    void clear(bool freeMemory = false);
    void insert(size_t index, const T& src);
    void remove(size_t index);

    T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; }
    const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; }

private:
    size_t m_Count;
    T m_StaticArray[N]; // Used when m_Size <= N
    VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Size > N
};
4279
#ifndef _VMA_SMALL_VECTOR_FUNCTIONS
// Constructs an empty small-vector; the heap-backed VmaVector starts empty too.
template<typename T, typename AllocatorT, size_t N>
VmaSmallVector<T, AllocatorT, N>::VmaSmallVector(const AllocatorT& allocator)
    : m_Count(0),
    m_DynamicArray(allocator) {}

// Constructs with 'count' elements: the in-place array suffices when count <= N,
// otherwise the dynamic array is sized to hold all of them.
template<typename T, typename AllocatorT, size_t N>
VmaSmallVector<T, AllocatorT, N>::VmaSmallVector(size_t count, const AllocatorT& allocator)
    : m_Count(count),
    m_DynamicArray(count > N ? count : 0, allocator) {}
4290
4291template<typename T, typename AllocatorT, size_t N>
4292void VmaSmallVector<T, AllocatorT, N>::push_back(const T& src)
4293{
4294 const size_t newIndex = size();
4295 resize(newIndex + 1);
4296 data()[newIndex] = src;
4297}
4298
// Changes the element count, migrating data between the in-place array
// (count <= N) and the heap-backed array (count > N) as needed.
// When 'freeMemory' is true, the heap buffer is trimmed after shrinking.
template<typename T, typename AllocatorT, size_t N>
void VmaSmallVector<T, AllocatorT, N>::resize(size_t newCount, bool freeMemory)
{
    if (newCount > N && m_Count > N)
    {
        // Any direction, staying in m_DynamicArray
        m_DynamicArray.resize(newCount);
        if (freeMemory)
        {
            m_DynamicArray.shrink_to_fit();
        }
    }
    else if (newCount > N && m_Count <= N)
    {
        // Growing, moving from m_StaticArray to m_DynamicArray
        m_DynamicArray.resize(newCount);
        if (m_Count > 0)
        {
            memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
        }
    }
    else if (newCount <= N && m_Count > N)
    {
        // Shrinking, moving from m_DynamicArray to m_StaticArray
        if (newCount > 0)
        {
            memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
        }
        m_DynamicArray.resize(0);
        if (freeMemory)
        {
            m_DynamicArray.shrink_to_fit();
        }
    }
    else
    {
        // Any direction, staying in m_StaticArray - nothing to do here
    }
    m_Count = newCount;
}
4339
4340template<typename T, typename AllocatorT, size_t N>
4341void VmaSmallVector<T, AllocatorT, N>::clear(bool freeMemory)
4342{
4343 m_DynamicArray.clear();
4344 if (freeMemory)
4345 {
4346 m_DynamicArray.shrink_to_fit();
4347 }
4348 m_Count = 0;
4349}
4350
// Inserts a copy of src at position `index`, shifting subsequent elements
// up by one. index == size() appends.
template<typename T, typename AllocatorT, size_t N>
void VmaSmallVector<T, AllocatorT, N>::insert(size_t index, const T& src)
{
    VMA_HEAVY_ASSERT(index <= m_Count);
    const size_t oldCount = size();
    // Resize first: data() must be fetched AFTER the resize, which may have
    // switched storage from the inline array to the heap vector.
    resize(oldCount + 1);
    T* const dataPtr = data();
    if (index < oldCount)
    {
        // I know, this could be more optimal for case where memmove can be memcpy directly from m_StaticArray to m_DynamicArray.
        // memmove, not memcpy: source and destination ranges overlap.
        memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
    }
    dataPtr[index] = src;
}
4365
// Removes the element at `index`, shifting subsequent elements down by one.
template<typename T, typename AllocatorT, size_t N>
void VmaSmallVector<T, AllocatorT, N>::remove(size_t index)
{
    VMA_HEAVY_ASSERT(index < m_Count);
    const size_t oldCount = size();
    if (index < oldCount - 1)
    {
        // I know, this could be more optimal for case where memmove can be memcpy directly from m_DynamicArray to m_StaticArray.
        // memmove, not memcpy: source and destination ranges overlap.
        T* const dataPtr = data();
        memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
    }
    // Shrink last, so the memmove above still sees the old storage.
    resize(oldCount - 1);
}
4379#endif // _VMA_SMALL_VECTOR_FUNCTIONS
4380#endif // _VMA_SMALL_VECTOR
4381
4382#ifndef _VMA_POOL_ALLOCATOR
4383/*
4384Allocator for objects of type T using a list of arrays (pools) to speed up
4385allocation. Number of elements that can be allocated is not bounded because
4386allocator can create multiple blocks.
4387*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    // pAllocationCallbacks - optional Vulkan-style CPU allocation callbacks.
    // firstBlockCapacity - item capacity of the first block; must be > 1.
    // Subsequent blocks grow by a factor of 1.5 (see CreateNewBlock).
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    ~VmaPoolAllocator();
    // Constructs a T in a free slot, forwarding args to its constructor.
    template<typename... Types> T* Alloc(Types&&... args);
    // Destroys *ptr and returns its slot to the pool. ptr must come from Alloc.
    void Free(T* ptr);

private:
    // A slot is either a live T or a link in the per-block free list.
    union Item
    {
        uint32_t NextFreeIndex; // UINT32_MAX means end of the free list.
        alignas(T) char Value[sizeof(T)];
    };
    struct ItemBlock
    {
        Item* pItems;
        uint32_t Capacity;
        uint32_t FirstFreeIndex; // Head of this block's free list.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const uint32_t m_FirstBlockCapacity;
    VmaVector<ItemBlock, VmaStlAllocator<ItemBlock>> m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
4417
4418#ifndef _VMA_POOL_ALLOCATOR_FUNCTIONS
4419template<typename T>
4420VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity)
4421 : m_pAllocationCallbacks(pAllocationCallbacks),
4422 m_FirstBlockCapacity(firstBlockCapacity),
4423 m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
4424{
4425 VMA_ASSERT(m_FirstBlockCapacity > 1);
4426}
4427
4428template<typename T>
4429VmaPoolAllocator<T>::~VmaPoolAllocator()
4430{
4431 for (size_t i = m_ItemBlocks.size(); i--;)
4432 vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
4433 m_ItemBlocks.clear();
4434}
4435
// Constructs a T in the first free slot of any block, creating a new block
// if all existing ones are full. Constructor arguments are perfect-forwarded.
template<typename T>
template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types&&... args)
{
    // Iterate newest block first - it is the largest and most likely to
    // have free slots.
    for (size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if (block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            // Pop the slot off the free list.
            block.FirstFreeIndex = pItem->NextFreeIndex;
            T* result = (T*)&pItem->Value;
            new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
            return result;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    T* result = (T*)&pItem->Value;
    new(result) T(std::forward<Types>(args)...); // Explicit constructor call.
    return result;
}
4461
4462template<typename T>
4464{
4465 // Search all memory blocks to find ptr.
4466 for (size_t i = m_ItemBlocks.size(); i--; )
4467 {
4468 ItemBlock& block = m_ItemBlocks[i];
4469
4470 // Casting to union.
4471 Item* pItemPtr;
4472 memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4473
4474 // Check if pItemPtr is in address range of this block.
4475 if ((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
4476 {
4477 ptr->~T(); // Explicit destructor call.
4478 const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4479 pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4480 block.FirstFreeIndex = index;
4481 return;
4482 }
4483 }
4484 VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4485}
4486
// Allocates a new block (capacity = previous * 3/2, or m_FirstBlockCapacity
// for the first one), appends it to m_ItemBlocks, and threads all its slots
// into a free list. Returns a reference to the stored block.
template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
        m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;

    const ItemBlock newBlock =
    {
        vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
        newBlockCapacity,
        0 // FirstFreeIndex: slot 0 heads the free list.
    };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    // Writing through the local copy is fine: pItems points at the same array
    // as the copy stored in m_ItemBlocks.
    for (uint32_t i = 0; i < newBlockCapacity - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
4508#endif // _VMA_POOL_ALLOCATOR_FUNCTIONS
4509#endif // _VMA_POOL_ALLOCATOR
4510
4511#ifndef _VMA_RAW_LIST
// Node of VmaRawList: doubly linked, payload stored by value.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // Null for the front node.
    VmaListItem* pNext; // Null for the back node.
    T Value;
};
4519
// Doubly linked list.
// Low-level list of T whose nodes come from an internal VmaPoolAllocator.
// Exposes raw ItemType pointers instead of iterators; see VmaList below for
// an STL-style wrapper.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    ~VmaRawList() = default;

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Front() const { return m_pFront; }
    const ItemType* Back() const { return m_pBack; }

    // Push/insert return the newly created node.
    ItemType* PushFront();
    ItemType* PushBack();
    ItemType* PushFront(const T& value);
    ItemType* PushBack(const T& value);
    void PopFront();
    void PopBack();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);
    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Clear();
    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Pool owning all nodes.
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};
4565
4566#ifndef _VMA_RAW_LIST_FUNCTIONS
4567template<typename T>
4568VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks)
4569 : m_pAllocationCallbacks(pAllocationCallbacks),
4570 m_ItemAllocator(pAllocationCallbacks, 128),
4571 m_pFront(VMA_NULL),
4572 m_pBack(VMA_NULL),
4573 m_Count(0) {}
4574
4575template<typename T>
4576VmaListItem<T>* VmaRawList<T>::PushFront()
4577{
4578 ItemType* const pNewItem = m_ItemAllocator.Alloc();
4579 pNewItem->pPrev = VMA_NULL;
4580 if (IsEmpty())
4581 {
4582 pNewItem->pNext = VMA_NULL;
4583 m_pFront = pNewItem;
4584 m_pBack = pNewItem;
4585 m_Count = 1;
4586 }
4587 else
4588 {
4589 pNewItem->pNext = m_pFront;
4590 m_pFront->pPrev = pNewItem;
4591 m_pFront = pNewItem;
4592 ++m_Count;
4593 }
4594 return pNewItem;
4595}
4596
4597template<typename T>
4598VmaListItem<T>* VmaRawList<T>::PushBack()
4599{
4600 ItemType* const pNewItem = m_ItemAllocator.Alloc();
4601 pNewItem->pNext = VMA_NULL;
4602 if(IsEmpty())
4603 {
4604 pNewItem->pPrev = VMA_NULL;
4605 m_pFront = pNewItem;
4606 m_pBack = pNewItem;
4607 m_Count = 1;
4608 }
4609 else
4610 {
4611 pNewItem->pPrev = m_pBack;
4612 m_pBack->pNext = pNewItem;
4613 m_pBack = pNewItem;
4614 ++m_Count;
4615 }
4616 return pNewItem;
4617}
4618
4619template<typename T>
4620VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4621{
4622 ItemType* const pNewItem = PushFront();
4623 pNewItem->Value = value;
4624 return pNewItem;
4625}
4626
4627template<typename T>
4628VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4629{
4630 ItemType* const pNewItem = PushBack();
4631 pNewItem->Value = value;
4632 return pNewItem;
4633}
4634
4635template<typename T>
4636void VmaRawList<T>::PopFront()
4637{
4638 VMA_HEAVY_ASSERT(m_Count > 0);
4639 ItemType* const pFrontItem = m_pFront;
4640 ItemType* const pNextItem = pFrontItem->pNext;
4641 if (pNextItem != VMA_NULL)
4642 {
4643 pNextItem->pPrev = VMA_NULL;
4644 }
4645 m_pFront = pNextItem;
4646 m_ItemAllocator.Free(pFrontItem);
4647 --m_Count;
4648}
4649
4650template<typename T>
4651void VmaRawList<T>::PopBack()
4652{
4653 VMA_HEAVY_ASSERT(m_Count > 0);
4654 ItemType* const pBackItem = m_pBack;
4655 ItemType* const pPrevItem = pBackItem->pPrev;
4656 if(pPrevItem != VMA_NULL)
4657 {
4658 pPrevItem->pNext = VMA_NULL;
4659 }
4660 m_pBack = pPrevItem;
4661 m_ItemAllocator.Free(pBackItem);
4662 --m_Count;
4663}
4664
4665template<typename T>
4666void VmaRawList<T>::Clear()
4667{
4668 if (IsEmpty() == false)
4669 {
4670 ItemType* pItem = m_pBack;
4671 while (pItem != VMA_NULL)
4672 {
4673 ItemType* const pPrevItem = pItem->pPrev;
4674 m_ItemAllocator.Free(pItem);
4675 pItem = pPrevItem;
4676 }
4677 m_pFront = VMA_NULL;
4678 m_pBack = VMA_NULL;
4679 m_Count = 0;
4680 }
4681}
4682
// Unlinks pItem from the list and frees it. pItem must belong to this list.
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    // Fix up the predecessor's link (or the list head).
    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    // Fix up the successor's link (or the list tail).
    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
4712
// Inserts a new default-valued node before pItem and returns it.
// A null pItem means "insert at the end" (PushBack).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            // pItem was the front; the new node becomes the new front.
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}
4738
// Inserts a new default-valued node after pItem and returns it.
// A null pItem means "insert at the beginning" (PushFront).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            // pItem was the back; the new node becomes the new back.
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}
4764
4765template<typename T>
4766VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4767{
4768 ItemType* const newItem = InsertBefore(pItem);
4769 newItem->Value = value;
4770 return newItem;
4771}
4772
4773template<typename T>
4774VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4775{
4776 ItemType* const newItem = InsertAfter(pItem);
4777 newItem->Value = value;
4778 return newItem;
4779}
4780#endif // _VMA_RAW_LIST_FUNCTIONS
4781#endif // _VMA_RAW_LIST
4782
4783#ifndef _VMA_LIST
// STL-style wrapper around VmaRawList: provides forward/reverse and
// const/non-const bidirectional iterators plus the usual container methods.
// AllocatorT must expose m_pCallbacks (VkAllocationCallbacks*).
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class reverse_iterator;
    class const_iterator;
    class const_reverse_iterator;

    // Mutable forward iterator; end() is represented by a null m_pItem.
    class iterator
    {
        friend class const_iterator;
        friend class VmaList<T, AllocatorT>;
    public:
        iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
        iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}

        T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
        T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }

        bool operator==(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
        bool operator!=(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }

        iterator operator++(int) { iterator result = *this; ++*this; return result; }
        iterator operator--(int) { iterator result = *this; --*this; return result; }

        iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }
        iterator& operator--(); // Defined out of line: must handle decrement from end().

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
    };
    // Mutable reverse iterator; ++ walks toward the front via pPrev.
    class reverse_iterator
    {
        friend class const_reverse_iterator;
        friend class VmaList<T, AllocatorT>;
    public:
        reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
        reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}

        T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
        T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }

        bool operator==(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
        bool operator!=(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }

        reverse_iterator operator++(int) { reverse_iterator result = *this; ++* this; return result; }
        reverse_iterator operator--(int) { reverse_iterator result = *this; --* this; return result; }

        reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }
        reverse_iterator& operator--();

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        reverse_iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
    };
    // Read-only forward iterator.
    class const_iterator
    {
        friend class VmaList<T, AllocatorT>;
    public:
        const_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
        const_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
        const_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}

        // Escape hatch: strips constness to obtain a mutable iterator.
        iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }

        const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
        const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }

        bool operator==(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
        bool operator!=(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }

        const_iterator operator++(int) { const_iterator result = *this; ++* this; return result; }
        const_iterator operator--(int) { const_iterator result = *this; --* this; return result; }

        const_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }
        const_iterator& operator--();

    private:
        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
    };
    // Read-only reverse iterator.
    class const_reverse_iterator
    {
        friend class VmaList<T, AllocatorT>;
    public:
        const_reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
        const_reverse_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
        const_reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}

        // Escape hatch: strips constness to obtain a mutable reverse_iterator.
        reverse_iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }

        const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
        const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }

        bool operator==(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
        bool operator!=(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }

        const_reverse_iterator operator++(int) { const_reverse_iterator result = *this; ++* this; return result; }
        const_reverse_iterator operator--(int) { const_reverse_iterator result = *this; --* this; return result; }

        const_reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }
        const_reverse_iterator& operator--();

    private:
        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        const_reverse_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) {}

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    const_iterator begin() const { return cbegin(); }
    const_iterator end() const { return cend(); }

    reverse_iterator rbegin() { return reverse_iterator(&m_RawList, m_RawList.Back()); }
    reverse_iterator rend() { return reverse_iterator(&m_RawList, VMA_NULL); }

    const_reverse_iterator crbegin() const { return const_reverse_iterator(&m_RawList, m_RawList.Back()); }
    const_reverse_iterator crend() const { return const_reverse_iterator(&m_RawList, VMA_NULL); }

    const_reverse_iterator rbegin() const { return crbegin(); }
    const_reverse_iterator rend() const { return crend(); }

    void push_back(const T& value) { m_RawList.PushBack(value); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

    void clear() { m_RawList.Clear(); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }

private:
    VmaRawList<T> m_RawList;
};
4934
4935#ifndef _VMA_LIST_FUNCTIONS
// Pre-decrement. A null m_pItem represents end(); decrementing end() yields
// the last element of the (non-empty) list.
template<typename T, typename AllocatorT>
typename VmaList<T, AllocatorT>::iterator& VmaList<T, AllocatorT>::iterator::operator--()
{
    if (m_pItem != VMA_NULL)
    {
        m_pItem = m_pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
        m_pItem = m_pList->Back();
    }
    return *this;
}
4950
// Pre-decrement of the reverse iterator (moves toward the back via pNext).
// A null m_pItem represents rend(); decrementing rend() yields the list
// front, i.e. the last element of the reverse sequence.
template<typename T, typename AllocatorT>
typename VmaList<T, AllocatorT>::reverse_iterator& VmaList<T, AllocatorT>::reverse_iterator::operator--()
{
    if (m_pItem != VMA_NULL)
    {
        m_pItem = m_pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
        m_pItem = m_pList->Front();
    }
    return *this;
}
4965
// Pre-decrement; mirrors iterator::operator--() for the const variant.
template<typename T, typename AllocatorT>
typename VmaList<T, AllocatorT>::const_iterator& VmaList<T, AllocatorT>::const_iterator::operator--()
{
    if (m_pItem != VMA_NULL)
    {
        m_pItem = m_pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
        m_pItem = m_pList->Back();
    }
    return *this;
}
4980
4981template<typename T, typename AllocatorT>
4982typename VmaList<T, AllocatorT>::const_reverse_iterator& VmaList<T, AllocatorT>::const_reverse_iterator::operator--()
4983{
4984 if (m_pItem != VMA_NULL)
4985 {
4986 m_pItem = m_pItem->pNext;
4987 }
4988 else
4989 {
4990 VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4991 m_pItem = m_pList->Back();
4992 }
4993 return *this;
4994}
4995#endif // _VMA_LIST_FUNCTIONS
4996#endif // _VMA_LIST
4997
4998#ifndef _VMA_INTRUSIVE_LINKED_LIST
4999/*
5000Expected interface of ItemTypeTraits:
5001struct MyItemTypeTraits
5002{
5003 typedef MyItem ItemType;
5004 static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; }
5005 static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; }
5006 static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; }
5007 static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }
5008};
5009*/
// Doubly linked list that does NOT own its items: prev/next links live
// inside the items themselves and are accessed through ItemTypeTraits
// (see the expected-interface comment above).
template<typename ItemTypeTraits>
class VmaIntrusiveLinkedList
{
public:
    typedef typename ItemTypeTraits::ItemType ItemType;
    static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); }
    static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); }

    // Movable, not copyable.
    VmaIntrusiveLinkedList() = default;
    VmaIntrusiveLinkedList(VmaIntrusiveLinkedList && src);
    VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList&) = delete;
    VmaIntrusiveLinkedList& operator=(VmaIntrusiveLinkedList&& src);
    VmaIntrusiveLinkedList& operator=(const VmaIntrusiveLinkedList&) = delete;
    // The list must be emptied before destruction: it cannot free items it
    // does not own.
    ~VmaIntrusiveLinkedList() { VMA_HEAVY_ASSERT(IsEmpty()); }

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }
    ItemType* Front() { return m_Front; }
    ItemType* Back() { return m_Back; }
    const ItemType* Front() const { return m_Front; }
    const ItemType* Back() const { return m_Back; }

    // Pushed items must have null prev/next links.
    void PushBack(ItemType* item);
    void PushFront(ItemType* item);
    // Pop* unlink and return the item (links reset to null), never free it.
    ItemType* PopBack();
    ItemType* PopFront();

    // MyItem can be null - it means PushBack.
    void InsertBefore(ItemType* existingItem, ItemType* newItem);
    // MyItem can be null - it means PushFront.
    void InsertAfter(ItemType* existingItem, ItemType* newItem);
    void Remove(ItemType* item);
    void RemoveAll();

private:
    ItemType* m_Front = VMA_NULL;
    ItemType* m_Back = VMA_NULL;
    size_t m_Count = 0;
};
5050
5051#ifndef _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
5052template<typename ItemTypeTraits>
5053VmaIntrusiveLinkedList<ItemTypeTraits>::VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src)
5054 : m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count)
5055{
5056 src.m_Front = src.m_Back = VMA_NULL;
5057 src.m_Count = 0;
5058}
5059
5060template<typename ItemTypeTraits>
5061VmaIntrusiveLinkedList<ItemTypeTraits>& VmaIntrusiveLinkedList<ItemTypeTraits>::operator=(VmaIntrusiveLinkedList&& src)
5062{
5063 if (&src != this)
5064 {
5065 VMA_HEAVY_ASSERT(IsEmpty());
5066 m_Front = src.m_Front;
5067 m_Back = src.m_Back;
5068 m_Count = src.m_Count;
5069 src.m_Front = src.m_Back = VMA_NULL;
5070 src.m_Count = 0;
5071 }
5072 return *this;
5073}
5074
5075template<typename ItemTypeTraits>
5076void VmaIntrusiveLinkedList<ItemTypeTraits>::PushBack(ItemType* item)
5077{
5078 VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
5079 if (IsEmpty())
5080 {
5081 m_Front = item;
5082 m_Back = item;
5083 m_Count = 1;
5084 }
5085 else
5086 {
5087 ItemTypeTraits::AccessPrev(item) = m_Back;
5088 ItemTypeTraits::AccessNext(m_Back) = item;
5089 m_Back = item;
5090 ++m_Count;
5091 }
5092}
5093
5094template<typename ItemTypeTraits>
5095void VmaIntrusiveLinkedList<ItemTypeTraits>::PushFront(ItemType* item)
5096{
5097 VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
5098 if (IsEmpty())
5099 {
5100 m_Front = item;
5101 m_Back = item;
5102 m_Count = 1;
5103 }
5104 else
5105 {
5106 ItemTypeTraits::AccessNext(item) = m_Front;
5107 ItemTypeTraits::AccessPrev(m_Front) = item;
5108 m_Front = item;
5109 ++m_Count;
5110 }
5111}
5112
5113template<typename ItemTypeTraits>
5114typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopBack()
5115{
5116 VMA_HEAVY_ASSERT(m_Count > 0);
5117 ItemType* const backItem = m_Back;
5118 ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem);
5119 if (prevItem != VMA_NULL)
5120 {
5121 ItemTypeTraits::AccessNext(prevItem) = VMA_NULL;
5122 }
5123 m_Back = prevItem;
5124 --m_Count;
5125 ItemTypeTraits::AccessPrev(backItem) = VMA_NULL;
5126 ItemTypeTraits::AccessNext(backItem) = VMA_NULL;
5127 return backItem;
5128}
5129
5130template<typename ItemTypeTraits>
5131typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopFront()
5132{
5133 VMA_HEAVY_ASSERT(m_Count > 0);
5134 ItemType* const frontItem = m_Front;
5135 ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem);
5136 if (nextItem != VMA_NULL)
5137 {
5138 ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL;
5139 }
5140 m_Front = nextItem;
5141 --m_Count;
5142 ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL;
5143 ItemTypeTraits::AccessNext(frontItem) = VMA_NULL;
5144 return frontItem;
5145}
5146
// Links newItem (whose links must be null) immediately before existingItem.
// A null existingItem means "append at the back" (PushBack).
template<typename ItemTypeTraits>
void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertBefore(ItemType* existingItem, ItemType* newItem)
{
    VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
    if (existingItem != VMA_NULL)
    {
        ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem);
        ItemTypeTraits::AccessPrev(newItem) = prevItem;
        ItemTypeTraits::AccessNext(newItem) = existingItem;
        ItemTypeTraits::AccessPrev(existingItem) = newItem;
        if (prevItem != VMA_NULL)
        {
            ItemTypeTraits::AccessNext(prevItem) = newItem;
        }
        else
        {
            // existingItem was the front; newItem becomes the new front.
            VMA_HEAVY_ASSERT(m_Front == existingItem);
            m_Front = newItem;
        }
        ++m_Count;
    }
    else
        PushBack(newItem);
}
5171
5172template<typename ItemTypeTraits>
5173void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertAfter(ItemType* existingItem, ItemType* newItem)
5174{
5175 VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
5176 if (existingItem != VMA_NULL)
5177 {
5178 ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem);
5179 ItemTypeTraits::AccessNext(newItem) = nextItem;
5180 ItemTypeTraits::AccessPrev(newItem) = existingItem;
5181 ItemTypeTraits::AccessNext(existingItem) = newItem;
5182 if (nextItem != VMA_NULL)
5183 {
5184 ItemTypeTraits::AccessPrev(nextItem) = newItem;
5185 }
5186 else
5187 {
5188 VMA_HEAVY_ASSERT(m_Back == existingItem);
5189 m_Back = newItem;
5190 }
5191 ++m_Count;
5192 }
5193 else
5194 return PushFront(newItem);
5195}
5196
// Unlinks item from the list and resets its links to null. The item must be
// a member of this list; it is not freed.
template<typename ItemTypeTraits>
void VmaIntrusiveLinkedList<ItemTypeTraits>::Remove(ItemType* item)
{
    VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0);
    // Fix up the predecessor's link (or the list head).
    if (ItemTypeTraits::GetPrev(item) != VMA_NULL)
    {
        ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item);
    }
    else
    {
        VMA_HEAVY_ASSERT(m_Front == item);
        m_Front = ItemTypeTraits::GetNext(item);
    }

    // Fix up the successor's link (or the list tail).
    if (ItemTypeTraits::GetNext(item) != VMA_NULL)
    {
        ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item);
    }
    else
    {
        VMA_HEAVY_ASSERT(m_Back == item);
        m_Back = ItemTypeTraits::GetPrev(item);
    }
    ItemTypeTraits::AccessPrev(item) = VMA_NULL;
    ItemTypeTraits::AccessNext(item) = VMA_NULL;
    --m_Count;
}
5224
// Unlinks every item (resetting each item's links to null) and empties the
// list. Items are not freed - they are owned elsewhere.
template<typename ItemTypeTraits>
void VmaIntrusiveLinkedList<ItemTypeTraits>::RemoveAll()
{
    if (!IsEmpty())
    {
        // Walk back to front, clearing links as we go.
        ItemType* item = m_Back;
        while (item != VMA_NULL)
        {
            // Read pPrev before clearing it.
            ItemType* const prevItem = ItemTypeTraits::AccessPrev(item);
            ItemTypeTraits::AccessPrev(item) = VMA_NULL;
            ItemTypeTraits::AccessNext(item) = VMA_NULL;
            item = prevItem;
        }
        m_Front = VMA_NULL;
        m_Back = VMA_NULL;
        m_Count = 0;
    }
}
5243#endif // _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
5244#endif // _VMA_INTRUSIVE_LINKED_LIST
5245
5246// Unused in this version.
5247#if 0
5248
5249#ifndef _VMA_PAIR
// Minimal POD substitute for std::pair. (Currently compiled out via #if 0.)
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() {}
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) {}
};
5259
// Comparator ordering VmaPairs by their first member; the heterogeneous
// overload allows binary search by key alone. (Compiled out via #if 0.)
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
5272#endif // _VMA_PAIR
5273
5274#ifndef _VMA_MAP
5275/* Class compatible with subset of interface of std::unordered_map.
5276KeyT, ValueT must be POD because they will be stored in VmaVector.
5277*/
// Sorted-vector map keyed on PairType::first. (Compiled out via #if 0.)
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator; // Raw pointer into the sorted vector.

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) {}

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }
    size_t size() { return m_Vector.size(); }

    void insert(const PairType& pair);
    // Returns end() when the key is absent.
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Kept sorted by key so find() can binary-search.
    VmaVector< PairType, VmaStlAllocator<PairType>> m_Vector;
};
5298
5299#ifndef _VMA_MAP_FUNCTIONS
// Inserts pair at its sorted position (binary search for the first element
// not less than pair). Duplicate keys are not rejected.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
5310
// Binary-searches for key; returns an iterator to the matching pair, or
// end() if no element has that key.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    // Lower bound found - verify it is an exact key match.
    if ((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
5328
// Removes the element referenced by it (must be a valid iterator of this map).
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
5334#endif // _VMA_MAP_FUNCTIONS
5335#endif // _VMA_MAP
5336
5337#endif // #if 0
5338
5339#if !defined(_VMA_STRING_BUILDER) && VMA_STATS_STRING_ENABLED
// Minimal growable character buffer used to build the statistics string.
// The buffer is NOT NUL-terminated internally; use GetLength() together with GetData().
class VmaStringBuilder
{
public:
    VmaStringBuilder(const VkAllocationCallbacks* allocationCallbacks) : m_Data(VmaStlAllocator<char>(allocationCallbacks)) {}
    ~VmaStringBuilder() = default;

    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }
    void AddNewLine() { Add('\n'); }
    void Add(char ch) { m_Data.push_back(ch); }

    void Add(const char* pStr);
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector<char, VmaStlAllocator<char>> m_Data;
};
5359
5360#ifndef _VMA_STRING_BUILDER_FUNCTIONS
5361void VmaStringBuilder::Add(const char* pStr)
5362{
5363 const size_t strLen = strlen(pStr);
5364 if (strLen > 0)
5365 {
5366 const size_t oldCount = m_Data.size();
5367 m_Data.resize(oldCount + strLen);
5368 memcpy(m_Data.data() + oldCount, pStr, strLen);
5369 }
5370}
5371
5372void VmaStringBuilder::AddNumber(uint32_t num)
5373{
5374 char buf[11];
5375 buf[10] = '\0';
5376 char* p = &buf[10];
5377 do
5378 {
5379 *--p = '0' + (num % 10);
5380 num /= 10;
5381 } while (num);
5382 Add(p);
5383}
5384
5385void VmaStringBuilder::AddNumber(uint64_t num)
5386{
5387 char buf[21];
5388 buf[20] = '\0';
5389 char* p = &buf[20];
5390 do
5391 {
5392 *--p = '0' + (num % 10);
5393 num /= 10;
5394 } while (num);
5395 Add(p);
5396}
5397
// Appends the pointer value formatted as hexadecimal text (via VmaPtrToStr).
void VmaStringBuilder::AddPointer(const void* ptr)
{
    char buf[21];
    VmaPtrToStr(buf, sizeof(buf), ptr);
    Add(buf);
}
5404#endif //_VMA_STRING_BUILDER_FUNCTIONS
5405#endif // _VMA_STRING_BUILDER
5406
5407#if !defined(_VMA_JSON_WRITER) && VMA_STATS_STRING_ENABLED
5408/*
5409Allows to conveniently build a correct JSON document to be written to the
5410VmaStringBuilder passed to the constructor.
5411*/
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    // sb - string builder to write the document to. Must remain alive for the whole lifetime of this object.
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    // Asserts (debug builds) that the document was closed correctly:
    // no open string and no unclosed objects/arrays.
    ~VmaJsonWriter();

    // Begins object by writing "{".
    // Inside an object, you must call pairs of WriteString and a value, e.g.:
    // j.BeginObject(true); j.WriteString("A"); j.WriteNumber(1); j.WriteString("B"); j.WriteNumber(2); j.EndObject();
    // Will write: { "A": 1, "B": 2 }
    void BeginObject(bool singleLine = false);
    // Ends object by writing "}".
    void EndObject();

    // Begins array by writing "[".
    // Inside an array, you can write a sequence of any values.
    void BeginArray(bool singleLine = false);
    // Ends array by writing "]".
    void EndArray();

    // Writes a string value inside "".
    // pStr can contain any ANSI characters, including '"', new line etc. - they will be properly escaped.
    void WriteString(const char* pStr);

    // Begins writing a string value.
    // Call BeginString, ContinueString, ContinueString, ..., EndString instead of
    // WriteString to conveniently build the string content incrementally, made of
    // parts including numbers.
    void BeginString(const char* pStr = VMA_NULL);
    // Posts next part of an open string.
    void ContinueString(const char* pStr);
    // Posts next part of an open string. The number is converted to decimal characters.
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Size(size_t n);
    // Posts next part of an open string. Pointer value is converted to characters
    // using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00
    void ContinueString_Pointer(const void* ptr);
    // Ends writing a string value by writing '"'.
    void EndString(const char* pStr = VMA_NULL);

    // Writes a number value.
    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteSize(size_t n);
    // Writes a boolean value - false or true.
    void WriteBool(bool b);
    // Writes a null value.
    void WriteNull();

private:
    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One stack entry per currently open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Values written so far in this collection. Inside an object, keys and
        // values both count, so even positions must be key strings (see BeginValue).
        uint32_t valueCount;
        bool singleLineMode;
    };

    static const char* const INDENT;

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;

    // Write size_t for less than 64bits
    void WriteSize(size_t n, std::integral_constant<bool, false>) { m_SB.AddNumber(static_cast<uint32_t>(n)); }
    // Write size_t for 64bits
    void WriteSize(size_t n, std::integral_constant<bool, true>) { m_SB.AddNumber(static_cast<uint64_t>(n)); }

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
const char* const VmaJsonWriter::INDENT = "  ";
5492
5493#ifndef _VMA_JSON_WRITER_FUNCTIONS
// The writer only references sb; the caller keeps ownership and must keep it alive.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb)
    : m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false) {}

// Debug-checks that the JSON document was fully closed before destruction.
VmaJsonWriter::~VmaJsonWriter()
{
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
5504
5505void VmaJsonWriter::BeginObject(bool singleLine)
5506{
5507 VMA_ASSERT(!m_InsideString);
5508
5509 BeginValue(false);
5510 m_SB.Add('{');
5511
5512 StackItem item;
5513 item.type = COLLECTION_TYPE_OBJECT;
5514 item.valueCount = 0;
5515 item.singleLineMode = singleLine;
5516 m_Stack.push_back(item);
5517}
5518
5519void VmaJsonWriter::EndObject()
5520{
5521 VMA_ASSERT(!m_InsideString);
5522
5523 WriteIndent(true);
5524 m_SB.Add('}');
5525
5526 VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
5527 m_Stack.pop_back();
5528}
5529
5530void VmaJsonWriter::BeginArray(bool singleLine)
5531{
5532 VMA_ASSERT(!m_InsideString);
5533
5534 BeginValue(false);
5535 m_SB.Add('[');
5536
5537 StackItem item;
5538 item.type = COLLECTION_TYPE_ARRAY;
5539 item.valueCount = 0;
5540 item.singleLineMode = singleLine;
5541 m_Stack.push_back(item);
5542}
5543
5544void VmaJsonWriter::EndArray()
5545{
5546 VMA_ASSERT(!m_InsideString);
5547
5548 WriteIndent(true);
5549 m_SB.Add(']');
5550
5551 VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
5552 m_Stack.pop_back();
5553}
5554
// Convenience wrapper: writes a complete quoted, escaped string value in one call.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
5560
5561void VmaJsonWriter::BeginString(const char* pStr)
5562{
5563 VMA_ASSERT(!m_InsideString);
5564
5565 BeginValue(true);
5566 m_SB.Add('"');
5567 m_InsideString = true;
5568 if (pStr != VMA_NULL && pStr[0] != '\0')
5569 {
5570 ContinueString(pStr);
5571 }
5572}
5573
5574void VmaJsonWriter::ContinueString(const char* pStr)
5575{
5576 VMA_ASSERT(m_InsideString);
5577
5578 const size_t strLen = strlen(pStr);
5579 for (size_t i = 0; i < strLen; ++i)
5580 {
5581 char ch = pStr[i];
5582 if (ch == '\\')
5583 {
5584 m_SB.Add("\\\\");
5585 }
5586 else if (ch == '"')
5587 {
5588 m_SB.Add("\\\"");
5589 }
5590 else if (ch >= 32)
5591 {
5592 m_SB.Add(ch);
5593 }
5594 else switch (ch)
5595 {
5596 case '\b':
5597 m_SB.Add("\\b");
5598 break;
5599 case '\f':
5600 m_SB.Add("\\f");
5601 break;
5602 case '\n':
5603 m_SB.Add("\\n");
5604 break;
5605 case '\r':
5606 m_SB.Add("\\r");
5607 break;
5608 case '\t':
5609 m_SB.Add("\\t");
5610 break;
5611 default:
5612 VMA_ASSERT(0 && "Character not currently supported.");
5613 break;
5614 }
5615 }
5616}
5617
// Appends the decimal digits of n to the currently open string value.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

// 64-bit overload of the above.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

// Appends a size_t, dispatching to the 32- or 64-bit path at compile time.
void VmaJsonWriter::ContinueString_Size(size_t n)
{
    VMA_ASSERT(m_InsideString);
    // Fix for AppleClang incorrect type casting
    // TODO: Change to if constexpr when C++17 used as minimal standard
    WriteSize(n, std::is_same<size_t, uint64_t>{});
}

// Appends a pointer formatted as hexadecimal text to the open string value.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
5643
5644void VmaJsonWriter::EndString(const char* pStr)
5645{
5646 VMA_ASSERT(m_InsideString);
5647 if (pStr != VMA_NULL && pStr[0] != '\0')
5648 {
5649 ContinueString(pStr);
5650 }
5651 m_SB.Add('"');
5652 m_InsideString = false;
5653}
5654
// Writes a 32-bit unsigned number as a standalone JSON value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

// Writes a 64-bit unsigned number as a standalone JSON value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

// Writes a size_t value, dispatching to the 32- or 64-bit path at compile time.
void VmaJsonWriter::WriteSize(size_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    // Fix for AppleClang incorrect type casting
    // TODO: Change to if constexpr when C++17 used as minimal standard
    WriteSize(n, std::is_same<size_t, uint64_t>{});
}

// Writes the literal "true" or "false".
void VmaJsonWriter::WriteBool(bool b)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add(b ? "true" : "false");
}

// Writes the literal "null".
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
5691
// Common prologue for every value: emits the separator (": " after an object
// key, ", " between siblings) plus indentation, and bumps the current
// collection's value counter. No-op separators at top level (empty stack).
void VmaJsonWriter::BeginValue(bool isString)
{
    if (!m_Stack.empty())
    {
        StackItem& currItem = m_Stack.back();
        // Inside an object, even positions (0, 2, 4, ...) are keys and must be strings.
        if (currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 == 0)
        {
            VMA_ASSERT(isString);
        }

        // Odd position inside an object = the value following its key.
        if (currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 != 0)
        {
            m_SB.Add(": ");
        }
        else if (currItem.valueCount > 0)
        {
            m_SB.Add(", ");
            WriteIndent();
        }
        else
        {
            // First entry of the collection: indent only, no separator.
            WriteIndent();
        }
        ++currItem.valueCount;
    }
}
5720
5721void VmaJsonWriter::WriteIndent(bool oneLess)
5722{
5723 if (!m_Stack.empty() && !m_Stack.back().singleLineMode)
5724 {
5725 m_SB.AddNewLine();
5726
5727 size_t count = m_Stack.size();
5728 if (count > 0 && oneLess)
5729 {
5730 --count;
5731 }
5732 for (size_t i = 0; i < count; ++i)
5733 {
5734 m_SB.Add(INDENT);
5735 }
5736 }
5737}
5738#endif // _VMA_JSON_WRITER_FUNCTIONS
5739
// Serializes a VmaDetailedStatistics struct as a JSON object.
// Min/Max fields are only emitted when there is more than one allocation /
// unused range (with 0 or 1 entries the values carry no extra information).
static void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat)
{
    json.BeginObject();

    json.WriteString("BlockCount");
    json.WriteNumber(stat.statistics.blockCount);
    json.WriteString("BlockBytes");
    json.WriteNumber(stat.statistics.blockBytes);
    json.WriteString("AllocationCount");
    json.WriteNumber(stat.statistics.allocationCount);
    json.WriteString("AllocationBytes");
    json.WriteNumber(stat.statistics.allocationBytes);
    json.WriteString("UnusedRangeCount");
    json.WriteNumber(stat.unusedRangeCount);

    if (stat.statistics.allocationCount > 1)
    {
        json.WriteString("AllocationSizeMin");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("AllocationSizeMax");
        json.WriteNumber(stat.allocationSizeMax);
    }
    if (stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSizeMin");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("UnusedRangeSizeMax");
        json.WriteNumber(stat.unusedRangeSizeMax);
    }
    json.EndObject();
}
5771#endif // _VMA_JSON_WRITER
5772
5773#ifndef _VMA_MAPPING_HYSTERESIS
5774
// Hysteresis helper used by VmaDeviceMemoryBlock: based on the recent history
// of Map/Unmap and Alloc/Free events it decides whether the block should hold
// one extra mapping reference (m_ExtraMapping is 0 or 1) - presumably to avoid
// frequent map/unmap churn (NOTE(review): intent inferred from the class name
// and callers; confirm against VMA_MAPPING_HYSTERESIS_ENABLED docs).
// When VMA_MAPPING_HYSTERESIS_ENABLED is 0 every method is a no-op.
class VmaMappingHysteresis
{
    VMA_CLASS_NO_COPY(VmaMappingHysteresis)
public:
    VmaMappingHysteresis() = default;

    // Current extra mapping reference count held by hysteresis: 0 or 1.
    uint32_t GetExtraMapping() const { return m_ExtraMapping; }

    // Call when Map was called.
    // Returns true if switched to extra +1 mapping reference count.
    bool PostMap()
    {
#if VMA_MAPPING_HYSTERESIS_ENABLED
        if(m_ExtraMapping == 0)
        {
            // Count maps; after COUNTER_MIN_EXTRA_MAPPING of them, latch the
            // extra mapping on and reset both counters.
            ++m_MajorCounter;
            if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING)
            {
                m_ExtraMapping = 1;
                m_MajorCounter = 0;
                m_MinorCounter = 0;
                return true;
            }
        }
        else // m_ExtraMapping == 1
            PostMinorCounter();
#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
        return false;
    }

    // Call when Unmap was called.
    void PostUnmap()
    {
#if VMA_MAPPING_HYSTERESIS_ENABLED
        if(m_ExtraMapping == 0)
            ++m_MajorCounter;
        else // m_ExtraMapping == 1
            PostMinorCounter();
#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
    }

    // Call when allocation was made from the memory block.
    void PostAlloc()
    {
#if VMA_MAPPING_HYSTERESIS_ENABLED
        if(m_ExtraMapping == 1)
            ++m_MajorCounter;
        else // m_ExtraMapping == 0
            PostMinorCounter();
#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
    }

    // Call when allocation was freed from the memory block.
    // Returns true if switched to extra -1 mapping reference count.
    bool PostFree()
    {
#if VMA_MAPPING_HYSTERESIS_ENABLED
        if(m_ExtraMapping == 1)
        {
            // Latch the extra mapping off only when frees clearly dominate:
            // the major counter must both reach the threshold and exceed the
            // minor counter by more than 1.
            ++m_MajorCounter;
            if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING &&
                m_MajorCounter > m_MinorCounter + 1)
            {
                m_ExtraMapping = 0;
                m_MajorCounter = 0;
                m_MinorCounter = 0;
                return true;
            }
        }
        else // m_ExtraMapping == 0
            PostMinorCounter();
#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
        return false;
    }

private:
    // Threshold the major counter must reach before the extra-mapping state flips.
    static const int32_t COUNTER_MIN_EXTRA_MAPPING = 7;

    uint32_t m_MinorCounter = 0;
    uint32_t m_MajorCounter = 0;
    uint32_t m_ExtraMapping = 0; // 0 or 1.

    // Advances the minor counter towards the major one; once equal, decays
    // both (keeping their difference) so stale history fades out.
    void PostMinorCounter()
    {
        if(m_MinorCounter < m_MajorCounter)
        {
            ++m_MinorCounter;
        }
        else if(m_MajorCounter > 0)
        {
            --m_MajorCounter;
            --m_MinorCounter;
        }
    }
};
5870
5871#endif // _VMA_MAPPING_HYSTERESIS
5872
5873#ifndef _VMA_DEVICE_MEMORY_BLOCK
5874/*
5875Represents a single block of device memory (`VkDeviceMemory`) with all the
5876data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5877
5878Thread-safety:
5879- Access to m_pMetadata must be externally synchronized.
5880- Map, Unmap, Bind* are synchronized internally.
5881*/
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of suballocations within this block.
    // Access must be externally synchronized (see thread-safety note above).
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    ~VmaDeviceMemoryBlock();

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        VmaPool hParentPool,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm,
        VkDeviceSize bufferImageGranularity);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VmaPool GetParentPool() const { return m_hParentPool; }
    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }
    uint32_t GetMapRefCount() const { return m_MapCount; }

    // Call when allocation/free was made from m_pMetadata.
    // Used for m_MappingHysteresis.
    void PostAlloc() { m_MappingHysteresis.PostAlloc(); }
    void PostFree(VmaAllocator hAllocator);

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;
    VkResult CheckCorruption(VmaAllocator hAllocator);

    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Debug-margin corruption detection helpers (magic value around an allocation).
    VkResult WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkBuffer hBuffer,
        const void* pNext);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkDeviceSize allocationLocalOffset,
        VkImage hImage,
        const void* pNext);

private:
    VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool.
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    /*
    Protects access to m_hMemory so it is not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    Also protects m_MapCount, m_pMappedData.
    Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
    */
    VMA_MUTEX m_MapAndBindMutex;
    VmaMappingHysteresis m_MappingHysteresis;
    // Reference count of vkMapMemory users of this block.
    uint32_t m_MapCount;
    void* m_pMappedData;
};
5956#endif // _VMA_DEVICE_MEMORY_BLOCK
5957
5958#ifndef _VMA_ALLOCATION_T
// Represents a single allocation handed out to the user: either a region of a
// VmaDeviceMemoryBlock (ALLOCATION_TYPE_BLOCK) or a dedicated VkDeviceMemory
// (ALLOCATION_TYPE_DEDICATED). Allocated from a VmaPoolAllocator pool.
struct VmaAllocation_T
{
    friend struct VmaDedicatedAllocationListItemTraits;

    // Bits stored in m_Flags.
    enum FLAGS
    {
        FLAG_PERSISTENT_MAP = 0x01,
        FLAG_MAPPING_ALLOWED = 0x02,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    // This struct is allocated using VmaPoolAllocator.
    VmaAllocation_T(bool mappingAllowed);
    ~VmaAllocation_T();

    void InitBlockAllocation(
        VmaDeviceMemoryBlock* block,
        VmaAllocHandle allocHandle,
        VkDeviceSize alignment,
        VkDeviceSize size,
        uint32_t memoryTypeIndex,
        VmaSuballocationType suballocationType,
        bool mapped);
    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        VmaPool hParentPool,
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size);

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    void* GetUserData() const { return m_pUserData; }
    const char* GetName() const { return m_pName; }
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK (asserted).
    VmaDeviceMemoryBlock* GetBlock() const { VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); return m_BlockAllocation.m_Block; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    bool IsPersistentMap() const { return (m_Flags & FLAG_PERSISTENT_MAP) != 0; }
    bool IsMappingAllowed() const { return (m_Flags & FLAG_MAPPING_ALLOWED) != 0; }

    void SetUserData(VmaAllocator hAllocator, void* pUserData) { m_pUserData = pUserData; }
    void SetName(VmaAllocator hAllocator, const char* pName);
    void FreeName(VmaAllocator hAllocator);
    uint8_t SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation);
    VmaAllocHandle GetAllocHandle() const;
    VkDeviceSize GetOffset() const;
    VmaPool GetParentPool() const;
    VkDeviceMemory GetMemory() const;
    void* GetMappedData() const;

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    void InitBufferImageUsage(uint32_t bufferImageUsage);
    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaDeviceMemoryBlock* m_Block;
        VmaAllocHandle m_AllocHandle;
    };
    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool.
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
        // Intrusive doubly-linked list links, accessed via
        // VmaDedicatedAllocationListItemTraits.
        VmaAllocation_T* m_Prev;
        VmaAllocation_T* m_Next;
    };
    // Which union member is active is determined by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    char* m_pName;
    uint32_t m_MemoryTypeIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS
#if VMA_STATS_STRING_ENABLED
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif
};
6070#endif // _VMA_ALLOCATION_T
6071
6072#ifndef _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS
// Traits adapter that lets VmaIntrusiveLinkedList store VmaAllocation_T items
// through the m_Prev/m_Next links of their DedicatedAllocation union member.
// All accessors assert the allocation is really of the dedicated type, since
// only then is that union member active.
struct VmaDedicatedAllocationListItemTraits
{
    typedef VmaAllocation_T ItemType;

    static ItemType* GetPrev(const ItemType* item)
    {
        VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
        return item->m_DedicatedAllocation.m_Prev;
    }
    static ItemType* GetNext(const ItemType* item)
    {
        VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
        return item->m_DedicatedAllocation.m_Next;
    }
    static ItemType*& AccessPrev(ItemType* item)
    {
        VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
        return item->m_DedicatedAllocation.m_Prev;
    }
    static ItemType*& AccessNext(ItemType* item)
    {
        VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
        return item->m_DedicatedAllocation.m_Next;
    }
};
6098#endif // _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS
6099
6100#ifndef _VMA_DEDICATED_ALLOCATION_LIST
6101/*
6102Stores linked list of VmaAllocation_T objects.
6103Thread-safe, synchronized internally.
6104*/
class VmaDedicatedAllocationList
{
public:
    VmaDedicatedAllocationList() {}
    // Asserts that no dedicated allocations are left unfreed.
    ~VmaDedicatedAllocationList();

    // useMutex = false disables all internal locking (single-threaded use).
    void Init(bool useMutex) { m_UseMutex = useMutex; }
    bool Validate();

    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats);
    void AddStatistics(VmaStatistics& inoutStats);
#if VMA_STATS_STRING_ENABLED
    // Writes JSON array with the list of allocations.
    void BuildStatsString(VmaJsonWriter& json);
#endif

    bool IsEmpty();
    void Register(VmaAllocation alloc);
    void Unregister(VmaAllocation alloc);

private:
    typedef VmaIntrusiveLinkedList<VmaDedicatedAllocationListItemTraits> DedicatedAllocationLinkedList;

    bool m_UseMutex = true;
    // Guards m_AllocationList (read lock for traversal, write lock for mutation).
    VMA_RW_MUTEX m_Mutex;
    DedicatedAllocationLinkedList m_AllocationList;
};
6132
6133#ifndef _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS
6134
// Destructor only diagnoses leaks; it does not free the allocations themselves.
VmaDedicatedAllocationList::~VmaDedicatedAllocationList()
{
    VMA_HEAVY_ASSERT(Validate());

    if (!m_AllocationList.IsEmpty())
    {
        VMA_ASSERT(false && "Unfreed dedicated allocations found!");
    }
}
6144
6145bool VmaDedicatedAllocationList::Validate()
6146{
6147 const size_t declaredCount = m_AllocationList.GetCount();
6148 size_t actualCount = 0;
6149 VmaMutexLockRead lock(m_Mutex, m_UseMutex);
6150 for (VmaAllocation alloc = m_AllocationList.Front();
6151 alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc))
6152 {
6153 ++actualCount;
6154 }
6155 VMA_VALIDATE(actualCount == declaredCount);
6156
6157 return true;
6158}
6159
6160void VmaDedicatedAllocationList::AddDetailedStatistics(VmaDetailedStatistics& inoutStats)
6161{
6162 for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item))
6163 {
6164 const VkDeviceSize size = item->GetSize();
6165 inoutStats.statistics.blockCount++;
6166 inoutStats.statistics.blockBytes += size;
6167 VmaAddDetailedStatisticsAllocation(inoutStats, item->GetSize());
6168 }
6169}
6170
// Accumulates basic statistics into inoutStats. Each dedicated allocation
// counts as one block and one allocation of the same size.
void VmaDedicatedAllocationList::AddStatistics(VmaStatistics& inoutStats)
{
    VmaMutexLockRead lock(m_Mutex, m_UseMutex);

    const uint32_t allocCount = (uint32_t)m_AllocationList.GetCount();
    inoutStats.blockCount += allocCount;
    inoutStats.allocationCount += allocCount;

    for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item))
    {
        const VkDeviceSize size = item->GetSize();
        inoutStats.blockBytes += size;
        inoutStats.allocationBytes += size;
    }
}
6186
#if VMA_STATS_STRING_ENABLED
// Writes a JSON array with one single-line object per dedicated allocation.
void VmaDedicatedAllocationList::BuildStatsString(VmaJsonWriter& json)
{
    VmaMutexLockRead lock(m_Mutex, m_UseMutex);
    json.BeginArray();
    for (VmaAllocation alloc = m_AllocationList.Front();
        alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc))
    {
        json.BeginObject(true);
        alloc->PrintParameters(json);
        json.EndObject();
    }
    json.EndArray();
}
#endif // VMA_STATS_STRING_ENABLED

// Thread-safe emptiness check.
bool VmaDedicatedAllocationList::IsEmpty()
{
    VmaMutexLockRead lock(m_Mutex, m_UseMutex);
    return m_AllocationList.IsEmpty();
}

// Adds alloc to the list; alloc must be a dedicated allocation (see traits).
void VmaDedicatedAllocationList::Register(VmaAllocation alloc)
{
    VmaMutexLockWrite lock(m_Mutex, m_UseMutex);
    m_AllocationList.PushBack(alloc);
}

// Removes alloc from the list; alloc must currently be registered.
void VmaDedicatedAllocationList::Unregister(VmaAllocation alloc)
{
    VmaMutexLockWrite lock(m_Mutex, m_UseMutex);
    m_AllocationList.Remove(alloc);
}
6220#endif // _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS
6221#endif // _VMA_DEDICATED_ALLOCATION_LIST
6222
6223#ifndef _VMA_SUBALLOCATION
6224/*
6225Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
6226allocated memory block or free.
6227*/
// One region of a VmaDeviceMemoryBlock: either an allocation or a free range.
struct VmaSuballocation
{
    VkDeviceSize offset;
    VkDeviceSize size;
    // For real allocations this is the VmaAllocation; for virtual allocations
    // it is the user-provided pointer.
    void* userData;
    VmaSuballocationType type;
};

// Comparator for offsets.
struct VmaSuballocationOffsetLess
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};

// Reverse-order counterpart of VmaSuballocationOffsetLess.
struct VmaSuballocationOffsetGreater
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset > rhs.offset;
    }
};

// Orders suballocation-list iterators by the size of the pointed-to region;
// the second overload enables binary search against a plain size value.
struct VmaSuballocationItemSizeLess
{
    bool operator()(const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }

    bool operator()(const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
6267#endif // _VMA_SUBALLOCATION
6268
6269#ifndef _VMA_ALLOCATION_REQUEST
6270/*
6271Parameters of planned allocation inside a VmaDeviceMemoryBlock.
6272item points to a FREE suballocation.
6273*/
// Parameters of a planned allocation inside a VmaDeviceMemoryBlock, produced
// by VmaBlockMetadata::CreateAllocationRequest and consumed by Alloc.
struct VmaAllocationRequest
{
    VmaAllocHandle allocHandle;
    VkDeviceSize size;
    // Points to a FREE suballocation that will host the new allocation.
    VmaSuballocationList::iterator item;
    // Opaque data owned by the metadata implementation that created the request.
    void* customData;
    uint64_t algorithmData;
    VmaAllocationRequestType type;
};
6283#endif // _VMA_ALLOCATION_REQUEST
6284
6285#ifndef _VMA_BLOCK_METADATA
6286/*
6287Data structure used for bookkeeping of allocations and unused ranges of memory
6288in a single VkDeviceMemory block.
6289*/
// Abstract base for the bookkeeping of allocations and unused ranges of memory
// in a single VkDeviceMemory block; concrete algorithms subclass it.
class VmaBlockMetadata
{
public:
    // pAllocationCallbacks, if not null, must be owned externally - alive and unchanged for the whole lifetime of this object.
    VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks,
        VkDeviceSize bufferImageGranularity, bool isVirtual);
    virtual ~VmaBlockMetadata() = default;

    virtual void Init(VkDeviceSize size) { m_Size = size; }
    bool IsVirtual() const { return m_IsVirtual; }
    VkDeviceSize GetSize() const { return m_Size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    virtual size_t GetAllocationCount() const = 0;
    virtual size_t GetFreeRegionsCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;
    virtual void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) = 0;
    virtual VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const = 0;
    virtual void* GetAllocationUserData(VmaAllocHandle allocHandle) const = 0;

    // Iteration over allocations: begin handle plus successor function.
    virtual VmaAllocHandle GetAllocationListBegin() const = 0;
    virtual VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const = 0;
    virtual VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const = 0;

    // Shouldn't modify blockCount.
    virtual void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const = 0;
    virtual void AddStatistics(VmaStatistics& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        void* userData) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(VmaAllocHandle allocHandle) = 0;

    // Frees all allocations.
    // Careful! Don't call it if there are VmaAllocation objects owned by userData of cleared allocations!
    virtual void Clear() = 0;

    virtual void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) = 0;
    virtual void DebugLogAllAllocations() const = 0;

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    // Virtual blocks have no debug margin; real blocks use VMA_DEBUG_MARGIN.
    VkDeviceSize GetDebugMargin() const { return IsVirtual() ? 0 : VMA_DEBUG_MARGIN; }

    void DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const;
#if VMA_STATS_STRING_ENABLED
    // mapRefCount == UINT32_MAX means unspecified.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset, VkDeviceSize size, void* userData) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const VkDeviceSize m_BufferImageGranularity;
    const bool m_IsVirtual;
};
6381
6382#ifndef _VMA_BLOCK_METADATA_FUNCTIONS
// Size starts at 0; the real size is set later via Init().
VmaBlockMetadata::VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks,
    VkDeviceSize bufferImageGranularity, bool isVirtual)
    : m_Size(0),
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_BufferImageGranularity(bufferImageGranularity),
    m_IsVirtual(isVirtual) {}
6389
// Logs a single unfreed (leaked) allocation.
// For virtual blocks userData is the raw user pointer; for real blocks it is
// the VmaAllocation, from which user data, name, type and usage are read.
void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const
{
    if (IsVirtual())
    {
        VMA_DEBUG_LOG("UNFREED VIRTUAL ALLOCATION; Offset: %llu; Size: %llu; UserData: %p", offset, size, userData);
    }
    else
    {
        // Non-virtual blocks always store a VmaAllocation in userData.
        VMA_ASSERT(userData != VMA_NULL);
        VmaAllocation allocation = reinterpret_cast<VmaAllocation>(userData);

        userData = allocation->GetUserData();
        const char* name = allocation->GetName();

#if VMA_STATS_STRING_ENABLED
        VMA_DEBUG_LOG("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %s; Usage: %u",
            offset, size, userData, name ? name : "vma_empty",
            VMA_SUBALLOCATION_TYPE_NAMES[allocation->GetSuballocationType()],
            allocation->GetBufferImageUsage());
#else
        VMA_DEBUG_LOG("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %u",
            offset, size, userData, name ? name : "vma_empty",
            (uint32_t)allocation->GetSuballocationType());
#endif // VMA_STATS_STRING_ENABLED
    }

}
6417
6418#if VMA_STATS_STRING_ENABLED
// Writes the JSON preamble of a block's detailed map: byte totals and counts,
// then opens the "Suballocations" array. Must be paired with
// PrintDetailedMap_End().
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount) const
{
    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteSize(unusedBytes);

    json.WriteString("Allocations");
    json.WriteSize(allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteSize(unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
6437
// Writes one used suballocation as a JSON object.
// Virtual blocks print size and the raw userData pointer here; real blocks
// delegate to the VmaAllocation stored in userData.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset, VkDeviceSize size, void* userData) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    if (IsVirtual())
    {
        json.WriteString("Size");
        json.WriteNumber(size);
        if (userData)
        {
            json.WriteString("CustomData");
            json.BeginString();
            json.ContinueString_Pointer(userData);
            json.EndString();
        }
    }
    else
    {
        ((VmaAllocation)userData)->PrintParameters(json);
    }

    json.EndObject();
}
6465
// Writes one free range as a JSON object with type FREE.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset, VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
6482
// Closes the "Suballocations" array opened by PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
}
6487#endif // VMA_STATS_STRING_ENABLED
6488#endif // _VMA_BLOCK_METADATA_FUNCTIONS
6489#endif // _VMA_BLOCK_METADATA
6490
6491#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY
6492// Before deleting object of this class remember to call 'Destroy()'
6493class VmaBlockBufferImageGranularity final
6494{
6495public:
6496 struct ValidationContext
6497 {
6498 const VkAllocationCallbacks* allocCallbacks;
6499 uint16_t* pageAllocs;
6500 };
6501
6502 VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity);
6503 ~VmaBlockBufferImageGranularity();
6504
6505 bool IsEnabled() const { return m_BufferImageGranularity > MAX_LOW_BUFFER_IMAGE_GRANULARITY; }
6506
6507 void Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size);
6508 // Before destroying object you must call free it's memory
6509 void Destroy(const VkAllocationCallbacks* pAllocationCallbacks);
6510
6511 void RoundupAllocRequest(VmaSuballocationType allocType,
6512 VkDeviceSize& inOutAllocSize,
6513 VkDeviceSize& inOutAllocAlignment) const;
6514
6515 bool CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset,
6516 VkDeviceSize allocSize,
6517 VkDeviceSize blockOffset,
6518 VkDeviceSize blockSize,
6519 VmaSuballocationType allocType) const;
6520
6521 void AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size);
6522 void FreePages(VkDeviceSize offset, VkDeviceSize size);
6523 void Clear();
6524
6525 ValidationContext StartValidation(const VkAllocationCallbacks* pAllocationCallbacks,
6526 bool isVirutal) const;
6527 bool Validate(ValidationContext& ctx, VkDeviceSize offset, VkDeviceSize size) const;
6528 bool FinishValidation(ValidationContext& ctx) const;
6529
6530private:
6531 static const uint16_t MAX_LOW_BUFFER_IMAGE_GRANULARITY = 256;
6532
6533 struct RegionInfo
6534 {
6535 uint8_t allocType;
6536 uint16_t allocCount;
6537 };
6538
6539 VkDeviceSize m_BufferImageGranularity;
6540 uint32_t m_RegionCount;
6541 RegionInfo* m_RegionInfo;
6542
6543 uint32_t GetStartPage(VkDeviceSize offset) const { return OffsetToPageIndex(offset & ~(m_BufferImageGranularity - 1)); }
6544 uint32_t GetEndPage(VkDeviceSize offset, VkDeviceSize size) const { return OffsetToPageIndex((offset + size - 1) & ~(m_BufferImageGranularity - 1)); }
6545
6546 uint32_t OffsetToPageIndex(VkDeviceSize offset) const;
6547 void AllocPage(RegionInfo& page, uint8_t allocType);
6548};
6549
6550#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS
// Starts with no page table; Init() allocates it if tracking is enabled.
VmaBlockBufferImageGranularity::VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity)
    : m_BufferImageGranularity(bufferImageGranularity),
    m_RegionCount(0),
    m_RegionInfo(VMA_NULL) {}
6555
VmaBlockBufferImageGranularity::~VmaBlockBufferImageGranularity()
{
    // The destructor has no allocator to free the page table with, so
    // Destroy() must have been called first.
    VMA_ASSERT(m_RegionInfo == VMA_NULL && "Free not called before destroying object!");
}
6560
6561void VmaBlockBufferImageGranularity::Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size)
6562{
6563 if (IsEnabled())
6564 {
6565 m_RegionCount = static_cast<uint32_t>(VmaDivideRoundingUp(size, m_BufferImageGranularity));
6566 m_RegionInfo = vma_new_array(pAllocationCallbacks, RegionInfo, m_RegionCount);
6567 memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo));
6568 }
6569}
6570
6571void VmaBlockBufferImageGranularity::Destroy(const VkAllocationCallbacks* pAllocationCallbacks)
6572{
6573 if (m_RegionInfo)
6574 {
6575 vma_delete_array(pAllocationCallbacks, m_RegionInfo, m_RegionCount);
6576 m_RegionInfo = VMA_NULL;
6577 }
6578}
6579
6580void VmaBlockBufferImageGranularity::RoundupAllocRequest(VmaSuballocationType allocType,
6581 VkDeviceSize& inOutAllocSize,
6582 VkDeviceSize& inOutAllocAlignment) const
6583{
6584 if (m_BufferImageGranularity > 1 &&
6585 m_BufferImageGranularity <= MAX_LOW_BUFFER_IMAGE_GRANULARITY)
6586 {
6587 if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
6588 allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
6589 allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
6590 {
6591 inOutAllocAlignment = VMA_MAX(inOutAllocAlignment, m_BufferImageGranularity);
6592 inOutAllocSize = VmaAlignUp(inOutAllocSize, m_BufferImageGranularity);
6593 }
6594 }
6595}
6596
// Checks whether an allocation at inOutAllocOffset of allocSize bytes would
// share its first or last granularity page with an allocation of a
// conflicting type. A conflict on the first page is resolved by aligning
// inOutAllocOffset up to the next page; a conflict on the last page, or
// running past the end of the block after realignment, fails.
// Returns true if the allocation CANNOT be placed here.
bool VmaBlockBufferImageGranularity::CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset,
    VkDeviceSize allocSize,
    VkDeviceSize blockOffset,
    VkDeviceSize blockSize,
    VmaSuballocationType allocType) const
{
    if (IsEnabled())
    {
        uint32_t startPage = GetStartPage(inOutAllocOffset);
        if (m_RegionInfo[startPage].allocCount > 0 &&
            VmaIsBufferImageGranularityConflict(static_cast<VmaSuballocationType>(m_RegionInfo[startPage].allocType), allocType))
        {
            // Skip to the next page; fail if the allocation then overruns the block.
            inOutAllocOffset = VmaAlignUp(inOutAllocOffset, m_BufferImageGranularity);
            if (blockSize < allocSize + inOutAllocOffset - blockOffset)
                return true;
            ++startPage;
        }
        uint32_t endPage = GetEndPage(inOutAllocOffset, allocSize);
        if (endPage != startPage &&
            m_RegionInfo[endPage].allocCount > 0 &&
            VmaIsBufferImageGranularityConflict(static_cast<VmaSuballocationType>(m_RegionInfo[endPage].allocType), allocType))
        {
            // Conflict on the last page cannot be fixed by realignment.
            return true;
        }
    }
    return false;
}
6624
6625void VmaBlockBufferImageGranularity::AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size)
6626{
6627 if (IsEnabled())
6628 {
6629 uint32_t startPage = GetStartPage(offset);
6630 AllocPage(m_RegionInfo[startPage], allocType);
6631
6632 uint32_t endPage = GetEndPage(offset, size);
6633 if (startPage != endPage)
6634 AllocPage(m_RegionInfo[endPage], allocType);
6635 }
6636}
6637
6638void VmaBlockBufferImageGranularity::FreePages(VkDeviceSize offset, VkDeviceSize size)
6639{
6640 if (IsEnabled())
6641 {
6642 uint32_t startPage = GetStartPage(offset);
6643 --m_RegionInfo[startPage].allocCount;
6644 if (m_RegionInfo[startPage].allocCount == 0)
6645 m_RegionInfo[startPage].allocType = VMA_SUBALLOCATION_TYPE_FREE;
6646 uint32_t endPage = GetEndPage(offset, size);
6647 if (startPage != endPage)
6648 {
6649 --m_RegionInfo[endPage].allocCount;
6650 if (m_RegionInfo[endPage].allocCount == 0)
6651 m_RegionInfo[endPage].allocType = VMA_SUBALLOCATION_TYPE_FREE;
6652 }
6653 }
6654}
6655
// Resets every page to "no allocations" without freeing the page table.
void VmaBlockBufferImageGranularity::Clear()
{
    if (m_RegionInfo)
        memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo));
}
6661
6662VmaBlockBufferImageGranularity::ValidationContext VmaBlockBufferImageGranularity::StartValidation(
6663 const VkAllocationCallbacks* pAllocationCallbacks, bool isVirutal) const
6664{
6665 ValidationContext ctx{ pAllocationCallbacks, VMA_NULL };
6666 if (!isVirutal && IsEnabled())
6667 {
6668 ctx.pageAllocs = vma_new_array(pAllocationCallbacks, uint16_t, m_RegionCount);
6669 memset(ctx.pageAllocs, 0, m_RegionCount * sizeof(uint16_t));
6670 }
6671 return ctx;
6672}
6673
// Counts one allocation on its first and last pages in ctx, and validates
// that the live page table also claims those pages are occupied.
bool VmaBlockBufferImageGranularity::Validate(ValidationContext& ctx,
    VkDeviceSize offset, VkDeviceSize size) const
{
    if (IsEnabled())
    {
        uint32_t start = GetStartPage(offset);
        ++ctx.pageAllocs[start];
        VMA_VALIDATE(m_RegionInfo[start].allocCount > 0);

        uint32_t end = GetEndPage(offset, size);
        if (start != end)
        {
            ++ctx.pageAllocs[end];
            VMA_VALIDATE(m_RegionInfo[end].allocCount > 0);
        }
    }
    return true;
}
6692
// Compares the counters gathered by Validate() against the live page table,
// then frees the scratch array owned by ctx.
bool VmaBlockBufferImageGranularity::FinishValidation(ValidationContext& ctx) const
{
    // Check proper page structure
    if (IsEnabled())
    {
        VMA_ASSERT(ctx.pageAllocs != VMA_NULL && "Validation context not initialized!");

        for (uint32_t page = 0; page < m_RegionCount; ++page)
        {
            VMA_VALIDATE(ctx.pageAllocs[page] == m_RegionInfo[page].allocCount);
        }
        vma_delete_array(ctx.allocCallbacks, ctx.pageAllocs, m_RegionCount);
        ctx.pageAllocs = VMA_NULL;
    }
    return true;
}
6709
// Maps a byte offset to its page index by shifting instead of dividing.
// NOTE(review): correct only if m_BufferImageGranularity is a power of two,
// so that VMA_BITSCAN_MSB(granularity) == log2(granularity) - confirm that
// callers guarantee this.
uint32_t VmaBlockBufferImageGranularity::OffsetToPageIndex(VkDeviceSize offset) const
{
    return static_cast<uint32_t>(offset >> VMA_BITSCAN_MSB(m_BufferImageGranularity));
}
6714
6715void VmaBlockBufferImageGranularity::AllocPage(RegionInfo& page, uint8_t allocType)
6716{
6717 // When current alloc type is free then it can be overriden by new type
6718 if (page.allocCount == 0 || (page.allocCount > 0 && page.allocType == VMA_SUBALLOCATION_TYPE_FREE))
6719 page.allocType = allocType;
6720
6721 ++page.allocCount;
6722}
6723#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS
6724#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY
6725
6726#if 0
6727#ifndef _VMA_BLOCK_METADATA_GENERIC
// Legacy free-list based block metadata implementation.
// NOTE(review): this class sits inside the '#if 0' region opened above, so it
// is currently compiled out.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    friend class VmaDefragmentationAlgorithm_Generic;
    friend class VmaDefragmentationAlgorithm_Fast;
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks,
        VkDeviceSize bufferImageGranularity, bool isVirtual);
    virtual ~VmaBlockMetadata_Generic() = default;

    size_t GetAllocationCount() const override { return m_Suballocations.size() - m_FreeCount; }
    VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; }
    bool IsEmpty() const override { return (m_Suballocations.size() == 1) && (m_FreeCount == 1); }
    void Free(VmaAllocHandle allocHandle) override { FreeSuballocation(FindAtOffset((VkDeviceSize)allocHandle - 1)); }
    // Handles are stored as offset + 1 so that 0 can serve as a null handle.
    VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; };

    void Init(VkDeviceSize size) override;
    bool Validate() const override;

    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
    void AddStatistics(VmaStatistics& inoutStats) const override;

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override;
#endif

    bool CreateAllocationRequest(
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) override;

    VkResult CheckCorruption(const void* pBlockData) override;

    void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        void* userData) override;

    void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
    void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
    VmaAllocHandle GetAllocationListBegin() const override;
    VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
    void Clear() override;
    void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
    void DebugLogAllAllocations() const override;

private:
    uint32_t m_FreeCount;
    VkDeviceSize m_SumFreeSize;
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free. Sorted by size, ascending.
    VmaVector<VmaSuballocationList::iterator, VmaStlAllocator<VmaSuballocationList::iterator>> m_FreeSuballocationsBySize;

    // Real allocations are padded to 16-byte multiples; virtual ones keep their exact size.
    VkDeviceSize AlignAllocationSize(VkDeviceSize size) const { return IsVirtual() ? size : VmaAlignUp(size, (VkDeviceSize)16); }

    VmaSuballocationList::iterator FindAtOffset(VkDeviceSize offset) const;
    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        VmaAllocHandle* pAllocHandle) const;

    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it is suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it is suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
6811
6812#ifndef _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS
// Starts with no suballocations; Init() creates the single initial free range.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks,
    VkDeviceSize bufferImageGranularity, bool isVirtual)
    : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(pAllocationCallbacks)) {}
6820
6822{
6824
6825 m_FreeCount = 1;
6826 m_SumFreeSize = size;
6827
6828 VmaSuballocation suballoc = {};
6829 suballoc.offset = 0;
6830 suballoc.size = size;
6831 suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
6832
6833 m_Suballocations.push_back(suballoc);
6834 m_FreeSuballocationsBySize.push_back(m_Suballocations.begin());
6835}
6836
// Full consistency check: walks all suballocations verifying contiguity,
// free/used invariants and debug margins, then checks that the sorted free
// list and the cached totals (m_FreeCount, m_SumFreeSize) agree with what
// was recomputed. Returns false (via VMA_VALIDATE) on the first violation.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    const VkDeviceSize debugMargin = GetDebugMargin();

    for (const auto& subAlloc : m_Suballocations)
    {
        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        VmaAllocation alloc = (VmaAllocation)subAlloc.userData;
        if (!IsVirtual())
        {
            VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
        }

        if (currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            ++freeSuballocationsToRegister;

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= debugMargin);
        }
        else
        {
            if (!IsVirtual())
            {
                // Handle encoding is offset + 1; see GetAllocationOffset().
                VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == subAlloc.offset + 1);
                VMA_VALIDATE(alloc->GetSize() == subAlloc.size);
            }

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(debugMargin == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    VkDeviceSize lastSize = 0;
    for (size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
6920
6921void VmaBlockMetadata_Generic::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
6922{
6923 const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
6924 inoutStats.statistics.blockCount++;
6925 inoutStats.statistics.blockBytes += GetSize();
6926
6927 for (const auto& suballoc : m_Suballocations)
6928 {
6929 if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
6930 VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
6931 else
6932 VmaAddDetailedStatisticsUnusedRange(inoutStats, suballoc.size);
6933 }
6934}
6935
// Accumulates this block's brief statistics (counts and byte totals) into inoutStats.
void VmaBlockMetadata_Generic::AddStatistics(VmaStatistics& inoutStats) const
{
    inoutStats.blockCount++;
    inoutStats.allocationCount += (uint32_t)m_Suballocations.size() - m_FreeCount;
    inoutStats.blockBytes += GetSize();
    inoutStats.allocationBytes += GetSize() - m_SumFreeSize;
}
6943
#if VMA_STATS_STRING_ENABLED
// Prints this block's totals followed by every suballocation as JSON.
// NOTE(review): passes 5 arguments to PrintDetailedMap_Begin while the base
// class version visible above takes 4 - this code is inside '#if 0'; confirm
// the intended signature before re-enabling it.
void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const
{
    PrintDetailedMap_Begin(json,
        m_SumFreeSize, // unusedBytes
        m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
        m_FreeCount, // unusedRangeCount
        mapRefCount);

    for (const auto& suballoc : m_Suballocations)
    {
        if (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            PrintDetailedMap_UnusedRange(json, suballoc.offset, suballoc.size);
        }
        else
        {
            PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
        }
    }

    PrintDetailedMap_End(json);
}
#endif // VMA_STATS_STRING_ENABLED
6968
6969bool VmaBlockMetadata_Generic::CreateAllocationRequest(
6970 VkDeviceSize allocSize,
6971 VkDeviceSize allocAlignment,
6972 bool upperAddress,
6973 VmaSuballocationType allocType,
6974 uint32_t strategy,
6975 VmaAllocationRequest* pAllocationRequest)
6976{
6977 VMA_ASSERT(allocSize > 0);
6978 VMA_ASSERT(!upperAddress);
6979 VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
6980 VMA_ASSERT(pAllocationRequest != VMA_NULL);
6981 VMA_HEAVY_ASSERT(Validate());
6982
6983 allocSize = AlignAllocationSize(allocSize);
6984
6985 pAllocationRequest->type = VmaAllocationRequestType::Normal;
6986 pAllocationRequest->size = allocSize;
6987
6988 const VkDeviceSize debugMargin = GetDebugMargin();
6989
6990 // There is not enough total free space in this block to fulfill the request: Early return.
6991 if (m_SumFreeSize < allocSize + debugMargin)
6992 {
6993 return false;
6994 }
6995
6996 // New algorithm, efficiently searching freeSuballocationsBySize.
6997 const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
6998 if (freeSuballocCount > 0)
6999 {
7000 if (strategy == 0 ||
7002 {
7003 // Find first free suballocation with size not less than allocSize + debugMargin.
7004 VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
7005 m_FreeSuballocationsBySize.data(),
7006 m_FreeSuballocationsBySize.data() + freeSuballocCount,
7007 allocSize + debugMargin,
7008 VmaSuballocationItemSizeLess());
7009 size_t index = it - m_FreeSuballocationsBySize.data();
7010 for (; index < freeSuballocCount; ++index)
7011 {
7012 if (CheckAllocation(
7013 allocSize,
7014 allocAlignment,
7015 allocType,
7016 m_FreeSuballocationsBySize[index],
7017 &pAllocationRequest->allocHandle))
7018 {
7019 pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7020 return true;
7021 }
7022 }
7023 }
7024 else if (strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
7025 {
7026 for (VmaSuballocationList::iterator it = m_Suballocations.begin();
7027 it != m_Suballocations.end();
7028 ++it)
7029 {
7030 if (it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
7031 allocSize,
7032 allocAlignment,
7033 allocType,
7034 it,
7035 &pAllocationRequest->allocHandle))
7036 {
7037 pAllocationRequest->item = it;
7038 return true;
7039 }
7040 }
7041 }
7042 else
7043 {
7045 // Search staring from biggest suballocations.
7046 for (size_t index = freeSuballocCount; index--; )
7047 {
7048 if (CheckAllocation(
7049 allocSize,
7050 allocAlignment,
7051 allocType,
7052 m_FreeSuballocationsBySize[index],
7053 &pAllocationRequest->allocHandle))
7054 {
7055 pAllocationRequest->item = m_FreeSuballocationsBySize[index];
7056 return true;
7057 }
7058 }
7059 }
7060 }
7061
7062 return false;
7063}
7064
// Validates the magic value written immediately after every used
// suballocation, to detect out-of-bounds writes into the block.
VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
{
    for (auto& suballoc : m_Suballocations)
    {
        if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
            {
                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
                // NOTE(review): upstream VMA returns VK_ERROR_UNKNOWN here;
                // VK_ERROR_UNKNOWN_COPY is presumably a project-local alias - verify.
                return VK_ERROR_UNKNOWN_COPY;
            }
        }
    }

    return VK_SUCCESS;
}
7081
7082void VmaBlockMetadata_Generic::Alloc(
7083 const VmaAllocationRequest& request,
7084 VmaSuballocationType type,
7085 void* userData)
7086{
7088 VMA_ASSERT(request.item != m_Suballocations.end());
7089 VmaSuballocation& suballoc = *request.item;
7090 // Given suballocation is a free block.
7091 VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7092
7093 // Given offset is inside this suballocation.
7094 VMA_ASSERT((VkDeviceSize)request.allocHandle - 1 >= suballoc.offset);
7095 const VkDeviceSize paddingBegin = (VkDeviceSize)request.allocHandle - suballoc.offset - 1;
7096 VMA_ASSERT(suballoc.size >= paddingBegin + request.size);
7097 const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - request.size;
7098
7099 // Unregister this free suballocation from m_FreeSuballocationsBySize and update
7100 // it to become used.
7101 UnregisterFreeSuballocation(request.item);
7102
7103 suballoc.offset = (VkDeviceSize)request.allocHandle - 1;
7104 suballoc.size = request.size;
7105 suballoc.type = type;
7106 suballoc.userData = userData;
7107
7108 // If there are any free bytes remaining at the end, insert new free suballocation after current one.
7109 if (paddingEnd)
7110 {
7111 VmaSuballocation paddingSuballoc = {};
7112 paddingSuballoc.offset = suballoc.offset + suballoc.size;
7113 paddingSuballoc.size = paddingEnd;
7114 paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7115 VmaSuballocationList::iterator next = request.item;
7116 ++next;
7117 const VmaSuballocationList::iterator paddingEndItem =
7118 m_Suballocations.insert(next, paddingSuballoc);
7119 RegisterFreeSuballocation(paddingEndItem);
7120 }
7121
7122 // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
7123 if (paddingBegin)
7124 {
7125 VmaSuballocation paddingSuballoc = {};
7126 paddingSuballoc.offset = suballoc.offset - paddingBegin;
7127 paddingSuballoc.size = paddingBegin;
7128 paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7129 const VmaSuballocationList::iterator paddingBeginItem =
7130 m_Suballocations.insert(request.item, paddingSuballoc);
7131 RegisterFreeSuballocation(paddingBeginItem);
7132 }
7133
7134 // Update totals.
7135 m_FreeCount = m_FreeCount - 1;
7136 if (paddingBegin > 0)
7137 {
7138 ++m_FreeCount;
7139 }
7140 if (paddingEnd > 0)
7141 {
7142 ++m_FreeCount;
7143 }
7144 m_SumFreeSize -= request.size;
7145}
7146
// Fills outInfo for the allocation identified by allocHandle (= offset + 1).
void VmaBlockMetadata_Generic::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
{
    outInfo.offset = (VkDeviceSize)allocHandle - 1;
    const VmaSuballocation& suballoc = *FindAtOffset(outInfo.offset);
    outInfo.size = suballoc.size;
    outInfo.pUserData = suballoc.userData;
}
7154
// Returns the user data stored with the allocation at the given handle (= offset + 1).
void* VmaBlockMetadata_Generic::GetAllocationUserData(VmaAllocHandle allocHandle) const
{
    return FindAtOffset((VkDeviceSize)allocHandle - 1)->userData;
}
7159
// Returns the handle of the first used suballocation, or VK_NULL_HANDLE if
// the block contains only free space.
VmaAllocHandle VmaBlockMetadata_Generic::GetAllocationListBegin() const
{
    if (IsEmpty())
        return VK_NULL_HANDLE;

    for (const auto& suballoc : m_Suballocations)
    {
        if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
            return (VmaAllocHandle)(suballoc.offset + 1);
    }
    // A non-empty block must hold at least one used suballocation.
    VMA_ASSERT(false && "Should contain at least 1 allocation!");
    return VK_NULL_HANDLE;
}
7173
// Returns the handle of the next used suballocation after prevAlloc,
// or VK_NULL_HANDLE if prevAlloc was the last one.
VmaAllocHandle VmaBlockMetadata_Generic::GetNextAllocation(VmaAllocHandle prevAlloc) const
{
    VmaSuballocationList::const_iterator prev = FindAtOffset((VkDeviceSize)prevAlloc - 1);

    for (VmaSuballocationList::const_iterator it = ++prev; it != m_Suballocations.end(); ++it)
    {
        if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
            return (VmaAllocHandle)(it->offset + 1);
    }
    return VK_NULL_HANDLE;
}
7185
// Discards all allocations and resets the block to a single free range
// covering its whole size. Only valid for virtual blocks (real allocations
// would be leaked - see the warning on the base class declaration).
void VmaBlockMetadata_Generic::Clear()
{
    const VkDeviceSize size = GetSize();

    VMA_ASSERT(IsVirtual());
    m_FreeCount = 1;
    m_SumFreeSize = size;
    m_Suballocations.clear();
    m_FreeSuballocationsBySize.clear();

    VmaSuballocation suballoc = {};
    suballoc.offset = 0;
    suballoc.size = size;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    m_Suballocations.push_back(suballoc);

    m_FreeSuballocationsBySize.push_back(m_Suballocations.begin());
}
7204
// Replaces the user data pointer stored with the given allocation.
void VmaBlockMetadata_Generic::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
{
    VmaSuballocation& suballoc = *FindAtOffset((VkDeviceSize)allocHandle - 1);
    suballoc.userData = userData;
}
7210
// Logs every used suballocation via the base-class leak logger.
void VmaBlockMetadata_Generic::DebugLogAllAllocations() const
{
    for (const auto& suballoc : m_Suballocations)
    {
        if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
            DebugLogAllocation(suballoc.offset, suballoc.size, suballoc.userData);
    }
}
7219
// Locates the suballocation that starts exactly at 'offset'.
// Fast-paths the first and last elements, then linearly scans the list from
// whichever end is estimated to be nearer (assuming roughly uniform sizes).
// Asserts if the offset matches no suballocation.
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FindAtOffset(VkDeviceSize offset) const
{
    VMA_HEAVY_ASSERT(!m_Suballocations.empty());
    const VkDeviceSize last = m_Suballocations.rbegin()->offset;
    if (last == offset)
        return m_Suballocations.rbegin().drop_const();
    const VkDeviceSize first = m_Suballocations.begin()->offset;
    if (first == offset)
        return m_Suballocations.begin().drop_const();

    const size_t suballocCount = m_Suballocations.size();
    // Average distance between consecutive offsets - used to guess which end is closer.
    const VkDeviceSize step = (last - first + m_Suballocations.begin()->size) / suballocCount;
    auto findSuballocation = [&](auto begin, auto end) -> VmaSuballocationList::iterator
    {
        for (auto suballocItem = begin;
            suballocItem != end;
            ++suballocItem)
        {
            if (suballocItem->offset == offset)
                return suballocItem.drop_const();
        }
        VMA_ASSERT(false && "Not found!");
        return m_Suballocations.end().drop_const();
    };
    // If requested offset is closer to the end of range, search from the end
    if (offset - first > suballocCount * step / 2)
    {
        return findSuballocation(m_Suballocations.rbegin(), m_Suballocations.rend());
    }
    return findSuballocation(m_Suballocations.begin(), m_Suballocations.end());
}
7251
7252bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
7253{
7254 VkDeviceSize lastSize = 0;
7255 for (size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
7256 {
7257 const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
7258
7259 VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
7260 VMA_VALIDATE(it->size >= lastSize);
7261 lastSize = it->size;
7262 }
7263 return true;
7264}
7265
// Checks whether an allocation of allocSize / allocAlignment / allocType fits
// into the FREE suballocation pointed to by suballocItem, honoring debug
// margins and bufferImageGranularity conflicts with neighboring
// suballocations. On success returns true and writes the resulting handle
// (encoded as offset + 1) to *pAllocHandle; on failure returns false and
// leaves *pAllocHandle unchanged.
bool VmaBlockMetadata_Generic::CheckAllocation(
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    VmaAllocHandle* pAllocHandle) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pAllocHandle != VMA_NULL);

    const VkDeviceSize debugMargin = GetDebugMargin();
    const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();

    const VmaSuballocation& suballoc = *suballocItem;
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

    // Size of this suballocation is too small for this request: Early return.
    if (suballoc.size < allocSize)
    {
        return false;
    }

    // Start from offset equal to beginning of this suballocation.
    // NOTE(review): GetDebugMargin() is added here for every non-first item
    // and debugMargin is added again just below — the margin appears to be
    // applied twice for non-first suballocations. Confirm this is intended.
    VkDeviceSize offset = suballoc.offset + (suballocItem == m_Suballocations.cbegin() ? 0 : GetDebugMargin());

    // Apply debugMargin from the end of previous alloc.
    if (debugMargin > 0)
    {
        offset += debugMargin;
    }

    // Apply alignment.
    offset = VmaAlignUp(offset, allocAlignment);

    // Check previous suballocations for BufferImageGranularity conflicts.
    // Make bigger alignment if necessary.
    if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment)
    {
        bool bufferImageGranularityConflict = false;
        VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
        // Walk backwards only while the neighbor shares a granularity page
        // with the candidate offset.
        while (prevSuballocItem != m_Suballocations.cbegin())
        {
            --prevSuballocItem;
            const VmaSuballocation& prevSuballoc = *prevSuballocItem;
            if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, offset, bufferImageGranularity))
            {
                if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                {
                    bufferImageGranularityConflict = true;
                    break;
                }
            }
            else
                // Already on previous page.
                break;
        }
        if (bufferImageGranularityConflict)
        {
            // Push the allocation to the next granularity page to resolve
            // the linear-vs-optimal resource conflict.
            offset = VmaAlignUp(offset, bufferImageGranularity);
        }
    }

    // Calculate padding at the beginning based on current offset.
    const VkDeviceSize paddingBegin = offset - suballoc.offset;

    // Fail if requested size plus margin after is bigger than size of this suballocation.
    if (paddingBegin + allocSize + debugMargin > suballoc.size)
    {
        return false;
    }

    // Check next suballocations for BufferImageGranularity conflicts.
    // If conflict exists, allocation cannot be made here.
    if (allocSize % bufferImageGranularity || offset % bufferImageGranularity)
    {
        VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
        ++nextSuballocItem;
        while (nextSuballocItem != m_Suballocations.cend())
        {
            const VmaSuballocation& nextSuballoc = *nextSuballocItem;
            if (VmaBlocksOnSamePage(offset, allocSize, nextSuballoc.offset, bufferImageGranularity))
            {
                if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                {
                    return false;
                }
            }
            else
            {
                // Already on next page.
                break;
            }
            ++nextSuballocItem;
        }
    }

    // Handle is offset + 1 so that 0 can serve as a null handle value.
    *pAllocHandle = (VmaAllocHandle)(offset + 1);
    // All tests passed: Success. pAllocHandle is already filled.
    return true;
}
7368
7369void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
7370{
7371 VMA_ASSERT(item != m_Suballocations.end());
7372 VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7373
7374 VmaSuballocationList::iterator nextItem = item;
7375 ++nextItem;
7376 VMA_ASSERT(nextItem != m_Suballocations.end());
7377 VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
7378
7379 item->size += nextItem->size;
7380 --m_FreeCount;
7381 m_Suballocations.erase(nextItem);
7382}
7383
// Marks the given suballocation as free, updates m_FreeCount/m_SumFreeSize,
// merges it with adjacent free neighbors, and returns an iterator to the
// resulting (possibly enlarged) free suballocation, freshly registered in
// m_FreeSuballocationsBySize. The unregister/merge/register ordering below is
// significant: entries must leave the by-size vector before their size changes.
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.userData = VMA_NULL;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if ((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if (suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if (prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if (mergeWithNext)
    {
        // Remove the successor from the by-size vector before erasing it.
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if (mergeWithPrev)
    {
        // prevItem grows to cover suballocItem; it must be re-registered
        // because its size (the by-size sort key) changes.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
7435
7436void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
7437{
7438 VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
7439 VMA_ASSERT(item->size > 0);
7440
7441 // You may want to enable this validation at the beginning or at the end of
7442 // this function, depending on what do you want to check.
7443 VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7444
7445 if (m_FreeSuballocationsBySize.empty())
7446 {
7447 m_FreeSuballocationsBySize.push_back(item);
7448 }
7449 else
7450 {
7451 VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
7452 }
7453
7454 //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
7455}
7456
// Removes the given FREE suballocation from m_FreeSuballocationsBySize.
// Binary-searches for the first entry whose size is not less than item's,
// then scans forward through the run of equal-sized entries to find the
// exact iterator. Asserts if the item is not present.
void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
        m_FreeSuballocationsBySize.data(),
        m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
        item,
        VmaSuballocationItemSizeLess());
    for (size_t index = it - m_FreeSuballocationsBySize.data();
        index < m_FreeSuballocationsBySize.size();
        ++index)
    {
        if (m_FreeSuballocationsBySize[index] == item)
        {
            VmaVectorRemove(m_FreeSuballocationsBySize, index);
            return;
        }
        // Walking past the run of equal sizes means the item is missing.
        VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    }
    VMA_ASSERT(0 && "Not found.");

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
7486#endif // _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS
7487#endif // _VMA_BLOCK_METADATA_GENERIC
7488#endif // #if 0
7489
7490#ifndef _VMA_BLOCK_METADATA_LINEAR
7491/*
7492Allocations and their references in internal data structure look like this:
7493
7494if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
7495
7496 0 +-------+
7497 | |
7498 | |
7499 | |
7500 +-------+
7501 | Alloc | 1st[m_1stNullItemsBeginCount]
7502 +-------+
7503 | Alloc | 1st[m_1stNullItemsBeginCount + 1]
7504 +-------+
7505 | ... |
7506 +-------+
7507 | Alloc | 1st[1st.size() - 1]
7508 +-------+
7509 | |
7510 | |
7511 | |
7512GetSize() +-------+
7513
7514if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
7515
7516 0 +-------+
7517 | Alloc | 2nd[0]
7518 +-------+
7519 | Alloc | 2nd[1]
7520 +-------+
7521 | ... |
7522 +-------+
7523 | Alloc | 2nd[2nd.size() - 1]
7524 +-------+
7525 | |
7526 | |
7527 | |
7528 +-------+
7529 | Alloc | 1st[m_1stNullItemsBeginCount]
7530 +-------+
7531 | Alloc | 1st[m_1stNullItemsBeginCount + 1]
7532 +-------+
7533 | ... |
7534 +-------+
7535 | Alloc | 1st[1st.size() - 1]
7536 +-------+
7537 | |
7538GetSize() +-------+
7539
7540if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
7541
7542 0 +-------+
7543 | |
7544 | |
7545 | |
7546 +-------+
7547 | Alloc | 1st[m_1stNullItemsBeginCount]
7548 +-------+
7549 | Alloc | 1st[m_1stNullItemsBeginCount + 1]
7550 +-------+
7551 | ... |
7552 +-------+
7553 | Alloc | 1st[1st.size() - 1]
7554 +-------+
7555 | |
7556 | |
7557 | |
7558 +-------+
7559 | Alloc | 2nd[2nd.size() - 1]
7560 +-------+
7561 | ... |
7562 +-------+
7563 | Alloc | 2nd[1]
7564 +-------+
7565 | Alloc | 2nd[0]
7566GetSize() +-------+
7567
7568*/
// Block metadata implementing the linear (ring-buffer / stack / double-stack)
// allocation algorithm. See the diagram above for the data layout in each
// SECOND_VECTOR_MODE.
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,
        VkDeviceSize bufferImageGranularity, bool isVirtual);
    virtual ~VmaBlockMetadata_Linear() = default;

    VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; }
    bool IsEmpty() const override { return GetAllocationCount() == 0; }
    // Alloc handles are encoded as offset + 1, so 0 can serve as a null handle.
    VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; };

    void Init(VkDeviceSize size) override;
    bool Validate() const override;
    size_t GetAllocationCount() const override;
    size_t GetFreeRegionsCount() const override;

    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
    void AddStatistics(VmaStatistics& inoutStats) const override;

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json) const override;
#endif

    bool CreateAllocationRequest(
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) override;

    VkResult CheckCorruption(const void* pBlockData) override;

    void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        void* userData) override;

    void Free(VmaAllocHandle allocHandle) override;
    void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
    void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
    VmaAllocHandle GetAllocationListBegin() const override;
    VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
    VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
    void Clear() override;
    void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
    void DebugLogAllAllocations() const override;

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;
    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    // Resolve the ping-pong indirection: which physical vector is currently 1st/2nd.
    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    VmaSuballocation& FindSuballocation(VkDeviceSize offset) const;
    bool ShouldCompact1st() const;
    void CleanupAfterFree();

    bool CreateAllocationRequest_LowerAddress(
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);
    bool CreateAllocationRequest_UpperAddress(
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);
};
7677
7678#ifndef _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS
// Constructs empty linear metadata: no suballocations, 2nd vector inactive.
// m_SumFreeSize stays 0 until Init() records the block size.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,
    VkDeviceSize bufferImageGranularity, bool isVirtual)
    : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
    m_SumFreeSize(0),
    // Both ping-pong vectors share the block's allocation callbacks.
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0) {}
7690
7692{
7694 m_SumFreeSize = size;
7695}
7696
// Verifies all internal invariants of the linear metadata: vector/mode
// consistency, null-item bookkeeping, strictly increasing offsets across the
// address-ordered walk of both vectors, per-allocation handle/size agreement,
// and that m_SumFreeSize matches block size minus the sum of used sizes.
// Returns true on success; VMA_VALIDATE returns false on the first failure.
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if (!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].type != VMA_SUBALLOCATION_TYPE_FREE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().type != VMA_SUBALLOCATION_TYPE_FREE);
    }
    if (!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().type != VMA_SUBALLOCATION_TYPE_FREE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    const VkDeviceSize debugMargin = GetDebugMargin();
    // 'offset' tracks the minimum allowed offset for the next suballocation
    // as we walk the block in increasing address order.
    VkDeviceSize offset = 0;

    // In ring-buffer mode the 2nd vector occupies the low-address range,
    // so it is validated first.
    if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for (size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
            if (!IsVirtual())
            {
                VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
            }
            VMA_VALIDATE(suballoc.offset >= offset);

            if (!currFree)
            {
                if (!IsVirtual())
                {
                    // Handle is encoded as offset + 1.
                    VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
                    VMA_VALIDATE(alloc->GetSize() == suballoc.size);
                }
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + debugMargin;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Leading null items of the 1st vector must all be free placeholders.
    for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.userData == VMA_NULL);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
        if (!IsVirtual())
        {
            VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
        }
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): always true here since i starts at
        // m_1stNullItemsBeginCount — presumably a leftover from an earlier
        // loop form; harmless.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if (!currFree)
        {
            if (!IsVirtual())
            {
                VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
                VMA_VALIDATE(alloc->GetSize() == suballoc.size);
            }
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + debugMargin;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // In double-stack mode the 2nd vector occupies the high-address range;
    // highest index holds the lowest offset, so iterate indices downward to
    // keep the walk in increasing address order.
    if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for (size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
            if (!IsVirtual())
            {
                VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
            }
            VMA_VALIDATE(suballoc.offset >= offset);

            if (!currFree)
            {
                if (!IsVirtual())
                {
                    VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
                    VMA_VALIDATE(alloc->GetSize() == suballoc.size);
                }
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + debugMargin;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
7845
7846size_t VmaBlockMetadata_Linear::GetAllocationCount() const
7847{
7848 return AccessSuballocations1st().size() - m_1stNullItemsBeginCount - m_1stNullItemsMiddleCount +
7849 AccessSuballocations2nd().size() - m_2ndNullItemsCount;
7850}
7851
7852size_t VmaBlockMetadata_Linear::GetFreeRegionsCount() const
7853{
7854 // Function only used for defragmentation, which is disabled for this algorithm
7855 VMA_ASSERT(0);
7856 return SIZE_MAX;
7857}
7858
// Adds this block's detailed statistics to inoutStats: block count/bytes, one
// entry per live allocation, and one entry per contiguous unused range. The
// block is walked in increasing address order in up to three phases:
// (1) 2nd vector in ring-buffer mode (low addresses), (2) the 1st vector,
// (3) 2nd vector in double-stack mode (high addresses, iterated by
// descending index since the highest index holds the lowest offset).
void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
{
    const VkDeviceSize size = GetSize();
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    inoutStats.statistics.blockCount++;
    inoutStats.statistics.blockBytes += size;

    // End offset of the last processed allocation; start of the next
    // potential unused range.
    VkDeviceSize lastOffset = 0;

    if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        // The ring-buffer 2nd vector ends where the first used 1st item begins.
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while (lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAllocIndex to the end.
            while (nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if (nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if (lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                if (lastOffset < freeSpace2ndTo1stEnd)
                {
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    // The 1st vector's range ends at the bottom of the upper stack in
    // double-stack mode, otherwise at the end of the block.
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while (lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while (nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if (nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if (lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            // There is free space from lastOffset to freeSpace1stTo2ndEnd.
            if (lastOffset < freeSpace1stTo2ndEnd)
            {
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // SIZE_MAX (index underflow) marks running past the start of the vector.
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while (lastOffset < size)
        {
            // Find next non-null allocation or move nextAllocIndex to the end.
            while (nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if (nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if (lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                // There is free space from lastOffset to size.
                if (lastOffset < size)
                {
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }
}
8019
8020void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const
8021{
8022 const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8023 const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8024 const VkDeviceSize size = GetSize();
8025 const size_t suballoc1stCount = suballocations1st.size();
8026 const size_t suballoc2ndCount = suballocations2nd.size();
8027
8028 inoutStats.blockCount++;
8029 inoutStats.blockBytes += size;
8030 inoutStats.allocationBytes += size - m_SumFreeSize;
8031
8032 VkDeviceSize lastOffset = 0;
8033
8034 if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8035 {
8036 const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8037 size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
8038 while (lastOffset < freeSpace2ndTo1stEnd)
8039 {
8040 // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8041 while (nextAlloc2ndIndex < suballoc2ndCount &&
8042 suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
8043 {
8044 ++nextAlloc2ndIndex;
8045 }
8046
8047 // Found non-null allocation.
8048 if (nextAlloc2ndIndex < suballoc2ndCount)
8049 {
8050 const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8051
8052 // 1. Process free space before this allocation.
8053 if (lastOffset < suballoc.offset)
8054 {
8055 // There is free space from lastOffset to suballoc.offset.
8056 const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8057 }
8058
8059 // 2. Process this allocation.
8060 // There is allocation with suballoc.offset, suballoc.size.
8061 ++inoutStats.allocationCount;
8062
8063 // 3. Prepare for next iteration.
8064 lastOffset = suballoc.offset + suballoc.size;
8065 ++nextAlloc2ndIndex;
8066 }
8067 // We are at the end.
8068 else
8069 {
8070 if (lastOffset < freeSpace2ndTo1stEnd)
8071 {
8072 // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8073 const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8074 }
8075
8076 // End of loop.
8077 lastOffset = freeSpace2ndTo1stEnd;
8078 }
8079 }
8080 }
8081
8082 size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8083 const VkDeviceSize freeSpace1stTo2ndEnd =
8084 m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8085 while (lastOffset < freeSpace1stTo2ndEnd)
8086 {
8087 // Find next non-null allocation or move nextAllocIndex to the end.
8088 while (nextAlloc1stIndex < suballoc1stCount &&
8089 suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
8090 {
8091 ++nextAlloc1stIndex;
8092 }
8093
8094 // Found non-null allocation.
8095 if (nextAlloc1stIndex < suballoc1stCount)
8096 {
8097 const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8098
8099 // 1. Process free space before this allocation.
8100 if (lastOffset < suballoc.offset)
8101 {
8102 // There is free space from lastOffset to suballoc.offset.
8103 const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8104 }
8105
8106 // 2. Process this allocation.
8107 // There is allocation with suballoc.offset, suballoc.size.
8108 ++inoutStats.allocationCount;
8109
8110 // 3. Prepare for next iteration.
8111 lastOffset = suballoc.offset + suballoc.size;
8112 ++nextAlloc1stIndex;
8113 }
8114 // We are at the end.
8115 else
8116 {
8117 if (lastOffset < freeSpace1stTo2ndEnd)
8118 {
8119 // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8120 const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8121 }
8122
8123 // End of loop.
8124 lastOffset = freeSpace1stTo2ndEnd;
8125 }
8126 }
8127
8128 if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8129 {
8130 size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8131 while (lastOffset < size)
8132 {
8133 // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8134 while (nextAlloc2ndIndex != SIZE_MAX &&
8135 suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
8136 {
8137 --nextAlloc2ndIndex;
8138 }
8139
8140 // Found non-null allocation.
8141 if (nextAlloc2ndIndex != SIZE_MAX)
8142 {
8143 const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8144
8145 // 1. Process free space before this allocation.
8146 if (lastOffset < suballoc.offset)
8147 {
8148 // There is free space from lastOffset to suballoc.offset.
8149 const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8150 }
8151
8152 // 2. Process this allocation.
8153 // There is allocation with suballoc.offset, suballoc.size.
8154 ++inoutStats.allocationCount;
8155
8156 // 3. Prepare for next iteration.
8157 lastOffset = suballoc.offset + suballoc.size;
8158 --nextAlloc2ndIndex;
8159 }
8160 // We are at the end.
8161 else
8162 {
8163 if (lastOffset < size)
8164 {
8165 // There is free space from lastOffset to size.
8166 const VkDeviceSize unusedRangeSize = size - lastOffset;
8167 }
8168
8169 // End of loop.
8170 lastOffset = size;
8171 }
8172 }
8173 }
8174}
8175
8176#if VMA_STATS_STRING_ENABLED
8177void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
8178{
8179 const VkDeviceSize size = GetSize();
8180 const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8181 const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8182 const size_t suballoc1stCount = suballocations1st.size();
8183 const size_t suballoc2ndCount = suballocations2nd.size();
8184
8185 // FIRST PASS
8186
8187 size_t unusedRangeCount = 0;
8188 VkDeviceSize usedBytes = 0;
8189
8190 VkDeviceSize lastOffset = 0;
8191
8192 size_t alloc2ndCount = 0;
8193 if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8194 {
8195 const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8196 size_t nextAlloc2ndIndex = 0;
8197 while (lastOffset < freeSpace2ndTo1stEnd)
8198 {
8199 // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8200 while (nextAlloc2ndIndex < suballoc2ndCount &&
8201 suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
8202 {
8203 ++nextAlloc2ndIndex;
8204 }
8205
8206 // Found non-null allocation.
8207 if (nextAlloc2ndIndex < suballoc2ndCount)
8208 {
8209 const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8210
8211 // 1. Process free space before this allocation.
8212 if (lastOffset < suballoc.offset)
8213 {
8214 // There is free space from lastOffset to suballoc.offset.
8215 ++unusedRangeCount;
8216 }
8217
8218 // 2. Process this allocation.
8219 // There is allocation with suballoc.offset, suballoc.size.
8220 ++alloc2ndCount;
8221 usedBytes += suballoc.size;
8222
8223 // 3. Prepare for next iteration.
8224 lastOffset = suballoc.offset + suballoc.size;
8225 ++nextAlloc2ndIndex;
8226 }
8227 // We are at the end.
8228 else
8229 {
8230 if (lastOffset < freeSpace2ndTo1stEnd)
8231 {
8232 // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8233 ++unusedRangeCount;
8234 }
8235
8236 // End of loop.
8237 lastOffset = freeSpace2ndTo1stEnd;
8238 }
8239 }
8240 }
8241
8242 size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8243 size_t alloc1stCount = 0;
8244 const VkDeviceSize freeSpace1stTo2ndEnd =
8245 m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8246 while (lastOffset < freeSpace1stTo2ndEnd)
8247 {
8248 // Find next non-null allocation or move nextAllocIndex to the end.
8249 while (nextAlloc1stIndex < suballoc1stCount &&
8250 suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
8251 {
8252 ++nextAlloc1stIndex;
8253 }
8254
8255 // Found non-null allocation.
8256 if (nextAlloc1stIndex < suballoc1stCount)
8257 {
8258 const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8259
8260 // 1. Process free space before this allocation.
8261 if (lastOffset < suballoc.offset)
8262 {
8263 // There is free space from lastOffset to suballoc.offset.
8264 ++unusedRangeCount;
8265 }
8266
8267 // 2. Process this allocation.
8268 // There is allocation with suballoc.offset, suballoc.size.
8269 ++alloc1stCount;
8270 usedBytes += suballoc.size;
8271
8272 // 3. Prepare for next iteration.
8273 lastOffset = suballoc.offset + suballoc.size;
8274 ++nextAlloc1stIndex;
8275 }
8276 // We are at the end.
8277 else
8278 {
8279 if (lastOffset < size)
8280 {
8281 // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8282 ++unusedRangeCount;
8283 }
8284
8285 // End of loop.
8286 lastOffset = freeSpace1stTo2ndEnd;
8287 }
8288 }
8289
8290 if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8291 {
8292 size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8293 while (lastOffset < size)
8294 {
8295 // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8296 while (nextAlloc2ndIndex != SIZE_MAX &&
8297 suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
8298 {
8299 --nextAlloc2ndIndex;
8300 }
8301
8302 // Found non-null allocation.
8303 if (nextAlloc2ndIndex != SIZE_MAX)
8304 {
8305 const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8306
8307 // 1. Process free space before this allocation.
8308 if (lastOffset < suballoc.offset)
8309 {
8310 // There is free space from lastOffset to suballoc.offset.
8311 ++unusedRangeCount;
8312 }
8313
8314 // 2. Process this allocation.
8315 // There is allocation with suballoc.offset, suballoc.size.
8316 ++alloc2ndCount;
8317 usedBytes += suballoc.size;
8318
8319 // 3. Prepare for next iteration.
8320 lastOffset = suballoc.offset + suballoc.size;
8321 --nextAlloc2ndIndex;
8322 }
8323 // We are at the end.
8324 else
8325 {
8326 if (lastOffset < size)
8327 {
8328 // There is free space from lastOffset to size.
8329 ++unusedRangeCount;
8330 }
8331
8332 // End of loop.
8333 lastOffset = size;
8334 }
8335 }
8336 }
8337
8338 const VkDeviceSize unusedBytes = size - usedBytes;
8339 PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
8340
8341 // SECOND PASS
8342 lastOffset = 0;
8343
8344 if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8345 {
8346 const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8347 size_t nextAlloc2ndIndex = 0;
8348 while (lastOffset < freeSpace2ndTo1stEnd)
8349 {
8350 // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8351 while (nextAlloc2ndIndex < suballoc2ndCount &&
8352 suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
8353 {
8354 ++nextAlloc2ndIndex;
8355 }
8356
8357 // Found non-null allocation.
8358 if (nextAlloc2ndIndex < suballoc2ndCount)
8359 {
8360 const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8361
8362 // 1. Process free space before this allocation.
8363 if (lastOffset < suballoc.offset)
8364 {
8365 // There is free space from lastOffset to suballoc.offset.
8366 const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8367 PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8368 }
8369
8370 // 2. Process this allocation.
8371 // There is allocation with suballoc.offset, suballoc.size.
8372 PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
8373
8374 // 3. Prepare for next iteration.
8375 lastOffset = suballoc.offset + suballoc.size;
8376 ++nextAlloc2ndIndex;
8377 }
8378 // We are at the end.
8379 else
8380 {
8381 if (lastOffset < freeSpace2ndTo1stEnd)
8382 {
8383 // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8384 const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8385 PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8386 }
8387
8388 // End of loop.
8389 lastOffset = freeSpace2ndTo1stEnd;
8390 }
8391 }
8392 }
8393
8394 nextAlloc1stIndex = m_1stNullItemsBeginCount;
8395 while (lastOffset < freeSpace1stTo2ndEnd)
8396 {
8397 // Find next non-null allocation or move nextAllocIndex to the end.
8398 while (nextAlloc1stIndex < suballoc1stCount &&
8399 suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
8400 {
8401 ++nextAlloc1stIndex;
8402 }
8403
8404 // Found non-null allocation.
8405 if (nextAlloc1stIndex < suballoc1stCount)
8406 {
8407 const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8408
8409 // 1. Process free space before this allocation.
8410 if (lastOffset < suballoc.offset)
8411 {
8412 // There is free space from lastOffset to suballoc.offset.
8413 const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8414 PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8415 }
8416
8417 // 2. Process this allocation.
8418 // There is allocation with suballoc.offset, suballoc.size.
8419 PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
8420
8421 // 3. Prepare for next iteration.
8422 lastOffset = suballoc.offset + suballoc.size;
8423 ++nextAlloc1stIndex;
8424 }
8425 // We are at the end.
8426 else
8427 {
8428 if (lastOffset < freeSpace1stTo2ndEnd)
8429 {
8430 // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8431 const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8432 PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8433 }
8434
8435 // End of loop.
8436 lastOffset = freeSpace1stTo2ndEnd;
8437 }
8438 }
8439
8440 if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8441 {
8442 size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8443 while (lastOffset < size)
8444 {
8445 // Find next non-null allocation or move nextAlloc2ndIndex to the end.
8446 while (nextAlloc2ndIndex != SIZE_MAX &&
8447 suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
8448 {
8449 --nextAlloc2ndIndex;
8450 }
8451
8452 // Found non-null allocation.
8453 if (nextAlloc2ndIndex != SIZE_MAX)
8454 {
8455 const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8456
8457 // 1. Process free space before this allocation.
8458 if (lastOffset < suballoc.offset)
8459 {
8460 // There is free space from lastOffset to suballoc.offset.
8461 const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8462 PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8463 }
8464
8465 // 2. Process this allocation.
8466 // There is allocation with suballoc.offset, suballoc.size.
8467 PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
8468
8469 // 3. Prepare for next iteration.
8470 lastOffset = suballoc.offset + suballoc.size;
8471 --nextAlloc2ndIndex;
8472 }
8473 // We are at the end.
8474 else
8475 {
8476 if (lastOffset < size)
8477 {
8478 // There is free space from lastOffset to size.
8479 const VkDeviceSize unusedRangeSize = size - lastOffset;
8480 PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
8481 }
8482
8483 // End of loop.
8484 lastOffset = size;
8485 }
8486 }
8487 }
8488
8489 PrintDetailedMap_End(json);
8490}
8491#endif // VMA_STATS_STRING_ENABLED
8492
8493bool VmaBlockMetadata_Linear::CreateAllocationRequest(
8494 VkDeviceSize allocSize,
8495 VkDeviceSize allocAlignment,
8496 bool upperAddress,
8497 VmaSuballocationType allocType,
8498 uint32_t strategy,
8499 VmaAllocationRequest* pAllocationRequest)
8500{
8501 VMA_ASSERT(allocSize > 0);
8502 VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
8503 VMA_ASSERT(pAllocationRequest != VMA_NULL);
8504 VMA_HEAVY_ASSERT(Validate());
8505 pAllocationRequest->size = allocSize;
8506 return upperAddress ?
8507 CreateAllocationRequest_UpperAddress(
8508 allocSize, allocAlignment, allocType, strategy, pAllocationRequest) :
8509 CreateAllocationRequest_LowerAddress(
8510 allocSize, allocAlignment, allocType, strategy, pAllocationRequest);
8511}
8512
8513VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
8514{
8515 VMA_ASSERT(!IsVirtual());
8516 SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8517 for (size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
8518 {
8519 const VmaSuballocation& suballoc = suballocations1st[i];
8520 if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8521 {
8522 if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
8523 {
8524 VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8525 return VK_ERROR_UNKNOWN_COPY;
8526 }
8527 }
8528 }
8529
8530 SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8531 for (size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
8532 {
8533 const VmaSuballocation& suballoc = suballocations2nd[i];
8534 if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
8535 {
8536 if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
8537 {
8538 VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
8539 return VK_ERROR_UNKNOWN_COPY;
8540 }
8541 }
8542 }
8543
8544 return VK_SUCCESS;
8545}
8546
// Commits a previously computed allocation request: inserts the new
// suballocation into the appropriate vector (1st or 2nd) and updates the
// free-size bookkeeping. May switch the 2nd-vector mode on first use.
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    void* userData)
{
    // allocHandle encodes offset + 1 so that 0 can serve as a null handle.
    const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1;
    const VmaSuballocation newSuballoc = { offset, request.size, userData, type };

    switch (request.type)
    {
    case VmaAllocationRequestType::UpperAddress:
    {
        // Upper stack grows downward in the 2nd vector; incompatible with ring-buffer use.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        // First upper-address allocation locks the block into double-stack mode.
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    break;
    case VmaAllocationRequestType::EndOf1st:
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // New allocation must not overlap the current last item of the 1st vector.
        VMA_ASSERT(suballocations1st.empty() ||
            offset >= suballocations1st.back().offset + suballocations1st.back().size);
        // Check if it fits before the end of the block.
        VMA_ASSERT(offset + request.size <= GetSize());

        suballocations1st.push_back(newSuballoc);
    }
    break;
    case VmaAllocationRequestType::EndOf2nd:
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();
        // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
        VMA_ASSERT(!suballocations1st.empty() &&
            offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset);
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

        switch (m_2ndVectorMode)
        {
        case SECOND_VECTOR_EMPTY:
            // First allocation from second part ring buffer.
            VMA_ASSERT(suballocations2nd.empty());
            m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
            break;
        case SECOND_VECTOR_RING_BUFFER:
            // 2-part ring buffer is already started.
            VMA_ASSERT(!suballocations2nd.empty());
            break;
        case SECOND_VECTOR_DOUBLE_STACK:
            VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
            break;
        default:
            VMA_ASSERT(0);
        }

        suballocations2nd.push_back(newSuballoc);
    }
    break;
    default:
        VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    }

    // Account for the newly occupied space.
    m_SumFreeSize -= newSuballoc.size;
}
8613
// Frees the allocation identified by allocHandle (which encodes offset + 1).
// Fast paths handle the first item of the 1st vector and the last item of
// either vector; otherwise a binary search locates the item, which is then
// marked free in place. Ends by compacting/normalizing via CleanupAfterFree().
void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    VkDeviceSize offset = (VkDeviceSize)allocHandle - 1;

    if (!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if (firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.userData = VMA_NULL;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if (lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if (lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Key used for binary search below - only offset takes part in comparison.
    VmaSuballocation refSuballoc;
    refSuballoc.offset = offset;
    // Rest of members stays uninitialized intentionally for better performance.

    // Item from the middle of 1st vector.
    {
        // 1st vector is sorted ascending by offset; skip the leading null items.
        const SuballocationVectorType::iterator it = VmaBinaryFindSorted(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc,
            VmaSuballocationOffsetLess());
        if (it != suballocations1st.end())
        {
            // Mark free in place; the slot is reclaimed later by CleanupAfterFree().
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->userData = VMA_NULL;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        // Ring-buffer mode keeps the 2nd vector sorted ascending by offset,
        // double-stack mode keeps it sorted descending.
        const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
        if (it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->userData = VMA_NULL;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
8702
8703void VmaBlockMetadata_Linear::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
8704{
8705 outInfo.offset = (VkDeviceSize)allocHandle - 1;
8706 VmaSuballocation& suballoc = FindSuballocation(outInfo.offset);
8707 outInfo.size = suballoc.size;
8708 outInfo.pUserData = suballoc.userData;
8709}
8710
8711void* VmaBlockMetadata_Linear::GetAllocationUserData(VmaAllocHandle allocHandle) const
8712{
8713 return FindSuballocation((VkDeviceSize)allocHandle - 1).userData;
8714}
8715
8716VmaAllocHandle VmaBlockMetadata_Linear::GetAllocationListBegin() const
8717{
8718 // Function only used for defragmentation, which is disabled for this algorithm
8719 VMA_ASSERT(0);
8720 return VK_NULL_HANDLE;
8721}
8722
8723VmaAllocHandle VmaBlockMetadata_Linear::GetNextAllocation(VmaAllocHandle prevAlloc) const
8724{
8725 // Function only used for defragmentation, which is disabled for this algorithm
8726 VMA_ASSERT(0);
8727 return VK_NULL_HANDLE;
8728}
8729
8730VkDeviceSize VmaBlockMetadata_Linear::GetNextFreeRegionSize(VmaAllocHandle alloc) const
8731{
8732 // Function only used for defragmentation, which is disabled for this algorithm
8733 VMA_ASSERT(0);
8734 return 0;
8735}
8736
8737void VmaBlockMetadata_Linear::Clear()
8738{
8739 m_SumFreeSize = GetSize();
8740 m_Suballocations0.clear();
8741 m_Suballocations1.clear();
8742 // Leaving m_1stVectorIndex unchanged - it doesn't matter.
8743 m_2ndVectorMode = SECOND_VECTOR_EMPTY;
8744 m_1stNullItemsBeginCount = 0;
8745 m_1stNullItemsMiddleCount = 0;
8746 m_2ndNullItemsCount = 0;
8747}
8748
8749void VmaBlockMetadata_Linear::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
8750{
8751 VmaSuballocation& suballoc = FindSuballocation((VkDeviceSize)allocHandle - 1);
8752 suballoc.userData = userData;
8753}
8754
8755void VmaBlockMetadata_Linear::DebugLogAllAllocations() const
8756{
8757 const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8758 for (auto it = suballocations1st.begin() + m_1stNullItemsBeginCount; it != suballocations1st.end(); ++it)
8759 if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
8760 DebugLogAllocation(it->offset, it->size, it->userData);
8761
8762 const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8763 for (auto it = suballocations2nd.begin(); it != suballocations2nd.end(); ++it)
8764 if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
8765 DebugLogAllocation(it->offset, it->size, it->userData);
8766}
8767
8768VmaSuballocation& VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset) const
8769{
8770 const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8771 const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8772
8773 VmaSuballocation refSuballoc;
8774 refSuballoc.offset = offset;
8775 // Rest of members stays uninitialized intentionally for better performance.
8776
8777 // Item from the 1st vector.
8778 {
8779 SuballocationVectorType::const_iterator it = VmaBinaryFindSorted(
8780 suballocations1st.begin() + m_1stNullItemsBeginCount,
8781 suballocations1st.end(),
8782 refSuballoc,
8783 VmaSuballocationOffsetLess());
8784 if (it != suballocations1st.end())
8785 {
8786 return const_cast<VmaSuballocation&>(*it);
8787 }
8788 }
8789
8790 if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
8791 {
8792 // Rest of members stays uninitialized intentionally for better performance.
8793 SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
8794 VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
8795 VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
8796 if (it != suballocations2nd.end())
8797 {
8798 return const_cast<VmaSuballocation&>(*it);
8799 }
8800 }
8801
8802 VMA_ASSERT(0 && "Allocation not found in linear allocator!");
8803 return const_cast<VmaSuballocation&>(suballocations1st.back()); // Should never occur.
8804}
8805
8806bool VmaBlockMetadata_Linear::ShouldCompact1st() const
8807{
8808 const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
8809 const size_t suballocCount = AccessSuballocations1st().size();
8810 return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
8811}
8812
// Housekeeping performed after every Free(): reclaims null items from the
// edges of both vectors, optionally compacts the 1st vector, and when the 1st
// vector empties while a ring buffer is active, swaps the roles of the two
// vectors. Statement order matters - edge trimming must precede the swap.
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if (IsEmpty())
    {
        // Nothing allocated anymore - reset everything to the initial state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while (m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            // Reclassify a middle null item as a beginning one.
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while (m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while (m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        // Find more null items at the beginning of 2nd vector.
        while (m_2ndNullItemsCount > 0 &&
            suballocations2nd[0].type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            --m_2ndNullItemsCount;
            VmaVectorRemove(suballocations2nd, 0);
        }

        if (ShouldCompact1st())
        {
            // Slide all non-null items of the 1st vector to the front,
            // dropping every free slot, then shrink the vector.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for (size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while (suballocations1st[srcIndex].type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    ++srcIndex;
                }
                if (dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if (suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if (suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if (!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // The former 2nd vector may start with free slots; convert
                // them into beginning-null items of the new 1st vector.
                while (m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flip which underlying vector is considered "1st".
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
8917
// Tries to place a new allocation at the lower-address end of the block:
// first at the end of the 1st vector, then (if a ring buffer is possible)
// wrapped around at the end of the 2nd vector, before the 1st vector's first
// item. Fills *pAllocationRequest and returns true on success. Honors
// debugMargin padding and bufferImageGranularity conflict rules.
bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    const VkDeviceSize blockSize = GetSize();
    const VkDeviceSize debugMargin = GetDebugMargin();
    const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Try to allocate at the end of 1st vector.

        VkDeviceSize resultBaseOffset = 0;
        if (!suballocations1st.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations1st.back();
            resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin;
        }

        // Start from offset equal to beginning of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply alignment.
        resultOffset = VmaAlignUp(resultOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty())
        {
            bool bufferImageGranularityConflict = false;
            // Walk backwards - only suballocations sharing the candidate's page can conflict.
            for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if (bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
            }
        }

        // In double-stack mode the free space ends where the upper stack begins.
        const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
            suballocations2nd.back().offset : blockSize;

        // There is enough free space at the end after alignment.
        if (resultOffset + allocSize + debugMargin <= freeSpaceEnd)
        {
            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if ((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
            {
                for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                {
                    const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                    if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                    {
                        if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on previous page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
            // pAllocationRequest->item, customData unused.
            pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
            return true;
        }
    }

    // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    // beginning of 1st vector as the end of free space.
    if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        VMA_ASSERT(!suballocations1st.empty());

        VkDeviceSize resultBaseOffset = 0;
        if (!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin;
        }

        // Start from offset equal to beginning of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply alignment.
        resultOffset = VmaAlignUp(resultOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            for (size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if (bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
            }
        }

        // First still-occupied item of the 1st vector marks the end of the free space.
        size_t index1st = m_1stNullItemsBeginCount;

        // There is enough free space at the end after alignment.
        if ((index1st == suballocations1st.size() && resultOffset + allocSize + debugMargin <= blockSize) ||
            (index1st < suballocations1st.size() && resultOffset + allocSize + debugMargin <= suballocations1st[index1st].offset))
        {
            // Check next suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if (allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity)
            {
                for (size_t nextSuballocIndex = index1st;
                    nextSuballocIndex < suballocations1st.size();
                    nextSuballocIndex++)
                {
                    const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                    if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                    {
                        if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
            pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
            // pAllocationRequest->item, customData unused.
            return true;
        }
    }

    // No space found in either region.
    return false;
}
9095
// Tries to place a new allocation at the upper-address end of the block
// (double-stack mode): below the current top of the 2nd vector, or at the end
// of the block if the 2nd vector is empty. Fills *pAllocationRequest and
// returns true on success. Fails if the block is already used as a ring buffer.
bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    const VkDeviceSize blockSize = GetSize();
    const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
        return false;
    }

    // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    if (allocSize > blockSize)
    {
        return false;
    }
    VkDeviceSize resultBaseOffset = blockSize - allocSize;
    if (!suballocations2nd.empty())
    {
        const VmaSuballocation& lastSuballoc = suballocations2nd.back();
        resultBaseOffset = lastSuballoc.offset - allocSize;
        // Would underflow below offset 0 - does not fit under the current top.
        if (allocSize > lastSuballoc.offset)
        {
            return false;
        }
    }

    // Start from offset equal to end of free space.
    VkDeviceSize resultOffset = resultBaseOffset;

    const VkDeviceSize debugMargin = GetDebugMargin();

    // Apply debugMargin at the end.
    if (debugMargin > 0)
    {
        if (resultOffset < debugMargin)
        {
            return false;
        }
        resultOffset -= debugMargin;
    }

    // Apply alignment.
    resultOffset = VmaAlignDown(resultOffset, allocAlignment);

    // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    // Make bigger alignment if necessary.
    if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
    {
        bool bufferImageGranularityConflict = false;
        // Walk from the top of the upper stack downward; only suballocations
        // sharing the candidate's page can conflict.
        for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
        {
            const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
            if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
            {
                if (VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                {
                    bufferImageGranularityConflict = true;
                    break;
                }
            }
            else
                // Already on previous page.
                break;
        }
        if (bufferImageGranularityConflict)
        {
            resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
        }
    }

    // There is enough free space.
    const VkDeviceSize endOf1st = !suballocations1st.empty() ?
        suballocations1st.back().offset + suballocations1st.back().size :
        0;
    if (endOf1st + debugMargin <= resultOffset)
    {
        // Check previous suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if (bufferImageGranularity > 1)
        {
            for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
            {
                const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                {
                    if (VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
            }
        }

        // All tests passed: Success.
        pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
        // pAllocationRequest->item unused.
        pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
        return true;
    }

    // Candidate range would overlap the 1st vector - no space available.
    return false;
}
9211#endif // _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS
9212#endif // _VMA_BLOCK_METADATA_LINEAR
9213
9214#if 0
9215#ifndef _VMA_BLOCK_METADATA_BUDDY
9216/*
9217- GetSize() is the original size of allocated memory block.
9218- m_UsableSize is this size aligned down to a power of two.
9219 All allocations and calculations happen relative to m_UsableSize.
9220- GetUnusableSize() is the difference between them.
9221 It is reported as separate, unused range, not available for allocations.
9222
9223Node at level 0 has size = m_UsableSize.
9224Each next level contains nodes with size 2 times smaller than current level.
9225m_LevelCount is the maximum number of levels to use in the current object.
9226*/
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks,
        VkDeviceSize bufferImageGranularity, bool isVirtual);
    virtual ~VmaBlockMetadata_Buddy();

    size_t GetAllocationCount() const override { return m_AllocationCount; }
    // The unusable tail (GetSize() - m_UsableSize) is reported as free space.
    VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize + GetUnusableSize(); }
    bool IsEmpty() const override { return m_Root->type == Node::TYPE_FREE; }
    // Corruption detection is not supported by this algorithm.
    VkResult CheckCorruption(const void* pBlockData) override { return VK_ERROR_FEATURE_NOT_PRESENT; }
    // Alloc handles encode offset + 1 so that 0 can serve as a null handle.
    VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; };
    void DebugLogAllAllocations() const override { DebugLogAllAllocationNode(m_Root, 0); }

    void Init(VkDeviceSize size) override;
    bool Validate() const override;

    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
    void AddStatistics(VmaStatistics& inoutStats) const override;

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override;
#endif

    bool CreateAllocationRequest(
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) override;

    void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        void* userData) override;

    void Free(VmaAllocHandle allocHandle) override;
    void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
    void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
    VmaAllocHandle GetAllocationListBegin() const override;
    VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
    void Clear() override;
    void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;

private:
    static const size_t MAX_LEVELS = 48;

    // Accumulators filled by ValidateNode() and compared against the
    // cached counters in Validate().
    struct ValidationContext
    {
        size_t calculatedAllocationCount = 0;
        size_t calculatedFreeCount = 0;
        VkDeviceSize calculatedSumFreeSize = 0;
    };
    // One node of the buddy tree: either FREE, an ALLOCATION (leaf), or
    // SPLIT into two children of half the size.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;

        // Payload depends on `type`.
        union
        {
            struct
            {
                Node* prev;
                Node* next;
            } free;
            struct
            {
                void* userData;
            } allocation;
            struct
            {
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;
    VmaPoolAllocator<Node> m_NodeAllocator;
    Node* m_Root;
    // Per-level doubly-linked list of FREE nodes.
    struct
    {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];

    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // Doesn't include space wasted due to internal fragmentation - allocation sizes are just aligned up to node sizes.
    // Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }

    // Rounds `size` up to the next power of two; real (non-virtual)
    // allocations are additionally rounded up to a 16-byte minimum first.
    VkDeviceSize AlignAllocationSize(VkDeviceSize size) const
    {
        if (!IsVirtual())
        {
            size = VmaAlignUp(size, (VkDeviceSize)16);
        }
        return VmaNextPow2(size);
    }
    Node* FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const;
    void DeleteNodeChildren(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    void AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);
    void DebugLogAllAllocationNode(Node* node, uint32_t level) const;

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
9362
9363#ifndef _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS
// Constructs an empty buddy metadata object. The tree itself (m_Root,
// m_UsableSize, m_LevelCount) is set up later in Init().
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks,
    VkDeviceSize bufferImageGranularity, bool isVirtual)
    : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
    m_NodeAllocator(pAllocationCallbacks, 32), // firstBlockCapacity
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    // Start with all per-level free lists empty.
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
9375
9376VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
9377{
9378 DeleteNodeChildren(m_Root);
9379 m_NodeAllocator.Free(m_Root);
9380}
9381
9383{
9385
9386 m_UsableSize = VmaPrevPow2(size);
9387 m_SumFreeSize = m_UsableSize;
9388
9389 // Calculate m_LevelCount.
9390 const VkDeviceSize minNodeSize = IsVirtual() ? 1 : 16;
9391 m_LevelCount = 1;
9392 while (m_LevelCount < MAX_LEVELS &&
9393 LevelToNodeSize(m_LevelCount) >= minNodeSize)
9394 {
9395 ++m_LevelCount;
9396 }
9397
9398 Node* rootNode = m_NodeAllocator.Alloc();
9399 rootNode->offset = 0;
9400 rootNode->type = Node::TYPE_FREE;
9401 rootNode->parent = VMA_NULL;
9402 rootNode->buddy = VMA_NULL;
9403
9404 m_Root = rootNode;
9405 AddToFreeListFront(0, rootNode);
9406}
9407
// Consistency check: recomputes counters by walking the tree and verifies
// the per-level free lists are well-formed doubly-linked lists.
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if (!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
    // NOTE(review): m_FreeCount is not compared against
    // ctx.calculatedFreeCount here - confirm whether that is intentional.

    // Validate free node lists.
    for (uint32_t level = 0; level < m_LevelCount; ++level)
    {
        // Head of a non-empty list must have no predecessor.
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for (Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if (node->free.next == VMA_NULL)
            {
                // Tail must match the recorded back pointer.
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                // Forward and backward links must agree.
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for (uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
9450
9451void VmaBlockMetadata_Buddy::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
9452{
9453 inoutStats.statistics.blockCount++;
9454 inoutStats.statistics.blockBytes += GetSize();
9455
9456 AddNodeToDetailedStatistics(inoutStats, m_Root, LevelToNodeSize(0));
9457
9458 const VkDeviceSize unusableSize = GetUnusableSize();
9459 if (unusableSize > 0)
9460 VmaAddDetailedStatisticsUnusedRange(inoutStats, unusableSize);
9461}
9462
9463void VmaBlockMetadata_Buddy::AddStatistics(VmaStatistics& inoutStats) const
9464{
9465 inoutStats.blockCount++;
9466 inoutStats.allocationCount += (uint32_t)m_AllocationCount;
9467 inoutStats.blockBytes += GetSize();
9468 inoutStats.allocationBytes += GetSize() - m_SumFreeSize;
9469}
9470
9471#if VMA_STATS_STRING_ENABLED
9472void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const
9473{
9475 VmaClearDetailedStatistics(stats);
9476 AddDetailedStatistics(stats);
9477
9478 PrintDetailedMap_Begin(
9479 json,
9482 stats.unusedRangeCount,
9483 mapRefCount);
9484
9485 PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
9486
9487 const VkDeviceSize unusableSize = GetUnusableSize();
9488 if (unusableSize > 0)
9489 {
9490 PrintDetailedMap_UnusedRange(json,
9491 m_UsableSize, // offset
9492 unusableSize); // size
9493 }
9494
9495 PrintDetailedMap_End(json);
9496}
9497#endif // VMA_STATS_STRING_ENABLED
9498
9499bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
9500 VkDeviceSize allocSize,
9501 VkDeviceSize allocAlignment,
9502 bool upperAddress,
9503 VmaSuballocationType allocType,
9504 uint32_t strategy,
9505 VmaAllocationRequest* pAllocationRequest)
9506{
9507 VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
9508
9509 allocSize = AlignAllocationSize(allocSize);
9510
9511 // Simple way to respect bufferImageGranularity. May be optimized some day.
9512 // Whenever it might be an OPTIMAL image...
9513 if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
9514 allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
9515 allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
9516 {
9517 allocAlignment = VMA_MAX(allocAlignment, GetBufferImageGranularity());
9518 allocSize = VmaAlignUp(allocSize, GetBufferImageGranularity());
9519 }
9520
9521 if (allocSize > m_UsableSize)
9522 {
9523 return false;
9524 }
9525
9526 const uint32_t targetLevel = AllocSizeToLevel(allocSize);
9527 for (uint32_t level = targetLevel; level--; )
9528 {
9529 for (Node* freeNode = m_FreeList[level].front;
9530 freeNode != VMA_NULL;
9531 freeNode = freeNode->free.next)
9532 {
9533 if (freeNode->offset % allocAlignment == 0)
9534 {
9535 pAllocationRequest->type = VmaAllocationRequestType::Normal;
9536 pAllocationRequest->allocHandle = (VmaAllocHandle)(freeNode->offset + 1);
9537 pAllocationRequest->size = allocSize;
9538 pAllocationRequest->customData = (void*)(uintptr_t)level;
9539 return true;
9540 }
9541 }
9542 }
9543
9544 return false;
9545}
9546
9547void VmaBlockMetadata_Buddy::Alloc(
9548 const VmaAllocationRequest& request,
9549 VmaSuballocationType type,
9550 void* userData)
9551{
9553
9554 const uint32_t targetLevel = AllocSizeToLevel(request.size);
9555 uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
9556
9557 Node* currNode = m_FreeList[currLevel].front;
9558 VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9559 const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1;
9560 while (currNode->offset != offset)
9561 {
9562 currNode = currNode->free.next;
9563 VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
9564 }
9565
9566 // Go down, splitting free nodes.
9567 while (currLevel < targetLevel)
9568 {
9569 // currNode is already first free node at currLevel.
9570 // Remove it from list of free nodes at this currLevel.
9571 RemoveFromFreeList(currLevel, currNode);
9572
9573 const uint32_t childrenLevel = currLevel + 1;
9574
9575 // Create two free sub-nodes.
9576 Node* leftChild = m_NodeAllocator.Alloc();
9577 Node* rightChild = m_NodeAllocator.Alloc();
9578
9579 leftChild->offset = currNode->offset;
9580 leftChild->type = Node::TYPE_FREE;
9581 leftChild->parent = currNode;
9582 leftChild->buddy = rightChild;
9583
9584 rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
9585 rightChild->type = Node::TYPE_FREE;
9586 rightChild->parent = currNode;
9587 rightChild->buddy = leftChild;
9588
9589 // Convert current currNode to split type.
9590 currNode->type = Node::TYPE_SPLIT;
9591 currNode->split.leftChild = leftChild;
9592
9593 // Add child nodes to free list. Order is important!
9594 AddToFreeListFront(childrenLevel, rightChild);
9595 AddToFreeListFront(childrenLevel, leftChild);
9596
9597 ++m_FreeCount;
9598 ++currLevel;
9599 currNode = m_FreeList[currLevel].front;
9600
9601 /*
9602 We can be sure that currNode, as left child of node previously split,
9603 also fulfills the alignment requirement.
9604 */
9605 }
9606
9607 // Remove from free list.
9608 VMA_ASSERT(currLevel == targetLevel &&
9609 currNode != VMA_NULL &&
9610 currNode->type == Node::TYPE_FREE);
9611 RemoveFromFreeList(currLevel, currNode);
9612
9613 // Convert to allocation node.
9614 currNode->type = Node::TYPE_ALLOCATION;
9615 currNode->allocation.userData = userData;
9616
9617 ++m_AllocationCount;
9618 --m_FreeCount;
9619 m_SumFreeSize -= request.size;
9620}
9621
9622void VmaBlockMetadata_Buddy::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
9623{
9624 uint32_t level = 0;
9625 outInfo.offset = (VkDeviceSize)allocHandle - 1;
9626 const Node* const node = FindAllocationNode(outInfo.offset, level);
9627 outInfo.size = LevelToNodeSize(level);
9628 outInfo.pUserData = node->allocation.userData;
9629}
9630
9631void* VmaBlockMetadata_Buddy::GetAllocationUserData(VmaAllocHandle allocHandle) const
9632{
9633 uint32_t level = 0;
9634 const Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
9635 return node->allocation.userData;
9636}
9637
9638VmaAllocHandle VmaBlockMetadata_Buddy::GetAllocationListBegin() const
9639{
9640 // Function only used for defragmentation, which is disabled for this algorithm
9641 return VK_NULL_HANDLE;
9642}
9643
9644VmaAllocHandle VmaBlockMetadata_Buddy::GetNextAllocation(VmaAllocHandle prevAlloc) const
9645{
9646 // Function only used for defragmentation, which is disabled for this algorithm
9647 return VK_NULL_HANDLE;
9648}
9649
9650void VmaBlockMetadata_Buddy::DeleteNodeChildren(Node* node)
9651{
9652 if (node->type == Node::TYPE_SPLIT)
9653 {
9654 DeleteNodeChildren(node->split.leftChild->buddy);
9655 DeleteNodeChildren(node->split.leftChild);
9656 const VkAllocationCallbacks* allocationCallbacks = GetAllocationCallbacks();
9657 m_NodeAllocator.Free(node->split.leftChild->buddy);
9658 m_NodeAllocator.Free(node->split.leftChild);
9659 }
9660}
9661
// Discards all allocations: drops the whole tree and resets the root back to
// a single free node covering m_UsableSize.
void VmaBlockMetadata_Buddy::Clear()
{
    DeleteNodeChildren(m_Root);
    m_Root->type = Node::TYPE_FREE;
    m_AllocationCount = 0;
    m_FreeCount = 1;
    m_SumFreeSize = m_UsableSize;
    // NOTE(review): m_FreeList is not rebuilt here - freed children may still
    // be referenced by the lists and the root is not re-inserted at level 0.
    // Confirm the free lists are reset elsewhere before reuse.
}
9670
9671void VmaBlockMetadata_Buddy::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
9672{
9673 uint32_t level = 0;
9674 Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
9675 node->allocation.userData = userData;
9676}
9677
9678VmaBlockMetadata_Buddy::Node* VmaBlockMetadata_Buddy::FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const
9679{
9680 Node* node = m_Root;
9681 VkDeviceSize nodeOffset = 0;
9682 outLevel = 0;
9683 VkDeviceSize levelNodeSize = LevelToNodeSize(0);
9684 while (node->type == Node::TYPE_SPLIT)
9685 {
9686 const VkDeviceSize nextLevelNodeSize = levelNodeSize >> 1;
9687 if (offset < nodeOffset + nextLevelNodeSize)
9688 {
9689 node = node->split.leftChild;
9690 }
9691 else
9692 {
9693 node = node->split.leftChild->buddy;
9694 nodeOffset += nextLevelNodeSize;
9695 }
9696 ++outLevel;
9697 levelNodeSize = nextLevelNodeSize;
9698 }
9699
9700 VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
9701 return node;
9702}
9703
// Recursively verifies structural invariants of the subtree rooted at `curr`
// and accumulates allocation/free counts and free size into `ctx`.
bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
{
    VMA_VALIDATE(level < m_LevelCount);
    VMA_VALIDATE(curr->parent == parent);
    // Only the root has no buddy, and buddy links must be mutual.
    VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    switch (curr->type)
    {
    case Node::TYPE_FREE:
        // curr->free.prev, next are validated separately.
        ctx.calculatedSumFreeSize += levelNodeSize;
        ++ctx.calculatedFreeCount;
        break;
    case Node::TYPE_ALLOCATION:
        ++ctx.calculatedAllocationCount;
        if (!IsVirtual())
        {
            // Non-virtual allocations must carry user data.
            VMA_VALIDATE(curr->allocation.userData != VMA_NULL);
        }
        break;
    case Node::TYPE_SPLIT:
    {
        // Recurse into both children: left child starts at this node's
        // offset, right child half a node-size further.
        const uint32_t childrenLevel = level + 1;
        const VkDeviceSize childrenLevelNodeSize = levelNodeSize >> 1;
        const Node* const leftChild = curr->split.leftChild;
        VMA_VALIDATE(leftChild != VMA_NULL);
        VMA_VALIDATE(leftChild->offset == curr->offset);
        if (!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
        {
            VMA_VALIDATE(false && "ValidateNode for left child failed.");
        }
        const Node* const rightChild = leftChild->buddy;
        VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
        if (!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
        {
            VMA_VALIDATE(false && "ValidateNode for right child failed.");
        }
    }
    break;
    default:
        return false;
    }

    return true;
}
9749
9750uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
9751{
9752 // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
9753 uint32_t level = 0;
9754 VkDeviceSize currLevelNodeSize = m_UsableSize;
9755 VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
9756 while (allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
9757 {
9758 ++level;
9759 currLevelNodeSize >>= 1;
9760 nextLevelNodeSize >>= 1;
9761 }
9762 return level;
9763}
9764
// Frees the allocation identified by allocHandle and coalesces buddy pairs
// of free nodes back up the tree as far as possible.
void VmaBlockMetadata_Buddy::Free(VmaAllocHandle allocHandle)
{
    // Handle encodes offset + 1.
    uint32_t level = 0;
    Node* node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);

    ++m_FreeCount;
    --m_AllocationCount;
    m_SumFreeSize += LevelToNodeSize(level);

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible.
    while (level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        // Merge node with its buddy into the parent: both children are
        // returned to the pool and the parent becomes a single FREE node
        // one level up.
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        m_NodeAllocator.Free(node->buddy);
        m_NodeAllocator.Free(node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        // Two free children collapsed into one free parent.
        --m_FreeCount;
    }

    AddToFreeListFront(level, node);
}
9793
9794void VmaBlockMetadata_Buddy::AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const
9795{
9796 switch (node->type)
9797 {
9798 case Node::TYPE_FREE:
9799 VmaAddDetailedStatisticsUnusedRange(inoutStats, levelNodeSize);
9800 break;
9801 case Node::TYPE_ALLOCATION:
9802 VmaAddDetailedStatisticsAllocation(inoutStats, levelNodeSize);
9803 break;
9804 case Node::TYPE_SPLIT:
9805 {
9806 const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
9807 const Node* const leftChild = node->split.leftChild;
9808 AddNodeToDetailedStatistics(inoutStats, leftChild, childrenNodeSize);
9809 const Node* const rightChild = leftChild->buddy;
9810 AddNodeToDetailedStatistics(inoutStats, rightChild, childrenNodeSize);
9811 }
9812 break;
9813 default:
9814 VMA_ASSERT(0);
9815 }
9816}
9817
// Pushes a FREE node at the head of the doubly-linked free list for `level`.
void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
{
    VMA_ASSERT(node->type == Node::TYPE_FREE);

    // List is empty.
    Node* const frontNode = m_FreeList[level].front;
    if (frontNode == VMA_NULL)
    {
        VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
        node->free.prev = node->free.next = VMA_NULL;
        // Single element is both front and back.
        m_FreeList[level].front = m_FreeList[level].back = node;
    }
    else
    {
        VMA_ASSERT(frontNode->free.prev == VMA_NULL);
        node->free.prev = VMA_NULL;
        node->free.next = frontNode;
        frontNode->free.prev = node;
        m_FreeList[level].front = node;
    }
}
9839
// Unlinks `node` from the doubly-linked free list of `level`, updating
// front/back pointers as needed. node->free.prev/next themselves are
// deliberately left untouched (see declaration comment).
void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
{
    VMA_ASSERT(m_FreeList[level].front != VMA_NULL);

    // It is at the front.
    if (node->free.prev == VMA_NULL)
    {
        VMA_ASSERT(m_FreeList[level].front == node);
        m_FreeList[level].front = node->free.next;
    }
    else
    {
        Node* const prevFreeNode = node->free.prev;
        VMA_ASSERT(prevFreeNode->free.next == node);
        prevFreeNode->free.next = node->free.next;
    }

    // It is at the back.
    if (node->free.next == VMA_NULL)
    {
        VMA_ASSERT(m_FreeList[level].back == node);
        m_FreeList[level].back = node->free.prev;
    }
    else
    {
        Node* const nextFreeNode = node->free.next;
        VMA_ASSERT(nextFreeNode->free.prev == node);
        nextFreeNode->free.prev = node->free.prev;
    }
}
9870
// Recursively logs every live allocation in the subtree rooted at `node`.
void VmaBlockMetadata_Buddy::DebugLogAllAllocationNode(Node* node, uint32_t level) const
{
    switch (node->type)
    {
    case Node::TYPE_FREE:
        // Nothing to log for free nodes.
        break;
    case Node::TYPE_ALLOCATION:
        DebugLogAllocation(node->offset, LevelToNodeSize(level), node->allocation.userData);
        break;
    case Node::TYPE_SPLIT:
    {
        // Both children live one level deeper.
        ++level;
        DebugLogAllAllocationNode(node->split.leftChild, level);
        DebugLogAllAllocationNode(node->split.leftChild->buddy, level);
    }
    break;
    default:
        VMA_ASSERT(0);
    }
}
9891
9892#if VMA_STATS_STRING_ENABLED
// Recursively emits JSON entries (allocation or unused range) for every
// leaf node in the subtree rooted at `node`.
void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
{
    switch (node->type)
    {
    case Node::TYPE_FREE:
        PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
        break;
    case Node::TYPE_ALLOCATION:
        PrintDetailedMap_Allocation(json, node->offset, levelNodeSize, node->allocation.userData);
        break;
    case Node::TYPE_SPLIT:
    {
        // Each child covers half of this node's range.
        const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
        const Node* const leftChild = node->split.leftChild;
        PrintDetailedMapNode(json, leftChild, childrenNodeSize);
        const Node* const rightChild = leftChild->buddy;
        PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    }
    break;
    default:
        VMA_ASSERT(0);
    }
}
9916#endif // VMA_STATS_STRING_ENABLED
9917#endif // _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS
9918#endif // _VMA_BLOCK_METADATA_BUDDY
9919#endif // #if 0
9920
9921#ifndef _VMA_BLOCK_METADATA_TLSF
// To avoid searching the current, larger region when the first allocation attempt
// fails, and skip directly to a smaller range, pass
// VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT as the strategy in CreateAllocationRequest().
// When fragmentation and reuse of previous blocks do not matter, use
// VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT for the fastest possible allocation time.
class VmaBlockMetadata_TLSF : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_TLSF)
public:
    VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
        VkDeviceSize bufferImageGranularity, bool isVirtual);
    virtual ~VmaBlockMetadata_TLSF();

    size_t GetAllocationCount() const override { return m_AllocCount; }
    // +1 accounts for the trailing null block, which is always a free region.
    size_t GetFreeRegionsCount() const override { return m_BlocksFreeCount + 1; }
    VkDeviceSize GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; }
    // Empty when the null block still starts at offset 0, i.e. no block precedes it.
    bool IsEmpty() const override { return m_NullBlock->offset == 0; }
    // Alloc handles are Block pointers for this algorithm.
    VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; };

    void Init(VkDeviceSize size) override;
    bool Validate() const override;

    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
    void AddStatistics(VmaStatistics& inoutStats) const override;

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json) const override;
#endif

    bool CreateAllocationRequest(
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) override;

    VkResult CheckCorruption(const void* pBlockData) override;
    void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        void* userData) override;

    void Free(VmaAllocHandle allocHandle) override;
    void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
    void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
    VmaAllocHandle GetAllocationListBegin() const override;
    VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
    VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
    void Clear() override;
    void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
    void DebugLogAllAllocations() const override;

private:
    // According to original paper it should be preferable 4 or 5:
    // M. Masmano, I. Ripoll, A. Crespo, and J. Real "TLSF: a New Dynamic Memory Allocator for Real-Time Systems"
    // http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf
    static const uint8_t SECOND_LEVEL_INDEX = 5;
    static const uint16_t SMALL_BUFFER_SIZE = 256;
    static const uint32_t INITIAL_BLOCK_ALLOC_COUNT = 16;
    static const uint8_t MEMORY_CLASS_SHIFT = 7;
    static const uint8_t MAX_MEMORY_CLASSES = 65 - MEMORY_CLASS_SHIFT;

    // One region of the block; regions form a doubly-linked "physical" chain
    // ordered by offset, plus free-list links when free.
    class Block
    {
    public:
        VkDeviceSize offset;
        VkDeviceSize size;
        Block* prevPhysical;
        Block* nextPhysical;

        void MarkFree() { prevFree = VMA_NULL; }
        void MarkTaken() { prevFree = this; }
        bool IsFree() const { return prevFree != this; }
        void*& UserData() { VMA_HEAVY_ASSERT(!IsFree()); return userData; }
        Block*& PrevFree() { return prevFree; }
        Block*& NextFree() { VMA_HEAVY_ASSERT(IsFree()); return nextFree; }

    private:
        Block* prevFree; // Address of the same block here indicates that block is taken
        // Free blocks use nextFree; taken blocks reuse the slot for userData.
        union
        {
            Block* nextFree;
            void* userData;
        };
    };

    size_t m_AllocCount;
    // Total number of free blocks besides null block
    size_t m_BlocksFreeCount;
    // Total size of free blocks excluding null block
    VkDeviceSize m_BlocksFreeSize;
    // First/second-level occupancy bitmaps - presumably one bit per
    // (memory class / second-level list) with a non-empty free list;
    // maintained by alloc/free paths outside this view.
    uint32_t m_IsFreeBitmap;
    uint8_t m_MemoryClasses;
    uint32_t m_InnerIsFreeBitmap[MAX_MEMORY_CLASSES];
    uint32_t m_ListsCount;
    /*
    * 0: 0-3 lists for small buffers
    * 1+: 0-(2^SLI-1) lists for normal buffers
    */
    Block** m_FreeList;
    VmaPoolAllocator<Block> m_BlockAllocator;
    // Sentinel block at the end of the physical chain; holds the remaining
    // unassigned space and never lives on a free list.
    Block* m_NullBlock;
    VmaBlockBufferImageGranularity m_GranularityHandler;

    uint8_t SizeToMemoryClass(VkDeviceSize size) const;
    uint16_t SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const;
    uint32_t GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const;
    uint32_t GetListIndex(VkDeviceSize size) const;

    void RemoveFreeBlock(Block* block);
    void InsertFreeBlock(Block* block);
    void MergeBlock(Block* block, Block* prev);

    Block* FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const;
    bool CheckBlock(
        Block& block,
        uint32_t listIndex,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaAllocationRequest* pAllocationRequest);
};
10044
10045#ifndef _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
// Constructs an empty TLSF metadata object. m_NullBlock, m_FreeList and
// m_InnerIsFreeBitmap are fully set up later in Init().
VmaBlockMetadata_TLSF::VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
    VkDeviceSize bufferImageGranularity, bool isVirtual)
    : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
    m_AllocCount(0),
    m_BlocksFreeCount(0),
    m_BlocksFreeSize(0),
    m_IsFreeBitmap(0),
    m_MemoryClasses(0),
    m_ListsCount(0),
    m_FreeList(VMA_NULL),
    m_BlockAllocator(pAllocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT),
    m_NullBlock(VMA_NULL),
    m_GranularityHandler(bufferImageGranularity) {}
10059
// Releases the free-list array and granularity tracking structures.
VmaBlockMetadata_TLSF::~VmaBlockMetadata_TLSF()
{
    // m_FreeList is allocated in Init(); it stays VMA_NULL if Init() never ran.
    if (m_FreeList)
        vma_delete_array(GetAllocationCallbacks(), m_FreeList, m_ListsCount);
    m_GranularityHandler.Destroy(GetAllocationCallbacks());
}
10066
10068{
10070
10071 if (!IsVirtual())
10072 m_GranularityHandler.Init(GetAllocationCallbacks(), size);
10073
10074 m_NullBlock = m_BlockAllocator.Alloc();
10075 m_NullBlock->size = size;
10076 m_NullBlock->offset = 0;
10077 m_NullBlock->prevPhysical = VMA_NULL;
10078 m_NullBlock->nextPhysical = VMA_NULL;
10079 m_NullBlock->MarkFree();
10080 m_NullBlock->NextFree() = VMA_NULL;
10081 m_NullBlock->PrevFree() = VMA_NULL;
10082 uint8_t memoryClass = SizeToMemoryClass(size);
10083 uint16_t sli = SizeToSecondIndex(size, memoryClass);
10084 m_ListsCount = (memoryClass == 0 ? 0 : (memoryClass - 1) * (1UL << SECOND_LEVEL_INDEX) + sli) + 1;
10085 if (IsVirtual())
10086 m_ListsCount += 1UL << SECOND_LEVEL_INDEX;
10087 else
10088 m_ListsCount += 4;
10089
10090 m_MemoryClasses = memoryClass + 2;
10091 memset(m_InnerIsFreeBitmap, 0, MAX_MEMORY_CLASSES * sizeof(uint32_t));
10092
10093 m_FreeList = vma_new_array(GetAllocationCallbacks(), Block*, m_ListsCount);
10094 memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
10095}
10096
// Consistency check: validates the free lists, walks the physical chain
// backwards from the null block, and compares recomputed totals against
// the cached counters.
bool VmaBlockMetadata_TLSF::Validate() const
{
    VMA_VALIDATE(GetSumFreeSize() <= GetSize());

    // Start totals with the null block, which is always free.
    VkDeviceSize calculatedSize = m_NullBlock->size;
    VkDeviceSize calculatedFreeSize = m_NullBlock->size;
    size_t allocCount = 0;
    size_t freeCount = 0;

    // Check integrity of free lists
    for (uint32_t list = 0; list < m_ListsCount; ++list)
    {
        Block* block = m_FreeList[list];
        if (block != VMA_NULL)
        {
            VMA_VALIDATE(block->IsFree());
            // List head must have no predecessor.
            VMA_VALIDATE(block->PrevFree() == VMA_NULL);
            while (block->NextFree())
            {
                VMA_VALIDATE(block->NextFree()->IsFree());
                // Forward and backward links must agree.
                VMA_VALIDATE(block->NextFree()->PrevFree() == block);
                block = block->NextFree();
            }
        }
    }

    VkDeviceSize nextOffset = m_NullBlock->offset;
    auto validateCtx = m_GranularityHandler.StartValidation(GetAllocationCallbacks(), IsVirtual());

    // Null block is the sentinel at the end of the physical chain.
    VMA_VALIDATE(m_NullBlock->nextPhysical == VMA_NULL);
    if (m_NullBlock->prevPhysical)
    {
        VMA_VALIDATE(m_NullBlock->prevPhysical->nextPhysical == m_NullBlock);
    }
    // Check all blocks
    for (Block* prev = m_NullBlock->prevPhysical; prev != VMA_NULL; prev = prev->prevPhysical)
    {
        // Chain must be contiguous: each block ends where its successor begins.
        VMA_VALIDATE(prev->offset + prev->size == nextOffset);
        nextOffset = prev->offset;
        calculatedSize += prev->size;

        uint32_t listIndex = GetListIndex(prev->size);
        if (prev->IsFree())
        {
            ++freeCount;
            // Check if free block belongs to free list
            Block* freeBlock = m_FreeList[listIndex];
            VMA_VALIDATE(freeBlock != VMA_NULL);

            bool found = false;
            do
            {
                if (freeBlock == prev)
                    found = true;

                freeBlock = freeBlock->NextFree();
            } while (!found && freeBlock != VMA_NULL);

            VMA_VALIDATE(found);
            calculatedFreeSize += prev->size;
        }
        else
        {
            ++allocCount;
            // Check if taken block is not on a free list
            Block* freeBlock = m_FreeList[listIndex];
            while (freeBlock)
            {
                VMA_VALIDATE(freeBlock != prev);
                freeBlock = freeBlock->NextFree();
            }

            if (!IsVirtual())
            {
                VMA_VALIDATE(m_GranularityHandler.Validate(validateCtx, prev->offset, prev->size));
            }
        }

        if (prev->prevPhysical)
        {
            VMA_VALIDATE(prev->prevPhysical->nextPhysical == prev);
        }
    }

    if (!IsVirtual())
    {
        VMA_VALIDATE(m_GranularityHandler.FinishValidation(validateCtx));
    }

    // The chain walked back all the way to offset 0, and totals match.
    VMA_VALIDATE(nextOffset == 0);
    VMA_VALIDATE(calculatedSize == GetSize());
    VMA_VALIDATE(calculatedFreeSize == GetSumFreeSize());
    VMA_VALIDATE(allocCount == m_AllocCount);
    VMA_VALIDATE(freeCount == m_BlocksFreeCount);

    return true;
}
10194
10195void VmaBlockMetadata_TLSF::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
10196{
10197 inoutStats.statistics.blockCount++;
10198 inoutStats.statistics.blockBytes += GetSize();
10199 if (m_NullBlock->size > 0)
10200 VmaAddDetailedStatisticsUnusedRange(inoutStats, m_NullBlock->size);
10201
10202 for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
10203 {
10204 if (block->IsFree())
10205 VmaAddDetailedStatisticsUnusedRange(inoutStats, block->size);
10206 else
10207 VmaAddDetailedStatisticsAllocation(inoutStats, block->size);
10208 }
10209}
10210
10211void VmaBlockMetadata_TLSF::AddStatistics(VmaStatistics& inoutStats) const
10212{
10213 inoutStats.blockCount++;
10214 inoutStats.allocationCount += (uint32_t)m_AllocCount;
10215 inoutStats.blockBytes += GetSize();
10216 inoutStats.allocationBytes += GetSize() - GetSumFreeSize();
10217}
10218
10219#if VMA_STATS_STRING_ENABLED
10220void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const
10221{
10222 size_t blockCount = m_AllocCount + m_BlocksFreeCount;
10223 VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
10224 VmaVector<Block*, VmaStlAllocator<Block*>> blockList(blockCount, allocator);
10225
10226 size_t i = blockCount;
10227 for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
10228 {
10229 blockList[--i] = block;
10230 }
10231 VMA_ASSERT(i == 0);
10232
10234 VmaClearDetailedStatistics(stats);
10235 AddDetailedStatistics(stats);
10236
10237 PrintDetailedMap_Begin(json,
10240 stats.unusedRangeCount);
10241
10242 for (; i < blockCount; ++i)
10243 {
10244 Block* block = blockList[i];
10245 if (block->IsFree())
10246 PrintDetailedMap_UnusedRange(json, block->offset, block->size);
10247 else
10248 PrintDetailedMap_Allocation(json, block->offset, block->size, block->UserData());
10249 }
10250 if (m_NullBlock->size > 0)
10251 PrintDetailedMap_UnusedRange(json, m_NullBlock->offset, m_NullBlock->size);
10252
10253 PrintDetailedMap_End(json);
10254}
10255#endif
10256
10257bool VmaBlockMetadata_TLSF::CreateAllocationRequest(
10258 VkDeviceSize allocSize,
10259 VkDeviceSize allocAlignment,
10260 bool upperAddress,
10261 VmaSuballocationType allocType,
10262 uint32_t strategy,
10263 VmaAllocationRequest* pAllocationRequest)
10264{
10265 VMA_ASSERT(allocSize > 0 && "Cannot allocate empty block!");
10266 VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10267
10268 // For small granularity round up
10269 if (!IsVirtual())
10270 m_GranularityHandler.RoundupAllocRequest(allocType, allocSize, allocAlignment);
10271
10272 allocSize += GetDebugMargin();
10273 // Quick check for too small pool
10274 if (allocSize > GetSumFreeSize())
10275 return false;
10276
10277 // If no free blocks in pool then check only null block
10278 if (m_BlocksFreeCount == 0)
10279 return CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest);
10280
10281 // Round up to the next block
10282 VkDeviceSize sizeForNextList = allocSize;
10283 VkDeviceSize smallSizeStep = SMALL_BUFFER_SIZE / (IsVirtual() ? 1 << SECOND_LEVEL_INDEX : 4);
10284 if (allocSize > SMALL_BUFFER_SIZE)
10285 {
10286 sizeForNextList += (1ULL << (VMA_BITSCAN_MSB(allocSize) - SECOND_LEVEL_INDEX));
10287 }
10288 else if (allocSize > SMALL_BUFFER_SIZE - smallSizeStep)
10289 sizeForNextList = SMALL_BUFFER_SIZE + 1;
10290 else
10291 sizeForNextList += smallSizeStep;
10292
10293 uint32_t nextListIndex = 0;
10294 uint32_t prevListIndex = 0;
10295 Block* nextListBlock = VMA_NULL;
10296 Block* prevListBlock = VMA_NULL;
10297
10298 // Check blocks according to strategies
10300 {
10301 // Quick check for larger block first
10302 nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
10303 if (nextListBlock != VMA_NULL && CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10304 return true;
10305
10306 // If not fitted then null block
10307 if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
10308 return true;
10309
10310 // Null block failed, search larger bucket
10311 while (nextListBlock)
10312 {
10313 if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10314 return true;
10315 nextListBlock = nextListBlock->NextFree();
10316 }
10317
10318 // Failed again, check best fit bucket
10319 prevListBlock = FindFreeBlock(allocSize, prevListIndex);
10320 while (prevListBlock)
10321 {
10322 if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10323 return true;
10324 prevListBlock = prevListBlock->NextFree();
10325 }
10326 }
10328 {
10329 // Check best fit bucket
10330 prevListBlock = FindFreeBlock(allocSize, prevListIndex);
10331 while (prevListBlock)
10332 {
10333 if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10334 return true;
10335 prevListBlock = prevListBlock->NextFree();
10336 }
10337
10338 // If failed check null block
10339 if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
10340 return true;
10341
10342 // Check larger bucket
10343 nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
10344 while (nextListBlock)
10345 {
10346 if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10347 return true;
10348 nextListBlock = nextListBlock->NextFree();
10349 }
10350 }
10352 {
10353 // Perform search from the start
10354 VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
10355 VmaVector<Block*, VmaStlAllocator<Block*>> blockList(m_BlocksFreeCount, allocator);
10356
10357 size_t i = m_BlocksFreeCount;
10358 for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
10359 {
10360 if (block->IsFree() && block->size >= allocSize)
10361 blockList[--i] = block;
10362 }
10363
10364 for (; i < m_BlocksFreeCount; ++i)
10365 {
10366 Block& block = *blockList[i];
10367 if (CheckBlock(block, GetListIndex(block.size), allocSize, allocAlignment, allocType, pAllocationRequest))
10368 return true;
10369 }
10370
10371 // If failed check null block
10372 if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
10373 return true;
10374
10375 // Whole range searched, no more memory
10376 return false;
10377 }
10378 else
10379 {
10380 // Check larger bucket
10381 nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
10382 while (nextListBlock)
10383 {
10384 if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10385 return true;
10386 nextListBlock = nextListBlock->NextFree();
10387 }
10388
10389 // If failed check null block
10390 if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
10391 return true;
10392
10393 // Check best fit bucket
10394 prevListBlock = FindFreeBlock(allocSize, prevListIndex);
10395 while (prevListBlock)
10396 {
10397 if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10398 return true;
10399 prevListBlock = prevListBlock->NextFree();
10400 }
10401 }
10402
10403 // Worst case, full search has to be done
10404 while (++nextListIndex < m_ListsCount)
10405 {
10406 nextListBlock = m_FreeList[nextListIndex];
10407 while (nextListBlock)
10408 {
10409 if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
10410 return true;
10411 nextListBlock = nextListBlock->NextFree();
10412 }
10413 }
10414
10415 // No more memory sadly
10416 return false;
10417}
10418
10419VkResult VmaBlockMetadata_TLSF::CheckCorruption(const void* pBlockData)
10420{
10421 for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
10422 {
10423 if (!block->IsFree())
10424 {
10425 if (!VmaValidateMagicValue(pBlockData, block->offset + block->size))
10426 {
10427 VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10428 return VK_ERROR_UNKNOWN_COPY;
10429 }
10430 }
10431 }
10432
10433 return VK_SUCCESS;
10434}
10435
// Commits a previously found allocation request: carves the requested range
// out of the free block stored in the request, handling alignment padding,
// block splitting, debug margins and granularity bookkeeping.
void VmaBlockMetadata_TLSF::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    void* userData)
{
    VMA_ASSERT(request.type == VmaAllocationRequestType::TLSF);

    // Get block and pop it from the free list
    Block* currentBlock = (Block*)request.allocHandle;
    // algorithmData carries the aligned offset computed by CheckBlock().
    VkDeviceSize offset = request.algorithmData;
    VMA_ASSERT(currentBlock != VMA_NULL);
    VMA_ASSERT(currentBlock->offset <= offset);

    // The null block never sits on a free list.
    if (currentBlock != m_NullBlock)
        RemoveFreeBlock(currentBlock);

    VkDeviceSize debugMargin = GetDebugMargin();
    // Bytes between the block start and the aligned allocation offset.
    VkDeviceSize misssingAlignment = offset - currentBlock->offset;

    // Append missing alignment to prev block or create new one
    if (misssingAlignment)
    {
        Block* prevBlock = currentBlock->prevPhysical;
        VMA_ASSERT(prevBlock != VMA_NULL && "There should be no missing alignment at offset 0!");

        if (prevBlock->IsFree() && prevBlock->size != debugMargin)
        {
            uint32_t oldList = GetListIndex(prevBlock->size);
            prevBlock->size += misssingAlignment;
            // Check if new size crosses list bucket
            if (oldList != GetListIndex(prevBlock->size))
            {
                // Re-bucket: remove at the old size, re-insert at the new one.
                prevBlock->size -= misssingAlignment;
                RemoveFreeBlock(prevBlock);
                prevBlock->size += misssingAlignment;
                InsertFreeBlock(prevBlock);
            }
            else
                // Same bucket: only the cached free-byte total changes.
                m_BlocksFreeSize += misssingAlignment;
        }
        else
        {
            // Predecessor is taken (or is a debug-margin block): create a new
            // free block covering the alignment padding.
            Block* newBlock = m_BlockAllocator.Alloc();
            currentBlock->prevPhysical = newBlock;
            prevBlock->nextPhysical = newBlock;
            newBlock->prevPhysical = prevBlock;
            newBlock->nextPhysical = currentBlock;
            newBlock->size = misssingAlignment;
            newBlock->offset = currentBlock->offset;
            // MarkTaken() first — InsertFreeBlock() asserts the block is not
            // already marked free.
            newBlock->MarkTaken();

            InsertFreeBlock(newBlock);
        }

        // Shrink the target block so it starts at the aligned offset.
        currentBlock->size -= misssingAlignment;
        currentBlock->offset += misssingAlignment;
    }

    VkDeviceSize size = request.size + debugMargin;
    if (currentBlock->size == size)
    {
        // Exact fit: no split needed.
        if (currentBlock == m_NullBlock)
        {
            // Setup new null block
            m_NullBlock = m_BlockAllocator.Alloc();
            m_NullBlock->size = 0;
            m_NullBlock->offset = currentBlock->offset + size;
            m_NullBlock->prevPhysical = currentBlock;
            m_NullBlock->nextPhysical = VMA_NULL;
            m_NullBlock->MarkFree();
            m_NullBlock->PrevFree() = VMA_NULL;
            m_NullBlock->NextFree() = VMA_NULL;
            currentBlock->nextPhysical = m_NullBlock;
            currentBlock->MarkTaken();
        }
    }
    else
    {
        VMA_ASSERT(currentBlock->size > size && "Proper block already found, shouldn't find smaller one!");

        // Create new free block
        // Split off the remainder after the allocation.
        Block* newBlock = m_BlockAllocator.Alloc();
        newBlock->size = currentBlock->size - size;
        newBlock->offset = currentBlock->offset + size;
        newBlock->prevPhysical = currentBlock;
        newBlock->nextPhysical = currentBlock->nextPhysical;
        currentBlock->nextPhysical = newBlock;
        currentBlock->size = size;

        if (currentBlock == m_NullBlock)
        {
            // The remainder becomes the new null block.
            m_NullBlock = newBlock;
            m_NullBlock->MarkFree();
            m_NullBlock->NextFree() = VMA_NULL;
            m_NullBlock->PrevFree() = VMA_NULL;
            currentBlock->MarkTaken();
        }
        else
        {
            newBlock->nextPhysical->prevPhysical = newBlock;
            newBlock->MarkTaken();
            InsertFreeBlock(newBlock);
        }
    }
    currentBlock->UserData() = userData;

    if (debugMargin > 0)
    {
        // Carve the debug margin out of the allocation's tail into its own
        // block, kept on the free list.
        currentBlock->size -= debugMargin;
        Block* newBlock = m_BlockAllocator.Alloc();
        newBlock->size = debugMargin;
        newBlock->offset = currentBlock->offset + currentBlock->size;
        newBlock->prevPhysical = currentBlock;
        newBlock->nextPhysical = currentBlock->nextPhysical;
        newBlock->MarkTaken();
        currentBlock->nextPhysical->prevPhysical = newBlock;
        currentBlock->nextPhysical = newBlock;
        InsertFreeBlock(newBlock);
    }

    // request.customData carries the suballocation type (see CheckBlock()).
    if (!IsVirtual())
        m_GranularityHandler.AllocPages((uint8_t)(uintptr_t)request.customData,
            currentBlock->offset, currentBlock->size);
    ++m_AllocCount;
}
10561
// Frees the allocation identified by allocHandle and coalesces the resulting
// free range with free physical neighbors where possible.
void VmaBlockMetadata_TLSF::Free(VmaAllocHandle allocHandle)
{
    Block* block = (Block*)allocHandle;
    Block* next = block->nextPhysical;
    VMA_ASSERT(!block->IsFree() && "Block is already free!");

    if (!IsVirtual())
        m_GranularityHandler.FreePages(block->offset, block->size);
    --m_AllocCount;

    VkDeviceSize debugMargin = GetDebugMargin();
    if (debugMargin > 0)
    {
        // With debug margins enabled, the block right after the allocation is
        // its margin block (created in Alloc()); absorb it first.
        RemoveFreeBlock(next);
        MergeBlock(next, block);
        block = next;
        next = next->nextPhysical;
    }

    // Try merging
    // Merge with the previous physical block if it is free and not a
    // debug-margin block.
    Block* prev = block->prevPhysical;
    if (prev != VMA_NULL && prev->IsFree() && prev->size != debugMargin)
    {
        RemoveFreeBlock(prev);
        MergeBlock(block, prev);
    }

    if (!next->IsFree())
        // No free successor: just publish the freed block.
        InsertFreeBlock(block);
    else if (next == m_NullBlock)
        // Freed range is adjacent to the unallocated tail: extend it.
        MergeBlock(m_NullBlock, block);
    else
    {
        // Successor is a regular free block: merge and re-insert at new size.
        RemoveFreeBlock(next);
        MergeBlock(next, block);
        InsertFreeBlock(next);
    }
}
10600
10601void VmaBlockMetadata_TLSF::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
10602{
10603 Block* block = (Block*)allocHandle;
10604 VMA_ASSERT(!block->IsFree() && "Cannot get allocation info for free block!");
10605 outInfo.offset = block->offset;
10606 outInfo.size = block->size;
10607 outInfo.pUserData = block->UserData();
10608}
10609
10610void* VmaBlockMetadata_TLSF::GetAllocationUserData(VmaAllocHandle allocHandle) const
10611{
10612 Block* block = (Block*)allocHandle;
10613 VMA_ASSERT(!block->IsFree() && "Cannot get user data for free block!");
10614 return block->UserData();
10615}
10616
10617VmaAllocHandle VmaBlockMetadata_TLSF::GetAllocationListBegin() const
10618{
10619 if (m_AllocCount == 0)
10620 return VK_NULL_HANDLE;
10621
10622 for (Block* block = m_NullBlock->prevPhysical; block; block = block->prevPhysical)
10623 {
10624 if (!block->IsFree())
10625 return (VmaAllocHandle)block;
10626 }
10627 VMA_ASSERT(false && "If m_AllocCount > 0 then should find any allocation!");
10628 return VK_NULL_HANDLE;
10629}
10630
10631VmaAllocHandle VmaBlockMetadata_TLSF::GetNextAllocation(VmaAllocHandle prevAlloc) const
10632{
10633 Block* startBlock = (Block*)prevAlloc;
10634 VMA_ASSERT(!startBlock->IsFree() && "Incorrect block!");
10635
10636 for (Block* block = startBlock->prevPhysical; block; block = block->prevPhysical)
10637 {
10638 if (!block->IsFree())
10639 return (VmaAllocHandle)block;
10640 }
10641 return VK_NULL_HANDLE;
10642}
10643
10644VkDeviceSize VmaBlockMetadata_TLSF::GetNextFreeRegionSize(VmaAllocHandle alloc) const
10645{
10646 Block* block = (Block*)alloc;
10647 VMA_ASSERT(!block->IsFree() && "Incorrect block!");
10648
10649 if (block->prevPhysical)
10650 return block->prevPhysical->IsFree() ? block->prevPhysical->size : 0;
10651 return 0;
10652}
10653
10654void VmaBlockMetadata_TLSF::Clear()
10655{
10656 m_AllocCount = 0;
10657 m_BlocksFreeCount = 0;
10658 m_BlocksFreeSize = 0;
10659 m_IsFreeBitmap = 0;
10660 m_NullBlock->offset = 0;
10661 m_NullBlock->size = GetSize();
10662 Block* block = m_NullBlock->prevPhysical;
10663 m_NullBlock->prevPhysical = VMA_NULL;
10664 while (block)
10665 {
10666 Block* prev = block->prevPhysical;
10667 m_BlockAllocator.Free(block);
10668 block = prev;
10669 }
10670 memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
10671 memset(m_InnerIsFreeBitmap, 0, m_MemoryClasses * sizeof(uint32_t));
10672 m_GranularityHandler.Clear();
10673}
10674
10675void VmaBlockMetadata_TLSF::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
10676{
10677 Block* block = (Block*)allocHandle;
10678 VMA_ASSERT(!block->IsFree() && "Trying to set user data for not allocated block!");
10679 block->UserData() = userData;
10680}
10681
10682void VmaBlockMetadata_TLSF::DebugLogAllAllocations() const
10683{
10684 for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
10685 if (!block->IsFree())
10686 DebugLogAllocation(block->offset, block->size, block->UserData());
10687}
10688
10689uint8_t VmaBlockMetadata_TLSF::SizeToMemoryClass(VkDeviceSize size) const
10690{
10691 if (size > SMALL_BUFFER_SIZE)
10692 return VMA_BITSCAN_MSB(size) - MEMORY_CLASS_SHIFT;
10693 return 0;
10694}
10695
10696uint16_t VmaBlockMetadata_TLSF::SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const
10697{
10698 if (memoryClass == 0)
10699 {
10700 if (IsVirtual())
10701 return static_cast<uint16_t>((size - 1) / 8);
10702 else
10703 return static_cast<uint16_t>((size - 1) / 64);
10704 }
10705 return static_cast<uint16_t>((size >> (memoryClass + MEMORY_CLASS_SHIFT - SECOND_LEVEL_INDEX)) ^ (1U << SECOND_LEVEL_INDEX));
10706}
10707
10708uint32_t VmaBlockMetadata_TLSF::GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const
10709{
10710 if (memoryClass == 0)
10711 return secondIndex;
10712
10713 const uint32_t index = static_cast<uint32_t>(memoryClass - 1) * (1 << SECOND_LEVEL_INDEX) + secondIndex;
10714 if (IsVirtual())
10715 return index + (1 << SECOND_LEVEL_INDEX);
10716 else
10717 return index + 4;
10718}
10719
10720uint32_t VmaBlockMetadata_TLSF::GetListIndex(VkDeviceSize size) const
10721{
10722 uint8_t memoryClass = SizeToMemoryClass(size);
10723 return GetListIndex(memoryClass, SizeToSecondIndex(size, memoryClass));
10724}
10725
// Unlinks block from its free list, updates the two-level TLSF bitmaps and
// the cached free counters, marks the block taken and clears its user data.
void VmaBlockMetadata_TLSF::RemoveFreeBlock(Block* block)
{
    VMA_ASSERT(block != m_NullBlock);
    VMA_ASSERT(block->IsFree());

    if (block->NextFree() != VMA_NULL)
        block->NextFree()->PrevFree() = block->PrevFree();
    if (block->PrevFree() != VMA_NULL)
        block->PrevFree()->NextFree() = block->NextFree();
    else
    {
        // Block was the list head: advance the head, and clear bitmap bits
        // if the list just became empty.
        uint8_t memClass = SizeToMemoryClass(block->size);
        uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
        uint32_t index = GetListIndex(memClass, secondIndex);
        VMA_ASSERT(m_FreeList[index] == block);
        m_FreeList[index] = block->NextFree();
        if (block->NextFree() == VMA_NULL)
        {
            m_InnerIsFreeBitmap[memClass] &= ~(1U << secondIndex);
            // If no second-level list in this class has entries left, clear
            // the first-level bit as well.
            if (m_InnerIsFreeBitmap[memClass] == 0)
                m_IsFreeBitmap &= ~(1UL << memClass);
        }
    }
    block->MarkTaken();
    block->UserData() = VMA_NULL;
    --m_BlocksFreeCount;
    m_BlocksFreeSize -= block->size;
}
10754
// Pushes block onto the front of the free list matching its size, sets the
// corresponding TLSF bitmap bits, and updates the cached free counters.
void VmaBlockMetadata_TLSF::InsertFreeBlock(Block* block)
{
    VMA_ASSERT(block != m_NullBlock);
    VMA_ASSERT(!block->IsFree() && "Cannot insert block twice!");

    uint8_t memClass = SizeToMemoryClass(block->size);
    uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
    uint32_t index = GetListIndex(memClass, secondIndex);
    VMA_ASSERT(index < m_ListsCount);
    // Splice in at the list head.
    block->PrevFree() = VMA_NULL;
    block->NextFree() = m_FreeList[index];
    m_FreeList[index] = block;
    if (block->NextFree() != VMA_NULL)
        block->NextFree()->PrevFree() = block;
    else
    {
        // List was empty: set the second- and first-level bitmap bits.
        m_InnerIsFreeBitmap[memClass] |= 1U << secondIndex;
        m_IsFreeBitmap |= 1UL << memClass;
    }
    ++m_BlocksFreeCount;
    m_BlocksFreeSize += block->size;
}
10777
10778void VmaBlockMetadata_TLSF::MergeBlock(Block* block, Block* prev)
10779{
10780 VMA_ASSERT(block->prevPhysical == prev && "Cannot merge seperate physical regions!");
10781 VMA_ASSERT(!prev->IsFree() && "Cannot merge block that belongs to free list!");
10782
10783 block->offset = prev->offset;
10784 block->size += prev->size;
10785 block->prevPhysical = prev->prevPhysical;
10786 if (block->prevPhysical)
10787 block->prevPhysical->nextPhysical = block;
10788 m_BlockAllocator.Free(prev);
10789}
10790
// Uses the two-level TLSF bitmaps to find the lowest free list whose blocks
// are at least `size` bytes. Returns the head of that list (and its index
// via listIndex), or VMA_NULL if no sufficiently large free block exists.
VmaBlockMetadata_TLSF::Block* VmaBlockMetadata_TLSF::FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const
{
    uint8_t memoryClass = SizeToMemoryClass(size);
    // Mask off second-level buckets smaller than the requested size.
    uint32_t innerFreeMap = m_InnerIsFreeBitmap[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass));
    if (!innerFreeMap)
    {
        // Check higher levels for avaiable blocks
        uint32_t freeMap = m_IsFreeBitmap & (~0UL << (memoryClass + 1));
        if (!freeMap)
            return VMA_NULL; // No more memory avaible

        // Find lowest free region
        memoryClass = VMA_BITSCAN_LSB(freeMap);
        innerFreeMap = m_InnerIsFreeBitmap[memoryClass];
        VMA_ASSERT(innerFreeMap != 0);
    }
    // Find lowest free subregion
    listIndex = GetListIndex(memoryClass, VMA_BITSCAN_LSB(innerFreeMap));
    VMA_ASSERT(m_FreeList[listIndex]);
    return m_FreeList[listIndex];
}
10812
// Tests whether `block` can satisfy an allocation of allocSize bytes at
// allocAlignment. On success fills pAllocationRequest (block handle, usable
// size, aligned offset) and moves the block to the front of its free list so
// the subsequent Alloc() pops it cheaply. Returns false if the block is too
// small or has a buffer/image granularity conflict.
bool VmaBlockMetadata_TLSF::CheckBlock(
    Block& block,
    uint32_t listIndex,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(block.IsFree() && "Block is already taken!");

    VkDeviceSize alignedOffset = VmaAlignUp(block.offset, allocAlignment);
    // Block must cover the alignment padding plus the allocation itself.
    if (block.size < allocSize + alignedOffset - block.offset)
        return false;

    // Check for granularity conflicts
    if (!IsVirtual() &&
        m_GranularityHandler.CheckConflictAndAlignUp(alignedOffset, allocSize, block.offset, block.size, allocType))
        return false;

    // Alloc successful
    pAllocationRequest->type = VmaAllocationRequestType::TLSF;
    pAllocationRequest->allocHandle = (VmaAllocHandle)&block;
    // Report the usable size; the caller already added the debug margin.
    pAllocationRequest->size = allocSize - GetDebugMargin();
    pAllocationRequest->customData = (void*)allocType;
    // Stash the aligned offset for Alloc().
    pAllocationRequest->algorithmData = alignedOffset;

    // Place block at the start of list if it's normal block
    // (listIndex == m_ListsCount denotes the null block, which has no list.)
    if (listIndex != m_ListsCount && block.PrevFree())
    {
        block.PrevFree()->NextFree() = block.NextFree();
        if (block.NextFree())
            block.NextFree()->PrevFree() = block.PrevFree();
        block.PrevFree() = VMA_NULL;
        block.NextFree() = m_FreeList[listIndex];
        m_FreeList[listIndex] = &block;
        if (block.NextFree())
            block.NextFree()->PrevFree() = &block;
    }

    return true;
}
10854#endif // _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
10855#endif // _VMA_BLOCK_METADATA_TLSF
10856
10857#ifndef _VMA_BLOCK_VECTOR
10858/*
10859Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
10860Vulkan memory type.
10861
10862Synchronized internally with a mutex.
10863*/
10864class VmaBlockVector
10865{
10866 friend struct VmaDefragmentationContext_T;
10867 VMA_CLASS_NO_COPY(VmaBlockVector)
10868public:
10869 VmaBlockVector(
10870 VmaAllocator hAllocator,
10871 VmaPool hParentPool,
10872 uint32_t memoryTypeIndex,
10873 VkDeviceSize preferredBlockSize,
10874 size_t minBlockCount,
10875 size_t maxBlockCount,
10876 VkDeviceSize bufferImageGranularity,
10877 bool explicitBlockSize,
10878 uint32_t algorithm,
10879 float priority,
10880 VkDeviceSize minAllocationAlignment,
10881 void* pMemoryAllocateNext);
10882 ~VmaBlockVector();
10883
10884 VmaAllocator GetAllocator() const { return m_hAllocator; }
10885 VmaPool GetParentPool() const { return m_hParentPool; }
10886 bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
10887 uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
10888 VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
10889 VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
10890 uint32_t GetAlgorithm() const { return m_Algorithm; }
10891 bool HasExplicitBlockSize() const { return m_ExplicitBlockSize; }
10892 float GetPriority() const { return m_Priority; }
10893 const void* GetAllocationNextPtr() const { return m_pMemoryAllocateNext; }
10894 // To be used only while the m_Mutex is locked. Used during defragmentation.
10895 size_t GetBlockCount() const { return m_Blocks.size(); }
10896 // To be used only while the m_Mutex is locked. Used during defragmentation.
10897 VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
10898 VMA_RW_MUTEX &GetMutex() { return m_Mutex; }
10899
10900 VkResult CreateMinBlocks();
10901 void AddStatistics(VmaStatistics& inoutStats);
10902 void AddDetailedStatistics(VmaDetailedStatistics& inoutStats);
10903 bool IsEmpty();
10904 bool IsCorruptionDetectionEnabled() const;
10905
10907 VkDeviceSize size,
10908 VkDeviceSize alignment,
10909 const VmaAllocationCreateInfo& createInfo,
10910 VmaSuballocationType suballocType,
10911 size_t allocationCount,
10912 VmaAllocation* pAllocations);
10913
10914 void Free(const VmaAllocation hAllocation);
10915
10916#if VMA_STATS_STRING_ENABLED
10917 void PrintDetailedMap(class VmaJsonWriter& json);
10918#endif
10919
10920 VkResult CheckCorruption();
10921
10922private:
10923 const VmaAllocator m_hAllocator;
10924 const VmaPool m_hParentPool;
10925 const uint32_t m_MemoryTypeIndex;
10926 const VkDeviceSize m_PreferredBlockSize;
10927 const size_t m_MinBlockCount;
10928 const size_t m_MaxBlockCount;
10929 const VkDeviceSize m_BufferImageGranularity;
10930 const bool m_ExplicitBlockSize;
10931 const uint32_t m_Algorithm;
10932 const float m_Priority;
10933 const VkDeviceSize m_MinAllocationAlignment;
10934
10935 void* const m_pMemoryAllocateNext;
10936 VMA_RW_MUTEX m_Mutex;
10937 // Incrementally sorted by sumFreeSize, ascending.
10938 VmaVector<VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*>> m_Blocks;
10939 uint32_t m_NextBlockId;
10940 bool m_IncrementalSort = true;
10941
10942 void SetIncrementalSort(bool val) { m_IncrementalSort = val; }
10943
10944 VkDeviceSize CalcMaxBlockSize() const;
10945 // Finds and removes given block from vector.
10946 void Remove(VmaDeviceMemoryBlock* pBlock);
10947 // Performs single step in sorting m_Blocks. They may not be fully sorted
10948 // after this call.
10949 void IncrementallySortBlocks();
10950 void SortByFreeSize();
10951
10952 VkResult AllocatePage(
10953 VkDeviceSize size,
10954 VkDeviceSize alignment,
10955 const VmaAllocationCreateInfo& createInfo,
10956 VmaSuballocationType suballocType,
10957 VmaAllocation* pAllocation);
10958
10959 VkResult AllocateFromBlock(
10960 VmaDeviceMemoryBlock* pBlock,
10961 VkDeviceSize size,
10962 VkDeviceSize alignment,
10963 VmaAllocationCreateFlags allocFlags,
10964 void* pUserData,
10965 VmaSuballocationType suballocType,
10966 uint32_t strategy,
10967 VmaAllocation* pAllocation);
10968
10969 VkResult CommitAllocationRequest(
10970 VmaAllocationRequest& allocRequest,
10971 VmaDeviceMemoryBlock* pBlock,
10972 VkDeviceSize alignment,
10973 VmaAllocationCreateFlags allocFlags,
10974 void* pUserData,
10975 VmaSuballocationType suballocType,
10976 VmaAllocation* pAllocation);
10977
10978 VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
10979 bool HasEmptyBlock();
10980};
10981#endif // _VMA_BLOCK_VECTOR
10982
10983#ifndef _VMA_DEFRAGMENTATION_CONTEXT
10984struct VmaDefragmentationContext_T
10985{
10986 VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
10987public:
10988 VmaDefragmentationContext_T(
10989 VmaAllocator hAllocator,
10990 const VmaDefragmentationInfo& info);
10991 ~VmaDefragmentationContext_T();
10992
10993 void GetStats(VmaDefragmentationStats& outStats) { outStats = m_GlobalStats; }
10994
10995 VkResult DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo);
10996 VkResult DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo);
10997
10998private:
10999 // Max number of allocations to ignore due to size constraints before ending single pass
11000 static const uint8_t MAX_ALLOCS_TO_IGNORE = 16;
11001 enum class CounterStatus { Pass, Ignore, End };
11002
11003 struct FragmentedBlock
11004 {
11005 uint32_t data;
11006 VmaDeviceMemoryBlock* block;
11007 };
11008 struct StateBalanced
11009 {
11010 VkDeviceSize avgFreeSize = 0;
11011 VkDeviceSize avgAllocSize = UINT64_MAX;
11012 };
11013 struct StateExtensive
11014 {
11015 enum class Operation : uint8_t
11016 {
11017 FindFreeBlockBuffer, FindFreeBlockTexture, FindFreeBlockAll,
11018 MoveBuffers, MoveTextures, MoveAll,
11019 Cleanup, Done
11020 };
11021
11022 Operation operation = Operation::FindFreeBlockTexture;
11023 size_t firstFreeBlock = SIZE_MAX;
11024 };
11025 struct MoveAllocationData
11026 {
11027 VkDeviceSize size;
11028 VkDeviceSize alignment;
11029 VmaSuballocationType type;
11031 VmaDefragmentationMove move = {};
11032 };
11033
11034 const VkDeviceSize m_MaxPassBytes;
11035 const uint32_t m_MaxPassAllocations;
11036
11037 VmaStlAllocator<VmaDefragmentationMove> m_MoveAllocator;
11038 VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>> m_Moves;
11039
11040 uint8_t m_IgnoredAllocs = 0;
11041 uint32_t m_Algorithm;
11042 uint32_t m_BlockVectorCount;
11043 VmaBlockVector* m_PoolBlockVector;
11044 VmaBlockVector** m_pBlockVectors;
11045 size_t m_ImmovableBlockCount = 0;
11046 VmaDefragmentationStats m_GlobalStats = { 0 };
11047 VmaDefragmentationStats m_PassStats = { 0 };
11048 void* m_AlgorithmState = VMA_NULL;
11049
11050 static MoveAllocationData GetMoveData(VmaAllocHandle handle, VmaBlockMetadata* metadata);
11051 CounterStatus CheckCounters(VkDeviceSize bytes);
11052 bool IncrementCounters(VkDeviceSize bytes);
11053 bool ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block);
11054 bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector);
11055
11056 bool ComputeDefragmentation(VmaBlockVector& vector, size_t index);
11057 bool ComputeDefragmentation_Fast(VmaBlockVector& vector);
11058 bool ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update);
11059 bool ComputeDefragmentation_Full(VmaBlockVector& vector);
11060 bool ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index);
11061
11062 void UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state);
11063 bool MoveDataToFreeBlocks(VmaSuballocationType currentType,
11064 VmaBlockVector& vector, size_t firstFreeBlock,
11065 bool& texturePresent, bool& bufferPresent, bool& otherPresent);
11066};
11067#endif // _VMA_DEFRAGMENTATION_CONTEXT
11068
11069#ifndef _VMA_POOL_T
// A custom memory pool: owns a block vector for sub-allocated blocks and a
// list of dedicated allocations made from this pool. Pools themselves are
// kept in an intrusive doubly-linked list via m_PrevPool/m_NextPool,
// accessed through VmaPoolListItemTraits (defined below).
struct VmaPool_T
{
    friend struct VmaPoolListItemTraits;
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;
    VmaDedicatedAllocationList m_DedicatedAllocations;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    // The id may be assigned only once (the assert enforces m_Id == 0).
    uint32_t GetId() const { return m_Id; }
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

    const char* GetName() const { return m_Name; }
    void SetName(const char* pName);

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
    char* m_Name;
    // Intrusive list links; exposed via VmaPoolListItemTraits.
    VmaPool_T* m_PrevPool = VMA_NULL;
    VmaPool_T* m_NextPool = VMA_NULL;
};
11100
// Traits adapter exposing VmaPool_T's intrusive prev/next hooks to
// VmaIntrusiveLinkedList (used for VmaAllocator_T::m_Pools).
struct VmaPoolListItemTraits
{
    typedef VmaPool_T ItemType;

    static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; }
    static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; }
    static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; }
    static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; }
};
11110#endif // _VMA_POOL_T
11111
11112#ifndef _VMA_CURRENT_BUDGET_DATA
// Per-heap live accounting of blocks and allocations, kept in atomics so it can
// be updated from any thread without taking a lock. When VMA_MEMORY_BUDGET is
// enabled it additionally caches values fetched from VK_EXT_memory_budget.
struct VmaCurrentBudgetData
{
    // Counts/bytes of VkDeviceMemory blocks and of suballocations, per heap.
    VMA_ATOMIC_UINT32 m_BlockCount[VK_MAX_MEMORY_HEAPS];
    VMA_ATOMIC_UINT32 m_AllocationCount[VK_MAX_MEMORY_HEAPS];
    VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
    VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];

#if VMA_MEMORY_BUDGET
    // Number of alloc/free operations since the last budget fetch; used to
    // decide when the cached budget is stale.
    VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
    VMA_RW_MUTEX m_BudgetMutex;
    // Snapshot of usage/budget reported by the driver at the last fetch.
    uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
    uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
    uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
#endif // VMA_MEMORY_BUDGET

    VmaCurrentBudgetData();

    void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize);
    void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize);
};
11133
11134#ifndef _VMA_CURRENT_BUDGET_DATA_FUNCTIONS
// Zero-initializes all per-heap counters (atomics cannot be memset, hence the
// explicit loop over every heap slot).
VmaCurrentBudgetData::VmaCurrentBudgetData()
{
    for (uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
    {
        m_BlockCount[heapIndex] = 0;
        m_AllocationCount[heapIndex] = 0;
        m_BlockBytes[heapIndex] = 0;
        m_AllocationBytes[heapIndex] = 0;
#if VMA_MEMORY_BUDGET
        m_VulkanUsage[heapIndex] = 0;
        m_VulkanBudget[heapIndex] = 0;
        m_BlockBytesAtBudgetFetch[heapIndex] = 0;
#endif
    }

#if VMA_MEMORY_BUDGET
    m_OperationsSinceBudgetFetch = 0;
#endif
}
11154
// Records a new suballocation of allocationSize bytes in the given heap and
// (if budget tracking is on) bumps the staleness counter.
void VmaCurrentBudgetData::AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
{
    m_AllocationBytes[heapIndex] += allocationSize;
    ++m_AllocationCount[heapIndex];
#if VMA_MEMORY_BUDGET
    ++m_OperationsSinceBudgetFetch;
#endif
}
11163
// Reverses AddAllocation for a freed suballocation. Asserts guard against
// double-free / accounting underflow.
void VmaCurrentBudgetData::RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
{
    VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize);
    m_AllocationBytes[heapIndex] -= allocationSize;
    VMA_ASSERT(m_AllocationCount[heapIndex] > 0);
    --m_AllocationCount[heapIndex];
#if VMA_MEMORY_BUDGET
    ++m_OperationsSinceBudgetFetch;
#endif
}
11174#endif // _VMA_CURRENT_BUDGET_DATA_FUNCTIONS
11175#endif // _VMA_CURRENT_BUDGET_DATA
11176
11177#ifndef _VMA_ALLOCATION_OBJECT_ALLOCATOR
11178/*
11179Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
11180*/
// Thread-safe wrapper over VmaPoolAllocator free list, for allocation of
// VmaAllocation_T objects. All access is serialized through m_Mutex.
class VmaAllocationObjectAllocator
{
    VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
public:
    // 1024 = number of VmaAllocation_T objects per pool-allocator page.
    VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks)
        : m_Allocator(pAllocationCallbacks, 1024) {}

    // Constructs a VmaAllocation_T, forwarding args to its constructor.
    template<typename... Types> VmaAllocation Allocate(Types&&... args);
    void Free(VmaAllocation hAlloc);

private:
    VMA_MUTEX m_Mutex;
    VmaPoolAllocator<VmaAllocation_T> m_Allocator;
};
11195
11196template<typename... Types>
11198{
11199 VmaMutexLock mutexLock(m_Mutex);
11200 return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
11201}
11202
11204{
11205 VmaMutexLock mutexLock(m_Mutex);
11206 m_Allocator.Free(hAlloc);
11207}
11208#endif // _VMA_ALLOCATION_OBJECT_ALLOCATOR
11209
11210#ifndef _VMA_VIRTUAL_BLOCK_T
// Implementation of the public VmaVirtualBlock handle: a block of "virtual"
// memory managed purely by metadata, with no VkDeviceMemory behind it.
struct VmaVirtualBlock_T
{
    VMA_CLASS_NO_COPY(VmaVirtualBlock_T)
public:
    const bool m_AllocationCallbacksSpecified;
    const VkAllocationCallbacks m_AllocationCallbacks;

    VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo);
    ~VmaVirtualBlock_T();

    VkResult Init() { return VK_SUCCESS; }
    bool IsEmpty() const { return m_Metadata->IsEmpty(); }
    // Thin forwarders to the metadata; VmaVirtualAllocation is bit-compatible
    // with VmaAllocHandle, hence the casts.
    void Free(VmaVirtualAllocation allocation) { m_Metadata->Free((VmaAllocHandle)allocation); }
    void SetAllocationUserData(VmaVirtualAllocation allocation, void* userData) { m_Metadata->SetAllocationUserData((VmaAllocHandle)allocation, userData); }
    void Clear() { m_Metadata->Clear(); }

    const VkAllocationCallbacks* GetAllocationCallbacks() const;
    void GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo);
    VkResult Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation,
        VkDeviceSize* outOffset);
    void GetStatistics(VmaStatistics& outStats) const;
    void CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const;
#if VMA_STATS_STRING_ENABLED
    void BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const;
#endif

private:
    // Concrete type (TLSF or Linear) chosen by the algorithm flag in createInfo.
    VmaBlockMetadata* m_Metadata;
};
11240
11241#ifndef _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
11242VmaVirtualBlock_T::VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo)
11243 : m_AllocationCallbacksSpecified(createInfo.pAllocationCallbacks != VMA_NULL),
11244 m_AllocationCallbacks(createInfo.pAllocationCallbacks != VMA_NULL ? *createInfo.pAllocationCallbacks : VmaEmptyAllocationCallbacks)
11245{
11246 const uint32_t algorithm = createInfo.flags & VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK;
11247 switch (algorithm)
11248 {
11249 default:
11250 VMA_ASSERT(0);
11251 case 0:
11252 m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true);
11253 break;
11255 m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Linear)(VK_NULL_HANDLE, 1, true);
11256 break;
11257 }
11258
11259 m_Metadata->Init(createInfo.size);
11260}
11261
// Destroys the metadata object; asserts that every virtual allocation has been
// freed first, optionally logging leaked allocations.
VmaVirtualBlock_T::~VmaVirtualBlock_T()
{
    // Define macro VMA_DEBUG_LOG to receive the list of the unfreed allocations
    if (!m_Metadata->IsEmpty())
        m_Metadata->DebugLogAllAllocations();
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased virtual allocations.
    VMA_ASSERT(m_Metadata->IsEmpty() && "Some virtual allocations were not freed before destruction of this virtual block!");

    vma_delete(GetAllocationCallbacks(), m_Metadata);
}
11273
11274const VkAllocationCallbacks* VmaVirtualBlock_T::GetAllocationCallbacks() const
11275{
11276 return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL;
11277}
11278
// Forwards to the metadata; VmaVirtualAllocation is bit-compatible with
// VmaAllocHandle, hence the cast.
void VmaVirtualBlock_T::GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo)
{
    m_Metadata->GetAllocationInfo((VmaAllocHandle)allocation, outInfo);
}
11283
11285 VkDeviceSize* outOffset)
11286{
11287 VmaAllocationRequest request = {};
11288 if (m_Metadata->CreateAllocationRequest(
11289 createInfo.size, // allocSize
11290 VMA_MAX(createInfo.alignment, (VkDeviceSize)1), // allocAlignment
11291 (createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, // upperAddress
11292 VMA_SUBALLOCATION_TYPE_UNKNOWN, // allocType - unimportant
11293 createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK, // strategy
11294 &request))
11295 {
11296 m_Metadata->Alloc(request,
11297 VMA_SUBALLOCATION_TYPE_UNKNOWN, // type - unimportant
11298 createInfo.pUserData);
11299 outAllocation = (VmaVirtualAllocation)request.allocHandle;
11300 if(outOffset)
11301 *outOffset = m_Metadata->GetAllocationOffset(request.allocHandle);
11302 return VK_SUCCESS;
11303 }
11304 outAllocation = (VmaVirtualAllocation)VK_NULL_HANDLE;
11305 if (outOffset)
11306 *outOffset = UINT64_MAX;
11308}
11309
// Fills outStats with basic statistics of this block (cleared first, then
// accumulated from the metadata).
void VmaVirtualBlock_T::GetStatistics(VmaStatistics& outStats) const
{
    VmaClearStatistics(outStats);
    m_Metadata->AddStatistics(outStats);
}
11315
// Fills outStats with detailed (per-allocation) statistics of this block;
// more expensive than GetStatistics.
void VmaVirtualBlock_T::CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const
{
    VmaClearDetailedStatistics(outStats);
    m_Metadata->AddDetailedStatistics(outStats);
}
11321
#if VMA_STATS_STRING_ENABLED
// Serializes this block's statistics (and optionally a detailed allocation
// map) as JSON into sb.
// NOTE: the `VmaDetailedStatistics stats;` declaration was dropped in the
// extracted text; restored from upstream VMA 3.0.1.
void VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const
{
    VmaJsonWriter json(GetAllocationCallbacks(), sb);
    json.BeginObject();

    VmaDetailedStatistics stats;
    CalculateDetailedStatistics(stats);

    json.WriteString("Stats");
    VmaPrintDetailedStatistics(json, stats);

    if (detailedMap)
    {
        json.WriteString("Details");
        json.BeginObject();
        m_Metadata->PrintDetailedMap(json);
        json.EndObject();
    }

    json.EndObject();
}
#endif // VMA_STATS_STRING_ENABLED
11345#endif // _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
11346#endif // _VMA_VIRTUAL_BLOCK_T
11347
11348
11349// Main allocator object.
11350struct VmaAllocator_T
11351{
11352 VMA_CLASS_NO_COPY(VmaAllocator_T)
11353public:
11354 bool m_UseMutex;
11355 uint32_t m_VulkanApiVersion;
11356 bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
11357 bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
11358 bool m_UseExtMemoryBudget;
11359 bool m_UseAmdDeviceCoherentMemory;
11360 bool m_UseKhrBufferDeviceAddress;
11361 bool m_UseExtMemoryPriority;
11362 VkDevice m_hDevice;
11363 VkInstance m_hInstance;
11364 bool m_AllocationCallbacksSpecified;
11365 VkAllocationCallbacks m_AllocationCallbacks;
11366 VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
11367 VmaAllocationObjectAllocator m_AllocationObjectAllocator;
11368
11369 // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size.
11370 uint32_t m_HeapSizeLimitMask;
11371
11372 VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
11374
11375 // Default pools.
11376 VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
11377 VmaDedicatedAllocationList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES];
11378
11379 VmaCurrentBudgetData m_Budget;
11380 VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects.
11381
11382 VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
11383 VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
11384 ~VmaAllocator_T();
11385
11386 const VkAllocationCallbacks* GetAllocationCallbacks() const
11387 {
11388 return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL;
11389 }
11390 const VmaVulkanFunctions& GetVulkanFunctions() const
11391 {
11392 return m_VulkanFunctions;
11393 }
11394
11395 VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
11396
11397 VkDeviceSize GetBufferImageGranularity() const
11398 {
11399 return VMA_MAX(
11400 static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
11401 m_PhysicalDeviceProperties.limits.bufferImageGranularity);
11402 }
11403
11404 uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
11405 uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
11406
11407 uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
11408 {
11409 VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
11410 return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
11411 }
11412 // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
11413 bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
11414 {
11417 }
11418 // Minimum alignment for all allocations in specific memory type.
11419 VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
11420 {
11421 return IsMemoryTypeNonCoherent(memTypeIndex) ?
11422 VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
11423 (VkDeviceSize)VMA_MIN_ALIGNMENT;
11424 }
11425
11426 bool IsIntegratedGpu() const
11427 {
11428 return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
11429 }
11430
11431 uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
11432
11433 void GetBufferMemoryRequirements(
11434 VkBuffer hBuffer,
11435 VkMemoryRequirements& memReq,
11436 bool& requiresDedicatedAllocation,
11437 bool& prefersDedicatedAllocation) const;
11438 void GetImageMemoryRequirements(
11439 VkImage hImage,
11440 VkMemoryRequirements& memReq,
11441 bool& requiresDedicatedAllocation,
11442 bool& prefersDedicatedAllocation) const;
11443 VkResult FindMemoryTypeIndex(
11444 uint32_t memoryTypeBits,
11445 const VmaAllocationCreateInfo* pAllocationCreateInfo,
11446 VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown.
11447 uint32_t* pMemoryTypeIndex) const;
11448
11449 // Main allocation function.
11450 VkResult AllocateMemory(
11451 const VkMemoryRequirements& vkMemReq,
11452 bool requiresDedicatedAllocation,
11453 bool prefersDedicatedAllocation,
11454 VkBuffer dedicatedBuffer,
11455 VkImage dedicatedImage,
11456 VkFlags dedicatedBufferImageUsage, // UINT32_MAX if unknown.
11457 const VmaAllocationCreateInfo& createInfo,
11458 VmaSuballocationType suballocType,
11459 size_t allocationCount,
11460 VmaAllocation* pAllocations);
11461
11462 // Main deallocation function.
11463 void FreeMemory(
11464 size_t allocationCount,
11465 const VmaAllocation* pAllocations);
11466
11467 void CalculateStatistics(VmaTotalStatistics* pStats);
11468
11469 void GetHeapBudgets(
11470 VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount);
11471
11472#if VMA_STATS_STRING_ENABLED
11473 void PrintDetailedMap(class VmaJsonWriter& json);
11474#endif
11475
11476 void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
11477
11478 VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
11479 void DestroyPool(VmaPool pool);
11480 void GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats);
11481 void CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats);
11482
11483 void SetCurrentFrameIndex(uint32_t frameIndex);
11484 uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
11485
11486 VkResult CheckPoolCorruption(VmaPool hPool);
11487 VkResult CheckCorruption(uint32_t memoryTypeBits);
11488
11489 // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
11490 VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
11491 // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
11492 void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
11493 // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
11494 VkResult BindVulkanBuffer(
11495 VkDeviceMemory memory,
11496 VkDeviceSize memoryOffset,
11497 VkBuffer buffer,
11498 const void* pNext);
11499 // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
11500 VkResult BindVulkanImage(
11501 VkDeviceMemory memory,
11502 VkDeviceSize memoryOffset,
11503 VkImage image,
11504 const void* pNext);
11505
11506 VkResult Map(VmaAllocation hAllocation, void** ppData);
11507 void Unmap(VmaAllocation hAllocation);
11508
11509 VkResult BindBufferMemory(
11510 VmaAllocation hAllocation,
11511 VkDeviceSize allocationLocalOffset,
11512 VkBuffer hBuffer,
11513 const void* pNext);
11514 VkResult BindImageMemory(
11515 VmaAllocation hAllocation,
11516 VkDeviceSize allocationLocalOffset,
11517 VkImage hImage,
11518 const void* pNext);
11519
11520 VkResult FlushOrInvalidateAllocation(
11521 VmaAllocation hAllocation,
11522 VkDeviceSize offset, VkDeviceSize size,
11523 VMA_CACHE_OPERATION op);
11524 VkResult FlushOrInvalidateAllocations(
11525 uint32_t allocationCount,
11526 const VmaAllocation* allocations,
11527 const VkDeviceSize* offsets, const VkDeviceSize* sizes,
11528 VMA_CACHE_OPERATION op);
11529
11530 void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
11531
11532 /*
11533 Returns bit mask of memory types that can support defragmentation on GPU as
11534 they support creation of required buffer for copy operations.
11535 */
11536 uint32_t GetGpuDefragmentationMemoryTypeBits();
11537
11538#if VMA_EXTERNAL_MEMORY
11539 VkExternalMemoryHandleTypeFlagsKHR GetExternalMemoryHandleTypeFlags(uint32_t memTypeIndex) const
11540 {
11541 return m_TypeExternalMemoryHandleTypes[memTypeIndex];
11542 }
11543#endif // #if VMA_EXTERNAL_MEMORY
11544
11545private:
11546 VkDeviceSize m_PreferredLargeHeapBlockSize;
11547
11548 VkPhysicalDevice m_PhysicalDevice;
11549 VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
11550 VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
11551#if VMA_EXTERNAL_MEMORY
11552 VkExternalMemoryHandleTypeFlagsKHR m_TypeExternalMemoryHandleTypes[VK_MAX_MEMORY_TYPES];
11553#endif // #if VMA_EXTERNAL_MEMORY
11554
11555 VMA_RW_MUTEX m_PoolsMutex;
11556 typedef VmaIntrusiveLinkedList<VmaPoolListItemTraits> PoolList;
11557 // Protected by m_PoolsMutex.
11558 PoolList m_Pools;
11559 uint32_t m_NextPoolId;
11560
11561 VmaVulkanFunctions m_VulkanFunctions;
11562
11563 // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
11564 uint32_t m_GlobalMemoryTypeBits;
11565
11566 void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
11567
11568#if VMA_STATIC_VULKAN_FUNCTIONS == 1
11569 void ImportVulkanFunctions_Static();
11570#endif
11571
11572 void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
11573
11574#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
11575 void ImportVulkanFunctions_Dynamic();
11576#endif
11577
11578 void ValidateVulkanFunctions();
11579
11580 VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
11581
11582 VkResult AllocateMemoryOfType(
11583 VmaPool pool,
11584 VkDeviceSize size,
11585 VkDeviceSize alignment,
11586 bool dedicatedPreferred,
11587 VkBuffer dedicatedBuffer,
11588 VkImage dedicatedImage,
11589 VkFlags dedicatedBufferImageUsage,
11590 const VmaAllocationCreateInfo& createInfo,
11591 uint32_t memTypeIndex,
11592 VmaSuballocationType suballocType,
11593 VmaDedicatedAllocationList& dedicatedAllocations,
11594 VmaBlockVector& blockVector,
11595 size_t allocationCount,
11596 VmaAllocation* pAllocations);
11597
11598 // Helper function only to be used inside AllocateDedicatedMemory.
11599 VkResult AllocateDedicatedMemoryPage(
11600 VmaPool pool,
11601 VkDeviceSize size,
11602 VmaSuballocationType suballocType,
11603 uint32_t memTypeIndex,
11604 const VkMemoryAllocateInfo& allocInfo,
11605 bool map,
11606 bool isUserDataString,
11607 bool isMappingAllowed,
11608 void* pUserData,
11609 VmaAllocation* pAllocation);
11610
11611 // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
11612 VkResult AllocateDedicatedMemory(
11613 VmaPool pool,
11614 VkDeviceSize size,
11615 VmaSuballocationType suballocType,
11616 VmaDedicatedAllocationList& dedicatedAllocations,
11617 uint32_t memTypeIndex,
11618 bool map,
11619 bool isUserDataString,
11620 bool isMappingAllowed,
11621 bool canAliasMemory,
11622 void* pUserData,
11623 float priority,
11624 VkBuffer dedicatedBuffer,
11625 VkImage dedicatedImage,
11626 VkFlags dedicatedBufferImageUsage,
11627 size_t allocationCount,
11628 VmaAllocation* pAllocations,
11629 const void* pNextChain = nullptr);
11630
11631 void FreeDedicatedMemory(const VmaAllocation allocation);
11632
11633 VkResult CalcMemTypeParams(
11634 VmaAllocationCreateInfo& outCreateInfo,
11635 uint32_t memTypeIndex,
11636 VkDeviceSize size,
11637 size_t allocationCount);
11638 VkResult CalcAllocationParams(
11639 VmaAllocationCreateInfo& outCreateInfo,
11640 bool dedicatedRequired,
11641 bool dedicatedPreferred);
11642
11643 /*
11644 Calculates and returns bit mask of memory types that can support defragmentation
11645 on GPU as they support creation of required buffer for copy operations.
11646 */
11647 uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
11648 uint32_t CalculateGlobalMemoryTypeBits() const;
11649
11650 bool GetFlushOrInvalidateRange(
11651 VmaAllocation allocation,
11652 VkDeviceSize offset, VkDeviceSize size,
11653 VkMappedMemoryRange& outRange) const;
11654
11655#if VMA_MEMORY_BUDGET
11656 void UpdateVulkanBudget();
11657#endif // #if VMA_MEMORY_BUDGET
11658};
11659
11660
11661#ifndef _VMA_MEMORY_FUNCTIONS
// Convenience overload: forwards to the pointer-based VmaMalloc using the
// allocator's callbacks.
static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
{
    return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
}
11666
// Convenience overload: forwards to the pointer-based VmaFree using the
// allocator's callbacks.
static void VmaFree(VmaAllocator hAllocator, void* ptr)
{
    VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
}
11671
// Allocates raw, suitably aligned storage for one T. Does NOT construct the
// object — callers placement-new into the returned memory.
template<typename T>
static T* VmaAllocate(VmaAllocator hAllocator)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
}
11677
// Allocates raw, suitably aligned storage for count objects of type T.
// Does NOT construct the objects. NOTE(review): sizeof(T) * count is not
// overflow-checked here — callers are trusted with sane counts.
template<typename T>
static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
}
11683
11684template<typename T>
11685static void vma_delete(VmaAllocator hAllocator, T* ptr)
11686{
11687 if(ptr != VMA_NULL)
11688 {
11689 ptr->~T();
11690 VmaFree(hAllocator, ptr);
11691 }
11692}
11693
11694template<typename T>
11695static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
11696{
11697 if(ptr != VMA_NULL)
11698 {
11699 for(size_t i = count; i--; )
11700 ptr[i].~T();
11701 VmaFree(hAllocator, ptr);
11702 }
11703}
11704#endif // _VMA_MEMORY_FUNCTIONS
11705
11706#ifndef _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS
// Constructs an empty, uninitialized block; real setup happens in Init().
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator)
    : m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL) {}
11714
// Asserts the block was unmapped and its VkDeviceMemory released (via
// Destroy()) before destruction.
VmaDeviceMemoryBlock::~VmaDeviceMemoryBlock()
{
    VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
    VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
}
11720
11722 VmaAllocator hAllocator,
11723 VmaPool hParentPool,
11724 uint32_t newMemoryTypeIndex,
11725 VkDeviceMemory newMemory,
11726 VkDeviceSize newSize,
11727 uint32_t id,
11728 uint32_t algorithm,
11729 VkDeviceSize bufferImageGranularity)
11730{
11731 VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
11732
11733 m_hParentPool = hParentPool;
11734 m_MemoryTypeIndex = newMemoryTypeIndex;
11735 m_Id = id;
11736 m_hMemory = newMemory;
11737
11738 switch (algorithm)
11739 {
11741 m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator->GetAllocationCallbacks(),
11742 bufferImageGranularity, false); // isVirtual
11743 break;
11744 default:
11745 VMA_ASSERT(0);
11746 // Fall-through.
11747 case 0:
11748 m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(),
11749 bufferImageGranularity, false); // isVirtual
11750 }
11751 m_pMetadata->Init(newSize);
11752}
11753
// Frees the block's VkDeviceMemory and destroys its metadata. Asserts the
// block is empty — an allocation leak trips the assert below.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // Define macro VMA_DEBUG_LOG to receive the list of the unfreed allocations
    if (!m_pMetadata->IsEmpty())
        m_pMetadata->DebugLogAllAllocations();
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
11770
// Called after freeing an allocation from this block. If the mapping
// hysteresis decides to drop its extra mapping and no user mappings remain,
// the memory is actually unmapped here.
void VmaDeviceMemoryBlock::PostFree(VmaAllocator hAllocator)
{
    if(m_MappingHysteresis.PostFree())
    {
        VMA_ASSERT(m_MappingHysteresis.GetExtraMapping() == 0);
        if (m_MapCount == 0)
        {
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
        }
    }
}
11783
// Debug self-check: block must own live memory of nonzero size, and the
// metadata must validate internally.
bool VmaDeviceMemoryBlock::Validate() const
{
    VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
        (m_pMetadata->GetSize() != 0));

    return m_pMetadata->Validate();
}
11791
// Temporarily maps the block and asks the metadata to verify the magic-value
// margins of all allocations. Returns the Map() error or the metadata result.
VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
{
    void* pData = nullptr;
    VkResult res = Map(hAllocator, 1, &pData);
    if (res != VK_SUCCESS)
    {
        return res;
    }

    res = m_pMetadata->CheckCorruption(pData);

    Unmap(hAllocator, 1);

    return res;
}
11807
11808VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
11809{
11810 if (count == 0)
11811 {
11812 return VK_SUCCESS;
11813 }
11814
11815 VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
11816 const uint32_t oldTotalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping();
11817 m_MappingHysteresis.PostMap();
11818 if (oldTotalMapCount != 0)
11819 {
11820 m_MapCount += count;
11821 VMA_ASSERT(m_pMappedData != VMA_NULL);
11822 if (ppData != VMA_NULL)
11823 {
11824 *ppData = m_pMappedData;
11825 }
11826 return VK_SUCCESS;
11827 }
11828 else
11829 {
11830 VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
11831 hAllocator->m_hDevice,
11832 m_hMemory,
11833 0, // offset
11835 0, // flags
11836 &m_pMappedData);
11837 if (result == VK_SUCCESS)
11838 {
11839 if (ppData != VMA_NULL)
11840 {
11841 *ppData = m_pMappedData;
11842 }
11843 m_MapCount = count;
11844 }
11845 return result;
11846 }
11847}
11848
// Decrements the mapping reference count by count; actually unmaps the memory
// only when both the user count and the hysteresis' extra mapping reach zero.
// Asserts on unbalanced Unmap calls.
void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
{
    if (count == 0)
    {
        return;
    }

    // Serializes against other Map/Unmap/Bind calls on the same VkDeviceMemory.
    VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
    if (m_MapCount >= count)
    {
        m_MapCount -= count;
        const uint32_t totalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping();
        if (totalMapCount == 0)
        {
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
        }
        m_MappingHysteresis.PostUnmap();
    }
    else
    {
        VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    }
}
11873
// Corruption-detection support: writes the magic marker into the debug margin
// immediately after the allocation's bytes (temporarily mapping the block).
// Only meaningful when VMA_DEBUG_MARGIN/VMA_DEBUG_DETECT_CORRUPTION are on.
VkResult VmaDeviceMemoryBlock::WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if (res != VK_SUCCESS)
    {
        return res;
    }

    VmaWriteMagicValue(pData, allocOffset + allocSize);

    Unmap(hAllocator, 1);
    return VK_SUCCESS;
}
11890
// Corruption-detection support: re-reads the magic marker after the
// allocation and asserts if it was overwritten (buffer overrun detected).
VkResult VmaDeviceMemoryBlock::ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if (res != VK_SUCCESS)
    {
        return res;
    }

    if (!VmaValidateMagicValue(pData, allocOffset + allocSize))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    }

    Unmap(hAllocator, 1);
    return VK_SUCCESS;
}
11910
// Binds hBuffer to this block's memory at the allocation's offset plus
// allocationLocalOffset (relative to the allocation, not the block).
VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkDeviceSize allocationLocalOffset,
    VkBuffer hBuffer,
    const void* pNext)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
        "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
    const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
    return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
}
11927
// Binds hImage to this block's memory at the allocation's offset plus
// allocationLocalOffset (relative to the allocation, not the block).
VkResult VmaDeviceMemoryBlock::BindImageMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkDeviceSize allocationLocalOffset,
    VkImage hImage,
    const void* pNext)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
        "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
    const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
    return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
}
11944#endif // _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS
11945
11946#ifndef _VMA_ALLOCATION_T_FUNCTIONS
// Constructs an allocation in the NONE state; the type-specific Init*
// functions below finish the setup. mappingAllowed records whether the user
// requested HOST_ACCESS flags, checked later at Map() time.
VmaAllocation_T::VmaAllocation_T(bool mappingAllowed)
    : m_Alignment{ 1 },
    m_Size{ 0 },
    m_pUserData{ VMA_NULL },
    m_pName{ VMA_NULL },
    m_MemoryTypeIndex{ 0 },
    m_Type{ (uint8_t)ALLOCATION_TYPE_NONE },
    m_SuballocationType{ (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN },
    m_MapCount{ 0 },
    m_Flags{ 0 }
{
    if(mappingAllowed)
        m_Flags |= (uint8_t)FLAG_MAPPING_ALLOWED;

#if VMA_STATS_STRING_ENABLED
    m_BufferImageUsage = 0;
#endif
}
11965
// Asserts correct teardown: the allocation must be unmapped and its owned
// name string already freed (via FreeName) before destruction.
VmaAllocation_T::~VmaAllocation_T()
{
    VMA_ASSERT(m_MapCount == 0 && "Allocation was not unmapped before destruction.");

    // Check if owned string was freed.
    VMA_ASSERT(m_pName == VMA_NULL);
}
11973
// Initializes this allocation as a suballocation inside a device memory block.
// May only be called once, on a fresh (TYPE_NONE) allocation.
void VmaAllocation_T::InitBlockAllocation(
    VmaDeviceMemoryBlock* block,
    VmaAllocHandle allocHandle,
    VkDeviceSize alignment,
    VkDeviceSize size,
    uint32_t memoryTypeIndex,
    VmaSuballocationType suballocationType,
    bool mapped)
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    VMA_ASSERT(block != VMA_NULL);
    m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    m_Alignment = alignment;
    m_Size = size;
    m_MemoryTypeIndex = memoryTypeIndex;
    if(mapped)
    {
        // Persistent mapping requires the allocation to have been created
        // with a HOST_ACCESS flag.
        VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
        m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP;
    }
    m_SuballocationType = (uint8_t)suballocationType;
    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_AllocHandle = allocHandle;
}
11998
// Initializes this allocation as a dedicated one, owning its whole
// VkDeviceMemory. May only be called once, on a fresh (TYPE_NONE) allocation.
// pMappedData non-null means the memory is persistently mapped.
void VmaAllocation_T::InitDedicatedAllocation(
    VmaPool hParentPool,
    uint32_t memoryTypeIndex,
    VkDeviceMemory hMemory,
    VmaSuballocationType suballocationType,
    void* pMappedData,
    VkDeviceSize size)
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    VMA_ASSERT(hMemory != VK_NULL_HANDLE);
    m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
    // Dedicated memory starts at offset 0, so alignment is irrelevant.
    m_Alignment = 0;
    m_Size = size;
    m_MemoryTypeIndex = memoryTypeIndex;
    m_SuballocationType = (uint8_t)suballocationType;
    if(pMappedData != VMA_NULL)
    {
        VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
        m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP;
    }
    m_DedicatedAllocation.m_hParentPool = hParentPool;
    m_DedicatedAllocation.m_hMemory = hMemory;
    m_DedicatedAllocation.m_pMappedData = pMappedData;
    // List hooks for the dedicated-allocation list; linked by the caller.
    m_DedicatedAllocation.m_Prev = VMA_NULL;
    m_DedicatedAllocation.m_Next = VMA_NULL;
}
12025
12026void VmaAllocation_T::SetName(VmaAllocator hAllocator, const char* pName)
12027{
12028 VMA_ASSERT(pName == VMA_NULL || pName != m_pName);
12029
12030 FreeName(hAllocator);
12031
12032 if (pName != VMA_NULL)
12033 m_pName = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), pName);
12034}
12035
// Swaps the underlying block suballocation between `this` and `allocation`
// (both must be ALLOCATION_TYPE_BLOCK). Used by defragmentation to move this
// allocation into the destination slot held by a temporary allocation object.
// Unmaps this allocation's old block first, then fixes up the metadata
// user-data back-pointers on both sides of the swap.
// \return The map reference count, so the caller can re-map the new block.
uint8_t VmaAllocation_T::SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation)
{
    VMA_ASSERT(allocation != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    VMA_ASSERT(allocation->m_Type == ALLOCATION_TYPE_BLOCK);

    if (m_MapCount != 0)
        m_BlockAllocation.m_Block->Unmap(hAllocator, m_MapCount);

    // The old slot now belongs to `allocation`, the new slot to `this`.
    m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, allocation);
    VMA_SWAP(m_BlockAllocation, allocation->m_BlockAllocation);
    m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, this);

#if VMA_STATS_STRING_ENABLED
    VMA_SWAP(m_BufferImageUsage, allocation->m_BufferImageUsage);
#endif
    return m_MapCount;
}
12054
12055VmaAllocHandle VmaAllocation_T::GetAllocHandle() const
12056{
12057 switch (m_Type)
12058 {
12059 case ALLOCATION_TYPE_BLOCK:
12060 return m_BlockAllocation.m_AllocHandle;
12061 case ALLOCATION_TYPE_DEDICATED:
12062 return VK_NULL_HANDLE;
12063 default:
12064 VMA_ASSERT(0);
12065 return VK_NULL_HANDLE;
12066 }
12067}
12068
12069VkDeviceSize VmaAllocation_T::GetOffset() const
12070{
12071 switch (m_Type)
12072 {
12073 case ALLOCATION_TYPE_BLOCK:
12074 return m_BlockAllocation.m_Block->m_pMetadata->GetAllocationOffset(m_BlockAllocation.m_AllocHandle);
12075 case ALLOCATION_TYPE_DEDICATED:
12076 return 0;
12077 default:
12078 VMA_ASSERT(0);
12079 return 0;
12080 }
12081}
12082
12083VmaPool VmaAllocation_T::GetParentPool() const
12084{
12085 switch (m_Type)
12086 {
12087 case ALLOCATION_TYPE_BLOCK:
12088 return m_BlockAllocation.m_Block->GetParentPool();
12089 case ALLOCATION_TYPE_DEDICATED:
12090 return m_DedicatedAllocation.m_hParentPool;
12091 default:
12092 VMA_ASSERT(0);
12093 return VK_NULL_HANDLE;
12094 }
12095}
12096
12097VkDeviceMemory VmaAllocation_T::GetMemory() const
12098{
12099 switch (m_Type)
12100 {
12101 case ALLOCATION_TYPE_BLOCK:
12102 return m_BlockAllocation.m_Block->GetDeviceMemory();
12103 case ALLOCATION_TYPE_DEDICATED:
12104 return m_DedicatedAllocation.m_hMemory;
12105 default:
12106 VMA_ASSERT(0);
12107 return VK_NULL_HANDLE;
12108 }
12109}
12110
12111void* VmaAllocation_T::GetMappedData() const
12112{
12113 switch (m_Type)
12114 {
12115 case ALLOCATION_TYPE_BLOCK:
12116 if (m_MapCount != 0 || IsPersistentMap())
12117 {
12118 void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
12119 VMA_ASSERT(pBlockData != VMA_NULL);
12120 return (char*)pBlockData + GetOffset();
12121 }
12122 else
12123 {
12124 return VMA_NULL;
12125 }
12126 break;
12127 case ALLOCATION_TYPE_DEDICATED:
12128 VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0 || IsPersistentMap()));
12129 return m_DedicatedAllocation.m_pMappedData;
12130 default:
12131 VMA_ASSERT(0);
12132 return VMA_NULL;
12133 }
12134}
12135
12136void VmaAllocation_T::BlockAllocMap()
12137{
12138 VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
12139 VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
12140
12141 if (m_MapCount < 0xFF)
12142 {
12143 ++m_MapCount;
12144 }
12145 else
12146 {
12147 VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
12148 }
12149}
12150
12151void VmaAllocation_T::BlockAllocUnmap()
12152{
12153 VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
12154
12155 if (m_MapCount > 0)
12156 {
12157 --m_MapCount;
12158 }
12159 else
12160 {
12161 VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
12162 }
12163}
12164
12165VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
12166{
12167 VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
12168 VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
12169
12170 if (m_MapCount != 0 || IsPersistentMap())
12171 {
12172 if (m_MapCount < 0xFF)
12173 {
12174 VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
12175 *ppData = m_DedicatedAllocation.m_pMappedData;
12176 ++m_MapCount;
12177 return VK_SUCCESS;
12178 }
12179 else
12180 {
12181 VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
12183 }
12184 }
12185 else
12186 {
12187 VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
12188 hAllocator->m_hDevice,
12189 m_DedicatedAllocation.m_hMemory,
12190 0, // offset
12192 0, // flags
12193 ppData);
12194 if (result == VK_SUCCESS)
12195 {
12196 m_DedicatedAllocation.m_pMappedData = *ppData;
12197 m_MapCount = 1;
12198 }
12199 return result;
12200 }
12201}
12202
12203void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
12204{
12205 VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
12206
12207 if (m_MapCount > 0)
12208 {
12209 --m_MapCount;
12210 if (m_MapCount == 0 && !IsPersistentMap())
12211 {
12212 m_DedicatedAllocation.m_pMappedData = VMA_NULL;
12213 (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
12214 hAllocator->m_hDevice,
12215 m_DedicatedAllocation.m_hMemory);
12216 }
12217 }
12218 else
12219 {
12220 VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
12221 }
12222}
12223
12224#if VMA_STATS_STRING_ENABLED
// Records the buffer/image usage flags the resource was created with, for
// statistics output only. May be set at most once (while still zero).
void VmaAllocation_T::InitBufferImageUsage(uint32_t bufferImageUsage)
{
    VMA_ASSERT(m_BufferImageUsage == 0);
    m_BufferImageUsage = bufferImageUsage;
}
12230
// Writes this allocation's parameters as key/value pairs into an
// already-open JSON object (statistics string support only).
// Optional fields (CustomData, Name) are emitted only when set.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);
    json.WriteString("Usage");
    json.WriteNumber(m_BufferImageUsage);

    if (m_pUserData != VMA_NULL)
    {
        // User data is opaque; print it as a pointer value.
        json.WriteString("CustomData");
        json.BeginString();
        json.ContinueString_Pointer(m_pUserData);
        json.EndString();
    }
    if (m_pName != VMA_NULL)
    {
        json.WriteString("Name");
        json.WriteString(m_pName);
    }
}
12254#endif // VMA_STATS_STRING_ENABLED
12255
12256void VmaAllocation_T::FreeName(VmaAllocator hAllocator)
12257{
12258 if(m_pName)
12259 {
12260 VmaFreeString(hAllocator->GetAllocationCallbacks(), m_pName);
12261 m_pName = VMA_NULL;
12262 }
12263}
12264#endif // _VMA_ALLOCATION_T_FUNCTIONS
12265
12266#ifndef _VMA_BLOCK_VECTOR_FUNCTIONS
// Constructs an (initially empty) vector of VmaDeviceMemoryBlock for one
// memory type. Blocks are created lazily on allocation (or eagerly via
// CreateMinBlocks).
// \param explicitBlockSize  True when the block size was fixed by the user
//     (custom pool); disables the 1/8-1/4-1/2 progressive sizing heuristic.
// \param algorithm  0 for default TLSF, or VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
// \param pMemoryAllocateNext  Extra pNext chain attached to each VkMemoryAllocateInfo.
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    VmaPool hParentPool,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    bool explicitBlockSize,
    uint32_t algorithm,
    float priority,
    VkDeviceSize minAllocationAlignment,
    void* pMemoryAllocateNext)
    : m_hAllocator(hAllocator),
    m_hParentPool(hParentPool),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_Priority(priority),
    m_MinAllocationAlignment(minAllocationAlignment),
    m_pMemoryAllocateNext(pMemoryAllocateNext),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_NextBlockId(0) {}
12294
// Destroys all remaining blocks, releasing their VkDeviceMemory back to
// Vulkan. Iterates in reverse order over the vector.
VmaBlockVector::~VmaBlockVector()
{
    for (size_t i = m_Blocks.size(); i--; )
    {
        m_Blocks[i]->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, m_Blocks[i]);
    }
}
12303
12304VkResult VmaBlockVector::CreateMinBlocks()
12305{
12306 for (size_t i = 0; i < m_MinBlockCount; ++i)
12307 {
12308 VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
12309 if (res != VK_SUCCESS)
12310 {
12311 return res;
12312 }
12313 }
12314 return VK_SUCCESS;
12315}
12316
12317void VmaBlockVector::AddStatistics(VmaStatistics& inoutStats)
12318{
12319 VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12320
12321 const size_t blockCount = m_Blocks.size();
12322 for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12323 {
12324 const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12325 VMA_ASSERT(pBlock);
12326 VMA_HEAVY_ASSERT(pBlock->Validate());
12327 pBlock->m_pMetadata->AddStatistics(inoutStats);
12328 }
12329}
12330
12331void VmaBlockVector::AddDetailedStatistics(VmaDetailedStatistics& inoutStats)
12332{
12333 VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12334
12335 const size_t blockCount = m_Blocks.size();
12336 for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12337 {
12338 const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12339 VMA_ASSERT(pBlock);
12340 VMA_HEAVY_ASSERT(pBlock->Validate());
12341 pBlock->m_pMetadata->AddDetailedStatistics(inoutStats);
12342 }
12343}
12344
12345bool VmaBlockVector::IsEmpty()
12346{
12347 VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12348 return m_Blocks.empty();
12349}
12350
12351bool VmaBlockVector::IsCorruptionDetectionEnabled() const
12352{
12354 return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
12355 (VMA_DEBUG_MARGIN > 0) &&
12356 (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
12357 (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
12358}
12359
12361 VkDeviceSize size,
12362 VkDeviceSize alignment,
12363 const VmaAllocationCreateInfo& createInfo,
12364 VmaSuballocationType suballocType,
12365 size_t allocationCount,
12366 VmaAllocation* pAllocations)
12367{
12368 size_t allocIndex;
12369 VkResult res = VK_SUCCESS;
12370
12371 alignment = VMA_MAX(alignment, m_MinAllocationAlignment);
12372
12373 if (IsCorruptionDetectionEnabled())
12374 {
12375 size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12376 alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12377 }
12378
12379 {
12380 VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12381 for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12382 {
12383 res = AllocatePage(
12384 size,
12385 alignment,
12386 createInfo,
12387 suballocType,
12388 pAllocations + allocIndex);
12389 if (res != VK_SUCCESS)
12390 {
12391 break;
12392 }
12393 }
12394 }
12395
12396 if (res != VK_SUCCESS)
12397 {
12398 // Free all already created allocations.
12399 while (allocIndex--)
12400 Free(pAllocations[allocIndex]);
12401 memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
12402 }
12403
12404 return res;
12405}
12406
12407VkResult VmaBlockVector::AllocatePage(
12408 VkDeviceSize size,
12409 VkDeviceSize alignment,
12410 const VmaAllocationCreateInfo& createInfo,
12411 VmaSuballocationType suballocType,
12412 VmaAllocation* pAllocation)
12413{
12414 const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12415
12416 VkDeviceSize freeMemory;
12417 {
12418 const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
12419 VmaBudget heapBudget = {};
12420 m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1);
12421 freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
12422 }
12423
12424 const bool canFallbackToDedicated = !HasExplicitBlockSize() &&
12426 const bool canCreateNewBlock =
12427 ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
12428 (m_Blocks.size() < m_MaxBlockCount) &&
12429 (freeMemory >= size || !canFallbackToDedicated);
12430 uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
12431
12432 // Upper address can only be used with linear allocator and within single memory block.
12433 if (isUpperAddress &&
12434 (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
12435 {
12437 }
12438
12439 // Early reject: requested allocation size is larger that maximum block size for this block vector.
12440 if (size + VMA_DEBUG_MARGIN > m_PreferredBlockSize)
12441 {
12443 }
12444
12445 // 1. Search existing allocations. Try to allocate.
12446 if (m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12447 {
12448 // Use only last block.
12449 if (!m_Blocks.empty())
12450 {
12451 VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
12452 VMA_ASSERT(pCurrBlock);
12453 VkResult res = AllocateFromBlock(
12454 pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
12455 if (res == VK_SUCCESS)
12456 {
12457 VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
12458 IncrementallySortBlocks();
12459 return VK_SUCCESS;
12460 }
12461 }
12462 }
12463 else
12464 {
12465 if (strategy != VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT) // MIN_MEMORY or default
12466 {
12467 const bool isHostVisible =
12468 (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
12469 if(isHostVisible)
12470 {
12471 const bool isMappingAllowed = (createInfo.flags &
12473 /*
12474 For non-mappable allocations, check blocks that are not mapped first.
12475 For mappable allocations, check blocks that are already mapped first.
12476 This way, having many blocks, we will separate mappable and non-mappable allocations,
12477 hopefully limiting the number of blocks that are mapped, which will help tools like RenderDoc.
12478 */
12479 for(size_t mappingI = 0; mappingI < 2; ++mappingI)
12480 {
12481 // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12482 for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12483 {
12484 VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12485 VMA_ASSERT(pCurrBlock);
12486 const bool isBlockMapped = pCurrBlock->GetMappedData() != VMA_NULL;
12487 if((mappingI == 0) == (isMappingAllowed == isBlockMapped))
12488 {
12489 VkResult res = AllocateFromBlock(
12490 pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
12491 if (res == VK_SUCCESS)
12492 {
12493 VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12494 IncrementallySortBlocks();
12495 return VK_SUCCESS;
12496 }
12497 }
12498 }
12499 }
12500 }
12501 else
12502 {
12503 // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12504 for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12505 {
12506 VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12507 VMA_ASSERT(pCurrBlock);
12508 VkResult res = AllocateFromBlock(
12509 pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
12510 if (res == VK_SUCCESS)
12511 {
12512 VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12513 IncrementallySortBlocks();
12514 return VK_SUCCESS;
12515 }
12516 }
12517 }
12518 }
12519 else // VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
12520 {
12521 // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12522 for (size_t blockIndex = m_Blocks.size(); blockIndex--; )
12523 {
12524 VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12525 VMA_ASSERT(pCurrBlock);
12526 VkResult res = AllocateFromBlock(pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
12527 if (res == VK_SUCCESS)
12528 {
12529 VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12530 IncrementallySortBlocks();
12531 return VK_SUCCESS;
12532 }
12533 }
12534 }
12535 }
12536
12537 // 2. Try to create new block.
12538 if (canCreateNewBlock)
12539 {
12540 // Calculate optimal size for new block.
12541 VkDeviceSize newBlockSize = m_PreferredBlockSize;
12542 uint32_t newBlockSizeShift = 0;
12543 const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
12544
12545 if (!m_ExplicitBlockSize)
12546 {
12547 // Allocate 1/8, 1/4, 1/2 as first blocks.
12548 const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
12549 for (uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
12550 {
12551 const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12552 if (smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
12553 {
12554 newBlockSize = smallerNewBlockSize;
12555 ++newBlockSizeShift;
12556 }
12557 else
12558 {
12559 break;
12560 }
12561 }
12562 }
12563
12564 size_t newBlockIndex = 0;
12565 VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12566 CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12567 // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
12568 if (!m_ExplicitBlockSize)
12569 {
12570 while (res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
12571 {
12572 const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12573 if (smallerNewBlockSize >= size)
12574 {
12575 newBlockSize = smallerNewBlockSize;
12576 ++newBlockSizeShift;
12577 res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12578 CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12579 }
12580 else
12581 {
12582 break;
12583 }
12584 }
12585 }
12586
12587 if (res == VK_SUCCESS)
12588 {
12589 VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
12590 VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
12591
12592 res = AllocateFromBlock(
12593 pBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
12594 if (res == VK_SUCCESS)
12595 {
12596 VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
12597 IncrementallySortBlocks();
12598 return VK_SUCCESS;
12599 }
12600 else
12601 {
12602 // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
12604 }
12605 }
12606 }
12607
12609}
12610
// Returns hAllocation's memory to this block vector and destroys the
// allocation object. At most one empty block is kept as hysteresis (none
// when over budget); a block selected for deletion is destroyed AFTER the
// mutex is released, for performance.
void VmaBlockVector::Free(const VmaAllocation hAllocation)
{
    VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;

    // Query budget up front, outside of the vector lock.
    bool budgetExceeded = false;
    {
        const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
        VmaBudget heapBudget = {};
        m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1);
        budgetExceeded = heapBudget.usage >= heapBudget.budget;
    }

    // Scope for lock.
    {
        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);

        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();

        if (IsCorruptionDetectionEnabled())
        {
            // Verify the magic value written after this allocation is intact.
            VkResult res = pBlock->ValidateMagicValueAfterAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
        }

        if (hAllocation->IsPersistentMap())
        {
            // Drop the map reference the persistent mapping held on the block.
            pBlock->Unmap(m_hAllocator, 1);
        }

        const bool hadEmptyBlockBeforeFree = HasEmptyBlock();
        pBlock->m_pMetadata->Free(hAllocation->GetAllocHandle());
        pBlock->PostFree(m_hAllocator);
        VMA_HEAVY_ASSERT(pBlock->Validate());

        VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);

        const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
        // pBlock became empty after this deallocation.
        if (pBlock->m_pMetadata->IsEmpty())
        {
            // Already had empty block. We don't want to have two, so delete this one.
            if ((hadEmptyBlockBeforeFree || budgetExceeded) && canDeleteBlock)
            {
                pBlockToDelete = pBlock;
                Remove(pBlock);
            }
            // else: We now have one empty block - leave it. A hysteresis to avoid allocating whole block back and forth.
        }
        // pBlock didn't become empty, but we have another empty block - find and free that one.
        // (This is optional, heuristics.)
        else if (hadEmptyBlockBeforeFree && canDeleteBlock)
        {
            VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
            if (pLastBlock->m_pMetadata->IsEmpty())
            {
                pBlockToDelete = pLastBlock;
                m_Blocks.pop_back();
            }
        }

        IncrementallySortBlocks();
    }

    // Destruction of a free block. Deferred until this point, outside of mutex
    // lock, for performance reason.
    if (pBlockToDelete != VMA_NULL)
    {
        VMA_DEBUG_LOG(" Deleted empty block #%u", pBlockToDelete->GetId());
        pBlockToDelete->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, pBlockToDelete);
    }

    // Update budget bookkeeping and return the allocation object to its pool.
    m_hAllocator->m_Budget.RemoveAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), hAllocation->GetSize());
    m_hAllocator->m_AllocationObjectAllocator.Free(hAllocation);
}
12686
12687VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
12688{
12689 VkDeviceSize result = 0;
12690 for (size_t i = m_Blocks.size(); i--; )
12691 {
12692 result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
12693 if (result >= m_PreferredBlockSize)
12694 {
12695 break;
12696 }
12697 }
12698 return result;
12699}
12700
12701void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
12702{
12703 for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12704 {
12705 if (m_Blocks[blockIndex] == pBlock)
12706 {
12707 VmaVectorRemove(m_Blocks, blockIndex);
12708 return;
12709 }
12710 }
12711 VMA_ASSERT(0);
12712}
12713
12714void VmaBlockVector::IncrementallySortBlocks()
12715{
12716 if (!m_IncrementalSort)
12717 return;
12718 if (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12719 {
12720 // Bubble sort only until first swap.
12721 for (size_t i = 1; i < m_Blocks.size(); ++i)
12722 {
12723 if (m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
12724 {
12725 VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
12726 return;
12727 }
12728 }
12729 }
12730}
12731
12732void VmaBlockVector::SortByFreeSize()
12733{
12734 VMA_SORT(m_Blocks.begin(), m_Blocks.end(),
12735 [](VmaDeviceMemoryBlock* b1, VmaDeviceMemoryBlock* b2) -> bool
12736 {
12737 return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize();
12738 });
12739}
12740
12741VkResult VmaBlockVector::AllocateFromBlock(
12742 VmaDeviceMemoryBlock* pBlock,
12743 VkDeviceSize size,
12744 VkDeviceSize alignment,
12745 VmaAllocationCreateFlags allocFlags,
12746 void* pUserData,
12747 VmaSuballocationType suballocType,
12748 uint32_t strategy,
12749 VmaAllocation* pAllocation)
12750{
12751 const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12752
12753 VmaAllocationRequest currRequest = {};
12754 if (pBlock->m_pMetadata->CreateAllocationRequest(
12755 size,
12756 alignment,
12757 isUpperAddress,
12758 suballocType,
12759 strategy,
12760 &currRequest))
12761 {
12762 return CommitAllocationRequest(currRequest, pBlock, alignment, allocFlags, pUserData, suballocType, pAllocation);
12763 }
12765}
12766
12767VkResult VmaBlockVector::CommitAllocationRequest(
12768 VmaAllocationRequest& allocRequest,
12769 VmaDeviceMemoryBlock* pBlock,
12770 VkDeviceSize alignment,
12771 VmaAllocationCreateFlags allocFlags,
12772 void* pUserData,
12773 VmaSuballocationType suballocType,
12774 VmaAllocation* pAllocation)
12775{
12776 const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12777 const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12778 const bool isMappingAllowed = (allocFlags &
12780
12781 pBlock->PostAlloc();
12782 // Allocate from pCurrBlock.
12783 if (mapped)
12784 {
12785 VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
12786 if (res != VK_SUCCESS)
12787 {
12788 return res;
12789 }
12790 }
12791
12792 *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(isMappingAllowed);
12793 pBlock->m_pMetadata->Alloc(allocRequest, suballocType, *pAllocation);
12794 (*pAllocation)->InitBlockAllocation(
12795 pBlock,
12796 allocRequest.allocHandle,
12797 alignment,
12798 allocRequest.size, // Not size, as actual allocation size may be larger than requested!
12799 m_MemoryTypeIndex,
12800 suballocType,
12801 mapped);
12802 VMA_HEAVY_ASSERT(pBlock->Validate());
12803 if (isUserDataString)
12804 (*pAllocation)->SetName(m_hAllocator, (const char*)pUserData);
12805 else
12806 (*pAllocation)->SetUserData(m_hAllocator, pUserData);
12807 m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), allocRequest.size);
12808 if (VMA_DEBUG_INITIALIZE_ALLOCATIONS)
12809 {
12810 m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
12811 }
12812 if (IsCorruptionDetectionEnabled())
12813 {
12814 VkResult res = pBlock->WriteMagicValueAfterAllocation(m_hAllocator, (*pAllocation)->GetOffset(), allocRequest.size);
12815 VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
12816 }
12817 return VK_SUCCESS;
12818}
12819
12820VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
12821{
12823 allocInfo.pNext = m_pMemoryAllocateNext;
12824 allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
12825 allocInfo.allocationSize = blockSize;
12826
12827#if VMA_BUFFER_DEVICE_ADDRESS
12828 // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
12829 VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
12830 if (m_hAllocator->m_UseKhrBufferDeviceAddress)
12831 {
12832 allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
12833 VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
12834 }
12835#endif // VMA_BUFFER_DEVICE_ADDRESS
12836
12837#if VMA_MEMORY_PRIORITY
12838 VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
12839 if (m_hAllocator->m_UseExtMemoryPriority)
12840 {
12841 VMA_ASSERT(m_Priority >= 0.f && m_Priority <= 1.f);
12842 priorityInfo.priority = m_Priority;
12843 VmaPnextChainPushFront(&allocInfo, &priorityInfo);
12844 }
12845#endif // VMA_MEMORY_PRIORITY
12846
12847#if VMA_EXTERNAL_MEMORY
12848 // Attach VkExportMemoryAllocateInfoKHR if necessary.
12849 VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
12850 exportMemoryAllocInfo.handleTypes = m_hAllocator->GetExternalMemoryHandleTypeFlags(m_MemoryTypeIndex);
12851 if (exportMemoryAllocInfo.handleTypes != 0)
12852 {
12853 VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
12854 }
12855#endif // VMA_EXTERNAL_MEMORY
12856
12857 VkDeviceMemory mem = VK_NULL_HANDLE;
12858 VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
12859 if (res < 0)
12860 {
12861 return res;
12862 }
12863
12864 // New VkDeviceMemory successfully created.
12865
12866 // Create new Allocation for it.
12867 VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
12868 pBlock->Init(
12869 m_hAllocator,
12870 m_hParentPool,
12871 m_MemoryTypeIndex,
12872 mem,
12873 allocInfo.allocationSize,
12874 m_NextBlockId++,
12875 m_Algorithm,
12876 m_BufferImageGranularity);
12877
12878 m_Blocks.push_back(pBlock);
12879 if (pNewBlockIndex != VMA_NULL)
12880 {
12881 *pNewBlockIndex = m_Blocks.size() - 1;
12882 }
12883
12884 return VK_SUCCESS;
12885}
12886
12887bool VmaBlockVector::HasEmptyBlock()
12888{
12889 for (size_t index = 0, count = m_Blocks.size(); index < count; ++index)
12890 {
12891 VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
12892 if (pBlock->m_pMetadata->IsEmpty())
12893 {
12894 return true;
12895 }
12896 }
12897 return false;
12898}
12899
12900#if VMA_STATS_STRING_ENABLED
// Writes a JSON object mapping each block's id to its map reference count
// and detailed metadata map (statistics string support only).
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);


    json.BeginObject();
    for (size_t i = 0; i < m_Blocks.size(); ++i)
    {
        // Key: the block id as a string.
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        json.BeginObject();
        json.WriteString("MapRefCount");
        json.WriteNumber(m_Blocks[i]->GetMapRefCount());

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
        json.EndObject();
    }
    json.EndObject();
}
12922#endif // VMA_STATS_STRING_ENABLED
12923
12924VkResult VmaBlockVector::CheckCorruption()
12925{
12926 if (!IsCorruptionDetectionEnabled())
12927 {
12929 }
12930
12931 VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12932 for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12933 {
12934 VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12935 VMA_ASSERT(pBlock);
12936 VkResult res = pBlock->CheckCorruption(m_hAllocator);
12937 if (res != VK_SUCCESS)
12938 {
12939 return res;
12940 }
12941 }
12942 return VK_SUCCESS;
12943}
12944
12945#endif // _VMA_BLOCK_VECTOR_FUNCTIONS
12946
12947#ifndef _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
12948VmaDefragmentationContext_T::VmaDefragmentationContext_T(
12949 VmaAllocator hAllocator,
12950 const VmaDefragmentationInfo& info)
12951 : m_MaxPassBytes(info.maxBytesPerPass == 0 ? VK_WHOLE_SIZE : info.maxBytesPerPass),
12952 m_MaxPassAllocations(info.maxAllocationsPerPass == 0 ? UINT32_MAX : info.maxAllocationsPerPass),
12953 m_MoveAllocator(hAllocator->GetAllocationCallbacks()),
12954 m_Moves(m_MoveAllocator)
12955{
12957
12958 if (info.pool != VMA_NULL)
12959 {
12960 m_BlockVectorCount = 1;
12961 m_PoolBlockVector = &info.pool->m_BlockVector;
12962 m_pBlockVectors = &m_PoolBlockVector;
12963 m_PoolBlockVector->SetIncrementalSort(false);
12964 m_PoolBlockVector->SortByFreeSize();
12965 }
12966 else
12967 {
12968 m_BlockVectorCount = hAllocator->GetMemoryTypeCount();
12969 m_PoolBlockVector = VMA_NULL;
12970 m_pBlockVectors = hAllocator->m_pBlockVectors;
12971 for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
12972 {
12973 VmaBlockVector* vector = m_pBlockVectors[i];
12974 if (vector != VMA_NULL)
12975 {
12976 vector->SetIncrementalSort(false);
12977 vector->SortByFreeSize();
12978 }
12979 }
12980 }
12981
12982 switch (m_Algorithm)
12983 {
12984 case 0: // Default algorithm
12987 {
12988 m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount);
12989 break;
12990 }
12992 {
12993 if (hAllocator->GetBufferImageGranularity() > 1)
12994 {
12995 m_AlgorithmState = vma_new_array(hAllocator, StateExtensive, m_BlockVectorCount);
12996 }
12997 break;
12998 }
12999 }
13000}
13001
13002VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
13003{
13004 if (m_PoolBlockVector != VMA_NULL)
13005 {
13006 m_PoolBlockVector->SetIncrementalSort(true);
13007 }
13008 else
13009 {
13010 for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
13011 {
13012 VmaBlockVector* vector = m_pBlockVectors[i];
13013 if (vector != VMA_NULL)
13014 vector->SetIncrementalSort(true);
13015 }
13016 }
13017
13018 if (m_AlgorithmState)
13019 {
13020 switch (m_Algorithm)
13021 {
13023 vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateBalanced*>(m_AlgorithmState), m_BlockVectorCount);
13024 break;
13026 vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateExtensive*>(m_AlgorithmState), m_BlockVectorCount);
13027 break;
13028 default:
13029 VMA_ASSERT(0);
13030 }
13031 }
13032}
13033
13034VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo)
13035{
13036 if (m_PoolBlockVector != VMA_NULL)
13037 {
13038 VmaMutexLockWrite lock(m_PoolBlockVector->GetMutex(), m_PoolBlockVector->GetAllocator()->m_UseMutex);
13039
13040 if (m_PoolBlockVector->GetBlockCount() > 1)
13041 ComputeDefragmentation(*m_PoolBlockVector, 0);
13042 else if (m_PoolBlockVector->GetBlockCount() == 1)
13043 ReallocWithinBlock(*m_PoolBlockVector, m_PoolBlockVector->GetBlock(0));
13044 }
13045 else
13046 {
13047 for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
13048 {
13049 if (m_pBlockVectors[i] != VMA_NULL)
13050 {
13051 VmaMutexLockWrite lock(m_pBlockVectors[i]->GetMutex(), m_pBlockVectors[i]->GetAllocator()->m_UseMutex);
13052
13053 if (m_pBlockVectors[i]->GetBlockCount() > 1)
13054 {
13055 if (ComputeDefragmentation(*m_pBlockVectors[i], i))
13056 break;
13057 }
13058 else if (m_pBlockVectors[i]->GetBlockCount() == 1)
13059 {
13060 if (ReallocWithinBlock(*m_pBlockVectors[i], m_pBlockVectors[i]->GetBlock(0)))
13061 break;
13062 }
13063 }
13064 }
13065 }
13066
13067 moveInfo.moveCount = static_cast<uint32_t>(m_Moves.size());
13068 if (moveInfo.moveCount > 0)
13069 {
13070 moveInfo.pMoves = m_Moves.data();
13071 return VK_INCOMPLETE;
13072 }
13073
13074 moveInfo.pMoves = VMA_NULL;
13075 return VK_SUCCESS;
13076}
13077
13078VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo)
13079{
13080 VMA_ASSERT(moveInfo.moveCount > 0 ? moveInfo.pMoves != VMA_NULL : true);
13081
13082 VkResult result = VK_SUCCESS;
13083 VmaStlAllocator<FragmentedBlock> blockAllocator(m_MoveAllocator.m_pCallbacks);
13084 VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> immovableBlocks(blockAllocator);
13085 VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> mappedBlocks(blockAllocator);
13086
13087 VmaAllocator allocator = VMA_NULL;
13088 for (uint32_t i = 0; i < moveInfo.moveCount; ++i)
13089 {
13090 VmaDefragmentationMove& move = moveInfo.pMoves[i];
13091 size_t prevCount = 0, currentCount = 0;
13092 VkDeviceSize freedBlockSize = 0;
13093
13094 uint32_t vectorIndex;
13095 VmaBlockVector* vector;
13096 if (m_PoolBlockVector != VMA_NULL)
13097 {
13098 vectorIndex = 0;
13099 vector = m_PoolBlockVector;
13100 }
13101 else
13102 {
13103 vectorIndex = move.srcAllocation->GetMemoryTypeIndex();
13104 vector = m_pBlockVectors[vectorIndex];
13105 VMA_ASSERT(vector != VMA_NULL);
13106 }
13107
13108 switch (move.operation)
13109 {
13111 {
13112 uint8_t mapCount = move.srcAllocation->SwapBlockAllocation(vector->m_hAllocator, move.dstTmpAllocation);
13113 if (mapCount > 0)
13114 {
13115 allocator = vector->m_hAllocator;
13116 VmaDeviceMemoryBlock* newMapBlock = move.srcAllocation->GetBlock();
13117 bool notPresent = true;
13118 for (FragmentedBlock& block : mappedBlocks)
13119 {
13120 if (block.block == newMapBlock)
13121 {
13122 notPresent = false;
13123 block.data += mapCount;
13124 break;
13125 }
13126 }
13127 if (notPresent)
13128 mappedBlocks.push_back({ mapCount, newMapBlock });
13129 }
13130
13131 // Scope for locks, Free have it's own lock
13132 {
13133 VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
13134 prevCount = vector->GetBlockCount();
13135 freedBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
13136 }
13137 vector->Free(move.dstTmpAllocation);
13138 {
13139 VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
13140 currentCount = vector->GetBlockCount();
13141 }
13142
13143 result = VK_INCOMPLETE;
13144 break;
13145 }
13147 {
13148 m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
13149 --m_PassStats.allocationsMoved;
13150 vector->Free(move.dstTmpAllocation);
13151
13152 VmaDeviceMemoryBlock* newBlock = move.srcAllocation->GetBlock();
13153 bool notPresent = true;
13154 for (const FragmentedBlock& block : immovableBlocks)
13155 {
13156 if (block.block == newBlock)
13157 {
13158 notPresent = false;
13159 break;
13160 }
13161 }
13162 if (notPresent)
13163 immovableBlocks.push_back({ vectorIndex, newBlock });
13164 break;
13165 }
13167 {
13168 m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
13169 --m_PassStats.allocationsMoved;
13170 // Scope for locks, Free have it's own lock
13171 {
13172 VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
13173 prevCount = vector->GetBlockCount();
13174 freedBlockSize = move.srcAllocation->GetBlock()->m_pMetadata->GetSize();
13175 }
13176 vector->Free(move.srcAllocation);
13177 {
13178 VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
13179 currentCount = vector->GetBlockCount();
13180 }
13181 freedBlockSize *= prevCount - currentCount;
13182
13183 VkDeviceSize dstBlockSize;
13184 {
13185 VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
13186 dstBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
13187 }
13188 vector->Free(move.dstTmpAllocation);
13189 {
13190 VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
13191 freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount());
13192 currentCount = vector->GetBlockCount();
13193 }
13194
13195 result = VK_INCOMPLETE;
13196 break;
13197 }
13198 default:
13199 VMA_ASSERT(0);
13200 }
13201
13202 if (prevCount > currentCount)
13203 {
13204 size_t freedBlocks = prevCount - currentCount;
13205 m_PassStats.deviceMemoryBlocksFreed += static_cast<uint32_t>(freedBlocks);
13206 m_PassStats.bytesFreed += freedBlockSize;
13207 }
13208
13209 switch (m_Algorithm)
13210 {
13212 {
13213 if (m_AlgorithmState != VMA_NULL)
13214 {
13215 // Avoid unnecessary tries to allocate when new free block is avaiable
13216 StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[vectorIndex];
13217 if (state.firstFreeBlock != SIZE_MAX)
13218 {
13219 const size_t diff = prevCount - currentCount;
13220 if (state.firstFreeBlock >= diff)
13221 {
13222 state.firstFreeBlock -= diff;
13223 if (state.firstFreeBlock != 0)
13224 state.firstFreeBlock -= vector->GetBlock(state.firstFreeBlock - 1)->m_pMetadata->IsEmpty();
13225 }
13226 else
13227 state.firstFreeBlock = 0;
13228 }
13229 }
13230 }
13231 }
13232 }
13233 moveInfo.moveCount = 0;
13234 moveInfo.pMoves = VMA_NULL;
13235 m_Moves.clear();
13236
13237 // Update stats
13238 m_GlobalStats.allocationsMoved += m_PassStats.allocationsMoved;
13239 m_GlobalStats.bytesFreed += m_PassStats.bytesFreed;
13240 m_GlobalStats.bytesMoved += m_PassStats.bytesMoved;
13241 m_GlobalStats.deviceMemoryBlocksFreed += m_PassStats.deviceMemoryBlocksFreed;
13242 m_PassStats = { 0 };
13243
13244 // Move blocks with immovable allocations according to algorithm
13245 if (immovableBlocks.size() > 0)
13246 {
13247 switch (m_Algorithm)
13248 {
13250 {
13251 if (m_AlgorithmState != VMA_NULL)
13252 {
13253 bool swapped = false;
13254 // Move to the start of free blocks range
13255 for (const FragmentedBlock& block : immovableBlocks)
13256 {
13257 StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[block.data];
13259 {
13260 VmaBlockVector* vector = m_pBlockVectors[block.data];
13261 VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
13262
13263 for (size_t i = 0, count = vector->GetBlockCount() - m_ImmovableBlockCount; i < count; ++i)
13264 {
13265 if (vector->GetBlock(i) == block.block)
13266 {
13267 VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[vector->GetBlockCount() - ++m_ImmovableBlockCount]);
13268 if (state.firstFreeBlock != SIZE_MAX)
13269 {
13270 if (i + 1 < state.firstFreeBlock)
13271 {
13272 if (state.firstFreeBlock > 1)
13273 VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[--state.firstFreeBlock]);
13274 else
13275 --state.firstFreeBlock;
13276 }
13277 }
13278 swapped = true;
13279 break;
13280 }
13281 }
13282 }
13283 }
13284 if (swapped)
13285 result = VK_INCOMPLETE;
13286 break;
13287 }
13288 }
13289 default:
13290 {
13291 // Move to the begining
13292 for (const FragmentedBlock& block : immovableBlocks)
13293 {
13294 VmaBlockVector* vector = m_pBlockVectors[block.data];
13295 VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
13296
13297 for (size_t i = m_ImmovableBlockCount; i < vector->GetBlockCount(); ++i)
13298 {
13299 if (vector->GetBlock(i) == block.block)
13300 {
13301 VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[m_ImmovableBlockCount++]);
13302 break;
13303 }
13304 }
13305 }
13306 break;
13307 }
13308 }
13309 }
13310
13311 // Bulk-map destination blocks
13312 for (const FragmentedBlock& block : mappedBlocks)
13313 {
13314 VkResult res = block.block->Map(allocator, block.data, VMA_NULL);
13315 VMA_ASSERT(res == VK_SUCCESS);
13316 }
13317 return result;
13318}
13319
13320bool VmaDefragmentationContext_T::ComputeDefragmentation(VmaBlockVector& vector, size_t index)
13321{
13322 switch (m_Algorithm)
13323 {
13325 return ComputeDefragmentation_Fast(vector);
13326 default:
13327 VMA_ASSERT(0);
13329 return ComputeDefragmentation_Balanced(vector, index, true);
13331 return ComputeDefragmentation_Full(vector);
13333 return ComputeDefragmentation_Extensive(vector, index);
13334 }
13335}
13336
13337VmaDefragmentationContext_T::MoveAllocationData VmaDefragmentationContext_T::GetMoveData(
13338 VmaAllocHandle handle, VmaBlockMetadata* metadata)
13339{
13340 MoveAllocationData moveData;
13341 moveData.move.srcAllocation = (VmaAllocation)metadata->GetAllocationUserData(handle);
13342 moveData.size = moveData.move.srcAllocation->GetSize();
13343 moveData.alignment = moveData.move.srcAllocation->GetAlignment();
13344 moveData.type = moveData.move.srcAllocation->GetSuballocationType();
13345 moveData.flags = 0;
13346
13347 if (moveData.move.srcAllocation->IsPersistentMap())
13348 moveData.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
13349 if (moveData.move.srcAllocation->IsMappingAllowed())
13351
13352 return moveData;
13353}
13354
13355VmaDefragmentationContext_T::CounterStatus VmaDefragmentationContext_T::CheckCounters(VkDeviceSize bytes)
13356{
13357 // Ignore allocation if will exceed max size for copy
13358 if (m_PassStats.bytesMoved + bytes > m_MaxPassBytes)
13359 {
13360 if (++m_IgnoredAllocs < MAX_ALLOCS_TO_IGNORE)
13361 return CounterStatus::Ignore;
13362 else
13363 return CounterStatus::End;
13364 }
13365 return CounterStatus::Pass;
13366}
13367
13368bool VmaDefragmentationContext_T::IncrementCounters(VkDeviceSize bytes)
13369{
13370 m_PassStats.bytesMoved += bytes;
13371 // Early return when max found
13372 if (++m_PassStats.allocationsMoved >= m_MaxPassAllocations || m_PassStats.bytesMoved >= m_MaxPassBytes)
13373 {
13374 VMA_ASSERT(m_PassStats.allocationsMoved == m_MaxPassAllocations ||
13375 m_PassStats.bytesMoved == m_MaxPassBytes && "Exceeded maximal pass threshold!");
13376 return true;
13377 }
13378 return false;
13379}
13380
13381bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block)
13382{
13383 VmaBlockMetadata* metadata = block->m_pMetadata;
13384
13385 for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
13386 handle != VK_NULL_HANDLE;
13387 handle = metadata->GetNextAllocation(handle))
13388 {
13389 MoveAllocationData moveData = GetMoveData(handle, metadata);
13390 // Ignore newly created allocations by defragmentation algorithm
13391 if (moveData.move.srcAllocation->GetUserData() == this)
13392 continue;
13393 switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
13394 {
13395 case CounterStatus::Ignore:
13396 continue;
13397 case CounterStatus::End:
13398 return true;
13399 default:
13400 VMA_ASSERT(0);
13401 case CounterStatus::Pass:
13402 break;
13403 }
13404
13405 VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
13406 if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
13407 {
13408 VmaAllocationRequest request = {};
13409 if (metadata->CreateAllocationRequest(
13410 moveData.size,
13411 moveData.alignment,
13412 false,
13413 moveData.type,
13415 &request))
13416 {
13417 if (metadata->GetAllocationOffset(request.allocHandle) < offset)
13418 {
13419 if (vector.CommitAllocationRequest(
13420 request,
13421 block,
13422 moveData.alignment,
13423 moveData.flags,
13424 this,
13425 moveData.type,
13426 &moveData.move.dstTmpAllocation) == VK_SUCCESS)
13427 {
13428 m_Moves.push_back(moveData.move);
13429 if (IncrementCounters(moveData.size))
13430 return true;
13431 }
13432 }
13433 }
13434 }
13435 }
13436 return false;
13437}
13438
13439bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector)
13440{
13441 for (; start < end; ++start)
13442 {
13443 VmaDeviceMemoryBlock* dstBlock = vector.GetBlock(start);
13444 if (dstBlock->m_pMetadata->GetSumFreeSize() >= data.size)
13445 {
13446 if (vector.AllocateFromBlock(dstBlock,
13447 data.size,
13448 data.alignment,
13449 data.flags,
13450 this,
13451 data.type,
13452 0,
13453 &data.move.dstTmpAllocation) == VK_SUCCESS)
13454 {
13455 m_Moves.push_back(data.move);
13456 if (IncrementCounters(data.size))
13457 return true;
13458 break;
13459 }
13460 }
13461 }
13462 return false;
13463}
13464
13465bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector& vector)
13466{
13467 // Move only between blocks
13468
13469 // Go through allocations in last blocks and try to fit them inside first ones
13470 for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
13471 {
13472 VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;
13473
13474 for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
13475 handle != VK_NULL_HANDLE;
13476 handle = metadata->GetNextAllocation(handle))
13477 {
13478 MoveAllocationData moveData = GetMoveData(handle, metadata);
13479 // Ignore newly created allocations by defragmentation algorithm
13480 if (moveData.move.srcAllocation->GetUserData() == this)
13481 continue;
13482 switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
13483 {
13484 case CounterStatus::Ignore:
13485 continue;
13486 case CounterStatus::End:
13487 return true;
13488 default:
13489 VMA_ASSERT(0);
13490 case CounterStatus::Pass:
13491 break;
13492 }
13493
13494 // Check all previous blocks for free space
13495 if (AllocInOtherBlock(0, i, moveData, vector))
13496 return true;
13497 }
13498 }
13499 return false;
13500}
13501
13502bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update)
13503{
13504 // Go over every allocation and try to fit it in previous blocks at lowest offsets,
13505 // if not possible: realloc within single block to minimize offset (exclude offset == 0),
13506 // but only if there are noticable gaps between them (some heuristic, ex. average size of allocation in block)
13507 VMA_ASSERT(m_AlgorithmState != VMA_NULL);
13508
13509 StateBalanced& vectorState = reinterpret_cast<StateBalanced*>(m_AlgorithmState)[index];
13510 if (update && vectorState.avgAllocSize == UINT64_MAX)
13511 UpdateVectorStatistics(vector, vectorState);
13512
13513 const size_t startMoveCount = m_Moves.size();
13514 VkDeviceSize minimalFreeRegion = vectorState.avgFreeSize / 2;
13515 for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
13516 {
13517 VmaDeviceMemoryBlock* block = vector.GetBlock(i);
13518 VmaBlockMetadata* metadata = block->m_pMetadata;
13519 VkDeviceSize prevFreeRegionSize = 0;
13520
13521 for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
13522 handle != VK_NULL_HANDLE;
13523 handle = metadata->GetNextAllocation(handle))
13524 {
13525 MoveAllocationData moveData = GetMoveData(handle, metadata);
13526 // Ignore newly created allocations by defragmentation algorithm
13527 if (moveData.move.srcAllocation->GetUserData() == this)
13528 continue;
13529 switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
13530 {
13531 case CounterStatus::Ignore:
13532 continue;
13533 case CounterStatus::End:
13534 return true;
13535 default:
13536 VMA_ASSERT(0);
13537 case CounterStatus::Pass:
13538 break;
13539 }
13540
13541 // Check all previous blocks for free space
13542 const size_t prevMoveCount = m_Moves.size();
13543 if (AllocInOtherBlock(0, i, moveData, vector))
13544 return true;
13545
13546 VkDeviceSize nextFreeRegionSize = metadata->GetNextFreeRegionSize(handle);
13547 // If no room found then realloc within block for lower offset
13548 VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
13549 if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
13550 {
13551 // Check if realloc will make sense
13552 if (prevFreeRegionSize >= minimalFreeRegion ||
13553 nextFreeRegionSize >= minimalFreeRegion ||
13554 moveData.size <= vectorState.avgFreeSize ||
13555 moveData.size <= vectorState.avgAllocSize)
13556 {
13557 VmaAllocationRequest request = {};
13558 if (metadata->CreateAllocationRequest(
13559 moveData.size,
13560 moveData.alignment,
13561 false,
13562 moveData.type,
13564 &request))
13565 {
13566 if (metadata->GetAllocationOffset(request.allocHandle) < offset)
13567 {
13568 if (vector.CommitAllocationRequest(
13569 request,
13570 block,
13571 moveData.alignment,
13572 moveData.flags,
13573 this,
13574 moveData.type,
13575 &moveData.move.dstTmpAllocation) == VK_SUCCESS)
13576 {
13577 m_Moves.push_back(moveData.move);
13578 if (IncrementCounters(moveData.size))
13579 return true;
13580 }
13581 }
13582 }
13583 }
13584 }
13585 prevFreeRegionSize = nextFreeRegionSize;
13586 }
13587 }
13588
13589 // No moves perfomed, update statistics to current vector state
13590 if (startMoveCount == m_Moves.size() && !update)
13591 {
13592 vectorState.avgAllocSize = UINT64_MAX;
13593 return ComputeDefragmentation_Balanced(vector, index, false);
13594 }
13595 return false;
13596}
13597
13598bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& vector)
13599{
13600 // Go over every allocation and try to fit it in previous blocks at lowest offsets,
13601 // if not possible: realloc within single block to minimize offset (exclude offset == 0)
13602
13603 for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
13604 {
13605 VmaDeviceMemoryBlock* block = vector.GetBlock(i);
13606 VmaBlockMetadata* metadata = block->m_pMetadata;
13607
13608 for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
13609 handle != VK_NULL_HANDLE;
13610 handle = metadata->GetNextAllocation(handle))
13611 {
13612 MoveAllocationData moveData = GetMoveData(handle, metadata);
13613 // Ignore newly created allocations by defragmentation algorithm
13614 if (moveData.move.srcAllocation->GetUserData() == this)
13615 continue;
13616 switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
13617 {
13618 case CounterStatus::Ignore:
13619 continue;
13620 case CounterStatus::End:
13621 return true;
13622 default:
13623 VMA_ASSERT(0);
13624 case CounterStatus::Pass:
13625 break;
13626 }
13627
13628 // Check all previous blocks for free space
13629 const size_t prevMoveCount = m_Moves.size();
13630 if (AllocInOtherBlock(0, i, moveData, vector))
13631 return true;
13632
13633 // If no room found then realloc within block for lower offset
13634 VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
13635 if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
13636 {
13637 VmaAllocationRequest request = {};
13638 if (metadata->CreateAllocationRequest(
13639 moveData.size,
13640 moveData.alignment,
13641 false,
13642 moveData.type,
13644 &request))
13645 {
13646 if (metadata->GetAllocationOffset(request.allocHandle) < offset)
13647 {
13648 if (vector.CommitAllocationRequest(
13649 request,
13650 block,
13651 moveData.alignment,
13652 moveData.flags,
13653 this,
13654 moveData.type,
13655 &moveData.move.dstTmpAllocation) == VK_SUCCESS)
13656 {
13657 m_Moves.push_back(moveData.move);
13658 if (IncrementCounters(moveData.size))
13659 return true;
13660 }
13661 }
13662 }
13663 }
13664 }
13665 }
13666 return false;
13667}
13668
13669bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index)
13670{
13671 // First free single block, then populate it to the brim, then free another block, and so on
13672
13673 // Fallback to previous algorithm since without granularity conflicts it can achieve max packing
13674 if (vector.m_BufferImageGranularity == 1)
13675 return ComputeDefragmentation_Full(vector);
13676
13677 VMA_ASSERT(m_AlgorithmState != VMA_NULL);
13678
13679 StateExtensive& vectorState = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[index];
13680
13681 bool texturePresent = false, bufferPresent = false, otherPresent = false;
13682 switch (vectorState.operation)
13683 {
13684 case StateExtensive::Operation::Done: // Vector defragmented
13685 return false;
13686 case StateExtensive::Operation::FindFreeBlockBuffer:
13687 case StateExtensive::Operation::FindFreeBlockTexture:
13688 case StateExtensive::Operation::FindFreeBlockAll:
13689 {
13690 // No more blocks to free, just perform fast realloc and move to cleanup
13691 if (vectorState.firstFreeBlock == 0)
13692 {
13693 vectorState.operation = StateExtensive::Operation::Cleanup;
13694 return ComputeDefragmentation_Fast(vector);
13695 }
13696
13697 // No free blocks, have to clear last one
13698 size_t last = (vectorState.firstFreeBlock == SIZE_MAX ? vector.GetBlockCount() : vectorState.firstFreeBlock) - 1;
13699 VmaBlockMetadata* freeMetadata = vector.GetBlock(last)->m_pMetadata;
13700
13701 const size_t prevMoveCount = m_Moves.size();
13702 for (VmaAllocHandle handle = freeMetadata->GetAllocationListBegin();
13703 handle != VK_NULL_HANDLE;
13704 handle = freeMetadata->GetNextAllocation(handle))
13705 {
13706 MoveAllocationData moveData = GetMoveData(handle, freeMetadata);
13707 switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
13708 {
13709 case CounterStatus::Ignore:
13710 continue;
13711 case CounterStatus::End:
13712 return true;
13713 default:
13714 VMA_ASSERT(0);
13715 case CounterStatus::Pass:
13716 break;
13717 }
13718
13719 // Check all previous blocks for free space
13720 if (AllocInOtherBlock(0, last, moveData, vector))
13721 {
13722 // Full clear performed already
13723 if (prevMoveCount != m_Moves.size() && freeMetadata->GetNextAllocation(handle) == VK_NULL_HANDLE)
13724 reinterpret_cast<size_t*>(m_AlgorithmState)[index] = last;
13725 return true;
13726 }
13727 }
13728
13729 if (prevMoveCount == m_Moves.size())
13730 {
13731 // Cannot perform full clear, have to move data in other blocks around
13732 if (last != 0)
13733 {
13734 for (size_t i = last - 1; i; --i)
13735 {
13736 if (ReallocWithinBlock(vector, vector.GetBlock(i)))
13737 return true;
13738 }
13739 }
13740
13741 if (prevMoveCount == m_Moves.size())
13742 {
13743 // No possible reallocs within blocks, try to move them around fast
13744 return ComputeDefragmentation_Fast(vector);
13745 }
13746 }
13747 else
13748 {
13749 switch (vectorState.operation)
13750 {
13751 case StateExtensive::Operation::FindFreeBlockBuffer:
13752 vectorState.operation = StateExtensive::Operation::MoveBuffers;
13753 break;
13754 default:
13755 VMA_ASSERT(0);
13756 case StateExtensive::Operation::FindFreeBlockTexture:
13757 vectorState.operation = StateExtensive::Operation::MoveTextures;
13758 break;
13759 case StateExtensive::Operation::FindFreeBlockAll:
13760 vectorState.operation = StateExtensive::Operation::MoveAll;
13761 break;
13762 }
13763 vectorState.firstFreeBlock = last;
13764 // Nothing done, block found without reallocations, can perform another reallocs in same pass
13765 return ComputeDefragmentation_Extensive(vector, index);
13766 }
13767 break;
13768 }
13769 case StateExtensive::Operation::MoveTextures:
13770 {
13771 if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL, vector,
13772 vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
13773 {
13774 if (texturePresent)
13775 {
13776 vectorState.operation = StateExtensive::Operation::FindFreeBlockTexture;
13777 return ComputeDefragmentation_Extensive(vector, index);
13778 }
13779
13780 if (!bufferPresent && !otherPresent)
13781 {
13782 vectorState.operation = StateExtensive::Operation::Cleanup;
13783 break;
13784 }
13785
13786 // No more textures to move, check buffers
13787 vectorState.operation = StateExtensive::Operation::MoveBuffers;
13788 bufferPresent = false;
13789 otherPresent = false;
13790 }
13791 else
13792 break;
13793 }
13794 case StateExtensive::Operation::MoveBuffers:
13795 {
13796 if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_BUFFER, vector,
13797 vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
13798 {
13799 if (bufferPresent)
13800 {
13801 vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer;
13802 return ComputeDefragmentation_Extensive(vector, index);
13803 }
13804
13805 if (!otherPresent)
13806 {
13807 vectorState.operation = StateExtensive::Operation::Cleanup;
13808 break;
13809 }
13810
13811 // No more buffers to move, check all others
13812 vectorState.operation = StateExtensive::Operation::MoveAll;
13813 otherPresent = false;
13814 }
13815 else
13816 break;
13817 }
13818 case StateExtensive::Operation::MoveAll:
13819 {
13820 if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_FREE, vector,
13821 vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
13822 {
13823 if (otherPresent)
13824 {
13825 vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer;
13826 return ComputeDefragmentation_Extensive(vector, index);
13827 }
13828 // Everything moved
13829 vectorState.operation = StateExtensive::Operation::Cleanup;
13830 }
13831 break;
13832 }
13834 // Cleanup is handled below so that other operations may reuse the cleanup code. This case is here to prevent the unhandled enum value warning (C4062).
13835 break;
13836 }
13837
13838 if (vectorState.operation == StateExtensive::Operation::Cleanup)
13839 {
13840 // All other work done, pack data in blocks even tighter if possible
13841 const size_t prevMoveCount = m_Moves.size();
13842 for (size_t i = 0; i < vector.GetBlockCount(); ++i)
13843 {
13844 if (ReallocWithinBlock(vector, vector.GetBlock(i)))
13845 return true;
13846 }
13847
13848 if (prevMoveCount == m_Moves.size())
13849 vectorState.operation = StateExtensive::Operation::Done;
13850 }
13851 return false;
13852}
13853
13854void VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state)
13855{
13856 size_t allocCount = 0;
13857 size_t freeCount = 0;
13858 state.avgFreeSize = 0;
13859 state.avgAllocSize = 0;
13860
13861 for (size_t i = 0; i < vector.GetBlockCount(); ++i)
13862 {
13863 VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;
13864
13865 allocCount += metadata->GetAllocationCount();
13866 freeCount += metadata->GetFreeRegionsCount();
13867 state.avgFreeSize += metadata->GetSumFreeSize();
13868 state.avgAllocSize += metadata->GetSize();
13869 }
13870
13871 state.avgAllocSize = (state.avgAllocSize - state.avgFreeSize) / allocCount;
13872 state.avgFreeSize /= freeCount;
13873}
13874
// Walks blocks below firstFreeBlock (highest index first) and tries to move
// their allocations into the free blocks at [firstFreeBlock, GetBlockCount()).
// Only allocations whose suballocation type does not conflict with currentType
// (w.r.t. buffer/image granularity) are moved; the *Present out-flags record
// which resource categories were seen but not moved.
// Returns true when this call recorded no new moves (nothing left to transfer
// for this type); returns false as soon as a move fills the pass limits.
// NOTE(review): it also returns true on CounterStatus::End even if earlier
// iterations of this call did record moves — confirm callers expect that.
bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType currentType,
    VmaBlockVector& vector, size_t firstFreeBlock,
    bool& texturePresent, bool& bufferPresent, bool& otherPresent)
{
    const size_t prevMoveCount = m_Moves.size();
    for (size_t i = firstFreeBlock ; i;)
    {
        VmaDeviceMemoryBlock* block = vector.GetBlock(--i);
        VmaBlockMetadata* metadata = block->m_pMetadata;

        for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
            handle != VK_NULL_HANDLE;
            handle = metadata->GetNextAllocation(handle))
        {
            MoveAllocationData moveData = GetMoveData(handle, metadata);
            // Ignore newly created allocations by defragmentation algorithm
            if (moveData.move.srcAllocation->GetUserData() == this)
                continue;
            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
            {
            case CounterStatus::Ignore:
                continue;
            case CounterStatus::End:
                return true;
            default:
                VMA_ASSERT(0); // Unexpected status; intentional fallthrough to Pass.
            case CounterStatus::Pass:
                break;
            }

            // Move only single type of resources at once
            if (!VmaIsBufferImageGranularityConflict(moveData.type, currentType))
            {
                // Try to fit allocation into free blocks
                if (AllocInOtherBlock(firstFreeBlock, vector.GetBlockCount(), moveData, vector))
                    return false; // Pass limits reached mid-move; more work remains.
            }

            // Record which categories of resources remain in the scanned blocks.
            if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL))
                texturePresent = true;
            else if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_BUFFER))
                bufferPresent = true;
            else
                otherPresent = true;
        }
    }
    // True when no move was recorded by this call.
    return prevMoveCount == m_Moves.size();
}
13923#endif // _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
13924
13925#ifndef _VMA_POOL_T_FUNCTIONS
// Constructs a custom pool: all real state lives in the embedded block vector,
// configured from createInfo with allocator-provided defaults where the user
// left values at 0.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize)
    : m_BlockVector(
        hAllocator,
        this, // hParentPool
        createInfo.memoryTypeIndex,
        // blockSize == 0 means "use the allocator's preferred default size".
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Granularity of 1 effectively disables buffer/image granularity handling.
        (createInfo.flags& VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm
        createInfo.priority,
        // Effective minimum alignment: the stricter of the memory type's and the user's.
        VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment),
        createInfo.pMemoryAllocateNext),
    m_Id(0),
    m_Name(VMA_NULL) {}
13945
VmaPool_T::~VmaPool_T()
{
    // The pool must already be unlinked from the allocator's intrusive pool
    // list before it is destroyed.
    VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL);
}
13950
13951void VmaPool_T::SetName(const char* pName)
13952{
13953 const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
13954 VmaFreeString(allocs, m_Name);
13955
13956 if (pName != VMA_NULL)
13957 {
13958 m_Name = VmaCreateStringCopy(allocs, pName);
13959 }
13960 else
13961 {
13962 m_Name = VMA_NULL;
13963 }
13964}
13965#endif // _VMA_POOL_T_FUNCTIONS
13966
13967#ifndef _VMA_ALLOCATOR_T_FUNCTIONS
13968VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
13969 m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
13970 m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
13971 m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
13972 m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
13973 m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
13974 m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
13975 m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
13976 m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0),
13977 m_hDevice(pCreateInfo->device),
13978 m_hInstance(pCreateInfo->instance),
13979 m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
13980 m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
13981 *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
13982 m_AllocationObjectAllocator(&m_AllocationCallbacks),
13983 m_HeapSizeLimitMask(0),
13984 m_DeviceMemoryCount(0),
13985 m_PreferredLargeHeapBlockSize(0),
13986 m_PhysicalDevice(pCreateInfo->physicalDevice),
13987 m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
13988 m_NextPoolId(0),
13989 m_GlobalMemoryTypeBits(UINT32_MAX)
13990{
13991 if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
13992 {
13993 m_UseKhrDedicatedAllocation = false;
13994 m_UseKhrBindMemory2 = false;
13995 }
13996
13997 if(VMA_DEBUG_DETECT_CORRUPTION)
13998 {
13999 // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
14000 VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
14001 }
14002
14003 VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);
14004
14005 if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
14006 {
14007#if !(VMA_DEDICATED_ALLOCATION)
14009 {
14010 VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
14011 }
14012#endif
14013#if !(VMA_BIND_MEMORY2)
14014 if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
14015 {
14016 VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
14017 }
14018#endif
14019 }
14020#if !(VMA_MEMORY_BUDGET)
14021 if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
14022 {
14023 VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
14024 }
14025#endif
14026#if !(VMA_BUFFER_DEVICE_ADDRESS)
14027 if(m_UseKhrBufferDeviceAddress)
14028 {
14029 VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
14030 }
14031#endif
14032#if VMA_VULKAN_VERSION < 1002000
14033 if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
14034 {
14035 VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
14036 }
14037#endif
14038#if VMA_VULKAN_VERSION < 1001000
14039 if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14040 {
14041 VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
14042 }
14043#endif
14044#if !(VMA_MEMORY_PRIORITY)
14045 if(m_UseExtMemoryPriority)
14046 {
14047 VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
14048 }
14049#endif
14050
14051 memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
14052 memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
14053 memset(&m_MemProps, 0, sizeof(m_MemProps));
14054
14055 memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
14056 memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
14057
14058#if VMA_EXTERNAL_MEMORY
14059 memset(&m_TypeExternalMemoryHandleTypes, 0, sizeof(m_TypeExternalMemoryHandleTypes));
14060#endif // #if VMA_EXTERNAL_MEMORY
14061
14062 if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
14063 {
14064 m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
14065 m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
14066 m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
14067 }
14068
14069 ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
14070
14071 (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
14072 (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
14073
14074 VMA_ASSERT(VmaIsPow2(VMA_MIN_ALIGNMENT));
14075 VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
14076 VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
14077 VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
14078
14079 m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
14080 pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
14081
14082 m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
14083
14084#if VMA_EXTERNAL_MEMORY
14085 if(pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL)
14086 {
14087 memcpy(m_TypeExternalMemoryHandleTypes, pCreateInfo->pTypeExternalMemoryHandleTypes,
14088 sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount());
14089 }
14090#endif // #if VMA_EXTERNAL_MEMORY
14091
14092 if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
14093 {
14094 for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
14095 {
14096 const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
14097 if(limit != VK_WHOLE_SIZE)
14098 {
14099 m_HeapSizeLimitMask |= 1u << heapIndex;
14100 if(limit < m_MemProps.memoryHeaps[heapIndex].size)
14101 {
14102 m_MemProps.memoryHeaps[heapIndex].size = limit;
14103 }
14104 }
14105 }
14106 }
14107
14108 for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14109 {
14110 // Create only supported types
14111 if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0)
14112 {
14113 const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
14114 m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
14115 this,
14116 VK_NULL_HANDLE, // hParentPool
14117 memTypeIndex,
14118 preferredBlockSize,
14119 0,
14120 SIZE_MAX,
14121 GetBufferImageGranularity(),
14122 false, // explicitBlockSize
14123 0, // algorithm
14124 0.5f, // priority (0.5 is the default per Vulkan spec)
14125 GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment
14126 VMA_NULL); // // pMemoryAllocateNext
14127 // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
14128 // becase minBlockCount is 0.
14129 }
14130 }
14131}
14132
14134{
14135 VkResult res = VK_SUCCESS;
14136
14137#if VMA_MEMORY_BUDGET
14138 if(m_UseExtMemoryBudget)
14139 {
14140 UpdateVulkanBudget();
14141 }
14142#endif // #if VMA_MEMORY_BUDGET
14143
14144 return res;
14145}
14146
14147VmaAllocator_T::~VmaAllocator_T()
14148{
14149 VMA_ASSERT(m_Pools.IsEmpty());
14150
14151 for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
14152 {
14153 vma_delete(this, m_pBlockVectors[memTypeIndex]);
14154 }
14155}
14156
14157void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
14158{
14159#if VMA_STATIC_VULKAN_FUNCTIONS == 1
14160 ImportVulkanFunctions_Static();
14161#endif
14162
14163 if(pVulkanFunctions != VMA_NULL)
14164 {
14165 ImportVulkanFunctions_Custom(pVulkanFunctions);
14166 }
14167
14168#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
14169 ImportVulkanFunctions_Dynamic();
14170#endif
14171
14172 ValidateVulkanFunctions();
14173}
14174
14175#if VMA_STATIC_VULKAN_FUNCTIONS == 1
14176
// Fills m_VulkanFunctions with the Vulkan entry points this binary is
// statically linked against. Compiled only when VMA_STATIC_VULKAN_FUNCTIONS == 1.
void VmaAllocator_T::ImportVulkanFunctions_Static()
{
    // Vulkan 1.0
    m_VulkanFunctions.vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)vkGetInstanceProcAddr;
    m_VulkanFunctions.vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)vkGetDeviceProcAddr;
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
    m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;

    // Vulkan 1.1
#if VMA_VULKAN_VERSION >= 1001000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        // Core 1.1 versions of the *2 entry points are stored in the *KHR members.
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
        m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
        m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
        m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
    }
#endif

    // Vulkan 1.3
#if VMA_VULKAN_VERSION >= 1003000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
    {
        m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)vkGetDeviceBufferMemoryRequirements;
        m_VulkanFunctions.vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)vkGetDeviceImageMemoryRequirements;
    }
#endif
}
14220
14221#endif // VMA_STATIC_VULKAN_FUNCTIONS == 1
14222
// Copies user-supplied function pointers from pVulkanFunctions into
// m_VulkanFunctions. Only non-null members override what is already there,
// so this layers on top of the static import and before dynamic fetching.
void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
{
    VMA_ASSERT(pVulkanFunctions != VMA_NULL);

// Copy a single member only when the user actually provided it.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    VMA_COPY_IF_NOT_NULL(vkGetInstanceProcAddr);
    VMA_COPY_IF_NOT_NULL(vkGetDeviceProcAddr);
    VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    VMA_COPY_IF_NOT_NULL(vkMapMemory);
    VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
    VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
    VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    VMA_COPY_IF_NOT_NULL(vkCreateImage);
    VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);

#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif

#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
    VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
    VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
#endif

    // TODO: remove after merging https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator/pull/322
#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
    VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
#endif

#if VMA_VULKAN_VERSION >= 1003000
    VMA_COPY_IF_NOT_NULL(vkGetDeviceBufferMemoryRequirements);
    VMA_COPY_IF_NOT_NULL(vkGetDeviceImageMemoryRequirements);
#endif

#undef VMA_COPY_IF_NOT_NULL
}
14272
14273#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
14274
// Fetches any still-missing function pointers at runtime via
// vkGetInstanceProcAddr / vkGetDeviceProcAddr. Requires the user to have
// provided those two loaders (asserted below). Pointers already set by the
// static or custom import stages are left untouched.
void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
{
    VMA_ASSERT(m_VulkanFunctions.vkGetInstanceProcAddr && m_VulkanFunctions.vkGetDeviceProcAddr &&
        "To use VMA_DYNAMIC_VULKAN_FUNCTIONS in new versions of VMA you now have to pass "
        "VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as VmaAllocatorCreateInfo::pVulkanFunctions. "
        "Other members can be null.");

// Fetch an instance-level function only if it was not already provided.
#define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
    if(m_VulkanFunctions.memberName == VMA_NULL) \
        m_VulkanFunctions.memberName = \
            (functionPointerType)m_VulkanFunctions.vkGetInstanceProcAddr(m_hInstance, functionNameString);
// Fetch a device-level function only if it was not already provided.
#define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
    if(m_VulkanFunctions.memberName == VMA_NULL) \
        m_VulkanFunctions.memberName = \
            (functionPointerType)m_VulkanFunctions.vkGetDeviceProcAddr(m_hDevice, functionNameString);

    // Vulkan 1.0 core functions.
    VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
    VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
    VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
    VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
    VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
    VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
    VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
    VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
    VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
    VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
    VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
    VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
    VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
    VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
    VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
    VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
    VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");

    // Vulkan 1.1: core (unsuffixed) names of the *2 entry points.
#if VMA_VULKAN_VERSION >= 1001000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2");
        VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2");
        VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2");
        VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2");
        VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2");
    }
#endif

    // Extension (KHR-suffixed) fallbacks for pre-1.1 devices.
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
        VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
    }
#endif

#if VMA_BIND_MEMORY2
    if(m_UseKhrBindMemory2)
    {
        VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
        VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
    }
#endif // #if VMA_BIND_MEMORY2

#if VMA_MEMORY_BUDGET
    if(m_UseExtMemoryBudget)
    {
        VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
    }
#endif // #if VMA_MEMORY_BUDGET

    // Vulkan 1.3.
#if VMA_VULKAN_VERSION >= 1003000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
    {
        VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirements, "vkGetDeviceBufferMemoryRequirements");
        VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirements, "vkGetDeviceImageMemoryRequirements");
    }
#endif

#undef VMA_FETCH_DEVICE_FUNC
#undef VMA_FETCH_INSTANCE_FUNC
}
14354
14355#endif // VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
14356
// Asserts that every function pointer required for the enabled feature set is
// non-null. Called at the end of ImportVulkanFunctions, after all three import
// stages have run. Debug-only check (VMA_ASSERT compiles out in release).
void VmaAllocator_T::ValidateVulkanFunctions()
{
    // Vulkan 1.0 core functions — always required.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);

    // Required when dedicated allocation is in use (extension or core 1.1).
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif

    // Required when bind-memory2 is in use (extension or core 1.1).
#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
    {
        VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
    }
#endif

    // Required for memory-budget queries (extension or core 1.1).
#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
    if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
    }
#endif

    // Required for Vulkan 1.3 maintenance4-style requirement queries.
#if VMA_VULKAN_VERSION >= 1003000
    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetDeviceImageMemoryRequirements != VMA_NULL);
    }
#endif
}
14408
14409VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14410{
14411 const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14412 const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14413 const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14414 return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
14415}
14416
14417VkResult VmaAllocator_T::AllocateMemoryOfType(
14418 VmaPool pool,
14419 VkDeviceSize size,
14420 VkDeviceSize alignment,
14421 bool dedicatedPreferred,
14422 VkBuffer dedicatedBuffer,
14423 VkImage dedicatedImage,
14424 VkFlags dedicatedBufferImageUsage,
14425 const VmaAllocationCreateInfo& createInfo,
14426 uint32_t memTypeIndex,
14427 VmaSuballocationType suballocType,
14428 VmaDedicatedAllocationList& dedicatedAllocations,
14429 VmaBlockVector& blockVector,
14430 size_t allocationCount,
14431 VmaAllocation* pAllocations)
14432{
14433 VMA_ASSERT(pAllocations != VMA_NULL);
14434 VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
14435
14436 VmaAllocationCreateInfo finalCreateInfo = createInfo;
14437 VkResult res = CalcMemTypeParams(
14438 finalCreateInfo,
14439 memTypeIndex,
14440 size,
14441 allocationCount);
14442 if(res != VK_SUCCESS)
14443 return res;
14444
14445 if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14446 {
14447 return AllocateDedicatedMemory(
14448 pool,
14449 size,
14450 suballocType,
14451 dedicatedAllocations,
14452 memTypeIndex,
14453 (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14455 (finalCreateInfo.flags &
14457 (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
14458 finalCreateInfo.pUserData,
14459 finalCreateInfo.priority,
14460 dedicatedBuffer,
14461 dedicatedImage,
14462 dedicatedBufferImageUsage,
14463 allocationCount,
14464 pAllocations,
14465 blockVector.GetAllocationNextPtr());
14466 }
14467 else
14468 {
14469 const bool canAllocateDedicated =
14470 (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14471 (pool == VK_NULL_HANDLE || !blockVector.HasExplicitBlockSize());
14472
14473 if(canAllocateDedicated)
14474 {
14475 // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
14476 if(size > blockVector.GetPreferredBlockSize() / 2)
14477 {
14478 dedicatedPreferred = true;
14479 }
14480 // Protection against creating each allocation as dedicated when we reach or exceed heap size/budget,
14481 // which can quickly deplete maxMemoryAllocationCount: Don't prefer dedicated allocations when above
14482 // 3/4 of the maximum allocation count.
14483 if(m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4)
14484 {
14485 dedicatedPreferred = false;
14486 }
14487
14488 if(dedicatedPreferred)
14489 {
14490 res = AllocateDedicatedMemory(
14491 pool,
14492 size,
14493 suballocType,
14494 dedicatedAllocations,
14495 memTypeIndex,
14496 (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14498 (finalCreateInfo.flags &
14500 (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
14501 finalCreateInfo.pUserData,
14502 finalCreateInfo.priority,
14503 dedicatedBuffer,
14504 dedicatedImage,
14505 dedicatedBufferImageUsage,
14506 allocationCount,
14507 pAllocations,
14508 blockVector.GetAllocationNextPtr());
14509 if(res == VK_SUCCESS)
14510 {
14511 // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
14512 VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14513 return VK_SUCCESS;
14514 }
14515 }
14516 }
14517
14518 res = blockVector.Allocate(
14519 size,
14520 alignment,
14521 finalCreateInfo,
14522 suballocType,
14523 allocationCount,
14524 pAllocations);
14525 if(res == VK_SUCCESS)
14526 return VK_SUCCESS;
14527
14528 // Try dedicated memory.
14529 if(canAllocateDedicated && !dedicatedPreferred)
14530 {
14531 res = AllocateDedicatedMemory(
14532 pool,
14533 size,
14534 suballocType,
14535 dedicatedAllocations,
14536 memTypeIndex,
14537 (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14539 (finalCreateInfo.flags &
14541 (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
14542 finalCreateInfo.pUserData,
14543 finalCreateInfo.priority,
14544 dedicatedBuffer,
14545 dedicatedImage,
14546 dedicatedBufferImageUsage,
14547 allocationCount,
14548 pAllocations,
14549 blockVector.GetAllocationNextPtr());
14550 if(res == VK_SUCCESS)
14551 {
14552 // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
14553 VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14554 return VK_SUCCESS;
14555 }
14556 }
14557 // Everything failed: Return error code.
14558 VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14559 return res;
14560 }
14561}
14562
14563VkResult VmaAllocator_T::AllocateDedicatedMemory(
14564 VmaPool pool,
14565 VkDeviceSize size,
14566 VmaSuballocationType suballocType,
14567 VmaDedicatedAllocationList& dedicatedAllocations,
14568 uint32_t memTypeIndex,
14569 bool map,
14570 bool isUserDataString,
14571 bool isMappingAllowed,
14572 bool canAliasMemory,
14573 void* pUserData,
14574 float priority,
14575 VkBuffer dedicatedBuffer,
14576 VkImage dedicatedImage,
14577 VkFlags dedicatedBufferImageUsage,
14578 size_t allocationCount,
14579 VmaAllocation* pAllocations,
14580 const void* pNextChain)
14581{
14582 VMA_ASSERT(allocationCount > 0 && pAllocations);
14583
14585 allocInfo.memoryTypeIndex = memTypeIndex;
14586 allocInfo.allocationSize = size;
14587 allocInfo.pNext = pNextChain;
14588
14589#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14591 if(!canAliasMemory)
14592 {
14593 if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14594 {
14595 if(dedicatedBuffer != VK_NULL_HANDLE)
14596 {
14597 VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
14598 dedicatedAllocInfo.buffer = dedicatedBuffer;
14599 VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
14600 }
14601 else if(dedicatedImage != VK_NULL_HANDLE)
14602 {
14603 dedicatedAllocInfo.image = dedicatedImage;
14604 VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
14605 }
14606 }
14607 }
14608#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14609
14610#if VMA_BUFFER_DEVICE_ADDRESS
14611 VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
14612 if(m_UseKhrBufferDeviceAddress)
14613 {
14614 bool canContainBufferWithDeviceAddress = true;
14615 if(dedicatedBuffer != VK_NULL_HANDLE)
14616 {
14617 canContainBufferWithDeviceAddress = dedicatedBufferImageUsage == UINT32_MAX || // Usage flags unknown
14618 (dedicatedBufferImageUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
14619 }
14620 else if(dedicatedImage != VK_NULL_HANDLE)
14621 {
14622 canContainBufferWithDeviceAddress = false;
14623 }
14624 if(canContainBufferWithDeviceAddress)
14625 {
14626 allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
14627 VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
14628 }
14629 }
14630#endif // #if VMA_BUFFER_DEVICE_ADDRESS
14631
14632#if VMA_MEMORY_PRIORITY
14633 VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
14634 if(m_UseExtMemoryPriority)
14635 {
14636 VMA_ASSERT(priority >= 0.f && priority <= 1.f);
14637 priorityInfo.priority = priority;
14638 VmaPnextChainPushFront(&allocInfo, &priorityInfo);
14639 }
14640#endif // #if VMA_MEMORY_PRIORITY
14641
14642#if VMA_EXTERNAL_MEMORY
14643 // Attach VkExportMemoryAllocateInfoKHR if necessary.
14644 VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
14645 exportMemoryAllocInfo.handleTypes = GetExternalMemoryHandleTypeFlags(memTypeIndex);
14646 if(exportMemoryAllocInfo.handleTypes != 0)
14647 {
14648 VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
14649 }
14650#endif // #if VMA_EXTERNAL_MEMORY
14651
14652 size_t allocIndex;
14653 VkResult res = VK_SUCCESS;
14654 for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14655 {
14656 res = AllocateDedicatedMemoryPage(
14657 pool,
14658 size,
14659 suballocType,
14660 memTypeIndex,
14661 allocInfo,
14662 map,
14663 isUserDataString,
14664 isMappingAllowed,
14665 pUserData,
14666 pAllocations + allocIndex);
14667 if(res != VK_SUCCESS)
14668 {
14669 break;
14670 }
14671 }
14672
14673 if(res == VK_SUCCESS)
14674 {
14675 for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14676 {
14677 dedicatedAllocations.Register(pAllocations[allocIndex]);
14678 }
14679 VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
14680 }
14681 else
14682 {
14683 // Free all already created allocations.
14684 while(allocIndex--)
14685 {
14686 VmaAllocation currAlloc = pAllocations[allocIndex];
14687 VkDeviceMemory hMemory = currAlloc->GetMemory();
14688
14689 /*
14690 There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
14691 before vkFreeMemory.
14692
14693 if(currAlloc->GetMappedData() != VMA_NULL)
14694 {
14695 (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
14696 }
14697 */
14698
14699 FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
14700 m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
14701 m_AllocationObjectAllocator.Free(currAlloc);
14702 }
14703
14704 memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14705 }
14706
14707 return res;
14708}
14709
14710VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
14711 VmaPool pool,
14712 VkDeviceSize size,
14713 VmaSuballocationType suballocType,
14714 uint32_t memTypeIndex,
14715 const VkMemoryAllocateInfo& allocInfo,
14716 bool map,
14717 bool isUserDataString,
14718 bool isMappingAllowed,
14719 void* pUserData,
14720 VmaAllocation* pAllocation)
14721{
14722 VkDeviceMemory hMemory = VK_NULL_HANDLE;
14723 VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
14724 if(res < 0)
14725 {
14726 VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14727 return res;
14728 }
14729
14730 void* pMappedData = VMA_NULL;
14731 if(map)
14732 {
14733 res = (*m_VulkanFunctions.vkMapMemory)(
14734 m_hDevice,
14735 hMemory,
14736 0,
14738 0,
14739 &pMappedData);
14740 if(res < 0)
14741 {
14742 VMA_DEBUG_LOG(" vkMapMemory FAILED");
14743 FreeVulkanMemory(memTypeIndex, size, hMemory);
14744 return res;
14745 }
14746 }
14747
14748 *pAllocation = m_AllocationObjectAllocator.Allocate(isMappingAllowed);
14749 (*pAllocation)->InitDedicatedAllocation(pool, memTypeIndex, hMemory, suballocType, pMappedData, size);
14750 if (isUserDataString)
14751 (*pAllocation)->SetName(this, (const char*)pUserData);
14752 else
14753 (*pAllocation)->SetUserData(this, pUserData);
14754 m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
14755 if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14756 {
14757 FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
14758 }
14759
14760 return VK_SUCCESS;
14761}
14762
14763void VmaAllocator_T::GetBufferMemoryRequirements(
14764 VkBuffer hBuffer,
14765 VkMemoryRequirements& memReq,
14766 bool& requiresDedicatedAllocation,
14767 bool& prefersDedicatedAllocation) const
14768{
14769#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14770 if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14771 {
14773 memReqInfo.buffer = hBuffer;
14774
14776
14778 VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
14779
14780 (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14781
14782 memReq = memReq2.memoryRequirements;
14783 requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14784 prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14785 }
14786 else
14787#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14788 {
14789 (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
14790 requiresDedicatedAllocation = false;
14791 prefersDedicatedAllocation = false;
14792 }
14793}
14794
14795void VmaAllocator_T::GetImageMemoryRequirements(
14796 VkImage hImage,
14797 VkMemoryRequirements& memReq,
14798 bool& requiresDedicatedAllocation,
14799 bool& prefersDedicatedAllocation) const
14800{
14801#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14802 if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
14803 {
14805 memReqInfo.image = hImage;
14806
14808
14810 VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
14811
14812 (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
14813
14814 memReq = memReq2.memoryRequirements;
14815 requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
14816 prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
14817 }
14818 else
14819#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
14820 {
14821 (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
14822 requiresDedicatedAllocation = false;
14823 prefersDedicatedAllocation = false;
14824 }
14825}
14826
14827VkResult VmaAllocator_T::FindMemoryTypeIndex(
14828 uint32_t memoryTypeBits,
14829 const VmaAllocationCreateInfo* pAllocationCreateInfo,
14830 VkFlags bufImgUsage,
14831 uint32_t* pMemoryTypeIndex) const
14832{
14833 memoryTypeBits &= GetGlobalMemoryTypeBits();
14834
14835 if(pAllocationCreateInfo->memoryTypeBits != 0)
14836 {
14837 memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
14838 }
14839
14840 VkMemoryPropertyFlags requiredFlags = 0, preferredFlags = 0, notPreferredFlags = 0;
14841 if(!FindMemoryPreferences(
14842 IsIntegratedGpu(),
14843 *pAllocationCreateInfo,
14844 bufImgUsage,
14845 requiredFlags, preferredFlags, notPreferredFlags))
14846 {
14848 }
14849
14850 *pMemoryTypeIndex = UINT32_MAX;
14851 uint32_t minCost = UINT32_MAX;
14852 for(uint32_t memTypeIndex = 0, memTypeBit = 1;
14853 memTypeIndex < GetMemoryTypeCount();
14854 ++memTypeIndex, memTypeBit <<= 1)
14855 {
14856 // This memory type is acceptable according to memoryTypeBits bitmask.
14857 if((memTypeBit & memoryTypeBits) != 0)
14858 {
14859 const VkMemoryPropertyFlags currFlags =
14860 m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
14861 // This memory type contains requiredFlags.
14862 if((requiredFlags & ~currFlags) == 0)
14863 {
14864 // Calculate cost as number of bits from preferredFlags not present in this memory type.
14865 uint32_t currCost = VMA_COUNT_BITS_SET(preferredFlags & ~currFlags) +
14866 VMA_COUNT_BITS_SET(currFlags & notPreferredFlags);
14867 // Remember memory type with lowest cost.
14868 if(currCost < minCost)
14869 {
14870 *pMemoryTypeIndex = memTypeIndex;
14871 if(currCost == 0)
14872 {
14873 return VK_SUCCESS;
14874 }
14875 minCost = currCost;
14876 }
14877 }
14878 }
14879 }
14880 return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
14881}
14882
14883VkResult VmaAllocator_T::CalcMemTypeParams(
14884 VmaAllocationCreateInfo& inoutCreateInfo,
14885 uint32_t memTypeIndex,
14886 VkDeviceSize size,
14887 size_t allocationCount)
14888{
14889 // If memory type is not HOST_VISIBLE, disable MAPPED.
14890 if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14891 (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14892 {
14893 inoutCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14894 }
14895
14896 if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14897 (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0)
14898 {
14899 const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14900 VmaBudget heapBudget = {};
14901 GetHeapBudgets(&heapBudget, heapIndex, 1);
14902 if(heapBudget.usage + size * allocationCount > heapBudget.budget)
14903 {
14905 }
14906 }
14907 return VK_SUCCESS;
14908}
14909
14910VkResult VmaAllocator_T::CalcAllocationParams(
14911 VmaAllocationCreateInfo& inoutCreateInfo,
14912 bool dedicatedRequired,
14913 bool dedicatedPreferred)
14914{
14915 VMA_ASSERT((inoutCreateInfo.flags &
14918 "Specifying both flags VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT and VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT is incorrect.");
14921 "Specifying VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT requires also VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.");
14922 if(inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST)
14923 {
14924 if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0)
14925 {
14927 "When using VMA_ALLOCATION_CREATE_MAPPED_BIT and usage = VMA_MEMORY_USAGE_AUTO*, you must also specify VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.");
14928 }
14929 }
14930
14931 // If memory is lazily allocated, it should be always dedicated.
14932 if(dedicatedRequired ||
14934 {
14936 }
14937
14938 if(inoutCreateInfo.pool != VK_NULL_HANDLE)
14939 {
14940 if(inoutCreateInfo.pool->m_BlockVector.HasExplicitBlockSize() &&
14941 (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14942 {
14943 VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations.");
14945 }
14946 inoutCreateInfo.priority = inoutCreateInfo.pool->m_BlockVector.GetPriority();
14947 }
14948
14949 if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14950 (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14951 {
14952 VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14954 }
14955
14956 if(VMA_DEBUG_ALWAYS_DEDICATED_MEMORY &&
14957 (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14958 {
14960 }
14961
14962 // Non-auto USAGE values imply HOST_ACCESS flags.
14963 // And so does VMA_MEMORY_USAGE_UNKNOWN because it is used with custom pools.
14964 // Which specific flag is used doesn't matter. They change things only when used with VMA_MEMORY_USAGE_AUTO*.
14965 // Otherwise they just protect from assert on mapping.
14966 if(inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO &&
14967 inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE &&
14968 inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_HOST)
14969 {
14971 {
14973 }
14974 }
14975
14976 return VK_SUCCESS;
14977}
14978
14979VkResult VmaAllocator_T::AllocateMemory(
14980 const VkMemoryRequirements& vkMemReq,
14981 bool requiresDedicatedAllocation,
14982 bool prefersDedicatedAllocation,
14983 VkBuffer dedicatedBuffer,
14984 VkImage dedicatedImage,
14985 VkFlags dedicatedBufferImageUsage,
14986 const VmaAllocationCreateInfo& createInfo,
14987 VmaSuballocationType suballocType,
14988 size_t allocationCount,
14989 VmaAllocation* pAllocations)
14990{
14991 memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
14992
14993 VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14994
14995 if(vkMemReq.size == 0)
14996 {
14998 }
14999
15000 VmaAllocationCreateInfo createInfoFinal = createInfo;
15001 VkResult res = CalcAllocationParams(createInfoFinal, requiresDedicatedAllocation, prefersDedicatedAllocation);
15002 if(res != VK_SUCCESS)
15003 return res;
15004
15005 if(createInfoFinal.pool != VK_NULL_HANDLE)
15006 {
15007 VmaBlockVector& blockVector = createInfoFinal.pool->m_BlockVector;
15008 return AllocateMemoryOfType(
15009 createInfoFinal.pool,
15010 vkMemReq.size,
15011 vkMemReq.alignment,
15012 prefersDedicatedAllocation,
15013 dedicatedBuffer,
15014 dedicatedImage,
15015 dedicatedBufferImageUsage,
15016 createInfoFinal,
15017 blockVector.GetMemoryTypeIndex(),
15018 suballocType,
15019 createInfoFinal.pool->m_DedicatedAllocations,
15020 blockVector,
15021 allocationCount,
15022 pAllocations);
15023 }
15024 else
15025 {
15026 // Bit mask of memory Vulkan types acceptable for this allocation.
15027 uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
15028 uint32_t memTypeIndex = UINT32_MAX;
15029 res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex);
15030 // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
15031 if(res != VK_SUCCESS)
15032 return res;
15033 do
15034 {
15035 VmaBlockVector* blockVector = m_pBlockVectors[memTypeIndex];
15036 VMA_ASSERT(blockVector && "Trying to use unsupported memory type!");
15037 res = AllocateMemoryOfType(
15039 vkMemReq.size,
15040 vkMemReq.alignment,
15041 requiresDedicatedAllocation || prefersDedicatedAllocation,
15042 dedicatedBuffer,
15043 dedicatedImage,
15044 dedicatedBufferImageUsage,
15045 createInfoFinal,
15046 memTypeIndex,
15047 suballocType,
15048 m_DedicatedAllocations[memTypeIndex],
15049 *blockVector,
15050 allocationCount,
15051 pAllocations);
15052 // Allocation succeeded
15053 if(res == VK_SUCCESS)
15054 return VK_SUCCESS;
15055
15056 // Remove old memTypeIndex from list of possibilities.
15057 memoryTypeBits &= ~(1u << memTypeIndex);
15058 // Find alternative memTypeIndex.
15059 res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex);
15060 } while(res == VK_SUCCESS);
15061
15062 // No other matching memory type index could be found.
15063 // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
15065 }
15066}
15067
15068void VmaAllocator_T::FreeMemory(
15069 size_t allocationCount,
15070 const VmaAllocation* pAllocations)
15071{
15072 VMA_ASSERT(pAllocations);
15073
15074 for(size_t allocIndex = allocationCount; allocIndex--; )
15075 {
15076 VmaAllocation allocation = pAllocations[allocIndex];
15077
15078 if(allocation != VK_NULL_HANDLE)
15079 {
15080 if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
15081 {
15082 FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
15083 }
15084
15085 allocation->FreeName(this);
15086
15087 switch(allocation->GetType())
15088 {
15089 case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15090 {
15091 VmaBlockVector* pBlockVector = VMA_NULL;
15092 VmaPool hPool = allocation->GetParentPool();
15093 if(hPool != VK_NULL_HANDLE)
15094 {
15095 pBlockVector = &hPool->m_BlockVector;
15096 }
15097 else
15098 {
15099 const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15100 pBlockVector = m_pBlockVectors[memTypeIndex];
15101 VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!");
15102 }
15103 pBlockVector->Free(allocation);
15104 }
15105 break;
15106 case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15107 FreeDedicatedMemory(allocation);
15108 break;
15109 default:
15110 VMA_ASSERT(0);
15111 }
15112 }
15113 }
15114}
15115
15116void VmaAllocator_T::CalculateStatistics(VmaTotalStatistics* pStats)
15117{
15118 // Initialize.
15119 VmaClearDetailedStatistics(pStats->total);
15120 for(uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
15121 VmaClearDetailedStatistics(pStats->memoryType[i]);
15122 for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
15123 VmaClearDetailedStatistics(pStats->memoryHeap[i]);
15124
15125 // Process default pools.
15126 for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15127 {
15128 VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15129 if (pBlockVector != VMA_NULL)
15130 pBlockVector->AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
15131 }
15132
15133 // Process custom pools.
15134 {
15135 VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15136 for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
15137 {
15138 VmaBlockVector& blockVector = pool->m_BlockVector;
15139 const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex();
15140 blockVector.AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
15141 pool->m_DedicatedAllocations.AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
15142 }
15143 }
15144
15145 // Process dedicated allocations.
15146 for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15147 {
15148 m_DedicatedAllocations[memTypeIndex].AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
15149 }
15150
15151 // Sum from memory types to memory heaps.
15152 for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15153 {
15154 const uint32_t memHeapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex;
15155 VmaAddDetailedStatistics(pStats->memoryHeap[memHeapIndex], pStats->memoryType[memTypeIndex]);
15156 }
15157
15158 // Sum from memory heaps to total.
15159 for(uint32_t memHeapIndex = 0; memHeapIndex < GetMemoryHeapCount(); ++memHeapIndex)
15160 VmaAddDetailedStatistics(pStats->total, pStats->memoryHeap[memHeapIndex]);
15161
15163 pStats->total.allocationSizeMax >= pStats->total.allocationSizeMin);
15164 VMA_ASSERT(pStats->total.unusedRangeCount == 0 ||
15166}
15167
// Fills outBudgets[0..heapCount) with usage/budget data for heaps
// [firstHeap, firstHeap+heapCount). With VK_EXT_memory_budget enabled,
// driver-reported numbers (cached, refreshed every 30 allocator operations)
// are combined with VMA's own bookkeeping; otherwise usage falls back to
// VMA's tracked block bytes and budget to an 80%-of-heap-size heuristic.
void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount)
{
#if VMA_MEMORY_BUDGET
    if(m_UseExtMemoryBudget)
    {
        // Cached driver data is considered fresh for up to 30 operations.
        if(m_Budget.m_OperationsSinceBudgetFetch < 30)
        {
            VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
            for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets)
            {
                const uint32_t heapIndex = firstHeap + i;

                // VMA's own counters for this heap.
                outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex];
                outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex];
                outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex];
                outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex];

                // Estimate current usage: driver-reported usage at last fetch,
                // adjusted by how much VMA's block bytes changed since then.
                // Guard against underflow when blocks were freed since the fetch.
                if(m_Budget.m_VulkanUsage[heapIndex] + outBudgets->statistics.blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
                {
                    outBudgets->usage = m_Budget.m_VulkanUsage[heapIndex] +
                        outBudgets->statistics.blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
                }
                else
                {
                    outBudgets->usage = 0;
                }

                // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
                outBudgets->budget = VMA_MIN(
                    m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
            }
        }
        else
        {
            UpdateVulkanBudget(); // Outside of mutex lock
            GetHeapBudgets(outBudgets, firstHeap, heapCount); // Recursion
        }
    }
    else
#endif
    {
        // No budget extension: report VMA's own counters only.
        for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets)
        {
            const uint32_t heapIndex = firstHeap + i;

            outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex];
            outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex];
            outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex];
            outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex];

            outBudgets->usage = outBudgets->statistics.blockBytes;
            outBudgets->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
        }
    }
}
15223
15224void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
15225{
15226 pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
15227 pAllocationInfo->deviceMemory = hAllocation->GetMemory();
15228 pAllocationInfo->offset = hAllocation->GetOffset();
15229 pAllocationInfo->size = hAllocation->GetSize();
15230 pAllocationInfo->pMappedData = hAllocation->GetMappedData();
15231 pAllocationInfo->pUserData = hAllocation->GetUserData();
15232 pAllocationInfo->pName = hAllocation->GetName();
15233}
15234
15235VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
15236{
15237 VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
15238
15239 VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
15240
15241 // Protection against uninitialized new structure member. If garbage data are left there, this pointer dereference would crash.
15242 if(pCreateInfo->pMemoryAllocateNext)
15243 {
15244 VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0);
15245 }
15246
15247 if(newCreateInfo.maxBlockCount == 0)
15248 {
15249 newCreateInfo.maxBlockCount = SIZE_MAX;
15250 }
15251 if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
15252 {
15254 }
15255 // Memory type index out of range or forbidden.
15256 if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
15257 ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
15258 {
15260 }
15261 if(newCreateInfo.minAllocationAlignment > 0)
15262 {
15263 VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment));
15264 }
15265
15266 const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
15267
15268 *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
15269
15270 VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
15271 if(res != VK_SUCCESS)
15272 {
15273 vma_delete(this, *pPool);
15274 *pPool = VMA_NULL;
15275 return res;
15276 }
15277
15278 // Add to m_Pools.
15279 {
15280 VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15281 (*pPool)->SetId(m_NextPoolId++);
15282 m_Pools.PushBack(*pPool);
15283 }
15284
15285 return VK_SUCCESS;
15286}
15287
15288void VmaAllocator_T::DestroyPool(VmaPool pool)
15289{
15290 // Remove from m_Pools.
15291 {
15292 VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
15293 m_Pools.Remove(pool);
15294 }
15295
15296 vma_delete(this, pool);
15297}
15298
15299void VmaAllocator_T::GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats)
15300{
15301 VmaClearStatistics(*pPoolStats);
15302 pool->m_BlockVector.AddStatistics(*pPoolStats);
15303 pool->m_DedicatedAllocations.AddStatistics(*pPoolStats);
15304}
15305
15306void VmaAllocator_T::CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats)
15307{
15308 VmaClearDetailedStatistics(*pPoolStats);
15309 pool->m_BlockVector.AddDetailedStatistics(*pPoolStats);
15310 pool->m_DedicatedAllocations.AddDetailedStatistics(*pPoolStats);
15311}
15312
15313void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
15314{
15315 m_CurrentFrameIndex.store(frameIndex);
15316
15317#if VMA_MEMORY_BUDGET
15318 if(m_UseExtMemoryBudget)
15319 {
15320 UpdateVulkanBudget();
15321 }
15322#endif // #if VMA_MEMORY_BUDGET
15323}
15324
15325VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
15326{
15327 return hPool->m_BlockVector.CheckCorruption();
15328}
15329
15330VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
15331{
15333
15334 // Process default pools.
15335 for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15336 {
15337 VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
15338 if(pBlockVector != VMA_NULL)
15339 {
15340 VkResult localRes = pBlockVector->CheckCorruption();
15341 switch(localRes)
15342 {
15344 break;
15345 case VK_SUCCESS:
15346 finalRes = VK_SUCCESS;
15347 break;
15348 default:
15349 return localRes;
15350 }
15351 }
15352 }
15353
15354 // Process custom pools.
15355 {
15356 VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15357 for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
15358 {
15359 if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
15360 {
15361 VkResult localRes = pool->m_BlockVector.CheckCorruption();
15362 switch(localRes)
15363 {
15365 break;
15366 case VK_SUCCESS:
15367 finalRes = VK_SUCCESS;
15368 break;
15369 default:
15370 return localRes;
15371 }
15372 }
15373 }
15374 }
15375
15376 return finalRes;
15377}
15378
15379VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
15380{
15381 AtomicTransactionalIncrement<uint32_t> deviceMemoryCountIncrement;
15382 const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount);
15383#if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
15384 if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount)
15385 {
15387 }
15388#endif
15389
15390 const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
15391
15392 // HeapSizeLimit is in effect for this heap.
15393 if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
15394 {
15395 const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
15396 VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
15397 for(;;)
15398 {
15399 const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
15400 if(blockBytesAfterAllocation > heapSize)
15401 {
15403 }
15404 if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
15405 {
15406 break;
15407 }
15408 }
15409 }
15410 else
15411 {
15412 m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
15413 }
15414 ++m_Budget.m_BlockCount[heapIndex];
15415
15416 // VULKAN CALL vkAllocateMemory.
15417 VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
15418
15419 if(res == VK_SUCCESS)
15420 {
15421#if VMA_MEMORY_BUDGET
15422 ++m_Budget.m_OperationsSinceBudgetFetch;
15423#endif
15424
15425 // Informative callback.
15426 if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
15427 {
15428 (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
15429 }
15430
15431 deviceMemoryCountIncrement.Commit();
15432 }
15433 else
15434 {
15435 --m_Budget.m_BlockCount[heapIndex];
15436 m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
15437 }
15438
15439 return res;
15440}
15441
15442void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
15443{
15444 // Informative callback.
15445 if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
15446 {
15447 (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
15448 }
15449
15450 // VULKAN CALL vkFreeMemory.
15451 (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
15452
15453 const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
15454 --m_Budget.m_BlockCount[heapIndex];
15455 m_Budget.m_BlockBytes[heapIndex] -= size;
15456
15457 --m_DeviceMemoryCount;
15458}
15459
15460VkResult VmaAllocator_T::BindVulkanBuffer(
15461 VkDeviceMemory memory,
15462 VkDeviceSize memoryOffset,
15463 VkBuffer buffer,
15464 const void* pNext)
15465{
15466 if(pNext != VMA_NULL)
15467 {
15468#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
15469 if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
15470 m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
15471 {
15473 bindBufferMemoryInfo.pNext = pNext;
15474 bindBufferMemoryInfo.buffer = buffer;
15475 bindBufferMemoryInfo.memory = memory;
15476 bindBufferMemoryInfo.memoryOffset = memoryOffset;
15477 return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
15478 }
15479 else
15480#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
15481 {
15483 }
15484 }
15485 else
15486 {
15487 return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
15488 }
15489}
15490
15491VkResult VmaAllocator_T::BindVulkanImage(
15492 VkDeviceMemory memory,
15493 VkDeviceSize memoryOffset,
15494 VkImage image,
15495 const void* pNext)
15496{
15497 if(pNext != VMA_NULL)
15498 {
15499#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
15500 if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
15501 m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
15502 {
15504 bindBufferMemoryInfo.pNext = pNext;
15505 bindBufferMemoryInfo.image = image;
15506 bindBufferMemoryInfo.memory = memory;
15507 bindBufferMemoryInfo.memoryOffset = memoryOffset;
15508 return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
15509 }
15510 else
15511#endif // #if VMA_BIND_MEMORY2
15512 {
15514 }
15515 }
15516 else
15517 {
15518 return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
15519 }
15520}
15521
15522VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
15523{
15524 switch(hAllocation->GetType())
15525 {
15526 case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15527 {
15528 VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15529 char *pBytes = VMA_NULL;
15530 VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
15531 if(res == VK_SUCCESS)
15532 {
15533 *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
15534 hAllocation->BlockAllocMap();
15535 }
15536 return res;
15537 }
15538 case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15539 return hAllocation->DedicatedAllocMap(this, ppData);
15540 default:
15541 VMA_ASSERT(0);
15543 }
15544}
15545
15546void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
15547{
15548 switch(hAllocation->GetType())
15549 {
15550 case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15551 {
15552 VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15553 hAllocation->BlockAllocUnmap();
15554 pBlock->Unmap(this, 1);
15555 }
15556 break;
15557 case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15558 hAllocation->DedicatedAllocUnmap(this);
15559 break;
15560 default:
15561 VMA_ASSERT(0);
15562 }
15563}
15564
15565VkResult VmaAllocator_T::BindBufferMemory(
15566 VmaAllocation hAllocation,
15567 VkDeviceSize allocationLocalOffset,
15568 VkBuffer hBuffer,
15569 const void* pNext)
15570{
15571 VkResult res = VK_SUCCESS;
15572 switch(hAllocation->GetType())
15573 {
15574 case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15575 res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
15576 break;
15577 case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15578 {
15579 VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
15580 VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block.");
15581 res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
15582 break;
15583 }
15584 default:
15585 VMA_ASSERT(0);
15586 }
15587 return res;
15588}
15589
15590VkResult VmaAllocator_T::BindImageMemory(
15591 VmaAllocation hAllocation,
15592 VkDeviceSize allocationLocalOffset,
15593 VkImage hImage,
15594 const void* pNext)
15595{
15596 VkResult res = VK_SUCCESS;
15597 switch(hAllocation->GetType())
15598 {
15599 case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
15600 res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
15601 break;
15602 case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15603 {
15604 VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
15605 VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block.");
15606 res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
15607 break;
15608 }
15609 default:
15610 VMA_ASSERT(0);
15611 }
15612 return res;
15613}
15614
15615VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
15616 VmaAllocation hAllocation,
15617 VkDeviceSize offset, VkDeviceSize size,
15618 VMA_CACHE_OPERATION op)
15619{
15620 VkResult res = VK_SUCCESS;
15621
15622 VkMappedMemoryRange memRange = {};
15623 if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
15624 {
15625 switch(op)
15626 {
15627 case VMA_CACHE_FLUSH:
15628 res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15629 break;
15630 case VMA_CACHE_INVALIDATE:
15631 res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15632 break;
15633 default:
15634 VMA_ASSERT(0);
15635 }
15636 }
15637 // else: Just ignore this call.
15638 return res;
15639}
15640
15641VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
15642 uint32_t allocationCount,
15643 const VmaAllocation* allocations,
15644 const VkDeviceSize* offsets, const VkDeviceSize* sizes,
15645 VMA_CACHE_OPERATION op)
15646{
15647 typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
15648 typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
15649 RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
15650
15651 for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
15652 {
15653 const VmaAllocation alloc = allocations[allocIndex];
15654 const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
15655 const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
15656 VkMappedMemoryRange newRange;
15657 if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
15658 {
15659 ranges.push_back(newRange);
15660 }
15661 }
15662
15663 VkResult res = VK_SUCCESS;
15664 if(!ranges.empty())
15665 {
15666 switch(op)
15667 {
15668 case VMA_CACHE_FLUSH:
15669 res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
15670 break;
15671 case VMA_CACHE_INVALIDATE:
15672 res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
15673 break;
15674 default:
15675 VMA_ASSERT(0);
15676 }
15677 }
15678 // else: Just ignore this call.
15679 return res;
15680}
15681
15682void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
15683{
15684 VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15685
15686 const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15687 VmaPool parentPool = allocation->GetParentPool();
15688 if(parentPool == VK_NULL_HANDLE)
15689 {
15690 // Default pool
15691 m_DedicatedAllocations[memTypeIndex].Unregister(allocation);
15692 }
15693 else
15694 {
15695 // Custom pool
15696 parentPool->m_DedicatedAllocations.Unregister(allocation);
15697 }
15698
15699 VkDeviceMemory hMemory = allocation->GetMemory();
15700
15701 /*
15702 There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
15703 before vkFreeMemory.
15704
15705 if(allocation->GetMappedData() != VMA_NULL)
15706 {
15707 (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15708 }
15709 */
15710
15711 FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15712
15713 m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
15714 m_AllocationObjectAllocator.Free(allocation);
15715
15716 VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15717}
15718
15719uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
15720{
15721 VkBufferCreateInfo dummyBufCreateInfo;
15722 VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
15723
15724 uint32_t memoryTypeBits = 0;
15725
15726 // Create buffer.
15727 VkBuffer buf = VK_NULL_HANDLE;
15728 VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
15729 m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
15730 if(res == VK_SUCCESS)
15731 {
15732 // Query for supported memory types.
15733 VkMemoryRequirements memReq;
15734 (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
15735 memoryTypeBits = memReq.memoryTypeBits;
15736
15737 // Destroy buffer.
15738 (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
15739 }
15740
15741 return memoryTypeBits;
15742}
15743
15744uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
15745{
15746 // Make sure memory information is already fetched.
15747 VMA_ASSERT(GetMemoryTypeCount() > 0);
15748
15749 uint32_t memoryTypeBits = UINT32_MAX;
15750
15751 if(!m_UseAmdDeviceCoherentMemory)
15752 {
15753 // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
15754 for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15755 {
15756 if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
15757 {
15758 memoryTypeBits &= ~(1u << memTypeIndex);
15759 }
15760 }
15761 }
15762
15763 return memoryTypeBits;
15764}
15765
// Computes the VkMappedMemoryRange to pass to vkFlushMappedMemoryRanges /
// vkInvalidateMappedMemoryRanges for the given sub-range of `allocation`.
// Returns false when nothing needs flushing (size == 0 or the memory type is
// HOST_COHERENT); outRange is then left untouched.
bool VmaAllocator_T::GetFlushOrInvalidateRange(
    VmaAllocation allocation,
    VkDeviceSize offset, VkDeviceSize size,
    VkMappedMemoryRange& outRange) const
{
    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        // Flush ranges must be aligned to the device's nonCoherentAtomSize.
        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
        const VkDeviceSize allocationSize = allocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        // NOTE(review): the line initializing outRange.sType appears to have
        // been dropped by the documentation extractor here - verify against
        // the upstream vk_mem_alloc.h.
        outRange.pNext = VMA_NULL;
        outRange.memory = allocation->GetMemory();

        switch(allocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Dedicated allocation: offsets are already relative to the start
            // of the VkDeviceMemory object; only atom alignment is needed.
            outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                outRange.size = allocationSize - outRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                // Clamp the aligned-up size so it never exceeds the allocation.
                outRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
                    allocationSize - outRange.offset);
            }
            break;
        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block: translate to block-relative offset and
            // clamp the size to the end of the block.
            const VkDeviceSize allocationOffset = allocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
            outRange.offset += allocationOffset;
            outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);

            break;
        }
        default:
            VMA_ASSERT(0);
        }
        return true;
    }
    return false;
}
15828
15829#if VMA_MEMORY_BUDGET
// Re-fetches per-heap budget and usage numbers from the driver via
// VK_EXT_memory_budget and stores them in m_Budget, sanitizing obviously
// bogus driver values.
void VmaAllocator_T::UpdateVulkanBudget()
{
    VMA_ASSERT(m_UseExtMemoryBudget);

    // NOTE(review): the declaration of the local `memProps`
    // (VkPhysicalDeviceMemoryProperties2KHR, sType-initialized) appears to
    // have been dropped by the documentation extractor - verify against the
    // upstream vk_mem_alloc.h.

    // Chain the budget query struct onto the memory-properties query.
    VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
    VmaPnextChainPushFront(&memProps, &budgetProps);

    GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);

    {
        // Exclusive lock: we are rewriting the cached budget snapshot.
        VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);

        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
        {
            // Snapshot driver-reported numbers plus our own block byte count.
            m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
            m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
            m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();

            // Some bugged drivers return the budget incorrectly, e.g. 0 or much bigger than heap size.
            if(m_Budget.m_VulkanBudget[heapIndex] == 0)
            {
                m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
            }
            else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
            {
                m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
            }
            if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
            {
                // Driver reported zero usage although we hold live blocks -
                // fall back to our own byte count.
                m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
            }
        }
        m_Budget.m_OperationsSinceBudgetFetch = 0;
    }
}
15867#endif // VMA_MEMORY_BUDGET
15868
15869void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15870{
15871 if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15872 hAllocation->IsMappingAllowed() &&
15873 (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15874 {
15875 void* pData = VMA_NULL;
15876 VkResult res = Map(hAllocation, &pData);
15877 if(res == VK_SUCCESS)
15878 {
15879 memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15880 FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15881 Unmap(hAllocation);
15882 }
15883 else
15884 {
15885 VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15886 }
15887 }
15888}
15889
15890uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
15891{
15892 uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
15893 if(memoryTypeBits == UINT32_MAX)
15894 {
15895 memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
15896 m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
15897 }
15898 return memoryTypeBits;
15899}
15900
15901#if VMA_STATS_STRING_ENABLED
// Writes the "DefaultPools" and "CustomPools" sections of the JSON stats dump:
// per memory type, the block vector contents and dedicated allocations, and
// the same per custom pool. Statement order defines the JSON structure, so it
// must not be rearranged.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    json.WriteString("DefaultPools");
    json.BeginObject();
    {
        // One "Type N" entry per memory type that has a default block vector.
        for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex];
            VmaDedicatedAllocationList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex];
            if (pBlockVector != VMA_NULL)
            {
                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();
                json.BeginObject();
                {
                    json.WriteString("PreferredBlockSize");
                    json.WriteNumber(pBlockVector->GetPreferredBlockSize());

                    json.WriteString("Blocks");
                    pBlockVector->PrintDetailedMap(json);

                    json.WriteString("DedicatedAllocations");
                    dedicatedAllocList.BuildStatsString(json);
                }
                json.EndObject();
            }
        }
    }
    json.EndObject();

    json.WriteString("CustomPools");
    json.BeginObject();
    {
        // Shared lock on the pool list while iterating it.
        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
        if (!m_Pools.IsEmpty())
        {
            // Group custom pools by memory type; open the "Type N" array lazily
            // on the first pool of that type (displayType flag).
            for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
            {
                bool displayType = true;
                size_t index = 0;
                for (VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
                {
                    VmaBlockVector& blockVector = pool->m_BlockVector;
                    if (blockVector.GetMemoryTypeIndex() == memTypeIndex)
                    {
                        if (displayType)
                        {
                            json.BeginString("Type ");
                            json.ContinueString(memTypeIndex);
                            json.EndString();
                            json.BeginArray();
                            displayType = false;
                        }

                        json.BeginObject();
                        {
                            // Pool label: running index, plus the user-set name if any.
                            json.WriteString("Name");
                            json.BeginString();
                            json.ContinueString_Size(index++);
                            if (pool->GetName())
                            {
                                json.ContinueString(" - ");
                                json.ContinueString(pool->GetName());
                            }
                            json.EndString();

                            json.WriteString("PreferredBlockSize");
                            json.WriteNumber(blockVector.GetPreferredBlockSize());

                            json.WriteString("Blocks");
                            blockVector.PrintDetailedMap(json);

                            json.WriteString("DedicatedAllocations");
                            pool->m_DedicatedAllocations.BuildStatsString(json);
                        }
                        json.EndObject();
                    }
                }

                // Close the array only if it was opened.
                if (!displayType)
                    json.EndArray();
            }
        }
    }
    json.EndObject();
}
15989#endif // VMA_STATS_STRING_ENABLED
15990#endif // _VMA_ALLOCATOR_T_FUNCTIONS
15991
15992
15993#ifndef _VMA_PUBLIC_INTERFACE
// NOTE(review): the `VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(`
// declaration line that should precede these parameters was dropped by the
// documentation extractor - verify against upstream vk_mem_alloc.h.
// Creates the allocator object; on Init failure destroys it and returns the error.
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator)
{
    VMA_ASSERT(pCreateInfo && pAllocator);
    // Only Vulkan API versions 1.0 through 1.3 are accepted.
    VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
        (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 3));
    VMA_DEBUG_LOG("vmaCreateAllocator");
    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    VkResult result = (*pAllocator)->Init(pCreateInfo);
    // Roll back on failed initialization so the caller never sees a
    // half-constructed allocator.
    if(result < 0)
    {
        vma_delete(pCreateInfo->pAllocationCallbacks, *pAllocator);
        *pAllocator = VK_NULL_HANDLE;
    }
    return result;
}

// vmaDestroyAllocator (declaration line lost in extraction): destroys the
// allocator; a VK_NULL_HANDLE argument is a no-op.
    VmaAllocator allocator)
{
    if(allocator != VK_NULL_HANDLE)
    {
        VMA_DEBUG_LOG("vmaDestroyAllocator");
        VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks; // Have to copy the callbacks when destroying.
        vma_delete(&allocationCallbacks, allocator);
    }
}
16022
16024{
16025 VMA_ASSERT(allocator && pAllocatorInfo);
16026 pAllocatorInfo->instance = allocator->m_hInstance;
16027 pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
16028 pAllocatorInfo->device = allocator->m_hDevice;
16029}
16030
16032 VmaAllocator allocator,
16033 const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
16034{
16035 VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
16036 *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
16037}
16038
16040 VmaAllocator allocator,
16041 const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
16042{
16043 VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
16044 *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
16045}
16046
16048 VmaAllocator allocator,
16049 uint32_t memoryTypeIndex,
16050 VkMemoryPropertyFlags* pFlags)
16051{
16052 VMA_ASSERT(allocator && pFlags);
16053 VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
16054 *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
16055}
16056
16058 VmaAllocator allocator,
16059 uint32_t frameIndex)
16060{
16061 VMA_ASSERT(allocator);
16062
16063 VMA_DEBUG_GLOBAL_MUTEX_LOCK
16064
16065 allocator->SetCurrentFrameIndex(frameIndex);
16066}
16067
16069 VmaAllocator allocator,
16070 VmaTotalStatistics* pStats)
16071{
16072 VMA_ASSERT(allocator && pStats);
16073 VMA_DEBUG_GLOBAL_MUTEX_LOCK
16074 allocator->CalculateStatistics(pStats);
16075}
16076
16078 VmaAllocator allocator,
16079 VmaBudget* pBudgets)
16080{
16081 VMA_ASSERT(allocator && pBudgets);
16082 VMA_DEBUG_GLOBAL_MUTEX_LOCK
16083 allocator->GetHeapBudgets(pBudgets, 0, allocator->GetMemoryHeapCount());
16084}
16085
16086#if VMA_STATS_STRING_ENABLED
16087
// vmaBuildStatsString: builds the full JSON statistics dump into a newly
// allocated string; free it with vmaFreeStatsString.
// NOTE(review): this extracted copy is badly garbled - the declaration line,
// the local `budgets` array declaration, and numerous `if (flags & ...)`
// guards and flag-mask operand lines are missing. Verify every marked spot
// against upstream vk_mem_alloc.h before editing.
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator->GetAllocationCallbacks());
    {
        // NOTE(review): declaration of `budgets` (per-heap VmaBudget array)
        // is missing here in the extracted copy.
        allocator->GetHeapBudgets(budgets, 0, allocator->GetMemoryHeapCount());

        VmaTotalStatistics stats;
        allocator->CalculateStatistics(&stats);

        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();
        {
            // "General": device identification and the limits relevant to VMA.
            json.WriteString("General");
            json.BeginObject();
            {
                const VkPhysicalDeviceProperties& deviceProperties = allocator->m_PhysicalDeviceProperties;
                const VkPhysicalDeviceMemoryProperties& memoryProperties = allocator->m_MemProps;

                json.WriteString("API");
                json.WriteString("Vulkan");

                json.WriteString("apiVersion");
                json.BeginString();
                json.ContinueString(VK_API_VERSION_MAJOR(deviceProperties.apiVersion));
                json.ContinueString(".");
                json.ContinueString(VK_API_VERSION_MINOR(deviceProperties.apiVersion));
                json.ContinueString(".");
                json.ContinueString(VK_API_VERSION_PATCH(deviceProperties.apiVersion));
                json.EndString();

                json.WriteString("GPU");
                json.WriteString(deviceProperties.deviceName);
                json.WriteString("deviceType");
                json.WriteNumber(static_cast<uint32_t>(deviceProperties.deviceType));

                json.WriteString("maxMemoryAllocationCount");
                json.WriteNumber(deviceProperties.limits.maxMemoryAllocationCount);
                json.WriteString("bufferImageGranularity");
                json.WriteNumber(deviceProperties.limits.bufferImageGranularity);
                json.WriteString("nonCoherentAtomSize");
                json.WriteNumber(deviceProperties.limits.nonCoherentAtomSize);

                json.WriteString("memoryHeapCount");
                json.WriteNumber(memoryProperties.memoryHeapCount);
                json.WriteString("memoryTypeCount");
                json.WriteNumber(memoryProperties.memoryTypeCount);
            }
            json.EndObject();
        }
        {
            // "Total": aggregated statistics over all heaps and types.
            json.WriteString("Total");
            VmaPrintDetailedStatistics(json, stats.total);
        }
        {
            // "MemoryInfo": per-heap details, each heap listing its types.
            json.WriteString("MemoryInfo");
            json.BeginObject();
            {
                for (uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
                {
                    json.BeginString("Heap ");
                    json.ContinueString(heapIndex);
                    json.EndString();
                    json.BeginObject();
                    {
                        const VkMemoryHeap& heapInfo = allocator->m_MemProps.memoryHeaps[heapIndex];
                        json.WriteString("Flags");
                        json.BeginArray(true);
                        {
                            // NOTE(review): the `if (heapInfo.flags & ...)` guard
                            // lines before these WriteString calls are missing
                            // in the extracted copy.
                            json.WriteString("DEVICE_LOCAL");
#if VMA_VULKAN_VERSION >= 1001000
                            json.WriteString("MULTI_INSTANCE");
#endif

                            // NOTE(review): the flag-mask operand lines of this
                            // expression are missing in the extracted copy.
                            VkMemoryHeapFlags flags = heapInfo.flags &
#if VMA_VULKAN_VERSION >= 1001000
#endif
                                );
                            if (flags != 0)
                                json.WriteNumber(flags);
                        }
                        json.EndArray();

                        json.WriteString("Size");
                        json.WriteNumber(heapInfo.size);

                        json.WriteString("Budget");
                        json.BeginObject();
                        {
                            json.WriteString("BudgetBytes");
                            json.WriteNumber(budgets[heapIndex].budget);
                            json.WriteString("UsageBytes");
                            json.WriteNumber(budgets[heapIndex].usage);
                        }
                        json.EndObject();

                        json.WriteString("Stats");
                        VmaPrintDetailedStatistics(json, stats.memoryHeap[heapIndex]);

                        json.WriteString("MemoryPools");
                        json.BeginObject();
                        {
                            // Only memory types belonging to this heap.
                            for (uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
                            {
                                if (allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                                {
                                    json.BeginString("Type ");
                                    json.ContinueString(typeIndex);
                                    json.EndString();
                                    json.BeginObject();
                                    {
                                        json.WriteString("Flags");
                                        json.BeginArray(true);
                                        {
                                            VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                                            // NOTE(review): the `if (flags & ...)` guard
                                            // lines before these WriteString calls are
                                            // missing in the extracted copy.
                                            json.WriteString("DEVICE_LOCAL");
                                            json.WriteString("HOST_VISIBLE");
                                            json.WriteString("HOST_COHERENT");
                                            json.WriteString("HOST_CACHED");
                                            json.WriteString("LAZILY_ALLOCATED");
#if VMA_VULKAN_VERSION >= 1001000
                                            json.WriteString("PROTECTED");
#endif
#if VK_AMD_device_coherent_memory
                                            if (flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY)
                                                json.WriteString("DEVICE_COHERENT_AMD");
                                            if (flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)
                                                json.WriteString("DEVICE_UNCACHED_AMD");
#endif

                                            // NOTE(review): the expression masking
                                            // `flags` to the known bits is partially
                                            // missing in the extracted copy.
#if VMA_VULKAN_VERSION >= 1001000
#endif
#if VK_AMD_device_coherent_memory
                                                | VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY
                                                | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY
#endif
                                            if (flags != 0)
                                                json.WriteNumber(flags);
                                        }
                                        json.EndArray();

                                        json.WriteString("Stats");
                                        VmaPrintDetailedStatistics(json, stats.memoryType[typeIndex]);
                                    }
                                    json.EndObject();
                                }
                            }

                        }
                        json.EndObject();
                    }
                    json.EndObject();
                }
            }
            json.EndObject();
        }
    }
    json.EndObject();
}

if (detailedMap == VK_TRUE)
    allocator->PrintDetailedMap(json);

json.EndObject();
    }

    *ppStatsString = VmaCreateStringCopy(allocator->GetAllocationCallbacks(), sb.GetData(), sb.GetLength());
}
16274
// vmaFreeStatsString (declaration line lost in extraction): frees a string
// returned by vmaBuildStatsString; NULL is a no-op.
    VmaAllocator allocator,
    char* pStatsString)
{
    if(pStatsString != VMA_NULL)
    {
        VMA_ASSERT(allocator);
        VmaFreeString(allocator->GetAllocationCallbacks(), pStatsString);
    }
}
16285
16286#endif // VMA_STATS_STRING_ENABLED
16287
16288/*
16289This function is not protected by any mutex because it just reads immutable data.
16290*/
// vmaFindMemoryTypeIndex (declaration line lost in extraction): picks the best
// memory type for the given bitmask and allocation create info. The
// UINT32_MAX argument means "no buffer/image usage known".
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex)
{
    VMA_ASSERT(allocator != VK_NULL_HANDLE);
    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);

    return allocator->FindMemoryTypeIndex(memoryTypeBits, pAllocationCreateInfo, UINT32_MAX, pMemoryTypeIndex);
}
16303
// vmaFindMemoryTypeIndexForBufferInfo (declaration line lost in extraction):
// finds a memory type suitable for a buffer described by pBufferCreateInfo.
// Uses vkGetDeviceBufferMemoryRequirements (Vulkan 1.3) when available;
// otherwise creates and destroys a temporary buffer to query requirements.
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex)
{
    VMA_ASSERT(allocator != VK_NULL_HANDLE);
    VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);

    const VkDevice hDev = allocator->m_hDevice;
    const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions();
    VkResult res;

#if VMA_VULKAN_VERSION >= 1003000
    if(funcs->vkGetDeviceBufferMemoryRequirements)
    {
        // Can query straight from VkBufferCreateInfo :)
        VkDeviceBufferMemoryRequirements devBufMemReq = {VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS};
        devBufMemReq.pCreateInfo = pBufferCreateInfo;

        // NOTE(review): the declaration of `memReq` (VkMemoryRequirements2,
        // sType-initialized) is missing in this extracted copy.
        (*funcs->vkGetDeviceBufferMemoryRequirements)(hDev, &devBufMemReq, &memReq);

        res = allocator->FindMemoryTypeIndex(
            memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex);
    }
    else
#endif // #if VMA_VULKAN_VERSION >= 1003000
    {
        // Must create a dummy buffer to query :(
        VkBuffer hBuffer = VK_NULL_HANDLE;
        res = funcs->vkCreateBuffer(
            hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
        if(res == VK_SUCCESS)
        {
            VkMemoryRequirements memReq = {};
            funcs->vkGetBufferMemoryRequirements(hDev, hBuffer, &memReq);

            res = allocator->FindMemoryTypeIndex(
                memReq.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex);

            funcs->vkDestroyBuffer(
                hDev, hBuffer, allocator->GetAllocationCallbacks());
        }
    }
    return res;
}
16353
// vmaFindMemoryTypeIndexForImageInfo (declaration line lost in extraction):
// finds a memory type suitable for an image described by pImageCreateInfo.
// Uses vkGetDeviceImageMemoryRequirements (Vulkan 1.3) when available;
// otherwise creates and destroys a temporary image to query requirements.
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex)
{
    VMA_ASSERT(allocator != VK_NULL_HANDLE);
    VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);

    const VkDevice hDev = allocator->m_hDevice;
    const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions();
    VkResult res;

#if VMA_VULKAN_VERSION >= 1003000
    if(funcs->vkGetDeviceImageMemoryRequirements)
    {
        // Can query straight from VkImageCreateInfo :)
        VkDeviceImageMemoryRequirements devImgMemReq = {VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS};
        devImgMemReq.pCreateInfo = pImageCreateInfo;
        // DRM-format-modifier and disjoint images would need planeAspect,
        // which cannot be derived here.
        VMA_ASSERT(pImageCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY && (pImageCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT_COPY) == 0 &&
            "Cannot use this VkImageCreateInfo with vmaFindMemoryTypeIndexForImageInfo as I don't know what to pass as VkDeviceImageMemoryRequirements::planeAspect.");

        // NOTE(review): the declaration of `memReq` (VkMemoryRequirements2,
        // sType-initialized) is missing in this extracted copy.
        (*funcs->vkGetDeviceImageMemoryRequirements)(hDev, &devImgMemReq, &memReq);

        res = allocator->FindMemoryTypeIndex(
            memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex);
    }
    else
#endif // #if VMA_VULKAN_VERSION >= 1003000
    {
        // Must create a dummy image to query :(
        VkImage hImage = VK_NULL_HANDLE;
        res = funcs->vkCreateImage(
            hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
        if(res == VK_SUCCESS)
        {
            VkMemoryRequirements memReq = {};
            funcs->vkGetImageMemoryRequirements(hDev, hImage, &memReq);

            res = allocator->FindMemoryTypeIndex(
                memReq.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex);

            funcs->vkDestroyImage(
                hDev, hImage, allocator->GetAllocationCallbacks());
        }
    }
    return res;
}
16405
// NOTE(review): throughout this pool API section the `VMA_CALL_PRE ...
// VMA_CALL_POST vmaX(` declaration lines were dropped by the documentation
// extractor; the parameter lists below belong to the functions named in the
// comments. Verify against upstream vk_mem_alloc.h.

// vmaCreatePool: forwards to the allocator's pool factory.
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool)
{
    VMA_ASSERT(allocator && pCreateInfo && pPool);

    VMA_DEBUG_LOG("vmaCreatePool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->CreatePool(pCreateInfo, pPool);
}

// vmaDestroyPool: destroys a pool; VK_NULL_HANDLE is a no-op.
    VmaAllocator allocator,
    VmaPool pool)
{
    VMA_ASSERT(allocator);

    if(pool == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyPool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->DestroyPool(pool);
}

// vmaGetPoolStatistics: fast per-pool statistics.
    VmaAllocator allocator,
    VmaPool pool,
    VmaStatistics* pPoolStats)
{
    VMA_ASSERT(allocator && pool && pPoolStats);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->GetPoolStatistics(pool, pPoolStats);
}

// vmaCalculatePoolStatistics: detailed (slower) per-pool statistics.
    VmaAllocator allocator,
    VmaPool pool,
    VmaDetailedStatistics* pPoolStats)
{
    VMA_ASSERT(allocator && pool && pPoolStats);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->CalculatePoolStatistics(pool, pPoolStats);
}

// vmaCheckPoolCorruption (parameter lines also lost in extraction): validates
// margin patterns in the pool's memory.
{
    VMA_ASSERT(allocator && pool);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VMA_DEBUG_LOG("vmaCheckPoolCorruption");

    return allocator->CheckPoolCorruption(pool);
}

// vmaGetPoolName: returns the pool's user-assigned name (may be null).
    VmaAllocator allocator,
    VmaPool pool,
    const char** ppName)
{
    VMA_ASSERT(allocator && pool && ppName);

    VMA_DEBUG_LOG("vmaGetPoolName");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *ppName = pool->GetName();
}

// vmaSetPoolName: stores a copy of the given name on the pool.
    VmaAllocator allocator,
    VmaPool pool,
    const char* pName)
{
    VMA_ASSERT(allocator && pool);

    VMA_DEBUG_LOG("vmaSetPoolName");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    pool->SetName(pName);
}
16500
// vmaAllocateMemory (declaration line lost in extraction): allocates memory
// for explicit VkMemoryRequirements, with no dedicated-allocation hints.
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        UINT32_MAX, // dedicatedBufferImageUsage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        1, // allocationCount
        pAllocation);

    // Optionally report details of the successful allocation.
    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}

// vmaAllocateMemoryPages (declaration line lost in extraction): like
// vmaAllocateMemory but produces `allocationCount` separate allocations in
// one call; zero count succeeds trivially.
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    size_t allocationCount,
    VmaAllocation* pAllocations,
    VmaAllocationInfo* pAllocationInfo)
{
    if(allocationCount == 0)
    {
        return VK_SUCCESS;
    }

    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);

    VMA_DEBUG_LOG("vmaAllocateMemoryPages");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        UINT32_MAX, // dedicatedBufferImageUsage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        allocationCount,
        pAllocations);

    // Optionally fill one VmaAllocationInfo per successful allocation.
    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    {
        for(size_t i = 0; i < allocationCount; ++i)
        {
            allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
        }
    }

    return result;
}
16575
// vmaAllocateMemoryForBuffer (declaration line lost in extraction): queries
// the buffer's memory requirements (including dedicated-allocation hints)
// and allocates matching memory. Does not bind.
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkMemoryRequirements vkMemReq = {};
    bool requiresDedicatedAllocation = false;
    bool prefersDedicatedAllocation = false;
    allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation);

    VkResult result = allocator->AllocateMemory(
        vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation,
        buffer, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        UINT32_MAX, // dedicatedBufferImageUsage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_BUFFER,
        1, // allocationCount
        pAllocation);

    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}

// vmaAllocateMemoryForImage (declaration line lost in extraction): image
// counterpart of the function above.
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemoryForImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkMemoryRequirements vkMemReq = {};
    bool requiresDedicatedAllocation = false;
    bool prefersDedicatedAllocation = false;
    allocator->GetImageMemoryRequirements(image, vkMemReq,
        requiresDedicatedAllocation, prefersDedicatedAllocation);

    VkResult result = allocator->AllocateMemory(
        vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation,
        VK_NULL_HANDLE, // dedicatedBuffer
        image, // dedicatedImage
        UINT32_MAX, // dedicatedBufferImageUsage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
        1, // allocationCount
        pAllocation);

    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
16654
// vmaFreeMemory (declaration line lost in extraction): frees one allocation;
// VK_NULL_HANDLE is a no-op.
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    if(allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaFreeMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->FreeMemory(
        1, // allocationCount
        &allocation);
}

// vmaFreeMemoryPages (declaration line lost in extraction): frees a batch of
// allocations; zero count is a no-op.
    VmaAllocator allocator,
    size_t allocationCount,
    const VmaAllocation* pAllocations)
{
    if(allocationCount == 0)
    {
        return;
    }

    VMA_ASSERT(allocator);

    VMA_DEBUG_LOG("vmaFreeMemoryPages");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->FreeMemory(allocationCount, pAllocations);
}
16693
// vmaGetAllocationInfo (declaration line lost in extraction): fills
// pAllocationInfo with the allocation's current properties.
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && allocation && pAllocationInfo);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->GetAllocationInfo(allocation, pAllocationInfo);
}

// vmaSetAllocationUserData (declaration line lost in extraction): attaches an
// opaque user pointer to the allocation.
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocation->SetUserData(allocator, pUserData);
}

// vmaSetAllocationName (declaration line lost in extraction): stores a copy of
// the given debug name on the allocation.
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    const char* VMA_NULLABLE pName)
{
    allocation->SetName(allocator, pName);
}

// vmaGetAllocationMemoryProperties: property flags of the allocation's memory
// type. NOTE(review): the `VkMemoryPropertyFlags* pFlags` parameter line is
// missing in this extracted copy.
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
{
    VMA_ASSERT(allocator && allocation && pFlags);
    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    *pFlags = allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
}
16735
// vmaMapMemory (declaration line lost in extraction): maps the allocation and
// returns a CPU pointer through ppData.
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData)
{
    VMA_ASSERT(allocator && allocation && ppData);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->Map(allocation, ppData);
}

// vmaUnmapMemory (declaration line lost in extraction): releases one mapping
// reference obtained via vmaMapMemory.
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->Unmap(allocation);
}

// vmaFlushAllocation (declaration line lost in extraction): flushes the given
// sub-range of the allocation (host writes -> device).
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize offset,
    VkDeviceSize size)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaFlushAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);

    return res;
}

// vmaInvalidateAllocation (declaration line lost in extraction): invalidates
// the given sub-range (device writes -> host visibility).
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize offset,
    VkDeviceSize size)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaInvalidateAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);

    return res;
}
16792
// vmaFlushAllocations (declaration line lost in extraction): batched flush of
// multiple allocation sub-ranges; zero count succeeds trivially.
    VmaAllocator allocator,
    uint32_t allocationCount,
    const VmaAllocation* allocations,
    const VkDeviceSize* offsets,
    const VkDeviceSize* sizes)
{
    VMA_ASSERT(allocator);

    if(allocationCount == 0)
    {
        return VK_SUCCESS;
    }

    VMA_ASSERT(allocations);

    VMA_DEBUG_LOG("vmaFlushAllocations");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);

    return res;
}

// vmaInvalidateAllocations (declaration line lost in extraction): batched
// invalidate counterpart of the function above.
    VmaAllocator allocator,
    uint32_t allocationCount,
    const VmaAllocation* allocations,
    const VkDeviceSize* offsets,
    const VkDeviceSize* sizes)
{
    VMA_ASSERT(allocator);

    if(allocationCount == 0)
    {
        return VK_SUCCESS;
    }

    VMA_ASSERT(allocations);

    VMA_DEBUG_LOG("vmaInvalidateAllocations");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);

    return res;
}

// vmaCheckCorruption (declaration line lost in extraction): validates margin
// patterns in the selected memory types.
    VmaAllocator allocator,
    uint32_t memoryTypeBits)
{
    VMA_ASSERT(allocator);

    VMA_DEBUG_LOG("vmaCheckCorruption");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->CheckCorruption(memoryTypeBits);
}
16855
16857 VmaAllocator allocator,
16858 const VmaDefragmentationInfo* pInfo,
16859 VmaDefragmentationContext* pContext)
16860{
16861 VMA_ASSERT(allocator && pInfo && pContext);
16862
16863 VMA_DEBUG_LOG("vmaBeginDefragmentation");
16864
16865 if (pInfo->pool != VMA_NULL)
16866 {
16867 // Check if run on supported algorithms
16868 if (pInfo->pool->m_BlockVector.GetAlgorithm() & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
16870 }
16871
16872 VMA_DEBUG_GLOBAL_MUTEX_LOCK
16873
16874 *pContext = vma_new(allocator, VmaDefragmentationContext_T)(allocator, *pInfo);
16875 return VK_SUCCESS;
16876}
16877
16879 VmaAllocator allocator,
16882{
16883 VMA_ASSERT(allocator && context);
16884
16885 VMA_DEBUG_LOG("vmaEndDefragmentation");
16886
16887 VMA_DEBUG_GLOBAL_MUTEX_LOCK
16888
16889 if (pStats)
16890 context->GetStats(*pStats);
16891 vma_delete(allocator, context);
16892}
16893
16895 VmaAllocator VMA_NOT_NULL allocator,
16898{
16899 VMA_ASSERT(context && pPassInfo);
16900
16901 VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
16902
16903 VMA_DEBUG_GLOBAL_MUTEX_LOCK
16904
16905 return context->DefragmentPassBegin(*pPassInfo);
16906}
16907
16909 VmaAllocator VMA_NOT_NULL allocator,
16912{
16913 VMA_ASSERT(context && pPassInfo);
16914
16915 VMA_DEBUG_LOG("vmaEndDefragmentationPass");
16916
16917 VMA_DEBUG_GLOBAL_MUTEX_LOCK
16918
16919 return context->DefragmentPassEnd(*pPassInfo);
16920}
16921
16923 VmaAllocator allocator,
16924 VmaAllocation allocation,
16925 VkBuffer buffer)
16926{
16927 VMA_ASSERT(allocator && allocation && buffer);
16928
16929 VMA_DEBUG_LOG("vmaBindBufferMemory");
16930
16931 VMA_DEBUG_GLOBAL_MUTEX_LOCK
16932
16933 return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
16934}
16935
16937 VmaAllocator allocator,
16938 VmaAllocation allocation,
16939 VkDeviceSize allocationLocalOffset,
16940 VkBuffer buffer,
16941 const void* pNext)
16942{
16943 VMA_ASSERT(allocator && allocation && buffer);
16944
16945 VMA_DEBUG_LOG("vmaBindBufferMemory2");
16946
16947 VMA_DEBUG_GLOBAL_MUTEX_LOCK
16948
16949 return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
16950}
16951
16953 VmaAllocator allocator,
16954 VmaAllocation allocation,
16955 VkImage image)
16956{
16957 VMA_ASSERT(allocator && allocation && image);
16958
16959 VMA_DEBUG_LOG("vmaBindImageMemory");
16960
16961 VMA_DEBUG_GLOBAL_MUTEX_LOCK
16962
16963 return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
16964}
16965
16967 VmaAllocator allocator,
16968 VmaAllocation allocation,
16969 VkDeviceSize allocationLocalOffset,
16970 VkImage image,
16971 const void* pNext)
16972{
16973 VMA_ASSERT(allocator && allocation && image);
16974
16975 VMA_DEBUG_LOG("vmaBindImageMemory2");
16976
16977 VMA_DEBUG_GLOBAL_MUTEX_LOCK
16978
16979 return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
16980}
16981
16983 VmaAllocator allocator,
16984 const VkBufferCreateInfo* pBufferCreateInfo,
16985 const VmaAllocationCreateInfo* pAllocationCreateInfo,
16986 VkBuffer* pBuffer,
16987 VmaAllocation* pAllocation,
16988 VmaAllocationInfo* pAllocationInfo)
16989{
16990 VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16991
16992 if(pBufferCreateInfo->size == 0)
16993 {
16995 }
16996 if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
16997 !allocator->m_UseKhrBufferDeviceAddress)
16998 {
16999 VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
17001 }
17002
17003 VMA_DEBUG_LOG("vmaCreateBuffer");
17004
17005 VMA_DEBUG_GLOBAL_MUTEX_LOCK
17006
17007 *pBuffer = VK_NULL_HANDLE;
17008 *pAllocation = VK_NULL_HANDLE;
17009
17010 // 1. Create VkBuffer.
17011 VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
17012 allocator->m_hDevice,
17013 pBufferCreateInfo,
17014 allocator->GetAllocationCallbacks(),
17015 pBuffer);
17016 if(res >= 0)
17017 {
17018 // 2. vkGetBufferMemoryRequirements.
17019 VkMemoryRequirements vkMemReq = {};
17020 bool requiresDedicatedAllocation = false;
17021 bool prefersDedicatedAllocation = false;
17022 allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
17023 requiresDedicatedAllocation, prefersDedicatedAllocation);
17024
17025 // 3. Allocate memory using allocator.
17026 res = allocator->AllocateMemory(
17027 vkMemReq,
17028 requiresDedicatedAllocation,
17029 prefersDedicatedAllocation,
17030 *pBuffer, // dedicatedBuffer
17031 VK_NULL_HANDLE, // dedicatedImage
17032 pBufferCreateInfo->usage, // dedicatedBufferImageUsage
17033 *pAllocationCreateInfo,
17034 VMA_SUBALLOCATION_TYPE_BUFFER,
17035 1, // allocationCount
17036 pAllocation);
17037
17038 if(res >= 0)
17039 {
17040 // 3. Bind buffer with memory.
17041 if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
17042 {
17043 res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
17044 }
17045 if(res >= 0)
17046 {
17047 // All steps succeeded.
17048 #if VMA_STATS_STRING_ENABLED
17049 (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
17050 #endif
17051 if(pAllocationInfo != VMA_NULL)
17052 {
17053 allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17054 }
17055
17056 return VK_SUCCESS;
17057 }
17058 allocator->FreeMemory(
17059 1, // allocationCount
17060 pAllocation);
17061 *pAllocation = VK_NULL_HANDLE;
17062 (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
17063 *pBuffer = VK_NULL_HANDLE;
17064 return res;
17065 }
17066 (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
17067 *pBuffer = VK_NULL_HANDLE;
17068 return res;
17069 }
17070 return res;
17071}
17072
17074 VmaAllocator allocator,
17075 const VkBufferCreateInfo* pBufferCreateInfo,
17076 const VmaAllocationCreateInfo* pAllocationCreateInfo,
17077 VkDeviceSize minAlignment,
17078 VkBuffer* pBuffer,
17079 VmaAllocation* pAllocation,
17080 VmaAllocationInfo* pAllocationInfo)
17081{
17082 VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && VmaIsPow2(minAlignment) && pBuffer && pAllocation);
17083
17084 if(pBufferCreateInfo->size == 0)
17085 {
17087 }
17088 if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
17089 !allocator->m_UseKhrBufferDeviceAddress)
17090 {
17091 VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
17093 }
17094
17095 VMA_DEBUG_LOG("vmaCreateBufferWithAlignment");
17096
17097 VMA_DEBUG_GLOBAL_MUTEX_LOCK
17098
17099 *pBuffer = VK_NULL_HANDLE;
17100 *pAllocation = VK_NULL_HANDLE;
17101
17102 // 1. Create VkBuffer.
17103 VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
17104 allocator->m_hDevice,
17105 pBufferCreateInfo,
17106 allocator->GetAllocationCallbacks(),
17107 pBuffer);
17108 if(res >= 0)
17109 {
17110 // 2. vkGetBufferMemoryRequirements.
17111 VkMemoryRequirements vkMemReq = {};
17112 bool requiresDedicatedAllocation = false;
17113 bool prefersDedicatedAllocation = false;
17114 allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
17115 requiresDedicatedAllocation, prefersDedicatedAllocation);
17116
17117 // 2a. Include minAlignment
17118 vkMemReq.alignment = VMA_MAX(vkMemReq.alignment, minAlignment);
17119
17120 // 3. Allocate memory using allocator.
17121 res = allocator->AllocateMemory(
17122 vkMemReq,
17123 requiresDedicatedAllocation,
17124 prefersDedicatedAllocation,
17125 *pBuffer, // dedicatedBuffer
17126 VK_NULL_HANDLE, // dedicatedImage
17127 pBufferCreateInfo->usage, // dedicatedBufferImageUsage
17128 *pAllocationCreateInfo,
17129 VMA_SUBALLOCATION_TYPE_BUFFER,
17130 1, // allocationCount
17131 pAllocation);
17132
17133 if(res >= 0)
17134 {
17135 // 3. Bind buffer with memory.
17136 if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
17137 {
17138 res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
17139 }
17140 if(res >= 0)
17141 {
17142 // All steps succeeded.
17143 #if VMA_STATS_STRING_ENABLED
17144 (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
17145 #endif
17146 if(pAllocationInfo != VMA_NULL)
17147 {
17148 allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17149 }
17150
17151 return VK_SUCCESS;
17152 }
17153 allocator->FreeMemory(
17154 1, // allocationCount
17155 pAllocation);
17156 *pAllocation = VK_NULL_HANDLE;
17157 (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
17158 *pBuffer = VK_NULL_HANDLE;
17159 return res;
17160 }
17161 (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
17162 *pBuffer = VK_NULL_HANDLE;
17163 return res;
17164 }
17165 return res;
17166}
17167
17169 VmaAllocator VMA_NOT_NULL allocator,
17170 VmaAllocation VMA_NOT_NULL allocation,
17171 const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
17173{
17174 VMA_ASSERT(allocator && pBufferCreateInfo && pBuffer && allocation);
17175
17176 VMA_DEBUG_LOG("vmaCreateAliasingBuffer");
17177
17178 *pBuffer = VK_NULL_HANDLE;
17179
17180 if (pBufferCreateInfo->size == 0)
17181 {
17183 }
17184 if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
17185 !allocator->m_UseKhrBufferDeviceAddress)
17186 {
17187 VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
17189 }
17190
17191 VMA_DEBUG_GLOBAL_MUTEX_LOCK
17192
17193 // 1. Create VkBuffer.
17194 VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
17195 allocator->m_hDevice,
17196 pBufferCreateInfo,
17197 allocator->GetAllocationCallbacks(),
17198 pBuffer);
17199 if (res >= 0)
17200 {
17201 // 2. Bind buffer with memory.
17202 res = allocator->BindBufferMemory(allocation, 0, *pBuffer, VMA_NULL);
17203 if (res >= 0)
17204 {
17205 return VK_SUCCESS;
17206 }
17207 (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
17208 }
17209 return res;
17210}
17211
17213 VmaAllocator allocator,
17214 VkBuffer buffer,
17215 VmaAllocation allocation)
17216{
17217 VMA_ASSERT(allocator);
17218
17219 if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
17220 {
17221 return;
17222 }
17223
17224 VMA_DEBUG_LOG("vmaDestroyBuffer");
17225
17226 VMA_DEBUG_GLOBAL_MUTEX_LOCK
17227
17228 if(buffer != VK_NULL_HANDLE)
17229 {
17230 (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
17231 }
17232
17233 if(allocation != VK_NULL_HANDLE)
17234 {
17235 allocator->FreeMemory(
17236 1, // allocationCount
17237 &allocation);
17238 }
17239}
17240
17242 VmaAllocator allocator,
17243 const VkImageCreateInfo* pImageCreateInfo,
17244 const VmaAllocationCreateInfo* pAllocationCreateInfo,
17245 VkImage* pImage,
17246 VmaAllocation* pAllocation,
17247 VmaAllocationInfo* pAllocationInfo)
17248{
17249 VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
17250
17251 if(pImageCreateInfo->extent.width == 0 ||
17252 pImageCreateInfo->extent.height == 0 ||
17253 pImageCreateInfo->extent.depth == 0 ||
17254 pImageCreateInfo->mipLevels == 0 ||
17255 pImageCreateInfo->arrayLayers == 0)
17256 {
17258 }
17259
17260 VMA_DEBUG_LOG("vmaCreateImage");
17261
17262 VMA_DEBUG_GLOBAL_MUTEX_LOCK
17263
17264 *pImage = VK_NULL_HANDLE;
17265 *pAllocation = VK_NULL_HANDLE;
17266
17267 // 1. Create VkImage.
17268 VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
17269 allocator->m_hDevice,
17270 pImageCreateInfo,
17271 allocator->GetAllocationCallbacks(),
17272 pImage);
17273 if(res >= 0)
17274 {
17275 VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
17276 VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
17277 VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
17278
17279 // 2. Allocate memory using allocator.
17280 VkMemoryRequirements vkMemReq = {};
17281 bool requiresDedicatedAllocation = false;
17282 bool prefersDedicatedAllocation = false;
17283 allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
17284 requiresDedicatedAllocation, prefersDedicatedAllocation);
17285
17286 res = allocator->AllocateMemory(
17287 vkMemReq,
17288 requiresDedicatedAllocation,
17289 prefersDedicatedAllocation,
17290 VK_NULL_HANDLE, // dedicatedBuffer
17291 *pImage, // dedicatedImage
17292 pImageCreateInfo->usage, // dedicatedBufferImageUsage
17293 *pAllocationCreateInfo,
17294 suballocType,
17295 1, // allocationCount
17296 pAllocation);
17297
17298 if(res >= 0)
17299 {
17300 // 3. Bind image with memory.
17301 if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
17302 {
17303 res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
17304 }
17305 if(res >= 0)
17306 {
17307 // All steps succeeded.
17308 #if VMA_STATS_STRING_ENABLED
17309 (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
17310 #endif
17311 if(pAllocationInfo != VMA_NULL)
17312 {
17313 allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
17314 }
17315
17316 return VK_SUCCESS;
17317 }
17318 allocator->FreeMemory(
17319 1, // allocationCount
17320 pAllocation);
17321 *pAllocation = VK_NULL_HANDLE;
17322 (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
17323 *pImage = VK_NULL_HANDLE;
17324 return res;
17325 }
17326 (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
17327 *pImage = VK_NULL_HANDLE;
17328 return res;
17329 }
17330 return res;
17331}
17332
17334 VmaAllocator VMA_NOT_NULL allocator,
17335 VmaAllocation VMA_NOT_NULL allocation,
17336 const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
17338{
17339 VMA_ASSERT(allocator && pImageCreateInfo && pImage && allocation);
17340
17341 *pImage = VK_NULL_HANDLE;
17342
17343 VMA_DEBUG_LOG("vmaCreateImage");
17344
17345 if (pImageCreateInfo->extent.width == 0 ||
17346 pImageCreateInfo->extent.height == 0 ||
17347 pImageCreateInfo->extent.depth == 0 ||
17348 pImageCreateInfo->mipLevels == 0 ||
17349 pImageCreateInfo->arrayLayers == 0)
17350 {
17352 }
17353
17354 VMA_DEBUG_GLOBAL_MUTEX_LOCK
17355
17356 // 1. Create VkImage.
17357 VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
17358 allocator->m_hDevice,
17359 pImageCreateInfo,
17360 allocator->GetAllocationCallbacks(),
17361 pImage);
17362 if (res >= 0)
17363 {
17364 // 2. Bind image with memory.
17365 res = allocator->BindImageMemory(allocation, 0, *pImage, VMA_NULL);
17366 if (res >= 0)
17367 {
17368 return VK_SUCCESS;
17369 }
17370 (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
17371 }
17372 return res;
17373}
17374
17376 VmaAllocator VMA_NOT_NULL allocator,
17377 VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
17378 VmaAllocation VMA_NULLABLE allocation)
17379{
17380 VMA_ASSERT(allocator);
17381
17382 if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
17383 {
17384 return;
17385 }
17386
17387 VMA_DEBUG_LOG("vmaDestroyImage");
17388
17389 VMA_DEBUG_GLOBAL_MUTEX_LOCK
17390
17391 if(image != VK_NULL_HANDLE)
17392 {
17393 (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
17394 }
17395 if(allocation != VK_NULL_HANDLE)
17396 {
17397 allocator->FreeMemory(
17398 1, // allocationCount
17399 &allocation);
17400 }
17401}
17402
17404 const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
17406{
17407 VMA_ASSERT(pCreateInfo && pVirtualBlock);
17408 VMA_ASSERT(pCreateInfo->size > 0);
17409 VMA_DEBUG_LOG("vmaCreateVirtualBlock");
17410 VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17411 *pVirtualBlock = vma_new(pCreateInfo->pAllocationCallbacks, VmaVirtualBlock_T)(*pCreateInfo);
17412 VkResult res = (*pVirtualBlock)->Init();
17413 if(res < 0)
17414 {
17415 vma_delete(pCreateInfo->pAllocationCallbacks, *pVirtualBlock);
17416 *pVirtualBlock = VK_NULL_HANDLE;
17417 }
17418 return res;
17419}
17420
17422{
17423 if(virtualBlock != VK_NULL_HANDLE)
17424 {
17425 VMA_DEBUG_LOG("vmaDestroyVirtualBlock");
17426 VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17427 VkAllocationCallbacks allocationCallbacks = virtualBlock->m_AllocationCallbacks; // Have to copy the callbacks when destroying.
17428 vma_delete(&allocationCallbacks, virtualBlock);
17429 }
17430}
17431
17433{
17434 VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
17435 VMA_DEBUG_LOG("vmaIsVirtualBlockEmpty");
17436 VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17437 return virtualBlock->IsEmpty() ? VK_TRUE : VK_FALSE;
17438}
17439
17442{
17443 VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pVirtualAllocInfo != VMA_NULL);
17444 VMA_DEBUG_LOG("vmaGetVirtualAllocationInfo");
17445 VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17446 virtualBlock->GetAllocationInfo(allocation, *pVirtualAllocInfo);
17447}
17448
17451 VkDeviceSize* VMA_NULLABLE pOffset)
17452{
17453 VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pCreateInfo != VMA_NULL && pAllocation != VMA_NULL);
17454 VMA_DEBUG_LOG("vmaVirtualAllocate");
17455 VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17456 return virtualBlock->Allocate(*pCreateInfo, *pAllocation, pOffset);
17457}
17458
17460{
17461 if(allocation != VK_NULL_HANDLE)
17462 {
17463 VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
17464 VMA_DEBUG_LOG("vmaVirtualFree");
17465 VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17466 virtualBlock->Free(allocation);
17467 }
17468}
17469
17471{
17472 VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
17473 VMA_DEBUG_LOG("vmaClearVirtualBlock");
17474 VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17475 virtualBlock->Clear();
17476}
17477
17480{
17481 VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
17482 VMA_DEBUG_LOG("vmaSetVirtualAllocationUserData");
17483 VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17484 virtualBlock->SetAllocationUserData(allocation, pUserData);
17485}
17486
17489{
17490 VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL);
17491 VMA_DEBUG_LOG("vmaGetVirtualBlockStatistics");
17492 VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17493 virtualBlock->GetStatistics(*pStats);
17494}
17495
17498{
17499 VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL);
17500 VMA_DEBUG_LOG("vmaCalculateVirtualBlockStatistics");
17501 VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17502 virtualBlock->CalculateDetailedStatistics(*pStats);
17503}
17504
17505#if VMA_STATS_STRING_ENABLED
17506
17508 char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString, VkBool32 detailedMap)
17509{
17510 VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && ppStatsString != VMA_NULL);
17511 VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17512 const VkAllocationCallbacks* allocationCallbacks = virtualBlock->GetAllocationCallbacks();
17513 VmaStringBuilder sb(allocationCallbacks);
17514 virtualBlock->BuildStatsString(detailedMap != VK_FALSE, sb);
17515 *ppStatsString = VmaCreateStringCopy(allocationCallbacks, sb.GetData(), sb.GetLength());
17516}
17517
17519 char* VMA_NULLABLE pStatsString)
17520{
17521 if(pStatsString != VMA_NULL)
17522 {
17523 VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
17524 VMA_DEBUG_GLOBAL_MUTEX_LOCK;
17525 VmaFreeString(virtualBlock->GetAllocationCallbacks(), pStatsString);
17526 }
17527}
17528#endif // VMA_STATS_STRING_ENABLED
17529#endif // _VMA_PUBLIC_INTERFACE
17530#endif // VMA_IMPLEMENTATION
17531
17532/**
17533\page quick_start Quick start
17534
17535\section quick_start_project_setup Project setup
17536
17537 Vulkan Memory Allocator comes in the form of a "stb-style" single header file.
17538You don't need to build it as a separate library project.
17539You can add this file directly to your project and submit it to code repository next to your other source files.
17540
17541"Single header" doesn't mean that everything is contained in C/C++ declarations,
17542like it tends to be in case of inline functions or C++ templates.
17543It means that implementation is bundled with interface in a single file and needs to be extracted using preprocessor macro.
17544If you don't do it properly, you will get linker errors.
17545
17546To do it properly:
17547
17548-# Include "vk_mem_alloc.h" file in each CPP file where you want to use the library.
17549 This includes declarations of all members of the library.
17550-# In exactly one CPP file define following macro before this include.
17551 It enables also internal definitions.
17552
17553\code
17554#define VMA_IMPLEMENTATION
17555#include "vk_mem_alloc.h"
17556\endcode
17557
17558It may be a good idea to create dedicated CPP file just for this purpose.
17559
17560This library includes header `<vulkan/vulkan.h>`, which in turn
17561includes `<windows.h>` on Windows. If you need some specific macros defined
17562before including these headers (like `WIN32_LEAN_AND_MEAN` or
17563`WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define
17564them before every `#include` of this library.
17565
17566This library is written in C++, but has C-compatible interface.
17567Thus you can include and use vk_mem_alloc.h in C or C++ code, but full
17568implementation with `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C.
17569 Some features of C++14 are used. STL containers, RTTI, or C++ exceptions are not used.
17570
17571
17572\section quick_start_initialization Initialization
17573
17574At program startup:
17575
17576-# Initialize Vulkan to have `VkPhysicalDevice`, `VkDevice` and `VkInstance` object.
17577-# Fill VmaAllocatorCreateInfo structure and create #VmaAllocator object by
17578 calling vmaCreateAllocator().
17579
17580Only members `physicalDevice`, `device`, `instance` are required.
17581 However, you should inform the library which Vulkan version you use by setting
17582VmaAllocatorCreateInfo::vulkanApiVersion and which extensions did you enable
17583by setting VmaAllocatorCreateInfo::flags (like #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT for VK_KHR_buffer_device_address).
17584Otherwise, VMA would use only features of Vulkan 1.0 core with no extensions.
17585
17586You may need to configure importing Vulkan functions. There are 3 ways to do this:
17587
17588-# **If you link with Vulkan static library** (e.g. "vulkan-1.lib" on Windows):
17589 - You don't need to do anything.
17590 - VMA will use these, as macro `VMA_STATIC_VULKAN_FUNCTIONS` is defined to 1 by default.
17591-# **If you want VMA to fetch pointers to Vulkan functions dynamically** using `vkGetInstanceProcAddr`,
17592 `vkGetDeviceProcAddr` (this is the option presented in the example below):
17593 - Define `VMA_STATIC_VULKAN_FUNCTIONS` to 0, `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 1.
17594 - Provide pointers to these two functions via VmaVulkanFunctions::vkGetInstanceProcAddr,
17595 VmaVulkanFunctions::vkGetDeviceProcAddr.
17596 - The library will fetch pointers to all other functions it needs internally.
17597-# **If you fetch pointers to all Vulkan functions in a custom way**, e.g. using some loader like
17598 [Volk](https://github.com/zeux/volk):
17599 - Define `VMA_STATIC_VULKAN_FUNCTIONS` and `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 0.
17600 - Pass these pointers via structure #VmaVulkanFunctions.
17601
17602\code
17603VmaVulkanFunctions vulkanFunctions = {};
17604vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr;
17605vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr;
17606
17607VmaAllocatorCreateInfo allocatorCreateInfo = {};
17608allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;
17609allocatorCreateInfo.physicalDevice = physicalDevice;
17610allocatorCreateInfo.device = device;
17611allocatorCreateInfo.instance = instance;
17612allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
17613
17614VmaAllocator allocator;
17615vmaCreateAllocator(&allocatorCreateInfo, &allocator);
17616\endcode
17617
17618
17619\section quick_start_resource_allocation Resource allocation
17620
17621When you want to create a buffer or image:
17622
17623-# Fill `VkBufferCreateInfo` / `VkImageCreateInfo` structure.
17624-# Fill VmaAllocationCreateInfo structure.
17625-# Call vmaCreateBuffer() / vmaCreateImage() to get `VkBuffer`/`VkImage` with memory
17626 already allocated and bound to it, plus a #VmaAllocation object that represents its underlying memory.
17627
17628\code
17629VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
17630bufferInfo.size = 65536;
17631bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
17632
17633VmaAllocationCreateInfo allocInfo = {};
17634allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
17635
17636VkBuffer buffer;
17637VmaAllocation allocation;
17638vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
17639\endcode
17640
17641Don't forget to destroy your objects when no longer needed:
17642
17643\code
17644vmaDestroyBuffer(allocator, buffer, allocation);
17645vmaDestroyAllocator(allocator);
17646\endcode
17647
17648
17649\page choosing_memory_type Choosing memory type
17650
17651Physical devices in Vulkan support various combinations of memory heaps and
17652types. Help with choosing correct and optimal memory type for your specific
17653resource is one of the key features of this library. You can use it by filling
17654appropriate members of VmaAllocationCreateInfo structure, as described below.
17655You can also combine multiple methods.
17656
17657-# If you just want to find memory type index that meets your requirements, you
17658 can use function: vmaFindMemoryTypeIndexForBufferInfo(),
17659 vmaFindMemoryTypeIndexForImageInfo(), vmaFindMemoryTypeIndex().
17660-# If you want to allocate a region of device memory without association with any
17661 specific image or buffer, you can use function vmaAllocateMemory(). Usage of
17662 this function is not recommended and usually not needed.
17663 vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once,
17664 which may be useful for sparse binding.
17665-# If you already have a buffer or an image created, you want to allocate memory
17666 for it and then you will bind it yourself, you can use function
17667 vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage().
17668 For binding you should use functions: vmaBindBufferMemory(), vmaBindImageMemory()
17669 or their extended versions: vmaBindBufferMemory2(), vmaBindImageMemory2().
17670-# **This is the easiest and recommended way to use this library:**
17671 If you want to create a buffer or an image, allocate memory for it and bind
17672 them together, all in one call, you can use function vmaCreateBuffer(),
17673 vmaCreateImage().
17674
17675When using 3. or 4., the library internally queries Vulkan for memory types
17676supported for that buffer or image (function `vkGetBufferMemoryRequirements()`)
17677and uses only one of these types.
17678
17679If no memory type can be found that meets all the requirements, these functions
17680return `VK_ERROR_FEATURE_NOT_PRESENT`.
17681
17682You can leave VmaAllocationCreateInfo structure completely filled with zeros.
17683It means no requirements are specified for memory type.
17684It is valid, although not very useful.
17685
17686\section choosing_memory_type_usage Usage
17687
17688The easiest way to specify memory requirements is to fill member
17689VmaAllocationCreateInfo::usage using one of the values of enum #VmaMemoryUsage.
17690It defines high level, common usage types.
17691Since version 3 of the library, it is recommended to use #VMA_MEMORY_USAGE_AUTO to let it select best memory type for your resource automatically.
17692
17693For example, if you want to create a uniform buffer that will be filled using
17694transfer only once or infrequently and then used for rendering every frame as a uniform buffer, you can
17695do it using following code. The buffer will most likely end up in a memory type with
17696`VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT` to be fast to access by the GPU device.
17697
17698\code
17699VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
17700bufferInfo.size = 65536;
17701bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
17702
17703VmaAllocationCreateInfo allocInfo = {};
17704allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
17705
17706VkBuffer buffer;
17707VmaAllocation allocation;
17708vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
17709\endcode
17710
17711If you have a preference for putting the resource in GPU (device) memory or CPU (host) memory
17712on systems with discrete graphics card that have the memories separate, you can use
17713#VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST.
17714
17715When using `VMA_MEMORY_USAGE_AUTO*` while you want to map the allocated memory,
17716you also need to specify one of the host access flags:
17717#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
17718This will help the library decide about preferred memory type to ensure it has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
17719so you can map it.
17720
17721For example, a staging buffer that will be filled via mapped pointer and then
17722 used as a source of transfer to the buffer described previously can be created like this.
17723 It will likely end up in a memory type that is `HOST_VISIBLE` and `HOST_COHERENT`
17724but not `HOST_CACHED` (meaning uncached, write-combined) and not `DEVICE_LOCAL` (meaning system RAM).
17725
17726\code
17727VkBufferCreateInfo stagingBufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
17728stagingBufferInfo.size = 65536;
17729stagingBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
17730
17731VmaAllocationCreateInfo stagingAllocInfo = {};
17732stagingAllocInfo.usage = VMA_MEMORY_USAGE_AUTO;
17733stagingAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
17734
17735VkBuffer stagingBuffer;
17736VmaAllocation stagingAllocation;
17737vmaCreateBuffer(allocator, &stagingBufferInfo, &stagingAllocInfo, &stagingBuffer, &stagingAllocation, nullptr);
17738\endcode
17739
17740For more examples of creating different kinds of resources, see chapter \ref usage_patterns.
17741
17742Usage values `VMA_MEMORY_USAGE_AUTO*` are legal to use only when the library knows
17743about the resource being created by having `VkBufferCreateInfo` / `VkImageCreateInfo` passed,
17744so they work with functions like: vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo() etc.
17745If you allocate raw memory using function vmaAllocateMemory(), you have to use other means of selecting
17746 memory type, as described below.
17747
17748\note
17749Old usage values (`VMA_MEMORY_USAGE_GPU_ONLY`, `VMA_MEMORY_USAGE_CPU_ONLY`,
17750`VMA_MEMORY_USAGE_CPU_TO_GPU`, `VMA_MEMORY_USAGE_GPU_TO_CPU`, `VMA_MEMORY_USAGE_CPU_COPY`)
17751are still available and work same way as in previous versions of the library
17752for backward compatibility, but they are not recommended.
17753
17754\section choosing_memory_type_required_preferred_flags Required and preferred flags
17755
17756You can specify more detailed requirements by filling members
17757VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags
17758with a combination of bits from enum `VkMemoryPropertyFlags`. For example,
17759if you want to create a buffer that will be persistently mapped on host (so it
17760must be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`,
17761use following code:
17762
17763\code
17764VmaAllocationCreateInfo allocInfo = {};
17765allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
17766allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
17767allocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;
17768
17769VkBuffer buffer;
17770VmaAllocation allocation;
17771vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
17772\endcode
17773
17774A memory type is chosen that has all the required flags and as many preferred
17775flags set as possible.
17776
17777Value passed in VmaAllocationCreateInfo::usage is internally converted to a set of required and preferred flags,
17778plus some extra "magic" (heuristics).
17779
17780\section choosing_memory_type_explicit_memory_types Explicit memory types
17781
17782If you inspected memory types available on the physical device and you have
17783a preference for memory types that you want to use, you can fill member
17784VmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each bit set
17785means that a memory type with that index is allowed to be used for the
17786allocation. Special value 0, just like `UINT32_MAX`, means there are no
17787restrictions to memory type index.
17788
17789Please note that this member is NOT just a memory type index.
17790Still you can use it to choose just one, specific memory type.
17791For example, if you already determined that your buffer should be created in
17792memory type 2, use following code:
17793
17794\code
17795uint32_t memoryTypeIndex = 2;
17796
17797VmaAllocationCreateInfo allocInfo = {};
17798allocInfo.memoryTypeBits = 1u << memoryTypeIndex;
17799
17800VkBuffer buffer;
17801VmaAllocation allocation;
17802vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
17803\endcode
17804
17805
17806\section choosing_memory_type_custom_memory_pools Custom memory pools
17807
17808If you allocate from custom memory pool, all the ways of specifying memory
17809requirements described above are not applicable and the aforementioned members
17810of VmaAllocationCreateInfo structure are ignored. Memory type is selected
17811explicitly when creating the pool and then used to make all the allocations from
17812that pool. For further details, see \ref custom_memory_pools.
17813
17814\section choosing_memory_type_dedicated_allocations Dedicated allocations
17815
17816Memory for allocations is reserved out of larger block of `VkDeviceMemory`
17817allocated from Vulkan internally. That is the main feature of this whole library.
17818You can still request a separate memory block to be created for an allocation,
17819just like you would do in a trivial solution without using any allocator.
17820In that case, a buffer or image is always bound to that memory at offset 0.
17821This is called a "dedicated allocation".
17822You can explicitly request it by using flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
17823The library can also internally decide to use dedicated allocation in some cases, e.g.:
17824
17825- When the size of the allocation is large.
17826- When [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension is enabled
17827 and it reports that dedicated allocation is required or recommended for the resource.
17828- When allocation of next big memory block fails due to not enough device memory,
17829 but allocation with the exact requested size succeeds.
17830
17831
17832\page memory_mapping Memory mapping
17833
17834To "map memory" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`,
17835to be able to read from it or write to it in CPU code.
17836Mapping is possible only for memory allocated from a memory type that has
17837`VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag.
17838Functions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose.
17839You can use them directly with memory allocated by this library,
17840but it is not recommended because of following issue:
17841Mapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed.
17842This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan.
17843Because of this, Vulkan Memory Allocator provides following facilities:
17844
17845\note If you want to be able to map an allocation, you need to specify one of the flags
17846#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
17847in VmaAllocationCreateInfo::flags. These flags are required for an allocation to be mappable
17848when using #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` enum values.
17849For other usage values they are ignored and every such allocation made in `HOST_VISIBLE` memory type is mappable,
17850but they can still be used for consistency.
17851
17852\section memory_mapping_mapping_functions Mapping functions
17853
17854The library provides following functions for mapping of a specific #VmaAllocation: vmaMapMemory(), vmaUnmapMemory().
17855They are safer and more convenient to use than standard Vulkan functions.
17856You can map an allocation multiple times simultaneously - mapping is reference-counted internally.
17857You can also map different allocations simultaneously regardless of whether they use the same `VkDeviceMemory` block.
17858The way it is implemented is that the library always maps entire memory block, not just region of the allocation.
17859For further details, see description of vmaMapMemory() function.
17860Example:
17861
17862\code
17863// Having these objects initialized:
17864struct ConstantBuffer
17865{
17866 ...
17867};
17868ConstantBuffer constantBufferData = ...
17869
17870VmaAllocator allocator = ...
17871VkBuffer constantBuffer = ...
17872VmaAllocation constantBufferAllocation = ...
17873
17874// You can map and fill your buffer using following code:
17875
17876void* mappedData;
17877vmaMapMemory(allocator, constantBufferAllocation, &mappedData);
17878memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
17879vmaUnmapMemory(allocator, constantBufferAllocation);
17880\endcode
17881
17882When mapping, you may see a warning from Vulkan validation layer similar to this one:
17883
17884<i>Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.</i>
17885
17886It happens because the library maps entire `VkDeviceMemory` block, where different
17887types of images and buffers may end up together, especially on GPUs with unified memory like Intel.
17888You can safely ignore it if you are sure you access only memory of the intended
17889object that you wanted to map.
17890
17891
17892\section memory_mapping_persistently_mapped_memory Persistently mapped memory
17893
17894Keeping your memory persistently mapped is generally OK in Vulkan.
17895You don't need to unmap it before using its data on the GPU.
17896The library provides a special feature designed for that:
17897Allocations made with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in
17898VmaAllocationCreateInfo::flags stay mapped all the time,
17899so you can just access CPU pointer to it any time
17900without a need to call any "map" or "unmap" function.
17901Example:
17902
17903\code
17904VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
17905bufCreateInfo.size = sizeof(ConstantBuffer);
17906bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
17907
17908VmaAllocationCreateInfo allocCreateInfo = {};
17909allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
17910allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
17911 VMA_ALLOCATION_CREATE_MAPPED_BIT;
17912
17913VkBuffer buf;
17914VmaAllocation alloc;
17915VmaAllocationInfo allocInfo;
17916vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
17917
17918// Buffer is already mapped. You can access its memory.
17919memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
17920\endcode
17921
17922\note #VMA_ALLOCATION_CREATE_MAPPED_BIT by itself doesn't guarantee that the allocation will end up
17923in a mappable memory type.
17924For this, you need to also specify #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or
17925#VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
17926#VMA_ALLOCATION_CREATE_MAPPED_BIT only guarantees that if the memory is `HOST_VISIBLE`, the allocation will be mapped on creation.
17927For an example of how to make use of this fact, see section \ref usage_patterns_advanced_data_uploading.
17928
17929\section memory_mapping_cache_control Cache flush and invalidate
17930
17931Memory in Vulkan doesn't need to be unmapped before using it on GPU,
17932but unless a memory type has `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set,
17933you need to manually **invalidate** cache before reading of mapped pointer
17934and **flush** cache after writing to mapped pointer.
17935Map/unmap operations don't do that automatically.
17936Vulkan provides following functions for this purpose `vkFlushMappedMemoryRanges()`,
17937`vkInvalidateMappedMemoryRanges()`, but this library provides more convenient
17938functions that refer to given allocation object: vmaFlushAllocation(),
17939vmaInvalidateAllocation(),
17940or multiple objects at once: vmaFlushAllocations(), vmaInvalidateAllocations().
17941
17942Regions of memory specified for flush/invalidate must be aligned to
17943`VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library.
17944In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations
17945within blocks are aligned to this value, so their offsets are always a multiple of
17946`nonCoherentAtomSize` and two different allocations never share same "line" of this size.
17947
17948Also, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA)
17949currently provide `HOST_COHERENT` flag on all memory types that are
17950`HOST_VISIBLE`, so on PC you may not need to bother.
17951
17952
17953\page staying_within_budget Staying within budget
17954
17955When developing a graphics-intensive game or program, it is important to avoid allocating
17956more GPU memory than it is physically available. When the memory is over-committed,
17957various bad things can happen, depending on the specific GPU, graphics driver, and
17958operating system:
17959
17960- It may just work without any problems.
17961- The application may slow down because some memory blocks are moved to system RAM
17962 and the GPU has to access them through PCI Express bus.
17963- A new allocation may take very long time to complete, even few seconds, and possibly
17964 freeze entire system.
17965- The new allocation may fail with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
17966- It may even result in GPU crash (TDR), observed as `VK_ERROR_DEVICE_LOST`
17967 returned somewhere later.
17968
17969\section staying_within_budget_querying_for_budget Querying for budget
17970
17971To query for current memory usage and available budget, use function vmaGetHeapBudgets().
17972Returned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap.
17973
17974Please note that this function returns different information and works faster than
17975vmaCalculateStatistics(). vmaGetHeapBudgets() can be called every frame or even before every
17976allocation, while vmaCalculateStatistics() is intended to be used rarely,
17977only to obtain statistical information, e.g. for debugging purposes.
17978
17979It is recommended to use <b>VK_EXT_memory_budget</b> device extension to obtain information
17980about the budget from Vulkan device. VMA is able to use this extension automatically.
17981When not enabled, the allocator behaves same way, but then it estimates current usage
17982and available budget based on its internal information and Vulkan memory heap sizes,
17983which may be less precise. In order to use this extension:
17984
179851. Make sure extensions VK_EXT_memory_budget and VK_KHR_get_physical_device_properties2
17986 required by it are available and enable them. Please note that the first is a device
17987 extension and the second is an instance extension!
179882. Use flag #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT when creating #VmaAllocator object.
179893. Make sure to call vmaSetCurrentFrameIndex() every frame. Budget is queried from
17990 Vulkan inside of it to avoid overhead of querying it with every allocation.
17991
17992\section staying_within_budget_controlling_memory_usage Controlling memory usage
17993
17994There are many ways in which you can try to stay within the budget.
17995
17996First, when making new allocation requires allocating a new memory block, the library
17997tries not to exceed the budget automatically. If a block with default recommended size
17998(e.g. 256 MB) would go over budget, a smaller block is allocated, possibly even
17999dedicated memory for just this resource.
18000
18001If the size of the requested resource plus current memory usage is more than the
18002budget, by default the library still tries to create it, leaving it to the Vulkan
18003implementation whether the allocation succeeds or fails. You can change this behavior
18004by using #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag. With it, the allocation is
18005not made if it would exceed the budget or if the budget is already exceeded.
18006VMA then tries to make the allocation from the next eligible Vulkan memory type.
18007If all of them fail, the call then fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
18008Example usage pattern may be to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag
18009when creating resources that are not essential for the application (e.g. the texture
18010of a specific object) and not to pass it when creating critically important resources
18011(e.g. render targets).
18012
18013On AMD graphics cards there is a custom vendor extension available: <b>VK_AMD_memory_overallocation_behavior</b>
18014that allows to control the behavior of the Vulkan implementation in out-of-memory cases -
18015whether it should fail with an error code or still allow the allocation.
18016Usage of this extension involves only passing extra structure on Vulkan device creation,
18017so it is out of scope of this library.
18018
18019Finally, you can also use #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure
18020a new allocation is created only when it fits inside one of the existing memory blocks.
18021If it would require to allocate a new block, it fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
18022This also ensures that the function call is very fast because it never goes to Vulkan
18023to obtain a new block.
18024
18025\note Creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount
18026set to more than 0 will currently try to allocate memory blocks without checking whether they
18027fit within budget.
18028
18029
18030\page resource_aliasing Resource aliasing (overlap)
18031
18032New explicit graphics APIs (Vulkan and Direct3D 12), thanks to manual memory
18033management, give an opportunity to alias (overlap) multiple resources in the
18034same region of memory - a feature not available in the old APIs (Direct3D 11, OpenGL).
18035It can be useful to save video memory, but it must be used with caution.
18036
18037For example, if you know the flow of your whole render frame in advance, you
18038are going to use some intermediate textures or buffers only during a small range of render passes,
18039and you know these ranges don't overlap in time, you can bind these resources to
18040the same place in memory, even if they have completely different parameters (width, height, format etc.).
18041
18042![Resource aliasing (overlap)](../gfx/Aliasing.png)
18043
18044Such scenario is possible using VMA, but you need to create your images manually.
18045Then you need to calculate parameters of an allocation to be made using formula:
18046
18047- allocation size = max(size of each image)
18048- allocation alignment = max(alignment of each image)
18049- allocation memoryTypeBits = bitwise AND(memoryTypeBits of each image)
18050
18051Following example shows two different images bound to the same place in memory,
18052allocated to fit largest of them.
18053
18054\code
18055// A 512x512 texture to be sampled.
18056VkImageCreateInfo img1CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
18057img1CreateInfo.imageType = VK_IMAGE_TYPE_2D;
18058img1CreateInfo.extent.width = 512;
18059img1CreateInfo.extent.height = 512;
18060img1CreateInfo.extent.depth = 1;
18061img1CreateInfo.mipLevels = 10;
18062img1CreateInfo.arrayLayers = 1;
18063img1CreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
18064img1CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
18065img1CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
18066img1CreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
18067img1CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
18068
18069// A full screen texture to be used as color attachment.
18070VkImageCreateInfo img2CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
18071img2CreateInfo.imageType = VK_IMAGE_TYPE_2D;
18072img2CreateInfo.extent.width = 1920;
18073img2CreateInfo.extent.height = 1080;
18074img2CreateInfo.extent.depth = 1;
18075img2CreateInfo.mipLevels = 1;
18076img2CreateInfo.arrayLayers = 1;
18077img2CreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
18078img2CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
18079img2CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
18080img2CreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
18081img2CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
18082
18083VkImage img1;
18084res = vkCreateImage(device, &img1CreateInfo, nullptr, &img1);
18085VkImage img2;
18086res = vkCreateImage(device, &img2CreateInfo, nullptr, &img2);
18087
18088VkMemoryRequirements img1MemReq;
18089vkGetImageMemoryRequirements(device, img1, &img1MemReq);
18090VkMemoryRequirements img2MemReq;
18091vkGetImageMemoryRequirements(device, img2, &img2MemReq);
18092
18093VkMemoryRequirements finalMemReq = {};
18094finalMemReq.size = std::max(img1MemReq.size, img2MemReq.size);
18095finalMemReq.alignment = std::max(img1MemReq.alignment, img2MemReq.alignment);
18096finalMemReq.memoryTypeBits = img1MemReq.memoryTypeBits & img2MemReq.memoryTypeBits;
18097// Validate if(finalMemReq.memoryTypeBits != 0)
18098
18099VmaAllocationCreateInfo allocCreateInfo = {};
18100allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
18101
18102VmaAllocation alloc;
18103res = vmaAllocateMemory(allocator, &finalMemReq, &allocCreateInfo, &alloc, nullptr);
18104
18105res = vmaBindImageMemory(allocator, alloc, img1);
18106res = vmaBindImageMemory(allocator, alloc, img2);
18107
18108// You can use img1, img2 here, but not at the same time!
18109
18110vmaFreeMemory(allocator, alloc);
18111vkDestroyImage(device, img2, nullptr);
18112vkDestroyImage(device, img1, nullptr);
18113\endcode
18114
18115Remember that using resources that alias in memory requires proper synchronization.
18116You need to issue a memory barrier to make sure commands that use `img1` and `img2`
18117don't overlap on GPU timeline.
18118You also need to treat a resource after aliasing as uninitialized - containing garbage data.
18119For example, if you use `img1` and then want to use `img2`, you need to issue
18120an image memory barrier for `img2` with `oldLayout` = `VK_IMAGE_LAYOUT_UNDEFINED`.
18121
18122Additional considerations:
18123
18124- Vulkan also allows to interpret contents of memory between aliasing resources consistently in some cases.
18125See chapter 11.8. "Memory Aliasing" of Vulkan specification or `VK_IMAGE_CREATE_ALIAS_BIT` flag.
18126- You can create more complex layout where different images and buffers are bound
18127at different offsets inside one large allocation. For example, one can imagine
18128a big texture used in some render passes, aliasing with a set of many small buffers
18129used between in some further passes. To bind a resource at non-zero offset in an allocation,
18130use vmaBindBufferMemory2() / vmaBindImageMemory2().
18131- Before allocating memory for the resources you want to alias, check `memoryTypeBits`
18132returned in memory requirements of each resource to make sure the bits overlap.
18133Some GPUs may expose multiple memory types suitable e.g. only for buffers or
18134images with `COLOR_ATTACHMENT` usage, so the sets of memory types supported by your
18135resources may be disjoint. Aliasing them is not possible in that case.
18136
18137
18138\page custom_memory_pools Custom memory pools
18139
18140A memory pool contains a number of `VkDeviceMemory` blocks.
18141The library automatically creates and manages default pool for each memory type available on the device.
18142Default memory pool automatically grows in size.
18143Size of allocated blocks is also variable and managed automatically.
18144
18145You can create custom pool and allocate memory out of it.
18146It can be useful if you want to:
18147
18148- Keep certain kind of allocations separate from others.
18149- Enforce particular, fixed size of Vulkan memory blocks.
18150- Limit maximum amount of Vulkan memory allocated for that pool.
18151- Reserve minimum or fixed amount of Vulkan memory always preallocated for that pool.
18152- Use extra parameters for a set of your allocations that are available in #VmaPoolCreateInfo but not in
18153 #VmaAllocationCreateInfo - e.g., custom minimum alignment, custom `pNext` chain.
18154- Perform defragmentation on a specific subset of your allocations.
18155
18156To use custom memory pools:
18157
18158-# Fill VmaPoolCreateInfo structure.
18159-# Call vmaCreatePool() to obtain #VmaPool handle.
18160-# When making an allocation, set VmaAllocationCreateInfo::pool to this handle.
18161 You don't need to specify any other parameters of this structure, like `usage`.
18162
18163Example:
18164
18165\code
18166// Find memoryTypeIndex for the pool.
18167VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
18168sampleBufCreateInfo.size = 0x10000; // Doesn't matter.
18169sampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
18170
18171VmaAllocationCreateInfo sampleAllocCreateInfo = {};
18172sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
18173
18174uint32_t memTypeIndex;
18175VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator,
18176 &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex);
18177// Check res...
18178
18179// Create a pool that can have at most 2 blocks, 128 MiB each.
18180VmaPoolCreateInfo poolCreateInfo = {};
18181poolCreateInfo.memoryTypeIndex = memTypeIndex;
18182poolCreateInfo.blockSize = 128ull * 1024 * 1024;
18183poolCreateInfo.maxBlockCount = 2;
18184
18185VmaPool pool;
18186res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
18187// Check res...
18188
18189// Allocate a buffer out of it.
18190VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
18191bufCreateInfo.size = 1024;
18192bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
18193
18194VmaAllocationCreateInfo allocCreateInfo = {};
18195allocCreateInfo.pool = pool;
18196
18197VkBuffer buf;
18198VmaAllocation alloc;
18199res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
18200// Check res...
18201\endcode
18202
18203You have to free all allocations made from this pool before destroying it.
18204
18205\code
18206vmaDestroyBuffer(allocator, buf, alloc);
18207vmaDestroyPool(allocator, pool);
18208\endcode
18209
18210New versions of this library support creating dedicated allocations in custom pools.
18211It is supported only when VmaPoolCreateInfo::blockSize = 0.
18212To use this feature, set VmaAllocationCreateInfo::pool to the pointer to your custom pool and
18213VmaAllocationCreateInfo::flags to #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
18214
18215\note Excessive use of custom pools is a common mistake when using this library.
18216Custom pools may be useful for special purposes - when you want to
18217keep certain type of resources separate e.g. to reserve minimum amount of memory
18218for them or limit maximum amount of memory they can occupy. For most
18219resources this is not needed and so it is not recommended to create #VmaPool
18220objects and allocations out of them. Allocating from the default pool is sufficient.
18221
18222
18223\section custom_memory_pools_MemTypeIndex Choosing memory type index
18224
18225When creating a pool, you must explicitly specify memory type index.
18226To find the one suitable for your buffers or images, you can use helper functions
18227vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo().
18228You need to provide structures with example parameters of buffers or images
18229that you are going to create in that pool.
18230
18231\code
18232VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
18233exampleBufCreateInfo.size = 1024; // Doesn't matter
18234exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
18235
18236VmaAllocationCreateInfo allocCreateInfo = {};
18237allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
18238
18239uint32_t memTypeIndex;
18240vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex);
18241
18242VmaPoolCreateInfo poolCreateInfo = {};
18243poolCreateInfo.memoryTypeIndex = memTypeIndex;
18244// ...
18245\endcode
18246
18247When creating buffers/images allocated in that pool, provide following parameters:
18248
18249- `VkBufferCreateInfo`: Prefer to pass same parameters as above.
18250 Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior.
18251 Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers
18252 or the other way around.
18253- VmaAllocationCreateInfo: You don't need to pass same parameters. Fill only `pool` member.
18254 Other members are ignored anyway.
18255
18256\section linear_algorithm Linear allocation algorithm
18257
18258Each Vulkan memory block managed by this library has accompanying metadata that
18259keeps track of used and unused regions. By default, the metadata structure and
18260algorithm tries to find best place for new allocations among free regions to
18261optimize memory usage. This way you can allocate and free objects in any order.
18262
18263![Default allocation algorithm](../gfx/Linear_allocator_1_algo_default.png)
18264
18265Sometimes there is a need to use simpler, linear allocation algorithm. You can
18266create custom pool that uses such algorithm by adding flag
18267#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating
18268#VmaPool object. Then an alternative metadata management is used. It always
18269creates new allocations after last one and doesn't reuse free regions after
18270allocations freed in the middle. It results in better allocation performance and
18271less memory consumed by metadata.
18272
18273![Linear allocation algorithm](../gfx/Linear_allocator_2_algo_linear.png)
18274
18275With this one flag, you can create a custom pool that can be used in many ways:
18276free-at-once, stack, double stack, and ring buffer. See below for details.
18277You don't need to specify explicitly which of these options you are going to use - it is detected automatically.
18278
18279\subsection linear_algorithm_free_at_once Free-at-once
18280
18281In a pool that uses linear algorithm, you still need to free all the allocations
18282individually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free
18283them in any order. New allocations are always made after last one - free space
18284in the middle is not reused. However, when you release all the allocation and
18285the pool becomes empty, allocation starts from the beginning again. This way you
18286can use linear algorithm to speed up creation of allocations that you are going
18287to release all at once.
18288
18289![Free-at-once](../gfx/Linear_allocator_3_free_at_once.png)
18290
18291This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount
18292value that allows multiple memory blocks.
18293
18294\subsection linear_algorithm_stack Stack
18295
18296When you free an allocation that was created last, its space can be reused.
18297Thanks to this, if you always release allocations in the order opposite to their
18298creation (LIFO - Last In First Out), you can achieve behavior of a stack.
18299
18300![Stack](../gfx/Linear_allocator_4_stack.png)
18301
18302This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount
18303value that allows multiple memory blocks.
18304
18305\subsection linear_algorithm_double_stack Double stack
18306
18307The space reserved by a custom pool with linear algorithm may be used by two
18308stacks:
18309
18310- First, default one, growing up from offset 0.
18311- Second, "upper" one, growing down from the end towards lower offsets.
18312
18313To make allocation from the upper stack, add flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
18314to VmaAllocationCreateInfo::flags.
18315
18316![Double stack](../gfx/Linear_allocator_7_double_stack.png)
18317
18318Double stack is available only in pools with one memory block -
18319VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.
18320
18321When the two stacks' ends meet so there is not enough space between them for a
18322new allocation, such allocation fails with usual
18323`VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
18324
18325\subsection linear_algorithm_ring_buffer Ring buffer
18326
18327When you free some allocations from the beginning and there is not enough free space
18328for a new one at the end of a pool, allocator's "cursor" wraps around to the
18329beginning and starts allocation there. Thanks to this, if you always release
18330allocations in the same order as you created them (FIFO - First In First Out),
18331you can achieve behavior of a ring buffer / queue.
18332
18333![Ring buffer](../gfx/Linear_allocator_5_ring_buffer.png)
18334
18335Ring buffer is available only in pools with one memory block -
18336VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.
18337
18338\note \ref defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
18339
18340
18341\page defragmentation Defragmentation
18342
18343Interleaved allocations and deallocations of many objects of varying size can
18344cause fragmentation over time, which can lead to a situation where the library is unable
18345to find a continuous range of free memory for a new allocation despite there is
18346enough free space, just scattered across many small free ranges between existing
18347allocations.
18348
18349To mitigate this problem, you can use defragmentation feature.
18350It doesn't happen automatically though and needs your cooperation,
18351because VMA is a low level library that only allocates memory.
18352It cannot recreate buffers and images in a new place as it doesn't remember the contents of `VkBufferCreateInfo` / `VkImageCreateInfo` structures.
18353It cannot copy their contents as it doesn't record any commands to a command buffer.
18354
18355Example:
18356
18357\code
18358VmaDefragmentationInfo defragInfo = {};
18359defragInfo.pool = myPool;
18360defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT;
18361
18362VmaDefragmentationContext defragCtx;
18363VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
18364// Check res...
18365
18366for(;;)
18367{
18368 VmaDefragmentationPassMoveInfo pass;
18369 res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
18370 if(res == VK_SUCCESS)
18371 break;
18372 else if(res != VK_INCOMPLETE)
18373 // Handle error...
18374
18375 for(uint32_t i = 0; i < pass.moveCount; ++i)
18376 {
18377 // Inspect pass.pMoves[i].srcAllocation, identify what buffer/image it represents.
18378 VmaAllocationInfo allocInfo;
18379        vmaGetAllocationInfo(allocator, pass.pMoves[i].srcAllocation, &allocInfo);
18380 MyEngineResourceData* resData = (MyEngineResourceData*)allocInfo.pUserData;
18381
18382 // Recreate and bind this buffer/image at: pass.pMoves[i].dstMemory, pass.pMoves[i].dstOffset.
18383 VkImageCreateInfo imgCreateInfo = ...
18384 VkImage newImg;
18385 res = vkCreateImage(device, &imgCreateInfo, nullptr, &newImg);
18386 // Check res...
18387        res = vmaBindImageMemory(allocator, pass.pMoves[i].dstTmpAllocation, newImg);
18388 // Check res...
18389
18390 // Issue a vkCmdCopyBuffer/vkCmdCopyImage to copy its content to the new place.
18391 vkCmdCopyImage(cmdBuf, resData->img, ..., newImg, ...);
18392 }
18393
18394 // Make sure the copy commands finished executing.
18395 vkWaitForFences(...);
18396
18397 // Destroy old buffers/images bound with pass.pMoves[i].srcAllocation.
18398 for(uint32_t i = 0; i < pass.moveCount; ++i)
18399 {
18400 // ...
18401 vkDestroyImage(device, resData->img, nullptr);
18402 }
18403
18404 // Update appropriate descriptors to point to the new places...
18405
18406 res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
18407 if(res == VK_SUCCESS)
18408 break;
18409 else if(res != VK_INCOMPLETE)
18410 // Handle error...
18411}
18412
18413vmaEndDefragmentation(allocator, defragCtx, nullptr);
18414\endcode
18415
18416Although functions like vmaCreateBuffer(), vmaCreateImage(), vmaDestroyBuffer(), vmaDestroyImage()
18417create/destroy an allocation and a buffer/image at once, these are just a shortcut for
18418creating the resource, allocating memory, and binding them together.
18419Defragmentation works on memory allocations only. You must handle the rest manually.
18420Defragmentation is an iterative process that should repeat "passes" as long as related functions
18421return `VK_INCOMPLETE`, not `VK_SUCCESS`.
18422In each pass:
18423
184241. vmaBeginDefragmentationPass() function call:
18425 - Calculates and returns the list of allocations to be moved in this pass.
18426 Note this can be a time-consuming process.
18427 - Reserves destination memory for them by creating temporary destination allocations
18428 that you can query for their `VkDeviceMemory` + offset using vmaGetAllocationInfo().
184292. Inside the pass, **you should**:
18430 - Inspect the returned list of allocations to be moved.
18431 - Create new buffers/images and bind them at the returned destination temporary allocations.
18432 - Copy data from source to destination resources if necessary.
18433 - Destroy the source buffers/images, but NOT their allocations.
184343. vmaEndDefragmentationPass() function call:
18435 - Frees the source memory reserved for the allocations that are moved.
18436 - Modifies source #VmaAllocation objects that are moved to point to the destination reserved memory.
18437 - Frees `VkDeviceMemory` blocks that became empty.
18438
18439Unlike in previous iterations of the defragmentation API, there is no list of "movable" allocations passed as a parameter.
18440Defragmentation algorithm tries to move all suitable allocations.
18441You can, however, refuse to move some of them inside a defragmentation pass, by setting
18442`pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
18443This is not recommended and may result in suboptimal packing of the allocations after defragmentation.
18444If you cannot ensure any allocation can be moved, it is better to keep movable allocations separate in a custom pool.
18445
18446Inside a pass, for each allocation that should be moved:
18447
18448- You should copy its data from the source to the destination place by calling e.g. `vkCmdCopyBuffer()`, `vkCmdCopyImage()`.
18449 - You need to make sure these commands finished executing before destroying the source buffers/images and before calling vmaEndDefragmentationPass().
18450- If a resource doesn't contain any meaningful data, e.g. it is a transient color attachment image to be cleared,
18451 filled, and used temporarily in each rendering frame, you can just recreate this image
18452 without copying its data.
18453- If the resource is in `HOST_VISIBLE` and `HOST_CACHED` memory, you can copy its data on the CPU
18454 using `memcpy()`.
18455- If you cannot move the allocation, you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
18456 This will cancel the move.
18457    - vmaEndDefragmentationPass() will then free the destination memory,
18458      not the source memory of the allocation, leaving it unchanged.
18459- If you decide the allocation is unimportant and can be destroyed instead of moved (e.g. it wasn't used for long time),
18460 you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
18461 - vmaEndDefragmentationPass() will then free both source and destination memory, and will destroy the source #VmaAllocation object.
18462
18463You can defragment a specific custom pool by setting VmaDefragmentationInfo::pool
18464(like in the example above) or all the default pools by setting this member to null.
18465
18466Defragmentation is always performed in each pool separately.
18467Allocations are never moved between different Vulkan memory types.
18468The size of the destination memory reserved for a moved allocation is the same as the original one.
18469Alignment of an allocation as it was determined using `vkGetBufferMemoryRequirements()` etc. is also respected after defragmentation.
18470Buffers/images should be recreated with the same `VkBufferCreateInfo` / `VkImageCreateInfo` parameters as the original ones.
18471
18472You can perform the defragmentation incrementally to limit the number of allocations and bytes to be moved
18473in each pass, e.g. to call it in sync with render frames and not to experience too big hitches.
18474See members: VmaDefragmentationInfo::maxBytesPerPass, VmaDefragmentationInfo::maxAllocationsPerPass.
18475
18476It is also safe to perform the defragmentation asynchronously to render frames and other Vulkan and VMA
18477usage, possibly from multiple threads, with the exception that allocations
18478returned in VmaDefragmentationPassMoveInfo::pMoves shouldn't be destroyed until the defragmentation pass is ended.
18479
18480<b>Mapping</b> is preserved on allocations that are moved during defragmentation.
18481Whether through #VMA_ALLOCATION_CREATE_MAPPED_BIT or vmaMapMemory(), the allocations
18482are mapped at their new place. Of course, pointer to the mapped data changes, so it needs to be queried
18483using VmaAllocationInfo::pMappedData.
18484
18485\note Defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
18486
18487
18488\page statistics Statistics
18489
18490This library contains several functions that return information about its internal state,
18491especially the amount of memory allocated from Vulkan.
18492
18493\section statistics_numeric_statistics Numeric statistics
18494
18495If you need to obtain basic statistics about memory usage per heap, together with current budget,
18496you can call function vmaGetHeapBudgets() and inspect structure #VmaBudget.
18497This is useful to keep track of memory usage and stay within budget
18498(see also \ref staying_within_budget).
18499Example:
18500
18501\code
18502uint32_t heapIndex = ...
18503
18504VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
18505vmaGetHeapBudgets(allocator, budgets);
18506
18507printf("My heap currently has %u allocations taking %llu B,\n",
18508 budgets[heapIndex].statistics.allocationCount,
18509 budgets[heapIndex].statistics.allocationBytes);
18510printf("allocated out of %u Vulkan device memory blocks taking %llu B,\n",
18511 budgets[heapIndex].statistics.blockCount,
18512 budgets[heapIndex].statistics.blockBytes);
18513printf("Vulkan reports total usage %llu B with budget %llu B.\n",
18514 budgets[heapIndex].usage,
18515 budgets[heapIndex].budget);
18516\endcode
18517
18518You can query for more detailed statistics per memory heap, type, and totals,
18519including minimum and maximum allocation size and unused range size,
18520by calling function vmaCalculateStatistics() and inspecting structure #VmaTotalStatistics.
18521This function is slower though, as it has to traverse all the internal data structures,
18522so it should be used only for debugging purposes.
18523
18524You can query for statistics of a custom pool using function vmaGetPoolStatistics()
18525or vmaCalculatePoolStatistics().
18526
18527You can query for information about a specific allocation using function vmaGetAllocationInfo().
18528It fills the structure #VmaAllocationInfo.
18529
18530\section statistics_json_dump JSON dump
18531
18532You can dump internal state of the allocator to a string in JSON format using function vmaBuildStatsString().
18533The result is guaranteed to be correct JSON.
18534It uses ANSI encoding.
18535Any strings provided by user (see [Allocation names](@ref allocation_names))
18536are copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2 or any other encoding,
18537this JSON string can be treated as using this encoding.
18538It must be freed using function vmaFreeStatsString().
18539
18540The format of this JSON string is not part of official documentation of the library,
18541but it will not change in backward-incompatible way without increasing library major version number
18542and appropriate mention in changelog.
18543
18544The JSON string contains all the data that can be obtained using vmaCalculateStatistics().
18545It can also contain detailed map of allocated memory blocks and their regions -
18546free and occupied by allocations.
18547This allows e.g. to visualize the memory or assess fragmentation.
18548
18549
18550\page allocation_annotation Allocation names and user data
18551
18552\section allocation_user_data Allocation user data
18553
18554You can annotate allocations with your own information, e.g. for debugging purposes.
18555To do that, fill VmaAllocationCreateInfo::pUserData field when creating
18556an allocation. It is an opaque `void*` pointer. You can use it e.g. as a pointer,
18557some handle, index, key, ordinal number or any other value that would associate
18558the allocation with your custom metadata.
18559It is useful to identify appropriate data structures in your engine given #VmaAllocation,
18560e.g. when doing \ref defragmentation.
18561
18562\code
18563VkBufferCreateInfo bufCreateInfo = ...
18564
18565MyBufferMetadata* pMetadata = CreateBufferMetadata();
18566
18567VmaAllocationCreateInfo allocCreateInfo = {};
18568allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
18569allocCreateInfo.pUserData = pMetadata;
18570
18571VkBuffer buffer;
18572VmaAllocation allocation;
18573vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
18574\endcode
18575
18576The pointer may be later retrieved as VmaAllocationInfo::pUserData:
18577
18578\code
18579VmaAllocationInfo allocInfo;
18580vmaGetAllocationInfo(allocator, allocation, &allocInfo);
18581MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData;
18582\endcode
18583
18584It can also be changed using function vmaSetAllocationUserData().
18585
18586Values of (non-zero) allocations' `pUserData` are printed in JSON report created by
18587vmaBuildStatsString() in hexadecimal form.
18588
18589\section allocation_names Allocation names
18590
18591An allocation can also carry a null-terminated string, giving a name to the allocation.
18592To set it, call vmaSetAllocationName().
18593The library creates internal copy of the string, so the pointer you pass doesn't need
18594to be valid for whole lifetime of the allocation. You can free it after the call.
18595
18596\code
18597std::string imageName = "Texture: ";
18598imageName += fileName;
18599vmaSetAllocationName(allocator, allocation, imageName.c_str());
18600\endcode
18601
18602The string can be later retrieved by inspecting VmaAllocationInfo::pName.
18603It is also printed in JSON report created by vmaBuildStatsString().
18604
18605\note Setting string name to VMA allocation doesn't automatically set it to the Vulkan buffer or image created with it.
18606You must do it manually using an extension like VK_EXT_debug_utils, which is independent of this library.
18607
18608
18609\page virtual_allocator Virtual allocator
18610
18611As an extra feature, the core allocation algorithm of the library is exposed through a simple and convenient API of "virtual allocator".
18612It doesn't allocate any real GPU memory. It just keeps track of used and free regions of a "virtual block".
18613You can use it to allocate your own memory or other objects, even completely unrelated to Vulkan.
18614A common use case is sub-allocation of pieces of one large GPU buffer.
18615
18616\section virtual_allocator_creating_virtual_block Creating virtual block
18617
18618To use this functionality, there is no main "allocator" object.
18619You don't need to have #VmaAllocator object created.
18620All you need to do is to create a separate #VmaVirtualBlock object for each block of memory you want to be managed by the allocator:
18621
18622-# Fill in #VmaVirtualBlockCreateInfo structure.
18623-# Call vmaCreateVirtualBlock(). Get new #VmaVirtualBlock object.
18624
18625Example:
18626
18627\code
18628VmaVirtualBlockCreateInfo blockCreateInfo = {};
18629blockCreateInfo.size = 1048576; // 1 MB
18630
18631VmaVirtualBlock block;
18632VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
18633\endcode
18634
18635\section virtual_allocator_making_virtual_allocations Making virtual allocations
18636
18637#VmaVirtualBlock object contains internal data structure that keeps track of free and occupied regions
18638using the same code as the main Vulkan memory allocator.
18639Similarly to #VmaAllocation for standard GPU allocations, there is #VmaVirtualAllocation type
18640that represents an opaque handle to an allocation within the virtual block.
18641
18642In order to make such allocation:
18643
18644-# Fill in #VmaVirtualAllocationCreateInfo structure.
18645-# Call vmaVirtualAllocate(). Get new #VmaVirtualAllocation object that represents the allocation.
18646 You can also receive `VkDeviceSize offset` that was assigned to the allocation.
18647
18648Example:
18649
18650\code
18651VmaVirtualAllocationCreateInfo allocCreateInfo = {};
18652allocCreateInfo.size = 4096; // 4 KB
18653
18654VmaVirtualAllocation alloc;
18655VkDeviceSize offset;
18656res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
18657if(res == VK_SUCCESS)
18658{
18659 // Use the 4 KB of your memory starting at offset.
18660}
18661else
18662{
18663 // Allocation failed - no space for it could be found. Handle this error!
18664}
18665\endcode
18666
18667\section virtual_allocator_deallocation Deallocation
18668
18669When no longer needed, an allocation can be freed by calling vmaVirtualFree().
18670You can only pass to this function an allocation that was previously returned by vmaVirtualAllocate()
18671called for the same #VmaVirtualBlock.
18672
18673When whole block is no longer needed, the block object can be released by calling vmaDestroyVirtualBlock().
18674All allocations must be freed before the block is destroyed, which is checked internally by an assert.
18675However, if you don't want to call vmaVirtualFree() for each allocation, you can use vmaClearVirtualBlock() to free them all at once -
18676a feature not available in normal Vulkan memory allocator. Example:
18677
18678\code
18679vmaVirtualFree(block, alloc);
18680vmaDestroyVirtualBlock(block);
18681\endcode
18682
18683\section virtual_allocator_allocation_parameters Allocation parameters
18684
18685You can attach a custom pointer to each allocation by using vmaSetVirtualAllocationUserData().
18686Its default value is null.
18687It can be used to store any data that needs to be associated with that allocation - e.g. an index, a handle, or a pointer to some
18688larger data structure containing more information. Example:
18689
18690\code
18691struct CustomAllocData
18692{
18693 std::string m_AllocName;
18694};
18695CustomAllocData* allocData = new CustomAllocData();
18696allocData->m_AllocName = "My allocation 1";
18697vmaSetVirtualAllocationUserData(block, alloc, allocData);
18698\endcode
18699
18700The pointer can later be fetched, along with allocation offset and size, by passing the allocation handle to function
18701vmaGetVirtualAllocationInfo() and inspecting returned structure #VmaVirtualAllocationInfo.
18702If you allocated a new object to be used as the custom pointer, don't forget to delete that object before freeing the allocation!
18703Example:
18704
18705\code
18706VmaVirtualAllocationInfo allocInfo;
18707vmaGetVirtualAllocationInfo(block, alloc, &allocInfo);
18708delete (CustomAllocData*)allocInfo.pUserData;
18709
18710vmaVirtualFree(block, alloc);
18711\endcode
18712
18713\section virtual_allocator_alignment_and_units Alignment and units
18714
18715It feels natural to express sizes and offsets in bytes.
18716If an offset of an allocation needs to be aligned to a multiple of some number (e.g. 4 bytes), you can fill optional member
18717VmaVirtualAllocationCreateInfo::alignment to request it. Example:
18718
18719\code
18720VmaVirtualAllocationCreateInfo allocCreateInfo = {};
18721allocCreateInfo.size = 4096; // 4 KB
18722allocCreateInfo.alignment = 4; // Returned offset must be a multiple of 4 B
18723
18724VmaVirtualAllocation alloc;
18725res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, nullptr);
18726\endcode
18727
18728Alignments of different allocations made from one block may vary.
18729However, if all alignments and sizes are always a multiple of some size e.g. 4 B or `sizeof(MyDataStruct)`,
18730you can express all sizes, alignments, and offsets in multiples of that size instead of individual bytes.
18731It might be more convenient, but you need to make sure to use this new unit consistently in all the places:
18732
18733- VmaVirtualBlockCreateInfo::size
18734- VmaVirtualAllocationCreateInfo::size and VmaVirtualAllocationCreateInfo::alignment
18735- Using offset returned by vmaVirtualAllocate() or in VmaVirtualAllocationInfo::offset
18736
18737\section virtual_allocator_statistics Statistics
18738
18739You can obtain statistics of a virtual block using vmaGetVirtualBlockStatistics()
18740(to get brief statistics that are fast to calculate)
18741or vmaCalculateVirtualBlockStatistics() (to get more detailed statistics, slower to calculate).
18742The functions fill structures #VmaStatistics, #VmaDetailedStatistics respectively - same as used by the normal Vulkan memory allocator.
18743Example:
18744
18745\code
18746VmaStatistics stats;
18747vmaGetVirtualBlockStatistics(block, &stats);
18748printf("My virtual block has %llu bytes used by %u virtual allocations\n",
18749 stats.allocationBytes, stats.allocationCount);
18750\endcode
18751
18752You can also request a full list of allocations and free regions as a string in JSON format by calling
18753vmaBuildVirtualBlockStatsString().
18754Returned string must be later freed using vmaFreeVirtualBlockStatsString().
18755The format of this string differs from the one returned by the main Vulkan allocator, but it is similar.
18756
18757\section virtual_allocator_additional_considerations Additional considerations
18758
18759The "virtual allocator" functionality is implemented on a level of individual memory blocks.
18760Keeping track of a whole collection of blocks, allocating new ones when out of free space,
18761deleting empty ones, and deciding which one to try first for a new allocation must be implemented by the user.
18762
18763Alternative allocation algorithms are supported, just like in custom pools of the real GPU memory.
18764See enum #VmaVirtualBlockCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT).
18765You can find their description in chapter \ref custom_memory_pools.
18766Allocation strategies are also supported.
18767See enum #VmaVirtualAllocationCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT).
18768
18769Following features are supported only by the allocator of the real GPU memory and not by virtual allocations:
18770buffer-image granularity, `VMA_DEBUG_MARGIN`, `VMA_MIN_ALIGNMENT`.
18771
18772
18773\page debugging_memory_usage Debugging incorrect memory usage
18774
18775If you suspect a bug with memory usage, like usage of uninitialized memory or
18776memory being overwritten out of bounds of an allocation,
18777you can use debug features of this library to verify this.
18778
18779\section debugging_memory_usage_initialization Memory initialization
18780
18781If you experience a bug with incorrect and nondeterministic data in your program and you suspect uninitialized memory to be used,
18782you can enable automatic memory initialization to verify this.
18783To do it, define macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1.
18784
18785\code
18786#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
18787#include "vk_mem_alloc.h"
18788\endcode
18789
18790It makes memory of new allocations initialized to bit pattern `0xDCDCDCDC`.
18791Before an allocation is destroyed, its memory is filled with bit pattern `0xEFEFEFEF`.
18792Memory is automatically mapped and unmapped if necessary.
18793
18794If you find these values while debugging your program, good chances are that you incorrectly
18795read Vulkan memory that is allocated but not initialized, or already freed, respectively.
18796
18797Memory initialization works only with memory types that are `HOST_VISIBLE` and with allocations that can be mapped.
18798It works also with dedicated allocations.
18799
18800\section debugging_memory_usage_margins Margins
18801
18802By default, allocations are laid out in memory blocks next to each other if possible
18803(considering required alignment, `bufferImageGranularity`, and `nonCoherentAtomSize`).
18804
18805![Allocations without margin](../gfx/Margins_1.png)
18806
18807Define macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce specified
18808number of bytes as a margin after every allocation.
18809
18810\code
18811#define VMA_DEBUG_MARGIN 16
18812#include "vk_mem_alloc.h"
18813\endcode
18814
18815![Allocations with margin](../gfx/Margins_2.png)
18816
18817If your bug goes away after enabling margins, it means it may be caused by memory
18818being overwritten outside of allocation boundaries. It is not 100% certain though.
18819Change in application behavior may also be caused by different order and distribution
18820of allocations across memory blocks after margins are applied.
18821
18822Margins work with all types of memory.
18823
18824Margin is applied only to allocations made out of memory blocks and not to dedicated
18825allocations, which have their own memory block of specific size.
18826It is thus not applied to allocations made using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag
18827or those automatically decided to put into dedicated allocations, e.g. due to its
18828large size or recommended by VK_KHR_dedicated_allocation extension.
18829
18830Margins appear in [JSON dump](@ref statistics_json_dump) as part of free space.
18831
18832Note that enabling margins increases memory usage and fragmentation.
18833
18834Margins do not apply to \ref virtual_allocator.
18835
18836\section debugging_memory_usage_corruption_detection Corruption detection
18837
18838You can additionally define macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation
18839of contents of the margins.
18840
18841\code
18842#define VMA_DEBUG_MARGIN 16
18843#define VMA_DEBUG_DETECT_CORRUPTION 1
18844#include "vk_mem_alloc.h"
18845\endcode
18846
18847When this feature is enabled, number of bytes specified as `VMA_DEBUG_MARGIN`
18848(it must be a multiple of 4) after every allocation is filled with a magic number.
18849This idea is also known as "canary".
18850Memory is automatically mapped and unmapped if necessary.
18851
18852This number is validated automatically when the allocation is destroyed.
18853If it is not equal to the expected value, `VMA_ASSERT()` is executed.
18854It clearly means that either the CPU or the GPU overwrote the memory outside of boundaries of the allocation,
18855which indicates a serious bug.
18856
18857You can also explicitly request checking margins of all allocations in all memory blocks
18858that belong to specified memory types by using function vmaCheckCorruption(),
18859or in memory blocks that belong to specified custom pool, by using function
18860vmaCheckPoolCorruption().
18861
18862Margin validation (corruption detection) works only for memory types that are
18863`HOST_VISIBLE` and `HOST_COHERENT`.
18864
18865
18866\page opengl_interop OpenGL Interop
18867
18868VMA provides some features that help with interoperability with OpenGL.
18869
18870\section opengl_interop_exporting_memory Exporting memory
18871
18872If you want to attach `VkExportMemoryAllocateInfoKHR` structure to `pNext` chain of memory allocations made by the library:
18873
18874It is recommended to create \ref custom_memory_pools for such allocations.
18875Define and fill in your `VkExportMemoryAllocateInfoKHR` structure and attach it to VmaPoolCreateInfo::pMemoryAllocateNext
18876while creating the custom pool.
18877Please note that the structure must remain alive and unchanged for the whole lifetime of the #VmaPool,
18878not only while creating it, as no copy of the structure is made,
18879but its original pointer is used for each allocation instead.
18880
18881If you want to export all memory allocated by the library from certain memory types,
18882also dedicated allocations or other allocations made from default pools,
18883an alternative solution is to fill in VmaAllocatorCreateInfo::pTypeExternalMemoryHandleTypes.
18884It should point to an array with `VkExternalMemoryHandleTypeFlagsKHR` to be automatically passed by the library
18885through `VkExportMemoryAllocateInfoKHR` on each allocation made from a specific memory type.
18886Please note that new versions of the library also support dedicated allocations created in custom pools.
18887
18888You should not mix these two methods in a way that allows to apply both to the same memory type.
18889Otherwise, `VkExportMemoryAllocateInfoKHR` structure would be attached twice to the `pNext` chain of `VkMemoryAllocateInfo`.
18890
18891
18892\section opengl_interop_custom_alignment Custom alignment
18893
18894Buffers or images exported to a different API like OpenGL may require a different alignment,
18895higher than the one used by the library automatically, queried from functions like `vkGetBufferMemoryRequirements`.
18896To impose such alignment:
18897
18898It is recommended to create \ref custom_memory_pools for such allocations.
18899Set VmaPoolCreateInfo::minAllocationAlignment member to the minimum alignment required for each allocation
18900to be made out of this pool.
18901The alignment actually used will be the maximum of this member and the alignment returned for the specific buffer or image
18902from a function like `vkGetBufferMemoryRequirements`, which is called by VMA automatically.
18903
18904If you want to create a buffer with a specific minimum alignment out of default pools,
18905use special function vmaCreateBufferWithAlignment(), which takes additional parameter `minAlignment`.
18906
18907Note the problem of alignment affects only resources placed inside bigger `VkDeviceMemory` blocks and not dedicated
18908allocations, as these, by definition, always have alignment = 0 because the resource is bound to the beginning of its dedicated block.
18909Contrary to Direct3D 12, Vulkan doesn't have a concept of alignment of the entire memory block passed on its allocation.
18910
18911
18912\page usage_patterns Recommended usage patterns
18913
18914Vulkan gives great flexibility in memory allocation.
18915This chapter shows the most common patterns.
18916
18917See also slides from talk:
18918[Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New)
18919
18920
18921\section usage_patterns_gpu_only GPU-only resource
18922
18923<b>When:</b>
18924Any resources that you frequently write and read on GPU,
18925e.g. images used as color attachments (aka "render targets"), depth-stencil attachments,
18926images/buffers used as storage image/buffer (aka "Unordered Access View (UAV)").
18927
18928<b>What to do:</b>
18929Let the library select the optimal memory type, which will likely have `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
18930
18931\code
18932VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
18933imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
18934imgCreateInfo.extent.width = 3840;
18935imgCreateInfo.extent.height = 2160;
18936imgCreateInfo.extent.depth = 1;
18937imgCreateInfo.mipLevels = 1;
18938imgCreateInfo.arrayLayers = 1;
18939imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
18940imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
18941imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
18942imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
18943imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
18944
18945VmaAllocationCreateInfo allocCreateInfo = {};
18946allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
18947allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
18948allocCreateInfo.priority = 1.0f;
18949
18950VkImage img;
18951VmaAllocation alloc;
18952vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
18953\endcode
18954
18955<b>Also consider:</b>
18956Consider creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
18957especially if they are large or if you plan to destroy and recreate them with different sizes
18958e.g. when display resolution changes.
18959Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later.
18960When VK_EXT_memory_priority extension is enabled, it is also worth setting high priority to such allocation
18961to decrease chances to be evicted to system memory by the operating system.
18962
18963\section usage_patterns_staging_copy_upload Staging copy for upload
18964
18965<b>When:</b>
18966A "staging" buffer that you want to map and fill from CPU code, then use as a source of transfer
18967to some GPU resource.
18968
18969<b>What to do:</b>
18970Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT.
18971Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`.
18972
18973\code
18974VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
18975bufCreateInfo.size = 65536;
18976bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
18977
18978VmaAllocationCreateInfo allocCreateInfo = {};
18979allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
18980allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
18981 VMA_ALLOCATION_CREATE_MAPPED_BIT;
18982
18983VkBuffer buf;
18984VmaAllocation alloc;
18985VmaAllocationInfo allocInfo;
18986vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
18987
18988...
18989
18990memcpy(allocInfo.pMappedData, myData, myDataSize);
18991\endcode
18992
18993<b>Also consider:</b>
18994You can map the allocation using vmaMapMemory() or you can create it as persistently mapped
18995using #VMA_ALLOCATION_CREATE_MAPPED_BIT, as in the example above.
18996
18997
18998\section usage_patterns_readback Readback
18999
19000<b>When:</b>
19001Buffers for data written by or transferred from the GPU that you want to read back on the CPU,
19002e.g. results of some computations.
19003
19004<b>What to do:</b>
19005Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
19006Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
19007and `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
19008
19009\code
19010VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
19011bufCreateInfo.size = 65536;
19012bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
19013
19014VmaAllocationCreateInfo allocCreateInfo = {};
19015allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
19016allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
19017 VMA_ALLOCATION_CREATE_MAPPED_BIT;
19018
19019VkBuffer buf;
19020VmaAllocation alloc;
19021VmaAllocationInfo allocInfo;
19022vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
19023
19024...
19025
19026const float* downloadedData = (const float*)allocInfo.pMappedData;
19027\endcode
19028
19029
19030\section usage_patterns_advanced_data_uploading Advanced data uploading
19031
19032For resources that you frequently write on CPU via mapped pointer and
19033frequently read on GPU e.g. as a uniform buffer (also called "dynamic"), multiple options are possible:
19034
19035-# Easiest solution is to have one copy of the resource in `HOST_VISIBLE` memory,
19036 even if it means system RAM (not `DEVICE_LOCAL`) on systems with a discrete graphics card,
19037 and make the device reach out to that resource directly.
19038 - Reads performed by the device will then go through PCI Express bus.
19039 The performance of this access may be limited, but it may be fine depending on the size
19040 of this resource (whether it is small enough to quickly end up in GPU cache) and the sparsity
19041 of access.
19042-# On systems with unified memory (e.g. AMD APU or Intel integrated graphics, mobile chips),
19043 a memory type may be available that is both `HOST_VISIBLE` (available for mapping) and `DEVICE_LOCAL`
19044 (fast to access from the GPU). Then, it is likely the best choice for such type of resource.
19045-# Systems with a discrete graphics card and separate video memory may or may not expose
19046 a memory type that is both `HOST_VISIBLE` and `DEVICE_LOCAL`, also known as Base Address Register (BAR).
19047 If they do, it represents a piece of VRAM (or entire VRAM, if ReBAR is enabled in the motherboard BIOS)
19048 that is available to CPU for mapping.
19049 - Writes performed by the host to that memory go through PCI Express bus.
19050 The performance of these writes may be limited, but it may be fine, especially on PCIe 4.0,
19051 as long as rules of using uncached and write-combined memory are followed - only sequential writes and no reads.
19052-# Finally, you may need or prefer to create a separate copy of the resource in `DEVICE_LOCAL` memory,
19053 a separate "staging" copy in `HOST_VISIBLE` memory and perform an explicit transfer command between them.
19054
19055Thankfully, VMA offers an aid to create and use such resources in the way optimal
19056for the current Vulkan device. To help the library make the best choice,
19057use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT together with
19058#VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT.
19059It will then prefer a memory type that is both `DEVICE_LOCAL` and `HOST_VISIBLE` (integrated memory or BAR),
19060but if no such memory type is available or allocation from it fails
19061(PC graphics cards have only 256 MB of BAR by default, unless ReBAR is supported and enabled in BIOS),
19062it will fall back to `DEVICE_LOCAL` memory for fast GPU access.
19063It is then up to you to detect that the allocation ended up in a memory type that is not `HOST_VISIBLE`,
19064so you need to create another "staging" allocation and perform explicit transfers.
19065
19066\code
19067VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
19068bufCreateInfo.size = 65536;
19069bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
19070
19071VmaAllocationCreateInfo allocCreateInfo = {};
19072allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
19073allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
19074 VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |
19075 VMA_ALLOCATION_CREATE_MAPPED_BIT;
19076
19077VkBuffer buf;
19078VmaAllocation alloc;
19079VmaAllocationInfo allocInfo;
19080vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
19081
19082VkMemoryPropertyFlags memPropFlags;
19083vmaGetAllocationMemoryProperties(allocator, alloc, &memPropFlags);
19084
19085if(memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
19086{
19087 // Allocation ended up in a mappable memory and is already mapped - write to it directly.
19088
19089 // [Executed in runtime]:
19090 memcpy(allocInfo.pMappedData, myData, myDataSize);
19091}
19092else
19093{
19094 // Allocation ended up in a non-mappable memory - need to transfer.
19095 VkBufferCreateInfo stagingBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
19096 stagingBufCreateInfo.size = 65536;
19097 stagingBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
19098
19099 VmaAllocationCreateInfo stagingAllocCreateInfo = {};
19100 stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
19101 stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
19102 VMA_ALLOCATION_CREATE_MAPPED_BIT;
19103
19104 VkBuffer stagingBuf;
19105 VmaAllocation stagingAlloc;
19106 VmaAllocationInfo stagingAllocInfo;
19107 vmaCreateBuffer(allocator, &stagingBufCreateInfo, &stagingAllocCreateInfo,
19108 &stagingBuf, &stagingAlloc, &stagingAllocInfo);
19109
19110 // [Executed in runtime]:
19111 memcpy(stagingAllocInfo.pMappedData, myData, myDataSize);
19112 //vkCmdPipelineBarrier: VK_ACCESS_HOST_WRITE_BIT --> VK_ACCESS_TRANSFER_READ_BIT
19113 VkBufferCopy bufCopy = {
19114 0, // srcOffset
19115 0, // dstOffset,
19116 myDataSize }; // size
19117 vkCmdCopyBuffer(cmdBuf, stagingBuf, buf, 1, &bufCopy);
19118}
19119\endcode
19120
19121\section usage_patterns_other_use_cases Other use cases
19122
19123Here are some other, less obvious use cases and their recommended settings:
19124
19125- An image that is used only as transfer source and destination, but it should stay on the device,
19126 as it is used to temporarily store a copy of some texture, e.g. from the current to the next frame,
19127 for temporal antialiasing or other temporal effects.
19128 - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
19129 - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO
19130- An image that is used only as transfer source and destination, but it should be placed
19131 in the system RAM despite it doesn't need to be mapped, because it serves as a "swap" copy to evict
19132 least recently used textures from VRAM.
19133 - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
19134 - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_HOST,
19135 as VMA needs a hint here to differentiate from the previous case.
19136- A buffer that you want to map and write from the CPU, directly read from the GPU
19137 (e.g. as a uniform or vertex buffer), but you have a clear preference to place it in device or
19138 host memory due to its large size.
19139 - Use `VkBufferCreateInfo::usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT`
19140 - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST
19141 - Use VmaAllocationCreateInfo::flags = #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT
19142
19143
19144\page configuration Configuration
19145
19146Please check "CONFIGURATION SECTION" in the code to find macros that you can define
19147before each include of this file or change directly in this file to provide
19148your own implementation of basic facilities like assert, `min()` and `max()` functions,
19149mutex, atomic etc.
19150The library uses its own implementation of containers by default, but you can switch to using
19151STL containers instead.
19152
19153For example, define `VMA_ASSERT(expr)` before including the library to provide
19154custom implementation of the assertion, compatible with your project.
19155By default it is defined to standard C `assert(expr)` in `_DEBUG` configuration
19156and empty otherwise.
19157
19158\section config_Vulkan_functions Pointers to Vulkan functions
19159
19160There are multiple ways to import pointers to Vulkan functions in the library.
19161In the simplest case you don't need to do anything.
19162If the compilation or linking of your program or the initialization of the #VmaAllocator
19163doesn't work for you, you can try to reconfigure it.
19164
19165First, the allocator tries to fetch pointers to Vulkan functions linked statically,
19166like this:
19167
19168\code
19169m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
19170\endcode
19171
19172If you want to disable this feature, set configuration macro: `#define VMA_STATIC_VULKAN_FUNCTIONS 0`.
19173
19174Second, you can provide the pointers yourself by setting member VmaAllocatorCreateInfo::pVulkanFunctions.
19175You can fetch them e.g. using functions `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` or
19176by using a helper library like [volk](https://github.com/zeux/volk).
19177
19178Third, VMA tries to fetch remaining pointers that are still null by calling
19179`vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` on its own.
19180You need to only fill in VmaVulkanFunctions::vkGetInstanceProcAddr and VmaVulkanFunctions::vkGetDeviceProcAddr.
19181Other pointers will be fetched automatically.
19182If you want to disable this feature, set configuration macro: `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0`.
19183
19184Finally, all the function pointers required by the library (considering selected
19185Vulkan version and enabled extensions) are checked with `VMA_ASSERT` if they are not null.
19186
19187
19188\section custom_memory_allocator Custom host memory allocator
19189
19190If you use custom allocator for CPU memory rather than default operator `new`
19191and `delete` from C++, you can make this library using your allocator as well
19192by filling optional member VmaAllocatorCreateInfo::pAllocationCallbacks. These
19193functions will be passed to Vulkan, as well as used by the library itself to
19194make any CPU-side allocations.
19195
19196\section allocation_callbacks Device memory allocation callbacks
19197
19198The library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally.
19199You can setup callbacks to be informed about these calls, e.g. for the purpose
19200of gathering some statistics. To do it, fill optional member
19201VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
19202
19203\section heap_memory_limit Device heap memory limit
19204
19205When device memory of certain heap runs out of free space, new allocations may
19206fail (returning error code) or they may succeed, silently pushing some existing
19207memory blocks from GPU VRAM to system RAM (which degrades performance). This
19208behavior is implementation-dependent - it depends on GPU vendor and graphics
19209driver.
19210
19211On AMD cards it can be controlled while creating Vulkan device object by using
19212VK_AMD_memory_overallocation_behavior extension, if available.
19213
19214Alternatively, if you want to test how your program behaves with limited amount of Vulkan device
19215memory available without switching your graphics card to one that really has
19216smaller VRAM, you can use a feature of this library intended for this purpose.
19217To do it, fill optional member VmaAllocatorCreateInfo::pHeapSizeLimit.
19218
19219
19220
19221\page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation
19222
19223VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve
19224performance on some GPUs. It augments Vulkan API with possibility to query
19225driver whether it prefers particular buffer or image to have its own, dedicated
19226allocation (separate `VkDeviceMemory` block) for better efficiency - to be able
19227to do some internal optimizations. The extension is supported by this library.
19228It will be used automatically when enabled.
19229
19230It has been promoted to core Vulkan 1.1, so if you use eligible Vulkan version
19231and inform VMA about it by setting VmaAllocatorCreateInfo::vulkanApiVersion,
19232you are all set.
19233
19234Otherwise, if you want to use it as an extension:
19235
192361 . When creating Vulkan device, check if following 2 device extensions are
19237supported (call `vkEnumerateDeviceExtensionProperties()`).
19238If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).
19239
19240- VK_KHR_get_memory_requirements2
19241- VK_KHR_dedicated_allocation
19242
19243If you enabled these extensions:
19244
192452 . Use #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating
19246your #VmaAllocator to inform the library that you enabled required extensions
19247and you want the library to use them.
19248
19249\code
19250allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
19251
19252vmaCreateAllocator(&allocatorInfo, &allocator);
19253\endcode
19254
19255That is all. The extension will be automatically used whenever you create a
19256buffer using vmaCreateBuffer() or image using vmaCreateImage().
19257
19258When using the extension together with Vulkan Validation Layer, you will receive
19259warnings like this:
19260
19261_vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer._
19262
19263It is OK, you should just ignore it. It happens because you use function
19264`vkGetBufferMemoryRequirements2KHR()` instead of standard
19265`vkGetBufferMemoryRequirements()`, while the validation layer seems to be
19266unaware of it.
19267
19268To learn more about this extension, see:
19269
19270- [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap50.html#VK_KHR_dedicated_allocation)
19271- [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)
19272
19273
19274
19275\page vk_ext_memory_priority VK_EXT_memory_priority
19276
19277VK_EXT_memory_priority is a device extension that allows to pass additional "priority"
19278value to Vulkan memory allocations that the implementation may use to prefer certain
19279buffers and images that are critical for performance to stay in device-local memory
19280in cases when the memory is over-subscribed, while some others may be moved to the system memory.
19281
19282VMA offers convenient usage of this extension.
19283If you enable it, you can pass "priority" parameter when creating allocations or custom pools
19284and the library automatically passes the value to Vulkan using this extension.
19285
19286If you want to use this extension in connection with VMA, follow these steps:
19287
19288\section vk_ext_memory_priority_initialization Initialization
19289
192901) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
19291Check if the extension is supported - if returned array of `VkExtensionProperties` contains "VK_EXT_memory_priority".
19292
192932) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
19294Attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
19295Check if the device feature is really supported - check if `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority` is true.
19296
192973) While creating device with `vkCreateDevice`, enable this extension - add "VK_EXT_memory_priority"
19298to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
19299
193004) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
19301Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
19302Enable this device feature - attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to
19303`VkPhysicalDeviceFeatures2::pNext` chain and set its member `memoryPriority` to `VK_TRUE`.
19304
193055) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
19306have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
19307to VmaAllocatorCreateInfo::flags.
19308
19309\section vk_ext_memory_priority_usage Usage
19310
19311When using this extension, you should initialize following member:
19312
19313- VmaAllocationCreateInfo::priority when creating a dedicated allocation with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
19314- VmaPoolCreateInfo::priority when creating a custom pool.
19315
19316It should be a floating-point value between `0.0f` and `1.0f`, where recommended default is `0.5f`.
19317Memory allocated with higher value can be treated by the Vulkan implementation as higher priority
19318and so it can have lower chances of being pushed out to system memory, experiencing degraded performance.
19319
19320It might be a good idea to create performance-critical resources like color-attachment or depth-stencil images
19321as dedicated and set high priority to them. For example:
19322
19323\code
19324VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
19325imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
19326imgCreateInfo.extent.width = 3840;
19327imgCreateInfo.extent.height = 2160;
19328imgCreateInfo.extent.depth = 1;
19329imgCreateInfo.mipLevels = 1;
19330imgCreateInfo.arrayLayers = 1;
19331imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
19332imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
19333imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
19334imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
19335imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
19336
19337VmaAllocationCreateInfo allocCreateInfo = {};
19338allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
19339allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
19340allocCreateInfo.priority = 1.0f;
19341
19342VkImage img;
19343VmaAllocation alloc;
19344vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
19345\endcode
19346
19347`priority` member is ignored in the following situations:
19348
19349- Allocations created in custom pools: They inherit the priority, along with all other allocation parameters
19350 from the parameters passed in #VmaPoolCreateInfo when the pool was created.
19351- Allocations created in default pools: They inherit the priority from the parameters
19352 VMA used when creating default pools, which means `priority == 0.5f`.
19353
19354
19355\page vk_amd_device_coherent_memory VK_AMD_device_coherent_memory
19356
19357VK_AMD_device_coherent_memory is a device extension that enables access to
19358additional memory types with `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and
19359`VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flag. It is useful mostly for
19360allocation of buffers intended for writing "breadcrumb markers" in between passes
19361or draw calls, which in turn are useful for debugging GPU crash/hang/TDR cases.
19362
19363When the extension is available but has not been enabled, Vulkan physical device
19364still exposes those memory types, but their usage is forbidden. VMA automatically
19365takes care of that - it returns `VK_ERROR_FEATURE_NOT_PRESENT` when an attempt
19366to allocate memory of such type is made.
19367
19368If you want to use this extension in connection with VMA, follow these steps:
19369
19370\section vk_amd_device_coherent_memory_initialization Initialization
19371
193721) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
19373Check if the extension is supported - if returned array of `VkExtensionProperties` contains "VK_AMD_device_coherent_memory".
19374
193752) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
19376Attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
19377Check if the device feature is really supported - check if `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true.
19378
193793) While creating device with `vkCreateDevice`, enable this extension - add "VK_AMD_device_coherent_memory"
19380to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
19381
193824) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
19383Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
19384Enable this device feature - attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to
19385`VkPhysicalDeviceFeatures2::pNext` and set its member `deviceCoherentMemory` to `VK_TRUE`.
19386
193875) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
19388have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
19389to VmaAllocatorCreateInfo::flags.
19390
19391\section vk_amd_device_coherent_memory_usage Usage
19392
19393After following steps described above, you can create VMA allocations and custom pools
19394out of the special `DEVICE_COHERENT` and `DEVICE_UNCACHED` memory types on eligible
19395devices. There are multiple ways to do it, for example:
19396
19397- You can request or prefer to allocate out of such memory types by adding
19398 `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags
19399 or VmaAllocationCreateInfo::preferredFlags. Those flags can be freely mixed with
19400 other ways of \ref choosing_memory_type, like setting VmaAllocationCreateInfo::usage.
19401- If you manually found memory type index to use for this purpose, force allocation
19402 from this specific index by setting VmaAllocationCreateInfo::memoryTypeBits `= 1u << index`.
19403
19404\section vk_amd_device_coherent_memory_more_information More information
19405
19406To learn more about this extension, see [VK_AMD_device_coherent_memory in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VK_AMD_device_coherent_memory.html)
19407
19408Example use of this extension can be found in the code of the sample and test suite
19409accompanying this library.
19410
19411
19412\page enabling_buffer_device_address Enabling buffer device address
19413
19414Device extension VK_KHR_buffer_device_address
19415allows fetching a raw GPU pointer to a buffer and passing it for usage in shader code.
19416It has been promoted to core Vulkan 1.2.
19417
19418If you want to use this feature in connection with VMA, follow these steps:
19419
19420\section enabling_buffer_device_address_initialization Initialization
19421
194221) (For Vulkan version < 1.2) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
19423Check if the extension is supported - if returned array of `VkExtensionProperties` contains
19424"VK_KHR_buffer_device_address".
19425
194262) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
19427Attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
19428Check if the device feature is really supported - check if `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress` is true.
19429
194303) (For Vulkan version < 1.2) While creating device with `vkCreateDevice`, enable this extension - add
19431"VK_KHR_buffer_device_address" to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
19432
194334) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
19434Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
19435Enable this device feature - attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to
19436`VkPhysicalDeviceFeatures2::pNext` and set its member `bufferDeviceAddress` to `VK_TRUE`.
19437
194385) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
19439have enabled this feature - add #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
19440to VmaAllocatorCreateInfo::flags.
19441
19442\section enabling_buffer_device_address_usage Usage
19443
19444After following steps described above, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*` using VMA.
19445The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT*` to
19446allocated memory blocks wherever it might be needed.
19447
19448Please note that the library supports only `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*`.
19449The second part of this functionality related to "capture and replay" is not supported,
19450as it is intended for usage in debugging tools like RenderDoc, not in everyday Vulkan usage.
19451
19452\section enabling_buffer_device_address_more_information More information
19453
19454To learn more about this extension, see [VK_KHR_buffer_device_address in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap46.html#VK_KHR_buffer_device_address)
19455
19456Example use of this extension can be found in the code of the sample and test suite
19457accompanying this library.
19458
19459\page general_considerations General considerations
19460
19461\section general_considerations_thread_safety Thread safety
19462
19463- The library has no global state, so separate #VmaAllocator objects can be used
19464 independently.
19465 There should be no need to create multiple such objects though - one per `VkDevice` is enough.
19466- By default, all calls to functions that take #VmaAllocator as first parameter
19467 are safe to call from multiple threads simultaneously because they are
19468 synchronized internally when needed.
19469 This includes allocation and deallocation from default memory pool, as well as custom #VmaPool.
19470- When the allocator is created with #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
19471 flag, calls to functions that take such #VmaAllocator object must be
19472 synchronized externally.
19473- Access to a #VmaAllocation object must be externally synchronized. For example,
19474 you must not call vmaGetAllocationInfo() and vmaMapMemory() from different
19475 threads at the same time if you pass the same #VmaAllocation object to these
19476 functions.
19477- #VmaVirtualBlock is not safe to be used from multiple threads simultaneously.
19478
19479\section general_considerations_versioning_and_compatibility Versioning and compatibility
19480
19481The library uses [**Semantic Versioning**](https://semver.org/),
19482which means version numbers follow convention: Major.Minor.Patch (e.g. 2.3.0), where:
19483
19484- Incremented Patch version means a release is backward- and forward-compatible,
19485 introducing only some internal improvements, bug fixes, optimizations etc.
19486 or changes that are out of scope of the official API described in this documentation.
19487- Incremented Minor version means a release is backward-compatible,
19488 so existing code that uses the library should continue to work, while some new
19489 symbols could have been added: new structures, functions, new values in existing
19490 enums and bit flags, new structure members, but not new function parameters.
19491- Incrementing Major version means a release could break some backward compatibility.
19492
19493All changes between official releases are documented in file "CHANGELOG.md".
19494
19495\warning Backward compatibility is considered on the level of C++ source code, not binary linkage.
19496Adding new members to existing structures is treated as backward compatible if initializing
19497the new members to binary zero results in the old behavior.
19498You should always fully initialize all library structures to zeros and not rely on their
19499exact binary size.
19500
19501\section general_considerations_validation_layer_warnings Validation layer warnings
19502
19503When using this library, you can meet following types of warnings issued by
19504Vulkan validation layer. They don't necessarily indicate a bug, so you may need
19505to just ignore them.
19506
19507- *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.*
19508 - It happens when VK_KHR_dedicated_allocation extension is enabled.
19509 `vkGetBufferMemoryRequirements2KHR` function is used instead, while validation layer seems to be unaware of it.
19510- *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.*
19511 - It happens when you map a buffer or image, because the library maps entire
19512 `VkDeviceMemory` block, where different types of images and buffers may end
19513 up together, especially on GPUs with unified memory like Intel.
19514- *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.*
19515 - It may happen when you use [defragmentation](@ref defragmentation).
19516
19517\section general_considerations_allocation_algorithm Allocation algorithm
19518
19519The library uses following algorithm for allocation, in order:
19520
19521-# Try to find free range of memory in existing blocks.
19522-# If failed, try to create a new block of `VkDeviceMemory`, with preferred block size.
19523-# If failed, try to create such block with size / 2, size / 4, size / 8.
19524-# If failed, try to allocate separate `VkDeviceMemory` for this allocation,
19525 just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
19526-# If failed, choose other memory type that meets the requirements specified in
19527 VmaAllocationCreateInfo and go to point 1.
19528-# If failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
19529
19530\section general_considerations_features_not_supported Features not supported
19531
19532Features deliberately excluded from the scope of this library:
19533
19534-# **Data transfer.** Uploading (streaming) and downloading data of buffers and images
19535 between CPU and GPU memory and related synchronization is responsibility of the user.
19536 Defining some "texture" object that would automatically stream its data from a
19537 staging copy in CPU memory to GPU memory would rather be a feature of another,
19538 higher-level library implemented on top of VMA.
19539 VMA doesn't record any commands to a `VkCommandBuffer`. It just allocates memory.
19540-# **Recreation of buffers and images.** Although the library has functions for
19541 buffer and image creation: vmaCreateBuffer(), vmaCreateImage(), you need to
19542 recreate these objects yourself after defragmentation. That is because the big
19543 structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in
19544 #VmaAllocation object.
19545-# **Handling CPU memory allocation failures.** When dynamically creating small C++
19546 objects in CPU memory (not Vulkan memory), allocation failures are not checked
19547 and handled gracefully, because that would complicate code significantly and
19548 is usually not needed in desktop PC applications anyway.
19549 Success of an allocation is just checked with an assert.
19550-# **Code free of any compiler warnings.** Maintaining the library to compile and
19551 work correctly on so many different platforms is hard enough. Being free of
19552 any warnings, on any version of any compiler, is simply not feasible.
19553 There are many preprocessor macros that make some variables unused, function parameters unreferenced,
19554 or conditional expressions constant in some configurations.
19555 The code of this library should not be bigger or more complicated just to silence these warnings.
19556 It is recommended to disable such warnings instead.
19557-# This is a C++ library with C interface. **Bindings or ports to any other programming languages** are welcome as external projects but
19558 are not going to be included into this repository.
19559*/
#define TYPE(T)
Explicitly instantiate CGUISimpleSetting for the basic types.
Definition: CGUISetting.cpp:110
bool Init(const CmdLineArgs &args, int flags)
Returns true if successful, false if Init is aborted early (for instance if mods changed,...
Definition: GameSetup.cpp:519
bool operator==(const FCDJointWeightPair &a, const FCDJointWeightPair &b)
Definition: GeomReindex.cpp:59
static enum @29 state
#define VkImageMemoryRequirementsInfo2KHR
Definition: VMA.h:97
#define vkGetPhysicalDeviceMemoryProperties2KHR
Definition: VMA.h:92
#define PFN_vkBindBufferMemory2KHR
Definition: VMA.h:82
#define VkMemoryDedicatedAllocateInfoKHR
Definition: VMA.h:98
#define VMA_ASSERT(EXPR)
Definition: VMA.h:29
#define vkBindImageMemory2KHR
Definition: VMA.h:89
#define PFN_vkGetBufferMemoryRequirements2KHR
Definition: VMA.h:84
#define VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR
Definition: VMA.h:104
#define VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR
Definition: VMA.h:108
#define VkBufferMemoryRequirementsInfo2KHR
Definition: VMA.h:96
#define VMA_HEAVY_ASSERT(EXPR)
Definition: VMA.h:30
#define vkGetImageMemoryRequirements2KHR
Definition: VMA.h:91
#define VkBindImageMemoryInfoKHR
Definition: VMA.h:95
#define VkMemoryRequirements2KHR
Definition: VMA.h:100
#define VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR
Definition: VMA.h:103
#define VMA_DEBUG_LOG(...)
Definition: VMA.h:36
#define PFN_vkBindImageMemory2KHR
Definition: VMA.h:83
#define VkBindBufferMemoryInfoKHR
Definition: VMA.h:94
#define VkPhysicalDeviceMemoryProperties2KHR
Definition: VMA.h:101
#define VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR
Definition: VMA.h:106
#define vkBindBufferMemory2KHR
Definition: VMA.h:88
#define VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR
Definition: VMA.h:109
#define vkGetBufferMemoryRequirements2KHR
Definition: VMA.h:90
#define PFN_vkGetImageMemoryRequirements2KHR
Definition: VMA.h:85
#define VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR
Definition: VMA.h:105
#define VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR
Definition: VMA.h:110
#define VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR
Definition: VMA.h:107
#define PFN_vkGetPhysicalDeviceMemoryProperties2KHR
Definition: VMA.h:86
#define VkMemoryDedicatedRequirementsKHR
Definition: VMA.h:99
VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VkDeviceSize allocationLocalOffset, VkImage VMA_NOT_NULL_NON_DISPATCHABLE image, const void *VMA_NULLABLE pNext)
Binds image to allocation with additional parameters.
VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(VmaAllocator VMA_NOT_NULL allocator, size_t allocationCount, const VmaAllocation VMA_NULLABLE *VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations)
Frees memory and destroys multiple allocations.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *VMA_NOT_NULL pAllocationCreateInfo, uint32_t *VMA_NOT_NULL pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(VmaAllocator VMA_NOT_NULL allocator, uint32_t allocationCount, const VmaAllocation VMA_NOT_NULL *VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations, const VkDeviceSize *VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets, const VkDeviceSize *VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes)
Flushes memory of given set of allocations.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(VmaAllocator VMA_NOT_NULL allocator, const VkBufferCreateInfo *VMA_NOT_NULL pBufferCreateInfo, const VmaAllocationCreateInfo *VMA_NOT_NULL pAllocationCreateInfo, VkDeviceSize minAlignment, VkBuffer VMA_NULLABLE_NON_DISPATCHABLE *VMA_NOT_NULL pBuffer, VmaAllocation VMA_NULLABLE *VMA_NOT_NULL pAllocation, VmaAllocationInfo *VMA_NULLABLE pAllocationInfo)
Creates a buffer with additional minimum alignment.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation(VmaAllocator VMA_NOT_NULL allocator, const VmaDefragmentationInfo *VMA_NOT_NULL pInfo, VmaDefragmentationContext VMA_NULLABLE *VMA_NOT_NULL pContext)
Begins defragmentation process.
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, void *VMA_NULLABLE *VMA_NOT_NULL ppData)
Maps memory represented by given allocation and returns pointer to it.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions.
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(VmaAllocator VMA_NOT_NULL allocator, const VmaPoolCreateInfo *VMA_NOT_NULL pCreateInfo, VmaPool VMA_NULLABLE *VMA_NOT_NULL pPool)
Allocates Vulkan device memory and creates VmaPool object.
VkFlags VmaPoolCreateFlags
Flags to be passed as VmaPoolCreateInfo::flags. See VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:698
VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName(VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, const char *VMA_NULLABLE pName)
Sets pName in given allocation to new value.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Parameters for defragmentation.
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(VmaAllocator VMA_NOT_NULL allocator, VkImage VMA_NULLABLE_NON_DISPATCHABLE image, VmaAllocation VMA_NULLABLE allocation)
Destroys Vulkan image and frees allocated memory.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VkImage VMA_NOT_NULL_NON_DISPATCHABLE image)
Binds image to allocation.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
Parameters of new VmaAllocation.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer(VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, const VkBufferCreateInfo *VMA_NOT_NULL pBufferCreateInfo, VkBuffer VMA_NULLABLE_NON_DISPATCHABLE *VMA_NOT_NULL pBuffer)
Creates a new VkBuffer, binds already created memory for it.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VkDeviceSize allocationLocalOffset, VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer, const void *VMA_NULLABLE pNext)
Binds buffer to allocation with additional parameters.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(VmaAllocator VMA_NOT_NULL allocator, VkImage VMA_NOT_NULL_NON_DISPATCHABLE image, const VmaAllocationCreateInfo *VMA_NOT_NULL pCreateInfo, VmaAllocation VMA_NULLABLE *VMA_NOT_NULL pAllocation, VmaAllocationInfo *VMA_NULLABLE pAllocationInfo)
Allocates memory suitable for given VkImage.
VkFlags VmaAllocationCreateFlags
See VmaAllocationCreateFlagBits.
Definition: vk_mem_alloc.h:653
struct VmaDefragmentationMove VmaDefragmentationMove
Single move of an allocation to be done for defragmentation.
VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(VmaAllocator VMA_NOT_NULL allocator, const VmaAllocation VMA_NULLABLE allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(),...
VmaDefragmentationFlagBits
Flags to be passed as VmaDefragmentationInfo::flags.
Definition: vk_mem_alloc.h:702
VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(VmaAllocator VMA_NOT_NULL allocator, uint32_t allocationCount, const VmaAllocation VMA_NOT_NULL *VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations, const VkDeviceSize *VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets, const VkDeviceSize *VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes)
Invalidates memory of given set of allocations.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator VMA_NOT_NULL allocator, const VkBufferCreateInfo *VMA_NOT_NULL pBufferCreateInfo, const VmaAllocationCreateInfo *VMA_NOT_NULL pAllocationCreateInfo, uint32_t *VMA_NOT_NULL pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer)
Binds buffer to allocation.
VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool, const char *VMA_NULLABLE *VMA_NOT_NULL ppName)
Retrieves name of a custom pool.
VkFlags VmaDefragmentationFlags
See VmaDefragmentationFlagBits.
Definition: vk_mem_alloc.h:732
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:657
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage(VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, const VkImageCreateInfo *VMA_NOT_NULL pImageCreateInfo, VkImage VMA_NULLABLE_NON_DISPATCHABLE *VMA_NOT_NULL pImage)
Function similar to vmaCreateAliasingBuffer().
VmaMemoryUsage
Intended usage of the allocated memory.
Definition: vk_mem_alloc.h:441
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(VmaAllocator VMA_NOT_NULL allocator, const VkImageCreateInfo *VMA_NOT_NULL pImageCreateInfo, const VmaAllocationCreateInfo *VMA_NOT_NULL pAllocationCreateInfo, VkImage VMA_NULLABLE_NON_DISPATCHABLE *VMA_NOT_NULL pImage, VmaAllocation VMA_NULLABLE *VMA_NOT_NULL pAllocation, VmaAllocationInfo *VMA_NULLABLE pAllocationInfo)
Function similar to vmaCreateBuffer().
VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(VmaAllocator VMA_NOT_NULL allocator, const VkImageCreateInfo *VMA_NOT_NULL pImageCreateInfo, const VmaAllocationCreateInfo *VMA_NOT_NULL pAllocationCreateInfo, uint32_t *VMA_NOT_NULL pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VmaAllocationInfo *VMA_NOT_NULL pAllocationInfo)
Returns current information about specified allocation.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(VmaAllocator VMA_NOT_NULL allocator, VmaDefragmentationContext VMA_NOT_NULL context, VmaDefragmentationPassMoveInfo *VMA_NOT_NULL pPassInfo)
Ends single defragmentation pass.
VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool, const char *VMA_NULLABLE pName)
Sets name of a custom pool.
VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, void *VMA_NULLABLE pUserData)
Sets pUserData in given allocation to new value.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(VmaAllocator VMA_NOT_NULL allocator, VmaDefragmentationContext VMA_NOT_NULL context, VmaDefragmentationPassMoveInfo *VMA_NOT_NULL pPassInfo)
Starts single defragmentation pass.
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NULLABLE pool)
Destroys VmaPool object and frees Vulkan device memory.
struct VmaDefragmentationPassMoveInfo VmaDefragmentationPassMoveInfo
Parameters for incremental defragmentation steps.
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned for defragmentation process in function vmaEndDefragmentation().
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:523
VmaDefragmentationMoveOperation
Operation performed on single defragmentation move. See structure VmaDefragmentationMove.
Definition: vk_mem_alloc.h:736
VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation(VmaAllocator VMA_NOT_NULL allocator, VmaDefragmentationContext VMA_NOT_NULL context, VmaDefragmentationStats *VMA_NULLABLE pStats)
Ends defragmentation process.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(VmaAllocator VMA_NOT_NULL allocator, const VkBufferCreateInfo *VMA_NOT_NULL pBufferCreateInfo, const VmaAllocationCreateInfo *VMA_NOT_NULL pAllocationCreateInfo, VkBuffer VMA_NULLABLE_NON_DISPATCHABLE *VMA_NOT_NULL pBuffer, VmaAllocation VMA_NULLABLE *VMA_NOT_NULL pAllocation, VmaAllocationInfo *VMA_NULLABLE pAllocationInfo)
Creates a new VkBuffer, allocates and binds memory for it.
VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties(VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VkMemoryPropertyFlags *VMA_NOT_NULL pFlags)
Given an allocation, returns Property Flags of its memory type.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(VmaAllocator VMA_NOT_NULL allocator, VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer, const VmaAllocationCreateInfo *VMA_NOT_NULL pCreateInfo, VmaAllocation VMA_NULLABLE *VMA_NOT_NULL pAllocation, VmaAllocationInfo *VMA_NULLABLE pAllocationInfo)
Allocates memory suitable for given VkBuffer.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(VmaAllocator VMA_NOT_NULL allocator, const VkMemoryRequirements *VMA_NOT_NULL pVkMemoryRequirements, const VmaAllocationCreateInfo *VMA_NOT_NULL pCreateInfo, VmaAllocation VMA_NULLABLE *VMA_NOT_NULL pAllocation, VmaAllocationInfo *VMA_NULLABLE pAllocationInfo)
General purpose memory allocation.
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(VmaAllocator VMA_NOT_NULL allocator, VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer, VmaAllocation VMA_NULLABLE allocation)
Destroys Vulkan buffer and frees allocated memory.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(VmaAllocator VMA_NOT_NULL allocator, const VkMemoryRequirements *VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements, const VmaAllocationCreateInfo *VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo, size_t allocationCount, VmaAllocation VMA_NULLABLE *VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations, VmaAllocationInfo *VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo)
General purpose memory allocation for multiple allocation objects at once.
@ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT
Definition: vk_mem_alloc.h:706
@ VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:729
@ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK
A bit mask to extract only ALGORITHM bits from entire set of flags.
Definition: vk_mem_alloc.h:723
@ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT
Use the most robust algorithm at the cost of time to compute and number of copies to make.
Definition: vk_mem_alloc.h:720
@ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT
Definition: vk_mem_alloc.h:710
@ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT
Definition: vk_mem_alloc.h:714
@ VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:688
@ VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:695
@ VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:675
@ VMA_POOL_CREATE_ALGORITHM_MASK
Bit mask to extract only ALGORITHM bits from entire set of flags.
Definition: vk_mem_alloc.h:692
@ VMA_MEMORY_USAGE_MAX_ENUM
Definition: vk_mem_alloc.h:518
@ VMA_MEMORY_USAGE_AUTO
Selects best memory type automatically.
Definition: vk_mem_alloc.h:492
@ VMA_MEMORY_USAGE_CPU_ONLY
Definition: vk_mem_alloc.h:455
@ VMA_MEMORY_USAGE_CPU_COPY
Definition: vk_mem_alloc.h:470
@ VMA_MEMORY_USAGE_GPU_TO_CPU
Definition: vk_mem_alloc.h:465
@ VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED
Lazily allocated GPU memory having VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT.
Definition: vk_mem_alloc.h:479
@ VMA_MEMORY_USAGE_CPU_TO_GPU
Definition: vk_mem_alloc.h:460
@ VMA_MEMORY_USAGE_AUTO_PREFER_HOST
Selects best memory type automatically with preference for CPU (host) memory.
Definition: vk_mem_alloc.h:516
@ VMA_MEMORY_USAGE_GPU_ONLY
Definition: vk_mem_alloc.h:450
@ VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE
Selects best memory type automatically with preference for GPU (device) memory.
Definition: vk_mem_alloc.h:504
@ VMA_MEMORY_USAGE_UNKNOWN
No intended memory usage specified.
Definition: vk_mem_alloc.h:445
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
Allocation strategy that chooses first suitable free range for the allocation - not necessarily in te...
Definition: vk_mem_alloc.h:631
@ VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT
Together with VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_AC...
Definition: vk_mem_alloc.h:622
@ VMA_ALLOCATION_CREATE_MAPPED_BIT
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
Definition: vk_mem_alloc.h:549
@ VMA_ALLOCATION_CREATE_DONT_BIND_BIT
Create both buffer/image and allocation, but don't bind them together.
Definition: vk_mem_alloc.h:572
@ VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT
Alias to VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT.
Definition: vk_mem_alloc.h:642
@ VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:528
@ VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
Allocation will be created from upper stack in a double stack pool.
Definition: vk_mem_alloc.h:562
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT
Allocation strategy that chooses always the lowest offset in available space.
Definition: vk_mem_alloc.h:636
@ VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT
Alias to VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT.
Definition: vk_mem_alloc.h:639
@ VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:538
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
Allocation strategy that chooses smallest possible free range for the allocation to minimize memory u...
Definition: vk_mem_alloc.h:626
@ VMA_ALLOCATION_CREATE_STRATEGY_MASK
A bit mask to extract only STRATEGY bits from entire set of flags.
Definition: vk_mem_alloc.h:645
@ VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT
Requests possibility to map the allocation (using vmaMapMemory() or VMA_ALLOCATION_CREATE_MAPPED_BIT)...
Definition: vk_mem_alloc.h:598
@ VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
Definition: vk_mem_alloc.h:557
@ VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT
Create allocation only if additional device memory required for it, if any, won't exceed memory budge...
Definition: vk_mem_alloc.h:576
@ VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
Requests possibility to map the allocation (using vmaMapMemory() or VMA_ALLOCATION_CREATE_MAPPED_BIT)...
Definition: vk_mem_alloc.h:610
@ VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:650
@ VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT
Set this flag if the allocated memory will have aliasing resources.
Definition: vk_mem_alloc.h:582
@ VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY
Set this value if you decide to abandon the allocation and you destroyed the buffer/image....
Definition: vk_mem_alloc.h:742
@ VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE
Set this value if you cannot move the allocation. New place reserved at dstTmpAllocation will be free...
Definition: vk_mem_alloc.h:740
@ VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY
Buffer/image has been recreated at dstTmpAllocation, data has been copied, old buffer/image has been ...
Definition: vk_mem_alloc.h:738
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(VmaAllocator VMA_NULLABLE allocator)
Destroys allocator object.
struct VmaAllocatorInfo VmaAllocatorInfo
Information about existing VmaAllocator object.
VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *VMA_NOT_NULL pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(VmaAllocator VMA_NOT_NULL allocator, const VkPhysicalDeviceMemoryProperties *VMA_NULLABLE *VMA_NOT_NULL ppPhysicalDeviceMemoryProperties)
PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:315
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryType, VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, VkDeviceSize size, void *VMA_NULLABLE pUserData)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:925
VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator VMA_NOT_NULL allocator, VmaAllocatorInfo *VMA_NOT_NULL pAllocatorInfo)
Returns information about existing VmaAllocator object - handle to Vulkan device etc.
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryType, VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, VkDeviceSize size, void *VMA_NULLABLE pUserData)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:917
VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(VmaAllocator VMA_NOT_NULL allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
VkFlags VmaAllocatorCreateFlags
See VmaAllocatorCreateFlagBits.
Definition: vk_mem_alloc.h:430
VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(VmaAllocator VMA_NOT_NULL allocator, const VkPhysicalDeviceProperties *VMA_NULLABLE *VMA_NOT_NULL ppPhysicalDeviceProperties)
PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(const VmaAllocatorCreateInfo *VMA_NOT_NULL pCreateInfo, VmaAllocator VMA_NULLABLE *VMA_NOT_NULL pAllocator)
Creates VmaAllocator object.
@ VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
Enables usage of VK_AMD_device_coherent_memory extension.
Definition: vk_mem_alloc.h:390
@ VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:320
@ VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT
Enables usage of VK_EXT_memory_budget extension.
Definition: vk_mem_alloc.h:372
@ VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
Enables usage of "buffer device address" feature, which allows you to use function vkGetBufferDeviceA...
Definition: vk_mem_alloc.h:408
@ VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
Enables usage of VK_KHR_bind_memory2 extension.
Definition: vk_mem_alloc.h:360
@ VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:345
@ VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:427
@ VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
Enables usage of VK_EXT_memory_priority extension in the library.
Definition: vk_mem_alloc.h:425
VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics(VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool, VmaDetailedStatistics *VMA_NOT_NULL pPoolStats)
Retrieves detailed statistics of existing VmaPool object.
VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock, char *VMA_NULLABLE pStatsString)
Frees a string returned by vmaBuildVirtualBlockStatsString().
VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets(VmaAllocator VMA_NOT_NULL allocator, VmaBudget *VMA_NOT_NULL VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pBudgets)
Retrieves information about current memory usage and budget for all memory heaps.
struct VmaTotalStatistics VmaTotalStatistics
General statistics from current state of the Allocator - total memory usage across all memory heaps a...
VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics(VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool, VmaStatistics *VMA_NOT_NULL pPoolStats)
Retrieves statistics of existing VmaPool object.
VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock, char *VMA_NULLABLE *VMA_NOT_NULL ppStatsString, VkBool32 detailedMap)
Builds and returns a null-terminated string in JSON format with information about given VmaVirtualBlo...
VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(VmaAllocator VMA_NOT_NULL allocator, char *VMA_NULLABLE *VMA_NOT_NULL ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as a null-terminated string in JSON format.
struct VmaDetailedStatistics VmaDetailedStatistics
More detailed statistics than VmaStatistics.
struct VmaBudget VmaBudget
Statistics of current memory usage and available budget for a specific memory heap.
struct VmaStatistics VmaStatistics
Calculated statistics of memory usage e.g.
VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics(VmaAllocator VMA_NOT_NULL allocator, VmaTotalStatistics *VMA_NOT_NULL pStats)
Retrieves statistics from current state of the Allocator.
VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(VmaAllocator VMA_NOT_NULL allocator, char *VMA_NULLABLE pStatsString)
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(VmaVirtualBlock VMA_NULLABLE virtualBlock)
Destroys VmaVirtualBlock object.
VmaVirtualAllocationCreateFlagBits
Flags to be passed as VmaVirtualAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:780
VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_NULL virtualBlock, const VmaVirtualAllocationCreateInfo *VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE *VMA_NOT_NULL pAllocation, VkDeviceSize *VMA_NULLABLE pOffset)
Allocates new virtual allocation inside given VmaVirtualBlock.
VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(VmaVirtualBlock VMA_NOT_NULL virtualBlock)
Frees all virtual allocations inside given VmaVirtualBlock.
struct VmaVirtualBlockCreateInfo VmaVirtualBlockCreateInfo
Parameters of created VmaVirtualBlock object to be passed to vmaCreateVirtualBlock().
VkFlags VmaVirtualBlockCreateFlags
Flags to be passed as VmaVirtualBlockCreateInfo::flags. See VmaVirtualBlockCreateFlagBits.
Definition: vk_mem_alloc.h:776
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaVirtualAllocation)
VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(VmaVirtualBlock VMA_NOT_NULL virtualBlock)
Returns true if the VmaVirtualBlock is empty - contains 0 virtual allocations and has all its space a...
VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo *VMA_NOT_NULL pVirtualAllocInfo)
Returns information about a specific virtual allocation within a virtual block, like its size and pUs...
struct VmaVirtualAllocationInfo VmaVirtualAllocationInfo
Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo().
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(const VmaVirtualBlockCreateInfo *VMA_NOT_NULL pCreateInfo, VmaVirtualBlock VMA_NULLABLE *VMA_NOT_NULL pVirtualBlock)
Creates new VmaVirtualBlock object.
VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaDetailedStatistics *VMA_NOT_NULL pStats)
Calculates and returns detailed statistics about virtual allocations and memory usage in given VmaVir...
VmaVirtualBlockCreateFlagBits
Flags to be passed as VmaVirtualBlockCreateInfo::flags.
Definition: vk_mem_alloc.h:754
VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, void *VMA_NULLABLE pUserData)
Changes custom pointer associated with given virtual allocation.
VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation)
Frees virtual allocation inside given VmaVirtualBlock.
struct VmaVirtualAllocationCreateInfo VmaVirtualAllocationCreateInfo
Parameters of created virtual allocation to be passed to vmaVirtualAllocate().
VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaStatistics *VMA_NOT_NULL pStats)
Calculates and returns statistics about virtual allocations and memory usage in given VmaVirtualBlock...
VkFlags VmaVirtualAllocationCreateFlags
Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See VmaVirtualAllocationCreateFlagBits.
Definition: vk_mem_alloc.h:805
@ VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT
Allocation strategy that chooses always the lowest offset in available space.
Definition: vk_mem_alloc.h:795
@ VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
Allocation strategy that tries to minimize allocation time.
Definition: vk_mem_alloc.h:791
@ VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
Allocation will be created from upper stack in a double stack pool.
Definition: vk_mem_alloc.h:785
@ VMA_VIRTUAL_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:802
@ VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK
A bit mask to extract only STRATEGY bits from entire set of flags.
Definition: vk_mem_alloc.h:800
@ VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
Allocation strategy that tries to minimize memory usage.
Definition: vk_mem_alloc.h:788
@ VMA_VIRTUAL_BLOCK_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:773
@ VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT
Enables alternative, linear allocation algorithm in this virtual block.
Definition: vk_mem_alloc.h:766
@ VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK
Bit mask to extract only ALGORITHM bits from entire set of flags.
Definition: vk_mem_alloc.h:770
bool operator!=(const SStencilOpState &lhs, const SStencilOpState &rhs)
Definition: DeviceCommandContext.cpp:55
static void Cleanup()
Definition: smbios.cpp:125
@ Normal
Definition: CCmpRangeManager.cpp:211
Config::Value_type Value
Definition: json_spirit_value.h:182
void Free(void *p, size_t size)
decommit memory and release address space.
Definition: uvm.cpp:113
bool Commit(uintptr_t address, size_t size, PageType pageType, int prot)
map physical memory to previously reserved address space.
Definition: uvm.cpp:59
static AddressRangeDescriptor ranges[2 *os_cpu_MaxProcessors]
Definition: wvm.cpp:304
#define SIZE_MAX
Definition: posix_types.h:57
#define T(string_literal)
Definition: secure_crt.cpp:77
std::shared_ptr< u8 > Allocate(size_t size)
Definition: shared_ptr.cpp:55
Definition: wnuma.cpp:50
Definition: vulkan.h:1831
void * pUserData
Definition: vulkan.h:1832
PFN_vkAllocationFunction pfnAllocation
Definition: vulkan.h:1833
PFN_vkFreeFunction pfnFree
Definition: vulkan.h:1835
Definition: vulkan.h:1755
Definition: vulkan.h:2611
VkDeviceSize size
Definition: vulkan.h:2615
VkStructureType sType
Definition: vulkan.h:2612
VkBufferUsageFlags usage
Definition: vulkan.h:2616
uint32_t depth
Definition: vulkan.h:1779
uint32_t height
Definition: vulkan.h:1778
uint32_t width
Definition: vulkan.h:1777
Definition: vulkan.h:2685
VkImageCreateFlags flags
Definition: vulkan.h:2688
uint32_t mipLevels
Definition: vulkan.h:2692
uint32_t arrayLayers
Definition: vulkan.h:2693
VkExtent3D extent
Definition: vulkan.h:2691
VkImageTiling tiling
Definition: vulkan.h:2695
VkImageUsageFlags usage
Definition: vulkan.h:2696
Definition: vulkan.h:2570
const void * pNext
Definition: vulkan.h:2572
VkDeviceSize offset
Definition: vulkan.h:2574
VkDeviceMemory memory
Definition: vulkan.h:2573
VkDeviceSize size
Definition: vulkan.h:2575
VkStructureType sType
Definition: vulkan.h:2571
Definition: vulkan.h:2533
uint32_t memoryTypeIndex
Definition: vulkan.h:2537
VkDeviceSize allocationSize
Definition: vulkan.h:2536
const void * pNext
Definition: vulkan.h:2535
Definition: vulkan.h:2565
VkMemoryHeapFlags flags
Definition: vulkan.h:2567
VkDeviceSize size
Definition: vulkan.h:2566
Definition: vulkan.h:3623
VkMemoryRequirements memoryRequirements
Definition: vulkan.h:3626
Definition: vulkan.h:2540
uint32_t memoryTypeBits
Definition: vulkan.h:2543
VkDeviceSize size
Definition: vulkan.h:2541
VkDeviceSize alignment
Definition: vulkan.h:2542
uint32_t heapIndex
Definition: vulkan.h:2562
VkMemoryPropertyFlags propertyFlags
Definition: vulkan.h:2561
VkDeviceSize bufferImageGranularity
Definition: vulkan.h:3216
VkDeviceSize nonCoherentAtomSize
Definition: vulkan.h:3310
uint32_t maxMemoryAllocationCount
Definition: vulkan.h:3214
Definition: vulkan.h:4133
VkMemoryHeap memoryHeaps[VK_MAX_MEMORY_HEAPS]
Definition: vulkan.h:4137
VkMemoryType memoryTypes[VK_MAX_MEMORY_TYPES]
Definition: vulkan.h:4135
uint32_t memoryHeapCount
Definition: vulkan.h:4136
uint32_t memoryTypeCount
Definition: vulkan.h:4134
Definition: vulkan.h:4108
uint32_t apiVersion
Definition: vulkan.h:4109
VkPhysicalDeviceType deviceType
Definition: vulkan.h:4113
char deviceName[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE]
Definition: vulkan.h:4114
VkPhysicalDeviceLimits limits
Definition: vulkan.h:4116
Parameters of new VmaAllocation.
Definition: vk_mem_alloc.h:1222
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:1248
void *VMA_NULLABLE pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:1261
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1240
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:1235
float priority
A floating-point value between 0 and 1, indicating the priority of the allocation relative to other m...
Definition: vk_mem_alloc.h:1268
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:1230
VmaPool VMA_NULLABLE pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:1254
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:1224
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:1337
void *VMA_NULLABLE pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:1379
VkDeviceSize offset
Offset in VkDeviceMemory object to the beginning of this allocation, in bytes.
Definition: vk_mem_alloc.h:1359
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:1342
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:1370
VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:1349
void *VMA_NULLABLE pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:1384
const char *VMA_NULLABLE pName
Custom allocation name that was set with vmaSetAllocationName().
Definition: vk_mem_alloc.h:1392
Represents single memory allocation.
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:1001
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1003
const VmaVulkanFunctions *VMA_NULLABLE pVulkanFunctions
Pointers to Vulkan functions.
Definition: vk_mem_alloc.h:1049
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB....
Definition: vk_mem_alloc.h:1012
VkInstance VMA_NOT_NULL instance
Handle to Vulkan instance object.
Definition: vk_mem_alloc.h:1054
const VkAllocationCallbacks *VMA_NULLABLE pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1015
VkDevice VMA_NOT_NULL device
Vulkan device.
Definition: vk_mem_alloc.h:1009
VkPhysicalDevice VMA_NOT_NULL physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1006
const VkDeviceSize *VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
uint32_t vulkanApiVersion
Optional.
Definition: vk_mem_alloc.h:1063
const VmaDeviceMemoryCallbacks *VMA_NULLABLE pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1018
Information about existing VmaAllocator object.
Definition: vk_mem_alloc.h:1080
VkPhysicalDevice VMA_NOT_NULL physicalDevice
Handle to Vulkan physical device object.
Definition: vk_mem_alloc.h:1090
VkInstance VMA_NOT_NULL instance
Handle to Vulkan instance object.
Definition: vk_mem_alloc.h:1085
VkDevice VMA_NOT_NULL device
Handle to Vulkan device object.
Definition: vk_mem_alloc.h:1095
Represents main object of this library initialized.
Statistics of current memory usage and available budget for a specific memory heap.
Definition: vk_mem_alloc.h:1185
VmaStatistics statistics
Statistics fetched from the library.
Definition: vk_mem_alloc.h:1188
VkDeviceSize usage
Estimated current memory usage of the program, in bytes.
Definition: vk_mem_alloc.h:1197
VkDeviceSize budget
Estimated amount of memory available to the program, in bytes.
Definition: vk_mem_alloc.h:1207
An opaque object that represents started defragmentation process.
Parameters for defragmentation.
Definition: vk_mem_alloc.h:1400
VmaPool VMA_NULLABLE pool
Custom pool to be defragmented.
Definition: vk_mem_alloc.h:1407
VmaDefragmentationFlags flags
Use combination of VmaDefragmentationFlagBits.
Definition: vk_mem_alloc.h:1402
VkDeviceSize maxBytesPerPass
Maximum numbers of bytes that can be copied during single pass, while moving allocations to different...
Definition: vk_mem_alloc.h:1412
uint32_t maxAllocationsPerPass
Maximum number of allocations that can be moved during single pass to a different place.
Definition: vk_mem_alloc.h:1417
Single move of an allocation to be done for defragmentation.
Definition: vk_mem_alloc.h:1422
VmaDefragmentationMoveOperation operation
Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is VMA_DEFR...
Definition: vk_mem_alloc.h:1424
VmaAllocation VMA_NOT_NULL dstTmpAllocation
Temporary allocation pointing to destination memory that will replace srcAllocation.
Definition: vk_mem_alloc.h:1433
VmaAllocation VMA_NOT_NULL srcAllocation
Allocation that should be moved.
Definition: vk_mem_alloc.h:1426
Parameters for incremental defragmentation steps.
Definition: vk_mem_alloc.h:1441
uint32_t moveCount
Number of elements in the pMoves array.
Definition: vk_mem_alloc.h:1443
VmaDefragmentationMove *VMA_NULLABLE VMA_LEN_IF_NOT_NULL(moveCount) pMoves
Array of moves to be performed by the user in the current defragmentation pass.
Statistics returned for defragmentation process in function vmaEndDefragmentation().
Definition: vk_mem_alloc.h:1472
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:1480
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:1474
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
Definition: vk_mem_alloc.h:1476
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:1478
More detailed statistics than VmaStatistics.
Definition: vk_mem_alloc.h:1151
VkDeviceSize allocationSizeMax
Largest allocation size. 0 if there are 0 allocations.
Definition: vk_mem_alloc.h:1159
VmaStatistics statistics
Basic statistics.
Definition: vk_mem_alloc.h:1153
VkDeviceSize allocationSizeMin
Smallest allocation size. VK_WHOLE_SIZE if there are 0 allocations.
Definition: vk_mem_alloc.h:1157
VkDeviceSize unusedRangeSizeMin
Smallest empty range size. VK_WHOLE_SIZE if there are 0 empty ranges.
Definition: vk_mem_alloc.h:1161
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1155
VkDeviceSize unusedRangeSizeMax
Largest empty range size. 0 if there are 0 empty ranges.
Definition: vk_mem_alloc.h:1163
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:940
PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:944
void *VMA_NULLABLE pUserData
Optional, can be null.
Definition: vk_mem_alloc.h:946
PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:942
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:1273
float priority
A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relat...
Definition: vk_mem_alloc.h:1308
void *VMA_NULLABLE pMemoryAllocateNext
Additional pNext chain to be attached to VkMemoryAllocateInfo used for every allocation made by this ...
Definition: vk_mem_alloc.h:1325
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:1276
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:1279
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes.
Definition: vk_mem_alloc.h:1289
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:1294
VkDeviceSize minAllocationAlignment
Additional minimum alignment to be used for all allocations created from this pool.
Definition: vk_mem_alloc.h:1315
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool.
Definition: vk_mem_alloc.h:1302
Represents custom memory pool.
Calculated statistics of memory usage e.g.
Definition: vk_mem_alloc.h:1111
VkDeviceSize allocationBytes
Total number of bytes occupied by all VmaAllocation objects.
Definition: vk_mem_alloc.h:1133
VkDeviceSize blockBytes
Number of bytes allocated in VkDeviceMemory blocks.
Definition: vk_mem_alloc.h:1126
uint32_t blockCount
Number of VkDeviceMemory objects - Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1114
uint32_t allocationCount
Number of VmaAllocation objects allocated.
Definition: vk_mem_alloc.h:1119
General statistics from current state of the Allocator - total memory usage across all memory heaps a...
Definition: vk_mem_alloc.h:1173
VmaDetailedStatistics memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1175
VmaDetailedStatistics total
Definition: vk_mem_alloc.h:1176
VmaDetailedStatistics memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1174
Parameters of created virtual allocation to be passed to vmaVirtualAllocate().
Definition: vk_mem_alloc.h:1513
VkDeviceSize alignment
Required alignment of the allocation.
Definition: vk_mem_alloc.h:1523
void *VMA_NULLABLE pUserData
Custom pointer to be associated with the allocation.
Definition: vk_mem_alloc.h:1531
VkDeviceSize size
Size of the allocation.
Definition: vk_mem_alloc.h:1518
VmaVirtualAllocationCreateFlags flags
Use combination of VmaVirtualAllocationCreateFlagBits.
Definition: vk_mem_alloc.h:1526
Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo().
Definition: vk_mem_alloc.h:1536
void *VMA_NULLABLE pUserData
Custom pointer associated with the allocation.
Definition: vk_mem_alloc.h:1551
VkDeviceSize offset
Offset of the allocation.
Definition: vk_mem_alloc.h:1541
VkDeviceSize size
Size of the allocation.
Definition: vk_mem_alloc.h:1546
Represents single memory allocation done inside VmaVirtualBlock.
Parameters of created VmaVirtualBlock object to be passed to vmaCreateVirtualBlock().
Definition: vk_mem_alloc.h:1492
VkDeviceSize size
Total size of the virtual block.
Definition: vk_mem_alloc.h:1498
VmaVirtualBlockCreateFlags flags
Use combination of VmaVirtualBlockCreateFlagBits.
Definition: vk_mem_alloc.h:1502
const VkAllocationCallbacks *VMA_NULLABLE pAllocationCallbacks
Custom CPU memory allocation callbacks.
Definition: vk_mem_alloc.h:1508
Handle to a virtual block object that allows to use core allocation algorithm without allocating any ...
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:954
PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer
Definition: vk_mem_alloc.h:971
PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage
Definition: vk_mem_alloc.h:974
PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory
Definition: vk_mem_alloc.h:962
PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:960
PFN_vkGetInstanceProcAddr VMA_NULLABLE vkGetInstanceProcAddr
Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.
Definition: vk_mem_alloc.h:956
PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:965
PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer
Definition: vk_mem_alloc.h:975
PFN_vkGetDeviceProcAddr VMA_NULLABLE vkGetDeviceProcAddr
Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.
Definition: vk_mem_alloc.h:958
PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:959
PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory
Definition: vk_mem_alloc.h:967
PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:966
PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer
Definition: vk_mem_alloc.h:972
PFN_vkMapMemory VMA_NULLABLE vkMapMemory
Definition: vk_mem_alloc.h:963
PFN_vkCreateImage VMA_NULLABLE vkCreateImage
Definition: vk_mem_alloc.h:973
PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory
Definition: vk_mem_alloc.h:961
PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory
Definition: vk_mem_alloc.h:968
PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:969
PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:970
PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory
Definition: vk_mem_alloc.h:964
Definition: mongoose.cpp:428
#define VMA_NOT_NULL
Definition: vk_mem_alloc.h:271
#define VMA_CALL_PRE
Definition: vk_mem_alloc.h:234
#define VMA_NULLABLE
Definition: vk_mem_alloc.h:261
#define VMA_LEN_IF_NOT_NULL(len)
Definition: vk_mem_alloc.h:252
#define VMA_CALL_POST
Definition: vk_mem_alloc.h:237
#define VMA_NULLABLE_NON_DISPATCHABLE
Definition: vk_mem_alloc.h:289
#define VMA_NOT_NULL_NON_DISPATCHABLE
Definition: vk_mem_alloc.h:281
#define VKAPI_PTR
Definition: vk_platform.h:58
VkResult(GLAD_API_PTR * PFN_vkBindBufferMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo *pBindInfos)
Definition: vulkan.h:4214
#define VK_API_VERSION_1_0
Definition: vulkan.h:245
VkResult(GLAD_API_PTR * PFN_vkBindImageMemory)(VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset)
Definition: vulkan.h:4215
#define vkFreeMemory
Definition: vulkan.h:4725
#define vkDestroyBuffer
Definition: vulkan.h:4649
#define vkCreateBuffer
Definition: vulkan.h:4591
#define vkFlushMappedMemoryRanges
Definition: vulkan.h:4719
#define vkAllocateMemory
Definition: vulkan.h:4445
VkFlags VkMemoryPropertyFlags
Definition: vulkan.h:2441
#define VK_MAX_MEMORY_HEAPS
Definition: vulkan.h:212
VkResult(GLAD_API_PTR * PFN_vkMapMemory)(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void **ppData)
Definition: vulkan.h:4409
#define VK_MAX_MEMORY_TYPES
Definition: vulkan.h:213
#define VK_VERSION_MAJOR(version)
Definition: vulkan.h:231
#define VK_TRUE
Definition: vulkan.h:221
@ VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT
Definition: vulkan.h:921
@ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
Definition: vulkan.h:919
@ VK_MEMORY_PROPERTY_PROTECTED_BIT
Definition: vulkan.h:922
@ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
Definition: vulkan.h:917
@ VK_MEMORY_PROPERTY_HOST_CACHED_BIT
Definition: vulkan.h:920
@ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
Definition: vulkan.h:918
VkResult(GLAD_API_PTR * PFN_vkCreateBuffer)(VkDevice device, const VkBufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer)
Definition: vulkan.h:4284
void(GLAD_API_PTR * PFN_vkDestroyBuffer)(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator)
Definition: vulkan.h:4313
#define vkBindImageMemory2
Definition: vulkan.h:4455
void(GLAD_API_PTR * PFN_vkCmdCopyBuffer)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy *pRegions)
Definition: vulkan.h:4233
#define vkGetPhysicalDeviceMemoryProperties2
Definition: vulkan.h:4799
#define vkBindBufferMemory2
Definition: vulkan.h:4451
#define vkBindImageMemory
Definition: vulkan.h:4453
#define VK_DEFINE_HANDLE(object)
Definition: vulkan.h:250
@ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
Definition: vulkan.h:1294
VkResult(GLAD_API_PTR * PFN_vkInvalidateMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange *pMemoryRanges)
Definition: vulkan.h:4408
#define VK_API_VERSION_PATCH(version)
Definition: vulkan.h:241
VkResult(GLAD_API_PTR * PFN_vkBindBufferMemory)(VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset)
Definition: vulkan.h:4213
uint64_t VkDeviceSize
Definition: vulkan.h:2417
void(GLAD_API_PTR * PFN_vkGetImageMemoryRequirements2)(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo, VkMemoryRequirements2 *pMemoryRequirements)
Definition: vulkan.h:4373
@ VK_IMAGE_TILING_OPTIMAL
Definition: vulkan.h:828
#define vkMapMemory
Definition: vulkan.h:4841
VkResult(GLAD_API_PTR * PFN_vkBindImageMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo *pBindInfos)
Definition: vulkan.h:4216
PFN_vkVoidFunction(GLAD_API_PTR * PFN_vkGetInstanceProcAddr)(VkInstance instance, const char *pName)
Definition: vulkan.h:4377
VkResult(GLAD_API_PTR * PFN_vkCreateImage)(VkDevice device, const VkImageCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkImage *pImage)
Definition: vulkan.h:4298
#define vkBindBufferMemory
Definition: vulkan.h:4449
#define VK_VERSION_MINOR(version)
Definition: vulkan.h:233
VkResult(GLAD_API_PTR * PFN_vkAllocateMemory)(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo, const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory)
Definition: vulkan.h:4211
#define vkUnmapMemory
Definition: vulkan.h:4881
void(GLAD_API_PTR * PFN_vkGetPhysicalDeviceMemoryProperties2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
Definition: vulkan.h:4388
VkFlags VkMemoryHeapFlags
Definition: vulkan.h:2442
@ VK_MEMORY_HEAP_MULTI_INSTANCE_BIT
Definition: vulkan.h:891
@ VK_MEMORY_HEAP_DEVICE_LOCAL_BIT
Definition: vulkan.h:890
@ VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR
Definition: vulkan.h:1640
void(GLAD_API_PTR * PFN_vkDestroyImage)(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator)
Definition: vulkan.h:4325
#define vkCreateImage
Definition: vulkan.h:4619
#define VK_API_VERSION_MINOR(version)
Definition: vulkan.h:240
#define vkDestroyImage
Definition: vulkan.h:4673
#define VK_FALSE
Definition: vulkan.h:176
#define VK_MAKE_VERSION(major, minor, patch)
Definition: vulkan.h:228
void(GLAD_API_PTR * PFN_vkFreeMemory)(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks *pAllocator)
Definition: vulkan.h:4351
PFN_vkVoidFunction(GLAD_API_PTR * PFN_vkGetDeviceProcAddr)(VkDevice device, const char *pName)
Definition: vulkan.h:4367
void(GLAD_API_PTR * PFN_vkGetBufferMemoryRequirements2)(VkDevice device, const VkBufferMemoryRequirementsInfo2 *pInfo, VkMemoryRequirements2 *pMemoryRequirements)
Definition: vulkan.h:4356
@ VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU
Definition: vulkan.h:927
void(GLAD_API_PTR * PFN_vkGetBufferMemoryRequirements)(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements)
Definition: vulkan.h:4355
#define VK_WHOLE_SIZE
Definition: vulkan.h:223
#define vkGetPhysicalDeviceMemoryProperties
Definition: vulkan.h:4797
@ VK_BUFFER_USAGE_TRANSFER_DST_BIT
Definition: vulkan.h:393
@ VK_BUFFER_USAGE_TRANSFER_SRC_BIT
Definition: vulkan.h:392
void(GLAD_API_PTR * PFN_vkGetPhysicalDeviceProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties *pProperties)
Definition: vulkan.h:4390
void(GLAD_API_PTR * PFN_vkUnmapMemory)(VkDevice device, VkDeviceMemory memory)
Definition: vulkan.h:4429
uint32_t VkFlags
Definition: vulkan.h:2416
VkResult
Definition: vulkan.h:1020
@ VK_ERROR_EXTENSION_NOT_PRESENT
Definition: vulkan.h:1033
@ VK_INCOMPLETE
Definition: vulkan.h:1026
@ VK_SUCCESS
Definition: vulkan.h:1021
@ VK_ERROR_INITIALIZATION_FAILED
Definition: vulkan.h:1029
@ VK_ERROR_OUT_OF_DEVICE_MEMORY
Definition: vulkan.h:1028
@ VK_ERROR_FEATURE_NOT_PRESENT
Definition: vulkan.h:1034
@ VK_ERROR_TOO_MANY_OBJECTS
Definition: vulkan.h:1036
@ VK_ERROR_MEMORY_MAP_FAILED
Definition: vulkan.h:1031
#define vkGetPhysicalDeviceProperties
Definition: vulkan.h:4803
#define vkGetBufferMemoryRequirements2
Definition: vulkan.h:4735
#define vkInvalidateMappedMemoryRanges
Definition: vulkan.h:4839
VkResult(GLAD_API_PTR * PFN_vkFlushMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange *pMemoryRanges)
Definition: vulkan.h:4348
#define VK_NULL_HANDLE
Definition: vulkan.h:266
#define vkGetImageMemoryRequirements
Definition: vulkan.h:4767
#define vkCmdCopyBuffer
Definition: vulkan.h:4489
void(GLAD_API_PTR * PFN_vkGetImageMemoryRequirements)(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements)
Definition: vulkan.h:4372
#define vkGetBufferMemoryRequirements
Definition: vulkan.h:4733
void(GLAD_API_PTR * PFN_vkGetPhysicalDeviceMemoryProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties *pMemoryProperties)
Definition: vulkan.h:4387
uint32_t VkBool32
Definition: vulkan.h:2415
#define vkGetInstanceProcAddr
Definition: vulkan.h:4777
#define vkGetDeviceProcAddr
Definition: vulkan.h:4757
#define vkGetImageMemoryRequirements2
Definition: vulkan.h:4769
#define VK_API_VERSION_MAJOR(version)
Definition: vulkan.h:239
@ VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2
Definition: vulkan.h:1165
@ VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO
Definition: vulkan.h:1110
@ VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO
Definition: vulkan.h:1103
@ VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE
Definition: vulkan.h:1104
#define INDENT
Definition: wdbg_sym.cpp:492
unsigned short uint16_t
Definition: wposix_types.h:52
unsigned int uint32_t
Definition: wposix_types.h:53
unsigned long long uint64_t
Definition: wposix_types.h:57
#define UINT32_MAX
Definition: wposix_types.h:73
unsigned char uint8_t
Definition: wposix_types.h:51
pthread_key_t key
Definition: wpthread.cpp:149