Pyrogenesis trunk
vk_mem_alloc.h
Go to the documentation of this file.
1//
2// Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved.
3//
4// Permission is hereby granted, free of charge, to any person obtaining a copy
5// of this software and associated documentation files (the "Software"), to deal
6// in the Software without restriction, including without limitation the rights
7// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8// copies of the Software, and to permit persons to whom the Software is
9// furnished to do so, subject to the following conditions:
10//
11// The above copyright notice and this permission notice shall be included in
12// all copies or substantial portions of the Software.
13//
14// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20// THE SOFTWARE.
21//
22
23#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24#define AMD_VULKAN_MEMORY_ALLOCATOR_H
25
26/** \mainpage Vulkan Memory Allocator
27
28<b>Version 3.0.1 (2022-05-26)</b>
29
30Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved. \n
31License: MIT
32
33<b>API documentation divided into groups:</b> [Modules](modules.html)
34
35\section main_table_of_contents Table of contents
36
37- <b>User guide</b>
38 - \subpage quick_start
39 - [Project setup](@ref quick_start_project_setup)
40 - [Initialization](@ref quick_start_initialization)
41 - [Resource allocation](@ref quick_start_resource_allocation)
42 - \subpage choosing_memory_type
43 - [Usage](@ref choosing_memory_type_usage)
44 - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags)
45 - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types)
46 - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools)
47 - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations)
48 - \subpage memory_mapping
49 - [Mapping functions](@ref memory_mapping_mapping_functions)
50 - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory)
51 - [Cache flush and invalidate](@ref memory_mapping_cache_control)
52 - \subpage staying_within_budget
53 - [Querying for budget](@ref staying_within_budget_querying_for_budget)
54 - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage)
55 - \subpage resource_aliasing
56 - \subpage custom_memory_pools
57 - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex)
58 - [Linear allocation algorithm](@ref linear_algorithm)
59 - [Free-at-once](@ref linear_algorithm_free_at_once)
60 - [Stack](@ref linear_algorithm_stack)
61 - [Double stack](@ref linear_algorithm_double_stack)
62 - [Ring buffer](@ref linear_algorithm_ring_buffer)
63 - \subpage defragmentation
64 - \subpage statistics
65 - [Numeric statistics](@ref statistics_numeric_statistics)
66 - [JSON dump](@ref statistics_json_dump)
67 - \subpage allocation_annotation
68 - [Allocation user data](@ref allocation_user_data)
69 - [Allocation names](@ref allocation_names)
70 - \subpage virtual_allocator
71 - \subpage debugging_memory_usage
72 - [Memory initialization](@ref debugging_memory_usage_initialization)
73 - [Margins](@ref debugging_memory_usage_margins)
74 - [Corruption detection](@ref debugging_memory_usage_corruption_detection)
75 - \subpage opengl_interop
76- \subpage usage_patterns
77 - [GPU-only resource](@ref usage_patterns_gpu_only)
78 - [Staging copy for upload](@ref usage_patterns_staging_copy_upload)
79 - [Readback](@ref usage_patterns_readback)
80 - [Advanced data uploading](@ref usage_patterns_advanced_data_uploading)
81 - [Other use cases](@ref usage_patterns_other_use_cases)
82- \subpage configuration
83 - [Pointers to Vulkan functions](@ref config_Vulkan_functions)
84 - [Custom host memory allocator](@ref custom_memory_allocator)
85 - [Device memory allocation callbacks](@ref allocation_callbacks)
86 - [Device heap memory limit](@ref heap_memory_limit)
87- <b>Extension support</b>
88 - \subpage vk_khr_dedicated_allocation
89 - \subpage enabling_buffer_device_address
90 - \subpage vk_ext_memory_priority
91 - \subpage vk_amd_device_coherent_memory
92- \subpage general_considerations
93 - [Thread safety](@ref general_considerations_thread_safety)
94 - [Versioning and compatibility](@ref general_considerations_versioning_and_compatibility)
95 - [Validation layer warnings](@ref general_considerations_validation_layer_warnings)
96 - [Allocation algorithm](@ref general_considerations_allocation_algorithm)
97 - [Features not supported](@ref general_considerations_features_not_supported)
98
99\section main_see_also See also
100
101- [**Product page on GPUOpen**](https://gpuopen.com/gaming-product/vulkan-memory-allocator/)
102- [**Source repository on GitHub**](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator)
103
104\defgroup group_init Library initialization
105
106\brief API elements related to the initialization and management of the entire library, especially #VmaAllocator object.
107
108\defgroup group_alloc Memory allocation
109
110\brief API elements related to the allocation, deallocation, and management of Vulkan memory, buffers, images.
111Most basic ones being: vmaCreateBuffer(), vmaCreateImage().
112
113\defgroup group_virtual Virtual allocator
114
115\brief API elements related to the mechanism of \ref virtual_allocator - using the core allocation algorithm
116for user-defined purpose without allocating any real GPU memory.
117
118\defgroup group_stats Statistics
119
120\brief API elements that query current status of the allocator, from memory usage, budget, to full dump of the internal state in JSON format.
121See documentation chapter: \ref statistics.
122*/
123
124
125#ifdef __cplusplus
126extern "C" {
127#endif
128
129#ifndef VULKAN_H_
130 #include <vulkan/vulkan.h>
131#endif
132
133// Define this macro to declare maximum supported Vulkan version in format AAABBBCCC,
134// where AAA = major, BBB = minor, CCC = patch.
135// If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
136#if !defined(VMA_VULKAN_VERSION)
137 #if defined(VK_VERSION_1_3)
138 #define VMA_VULKAN_VERSION 1003000
139 #elif defined(VK_VERSION_1_2)
140 #define VMA_VULKAN_VERSION 1002000
141 #elif defined(VK_VERSION_1_1)
142 #define VMA_VULKAN_VERSION 1001000
143 #else
144 #define VMA_VULKAN_VERSION 1000000
145 #endif
146#endif
147
148#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
168 #if VMA_VULKAN_VERSION >= 1001000
174 #endif // #if VMA_VULKAN_VERSION >= 1001000
175#endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES
176
177#if !defined(VMA_DEDICATED_ALLOCATION)
178 #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
179 #define VMA_DEDICATED_ALLOCATION 1
180 #else
181 #define VMA_DEDICATED_ALLOCATION 0
182 #endif
183#endif
184
185#if !defined(VMA_BIND_MEMORY2)
186 #if VK_KHR_bind_memory2
187 #define VMA_BIND_MEMORY2 1
188 #else
189 #define VMA_BIND_MEMORY2 0
190 #endif
191#endif
192
193#if !defined(VMA_MEMORY_BUDGET)
194 #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
195 #define VMA_MEMORY_BUDGET 1
196 #else
197 #define VMA_MEMORY_BUDGET 0
198 #endif
199#endif
200
201// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers.
202#if !defined(VMA_BUFFER_DEVICE_ADDRESS)
203 #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
204 #define VMA_BUFFER_DEVICE_ADDRESS 1
205 #else
206 #define VMA_BUFFER_DEVICE_ADDRESS 0
207 #endif
208#endif
209
210// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers.
211#if !defined(VMA_MEMORY_PRIORITY)
212 #if VK_EXT_memory_priority
213 #define VMA_MEMORY_PRIORITY 1
214 #else
215 #define VMA_MEMORY_PRIORITY 0
216 #endif
217#endif
218
219// Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers.
220#if !defined(VMA_EXTERNAL_MEMORY)
221 #if VK_KHR_external_memory
222 #define VMA_EXTERNAL_MEMORY 1
223 #else
224 #define VMA_EXTERNAL_MEMORY 0
225 #endif
226#endif
227
228// Define these macros to decorate all public functions with additional code,
229// before and after returned type, appropriately. This may be useful for
230// exporting the functions when compiling VMA as a separate library. Example:
231// #define VMA_CALL_PRE __declspec(dllexport)
232// #define VMA_CALL_POST __cdecl
233#ifndef VMA_CALL_PRE
234 #define VMA_CALL_PRE
235#endif
236#ifndef VMA_CALL_POST
237 #define VMA_CALL_POST
238#endif
239
240// Define this macro to decorate pointers with an attribute specifying the
241// length of the array they point to if they are not null.
242//
243// The length may be one of
244// - The name of another parameter in the argument list where the pointer is declared
245// - The name of another member in the struct where the pointer is declared
246// - The name of a member of a struct type, meaning the value of that member in
247// the context of the call. For example
248// VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount"),
249// this means the number of memory heaps available in the device associated
250// with the VmaAllocator being dealt with.
251#ifndef VMA_LEN_IF_NOT_NULL
252 #define VMA_LEN_IF_NOT_NULL(len)
253#endif
254
255// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
256// see: https://clang.llvm.org/docs/AttributeReference.html#nullable
257#ifndef VMA_NULLABLE
258 #ifdef __clang__
259 #define VMA_NULLABLE _Nullable
260 #else
261 #define VMA_NULLABLE
262 #endif
263#endif
264
265// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
266// see: https://clang.llvm.org/docs/AttributeReference.html#nonnull
267#ifndef VMA_NOT_NULL
268 #ifdef __clang__
269 #define VMA_NOT_NULL _Nonnull
270 #else
271 #define VMA_NOT_NULL
272 #endif
273#endif
274
275// If non-dispatchable handles are represented as pointers then we can give
276// them nullability annotations
277#ifndef VMA_NOT_NULL_NON_DISPATCHABLE
278 #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
279 #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
280 #else
281 #define VMA_NOT_NULL_NON_DISPATCHABLE
282 #endif
283#endif
284
285#ifndef VMA_NULLABLE_NON_DISPATCHABLE
286 #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
287 #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
288 #else
289 #define VMA_NULLABLE_NON_DISPATCHABLE
290 #endif
291#endif
292
293#ifndef VMA_STATS_STRING_ENABLED
294 #define VMA_STATS_STRING_ENABLED 1
295#endif
296
297////////////////////////////////////////////////////////////////////////////////
298////////////////////////////////////////////////////////////////////////////////
299//
300// INTERFACE
301//
302////////////////////////////////////////////////////////////////////////////////
303////////////////////////////////////////////////////////////////////////////////
304
305// Sections for managing code placement in file, only for development purposes e.g. for convenient folding inside an IDE.
306#ifndef _VMA_ENUM_DECLARATIONS
307
308/**
309\addtogroup group_init
310@{
311*/
312
313/// Flags for created #VmaAllocator.
315{
316 /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you.
317
318 Using this flag may increase performance because internal mutexes are not used.
319 */
321 /** \brief Enables usage of VK_KHR_dedicated_allocation extension.
322
323 The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
324 When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
325
326 Using this extension will automatically allocate dedicated blocks of memory for
327 some buffers and images instead of suballocating place for them out of bigger
328 memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
329 flag) when it is recommended by the driver. It may improve performance on some
330 GPUs.
331
332 You may set this flag only if you found out that following device extensions are
333 supported, you enabled them while creating Vulkan device passed as
334 VmaAllocatorCreateInfo::device, and you want them to be used internally by this
335 library:
336
337 - VK_KHR_get_memory_requirements2 (device extension)
338 - VK_KHR_dedicated_allocation (device extension)
339
340 When this flag is set, you can experience following warnings reported by Vulkan
341 validation layer. You can ignore them.
342
343 > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer.
344 */
346 /**
347 Enables usage of VK_KHR_bind_memory2 extension.
348
349 The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
350 When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
351
352 You may set this flag only if you found out that this device extension is supported,
353 you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
354 and you want it to be used internally by this library.
355
356 The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`,
357 which allow passing a chain of `pNext` structures while binding.
358 This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2().
359 */
361 /**
362 Enables usage of VK_EXT_memory_budget extension.
363
364 You may set this flag only if you found out that this device extension is supported,
365 you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
366 and you want it to be used internally by this library, along with another instance extension
367 VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted).
368
369 The extension provides query for current memory usage and budget, which will probably
370 be more accurate than an estimation used by the library otherwise.
371 */
373 /**
374 Enables usage of VK_AMD_device_coherent_memory extension.
375
376 You may set this flag only if you:
377
378 - found out that this device extension is supported and enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
379 - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device,
380 - want it to be used internally by this library.
381
382 The extension and accompanying device feature provide access to memory types with
383 `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags.
384 They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR.
385
386 When the extension is not enabled, such memory types are still enumerated, but their usage is illegal.
387 To protect from this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such memory type,
388 returning `VK_ERROR_FEATURE_NOT_PRESENT`.
389 */
391 /**
392 Enables usage of "buffer device address" feature, which allows you to use function
393 `vkGetBufferDeviceAddress*` to get raw GPU pointer to a buffer and pass it for usage inside a shader.
394
395 You may set this flag only if you:
396
397 1. (For Vulkan version < 1.2) Found as available and enabled device extension
398 VK_KHR_buffer_device_address.
399 This extension is promoted to core Vulkan 1.2.
400 2. Found as available and enabled device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress`.
401
402 When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA.
403 The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to
404 allocated memory blocks wherever it might be needed.
405
406 For more information, see documentation chapter \ref enabling_buffer_device_address.
407 */
409 /**
410 Enables usage of VK_EXT_memory_priority extension in the library.
411
412 You may set this flag only if you found available and enabled this device extension,
413 along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`,
414 while creating Vulkan device passed as VmaAllocatorCreateInfo::device.
415
416 When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority
417 are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored.
418
419 A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
420 Larger values are higher priority. The granularity of the priorities is implementation-dependent.
421 It is automatically passed to every call to `vkAllocateMemory` done by the library using structure `VkMemoryPriorityAllocateInfoEXT`.
422 The value to be used for default priority is 0.5.
423 For more details, see the documentation of the VK_EXT_memory_priority extension.
424 */
426
429/// See #VmaAllocatorCreateFlagBits.
431
432/** @} */
433
434/**
435\addtogroup group_alloc
436@{
437*/
438
439/// \brief Intended usage of the allocated memory.
440typedef enum VmaMemoryUsage
441{
442 /** No intended memory usage specified.
443 Use other members of VmaAllocationCreateInfo to specify your requirements.
444 */
446 /**
447 \deprecated Obsolete, preserved for backward compatibility.
448 Prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
449 */
451 /**
452 \deprecated Obsolete, preserved for backward compatibility.
453 Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`.
454 */
456 /**
457 \deprecated Obsolete, preserved for backward compatibility.
458 Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
459 */
461 /**
462 \deprecated Obsolete, preserved for backward compatibility.
463 Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
464 */
466 /**
467 \deprecated Obsolete, preserved for backward compatibility.
468 Prefers not `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
469 */
471 /**
472 Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`.
473 Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation.
474
475 Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`.
476
477 Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
478 */
480 /**
481 Selects best memory type automatically.
482 This flag is recommended for most common use cases.
483
484 When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
485 you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
486 in VmaAllocationCreateInfo::flags.
487
488 It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
489 vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
490 and not with generic memory allocation functions.
491 */
493 /**
494 Selects best memory type automatically with preference for GPU (device) memory.
495
496 When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
497 you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
498 in VmaAllocationCreateInfo::flags.
499
500 It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
501 vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
502 and not with generic memory allocation functions.
503 */
505 /**
506 Selects best memory type automatically with preference for CPU (host) memory.
507
508 When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
509 you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
510 in VmaAllocationCreateInfo::flags.
511
512 It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
513 vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
514 and not with generic memory allocation functions.
515 */
517
518 VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
520
521/// Flags to be passed as VmaAllocationCreateInfo::flags.
523{
524 /** \brief Set this flag if the allocation should have its own memory block.
525
526 Use it for special, big resources, like fullscreen images used as attachments.
527 */
529
530 /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block.
531
532 If new allocation cannot be placed in any of the existing blocks, allocation
533 fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
534
535 You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and
536 #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense.
537 */
539 /** \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
540
541 Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData.
542
543 It is valid to use this flag for allocation made from memory type that is not
544 `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is
545 useful if you need an allocation that is efficient to use on GPU
546 (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that
547 support it (e.g. Intel GPU).
548 */
550 /** \deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead.
551
552 Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a
553 null-terminated string. Instead of copying pointer value, a local copy of the
554 string is made and stored in allocation's `pName`. The string is automatically
555 freed together with the allocation. It is also used in vmaBuildStatsString().
556 */
558 /** Allocation will be created from upper stack in a double stack pool.
559
560 This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag.
561 */
563 /** Create both buffer/image and allocation, but don't bind them together.
564 It is useful when you want to bind yourself to do some more advanced binding, e.g. using some extensions.
565 The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage().
566 Otherwise it is ignored.
567
568 If you want to make sure the new buffer/image is not tied to the new memory allocation
569 through `VkMemoryDedicatedAllocateInfoKHR` structure in case the allocation ends up in its own memory block,
570 use also flag #VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT.
571 */
573 /** Create allocation only if additional device memory required for it, if any, won't exceed
574 memory budget. Otherwise return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
575 */
577 /** \brief Set this flag if the allocated memory will have aliasing resources.
578
579 Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified.
580 Otherwise created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan Validation Layer errors.
581 */
583 /**
584 Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).
585
586 - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value,
587 you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
588 - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
589 This includes allocations created in \ref custom_memory_pools.
590
591 Declares that mapped memory will only be written sequentially, e.g. using `memcpy()` or a loop writing number-by-number,
592 never read or accessed randomly, so a memory type can be selected that is uncached and write-combined.
593
594 \warning Violating this declaration may work correctly, but will likely be very slow.
595 Watch out for implicit reads introduced by doing e.g. `pMappedData[i] += x;`
596 Better prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once.
597 */
599 /**
600 Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).
601
602 - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value,
603 you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
604 - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
605 This includes allocations created in \ref custom_memory_pools.
606
607 Declares that mapped memory can be read, written, and accessed in random order,
608 so a `HOST_CACHED` memory type is required.
609 */
611 /**
612 Together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT,
613 it says that despite request for host access, a not-`HOST_VISIBLE` memory type can be selected
614 if it may improve performance.
615
616 By using this flag, you declare that you will check if the allocation ended up in a `HOST_VISIBLE` memory type
617 (e.g. using vmaGetAllocationMemoryProperties()) and if not, you will create some "staging" buffer and
618 issue an explicit transfer to write/read your data.
619 To prepare for this possibility, don't forget to add appropriate flags like
620 `VK_BUFFER_USAGE_TRANSFER_DST_BIT`, `VK_BUFFER_USAGE_TRANSFER_SRC_BIT` to the parameters of created buffer or image.
621 */
623 /** Allocation strategy that chooses smallest possible free range for the allocation
624 to minimize memory usage and fragmentation, possibly at the expense of allocation time.
625 */
627 /** Allocation strategy that chooses first suitable free range for the allocation -
628 not necessarily in terms of the smallest offset but the one that is easiest and fastest to find
629 to minimize allocation time, possibly at the expense of allocation quality.
630 */
632 /** Allocation strategy that chooses always the lowest offset in available space.
633 This is not the most efficient strategy but achieves highly packed data.
634 Used internally by defragmentation, not recommended in typical usage.
635 */
637 /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT.
638 */
640 /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT.
641 */
643 /** A bit mask to extract only `STRATEGY` bits from entire set of flags.
644 */
649
652/// See #VmaAllocationCreateFlagBits.
654
655/// Flags to be passed as VmaPoolCreateInfo::flags.
657{
658 /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored.
659
660 This is an optional optimization flag.
661
662 If you always allocate using vmaCreateBuffer(), vmaCreateImage(),
663 vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator
664 knows exact type of your allocations so it can handle Buffer-Image Granularity
665 in the optimal way.
666
667 If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(),
668 exact type of such allocations is not known, so allocator must be conservative
669 in handling Buffer-Image Granularity, which can lead to suboptimal allocation
670 (wasted memory). In that case, if you can make sure you always allocate only
671 buffers and linear images or only optimal images out of this pool, use this flag
672 to make allocator disregard Buffer-Image Granularity and so make allocations
673 faster and more optimal.
674 */
676
677 /** \brief Enables alternative, linear allocation algorithm in this pool.
678
679 Specify this flag to enable linear allocation algorithm, which always creates
680 new allocations after last one and doesn't reuse space from allocations freed in
681 between. It trades memory consumption for simplified algorithm and data
682 structure, which has better performance and uses less memory for metadata.
683
684 By using this flag, you can achieve behavior of free-at-once, stack,
685 ring buffer, and double stack.
686 For details, see documentation chapter \ref linear_algorithm.
687 */
689
690 /** Bit mask to extract only `ALGORITHM` bits from entire set of flags.
691 */
694
697/// Flags to be passed as VmaPoolCreateInfo::flags. See #VmaPoolCreateFlagBits.
699
700/// Flags to be passed as VmaDefragmentationInfo::flags.
702{
703 /* \brief Use simple but fast algorithm for defragmentation.
704 May not achieve best results but will require least time to compute and least allocations to copy.
705 */
707 /* \brief Default defragmentation algorithm, applied also when no `ALGORITHM` flag is specified.
708 Offers a balance between defragmentation quality and the amount of allocations and bytes that need to be moved.
709 */
711 /* \brief Perform full defragmentation of memory.
712 Can result in notably more time to compute and allocations to copy, but will achieve best memory packing.
713 */
715 /** \brief Use the most robust algorithm at the cost of time to compute and number of copies to make.
716 Only available when bufferImageGranularity is greater than 1, since it aims to reduce
717 alignment issues between different types of resources.
718 Otherwise falls back to same behavior as #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT.
719 */
721
722 /// A bit mask to extract only `ALGORITHM` bits from entire set of flags.
728
731/// See #VmaDefragmentationFlagBits.
733
734/// Operation performed on single defragmentation move. See structure #VmaDefragmentationMove.
736{
737 /// Buffer/image has been recreated at `dstTmpAllocation`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass().
739 /// Set this value if you cannot move the allocation. New place reserved at `dstTmpAllocation` will be freed. `srcAllocation` will remain unchanged.
741 /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved at `dstTmpAllocation` will be freed, along with `srcAllocation`, which will be destroyed.
744
745/** @} */
746
747/**
748\addtogroup group_virtual
749@{
750*/
751
752/// Flags to be passed as VmaVirtualBlockCreateInfo::flags.
754{
755 /** \brief Enables alternative, linear allocation algorithm in this virtual block.
756
757 Specify this flag to enable linear allocation algorithm, which always creates
758 new allocations after last one and doesn't reuse space from allocations freed in
759 between. It trades memory consumption for simplified algorithm and data
760 structure, which has better performance and uses less memory for metadata.
761
762 By using this flag, you can achieve behavior of free-at-once, stack,
763 ring buffer, and double stack.
764 For details, see documentation chapter \ref linear_algorithm.
765 */
767
768 /** \brief Bit mask to extract only `ALGORITHM` bits from entire set of flags.
769 */
772
775/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. See #VmaVirtualBlockCreateFlagBits.
777
778/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags.
780{
781 /** \brief Allocation will be created from upper stack in a double stack pool.
782
783 This flag is only allowed for virtual blocks created with #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT flag.
784 */
786 /** \brief Allocation strategy that tries to minimize memory usage.
787 */
789 /** \brief Allocation strategy that tries to minimize allocation time.
790 */
792 /** Allocation strategy that always chooses the lowest offset in available space.
793 This is not the most efficient strategy but achieves highly packed data.
794 */
796 /** \brief A bit mask to extract only `STRATEGY` bits from entire set of flags.
797
798 These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits.
799 */
801
804/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See #VmaVirtualAllocationCreateFlagBits.
806
807/** @} */
808
809#endif // _VMA_ENUM_DECLARATIONS
810
811#ifndef _VMA_DATA_TYPES_DECLARATIONS
812
813/**
814\addtogroup group_init
815@{ */
816
817/** \struct VmaAllocator
818\brief Represents the main object of this library, once initialized.
819
820Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it.
821Call function vmaDestroyAllocator() to destroy it.
822
823It is recommended to create just one object of this type per `VkDevice` object,
824right after Vulkan is initialized and keep it alive until before Vulkan device is destroyed.
825*/
827
828/** @} */
829
830/**
831\addtogroup group_alloc
832@{
833*/
834
835/** \struct VmaPool
836\brief Represents a custom memory pool.
837
838Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it.
839Call function vmaDestroyPool() to destroy it.
840
841For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools).
842*/
844
845/** \struct VmaAllocation
846\brief Represents single memory allocation.
847
848It may be either dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type
849plus unique offset.
850
851There are multiple ways to create such object.
852You need to fill structure VmaAllocationCreateInfo.
853For more information see [Choosing memory type](@ref choosing_memory_type).
854
855Although the library provides convenience functions that create Vulkan buffer or image,
856allocate memory for it and bind them together,
857binding of the allocation to a buffer or an image is out of scope of the allocation itself.
858Allocation object can exist without buffer/image bound,
859binding can be done manually by the user, and destruction of it can be done
860independently of destruction of the allocation.
861
862The object also remembers its size and some other information.
863To retrieve this information, use function vmaGetAllocationInfo() and inspect
864returned structure VmaAllocationInfo.
865*/
867
868/** \struct VmaDefragmentationContext
869\brief An opaque object that represents started defragmentation process.
870
871Fill structure #VmaDefragmentationInfo and call function vmaBeginDefragmentation() to create it.
872Call function vmaEndDefragmentation() to destroy it.
873*/
875
876/** @} */
877
878/**
879\addtogroup group_virtual
880@{
881*/
882
883/** \struct VmaVirtualAllocation
884\brief Represents single memory allocation done inside VmaVirtualBlock.
885
886Use it as a unique identifier to virtual allocation within the single block.
887
888Use value `VK_NULL_HANDLE` to represent a null/invalid allocation.
889*/
891
892/** @} */
893
894/**
895\addtogroup group_virtual
896@{
897*/
898
899/** \struct VmaVirtualBlock
900\brief Handle to a virtual block object that allows using the core allocation algorithm without allocating any real GPU memory.
901
902Fill in #VmaVirtualBlockCreateInfo structure and use vmaCreateVirtualBlock() to create it. Use vmaDestroyVirtualBlock() to destroy it.
903For more information, see documentation chapter \ref virtual_allocator.
904
905This object is not thread-safe - should not be used from multiple threads simultaneously, must be synchronized externally.
906*/
908
909/** @} */
910
911/**
912\addtogroup group_init
913@{
914*/
915
916/// Callback function called after successful vkAllocateMemory.
918 VmaAllocator VMA_NOT_NULL allocator,
919 uint32_t memoryType,
920 VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
921 VkDeviceSize size,
922 void* VMA_NULLABLE pUserData);
923
924/// Callback function called before vkFreeMemory.
926 VmaAllocator VMA_NOT_NULL allocator,
927 uint32_t memoryType,
928 VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
929 VkDeviceSize size,
930 void* VMA_NULLABLE pUserData);
931
932/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`.
933
934Provided for informative purpose, e.g. to gather statistics about number of
935allocations or total amount of memory allocated in Vulkan.
936
937Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
938*/
940{
941 /// Optional, can be null.
943 /// Optional, can be null.
945 /// Optional, can be null.
948
949/** \brief Pointers to some Vulkan functions - a subset used by the library.
950
951Used in VmaAllocatorCreateInfo::pVulkanFunctions.
952*/
953typedef struct VmaVulkanFunctions
954{
955 /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.
957 /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.
976#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
977 /// Fetch "vkGetBufferMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetBufferMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension.
979 /// Fetch "vkGetImageMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetImageMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension.
981#endif
982#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
983 /// Fetch "vkBindBufferMemory2" on Vulkan >= 1.1, fetch "vkBindBufferMemory2KHR" when using VK_KHR_bind_memory2 extension.
985 /// Fetch "vkBindImageMemory2" on Vulkan >= 1.1, fetch "vkBindImageMemory2KHR" when using VK_KHR_bind_memory2 extension.
987#endif
988#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
990#endif
991#if VMA_VULKAN_VERSION >= 1003000
992 /// Fetch from "vkGetDeviceBufferMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceBufferMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
993 PFN_vkGetDeviceBufferMemoryRequirements VMA_NULLABLE vkGetDeviceBufferMemoryRequirements;
994 /// Fetch from "vkGetDeviceImageMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceImageMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
995 PFN_vkGetDeviceImageMemoryRequirements VMA_NULLABLE vkGetDeviceImageMemoryRequirements;
996#endif
998
999/// Description of an Allocator to be created.
1001{
1002 /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
1004 /// Vulkan physical device.
1005 /** It must be valid throughout whole lifetime of created allocator. */
1007 /// Vulkan device.
1008 /** It must be valid throughout whole lifetime of created allocator. */
1010 /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.
1011 /** Set to 0 to use default, which is currently 256 MiB. */
1013 /// Custom CPU memory allocation callbacks. Optional.
1014 /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */
1016 /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.
1017 /** Optional, can be null. */
1019 /** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
1020
1021 If not NULL, it must be a pointer to an array of
1022 `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on
1023 maximum number of bytes that can be allocated out of particular Vulkan memory
1024 heap.
1025
1026 Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that
1027 heap. This is also the default in case of `pHeapSizeLimit` = NULL.
1028
1029 If there is a limit defined for a heap:
1030
1031 - If user tries to allocate more memory from that heap using this allocator,
1032 the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
1033 - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the
1034 value of this limit will be reported instead when using vmaGetMemoryProperties().
1035
1036 Warning! Using this feature may not be equivalent to installing a GPU with a
1037 smaller amount of memory, because the graphics driver doesn't necessarily fail new
1038 allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is
1039 exceeded. It may return success and just silently migrate some device memory
1040 blocks to system RAM. This driver behavior can also be controlled using
1041 VK_AMD_memory_overallocation_behavior extension.
1042 */
1043 const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;
1044
1045 /** \brief Pointers to Vulkan functions. Can be null.
1046
1047 For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions).
1048 */
1050 /** \brief Handle to Vulkan instance object.
1051
1052 Starting from version 3.0.0 this member is no longer optional, it must be set!
1053 */
1055 /** \brief Optional. The highest version of Vulkan that the application is designed to use.
1056
1057 It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`.
1058 The patch version number specified is ignored. Only the major and minor versions are considered.
1059 It must be less than or equal (preferably equal) to the value passed to `vkCreateInstance` as `VkApplicationInfo::apiVersion`.
1060 Only versions 1.0, 1.1, 1.2, 1.3 are supported by the current implementation.
1061 Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`.
1062 */
1064#if VMA_EXTERNAL_MEMORY
1065 /** \brief Either null or a pointer to an array of external memory handle types for each Vulkan memory type.
1066
1067 If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryTypeCount`
1068 elements, defining external memory handle types of particular Vulkan memory type,
1069 to be passed using `VkExportMemoryAllocateInfoKHR`.
1070
1071 Any of the elements may be equal to 0, which means not to use `VkExportMemoryAllocateInfoKHR` on this memory type.
1072 This is also the default in case of `pTypeExternalMemoryHandleTypes` = NULL.
1073 */
1074 const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes;
1075#endif // #if VMA_EXTERNAL_MEMORY
1077
1078/// Information about existing #VmaAllocator object.
1079typedef struct VmaAllocatorInfo
1080{
1081 /** \brief Handle to Vulkan instance object.
1082
1083 This is the same value as has been passed through VmaAllocatorCreateInfo::instance.
1084 */
1086 /** \brief Handle to Vulkan physical device object.
1087
1088 This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice.
1089 */
1091 /** \brief Handle to Vulkan device object.
1092
1093 This is the same value as has been passed through VmaAllocatorCreateInfo::device.
1094 */
1097
1098/** @} */
1099
1100/**
1101\addtogroup group_stats
1102@{
1103*/
1104
1105/** \brief Calculated statistics of memory usage e.g. in a specific memory type, heap, custom pool, or total.
1106
1107These are fast to calculate.
1108See functions: vmaGetHeapBudgets(), vmaGetPoolStatistics().
1109*/
1110typedef struct VmaStatistics
1111{
1112 /** \brief Number of `VkDeviceMemory` objects - Vulkan memory blocks allocated.
1113 */
1115 /** \brief Number of #VmaAllocation objects allocated.
1116
1117 Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`.
1118 */
1120 /** \brief Number of bytes allocated in `VkDeviceMemory` blocks.
1121
1122 \note To avoid confusion, please be aware that what Vulkan calls an "allocation" - a whole `VkDeviceMemory` object
1123 (e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a "block" in VMA, while VMA calls
1124 "allocation" a #VmaAllocation object that represents a memory region sub-allocated from such block, usually for a single buffer or image.
1125 */
1127 /** \brief Total number of bytes occupied by all #VmaAllocation objects.
1128
1129 Always less than or equal to `blockBytes`.
1130 Difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan
1131 but unused by any #VmaAllocation.
1132 */
1135
1136/** \brief More detailed statistics than #VmaStatistics.
1137
1138These are slower to calculate. Use for debugging purposes.
1139See functions: vmaCalculateStatistics(), vmaCalculatePoolStatistics().
1140
1141Previous version of the statistics API provided averages, but they have been removed
1142because they can be easily calculated as:
1143
1144\code
1145VkDeviceSize allocationSizeAvg = detailedStats.statistics.allocationBytes / detailedStats.statistics.allocationCount;
1146VkDeviceSize unusedBytes = detailedStats.statistics.blockBytes - detailedStats.statistics.allocationBytes;
1147VkDeviceSize unusedRangeSizeAvg = unusedBytes / detailedStats.unusedRangeCount;
1148\endcode
1149*/
1151{
1152 /// Basic statistics.
1154 /// Number of free ranges of memory between allocations.
1156 /// Smallest allocation size. `VK_WHOLE_SIZE` if there are 0 allocations.
1158 /// Largest allocation size. 0 if there are 0 allocations.
1160 /// Smallest empty range size. `VK_WHOLE_SIZE` if there are 0 empty ranges.
1162 /// Largest empty range size. 0 if there are 0 empty ranges.
1165
1166/** \brief General statistics from current state of the Allocator -
1167total memory usage across all memory heaps and types.
1168
1169These are slower to calculate. Use for debugging purposes.
1170See function vmaCalculateStatistics().
1171*/
1173{
1178
1179/** \brief Statistics of current memory usage and available budget for a specific memory heap.
1180
1181These are fast to calculate.
1182See function vmaGetHeapBudgets().
1183*/
1184typedef struct VmaBudget
1185{
1186 /** \brief Statistics fetched from the library.
1187 */
1189 /** \brief Estimated current memory usage of the program, in bytes.
1190
1191 Fetched from system using VK_EXT_memory_budget extension if enabled.
1192
1193 It might be different than `statistics.blockBytes` (usually higher) due to additional implicit objects
1194 also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or
1195 `VkDeviceMemory` blocks allocated outside of this library, if any.
1196 */
1198 /** \brief Estimated amount of memory available to the program, in bytes.
1199
1200 Fetched from system using VK_EXT_memory_budget extension if enabled.
1201
1202 It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors
1203 external to the program, decided by the operating system.
1204 Difference `budget - usage` is the amount of additional memory that can probably
1205 be allocated without problems. Exceeding the budget may result in various problems.
1206 */
1209
1210/** @} */
1211
1212/**
1213\addtogroup group_alloc
1214@{
1215*/
1216
1217/** \brief Parameters of new #VmaAllocation.
1218
1219To be used with functions like vmaCreateBuffer(), vmaCreateImage(), and many others.
1220*/
1222{
1223 /// Use #VmaAllocationCreateFlagBits enum.
1225 /** \brief Intended usage of memory.
1226
1227 You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other way. \n
1228 If `pool` is not null, this member is ignored.
1229 */
1231 /** \brief Flags that must be set in a Memory Type chosen for an allocation.
1232
1233 Leave 0 if you specify memory requirements in other way. \n
1234 If `pool` is not null, this member is ignored.*/
1236 /** \brief Flags that preferably should be set in a memory type chosen for an allocation.
1237
1238 Set to 0 if no additional flags are preferred. \n
1239 If `pool` is not null, this member is ignored. */
1241 /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation.
1242
1243 Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if
1244 it meets other requirements specified by this structure, with no further
1245 restrictions on memory type index. \n
1246 If `pool` is not null, this member is ignored.
1247 */
1249 /** \brief Pool that this allocation should be created in.
1250
1251 Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members:
1252 `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored.
1253 */
1255 /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().
1256
1257 If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either
1258 null or pointer to a null-terminated string. The string will be then copied to
1259 internal buffer, so it doesn't need to be valid after allocation call.
1260 */
1262 /** \brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
1263
1264 It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object
1265 and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
1266 Otherwise, it has the priority of a memory block where it is placed and this variable is ignored.
1267 */
1270
1271/// Describes parameter of created #VmaPool.
1272typedef struct VmaPoolCreateInfo
1273{
1274 /** \brief Vulkan memory type index to allocate this pool from.
1275 */
1277 /** \brief Use combination of #VmaPoolCreateFlagBits.
1278 */
1280 /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional.
1281
1282 Specify nonzero to set explicit, constant size of memory blocks used by this
1283 pool.
1284
1285 Leave 0 to use default and let the library manage block sizes automatically.
1286 Sizes of particular blocks may vary.
1287 In this case, the pool will also support dedicated allocations.
1288 */
1290 /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty.
1291
1292 Set to 0 to have no preallocated blocks and allow the pool to be completely empty.
1293 */
1295 /** \brief Maximum number of blocks that can be allocated in this pool. Optional.
1296
1297 Set to 0 to use default, which is `SIZE_MAX`, which means no limit.
1298
1299 Set to same value as VmaPoolCreateInfo::minBlockCount to have fixed amount of memory allocated
1300 throughout whole lifetime of this pool.
1301 */
1303 /** \brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations.
1304
1305 It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object.
1306 Otherwise, this variable is ignored.
1307 */
1309 /** \brief Additional minimum alignment to be used for all allocations created from this pool. Can be 0.
1310
1311 Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two.
1312 It can be useful in cases where alignment returned by Vulkan by functions like `vkGetBufferMemoryRequirements` is not enough,
1313 e.g. when doing interop with OpenGL.
1314 */
1316 /** \brief Additional `pNext` chain to be attached to `VkMemoryAllocateInfo` used for every allocation made by this pool. Optional.
1317
1318 Optional, can be null. If not null, it must point to a `pNext` chain of structures that can be attached to `VkMemoryAllocateInfo`.
1319 It can be useful for special needs such as adding `VkExportMemoryAllocateInfoKHR`.
1320 Structures pointed by this member must remain alive and unchanged for the whole lifetime of the custom pool.
1321
1322 Please note that some structures, e.g. `VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`,
1323 can be attached automatically by this library when using other, more convenient of its features.
1324 */
1327
1328/** @} */
1329
1330/**
1331\addtogroup group_alloc
1332@{
1333*/
1334
1335/// Parameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
1336typedef struct VmaAllocationInfo
1337{
1338 /** \brief Memory type index that this allocation was allocated from.
1339
1340 It never changes.
1341 */
1343 /** \brief Handle to Vulkan memory object.
1344
1345 Same memory object can be shared by multiple allocations.
1346
1347 It can change after the allocation is moved during \ref defragmentation.
1348 */
1350 /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation.
1351
1352 You usually don't need to use this offset. If you create a buffer or an image together with the allocation using e.g. function
1353 vmaCreateBuffer(), vmaCreateImage(), functions that operate on these resources refer to the beginning of the buffer or image,
1354 not entire device memory block. Functions like vmaMapMemory(), vmaBindBufferMemory() also refer to the beginning of the allocation
1355 and apply this offset automatically.
1356
1357 It can change after the allocation is moved during \ref defragmentation.
1358 */
1360 /** \brief Size of this allocation, in bytes.
1361
1362 It never changes.
1363
1364 \note Allocation size returned in this variable may be greater than the size
1365 requested for the resource e.g. as `VkBufferCreateInfo::size`. Whole size of the
1366 allocation is accessible for operations on memory e.g. using a pointer after
1367 mapping with vmaMapMemory(), but operations on the resource e.g. using
1368 `vkCmdCopyBuffer` must be limited to the size of the resource.
1369 */
1371 /** \brief Pointer to the beginning of this allocation as mapped data.
1372
1373 If the allocation hasn't been mapped using vmaMapMemory() and hasn't been
1374 created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null.
1375
1376 It can change after call to vmaMapMemory(), vmaUnmapMemory().
1377 It can also change after the allocation is moved during \ref defragmentation.
1378 */
1380 /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
1381
1382 It can change after call to vmaSetAllocationUserData() for this allocation.
1383 */
1385 /** \brief Custom allocation name that was set with vmaSetAllocationName().
1386
1387 It can change after call to vmaSetAllocationName() for this allocation.
1388
1389 Another way to set custom name is to pass it in VmaAllocationCreateInfo::pUserData with
1390 additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED].
1391 */
1392 const char* VMA_NULLABLE pName;
1394
1395/** \brief Parameters for defragmentation.
1396
1397To be used with function vmaBeginDefragmentation().
1398*/
1400{
1401 /// \brief Use combination of #VmaDefragmentationFlagBits.
1403 /** \brief Custom pool to be defragmented.
1404
1405 If null then default pools will undergo defragmentation process.
1406 */
1408 /** \brief Maximum numbers of bytes that can be copied during single pass, while moving allocations to different places.
1409
1410 `0` means no limit.
1411 */
1413 /** \brief Maximum number of allocations that can be moved during single pass to a different place.
1414
1415 `0` means no limit.
1416 */
1419
1420/// Single move of an allocation to be done for defragmentation.
1422{
1423 /// Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY. You can modify it.
1425 /// Allocation that should be moved.
1427 /** \brief Temporary allocation pointing to destination memory that will replace `srcAllocation`.
1428
1429 \warning Do not store this allocation in your data structures! It exists only temporarily, for the duration of the defragmentation pass,
1430 to be used for binding new buffer/image to the destination memory using e.g. vmaBindBufferMemory().
1431 vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory.
1432 */
1435
1436/** \brief Parameters for incremental defragmentation steps.
1437
1438To be used with function vmaBeginDefragmentationPass().
1439*/
1441{
1442 /// Number of elements in the `pMoves` array.
1444 /** \brief Array of moves to be performed by the user in the current defragmentation pass.
1445
1446 Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass().
1447
1448 For each element, you should:
1449
1450 1. Create a new buffer/image in the place pointed by VmaDefragmentationMove::dstMemory + VmaDefragmentationMove::dstOffset.
1451 2. Copy data from the VmaDefragmentationMove::srcAllocation e.g. using `vkCmdCopyBuffer`, `vkCmdCopyImage`.
1452 3. Make sure these commands finished executing on the GPU.
1453 4. Destroy the old buffer/image.
1454
1455 Only then you can finish defragmentation pass by calling vmaEndDefragmentationPass().
1456 After this call, the allocation will point to the new place in memory.
1457
1458 Alternatively, if you cannot move specific allocation, you can set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
1459
1460 Alternatively, if you decide you want to completely remove the allocation:
1461
1462 1. Destroy its buffer/image.
1463 2. Set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
1464
1465 Then, after vmaEndDefragmentationPass() the allocation will be freed.
1466 */
1469
1470/// Statistics returned for defragmentation process in function vmaEndDefragmentation().
1472{
1473 /// Total number of bytes that have been copied while moving allocations to different places.
1475 /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects.
1477 /// Number of allocations that have been moved to different places.
1479 /// Number of empty `VkDeviceMemory` objects that have been released to the system.
1482
1483/** @} */
1484
1485/**
1486\addtogroup group_virtual
1487@{
1488*/
1489
1490/// Parameters of created #VmaVirtualBlock object to be passed to vmaCreateVirtualBlock().
1492{
1493 /** \brief Total size of the virtual block.
1494
1495 Sizes can be expressed in bytes or any units you want as long as you are consistent in using them.
1496 For example, if you allocate from some array of structures, 1 can mean single instance of entire structure.
1497 */
1499
1500 /** \brief Use combination of #VmaVirtualBlockCreateFlagBits.
1501 */
1503
1504 /** \brief Custom CPU memory allocation callbacks. Optional.
1505
1506 Optional, can be null. When specified, they will be used for all CPU-side memory allocations.
1507 */
1510
1511/// Parameters of created virtual allocation to be passed to vmaVirtualAllocate().
1513{
1514 /** \brief Size of the allocation.
1515
1516 Cannot be zero.
1517 */
1519 /** \brief Required alignment of the allocation. Optional.
1520
1521 Must be power of two. Special value 0 has the same meaning as 1 - means no special alignment is required, so allocation can start at any offset.
1522 */
1524 /** \brief Use combination of #VmaVirtualAllocationCreateFlagBits.
1525 */
1527 /** \brief Custom pointer to be associated with the allocation. Optional.
1528
1529 It can be any value and can be used for user-defined purposes. It can be fetched or changed later.
1530 */
1533
1534/// Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo().
1536{
1537 /** \brief Offset of the allocation.
1538
1539 Offset at which the allocation was made.
1540 */
1542 /** \brief Size of the allocation.
1543
1544 Same value as passed in VmaVirtualAllocationCreateInfo::size.
1545 */
1547 /** \brief Custom pointer associated with the allocation.
1548
1549 Same value as passed in VmaVirtualAllocationCreateInfo::pUserData or to vmaSetVirtualAllocationUserData().
1550 */
1553
1554/** @} */
1555
1556#endif // _VMA_DATA_TYPES_DECLARATIONS
1557
1558#ifndef _VMA_FUNCTION_HEADERS
1559
1560/**
1561\addtogroup group_init
1562@{
1563*/
1564
1565/// Creates #VmaAllocator object.
1567 const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
1569
1570/// Destroys allocator object.
1572 VmaAllocator VMA_NULLABLE allocator);
1573
1574/** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc.
1575
1576It might be useful if you want to keep just the #VmaAllocator handle and fetch other required handles to
1577`VkPhysicalDevice`, `VkDevice` etc. every time using this function.
1578*/
1580 VmaAllocator VMA_NOT_NULL allocator,
1581 VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);
1582
1583/**
1584PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
1585You can access it here, without fetching it again on your own.
1586*/
1588 VmaAllocator VMA_NOT_NULL allocator,
1589 const VkPhysicalDeviceProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceProperties);
1590
1591/**
1592PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
1593You can access it here, without fetching it again on your own.
1594*/
1596 VmaAllocator VMA_NOT_NULL allocator,
1597 const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);
1598
1599/**
1600\brief Given Memory Type Index, returns Property Flags of this memory type.
1601
1602This is just a convenience function. Same information can be obtained using
1603vmaGetMemoryProperties().
1604*/
1606 VmaAllocator VMA_NOT_NULL allocator,
1607 uint32_t memoryTypeIndex,
1609
1610/** \brief Sets index of the current frame.
1611*/
1613 VmaAllocator VMA_NOT_NULL allocator,
1614 uint32_t frameIndex);
1615
1616/** @} */
1617
1618/**
1619\addtogroup group_stats
1620@{
1621*/
1622
1623/** \brief Retrieves statistics from current state of the Allocator.
1624
1625This function is called "calculate" not "get" because it has to traverse all
1626internal data structures, so it may be quite slow. Use it for debugging purposes.
1627For faster but more brief statistics suitable to be called every frame or every allocation,
1628use vmaGetHeapBudgets().
1629
1630Note that when using allocator from multiple threads, returned information may immediately
1631become outdated.
1632*/
1634 VmaAllocator VMA_NOT_NULL allocator,
1636
1637/** \brief Retrieves information about current memory usage and budget for all memory heaps.
1638
1639\param allocator
1640\param[out] pBudgets Must point to array with number of elements at least equal to number of memory heaps in physical device used.
1641
1642This function is called "get" not "calculate" because it is very fast, suitable to be called
1643every frame or every allocation. For more detailed statistics use vmaCalculateStatistics().
1644
1645Note that when using allocator from multiple threads, returned information may immediately
1646become outdated.
1647*/
1649 VmaAllocator VMA_NOT_NULL allocator,
1650 VmaBudget* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pBudgets);
1651
1652/** @} */
1653
1654/**
1655\addtogroup group_alloc
1656@{
1657*/
1658
1659/**
1660\brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
1661
1662This algorithm tries to find a memory type that:
1663
1664- Is allowed by memoryTypeBits.
1665- Contains all the flags from pAllocationCreateInfo->requiredFlags.
1666- Matches intended usage.
1667- Has as many flags from pAllocationCreateInfo->preferredFlags as possible.
1668
1669\return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such result
1670from this function or any other allocating function probably means that your
1671device doesn't support any memory type with requested features for the specific
1672type of resource you want to use it for. Please check parameters of your
1673resource, like image layout (OPTIMAL versus LINEAR) or mip level count.
1674*/
1676 VmaAllocator VMA_NOT_NULL allocator,
1677 uint32_t memoryTypeBits,
1678 const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
1679 uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
1680
1681/**
1682\brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
1683
1684It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex.
1685It internally creates a temporary, dummy buffer that never has memory bound.
1686*/
1688 VmaAllocator VMA_NOT_NULL allocator,
1689 const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
1690 const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
1691 uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
1692
1693/**
1694\brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
1695
1696It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex.
1697It internally creates a temporary, dummy image that never has memory bound.
1698*/
1700 VmaAllocator VMA_NOT_NULL allocator,
1701 const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
1702 const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
1703 uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
1704
1705/** \brief Allocates Vulkan device memory and creates #VmaPool object.
1706
1707\param allocator Allocator object.
1708\param pCreateInfo Parameters of pool to create.
1709\param[out] pPool Handle to created pool.
1710*/
1712 VmaAllocator VMA_NOT_NULL allocator,
1713 const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,
1715
1716/** \brief Destroys #VmaPool object and frees Vulkan device memory.
1717*/
1719 VmaAllocator VMA_NOT_NULL allocator,
1720 VmaPool VMA_NULLABLE pool);
1721
1722/** @} */
1723
1724/**
1725\addtogroup group_stats
1726@{
1727*/
1728
1729/** \brief Retrieves statistics of existing #VmaPool object.
1730
1731\param allocator Allocator object.
1732\param pool Pool object.
1733\param[out] pPoolStats Statistics of specified pool.
1734*/
1736 VmaAllocator VMA_NOT_NULL allocator,
1737 VmaPool VMA_NOT_NULL pool,
1738 VmaStatistics* VMA_NOT_NULL pPoolStats);
1739
1740/** \brief Retrieves detailed statistics of existing #VmaPool object.
1741
1742\param allocator Allocator object.
1743\param pool Pool object.
1744\param[out] pPoolStats Statistics of specified pool.
1745*/
1747 VmaAllocator VMA_NOT_NULL allocator,
1748 VmaPool VMA_NOT_NULL pool,
1750
1751/** @} */
1752
1753/**
1754\addtogroup group_alloc
1755@{
1756*/
1757
1758/** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions.
1759
1760Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
1761`VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is
1762`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
1763
1764Possible return values:
1765
1766- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool.
1767- `VK_SUCCESS` - corruption detection has been performed and succeeded.
1768- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
1769 `VMA_ASSERT` is also fired in that case.
1770- Other value: Error returned by Vulkan, e.g. memory mapping failure.
1771*/
1773 VmaAllocator VMA_NOT_NULL allocator,
1774 VmaPool VMA_NOT_NULL pool);
1775
1776/** \brief Retrieves name of a custom pool.
1777
1778After the call `ppName` is either null or points to an internally-owned null-terminated string
1779containing name of the pool that was previously set. The pointer becomes invalid when the pool is
1780destroyed or its name is changed using vmaSetPoolName().
1781*/
1783 VmaAllocator VMA_NOT_NULL allocator,
1784 VmaPool VMA_NOT_NULL pool,
1785 const char* VMA_NULLABLE* VMA_NOT_NULL ppName);
1786
1787/** \brief Sets name of a custom pool.
1788
1789`pName` can be either null or pointer to a null-terminated string with new name for the pool.
1790Function makes internal copy of the string, so it can be changed or freed immediately after this call.
1791*/
1793 VmaAllocator VMA_NOT_NULL allocator,
1794 VmaPool VMA_NOT_NULL pool,
1795 const char* VMA_NULLABLE pName);
1796
1797/** \brief General purpose memory allocation.
1798
1799\param allocator
1800\param pVkMemoryRequirements
1801\param pCreateInfo
1802\param[out] pAllocation Handle to allocated memory.
1803\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
1804
1805You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
1806
1807It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(),
1808vmaCreateBuffer(), vmaCreateImage() instead whenever possible.
1809*/
1811 VmaAllocator VMA_NOT_NULL allocator,
1812 const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
1813 const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
1815 VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
1816
1817/** \brief General purpose memory allocation for multiple allocation objects at once.
1818
1819\param allocator Allocator object.
1820\param pVkMemoryRequirements Memory requirements for each allocation.
1821\param pCreateInfo Creation parameters for each allocation.
1822\param allocationCount Number of allocations to make.
1823\param[out] pAllocations Pointer to array that will be filled with handles to created allocations.
1824\param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations.
1825
1826You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
1827
1828Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding.
1829It is just a general purpose allocation function able to make multiple allocations at once.
1830It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times.
1831
1832All allocations are made using same parameters. All of them are created out of the same memory pool and type.
1833If any allocation fails, all allocations already made within this function call are also freed, so that when
1834returned result is not `VK_SUCCESS`, `pAllocations` array is always entirely filled with `VK_NULL_HANDLE`.
1835*/
1837 VmaAllocator VMA_NOT_NULL allocator,
1838 const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
1839 const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
1840 size_t allocationCount,
1841 VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
1842 VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);
1843
1844/** \brief Allocates memory suitable for given `VkBuffer`.
1845
1846\param allocator
1847\param buffer
1848\param pCreateInfo
1849\param[out] pAllocation Handle to allocated memory.
1850\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
1851
1852It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindBufferMemory().
1853
1854This is a special-purpose function. In most cases you should use vmaCreateBuffer().
1855
1856You must free the allocation using vmaFreeMemory() when no longer needed.
1857*/
1859 VmaAllocator VMA_NOT_NULL allocator,
1860 VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
1861 const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
1863 VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
1864
1865/** \brief Allocates memory suitable for given `VkImage`.
1866
1867\param allocator
1868\param image
1869\param pCreateInfo
1870\param[out] pAllocation Handle to allocated memory.
1871\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
1872
1873It only creates #VmaAllocation. To bind the memory to the image, use vmaBindImageMemory().
1874
1875This is a special-purpose function. In most cases you should use vmaCreateImage().
1876
1877You must free the allocation using vmaFreeMemory() when no longer needed.
1878*/
1880 VmaAllocator VMA_NOT_NULL allocator,
1881 VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
1882 const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
1884 VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
1885
1886/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
1887
1888Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped.
1889*/
1891 VmaAllocator VMA_NOT_NULL allocator,
1892 const VmaAllocation VMA_NULLABLE allocation);
1893
1894/** \brief Frees memory and destroys multiple allocations.
1895
1896Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding.
1897It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(),
1898vmaAllocateMemoryPages() and other functions.
1899It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times.
1900
1901Allocations in `pAllocations` array can come from any memory pools and types.
1902Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped.
1903*/
1905 VmaAllocator VMA_NOT_NULL allocator,
1906 size_t allocationCount,
1907 const VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);
1908
1909/** \brief Returns current information about specified allocation.
1910
1911Current parameters of given allocation are returned in `pAllocationInfo`.
1912
1913Although this function doesn't lock any mutex, so it should be quite efficient,
1914you should avoid calling it too often.
1915You can retrieve same VmaAllocationInfo structure while creating your resource, from function
1916vmaCreateBuffer(), vmaCreateImage(). You can remember it if you are sure parameters don't change
1917(e.g. due to defragmentation).
1918*/
1920 VmaAllocator VMA_NOT_NULL allocator,
1921 VmaAllocation VMA_NOT_NULL allocation,
1922 VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);
1923
1924/** \brief Sets pUserData in given allocation to new value.
1925
1926The value of pointer `pUserData` is copied to allocation's `pUserData`.
1927It is opaque, so you can use it however you want - e.g.
1928as a pointer, ordinal number or some handle to you own data.
1929*/
1931 VmaAllocator VMA_NOT_NULL allocator,
1932 VmaAllocation VMA_NOT_NULL allocation,
1933 void* VMA_NULLABLE pUserData);
1934
1935/** \brief Sets pName in given allocation to new value.
1936
1937`pName` must be either null, or pointer to a null-terminated string. The function
1938makes local copy of the string and sets it as allocation's `pName`. String
1939passed as pName doesn't need to be valid for whole lifetime of the allocation -
1940you can free it after this call. String previously pointed by allocation's
1941`pName` is freed from memory.
1942*/
1944 VmaAllocator VMA_NOT_NULL allocator,
1945 VmaAllocation VMA_NOT_NULL allocation,
1946 const char* VMA_NULLABLE pName);
1947
1948/**
1949\brief Given an allocation, returns Property Flags of its memory type.
1950
1951This is just a convenience function. Same information can be obtained using
1952vmaGetAllocationInfo() + vmaGetMemoryProperties().
1953*/
1955 VmaAllocator VMA_NOT_NULL allocator,
1956 VmaAllocation VMA_NOT_NULL allocation,
1958
1959/** \brief Maps memory represented by given allocation and returns pointer to it.
1960
1961Maps memory represented by given allocation to make it accessible to CPU code.
1962When succeeded, `*ppData` contains pointer to first byte of this memory.
1963
1964\warning
1965If the allocation is part of a bigger `VkDeviceMemory` block, returned pointer is
1966correctly offset to the beginning of the region assigned to this particular allocation.
1967Unlike the result of `vkMapMemory`, it points to the allocation, not to the beginning of the whole block.
1968You should not add VmaAllocationInfo::offset to it!
1969
1970Mapping is internally reference-counted and synchronized, so despite raw Vulkan
1971function `vkMapMemory()` cannot be used to map same block of `VkDeviceMemory`
1972multiple times simultaneously, it is safe to call this function on allocations
1973assigned to the same memory block. Actual Vulkan memory will be mapped on first
1974mapping and unmapped on last unmapping.
1975
1976If the function succeeded, you must call vmaUnmapMemory() to unmap the
1977allocation when mapping is no longer needed or before freeing the allocation, at
1978the latest.
1979
1980It is also safe to call this function multiple times on the same allocation. You
1981must call vmaUnmapMemory() same number of times as you called vmaMapMemory().
1982
1983It is also safe to call this function on allocation created with
1984#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.
1985You must still call vmaUnmapMemory() same number of times as you called
1986vmaMapMemory(). You must not call vmaUnmapMemory() additional time to free the
1987"0-th" mapping made automatically due to #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
1988
1989This function fails when used on allocation made in memory type that is not
1990`HOST_VISIBLE`.
1991
1992This function doesn't automatically flush or invalidate caches.
1993If the allocation is made from a memory type that is not `HOST_COHERENT`,
1994you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
1995*/
1997 VmaAllocator VMA_NOT_NULL allocator,
1998 VmaAllocation VMA_NOT_NULL allocation,
1999 void* VMA_NULLABLE* VMA_NOT_NULL ppData);
2000
2001/** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
2002
2003For details, see description of vmaMapMemory().
2004
2005This function doesn't automatically flush or invalidate caches.
2006If the allocation is made from a memory type that is not `HOST_COHERENT`,
2007you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
2008*/
2010 VmaAllocator VMA_NOT_NULL allocator,
2011 VmaAllocation VMA_NOT_NULL allocation);
2012
2013/** \brief Flushes memory of given allocation.
2014
2015Calls `vkFlushMappedMemoryRanges()` for memory associated with given range of given allocation.
2016It needs to be called after writing to a mapped memory for memory types that are not `HOST_COHERENT`.
2017Unmap operation doesn't do that automatically.
2018
2019- `offset` must be relative to the beginning of allocation.
2020- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of given allocation.
2021- `offset` and `size` don't have to be aligned.
2022 They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
2023- If `size` is 0, this call is ignored.
2024- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
2025 this call is ignored.
2026
2027Warning! `offset` and `size` are relative to the contents of given `allocation`.
2028If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
2029Do not pass allocation's offset as `offset`!!!
2030
2031This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
2032called, otherwise `VK_SUCCESS`.
2033*/
2035 VmaAllocator VMA_NOT_NULL allocator,
2036 VmaAllocation VMA_NOT_NULL allocation,
2037 VkDeviceSize offset,
2038 VkDeviceSize size);
2039
2040/** \brief Invalidates memory of given allocation.
2041
2042Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given range of given allocation.
2043It needs to be called before reading from a mapped memory for memory types that are not `HOST_COHERENT`.
2044Map operation doesn't do that automatically.
2045
2046- `offset` must be relative to the beginning of allocation.
2047- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of given allocation.
2048- `offset` and `size` don't have to be aligned.
2049 They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
2050- If `size` is 0, this call is ignored.
2051- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
2052 this call is ignored.
2053
2054Warning! `offset` and `size` are relative to the contents of given `allocation`.
2055If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
2056Do not pass allocation's offset as `offset`!!!
2057
2058This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if
2059it is called, otherwise `VK_SUCCESS`.
2060*/
2062 VmaAllocator VMA_NOT_NULL allocator,
2063 VmaAllocation VMA_NOT_NULL allocation,
2064 VkDeviceSize offset,
2065 VkDeviceSize size);
2066
2067/** \brief Flushes memory of given set of allocations.
2068
2069Calls `vkFlushMappedMemoryRanges()` for memory associated with given ranges of given allocations.
2070For more information, see documentation of vmaFlushAllocation().
2071
2072\param allocator
2073\param allocationCount
2074\param allocations
2075\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all offsets are zero.
2076\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
2077
2078This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
2079called, otherwise `VK_SUCCESS`.
2080*/
2082 VmaAllocator VMA_NOT_NULL allocator,
2083 uint32_t allocationCount,
2084 const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
2085 const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
2086 const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
2087
2088/** \brief Invalidates memory of given set of allocations.
2089
2090Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations.
2091For more information, see documentation of vmaInvalidateAllocation().
2092
2093\param allocator
2094\param allocationCount
2095\param allocations
2096\param offsets If not null, it must point to an array of offsets of regions to invalidate, relative to the beginning of respective allocations. Null means all offsets are zero.
2097\param sizes If not null, it must point to an array of sizes of regions to invalidate in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
2098
2099This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is
2100called, otherwise `VK_SUCCESS`.
2101*/
2103 VmaAllocator VMA_NOT_NULL allocator,
2104 uint32_t allocationCount,
2105 const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
2106 const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
2107 const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
2108
2109/** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions.
2110
2111\param allocator
2112\param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked.
2113
2114Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
2115`VMA_DEBUG_MARGIN` is defined to nonzero and only for memory types that are
2116`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
2117
2118Possible return values:
2119
2120- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types.
2121- `VK_SUCCESS` - corruption detection has been performed and succeeded.
2122- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
2123 `VMA_ASSERT` is also fired in that case.
2124- Other value: Error returned by Vulkan, e.g. memory mapping failure.
2125*/
2127 VmaAllocator VMA_NOT_NULL allocator,
2128 uint32_t memoryTypeBits);
2129
2130/** \brief Begins defragmentation process.
2131
2132\param allocator Allocator object.
2133\param pInfo Structure filled with parameters of defragmentation.
2134\param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation.
2135\returns
2136- `VK_SUCCESS` if defragmentation can begin.
2137- `VK_ERROR_FEATURE_NOT_PRESENT` if defragmentation is not supported.
2138
2139For more information about defragmentation, see documentation chapter:
2140[Defragmentation](@ref defragmentation).
2141*/
2143 VmaAllocator VMA_NOT_NULL allocator,
2146
2147/** \brief Ends defragmentation process.
2148
2149\param allocator Allocator object.
2150\param context Context object that has been created by vmaBeginDefragmentation().
2151\param[out] pStats Optional stats for the defragmentation. Can be null.
2152
2153Use this function to finish defragmentation started by vmaBeginDefragmentation().
2154*/
2156 VmaAllocator VMA_NOT_NULL allocator,
2159
2160/** \brief Starts single defragmentation pass.
2161
2162\param allocator Allocator object.
2163\param context Context object that has been created by vmaBeginDefragmentation().
2164\param[out] pPassInfo Computed information for the current pass.
2165\returns
2166- `VK_SUCCESS` if no more moves are possible. Then you can omit call to vmaEndDefragmentationPass() and simply end whole defragmentation.
2167- `VK_INCOMPLETE` if there are pending moves returned in `pPassInfo`. You need to perform them, call vmaEndDefragmentationPass(),
2168 and then preferably try another pass with vmaBeginDefragmentationPass().
2169*/
2171 VmaAllocator VMA_NOT_NULL allocator,
2174
2175/** \brief Ends single defragmentation pass.
2176
2177\param allocator Allocator object.
2178\param context Context object that has been created by vmaBeginDefragmentation().
2179\param pPassInfo Computed information for the current pass, filled by vmaBeginDefragmentationPass() and possibly modified by you.
2180
2181Returns `VK_SUCCESS` if no more moves are possible or `VK_INCOMPLETE` if more defragmentations are possible.
2182
2183Ends incremental defragmentation pass and commits all defragmentation moves from `pPassInfo`.
2184After this call:
2185
2186- Allocations at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY
2187 (which is the default) will be pointing to the new destination place.
2188- Allocation at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY
2189 will be freed.
2190
2191If no more moves are possible you can end whole defragmentation.
2192*/
2194 VmaAllocator VMA_NOT_NULL allocator,
2197
2198/** \brief Binds buffer to allocation.
2199
2200Binds specified buffer to region of memory represented by specified allocation.
2201Gets `VkDeviceMemory` handle and offset from the allocation.
2202If you want to create a buffer, allocate memory for it and bind them together separately,
2203you should use this function for binding instead of standard `vkBindBufferMemory()`,
2204because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
2205allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
2206(which is illegal in Vulkan).
2207
2208It is recommended to use function vmaCreateBuffer() instead of this one.
2209*/
2211 VmaAllocator VMA_NOT_NULL allocator,
2212 VmaAllocation VMA_NOT_NULL allocation,
2213 VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);
2214
2215/** \brief Binds buffer to allocation with additional parameters.
2216
2217\param allocator
2218\param allocation
2219\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
2220\param buffer
2221\param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null.
2222
2223This function is similar to vmaBindBufferMemory(), but it provides additional parameters.
2224
2225If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
2226or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
2227*/
2229 VmaAllocator VMA_NOT_NULL allocator,
2230 VmaAllocation VMA_NOT_NULL allocation,
2231 VkDeviceSize allocationLocalOffset,
2232 VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
2233 const void* VMA_NULLABLE pNext);
2234
2235/** \brief Binds image to allocation.
2236
2237Binds specified image to region of memory represented by specified allocation.
2238Gets `VkDeviceMemory` handle and offset from the allocation.
2239If you want to create an image, allocate memory for it and bind them together separately,
2240you should use this function for binding instead of standard `vkBindImageMemory()`,
2241because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
2242allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
2243(which is illegal in Vulkan).
2244
2245It is recommended to use function vmaCreateImage() instead of this one.
2246*/
2248 VmaAllocator VMA_NOT_NULL allocator,
2249 VmaAllocation VMA_NOT_NULL allocation,
2250 VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);
2251
2252/** \brief Binds image to allocation with additional parameters.
2253
2254\param allocator
2255\param allocation
2256\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
2257\param image
2258\param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null.
2259
2260This function is similar to vmaBindImageMemory(), but it provides additional parameters.
2261
2262If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
2263or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
2264*/
2266 VmaAllocator VMA_NOT_NULL allocator,
2267 VmaAllocation VMA_NOT_NULL allocation,
2268 VkDeviceSize allocationLocalOffset,
2269 VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
2270 const void* VMA_NULLABLE pNext);
2271
2272/** \brief Creates a new `VkBuffer`, allocates and binds memory for it.
2273
2274\param allocator
2275\param pBufferCreateInfo
2276\param pAllocationCreateInfo
2277\param[out] pBuffer Buffer that was created.
2278\param[out] pAllocation Allocation that was created.
2279\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
2280
2281This function automatically:
2282
2283-# Creates buffer.
2284-# Allocates appropriate memory for it.
2285-# Binds the buffer with the memory.
2286
2287If any of these operations fail, buffer and allocation are not created,
2288returned value is negative error code, `*pBuffer` and `*pAllocation` are null.
2289
2290If the function succeeded, you must destroy both buffer and allocation when you
2291no longer need them using either convenience function vmaDestroyBuffer() or
2292separately, using `vkDestroyBuffer()` and vmaFreeMemory().
2293
2294If #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used,
2295VK_KHR_dedicated_allocation extension is used internally to query driver whether
2296it requires or prefers the new buffer to have dedicated allocation. If yes,
2297and if dedicated allocation is possible
2298(#VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated
2299allocation for this buffer, just like when using
2300#VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
2301
2302\note This function creates a new `VkBuffer`. Sub-allocation of parts of one large buffer,
2303although recommended as a good practice, is out of scope of this library and could be implemented
2304by the user as a higher-level logic on top of VMA.
2305*/
2307 VmaAllocator VMA_NOT_NULL allocator,
2308 const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
2309 const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
2312 VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
2313
2314/** \brief Creates a buffer with additional minimum alignment.
2315
2316Similar to vmaCreateBuffer() but provides additional parameter `minAlignment` which allows to specify custom,
2317minimum alignment to be used when placing the buffer inside a larger memory block, which may be needed e.g.
2318for interop with OpenGL.
2319*/
2321 VmaAllocator VMA_NOT_NULL allocator,
2322 const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
2323 const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
2324 VkDeviceSize minAlignment,
2327 VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
2328
2329/** \brief Creates a new `VkBuffer`, binds already created memory for it.
2330
2331\param allocator
2332\param allocation Allocation that provides memory to be used for binding new buffer to it.
2333\param pBufferCreateInfo
2334\param[out] pBuffer Buffer that was created.
2335
2336This function automatically:
2337
2338-# Creates buffer.
2339-# Binds the buffer with the supplied memory.
2340
2341If any of these operations fail, buffer is not created,
2342returned value is negative error code and `*pBuffer` is null.
2343
2344If the function succeeded, you must destroy the buffer when you
2345no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding
2346allocation you can use convenience function vmaDestroyBuffer().
2347*/
2349 VmaAllocator VMA_NOT_NULL allocator,
2350 VmaAllocation VMA_NOT_NULL allocation,
2351 const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
2353
2354/** \brief Destroys Vulkan buffer and frees allocated memory.
2355
2356This is just a convenience function equivalent to:
2357
2358\code
2359vkDestroyBuffer(device, buffer, allocationCallbacks);
2360vmaFreeMemory(allocator, allocation);
2361\endcode
2362
2363It is safe to pass null as buffer and/or allocation.
2364*/
2366 VmaAllocator VMA_NOT_NULL allocator,
2367 VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
2368 VmaAllocation VMA_NULLABLE allocation);
2369
2370/// Function similar to vmaCreateBuffer().
2372 VmaAllocator VMA_NOT_NULL allocator,
2373 const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
2374 const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
2377 VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
2378
2379/// Function similar to vmaCreateAliasingBuffer().
2381 VmaAllocator VMA_NOT_NULL allocator,
2382 VmaAllocation VMA_NOT_NULL allocation,
2383 const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
2385
2386/** \brief Destroys Vulkan image and frees allocated memory.
2387
2388This is just a convenience function equivalent to:
2389
2390\code
2391vkDestroyImage(device, image, allocationCallbacks);
2392vmaFreeMemory(allocator, allocation);
2393\endcode
2394
It is safe to pass null as image and/or allocation.
2396*/
2398 VmaAllocator VMA_NOT_NULL allocator,
2399 VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
2400 VmaAllocation VMA_NULLABLE allocation);
2401
2402/** @} */
2403
2404/**
2405\addtogroup group_virtual
2406@{
2407*/
2408
2409/** \brief Creates new #VmaVirtualBlock object.
2410
2411\param pCreateInfo Parameters for creation.
2412\param[out] pVirtualBlock Returned virtual block object or `VMA_NULL` if creation failed.
2413*/
2415 const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
2417
2418/** \brief Destroys #VmaVirtualBlock object.
2419
2420Please note that you should consciously handle virtual allocations that could remain unfreed in the block.
2421You should either free them individually using vmaVirtualFree() or call vmaClearVirtualBlock()
2422if you are sure this is what you want. If you do neither, an assert is called.
2423
2424If you keep pointers to some additional metadata associated with your virtual allocations in their `pUserData`,
2425don't forget to free them.
2426*/
2428 VmaVirtualBlock VMA_NULLABLE virtualBlock);
2429
/** \brief Returns true if the #VmaVirtualBlock is empty - contains 0 virtual allocations and has all its space available for new allocations.
2431*/
2433 VmaVirtualBlock VMA_NOT_NULL virtualBlock);
2434
2435/** \brief Returns information about a specific virtual allocation within a virtual block, like its size and `pUserData` pointer.
2436*/
2438 VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2440
2441/** \brief Allocates new virtual allocation inside given #VmaVirtualBlock.
2442
2443If the allocation fails due to not enough free space available, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned
2444(despite the function doesn't ever allocate actual GPU memory).
`pAllocation` is then set to `VK_NULL_HANDLE` and `pOffset`, if not null, is set to `UINT64_MAX`.
2446
2447\param virtualBlock Virtual block
2448\param pCreateInfo Parameters for the allocation
2449\param[out] pAllocation Returned handle of the new allocation
2450\param[out] pOffset Returned offset of the new allocation. Optional, can be null.
2451*/
2453 VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2456 VkDeviceSize* VMA_NULLABLE pOffset);
2457
2458/** \brief Frees virtual allocation inside given #VmaVirtualBlock.
2459
2460It is correct to call this function with `allocation == VK_NULL_HANDLE` - it does nothing.
2461*/
2463 VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2465
2466/** \brief Frees all virtual allocations inside given #VmaVirtualBlock.
2467
2468You must either call this function or free each virtual allocation individually with vmaVirtualFree()
2469before destroying a virtual block. Otherwise, an assert is called.
2470
2471If you keep pointer to some additional metadata associated with your virtual allocation in its `pUserData`,
2472don't forget to free it as well.
2473*/
2475 VmaVirtualBlock VMA_NOT_NULL virtualBlock);
2476
2477/** \brief Changes custom pointer associated with given virtual allocation.
2478*/
2480 VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2482 void* VMA_NULLABLE pUserData);
2483
2484/** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
2485
2486This function is fast to call. For more detailed statistics, see vmaCalculateVirtualBlockStatistics().
2487*/
2489 VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2490 VmaStatistics* VMA_NOT_NULL pStats);
2491
2492/** \brief Calculates and returns detailed statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
2493
2494This function is slow to call. Use for debugging purposes.
2495For less detailed statistics, see vmaGetVirtualBlockStatistics().
2496*/
2498 VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2500
2501/** @} */
2502
2503#if VMA_STATS_STRING_ENABLED
2504/**
2505\addtogroup group_stats
2506@{
2507*/
2508
2509/** \brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock.
2510\param virtualBlock Virtual block.
2511\param[out] ppStatsString Returned string.
2512\param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStatistics(). Pass `VK_TRUE` to also obtain full list of allocations and free spaces.
2513
2514Returned string must be freed using vmaFreeVirtualBlockStatsString().
2515*/
2517 VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2518 char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
2519 VkBool32 detailedMap);
2520
2521/// Frees a string returned by vmaBuildVirtualBlockStatsString().
2523 VmaVirtualBlock VMA_NOT_NULL virtualBlock,
2524 char* VMA_NULLABLE pStatsString);
2525
2526/** \brief Builds and returns statistics as a null-terminated string in JSON format.
2527\param allocator
2528\param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
2529\param detailedMap
2530*/
2532 VmaAllocator VMA_NOT_NULL allocator,
2533 char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
2534 VkBool32 detailedMap);
2535
2537 VmaAllocator VMA_NOT_NULL allocator,
2538 char* VMA_NULLABLE pStatsString);
2539
2540/** @} */
2541
2542#endif // VMA_STATS_STRING_ENABLED
2543
2544#endif // _VMA_FUNCTION_HEADERS
2545
2546#ifdef __cplusplus
2547}
2548#endif
2549
2550#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
2551
2552////////////////////////////////////////////////////////////////////////////////
2553////////////////////////////////////////////////////////////////////////////////
2554//
2555// IMPLEMENTATION
2556//
2557////////////////////////////////////////////////////////////////////////////////
2558////////////////////////////////////////////////////////////////////////////////
2559
2560// For Visual Studio IntelliSense.
2561#if defined(__cplusplus) && defined(__INTELLISENSE__)
2562#define VMA_IMPLEMENTATION
2563#endif
2564
2565#ifdef VMA_IMPLEMENTATION
2566#undef VMA_IMPLEMENTATION
2567
#include <cstdint>
#include <cstdio> // for snprintf used by the VMA_STATS_STRING_ENABLED helpers
#include <cstdlib>
#include <cstring>
#include <type_traits>
#include <utility>
2573
2574#ifdef _MSC_VER
2575 #include <intrin.h> // For functions like __popcnt, _BitScanForward etc.
2576#endif
2577#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
2578 #include <bit> // For std::popcount
2579#endif
2580
2581/*******************************************************************************
2582CONFIGURATION SECTION
2583
2584Define some of these macros before each #include of this header or change them
here if you need other than the default behavior depending on your environment.
2586*/
2587#ifndef _VMA_CONFIGURATION
2588
2589/*
2590Define this macro to 1 to make the library fetch pointers to Vulkan functions
2591internally, like:
2592
2593 vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2594*/
2595#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2596 #define VMA_STATIC_VULKAN_FUNCTIONS 1
2597#endif
2598
2599/*
2600Define this macro to 1 to make the library fetch pointers to Vulkan functions
2601internally, like:
2602
2603 vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
2604
2605To use this feature in new versions of VMA you now have to pass
2606VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as
2607VmaAllocatorCreateInfo::pVulkanFunctions. Other members can be null.
2608*/
2609#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
2610 #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
2611#endif
2612
2613#ifndef VMA_USE_STL_SHARED_MUTEX
2614 // Compiler conforms to C++17.
2615 #if __cplusplus >= 201703L
2616 #define VMA_USE_STL_SHARED_MUTEX 1
2617 // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
2618 // Otherwise it is always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2.
2619 #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
2620 #define VMA_USE_STL_SHARED_MUTEX 1
2621 #else
2622 #define VMA_USE_STL_SHARED_MUTEX 0
2623 #endif
2624#endif
2625
2626/*
2627Define this macro to include custom header files without having to edit this file directly, e.g.:
2628
2629 // Inside of "my_vma_configuration_user_includes.h":
2630
2631 #include "my_custom_assert.h" // for MY_CUSTOM_ASSERT
2632 #include "my_custom_min.h" // for my_custom_min
2633 #include <algorithm>
2634 #include <mutex>
2635
2636 // Inside a different file, which includes "vk_mem_alloc.h":
2637
2638 #define VMA_CONFIGURATION_USER_INCLUDES_H "my_vma_configuration_user_includes.h"
2639 #define VMA_ASSERT(expr) MY_CUSTOM_ASSERT(expr)
2640 #define VMA_MIN(v1, v2) (my_custom_min(v1, v2))
2641 #include "vk_mem_alloc.h"
2642 ...
2643
2644The following headers are used in this CONFIGURATION section only, so feel free to
2645remove them if not needed.
2646*/
2647#if !defined(VMA_CONFIGURATION_USER_INCLUDES_H)
2648 #include <cassert> // for assert
2649 #include <algorithm> // for min, max
2650 #include <mutex>
2651#else
2652 #include VMA_CONFIGURATION_USER_INCLUDES_H
2653#endif
2654
2655#ifndef VMA_NULL
2656 // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2657 #define VMA_NULL nullptr
2658#endif
2659
2660#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
2661#include <cstdlib>
// Old Android (API < 16): no aligned_alloc/posix_memalign; fall back to memalign().
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    // memalign() requires the alignment to be at least pointer-sized.
    const size_t effectiveAlignment = alignment < sizeof(void*) ? sizeof(void*) : alignment;
    return memalign(effectiveAlignment, size);
}
2672#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
2673#include <cstdlib>
2674
2675#if defined(__APPLE__)
2676#include <AvailabilityMacros.h>
2677#endif
2678
2679static void* vma_aligned_alloc(size_t alignment, size_t size)
2680{
2681 // Unfortunately, aligned_alloc causes VMA to crash due to it returning null pointers. (At least under 11.4)
2682 // Therefore, for now disable this specific exception until a proper solution is found.
2683 //#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
2684 //#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
2685 // // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc()) only
2686 // // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
2687 // // MAC_OS_X_VERSION_10_16), even though the function is marked
2688 // // availabe for 10.15. That is why the preprocessor checks for 10.16 but
2689 // // the __builtin_available checks for 10.15.
2690 // // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
2691 // if (__builtin_available(macOS 10.15, iOS 13, *))
2692 // return aligned_alloc(alignment, size);
2693 //#endif
2694 //#endif
2695
2696 // alignment must be >= sizeof(void*)
2697 if(alignment < sizeof(void*))
2698 {
2699 alignment = sizeof(void*);
2700 }
2701
2702 void *pointer;
2703 if(posix_memalign(&pointer, alignment, size) == 0)
2704 return pointer;
2705 return VMA_NULL;
2706}
2707#elif defined(_WIN32)
// Windows: use the CRT's dedicated aligned allocator.
// Memory allocated here must be released with _aligned_free() (see vma_aligned_free).
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    return _aligned_malloc(size, alignment);
}
2712#else
// Generic platform: use C11/C++17 aligned_alloc().
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    // C11/C++17 aligned_alloc() requires `size` to be an integral multiple of
    // `alignment`; passing an unrounded size is undefined behavior (glibc tolerates
    // it, other C libraries may return null). Round the size up so the call is
    // well-defined everywhere. Callers receive at least `size` usable bytes.
    if (alignment != 0 && size % alignment != 0)
    {
        size += alignment - size % alignment;
    }
    return aligned_alloc(alignment, size);
}
2717#endif
2718
2719#if defined(_WIN32)
// Windows: pointers obtained from _aligned_malloc() must be released with _aligned_free().
static void vma_aligned_free(void* ptr)
{
    _aligned_free(ptr);
}
2724#else
// Non-Windows: results of memalign/posix_memalign/aligned_alloc are all released
// with plain free(). Passing null is safe (free(nullptr) is a no-op).
static void vma_aligned_free(void* VMA_NULLABLE ptr)
{
    free(ptr);
}
2729#endif
2730
2731// If your compiler is not compatible with C++11 and definition of
// aligned_alloc() function is missing, uncommenting following line may help:
2733
2734//#include <malloc.h>
2735
2736// Normal assert to check for programmer's errors, especially in Debug configuration.
2737#ifndef VMA_ASSERT
2738 #ifdef NDEBUG
2739 #define VMA_ASSERT(expr)
2740 #else
2741 #define VMA_ASSERT(expr) assert(expr)
2742 #endif
2743#endif
2744
2745// Assert that will be called very often, like inside data structures e.g. operator[].
2746// Making it non-empty can make program slow.
2747#ifndef VMA_HEAVY_ASSERT
2748 #ifdef NDEBUG
2749 #define VMA_HEAVY_ASSERT(expr)
2750 #else
2751 #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2752 #endif
2753#endif
2754
2755#ifndef VMA_ALIGN_OF
2756 #define VMA_ALIGN_OF(type) (__alignof(type))
2757#endif
2758
2759#ifndef VMA_SYSTEM_ALIGNED_MALLOC
2760 #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
2761#endif
2762
2763#ifndef VMA_SYSTEM_ALIGNED_FREE
2764 // VMA_SYSTEM_FREE is the old name, but might have been defined by the user
2765 #if defined(VMA_SYSTEM_FREE)
2766 #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr)
2767 #else
2768 #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr)
2769 #endif
2770#endif
2771
2772#ifndef VMA_COUNT_BITS_SET
2773 // Returns number of bits set to 1 in (v)
2774 #define VMA_COUNT_BITS_SET(v) VmaCountBitsSet(v)
2775#endif
2776
2777#ifndef VMA_BITSCAN_LSB
2778 // Scans integer for index of first nonzero value from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX
2779 #define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask)
2780#endif
2781
2782#ifndef VMA_BITSCAN_MSB
2783 // Scans integer for index of first nonzero value from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX
2784 #define VMA_BITSCAN_MSB(mask) VmaBitScanMSB(mask)
2785#endif
2786
2787#ifndef VMA_MIN
2788 #define VMA_MIN(v1, v2) ((std::min)((v1), (v2)))
2789#endif
2790
2791#ifndef VMA_MAX
2792 #define VMA_MAX(v1, v2) ((std::max)((v1), (v2)))
2793#endif
2794
2795#ifndef VMA_SWAP
2796 #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2797#endif
2798
2799#ifndef VMA_SORT
2800 #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2801#endif
2802
2803#ifndef VMA_DEBUG_LOG
2804 #define VMA_DEBUG_LOG(format, ...)
2805 /*
2806 #define VMA_DEBUG_LOG(format, ...) do { \
2807 printf(format, __VA_ARGS__); \
2808 printf("\n"); \
2809 } while(false)
2810 */
2811#endif
2812
2813// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2814#if VMA_STATS_STRING_ENABLED
2815 static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
2816 {
2817 snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2818 }
2819 static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
2820 {
2821 snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2822 }
2823 static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
2824 {
2825 snprintf(outStr, strLen, "%p", ptr);
2826 }
2827#endif
2828
2829#ifndef VMA_MUTEX
2830 class VmaMutex
2831 {
2832 public:
2833 void Lock() { m_Mutex.lock(); }
2834 void Unlock() { m_Mutex.unlock(); }
2835 bool TryLock() { return m_Mutex.try_lock(); }
2836 private:
2837 std::mutex m_Mutex;
2838 };
2839 #define VMA_MUTEX VmaMutex
2840#endif
2841
2842// Read-write mutex, where "read" is shared access, "write" is exclusive access.
2843#ifndef VMA_RW_MUTEX
2844 #if VMA_USE_STL_SHARED_MUTEX
2845 // Use std::shared_mutex from C++17.
2846 #include <shared_mutex>
2847 class VmaRWMutex
2848 {
2849 public:
2850 void LockRead() { m_Mutex.lock_shared(); }
2851 void UnlockRead() { m_Mutex.unlock_shared(); }
2852 bool TryLockRead() { return m_Mutex.try_lock_shared(); }
2853 void LockWrite() { m_Mutex.lock(); }
2854 void UnlockWrite() { m_Mutex.unlock(); }
2855 bool TryLockWrite() { return m_Mutex.try_lock(); }
2856 private:
2857 std::shared_mutex m_Mutex;
2858 };
2859 #define VMA_RW_MUTEX VmaRWMutex
2860 #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
2861 // Use SRWLOCK from WinAPI.
2862 // Minimum supported client = Windows Vista, server = Windows Server 2008.
2863 class VmaRWMutex
2864 {
2865 public:
2866 VmaRWMutex() { InitializeSRWLock(&m_Lock); }
2867 void LockRead() { AcquireSRWLockShared(&m_Lock); }
2868 void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
2869 bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
2870 void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
2871 void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
2872 bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
2873 private:
2874 SRWLOCK m_Lock;
2875 };
2876 #define VMA_RW_MUTEX VmaRWMutex
2877 #else
2878 // Less efficient fallback: Use normal mutex.
2879 class VmaRWMutex
2880 {
2881 public:
2882 void LockRead() { m_Mutex.Lock(); }
2883 void UnlockRead() { m_Mutex.Unlock(); }
2884 bool TryLockRead() { return m_Mutex.TryLock(); }
2885 void LockWrite() { m_Mutex.Lock(); }
2886 void UnlockWrite() { m_Mutex.Unlock(); }
2887 bool TryLockWrite() { return m_Mutex.TryLock(); }
2888 private:
2889 VMA_MUTEX m_Mutex;
2890 };
2891 #define VMA_RW_MUTEX VmaRWMutex
2892 #endif // #if VMA_USE_STL_SHARED_MUTEX
2893#endif // #ifndef VMA_RW_MUTEX
2894
2895/*
2896If providing your own implementation, you need to implement a subset of std::atomic.
2897*/
2898#ifndef VMA_ATOMIC_UINT32
2899 #include <atomic>
2900 #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2901#endif
2902
2903#ifndef VMA_ATOMIC_UINT64
2904 #include <atomic>
2905 #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
2906#endif
2907
2908#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2909 /**
2910 Every allocation will have its own memory block.
2911 Define to 1 for debugging purposes only.
2912 */
2913 #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2914#endif
2915
2916#ifndef VMA_MIN_ALIGNMENT
2917 /**
2918 Minimum alignment of all allocations, in bytes.
2919 Set to more than 1 for debugging purposes. Must be power of two.
2920 */
2921 #ifdef VMA_DEBUG_ALIGNMENT // Old name
2922 #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT
2923 #else
2924 #define VMA_MIN_ALIGNMENT (1)
2925 #endif
2926#endif
2927
2928#ifndef VMA_DEBUG_MARGIN
2929 /**
2930 Minimum margin after every allocation, in bytes.
2931 Set nonzero for debugging purposes only.
2932 */
2933 #define VMA_DEBUG_MARGIN (0)
2934#endif
2935
2936#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
2937 /**
2938 Define this macro to 1 to automatically fill new allocations and destroyed
2939 allocations with some bit pattern.
2940 */
2941 #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
2942#endif
2943
2944#ifndef VMA_DEBUG_DETECT_CORRUPTION
2945 /**
2946 Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to
2947 enable writing magic value to the margin after every allocation and
2948 validating it, so that memory corruptions (out-of-bounds writes) are detected.
2949 */
2950 #define VMA_DEBUG_DETECT_CORRUPTION (0)
2951#endif
2952
2953#ifndef VMA_DEBUG_GLOBAL_MUTEX
2954 /**
2955 Set this to 1 for debugging purposes only, to enable single mutex protecting all
2956 entry calls to the library. Can be useful for debugging multithreading issues.
2957 */
2958 #define VMA_DEBUG_GLOBAL_MUTEX (0)
2959#endif
2960
2961#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2962 /**
2963 Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
2964 Set to more than 1 for debugging purposes only. Must be power of two.
2965 */
2966 #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2967#endif
2968
2969#ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
2970 /*
2971 Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount
2972 and return error instead of leaving up to Vulkan implementation what to do in such cases.
2973 */
2974 #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0)
2975#endif
2976
2977#ifndef VMA_SMALL_HEAP_MAX_SIZE
2978 /// Maximum size of a memory heap in Vulkan to consider it "small".
2979 #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2980#endif
2981
2982#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2983 /// Default size of a block allocated as single VkDeviceMemory from a "large" heap.
2984 #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2985#endif
2986
2987/*
2988Mapping hysteresis is a logic that launches when vmaMapMemory/vmaUnmapMemory is called
2989or a persistently mapped allocation is created and destroyed several times in a row.
2990It keeps additional +1 mapping of a device memory block to prevent calling actual
2991vkMapMemory/vkUnmapMemory too many times, which may improve performance and help
tools like RenderDoc.
2993*/
2994#ifndef VMA_MAPPING_HYSTERESIS_ENABLED
2995 #define VMA_MAPPING_HYSTERESIS_ENABLED 1
2996#endif
2997
2998#ifndef VMA_CLASS_NO_COPY
2999 #define VMA_CLASS_NO_COPY(className) \
3000 private: \
3001 className(const className&) = delete; \
3002 className& operator=(const className&) = delete;
3003#endif
3004
3005#define VMA_VALIDATE(cond) do { if(!(cond)) { \
3006 VMA_ASSERT(0 && "Validation failed: " #cond); \
3007 return false; \
3008 } } while(false)
3009
3010/*******************************************************************************
3011END OF CONFIGURATION
3012*/
3013#endif // _VMA_CONFIGURATION
3014
3015
3016static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3017static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3018// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3019static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3020
3021// Copy of some Vulkan definitions so we don't need to check their existence just to handle few constants.
3022static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
3023static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
3024static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
3025static const uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200;
3026static const int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000;
3027static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3028static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
3029static const uint32_t VMA_VENDOR_ID_AMD = 4098;
3030
3031// This one is tricky. Vulkan specification defines this code as available since
3032// Vulkan 1.0, but doesn't actually define it in Vulkan SDK earlier than 1.2.131.
3033// See pull request #207.
3034#define VK_ERROR_UNKNOWN_COPY ((VkResult)-13)
3035
3036
3037#if VMA_STATS_STRING_ENABLED
3038// Correspond to values of enum VmaSuballocationType.
3039static const char* VMA_SUBALLOCATION_TYPE_NAMES[] =
3040{
3041 "FREE",
3042 "UNKNOWN",
3043 "BUFFER",
3044 "IMAGE_UNKNOWN",
3045 "IMAGE_LINEAR",
3046 "IMAGE_OPTIMAL",
3047};
3048#endif
3049
3050static VkAllocationCallbacks VmaEmptyAllocationCallbacks =
3051 { VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3052
3053
3054#ifndef _VMA_ENUM_DECLARATIONS
3055
3056enum VmaSuballocationType
3057{
3058 VMA_SUBALLOCATION_TYPE_FREE = 0,
3059 VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
3060 VMA_SUBALLOCATION_TYPE_BUFFER = 2,
3061 VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
3062 VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
3063 VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
3064 VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
3065};
3066
3067enum VMA_CACHE_OPERATION
3068{
3069 VMA_CACHE_FLUSH,
3070 VMA_CACHE_INVALIDATE
3071};
3072
3073enum class VmaAllocationRequestType
3074{
3075 Normal,
3076 TLSF,
3077 // Used by "Linear" algorithm.
3078 UpperAddress,
3079 EndOf1st,
3080 EndOf2nd,
3081};
3082
3083#endif // _VMA_ENUM_DECLARATIONS
3084
3085#ifndef _VMA_FORWARD_DECLARATIONS
3086// Opaque handle used by allocation algorithms to identify single allocation in any conforming way.
3087VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaAllocHandle);
3088
3089struct VmaMutexLock;
3090struct VmaMutexLockRead;
3091struct VmaMutexLockWrite;
3092
3093template<typename T>
3094struct AtomicTransactionalIncrement;
3095
3096template<typename T>
3097struct VmaStlAllocator;
3098
3099template<typename T, typename AllocatorT>
3100class VmaVector;
3101
3102template<typename T, typename AllocatorT, size_t N>
3103class VmaSmallVector;
3104
3105template<typename T>
3106class VmaPoolAllocator;
3107
3108template<typename T>
3109struct VmaListItem;
3110
3111template<typename T>
3112class VmaRawList;
3113
3114template<typename T, typename AllocatorT>
3115class VmaList;
3116
3117template<typename ItemTypeTraits>
3118class VmaIntrusiveLinkedList;
3119
3120// Unused in this version
3121#if 0
3122template<typename T1, typename T2>
3123struct VmaPair;
3124template<typename FirstT, typename SecondT>
3125struct VmaPairFirstLess;
3126
3127template<typename KeyT, typename ValueT>
3128class VmaMap;
3129#endif
3130
3131#if VMA_STATS_STRING_ENABLED
3132class VmaStringBuilder;
3133class VmaJsonWriter;
3134#endif
3135
3136class VmaDeviceMemoryBlock;
3137
3138struct VmaDedicatedAllocationListItemTraits;
3139class VmaDedicatedAllocationList;
3140
3141struct VmaSuballocation;
3142struct VmaSuballocationOffsetLess;
3143struct VmaSuballocationOffsetGreater;
3144struct VmaSuballocationItemSizeLess;
3145
3146typedef VmaList<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> VmaSuballocationList;
3147
3148struct VmaAllocationRequest;
3149
3150class VmaBlockMetadata;
3151class VmaBlockMetadata_Linear;
3152class VmaBlockMetadata_TLSF;
3153
3154class VmaBlockVector;
3155
3156struct VmaPoolListItemTraits;
3157
3158struct VmaCurrentBudgetData;
3159
3160class VmaAllocationObjectAllocator;
3161
3162#endif // _VMA_FORWARD_DECLARATIONS
3163
3164
3165#ifndef _VMA_FUNCTIONS
3166
3167/*
3168Returns number of bits set to 1 in (v).
3169
On specific platforms and compilers you can use intrinsics like:
3171
3172Visual Studio:
3173 return __popcnt(v);
3174GCC, Clang:
3175 return static_cast<uint32_t>(__builtin_popcount(v));
3176
3177Define macro VMA_COUNT_BITS_SET to provide your optimized implementation.
3178But you need to check in runtime whether user's CPU supports these, as some old processors don't.
3179*/
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
    return std::popcount(v);
#else
    // Kernighan's method: each iteration clears the lowest set bit,
    // so the loop runs once per set bit.
    uint32_t count = 0;
    while (v != 0)
    {
        v &= v - 1;
        ++count;
    }
    return count;
#endif
}
3193
// Index of the least significant set bit of a 64-bit mask, or UINT8_MAX if mask == 0.
static inline uint8_t VmaBitScanLSB(uint64_t mask)
{
#if defined(_MSC_VER) && defined(_WIN64)
    unsigned long pos;
    if (_BitScanForward64(&pos, mask))
        return static_cast<uint8_t>(pos);
    return UINT8_MAX;
#elif defined __GNUC__ || defined __clang__
    // __builtin_ffsll returns the 1-based index of the lowest set bit, 0 for no bits.
    const int oneBased = __builtin_ffsll(mask);
    return oneBased != 0 ? static_cast<uint8_t>(oneBased - 1) : UINT8_MAX;
#else
    // Portable fallback: probe each bit starting from the LSB.
    for (uint8_t pos = 0; pos < 64; ++pos)
    {
        if ((mask >> pos) & 1u)
            return pos;
    }
    return UINT8_MAX;
#endif
}
3215
// Index of the least significant set bit of a 32-bit mask, or UINT8_MAX if mask == 0.
static inline uint8_t VmaBitScanLSB(uint32_t mask)
{
#ifdef _MSC_VER
    unsigned long pos;
    if (_BitScanForward(&pos, mask))
        return static_cast<uint8_t>(pos);
    return UINT8_MAX;
#elif defined __GNUC__ || defined __clang__
    // __builtin_ffs returns the 1-based index of the lowest set bit, 0 for no bits.
    const int oneBased = __builtin_ffs(static_cast<int>(mask));
    return oneBased != 0 ? static_cast<uint8_t>(oneBased - 1) : UINT8_MAX;
#else
    // Portable fallback: probe each bit starting from the LSB.
    for (uint8_t pos = 0; pos < 32; ++pos)
    {
        if ((mask >> pos) & 1u)
            return pos;
    }
    return UINT8_MAX;
#endif
}
3237
// Index of the most significant set bit of a 64-bit mask, or UINT8_MAX if mask == 0.
static inline uint8_t VmaBitScanMSB(uint64_t mask)
{
#if defined(_MSC_VER) && defined(_WIN64)
    unsigned long pos;
    if (_BitScanReverse64(&pos, mask))
        return static_cast<uint8_t>(pos);
#elif defined __GNUC__ || defined __clang__
    // __builtin_clzll is undefined for 0, so guard explicitly.
    if (mask != 0)
        return static_cast<uint8_t>(63 - __builtin_clzll(mask));
#else
    // Portable fallback: probe each bit starting from the MSB.
    for (int pos = 63; pos >= 0; --pos)
    {
        if ((mask >> pos) & 1u)
            return static_cast<uint8_t>(pos);
    }
#endif
    return UINT8_MAX;
}
3259
// Index of the most significant set bit of a 32-bit mask, or UINT8_MAX if mask == 0.
static inline uint8_t VmaBitScanMSB(uint32_t mask)
{
#ifdef _MSC_VER
    unsigned long pos;
    if (_BitScanReverse(&pos, mask))
        return static_cast<uint8_t>(pos);
#elif defined __GNUC__ || defined __clang__
    // __builtin_clz is undefined for 0, so guard explicitly.
    if (mask != 0)
        return static_cast<uint8_t>(31 - __builtin_clz(mask));
#else
    // Portable fallback: probe each bit starting from the MSB.
    for (int pos = 31; pos >= 0; --pos)
    {
        if ((mask >> pos) & 1u)
            return static_cast<uint8_t>(pos);
    }
#endif
    return UINT8_MAX;
}
3281
3282/*
3283Returns true if given number is a power of two.
3284T must be unsigned integer number or signed integer but always nonnegative.
3285For 0 returns true.
3286*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    // Clearing the lowest set bit leaves zero only for powers of two (and for 0 itself).
    const T lowestBitCleared = static_cast<T>(x & (x - 1));
    return lowestBitCleared == 0;
}
3292
// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
3294// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T alignment)
{
    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
    // Adding (alignment - 1) and masking off the low bits rounds up to the next multiple.
    const T mask = alignment - 1;
    return (val + mask) & ~mask;
}
3301
// Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
3303// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T alignment)
{
    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
    // Masking off the low bits rounds down to the previous multiple of alignment.
    const T mask = alignment - 1;
    return val & ~mask;
}
3310
3311// Division with mathematical rounding to nearest number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    // Bias the numerator by half the divisor so truncating division rounds to nearest.
    const T half = y / (T)2;
    return (x + half) / y;
}
3317
3318// Divide by 'y' and round up to nearest integer.
template <typename T>
static inline T VmaDivideRoundingUp(T x, T y)
{
    // Classic integer ceiling division for nonnegative values.
    return (x + (y - (T)1)) / y;
}
3324
3325// Returns smallest power of 2 greater or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    // Smear the highest set bit of (v - 1) into all lower positions, then add 1.
    // Note: wraps to 0 for v == 0.
    --v;
    for (uint32_t shift = 1; shift < 32; shift <<= 1)
        v |= v >> shift;
    return v + 1;
}
3337
static inline uint64_t VmaNextPow2(uint64_t v)
{
    // Smear the highest set bit of (v - 1) into all lower positions, then add 1.
    // Note: wraps to 0 for v == 0.
    --v;
    for (uint64_t shift = 1; shift < 64; shift <<= 1)
        v |= v >> shift;
    return v + 1;
}
3350
3351// Returns largest power of 2 less or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    // Smear the highest set bit downwards, then strip everything below it.
    for (uint32_t shift = 1; shift < 32; shift <<= 1)
        v |= v >> shift;
    return v ^ (v >> 1);
}
3362
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    // Smear the highest set bit downwards, then strip everything below it.
    for (uint64_t shift = 1; shift < 64; shift <<= 1)
        v |= v >> shift;
    return v ^ (v >> 1);
}
3374
3375static inline bool VmaStrIsEmpty(const char* pStr)
3376{
3377 return pStr == VMA_NULL || *pStr == '\0';
3378}
3379
3380/*
3381Returns true if two memory blocks occupy overlapping pages.
3382ResourceA must be in less memory offset than ResourceB.
3383
3384Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3385chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3386*/
3387static inline bool VmaBlocksOnSamePage(
3388 VkDeviceSize resourceAOffset,
3389 VkDeviceSize resourceASize,
3390 VkDeviceSize resourceBOffset,
3391 VkDeviceSize pageSize)
3392{
3393 VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3394 VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3395 VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3396 VkDeviceSize resourceBStart = resourceBOffset;
3397 VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3398 return resourceAEndPage == resourceBStartPage;
3399}
3400
3401/*
3402Returns true if given suballocation types could conflict and must respect
3403VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
3404or linear image and another one is optimal image. If type is unknown, behave
3405conservatively.
3406*/
3407static inline bool VmaIsBufferImageGranularityConflict(
3408 VmaSuballocationType suballocType1,
3409 VmaSuballocationType suballocType2)
3410{
3411 if (suballocType1 > suballocType2)
3412 {
3413 VMA_SWAP(suballocType1, suballocType2);
3414 }
3415
3416 switch (suballocType1)
3417 {
3418 case VMA_SUBALLOCATION_TYPE_FREE:
3419 return false;
3420 case VMA_SUBALLOCATION_TYPE_UNKNOWN:
3421 return true;
3422 case VMA_SUBALLOCATION_TYPE_BUFFER:
3423 return
3424 suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3425 suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3426 case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
3427 return
3428 suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
3429 suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
3430 suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3431 case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
3432 return
3433 suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
3434 case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
3435 return false;
3436 default:
3437 VMA_ASSERT(0);
3438 return true;
3439 }
3440}
3441
3442static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3443{
3444#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
3445 uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3446 const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3447 for (size_t i = 0; i < numberCount; ++i, ++pDst)
3448 {
3449 *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3450 }
3451#else
3452 // no-op
3453#endif
3454}
3455
3456static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3457{
3458#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
3459 const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3460 const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3461 for (size_t i = 0; i < numberCount; ++i, ++pSrc)
3462 {
3463 if (*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3464 {
3465 return false;
3466 }
3467 }
3468#endif
3469 return true;
3470}
3471
3472/*
3473Fills structure with parameters of an example buffer to be used for transfers
3474during GPU memory defragmentation.
3475*/
3476static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
3477{
3478 memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
3481 outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
3482}
3483
3484
3485/*
3486Performs binary search and returns iterator to first element that is greater or
3487equal to (key), according to comparison (cmp).
3488
3489Cmp should return true if first argument is less than second argument.
3490
3491Returned value is the found element, if present in the collection or place where
3492new element with value (key) should be inserted.
3493*/
3494template <typename CmpLess, typename IterT, typename KeyT>
3495static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp)
3496{
3497 size_t down = 0, up = (end - beg);
3498 while (down < up)
3499 {
3500 const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation
3501 if (cmp(*(beg + mid), key))
3502 {
3503 down = mid + 1;
3504 }
3505 else
3506 {
3507 up = mid;
3508 }
3509 }
3510 return beg + down;
3511}
3512
3513template<typename CmpLess, typename IterT, typename KeyT>
3514IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
3515{
3516 IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
3517 beg, end, value, cmp);
3518 if (it == end ||
3519 (!cmp(*it, value) && !cmp(value, *it)))
3520 {
3521 return it;
3522 }
3523 return end;
3524}
3525
3526/*
3527Returns true if all pointers in the array are not-null and unique.
3528Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
3529T must be pointer type, e.g. VmaAllocation, VmaPool.
3530*/
3531template<typename T>
3532static bool VmaValidatePointerArray(uint32_t count, const T* arr)
3533{
3534 for (uint32_t i = 0; i < count; ++i)
3535 {
3536 const T iPtr = arr[i];
3537 if (iPtr == VMA_NULL)
3538 {
3539 return false;
3540 }
3541 for (uint32_t j = i + 1; j < count; ++j)
3542 {
3543 if (iPtr == arr[j])
3544 {
3545 return false;
3546 }
3547 }
3548 }
3549 return true;
3550}
3551
// Splices newStruct into mainStruct's pNext chain, directly after mainStruct:
// the new node inherits the current chain tail and becomes the new head's next.
template<typename MainT, typename NewT>
static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
{
    auto* const previousNext = mainStruct->pNext;
    newStruct->pNext = previousNext;
    mainStruct->pNext = newStruct;
}
3558
3559// This is the main algorithm that guides the selection of a memory type best for an allocation -
3560// converts usage to required/preferred/not preferred flags.
3561static bool FindMemoryPreferences(
3562 bool isIntegratedGPU,
3563 const VmaAllocationCreateInfo& allocCreateInfo,
3564 VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown.
3565 VkMemoryPropertyFlags& outRequiredFlags,
3566 VkMemoryPropertyFlags& outPreferredFlags,
3567 VkMemoryPropertyFlags& outNotPreferredFlags)
3568{
3569 outRequiredFlags = allocCreateInfo.requiredFlags;
3570 outPreferredFlags = allocCreateInfo.preferredFlags;
3571 outNotPreferredFlags = 0;
3572
3573 switch(allocCreateInfo.usage)
3574 {
3576 break;
3578 if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
3579 {
3580 outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3581 }
3582 break;
3585 break;
3587 outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3588 if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
3589 {
3590 outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3591 }
3592 break;
3594 outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3595 outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
3596 break;
3598 outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3599 break;
3601 outRequiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
3602 break;
3606 {
3607 if(bufImgUsage == UINT32_MAX)
3608 {
3609 VMA_ASSERT(0 && "VMA_MEMORY_USAGE_AUTO* values can only be used with functions like vmaCreateBuffer, vmaCreateImage so that the details of the created resource are known.");
3610 return false;
3611 }
3612 // This relies on values of VK_IMAGE_USAGE_TRANSFER* being the same VK_BUFFER_IMAGE_TRANSFER*.
3613 const bool deviceAccess = (bufImgUsage & ~(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0;
3614 const bool hostAccessSequentialWrite = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT) != 0;
3615 const bool hostAccessRandom = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) != 0;
3616 const bool hostAccessAllowTransferInstead = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) != 0;
3617 const bool preferDevice = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
3618 const bool preferHost = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST;
3619
3620 // CPU random access - e.g. a buffer written to or transferred from GPU to read back on CPU.
3621 if(hostAccessRandom)
3622 {
3623 if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
3624 {
3625 // Nice if it will end up in HOST_VISIBLE, but more importantly prefer DEVICE_LOCAL.
3626 // Omitting HOST_VISIBLE here is intentional.
3627 // In case there is DEVICE_LOCAL | HOST_VISIBLE | HOST_CACHED, it will pick that one.
3628 // Otherwise, this will give same weight to DEVICE_LOCAL as HOST_VISIBLE | HOST_CACHED and select the former if occurs first on the list.
3630 }
3631 else
3632 {
3633 // Always CPU memory, cached.
3635 }
3636 }
3637 // CPU sequential write - may be CPU or host-visible GPU memory, uncached and write-combined.
3638 else if(hostAccessSequentialWrite)
3639 {
3640 // Want uncached and write-combined.
3641 outNotPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
3642
3643 if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
3644 {
3646 }
3647 else
3648 {
3649 outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3650 // Direct GPU access, CPU sequential write (e.g. a dynamic uniform buffer updated every frame)
3651 if(deviceAccess)
3652 {
3653 // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose GPU memory.
3654 if(preferHost)
3655 outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3656 else
3657 outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3658 }
3659 // GPU no direct access, CPU sequential write (e.g. an upload buffer to be transferred to the GPU)
3660 else
3661 {
3662 // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose CPU memory.
3663 if(preferDevice)
3664 outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3665 else
3666 outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3667 }
3668 }
3669 }
3670 // No CPU access
3671 else
3672 {
3673 // GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory
3674 if(deviceAccess)
3675 {
3676 // ...unless there is a clear preference from the user not to do so.
3677 if(preferHost)
3678 outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3679 else
3680 outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3681 }
3682 // No direct GPU access, no CPU access, just transfers.
3683 // It may be staging copy intended for e.g. preserving image for next frame (then better GPU memory) or
3684 // a "swap file" copy to free some GPU memory (then better CPU memory).
3685 // Up to the user to decide. If no preferece, assume the former and choose GPU memory.
3686 if(preferHost)
3687 outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3688 else
3689 outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
3690 }
3691 break;
3692 }
3693 default:
3694 VMA_ASSERT(0);
3695 }
3696
3697 // Avoid DEVICE_COHERENT unless explicitly requested.
3698 if(((allocCreateInfo.requiredFlags | allocCreateInfo.preferredFlags) &
3699 (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
3700 {
3701 outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY;
3702 }
3703
3704 return true;
3705}
3706
3707////////////////////////////////////////////////////////////////////////////////
3708// Memory allocation
3709
3710static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3711{
3712 void* result = VMA_NULL;
3713 if ((pAllocationCallbacks != VMA_NULL) &&
3714 (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3715 {
3716 result = (*pAllocationCallbacks->pfnAllocation)(
3717 pAllocationCallbacks->pUserData,
3718 size,
3719 alignment,
3721 }
3722 else
3723 {
3724 result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3725 }
3726 VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
3727 return result;
3728}
3729
3730static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3731{
3732 if ((pAllocationCallbacks != VMA_NULL) &&
3733 (pAllocationCallbacks->pfnFree != VMA_NULL))
3734 {
3735 (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3736 }
3737 else
3738 {
3739 VMA_SYSTEM_ALIGNED_FREE(ptr);
3740 }
3741}
3742
3743template<typename T>
3744static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
3745{
3746 return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
3747}
3748
3749template<typename T>
3750static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
3751{
3752 return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
3753}
3754
// Allocates storage for one 'type' via VmaAllocate() and default-constructs it there with placement new.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

// Allocates storage for 'count' objects of 'type' via VmaAllocateArray().
// NOTE(review): the placement new constructs only a single element at the start
// of the array; callers appear to rely on T being trivially constructible - verify.
#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3758
3759template<typename T>
3760static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3761{
3762 ptr->~T();
3763 VmaFree(pAllocationCallbacks, ptr);
3764}
3765
3766template<typename T>
3767static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3768{
3769 if (ptr != VMA_NULL)
3770 {
3771 for (size_t i = count; i--; )
3772 {
3773 ptr[i].~T();
3774 }
3775 VmaFree(pAllocationCallbacks, ptr);
3776 }
3777}
3778
3779static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
3780{
3781 if (srcStr != VMA_NULL)
3782 {
3783 const size_t len = strlen(srcStr);
3784 char* const result = vma_new_array(allocs, char, len + 1);
3785 memcpy(result, srcStr, len + 1);
3786 return result;
3787 }
3788 return VMA_NULL;
3789}
3790
#if VMA_STATS_STRING_ENABLED
// Duplicates the first strLen bytes of srcStr and null-terminates the copy.
// srcStr itself does not need to be null-terminated. Returns VMA_NULL for VMA_NULL input.
static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen)
{
    if (srcStr == VMA_NULL)
    {
        return VMA_NULL;
    }
    char* const copy = vma_new_array(allocs, char, strLen + 1);
    memcpy(copy, srcStr, strLen);
    copy[strLen] = '\0';
    return copy;
}
#endif // VMA_STATS_STRING_ENABLED
3804
3805static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
3806{
3807 if (str != VMA_NULL)
3808 {
3809 const size_t len = strlen(str);
3810 vma_delete_array(allocs, str, len + 1);
3811 }
3812}
3813
// Inserts 'value' into 'vector' keeping it sorted according to CmpLess.
// Returns the index at which the value was inserted.
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    typename VectorT::value_type* const dataBeg = vector.data();
    const size_t indexToInsert = (size_t)(VmaBinaryFindFirstNotLess(
        dataBeg,
        dataBeg + vector.size(),
        value,
        CmpLess()) - dataBeg);
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}
3825
// Removes one element equal to 'value' from sorted 'vector'.
// Returns true if a matching element was found and removed.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if (it == vector.end())
    {
        return false;
    }
    // Equality under a strict weak ordering: neither side compares less.
    if (comparator(*it, value) || comparator(value, *it))
    {
        return false;
    }
    VmaVectorRemove(vector, (size_t)(it - vector.begin()));
    return true;
}
3843#endif // _VMA_FUNCTIONS
3844
3845#ifndef _VMA_STATISTICS_FUNCTIONS
3846
3847static void VmaClearStatistics(VmaStatistics& outStats)
3848{
3849 outStats.blockCount = 0;
3850 outStats.allocationCount = 0;
3851 outStats.blockBytes = 0;
3852 outStats.allocationBytes = 0;
3853}
3854
3855static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src)
3856{
3857 inoutStats.blockCount += src.blockCount;
3858 inoutStats.allocationCount += src.allocationCount;
3859 inoutStats.blockBytes += src.blockBytes;
3860 inoutStats.allocationBytes += src.allocationBytes;
3861}
3862
3863static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats)
3864{
3865 VmaClearStatistics(outStats.statistics);
3866 outStats.unusedRangeCount = 0;
3868 outStats.allocationSizeMax = 0;
3870 outStats.unusedRangeSizeMax = 0;
3871}
3872
3873static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
3874{
3875 inoutStats.statistics.allocationCount++;
3876 inoutStats.statistics.allocationBytes += size;
3877 inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, size);
3878 inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size);
3879}
3880
3881static void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
3882{
3883 inoutStats.unusedRangeCount++;
3884 inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size);
3885 inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size);
3886}
3887
// Merges detailed statistics 'src' into 'inoutStats'.
// Counters are summed; min/max extremes are combined, not summed.
static void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src)
{
    VmaAddStatistics(inoutStats.statistics, src.statistics);
    inoutStats.unusedRangeCount += src.unusedRangeCount;
    inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, src.allocationSizeMin);
    inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, src.allocationSizeMax);
    inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, src.unusedRangeSizeMin);
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, src.unusedRangeSizeMax);
}
3897
3898#endif // _VMA_STATISTICS_FUNCTIONS
3899
3900#ifndef _VMA_MUTEX_LOCK
3901// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3902struct VmaMutexLock
3903{
3904 VMA_CLASS_NO_COPY(VmaMutexLock)
3905public:
3906 VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
3907 m_pMutex(useMutex ? &mutex : VMA_NULL)
3908 {
3909 if (m_pMutex) { m_pMutex->Lock(); }
3910 }
3911 ~VmaMutexLock() { if (m_pMutex) { m_pMutex->Unlock(); } }
3912
3913private:
3914 VMA_MUTEX* m_pMutex;
3915};
3916
3917// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
3918struct VmaMutexLockRead
3919{
3920 VMA_CLASS_NO_COPY(VmaMutexLockRead)
3921public:
3922 VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
3923 m_pMutex(useMutex ? &mutex : VMA_NULL)
3924 {
3925 if (m_pMutex) { m_pMutex->LockRead(); }
3926 }
3927 ~VmaMutexLockRead() { if (m_pMutex) { m_pMutex->UnlockRead(); } }
3928
3929private:
3930 VMA_RW_MUTEX* m_pMutex;
3931};
3932
3933// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
3934struct VmaMutexLockWrite
3935{
3936 VMA_CLASS_NO_COPY(VmaMutexLockWrite)
3937public:
3938 VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex)
3939 : m_pMutex(useMutex ? &mutex : VMA_NULL)
3940 {
3941 if (m_pMutex) { m_pMutex->LockWrite(); }
3942 }
3943 ~VmaMutexLockWrite() { if (m_pMutex) { m_pMutex->UnlockWrite(); } }
3944
3945private:
3946 VMA_RW_MUTEX* m_pMutex;
3947};
3948
3949#if VMA_DEBUG_GLOBAL_MUTEX
3950 static VMA_MUTEX gDebugGlobalMutex;
3951 #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3952#else
3953 #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3954#endif
3955#endif // _VMA_MUTEX_LOCK
3956
3957#ifndef _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
// An object that increments given atomic but decrements it back in the destructor unless Commit() is called.
template<typename T>
struct AtomicTransactionalIncrement
{
public:
    typedef std::atomic<T> AtomicT;

    // Rolls the increment back unless Commit() was called.
    ~AtomicTransactionalIncrement()
    {
        if (m_Atomic != nullptr)
        {
            m_Atomic->fetch_sub(1);
        }
    }

    // Makes the increment permanent: the destructor becomes a no-op.
    void Commit() { m_Atomic = nullptr; }

    // Increments *atomic and remembers it for potential rollback.
    // Returns the previous value, like fetch_add.
    T Increment(AtomicT* atomic)
    {
        m_Atomic = atomic;
        return m_Atomic->fetch_add(1);
    }

private:
    AtomicT* m_Atomic = nullptr;
};
3981#endif // _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
3982
3983#ifndef _VMA_STL_ALLOCATOR
3984// STL-compatible allocator.
3985template<typename T>
3986struct VmaStlAllocator
3987{
3988 const VkAllocationCallbacks* const m_pCallbacks;
3989 typedef T value_type;
3990
3991 VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) {}
3992 template<typename U>
3993 VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) {}
3994 VmaStlAllocator(const VmaStlAllocator&) = default;
3995 VmaStlAllocator& operator=(const VmaStlAllocator&) = delete;
3996
3997 T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3998 void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3999
4000 template<typename U>
4001 bool operator==(const VmaStlAllocator<U>& rhs) const
4002 {
4003 return m_pCallbacks == rhs.m_pCallbacks;
4004 }
4005 template<typename U>
4006 bool operator!=(const VmaStlAllocator<U>& rhs) const
4007 {
4008 return m_pCallbacks != rhs.m_pCallbacks;
4009 }
4010};
4011#endif // _VMA_STL_ALLOCATOR
4012
4013#ifndef _VMA_VECTOR
/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
template<typename T, typename AllocatorT>
class VmaVector
{
public:
    typedef T value_type;
    typedef T* iterator;
    typedef const T* const_iterator;

    VmaVector(const AllocatorT& allocator);
    VmaVector(size_t count, const AllocatorT& allocator);
    // This version of the constructor is here for compatibility with pre-C++14 std::vector.
    // value is unused.
    VmaVector(size_t count, const T& value, const AllocatorT& allocator) : VmaVector(count, allocator) {}
    VmaVector(const VmaVector<T, AllocatorT>& src);
    VmaVector& operator=(const VmaVector& rhs);
    // Elements are not destroyed (T is POD) - only the storage is released.
    ~VmaVector() { VmaFree(m_Allocator.m_pCallbacks, m_pArray); }

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    T* data() { return m_pArray; }
    T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }
    T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }
    const T* data() const { return m_pArray; }
    const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }
    const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }
    const_iterator cbegin() const { return m_pArray; }
    const_iterator cend() const { return m_pArray + m_Count; }
    const_iterator begin() const { return cbegin(); }
    const_iterator end() const { return cend(); }

    // Note: pop_front/push_front are O(n) - elements are shifted by memmove.
    void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }
    void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }
    void push_front(const T& src) { insert(0, src); }

    void push_back(const T& src);
    void reserve(size_t newCapacity, bool freeMemory = false);
    void resize(size_t newCount);
    void clear() { resize(0); }
    void shrink_to_fit();
    void insert(size_t index, const T& src);
    void remove(size_t index);

    T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }
    const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }

private:
    AllocatorT m_Allocator;   // Provides VkAllocationCallbacks via m_pCallbacks.
    T* m_pArray;              // Heap storage, VMA_NULL when capacity is 0.
    size_t m_Count;           // Number of live elements.
    size_t m_Capacity;        // Number of elements the storage can hold.
};
4071
4072#ifndef _VMA_VECTOR_FUNCTIONS
// Constructs an empty vector that uses 'allocator' for all future storage.
template<typename T, typename AllocatorT>
VmaVector<T, AllocatorT>::VmaVector(const AllocatorT& allocator)
    : m_Allocator(allocator),
    m_pArray(VMA_NULL),
    m_Count(0),
    m_Capacity(0) {}
4079
// Constructs a vector with 'count' elements. The storage is allocated but the
// elements are left uninitialized (T is POD; no constructors are run).
template<typename T, typename AllocatorT>
VmaVector<T, AllocatorT>::VmaVector(size_t count, const AllocatorT& allocator)
    : m_Allocator(allocator),
    m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    m_Count(count),
    m_Capacity(count) {}
4086
// Copy constructor: allocates exactly src.m_Count elements (capacity equals
// count, no growth slack) and copies them with memcpy (T is POD).
template<typename T, typename AllocatorT>
VmaVector<T, AllocatorT>::VmaVector(const VmaVector& src)
    : m_Allocator(src.m_Allocator),
    m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    m_Count(src.m_Count),
    m_Capacity(src.m_Count)
{
    if (m_Count != 0)
    {
        memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    }
}
4099
4100template<typename T, typename AllocatorT>
4101VmaVector<T, AllocatorT>& VmaVector<T, AllocatorT>::operator=(const VmaVector& rhs)
4102{
4103 if (&rhs != this)
4104 {
4105 resize(rhs.m_Count);
4106 if (m_Count != 0)
4107 {
4108 memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
4109 }
4110 }
4111 return *this;
4112}
4113
4114template<typename T, typename AllocatorT>
4115void VmaVector<T, AllocatorT>::push_back(const T& src)
4116{
4117 const size_t newIndex = size();
4118 resize(newIndex + 1);
4119 m_pArray[newIndex] = src;
4120}
4121
4122template<typename T, typename AllocatorT>
4123void VmaVector<T, AllocatorT>::reserve(size_t newCapacity, bool freeMemory)
4124{
4125 newCapacity = VMA_MAX(newCapacity, m_Count);
4126
4127 if ((newCapacity < m_Capacity) && !freeMemory)
4128 {
4129 newCapacity = m_Capacity;
4130 }
4131
4132 if (newCapacity != m_Capacity)
4133 {
4134 T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
4135 if (m_Count != 0)
4136 {
4137 memcpy(newArray, m_pArray, m_Count * sizeof(T));
4138 }
4139 VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4140 m_Capacity = newCapacity;
4141 m_pArray = newArray;
4142 }
4143}
4144
// Changes the element count. Growing reallocates with geometric capacity growth
// (x1.5, minimum 8); shrinking only lowers m_Count and never releases memory.
template<typename T, typename AllocatorT>
void VmaVector<T, AllocatorT>::resize(size_t newCount)
{
    size_t newCapacity = m_Capacity;
    if (newCount > m_Capacity)
    {
        // Grow to at least newCount, with amortized geometric growth.
        newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    }

    if (newCapacity != m_Capacity)
    {
        T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
        // Keep only the elements that survive the resize.
        const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
        if (elementsToCopy != 0)
        {
            memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
        }
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
        m_Capacity = newCapacity;
        m_pArray = newArray;
    }

    m_Count = newCount;
}
4169
// Reallocates the storage down to exactly m_Count elements, releasing any
// growth slack. A no-op when capacity already equals count.
template<typename T, typename AllocatorT>
void VmaVector<T, AllocatorT>::shrink_to_fit()
{
    if (m_Capacity > m_Count)
    {
        T* newArray = VMA_NULL;
        if (m_Count > 0)
        {
            newArray = VmaAllocateArray<T>(m_Allocator.m_pCallbacks, m_Count);
            memcpy(newArray, m_pArray, m_Count * sizeof(T));
        }
        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
        m_Capacity = m_Count;
        m_pArray = newArray;
    }
}
4186
// Inserts 'src' at position 'index' (0..m_Count inclusive), shifting later
// elements one slot to the right.
template<typename T, typename AllocatorT>
void VmaVector<T, AllocatorT>::insert(size_t index, const T& src)
{
    VMA_HEAVY_ASSERT(index <= m_Count);
    const size_t oldCount = size();
    resize(oldCount + 1);
    if (index < oldCount)
    {
        // Source and destination overlap, hence memmove rather than memcpy.
        memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    }
    m_pArray[index] = src;
}
4199
// Removes the element at 'index', shifting later elements one slot to the left.
template<typename T, typename AllocatorT>
void VmaVector<T, AllocatorT>::remove(size_t index)
{
    VMA_HEAVY_ASSERT(index < m_Count);
    const size_t oldCount = size();
    // No shift is needed when removing the last element.
    if (index < oldCount - 1)
    {
        // Overlapping regions, hence memmove.
        memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    }
    resize(oldCount - 1);
}
4211#endif // _VMA_VECTOR_FUNCTIONS
4212
// Free-function wrapper over VmaVector::insert, used by generic helpers
// (e.g. VmaVectorInsertSorted) that work on different vector types.
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
4218
// Free-function wrapper over VmaVector::remove, counterpart of VmaVectorInsert.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
4224#endif // _VMA_VECTOR
4225
4226#ifndef _VMA_SMALL_VECTOR
4227/*
4228This is a vector (a variable-sized array), optimized for the case when the array is small.
4229
4230It contains some number of elements in-place, which allows it to avoid heap allocation
4231when the actual number of elements is below that threshold. This allows normal "small"
4232cases to be fast without losing generality for large inputs.
4233*/
template<typename T, typename AllocatorT, size_t N>
class VmaSmallVector
{
public:
    typedef T value_type;
    typedef T* iterator;

    VmaSmallVector(const AllocatorT& allocator);
    VmaSmallVector(size_t count, const AllocatorT& allocator);
    template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
    VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;
    template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
    VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;
    ~VmaSmallVector() = default;

    bool empty() const { return m_Count == 0; }
    size_t size() const { return m_Count; }
    // data() switches between the in-place array (count <= N) and heap storage (count > N).
    T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
    T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }
    T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }
    const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
    const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }
    const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }

    iterator begin() { return data(); }
    iterator end() { return data() + m_Count; }

    // Note: pop_front/push_front are O(n) - elements are shifted by memmove.
    void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }
    void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }
    void push_front(const T& src) { insert(0, src); }

    void push_back(const T& src);
    void resize(size_t newCount, bool freeMemory = false);
    void clear(bool freeMemory = false);
    void insert(size_t index, const T& src);
    void remove(size_t index);

    T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; }
    const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; }

private:
    size_t m_Count;
    T m_StaticArray[N]; // Used when m_Size <= N
    VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Size > N
};
4279
4280#ifndef _VMA_SMALL_VECTOR_FUNCTIONS
// Constructs an empty small-vector; the heap-backed VmaVector starts empty.
template<typename T, typename AllocatorT, size_t N>
VmaSmallVector<T, AllocatorT, N>::VmaSmallVector(const AllocatorT& allocator)
    : m_Count(0),
    m_DynamicArray(allocator) {}
4285
// Constructs with 'count' elements: the heap vector is allocated only when the
// count exceeds the in-place capacity N.
template<typename T, typename AllocatorT, size_t N>
VmaSmallVector<T, AllocatorT, N>::VmaSmallVector(size_t count, const AllocatorT& allocator)
    : m_Count(count),
    m_DynamicArray(count > N ? count : 0, allocator) {}
4290
4291template<typename T, typename AllocatorT, size_t N>
4292void VmaSmallVector<T, AllocatorT, N>::push_back(const T& src)
4293{
4294 const size_t newIndex = size();
4295 resize(newIndex + 1);
4296 data()[newIndex] = src;
4297}
4298
// Changes the element count, migrating storage between the in-place array
// (<= N elements) and the heap-backed VmaVector (> N elements) as needed.
// freeMemory only affects the heap buffer (shrink_to_fit).
template<typename T, typename AllocatorT, size_t N>
void VmaSmallVector<T, AllocatorT, N>::resize(size_t newCount, bool freeMemory)
{
    if (newCount > N && m_Count > N)
    {
        // Any direction, staying in m_DynamicArray
        m_DynamicArray.resize(newCount);
        if (freeMemory)
        {
            m_DynamicArray.shrink_to_fit();
        }
    }
    else if (newCount > N && m_Count <= N)
    {
        // Growing, moving from m_StaticArray to m_DynamicArray
        m_DynamicArray.resize(newCount);
        if (m_Count > 0)
        {
            memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
        }
    }
    else if (newCount <= N && m_Count > N)
    {
        // Shrinking, moving from m_DynamicArray to m_StaticArray
        if (newCount > 0)
        {
            memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
        }
        m_DynamicArray.resize(0);
        if (freeMemory)
        {
            m_DynamicArray.shrink_to_fit();
        }
    }
    else
    {
        // Any direction, staying in m_StaticArray - nothing to do here
    }
    m_Count = newCount;
}
4339
4340template<typename T, typename AllocatorT, size_t N>
4341void VmaSmallVector<T, AllocatorT, N>::clear(bool freeMemory)
4342{
4343 m_DynamicArray.clear();
4344 if (freeMemory)
4345 {
4346 m_DynamicArray.shrink_to_fit();
4347 }
4348 m_Count = 0;
4349}
4350
template<typename T, typename AllocatorT, size_t N>
// Inserts a copy of src at position index (0..m_Count), shifting the tail
// right by one with memmove (T must tolerate byte-wise moves).
void VmaSmallVector<T, AllocatorT, N>::insert(size_t index, const T& src)
{
    VMA_HEAVY_ASSERT(index <= m_Count);
    const size_t oldCount = size();
    resize(oldCount + 1);
    // data() must be fetched AFTER resize: storage may have migrated to m_DynamicArray.
    T* const dataPtr = data();
    if (index < oldCount)
    {
        // I know, this could be more optimal for case where memmove can be memcpy directly from m_StaticArray to m_DynamicArray.
        memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
    }
    dataPtr[index] = src;
}
4365
template<typename T, typename AllocatorT, size_t N>
// Removes the element at index (0..m_Count-1), shifting the tail left by one
// with memmove, then shrinks by one (possibly migrating back to m_StaticArray).
void VmaSmallVector<T, AllocatorT, N>::remove(size_t index)
{
    VMA_HEAVY_ASSERT(index < m_Count);
    const size_t oldCount = size();
    if (index < oldCount - 1)
    {
        // I know, this could be more optimal for case where memmove can be memcpy directly from m_DynamicArray to m_StaticArray.
        T* const dataPtr = data();
        memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
    }
    resize(oldCount - 1);
}
4379#endif // _VMA_SMALL_VECTOR_FUNCTIONS
4380#endif // _VMA_SMALL_VECTOR
4381
4382#ifndef _VMA_POOL_ALLOCATOR
4383/*
4384Allocator for objects of type T using a list of arrays (pools) to speed up
4385allocation. Number of elements that can be allocated is not bounded because
4386allocator can create multiple blocks.
4387*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    ~VmaPoolAllocator();
    // Constructs a T in a free slot (forwarding args) and returns it.
    template<typename... Types> T* Alloc(Types&&... args);
    // Destroys a T previously returned by Alloc and recycles its slot.
    void Free(T* ptr);

private:
    // A slot is either a live T or a link in the per-block free list.
    union Item
    {
        uint32_t NextFreeIndex; // UINT32_MAX means end of list.
        alignas(T) char Value[sizeof(T)];
    };
    // One contiguous array of slots; blocks grow geometrically (x1.5).
    struct ItemBlock
    {
        Item* pItems;
        uint32_t Capacity;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    const uint32_t m_FirstBlockCapacity;
    VmaVector<ItemBlock, VmaStlAllocator<ItemBlock>> m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
4417
4418#ifndef _VMA_POOL_ALLOCATOR_FUNCTIONS
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity)
    : m_pAllocationCallbacks(pAllocationCallbacks),
    m_FirstBlockCapacity(firstBlockCapacity),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    // Capacity 1 would break the geometric growth (1 * 3 / 2 == 1).
    VMA_ASSERT(m_FirstBlockCapacity > 1);
}
4427
4428template<typename T>
4429VmaPoolAllocator<T>::~VmaPoolAllocator()
4430{
4431 for (size_t i = m_ItemBlocks.size(); i--;)
4432 vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
4433 m_ItemBlocks.clear();
4434}
4435
template<typename T>
// Finds a block with a free slot (searching newest-first, since newer blocks
// are larger and more likely to have room), pops the slot off that block's
// free list and placement-news a T into it. Creates a new block if all are full.
template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types&&... args)
{
    for (size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if (block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            block.FirstFreeIndex = pItem->NextFreeIndex;
            T* result = (T*)&pItem->Value;
            new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
            return result;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    T* result = (T*)&pItem->Value;
    new(result) T(std::forward<Types>(args)...); // Explicit constructor call.
    return result;
}
4461
4462template<typename T>
4464{
4465 // Search all memory blocks to find ptr.
4466 for (size_t i = m_ItemBlocks.size(); i--; )
4467 {
4468 ItemBlock& block = m_ItemBlocks[i];
4469
4470 // Casting to union.
4471 Item* pItemPtr;
4472 memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
4473
4474 // Check if pItemPtr is in address range of this block.
4475 if ((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
4476 {
4477 ptr->~T(); // Explicit destructor call.
4478 const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
4479 pItemPtr->NextFreeIndex = block.FirstFreeIndex;
4480 block.FirstFreeIndex = index;
4481 return;
4482 }
4483 }
4484 VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
4485}
4486
template<typename T>
// Appends a new block whose capacity grows geometrically (previous * 3 / 2),
// with every slot pre-linked into the block's free list (0 -> 1 -> ... -> UINT32_MAX).
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
        m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;

    const ItemBlock newBlock =
    {
        vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
        newBlockCapacity,
        0
    };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    // (Writing through newBlock.pItems is fine: the pointer was copied into the vector.)
    for (uint32_t i = 0; i < newBlockCapacity - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
4508#endif // _VMA_POOL_ALLOCATOR_FUNCTIONS
4509#endif // _VMA_POOL_ALLOCATOR
4510
4511#ifndef _VMA_RAW_LIST
template<typename T>
// Node of VmaRawList: doubly-linked, owns its payload by value.
struct VmaListItem
{
    VmaListItem* pPrev; // VMA_NULL at the front of the list.
    VmaListItem* pNext; // VMA_NULL at the back of the list.
    T Value;
};
4519
4520// Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    ~VmaRawList() = default;

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Front() const { return m_pFront; }
    const ItemType* Back() const { return m_pBack; }

    // Push overloads without a value leave Value default/uninitialized for the caller to fill.
    ItemType* PushFront();
    ItemType* PushBack();
    ItemType* PushFront(const T& value);
    ItemType* PushBack(const T& value);
    void PopFront();
    void PopBack();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);
    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Clear();
    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    // Nodes come from a pool allocator rather than individual heap allocations.
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};
4565
4566#ifndef _VMA_RAW_LIST_FUNCTIONS
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks)
    : m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128), // 128 = first node-pool block capacity.
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0) {}
4574
4575template<typename T>
4576VmaListItem<T>* VmaRawList<T>::PushFront()
4577{
4578 ItemType* const pNewItem = m_ItemAllocator.Alloc();
4579 pNewItem->pPrev = VMA_NULL;
4580 if (IsEmpty())
4581 {
4582 pNewItem->pNext = VMA_NULL;
4583 m_pFront = pNewItem;
4584 m_pBack = pNewItem;
4585 m_Count = 1;
4586 }
4587 else
4588 {
4589 pNewItem->pNext = m_pFront;
4590 m_pFront->pPrev = pNewItem;
4591 m_pFront = pNewItem;
4592 ++m_Count;
4593 }
4594 return pNewItem;
4595}
4596
4597template<typename T>
4598VmaListItem<T>* VmaRawList<T>::PushBack()
4599{
4600 ItemType* const pNewItem = m_ItemAllocator.Alloc();
4601 pNewItem->pNext = VMA_NULL;
4602 if(IsEmpty())
4603 {
4604 pNewItem->pPrev = VMA_NULL;
4605 m_pFront = pNewItem;
4606 m_pBack = pNewItem;
4607 m_Count = 1;
4608 }
4609 else
4610 {
4611 pNewItem->pPrev = m_pBack;
4612 m_pBack->pNext = pNewItem;
4613 m_pBack = pNewItem;
4614 ++m_Count;
4615 }
4616 return pNewItem;
4617}
4618
4619template<typename T>
4620VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4621{
4622 ItemType* const pNewItem = PushFront();
4623 pNewItem->Value = value;
4624 return pNewItem;
4625}
4626
4627template<typename T>
4628VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4629{
4630 ItemType* const pNewItem = PushBack();
4631 pNewItem->Value = value;
4632 return pNewItem;
4633}
4634
4635template<typename T>
4636void VmaRawList<T>::PopFront()
4637{
4638 VMA_HEAVY_ASSERT(m_Count > 0);
4639 ItemType* const pFrontItem = m_pFront;
4640 ItemType* const pNextItem = pFrontItem->pNext;
4641 if (pNextItem != VMA_NULL)
4642 {
4643 pNextItem->pPrev = VMA_NULL;
4644 }
4645 m_pFront = pNextItem;
4646 m_ItemAllocator.Free(pFrontItem);
4647 --m_Count;
4648}
4649
4650template<typename T>
4651void VmaRawList<T>::PopBack()
4652{
4653 VMA_HEAVY_ASSERT(m_Count > 0);
4654 ItemType* const pBackItem = m_pBack;
4655 ItemType* const pPrevItem = pBackItem->pPrev;
4656 if(pPrevItem != VMA_NULL)
4657 {
4658 pPrevItem->pNext = VMA_NULL;
4659 }
4660 m_pBack = pPrevItem;
4661 m_ItemAllocator.Free(pBackItem);
4662 --m_Count;
4663}
4664
4665template<typename T>
4666void VmaRawList<T>::Clear()
4667{
4668 if (IsEmpty() == false)
4669 {
4670 ItemType* pItem = m_pBack;
4671 while (pItem != VMA_NULL)
4672 {
4673 ItemType* const pPrevItem = pItem->pPrev;
4674 m_ItemAllocator.Free(pItem);
4675 pItem = pPrevItem;
4676 }
4677 m_pFront = VMA_NULL;
4678 m_pBack = VMA_NULL;
4679 m_Count = 0;
4680 }
4681}
4682
template<typename T>
// Unlinks pItem from anywhere in the list and returns its node to the pool.
// pItem must belong to this list.
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    // Fix the predecessor's forward link, or the list head.
    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    // Fix the successor's backward link, or the list tail.
    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
4712
template<typename T>
// Inserts a new (value-uninitialized) node immediately before pItem.
// A null pItem means "before the end", i.e. PushBack.
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            // Inserting before the front: the new node becomes the head.
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}
4738
template<typename T>
// Inserts a new (value-uninitialized) node immediately after pItem.
// A null pItem means "after the beginning", i.e. PushFront.
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            // Inserting after the back: the new node becomes the tail.
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}
4764
4765template<typename T>
4766VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4767{
4768 ItemType* const newItem = InsertBefore(pItem);
4769 newItem->Value = value;
4770 return newItem;
4771}
4772
4773template<typename T>
4774VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4775{
4776 ItemType* const newItem = InsertAfter(pItem);
4777 newItem->Value = value;
4778 return newItem;
4779}
4780#endif // _VMA_RAW_LIST_FUNCTIONS
4781#endif // _VMA_RAW_LIST
4782
4783#ifndef _VMA_LIST
template<typename T, typename AllocatorT>
// std::list-like wrapper over VmaRawList providing (const/reverse) iterators.
// end()/rend() are represented by a null m_pItem, which is why the iterator
// decrement operators (defined out of line) need the list pointer.
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class reverse_iterator;
    class const_iterator;
    class const_reverse_iterator;

    class iterator
    {
        friend class const_iterator;
        friend class VmaList<T, AllocatorT>;
    public:
        iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
        iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}

        T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
        T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }

        bool operator==(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
        bool operator!=(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }

        iterator operator++(int) { iterator result = *this; ++*this; return result; }
        iterator operator--(int) { iterator result = *this; --*this; return result; }

        iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }
        iterator& operator--();

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem; // VMA_NULL represents end().

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
    };
    class reverse_iterator
    {
        friend class const_reverse_iterator;
        friend class VmaList<T, AllocatorT>;
    public:
        reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
        reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}

        T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
        T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }

        bool operator==(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
        bool operator!=(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }

        reverse_iterator operator++(int) { reverse_iterator result = *this; ++* this; return result; }
        reverse_iterator operator--(int) { reverse_iterator result = *this; --* this; return result; }

        reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }
        reverse_iterator& operator--();

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem; // VMA_NULL represents rend().

        reverse_iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
    };
    class const_iterator
    {
        friend class VmaList<T, AllocatorT>;
    public:
        const_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
        const_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
        const_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}

        iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }

        const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
        const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }

        bool operator==(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
        bool operator!=(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }

        const_iterator operator++(int) { const_iterator result = *this; ++* this; return result; }
        const_iterator operator--(int) { const_iterator result = *this; --* this; return result; }

        const_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }
        const_iterator& operator--();

    private:
        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem; // VMA_NULL represents cend().

        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
    };
    class const_reverse_iterator
    {
        friend class VmaList<T, AllocatorT>;
    public:
        const_reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
        const_reverse_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
        const_reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}

        reverse_iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }

        const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
        const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }

        bool operator==(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
        bool operator!=(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }

        const_reverse_iterator operator++(int) { const_reverse_iterator result = *this; ++* this; return result; }
        const_reverse_iterator operator--(int) { const_reverse_iterator result = *this; --* this; return result; }

        const_reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }
        const_reverse_iterator& operator--();

    private:
        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem; // VMA_NULL represents crend().

        const_reverse_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) {}

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    const_iterator begin() const { return cbegin(); }
    const_iterator end() const { return cend(); }

    reverse_iterator rbegin() { return reverse_iterator(&m_RawList, m_RawList.Back()); }
    reverse_iterator rend() { return reverse_iterator(&m_RawList, VMA_NULL); }

    const_reverse_iterator crbegin() const { return const_reverse_iterator(&m_RawList, m_RawList.Back()); }
    const_reverse_iterator crend() const { return const_reverse_iterator(&m_RawList, VMA_NULL); }

    const_reverse_iterator rbegin() const { return crbegin(); }
    const_reverse_iterator rend() const { return crend(); }

    void push_back(const T& value) { m_RawList.PushBack(value); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

    void clear() { m_RawList.Clear(); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }

private:
    VmaRawList<T> m_RawList;
};
4934
4935#ifndef _VMA_LIST_FUNCTIONS
template<typename T, typename AllocatorT>
// Pre-decrement. Decrementing end() (null m_pItem) yields the last element.
typename VmaList<T, AllocatorT>::iterator& VmaList<T, AllocatorT>::iterator::operator--()
{
    if (m_pItem != VMA_NULL)
    {
        m_pItem = m_pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
        m_pItem = m_pList->Back();
    }
    return *this;
}
4950
template<typename T, typename AllocatorT>
// Pre-decrement. Decrementing rend() (null m_pItem) yields the front element,
// which is the last element of the reverse sequence.
typename VmaList<T, AllocatorT>::reverse_iterator& VmaList<T, AllocatorT>::reverse_iterator::operator--()
{
    if (m_pItem != VMA_NULL)
    {
        m_pItem = m_pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
        m_pItem = m_pList->Front();
    }
    return *this;
}
4965
template<typename T, typename AllocatorT>
// Pre-decrement. Decrementing cend() (null m_pItem) yields the last element.
typename VmaList<T, AllocatorT>::const_iterator& VmaList<T, AllocatorT>::const_iterator::operator--()
{
    if (m_pItem != VMA_NULL)
    {
        m_pItem = m_pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
        m_pItem = m_pList->Back();
    }
    return *this;
}
4980
4981template<typename T, typename AllocatorT>
4982typename VmaList<T, AllocatorT>::const_reverse_iterator& VmaList<T, AllocatorT>::const_reverse_iterator::operator--()
4983{
4984 if (m_pItem != VMA_NULL)
4985 {
4986 m_pItem = m_pItem->pNext;
4987 }
4988 else
4989 {
4990 VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
4991 m_pItem = m_pList->Back();
4992 }
4993 return *this;
4994}
4995#endif // _VMA_LIST_FUNCTIONS
4996#endif // _VMA_LIST
4997
4998#ifndef _VMA_INTRUSIVE_LINKED_LIST
4999/*
5000Expected interface of ItemTypeTraits:
5001struct MyItemTypeTraits
5002{
5003 typedef MyItem ItemType;
5004 static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; }
5005 static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; }
5006 static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; }
5007 static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }
5008};
5009*/
template<typename ItemTypeTraits>
// Doubly-linked list that does NOT own its items: prev/next pointers live
// inside the items themselves and are accessed through ItemTypeTraits
// (see the expected-interface comment above).
class VmaIntrusiveLinkedList
{
public:
    typedef typename ItemTypeTraits::ItemType ItemType;
    static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); }
    static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); }

    // Movable, not copyable.
    VmaIntrusiveLinkedList() = default;
    VmaIntrusiveLinkedList(VmaIntrusiveLinkedList && src);
    VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList&) = delete;
    VmaIntrusiveLinkedList& operator=(VmaIntrusiveLinkedList&& src);
    VmaIntrusiveLinkedList& operator=(const VmaIntrusiveLinkedList&) = delete;
    // Items are not owned, so the list must be emptied by the user before destruction.
    ~VmaIntrusiveLinkedList() { VMA_HEAVY_ASSERT(IsEmpty()); }

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }
    ItemType* Front() { return m_Front; }
    ItemType* Back() { return m_Back; }
    const ItemType* Front() const { return m_Front; }
    const ItemType* Back() const { return m_Back; }

    // Items pushed must have null prev/next links (not belong to another list).
    void PushBack(ItemType* item);
    void PushFront(ItemType* item);
    ItemType* PopBack();
    ItemType* PopFront();

    // MyItem can be null - it means PushBack.
    void InsertBefore(ItemType* existingItem, ItemType* newItem);
    // MyItem can be null - it means PushFront.
    void InsertAfter(ItemType* existingItem, ItemType* newItem);
    void Remove(ItemType* item);
    void RemoveAll();

private:
    ItemType* m_Front = VMA_NULL;
    ItemType* m_Back = VMA_NULL;
    size_t m_Count = 0;
};
5050
5051#ifndef _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
template<typename ItemTypeTraits>
// Move constructor: steals the source's links and leaves it empty.
VmaIntrusiveLinkedList<ItemTypeTraits>::VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src)
    : m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count)
{
    src.m_Front = src.m_Back = VMA_NULL;
    src.m_Count = 0;
}
5059
template<typename ItemTypeTraits>
// Move assignment: the destination must already be empty (items are not owned,
// so there is nothing this list could do with leftover entries).
VmaIntrusiveLinkedList<ItemTypeTraits>& VmaIntrusiveLinkedList<ItemTypeTraits>::operator=(VmaIntrusiveLinkedList&& src)
{
    if (&src != this)
    {
        VMA_HEAVY_ASSERT(IsEmpty());
        m_Front = src.m_Front;
        m_Back = src.m_Back;
        m_Count = src.m_Count;
        src.m_Front = src.m_Back = VMA_NULL;
        src.m_Count = 0;
    }
    return *this;
}
5074
5075template<typename ItemTypeTraits>
5076void VmaIntrusiveLinkedList<ItemTypeTraits>::PushBack(ItemType* item)
5077{
5078 VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
5079 if (IsEmpty())
5080 {
5081 m_Front = item;
5082 m_Back = item;
5083 m_Count = 1;
5084 }
5085 else
5086 {
5087 ItemTypeTraits::AccessPrev(item) = m_Back;
5088 ItemTypeTraits::AccessNext(m_Back) = item;
5089 m_Back = item;
5090 ++m_Count;
5091 }
5092}
5093
5094template<typename ItemTypeTraits>
5095void VmaIntrusiveLinkedList<ItemTypeTraits>::PushFront(ItemType* item)
5096{
5097 VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
5098 if (IsEmpty())
5099 {
5100 m_Front = item;
5101 m_Back = item;
5102 m_Count = 1;
5103 }
5104 else
5105 {
5106 ItemTypeTraits::AccessNext(item) = m_Front;
5107 ItemTypeTraits::AccessPrev(m_Front) = item;
5108 m_Front = item;
5109 ++m_Count;
5110 }
5111}
5112
5113template<typename ItemTypeTraits>
5114typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopBack()
5115{
5116 VMA_HEAVY_ASSERT(m_Count > 0);
5117 ItemType* const backItem = m_Back;
5118 ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem);
5119 if (prevItem != VMA_NULL)
5120 {
5121 ItemTypeTraits::AccessNext(prevItem) = VMA_NULL;
5122 }
5123 m_Back = prevItem;
5124 --m_Count;
5125 ItemTypeTraits::AccessPrev(backItem) = VMA_NULL;
5126 ItemTypeTraits::AccessNext(backItem) = VMA_NULL;
5127 return backItem;
5128}
5129
5130template<typename ItemTypeTraits>
5131typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopFront()
5132{
5133 VMA_HEAVY_ASSERT(m_Count > 0);
5134 ItemType* const frontItem = m_Front;
5135 ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem);
5136 if (nextItem != VMA_NULL)
5137 {
5138 ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL;
5139 }
5140 m_Front = nextItem;
5141 --m_Count;
5142 ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL;
5143 ItemTypeTraits::AccessNext(frontItem) = VMA_NULL;
5144 return frontItem;
5145}
5146
template<typename ItemTypeTraits>
// Links newItem immediately before existingItem. A null existingItem means PushBack.
// newItem must not already belong to a list.
void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertBefore(ItemType* existingItem, ItemType* newItem)
{
    VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
    if (existingItem != VMA_NULL)
    {
        ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem);
        ItemTypeTraits::AccessPrev(newItem) = prevItem;
        ItemTypeTraits::AccessNext(newItem) = existingItem;
        ItemTypeTraits::AccessPrev(existingItem) = newItem;
        if (prevItem != VMA_NULL)
        {
            ItemTypeTraits::AccessNext(prevItem) = newItem;
        }
        else
        {
            // Inserting before the front: the new item becomes the head.
            VMA_HEAVY_ASSERT(m_Front == existingItem);
            m_Front = newItem;
        }
        ++m_Count;
    }
    else
        PushBack(newItem);
}
5171
5172template<typename ItemTypeTraits>
5173void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertAfter(ItemType* existingItem, ItemType* newItem)
5174{
5175 VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
5176 if (existingItem != VMA_NULL)
5177 {
5178 ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem);
5179 ItemTypeTraits::AccessNext(newItem) = nextItem;
5180 ItemTypeTraits::AccessPrev(newItem) = existingItem;
5181 ItemTypeTraits::AccessNext(existingItem) = newItem;
5182 if (nextItem != VMA_NULL)
5183 {
5184 ItemTypeTraits::AccessPrev(nextItem) = newItem;
5185 }
5186 else
5187 {
5188 VMA_HEAVY_ASSERT(m_Back == existingItem);
5189 m_Back = newItem;
5190 }
5191 ++m_Count;
5192 }
5193 else
5194 return PushFront(newItem);
5195}
5196
template<typename ItemTypeTraits>
// Unlinks item from anywhere in the list and clears its prev/next links.
// item must belong to this list.
void VmaIntrusiveLinkedList<ItemTypeTraits>::Remove(ItemType* item)
{
    VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0);
    // Fix the predecessor's forward link, or the list head.
    if (ItemTypeTraits::GetPrev(item) != VMA_NULL)
    {
        ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item);
    }
    else
    {
        VMA_HEAVY_ASSERT(m_Front == item);
        m_Front = ItemTypeTraits::GetNext(item);
    }

    // Fix the successor's backward link, or the list tail.
    if (ItemTypeTraits::GetNext(item) != VMA_NULL)
    {
        ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item);
    }
    else
    {
        VMA_HEAVY_ASSERT(m_Back == item);
        m_Back = ItemTypeTraits::GetPrev(item);
    }
    ItemTypeTraits::AccessPrev(item) = VMA_NULL;
    ItemTypeTraits::AccessNext(item) = VMA_NULL;
    --m_Count;
}
5224
template<typename ItemTypeTraits>
// Unlinks every item (clearing each item's prev/next) without destroying them -
// the items are not owned by the list.
void VmaIntrusiveLinkedList<ItemTypeTraits>::RemoveAll()
{
    if (!IsEmpty())
    {
        ItemType* item = m_Back;
        while (item != VMA_NULL)
        {
            ItemType* const prevItem = ItemTypeTraits::AccessPrev(item);
            ItemTypeTraits::AccessPrev(item) = VMA_NULL;
            ItemTypeTraits::AccessNext(item) = VMA_NULL;
            item = prevItem;
        }
        m_Front = VMA_NULL;
        m_Back = VMA_NULL;
        m_Count = 0;
    }
}
5243#endif // _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
5244#endif // _VMA_INTRUSIVE_LINKED_LIST
5245
5246// Unused in this version.
5247#if 0
5248
5249#ifndef _VMA_PAIR
template<typename T1, typename T2>
// Minimal std::pair substitute used by VmaMap (currently compiled out via #if 0).
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() {}
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) {}
};
5259
template<typename FirstT, typename SecondT>
// Orders VmaPairs by their first member; the second overload allows
// heterogeneous comparison against a bare key in binary searches.
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
5272#endif // _VMA_PAIR
5273
5274#ifndef _VMA_MAP
5275/* Class compatible with subset of interface of std::unordered_map.
5276KeyT, ValueT must be POD because they will be stored in VmaVector.
5277*/
template<typename KeyT, typename ValueT>
// Sorted-vector map: pairs are kept ordered by key, so find/insert use binary
// search. Iterators are raw pointers into the vector and are invalidated by
// insert/erase.
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) {}

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }
    size_t size() { return m_Vector.size(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType>> m_Vector;
};
5298
5299#ifndef _VMA_MAP_FUNCTIONS
template<typename KeyT, typename ValueT>
// Inserts the pair at its sorted position (binary search by key).
// NOTE(review): duplicate keys are not rejected here - presumably callers
// never insert the same key twice; verify at call sites.
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
5310
template<typename KeyT, typename ValueT>
// Binary-searches for key; returns a pointer to the matching pair or end().
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    // The lower-bound result must be checked for an exact key match.
    if ((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
5328
template<typename KeyT, typename ValueT>
// Removes the pair pointed to by it, shifting subsequent elements left.
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
5334#endif // _VMA_MAP_FUNCTIONS
5335#endif // _VMA_MAP
5336
5337#endif // #if 0
5338
5339#if !defined(_VMA_STRING_BUILDER) && VMA_STATS_STRING_ENABLED
// Accumulates text in a growable in-memory buffer allocated through the
// provided VkAllocationCallbacks. Used to build VMA's stats strings.
class VmaStringBuilder
{
public:
    VmaStringBuilder(const VkAllocationCallbacks* allocationCallbacks) : m_Data(VmaStlAllocator<char>(allocationCallbacks)) {}
    ~VmaStringBuilder() = default;

    // Number of characters accumulated. NOTE: the buffer is not NUL-terminated.
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }
    void AddNewLine() { Add('\n'); }
    void Add(char ch) { m_Data.push_back(ch); }

    // Appends a NUL-terminated string (without the terminator).
    void Add(const char* pStr);
    // Append the decimal representation of a number.
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    // Appends the pointer formatted as text via VmaPtrToStr ("%p"-style).
    void AddPointer(const void* ptr);

private:
    VmaVector<char, VmaStlAllocator<char>> m_Data;
};
5359
5360#ifndef _VMA_STRING_BUILDER_FUNCTIONS
5361void VmaStringBuilder::Add(const char* pStr)
5362{
5363 const size_t strLen = strlen(pStr);
5364 if (strLen > 0)
5365 {
5366 const size_t oldCount = m_Data.size();
5367 m_Data.resize(oldCount + strLen);
5368 memcpy(m_Data.data() + oldCount, pStr, strLen);
5369 }
5370}
5371
5372void VmaStringBuilder::AddNumber(uint32_t num)
5373{
5374 char buf[11];
5375 buf[10] = '\0';
5376 char* p = &buf[10];
5377 do
5378 {
5379 *--p = '0' + (num % 10);
5380 num /= 10;
5381 } while (num);
5382 Add(p);
5383}
5384
5385void VmaStringBuilder::AddNumber(uint64_t num)
5386{
5387 char buf[21];
5388 buf[20] = '\0';
5389 char* p = &buf[20];
5390 do
5391 {
5392 *--p = '0' + (num % 10);
5393 num /= 10;
5394 } while (num);
5395 Add(p);
5396}
5397
// Appends the textual form of ptr as produced by VmaPtrToStr ("%p"-style).
void VmaStringBuilder::AddPointer(const void* ptr)
{
    char buf[21]; // Fits the text of a 64-bit pointer including the NUL.
    VmaPtrToStr(buf, sizeof(buf), ptr);
    Add(buf);
}
5404#endif //_VMA_STRING_BUILDER_FUNCTIONS
5405#endif // _VMA_STRING_BUILDER
5406
5407#if !defined(_VMA_JSON_WRITER) && VMA_STATS_STRING_ENABLED
5408/*
5409Allows to conveniently build a correct JSON document to be written to the
5410VmaStringBuilder passed to the constructor.
5411*/
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    // sb - string builder to write the document to. Must remain alive for the whole lifetime of this object.
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    // Asserts that the document was fully closed (no open string/collections).
    ~VmaJsonWriter();

    // Begins object by writing "{".
    // Inside an object, you must call pairs of WriteString and a value, e.g.:
    // j.BeginObject(true); j.WriteString("A"); j.WriteNumber(1); j.WriteString("B"); j.WriteNumber(2); j.EndObject();
    // Will write: { "A": 1, "B": 2 }
    void BeginObject(bool singleLine = false);
    // Ends object by writing "}".
    void EndObject();

    // Begins array by writing "[".
    // Inside an array, you can write a sequence of any values.
    void BeginArray(bool singleLine = false);
    // Ends array by writing "]".
    void EndArray();

    // Writes a string value inside "".
    // pStr can contain any ANSI characters, including '"', new line etc. - they will be properly escaped.
    void WriteString(const char* pStr);

    // Begins writing a string value.
    // Call BeginString, ContinueString, ContinueString, ..., EndString instead of
    // WriteString to conveniently build the string content incrementally, made of
    // parts including numbers.
    void BeginString(const char* pStr = VMA_NULL);
    // Posts next part of an open string.
    void ContinueString(const char* pStr);
    // Posts next part of an open string. The number is converted to decimal characters.
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Size(size_t n);
    // Posts next part of an open string. Pointer value is converted to characters
    // using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00
    void ContinueString_Pointer(const void* ptr);
    // Ends writing a string value by writing '"'.
    void EndString(const char* pStr = VMA_NULL);

    // Writes a number value.
    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteSize(size_t n);
    // Writes a boolean value - false or true.
    void WriteBool(bool b);
    // Writes a null value.
    void WriteNull();

private:
    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently-open object/array; tracks how many values were
    // written (for comma placement) and whether it was begun in single-line mode.
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount;
        bool singleLineMode;
    };

    static const char* const INDENT;

    VmaStringBuilder& m_SB;
    // Stack of open collections; empty at document start and end.
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;

    // Write size_t for less than 64bits
    void WriteSize(size_t n, std::integral_constant<bool, false>) { m_SB.AddNumber(static_cast<uint32_t>(n)); }
    // Write size_t for 64bits
    void WriteSize(size_t n, std::integral_constant<bool, true>) { m_SB.AddNumber(static_cast<uint64_t>(n)); }

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
const char* const VmaJsonWriter::INDENT = " ";
5492
5493#ifndef _VMA_JSON_WRITER_FUNCTIONS
// Stores only a reference to sb - the builder must outlive this writer.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb)
    : m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false) {}
5498
// Verifies the document was finished correctly: no string left open and
// every BeginObject/BeginArray matched by the corresponding End call.
VmaJsonWriter::~VmaJsonWriter()
{
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
5504
5505void VmaJsonWriter::BeginObject(bool singleLine)
5506{
5507 VMA_ASSERT(!m_InsideString);
5508
5509 BeginValue(false);
5510 m_SB.Add('{');
5511
5512 StackItem item;
5513 item.type = COLLECTION_TYPE_OBJECT;
5514 item.valueCount = 0;
5515 item.singleLineMode = singleLine;
5516 m_Stack.push_back(item);
5517}
5518
5519void VmaJsonWriter::EndObject()
5520{
5521 VMA_ASSERT(!m_InsideString);
5522
5523 WriteIndent(true);
5524 m_SB.Add('}');
5525
5526 VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
5527 m_Stack.pop_back();
5528}
5529
5530void VmaJsonWriter::BeginArray(bool singleLine)
5531{
5532 VMA_ASSERT(!m_InsideString);
5533
5534 BeginValue(false);
5535 m_SB.Add('[');
5536
5537 StackItem item;
5538 item.type = COLLECTION_TYPE_ARRAY;
5539 item.valueCount = 0;
5540 item.singleLineMode = singleLine;
5541 m_Stack.push_back(item);
5542}
5543
5544void VmaJsonWriter::EndArray()
5545{
5546 VMA_ASSERT(!m_InsideString);
5547
5548 WriteIndent(true);
5549 m_SB.Add(']');
5550
5551 VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
5552 m_Stack.pop_back();
5553}
5554
5555void VmaJsonWriter::WriteString(const char* pStr)
5556{
5557 BeginString(pStr);
5558 EndString();
5559}
5560
5561void VmaJsonWriter::BeginString(const char* pStr)
5562{
5563 VMA_ASSERT(!m_InsideString);
5564
5565 BeginValue(true);
5566 m_SB.Add('"');
5567 m_InsideString = true;
5568 if (pStr != VMA_NULL && pStr[0] != '\0')
5569 {
5570 ContinueString(pStr);
5571 }
5572}
5573
5574void VmaJsonWriter::ContinueString(const char* pStr)
5575{
5576 VMA_ASSERT(m_InsideString);
5577
5578 const size_t strLen = strlen(pStr);
5579 for (size_t i = 0; i < strLen; ++i)
5580 {
5581 char ch = pStr[i];
5582 if (ch == '\\')
5583 {
5584 m_SB.Add("\\\\");
5585 }
5586 else if (ch == '"')
5587 {
5588 m_SB.Add("\\\"");
5589 }
5590 else if (ch >= 32)
5591 {
5592 m_SB.Add(ch);
5593 }
5594 else switch (ch)
5595 {
5596 case '\b':
5597 m_SB.Add("\\b");
5598 break;
5599 case '\f':
5600 m_SB.Add("\\f");
5601 break;
5602 case '\n':
5603 m_SB.Add("\\n");
5604 break;
5605 case '\r':
5606 m_SB.Add("\\r");
5607 break;
5608 case '\t':
5609 m_SB.Add("\\t");
5610 break;
5611 default:
5612 VMA_ASSERT(0 && "Character not currently supported.");
5613 break;
5614 }
5615 }
5616}
5617
// Appends the decimal digits of n to the currently open string value.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
5623
// Appends the decimal digits of n to the currently open string value.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
5629
// Appends the decimal digits of a size_t to the currently open string value,
// dispatching on whether size_t is 64-bit via tag overloads of WriteSize.
void VmaJsonWriter::ContinueString_Size(size_t n)
{
    VMA_ASSERT(m_InsideString);
    // Fix for AppleClang incorrect type casting
    // TODO: Change to if constexpr when C++17 used as minimal standard
    WriteSize(n, std::is_same<size_t, uint64_t>{});
}
5637
// Appends ptr formatted "%p"-style to the currently open string value.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
5643
5644void VmaJsonWriter::EndString(const char* pStr)
5645{
5646 VMA_ASSERT(m_InsideString);
5647 if (pStr != VMA_NULL && pStr[0] != '\0')
5648 {
5649 ContinueString(pStr);
5650 }
5651 m_SB.Add('"');
5652 m_InsideString = false;
5653}
5654
// Writes n as a standalone JSON number value (not inside a string).
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
5661
// Writes n as a standalone JSON number value (not inside a string).
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
5668
5669void VmaJsonWriter::WriteSize(size_t n)
5670{
5671 VMA_ASSERT(!m_InsideString);
5672 BeginValue(false);
5673 // Fix for AppleClang incorrect type casting