author     Robert Nagy <rnagy@FreeBSD.org>  2023-12-23 21:38:28 +0000
committer  Robert Nagy <rnagy@FreeBSD.org>  2023-12-23 21:40:09 +0000
commit     3f144c348f95ac9df705e7bc713c382e370589ad (patch)
tree       64e4e02cca28cc057f2a06deebbcd989e8a36613
parent     fe8a74f7f89cf9287f5162ffced3f0838a5e3552 (diff)
www/ungoogled-chromium: update to 120.0.6099.129
Security: https://vuxml.freebsd.org/freebsd/1b2a8e8a-9fd5-11ee-86bb-a8a1599412c6.html

(cherry picked from commit 2b72ddf76578b82d7d146d5268cd88a00650b88f)
-rw-r--r--  www/ungoogled-chromium/Makefile | 2
-rw-r--r--  www/ungoogled-chromium/distinfo | 14
-rw-r--r--  www/ungoogled-chromium/files/patch-BUILD.gn | 16
-rw-r--r--  www/ungoogled-chromium/files/patch-apps_ui_views_app__window__frame__view.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-base_BUILD.gn | 20
-rw-r--r--  www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__base_threading_platform__thread__posix.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_BUILD.gn (renamed from www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_BUILD.gn) | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_address__space__randomization.h (renamed from www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_address__space__randomization.h) | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_page__allocator.h (renamed from www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_page__allocator.h) | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_page__allocator__constants.h (renamed from www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_page__allocator__constants.h) | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_page__allocator__internals__posix.h (renamed from www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_page__allocator__internals__posix.h) | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__address__space.cc (renamed from www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__address__space.cc) | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_compiler__specific.h (renamed from www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__base_compiler__specific.h) | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_debug_stack__trace__posix.cc (renamed from www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__base_debug_stack__trace__posix.cc) | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_rand__util__posix.cc (renamed from www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__base_rand__util__posix.cc) | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_threading_platform__thread__internal__posix.h (renamed from www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__base_threading_platform__thread__internal__posix.h) | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_threading_platform__thread__posix.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__config.h (renamed from www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__config.h) | 8
-rw-r--r--  www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__constants.h (renamed from www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__constants.h) | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__forward.h (renamed from www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__forward.h) | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__page__constants.h (renamed from www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__page__constants.h) | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__root.cc (renamed from www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__root.cc) | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_spinning__mutex.cc (renamed from www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_spinning__mutex.cc) | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_starscan_stack_stack.cc (renamed from www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_starscan_stack_stack.cc) | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-base_debug_stack__trace.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-base_files_file__path__watcher__unittest.cc | 8
-rw-r--r--  www/ungoogled-chromium/files/patch-base_files_file__util__unittest.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-base_linux__util.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-base_process_process__metrics.h | 20
-rw-r--r--  www/ungoogled-chromium/files/patch-base_process_process__metrics__unittest.cc | 28
-rw-r--r--  www/ungoogled-chromium/files/patch-base_profiler_module__cache.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-base_rand__util.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-base_system_sys__info.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-base_system_sys__info.h | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-base_system_sys__info__posix.cc | 10
-rw-r--r--  www/ungoogled-chromium/files/patch-build_config_compiler_BUILD.gn | 40
-rw-r--r--  www/ungoogled-chromium/files/patch-build_linux_unbundle_libusb.gn | 7
-rw-r--r--  www/ungoogled-chromium/files/patch-build_linux_unbundle_replace__gn__files.py | 10
-rw-r--r--  www/ungoogled-chromium/files/patch-build_toolchain_gcc__toolchain.gni | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-cc_BUILD.gn | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-cc_paint_paint__op__writer.h | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_app_app__management__strings.grdp | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_app_chrome__main__delegate.cc | 22
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_app_chromium__strings.grd | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_app_generated__resources.grd | 20
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_app_google__chrome__strings.grd | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_app_theme_chrome__unscaled__resources.grd | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_BUILD.gn | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_about__flags.cc | 85
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_background_background__mode__manager.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_browser__features.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_browser__features.h | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_browser__process__impl.cc | 8
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_browser__process__impl.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__interface__binders.cc | 18
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__main.cc | 16
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__main__linux.cc | 13
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_chrome__content__browser__client.cc | 38
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_chrome__content__browser__client.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_component__updater_registration.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_download_bubble_download__bubble__update__service.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_download_chrome__download__manager__delegate.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_download_download__commands.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_download_download__item__model.cc | 8
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_download_download__prefs.cc | 16
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_download_download__prefs.h | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_connectors__service.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_device__trust_device__trust__connector__service__factory.cc | 16
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_device__trust_device__trust__service__factory.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_reporting_realtime__reporting__client.cc | 10
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_enterprise_remote__commands_cbcm__remote__commands__factory.cc | 10
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_extensions_BUILD.gn | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_api__browser__context__keyed__service__factories.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_messaging_native__process__launcher__posix.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_passwords__private_passwords__private__delegate__impl.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_runtime_chrome__runtime__api__delegate.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_extensions_external__provider__impl.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_file__system__access_chrome__file__system__access__permission__context.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_flag__descriptions.cc | 38
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_flag__descriptions.h | 38
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_intranet__redirect__detector.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_media_router_discovery_BUILD.gn | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_metrics_chrome__metrics__service__client.cc | 8
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_metrics_power_process__monitor.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_net_system__network__context__manager.cc | 38
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_net_system__network__context__manager.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_performance__manager_metrics_cpu__probe_cpu__probe.cc | 12
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_policy_chrome__browser__cloud__management__controller__desktop.cc | 14
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_policy_configuration__policy__handler__list__factory.cc | 53
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_prefs_browser__prefs.cc | 10
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_printing_print__backend__service__manager.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_printing_printer__query.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_profiles_chrome__browser__main__extra__parts__profiles.cc | 14
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_profiles_profile__impl.cc | 8
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_resources_settings_route.ts | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_download__protection_file__analyzer.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_download__protection_file__analyzer.h | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_incident__reporting_incident__reporting__service.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_sync_chrome__sync__client.cc | 10
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_sync_sync__service__factory.cc | 10
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_sync_sync__service__util.cc | 20
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_themes_theme__service.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_actions_chrome__action__id.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_browser__command__controller.cc | 14
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_browser__dialogs.h | 29
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_chrome__pages.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_chrome__pages.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_omnibox_omnibox__pedal__implementations.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_startup_startup__browser__creator__impl.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_tab__helpers.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_tabs_organization_trigger__observer.h | 10
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_task__manager_task__manager__table__model.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_ui__features.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_browser__view.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_picture__in__picture__browser__frame__view.cc | 20
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_picture__in__picture__browser__frame__view.h | 10
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_views_hung__renderer__view.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_views_profiles_profile__menu__view__base.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tab__search__bubble__host.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tabs_tab.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tabs_tab__drag__controller.cc | 14
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tabs_tab__hover__card__bubble__view.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_views_web__apps_web__app__integration__test__driver.cc | 20
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_web__applications_web__app__dialogs.h | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_chrome__web__ui__controller__factory.cc | 14
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_ntp_app__launcher__handler.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_web__applications_os__integration_os__integration__test__override.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_web__applications_os__integration_web__app__file__handler__registration.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_web__applications_os__integration_web__app__shortcut.h | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_web__applications_test_os__integration__test__override__impl.h | 10
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_web__applications_web__app__install__info.h | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_browser_webauthn_chrome__authenticator__request__delegate.cc | 13
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_common_chrome__features.cc | 10
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_common_chrome__features.h | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_common_chrome__paths.cc | 16
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_common_chrome__switches.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_common_chrome__switches.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_common_media_cdm__registration.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_common_pref__names.h | 20
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_common_url__constants.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_common_url__constants.h | 10
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_common_webui__url__constants.cc | 12
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_common_webui__url__constants.h | 10
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_services_printing_print__backend__service__impl.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_test_BUILD.gn | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_test_chromedriver_chrome__launcher.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-chrome_test_chromedriver_chrome_chrome__finder.cc | 25
-rw-r--r--  www/ungoogled-chromium/files/patch-components_autofill_core_browser_data__model_autofill__i18n__api.h | 16
-rw-r--r--  www/ungoogled-chromium/files/patch-components_autofill_core_browser_payments_iban__save__manager.cc | 16
-rw-r--r--  www/ungoogled-chromium/files/patch-components_autofill_core_browser_personal__data__manager.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-components_autofill_core_common_autofill__payments__features.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-components_autofill_core_common_autofill__util.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-components_commerce_core_commerce__feature__list.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-components_crash_core_app_BUILD.gn | 10
-rw-r--r--  www/ungoogled-chromium/files/patch-components_discardable__memory_service_discardable__shared__memory__manager.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-components_eye__dropper_eye__dropper__view.cc (renamed from www/ungoogled-chromium/files/patch-chrome_browser_ui_views_eye__dropper_eye__dropper__view.cc) | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-components_eye__dropper_eye__dropper__view__aura.cc (renamed from www/ungoogled-chromium/files/patch-chrome_browser_ui_views_eye__dropper_eye__dropper__view__aura.cc) | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__configurations.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__constants.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__constants.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__list.cc | 8
-rw-r--r--  www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__list.h | 12
-rw-r--r--  www/ungoogled-chromium/files/patch-components_feed_core_v2_feed__network__impl__unittest.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-components_miracle__parameter_common_public_miracle__parameter.h | 12
-rw-r--r--  www/ungoogled-chromium/files/patch-components_neterror_resources_neterror.js | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-components_optimization__guide_core_optimization__guide__util.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-components_password__manager_core_browser_features_password__features.cc | 17
-rw-r--r--  www/ungoogled-chromium/files/patch-components_password__manager_core_browser_features_password__features.h | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-components_password__manager_core_browser_login__database__unittest.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-components_password__manager_core_common_password__manager__features.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-components_policy_core_common_cloud_cloud__policy__client.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-components_safe__browsing_core_browser_db_v4__protocol__manager__util.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-components_supervised__user_core_browser_supervised__user__service.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-components_supervised__user_core_common_features.cc | 20
-rw-r--r--  www/ungoogled-chromium/files/patch-components_supervised__user_core_common_features.h | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-components_sync_base_features.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-components_user__education_views_help__bubble__view.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-components_variations_service_variations__service.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-components_viz_host_host__display__client.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-components_viz_host_host__display__client.h | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_skia__output__surface__impl__on__gpu.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_software__output__surface.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_software__output__surface.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-components_viz_service_frame__sinks_root__compositor__frame__sink__impl.cc | 20
-rw-r--r--  www/ungoogled-chromium/files/patch-components_viz_service_frame__sinks_root__compositor__frame__sink__impl.h | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-components_viz_test_fake__display__client.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-components_viz_test_fake__display__client.h | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-components_viz_test_mock__display__client.h | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-content_browser_BUILD.gn | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-content_browser_browser__child__process__host__impl.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-content_browser_browser__child__process__host__impl.h | 20
-rw-r--r--  www/ungoogled-chromium/files/patch-content_browser_browser__child__process__host__impl__receiver__bindings.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-content_browser_browser__main__loop.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-content_browser_child__thread__type__switcher__linux.cc | 20
-rw-r--r--  www/ungoogled-chromium/files/patch-content_browser_compositor_viz__process__transport__factory.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-content_browser_download_save__package.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-content_browser_generic__sensor_frame__sensor__provider__proxy.cc | 14
-rw-r--r--  www/ungoogled-chromium/files/patch-content_browser_gpu_gpu__process__host.cc | 8
-rw-r--r--  www/ungoogled-chromium/files/patch-content_browser_media_media__keys__listener__manager__impl.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-content_browser_network__service__instance__impl.cc | 8
-rw-r--r--  www/ungoogled-chromium/files/patch-content_browser_renderer__host_media_service__video__capture__device__launcher.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__process__host__impl.cc | 16
-rw-r--r--  www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__process__host__impl.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__view__host__impl.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__widget__host__view__aura.cc | 12
-rw-r--r--  www/ungoogled-chromium/files/patch-content_browser_service__worker_service__worker__context__wrapper.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-content_browser_utility__process__host.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-content_browser_utility__sandbox__delegate.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-content_browser_web__contents_web__contents__view__aura.cc | 8
-rw-r--r--  www/ungoogled-chromium/files/patch-content_child_BUILD.gn | 16
-rw-r--r--  www/ungoogled-chromium/files/patch-content_common_BUILD.gn | 13
-rw-r--r--  www/ungoogled-chromium/files/patch-content_common_features.cc | 12
-rw-r--r--  www/ungoogled-chromium/files/patch-content_common_features.h | 8
-rw-r--r--  www/ungoogled-chromium/files/patch-content_gpu_gpu__child__thread.cc | 21
-rw-r--r--  www/ungoogled-chromium/files/patch-content_gpu_gpu__main.cc | 26
-rw-r--r--  www/ungoogled-chromium/files/patch-content_public_browser_content__browser__client.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-content_public_common_content__features.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-content_public_common_content__switches.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-content_public_common_content__switches.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-content_public_test_mock__render__thread.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-content_renderer_BUILD.gn | 16
-rw-r--r--  www/ungoogled-chromium/files/patch-content_renderer_render__process__impl.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-content_renderer_render__thread__impl.cc | 8
-rw-r--r--  www/ungoogled-chromium/files/patch-content_renderer_renderer__blink__platform__impl.cc | 10
-rw-r--r--  www/ungoogled-chromium/files/patch-content_renderer_renderer__blink__platform__impl.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-content_shell_BUILD.gn | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-content_shell_app_shell__main__delegate.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-content_test_BUILD.gn | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-content_utility_services.cc | 29
-rw-r--r--  www/ungoogled-chromium/files/patch-content_utility_utility__main.cc | 32
-rw-r--r--  www/ungoogled-chromium/files/patch-content_utility_utility__thread__impl.cc | 21
-rw-r--r--  www/ungoogled-chromium/files/patch-extensions_browser_api_api__browser__context__keyed__service__factories.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-extensions_browser_api_management_management__api.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-extensions_browser_api_messaging_message__service.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-extensions_common_api___permission__features.json | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-gpu_command__buffer_service_dawn__context__provider.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-gpu_command__buffer_service_gles2__cmd__decoder.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__context__state.cc | 32
-rw-r--r--  www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__context__state.h | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__image_ozone__image__gl__textures__holder.h | 10
-rw-r--r--  www/ungoogled-chromium/files/patch-gpu_command__buffer_service_webgpu__decoder__impl.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-gpu_ipc_service_gpu__init.cc | 16
-rw-r--r--  www/ungoogled-chromium/files/patch-gpu_vulkan_vulkan__util.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-headless_lib_headless__content__main__delegate.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-media_BUILD.gn | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-media_base_media__switches.cc | 14
-rw-r--r--  www/ungoogled-chromium/files/patch-media_base_media__switches.h | 12
-rw-r--r--  www/ungoogled-chromium/files/patch-media_capture_video_video__capture__buffer__tracker__factory__impl.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-media_capture_video_video__capture__device__client.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-media_gpu_chromeos_video__decoder__pipeline.cc | 8
-rw-r--r--  www/ungoogled-chromium/files/patch-media_gpu_vaapi_vaapi__video__decoder.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-media_gpu_vaapi_vaapi__wrapper.cc | 12
-rw-r--r--  www/ungoogled-chromium/files/patch-media_media__options.gni | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-media_video_gpu__memory__buffer__video__frame__pool.cc | 8
-rw-r--r--  www/ungoogled-chromium/files/patch-media_video_video__encode__accelerator__adapter.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-mojo_public_tools_bindings_mojom.gni | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-net_BUILD.gn | 16
-rw-r--r--  www/ungoogled-chromium/files/patch-net_base_features.cc | 24
-rw-r--r--  www/ungoogled-chromium/files/patch-net_cert_cert__verifier.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-net_cert_cert__verify__proc.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-net_cert_cert__verify__proc__unittest.cc | 20
-rw-r--r--  www/ungoogled-chromium/files/patch-net_cert_test__root__certs__unittest.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-net_socket_udp__socket__posix.cc | 18
-rw-r--r--  www/ungoogled-chromium/files/patch-net_tools_cert__verify__tool_cert__verify__tool.cc | 15
-rw-r--r--  www/ungoogled-chromium/files/patch-printing_printing__context__linux.cc | 8
-rw-r--r--  www/ungoogled-chromium/files/patch-remoting_host_heartbeat__sender.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-remoting_host_me2me__desktop__environment.cc | 8
-rw-r--r--  www/ungoogled-chromium/files/patch-remoting_host_policy__watcher.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-remoting_host_remoting__me2me__host.cc | 36
-rw-r--r--  www/ungoogled-chromium/files/patch-remoting_host_setup_start__host__main.cc | 12
-rw-r--r--  www/ungoogled-chromium/files/patch-remoting_protocol_webrtc__video__stream.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-sandbox_policy_BUILD.gn | 12
-rw-r--r--  www/ungoogled-chromium/files/patch-sandbox_policy_mojom_sandbox.mojom | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-sandbox_policy_sandbox__type.cc | 24
-rw-r--r--  www/ungoogled-chromium/files/patch-sandbox_policy_switches.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-sandbox_policy_switches.h | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-services_cert__verifier_cert__verifier__creation.cc | 28
-rw-r--r--  www/ungoogled-chromium/files/patch-services_device_geolocation_location__arbitrator.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-services_network_BUILD.gn | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-services_network_network__context.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-services_network_network__context.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-services_network_network__service.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-services_network_network__service.h | 8
-rw-r--r--  www/ungoogled-chromium/files/patch-services_network_public_cpp_BUILD.gn | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-services_network_public_cpp_features.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-services_network_public_mojom_BUILD.gn | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_blink_renderer_controller_blink__initializer.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_editing_editing__behavior.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_exported_web__view__impl.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_frame_web__frame__test.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_html_parser_html__document__parser__fastpath.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_layout_layout__view.cc (renamed from www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_layout_ng_layout__ng__view.cc) | 12
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_layout_ng_grid_ng__grid__layout__algorithm.cc | 30
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_scroll_scrollbar__theme__aura.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_canvas_canvas2d_canvas__style.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_ml_webnn_ml__graph__xnnpack.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_webgl_webgl__rendering__context__base.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_webgpu_gpu__queue.cc | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_BUILD.gn | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_fonts_font__description.cc | 8
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_runtime__enabled__features.json | 512
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_video__capture_video__capture__impl.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_boringssl_src_util_generate__build__files.py | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_crashpad_crashpad_util_posix_close__multiple.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_dawn_include_dawn_native_VulkanBackend.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_dawn_src_dawn_common_Platform.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_dawn_src_dawn_native_vulkan_BackendVk.cpp | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_ffmpeg_chromium_scripts_build__ffmpeg.py | 12
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_ffmpeg_configure | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_ffmpeg_libavcodec_x86_fft.asm | 108
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_node_node.gni | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_pdfium_pdfium.gni | 8
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_perfetto_include_perfetto_base_time.h | 8
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_perfetto_include_perfetto_ext_base_thread__utils.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_perfetto_src_base_string__utils.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_perfetto_src_trace__processor_db_storage_numeric__storage.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_speech-dispatcher_libspeechd__version.h | 32
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_speech-dispatcher_speechd__types.h | 142
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_sqlite_src_amalgamation_sqlite3.c | 6
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_tflite_features.gni | 15
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_vulkan__memory__allocator_include_vk__mem__alloc.h | 39390
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_webrtc_rtc__base_physical__socket__server.cc | 16
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_webrtc_rtc__base_physical__socket__server.h | 8
-rw-r--r--  www/ungoogled-chromium/files/patch-third__party_widevine_cdm_widevine.gni | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-tools_perf_chrome__telemetry__build_BUILD.gn | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_base_clipboard_clipboard__constants.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_base_clipboard_clipboard__constants.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_base_clipboard_clipboard__non__backed.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_base_test_ui__controls.h | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_base_ui__base__features.cc | 21
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_base_ui__base__features.h | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_base_webui_web__ui__util.cc | 16
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_color_color__id.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_color_color__provider__utils.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_compositor_compositor.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_compositor_compositor.h | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_compositor_compositor__observer.h | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_gfx_BUILD.gn | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_gfx_linux_gbm__wrapper.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_gfx_x_generated__protos_dri3.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_gfx_x_generated__protos_shm.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_gl_BUILD.gn | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_gl_gl__context.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_gtk_gtk__ui.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_message__center_views_message__popup__view.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_emulate_wayland__input__emulate.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_emulate_wayland__input__emulate.h | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_views_controls_textfield_textfield.cc | 14
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_views_test_ui__controls__factory__desktop__aura__ozone.cc | 11
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_views_widget_widget.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-ui_views_window_dialog__delegate.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-v8_BUILD.gn | 10
-rw-r--r--  www/ungoogled-chromium/files/patch-v8_include_v8-internal.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-v8_src_api_api.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-v8_src_base_platform_platform-posix.cc | 14
-rw-r--r--  www/ungoogled-chromium/files/patch-v8_src_builtins_x64_builtins-x64.cc | 8
-rw-r--r--  www/ungoogled-chromium/files/patch-v8_src_codegen_x64_macro-assembler-x64.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-v8_src_compiler_backend_x64_code-generator-x64.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-v8_src_execution_isolate.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-v8_src_maglev_x64_maglev-assembler-x64-inl.h | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-v8_src_trap-handler_handler-inside-posix.cc | 4
-rw-r--r--  www/ungoogled-chromium/files/patch-v8_src_wasm_baseline_ia32_liftoff-assembler-ia32-inl.h (renamed from www/ungoogled-chromium/files/patch-v8_src_wasm_baseline_ia32_liftoff-assembler-ia32.h) | 32
362 files changed, 41239 insertions(+), 1792 deletions(-)
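Most of the rename churn above tracks Chromium 120's relocation of PartitionAlloc from base/allocator/partition_allocator/ to base/allocator/partition_allocator/src/partition_alloc/: port patch file names are derived from the path they touch ("/" becomes "_", a literal "_" becomes "__"), so when an upstream file moves, its patch is renamed too. A minimal sketch of how such patches are regenerated with the stock ports targets (workflow illustration only, not part of this commit):

    # assumes a checked-out ports tree with this update applied
    cd /usr/ports/www/ungoogled-chromium
    make extract          # unpack chromium-120.0.6099.129 into ${WRKSRC}
    # ...re-apply and adjust the edits under ${WRKSRC}...
    make makepatch        # rewrite files/patch-* using the new source paths
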
diff --git a/www/ungoogled-chromium/Makefile b/www/ungoogled-chromium/Makefile
index f0cefaa7b3b8..135d5c48c776 100644
--- a/www/ungoogled-chromium/Makefile
+++ b/www/ungoogled-chromium/Makefile
@@ -1,5 +1,5 @@
PORTNAME= ungoogled-chromium
-PORTVERSION= 119.0.6045.199
+PORTVERSION= 120.0.6099.129
UGVERSION= ${DISTVERSION}-1
CATEGORIES= www wayland
MASTER_SITES= https://commondatastorage.googleapis.com/chromium-browser-official/ \
diff --git a/www/ungoogled-chromium/distinfo b/www/ungoogled-chromium/distinfo
index 6e0c5609dabe..7b516629732a 100644
--- a/www/ungoogled-chromium/distinfo
+++ b/www/ungoogled-chromium/distinfo
@@ -1,9 +1,9 @@
-TIMESTAMP = 1701295347
-SHA256 (chromium-119.0.6045.199.tar.xz) = b1ae62beb7907d99802b74821d5198bd54a7456df1116d317da653bde8ce6388
-SIZE (chromium-119.0.6045.199.tar.xz) = 3324886804
-SHA256 (ungoogled-chromium-119.0.6045.199-1.tar.gz) = c9f8747c4e96d2d62b67909895606c546fa7986e184ce1d924a25ca1c11e69e8
-SIZE (ungoogled-chromium-119.0.6045.199-1.tar.gz) = 660433
-SHA256 (chromium-119.0.6045.199-testdata.tar.xz) = e71ca534121263aa89b2d91823aada57fa16e4ccf2cf61dadbba53c06f453757
-SIZE (chromium-119.0.6045.199-testdata.tar.xz) = 271055096
+TIMESTAMP = 1703336363
+SHA256 (chromium-120.0.6099.129.tar.xz) = be36d5abecfafdc68d9b27b0bee65136316610a295e844b99483a7520b245f85
+SIZE (chromium-120.0.6099.129.tar.xz) = 3283749920
+SHA256 (ungoogled-chromium-120.0.6099.129-1.tar.gz) = 632265238aea25b1c0076e99b4de2f5e53bbafccbb89356d4586c4415dae6ff9
+SIZE (ungoogled-chromium-120.0.6099.129-1.tar.gz) = 665271
+SHA256 (chromium-120.0.6099.129-testdata.tar.xz) = 44d4507584fd340704dfd314bd9b651c3887e97835b5ca86bcb1dbe20665bed1
+SIZE (chromium-120.0.6099.129-testdata.tar.xz) = 274371756
SHA256 (test_fonts-336e775eec536b2d785cc80eff6ac39051931286.tar.gz) = a2ca2962daf482a8f943163541e1c73ba4b2694fabcd2510981f2db4eda493c8
SIZE (test_fonts-336e775eec536b2d785cc80eff6ac39051931286.tar.gz) = 32624734
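The distinfo changes above are mechanical: each distfile gets a fresh TIMESTAMP, SHA256, and SIZE entry for the new version. A hedged sketch of how these entries are produced and verified with the standard ports targets (not part of the commit itself):

    cd /usr/ports/www/ungoogled-chromium
    make makesum      # fetch the 120.0.6099.129 distfiles and rewrite distinfo
    make checksum     # verify the fetched files against the SHA256/SIZE entries
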
diff --git a/www/ungoogled-chromium/files/patch-BUILD.gn b/www/ungoogled-chromium/files/patch-BUILD.gn
index 5e53f231fe01..cb8b72117f53 100644
--- a/www/ungoogled-chromium/files/patch-BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-BUILD.gn
@@ -1,4 +1,4 @@
---- BUILD.gn.orig 2023-11-04 07:08:51 UTC
+--- BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ BUILD.gn
@@ -61,7 +61,7 @@ declare_args() {
root_extra_deps = []
@@ -17,7 +17,7 @@
"//net:net_unittests",
"//sandbox:sandbox_unittests",
"//services:services_unittests",
-@@ -417,7 +416,7 @@ group("gn_all") {
+@@ -415,7 +414,7 @@ group("gn_all") {
}
}
@@ -26,7 +26,7 @@
deps += [
"//third_party/breakpad:breakpad_unittests",
"//third_party/breakpad:core-2-minidump",
-@@ -609,6 +608,15 @@ group("gn_all") {
+@@ -607,6 +606,15 @@ group("gn_all") {
}
}
@@ -42,7 +42,7 @@
if (is_mac) {
deps += [
"//third_party/breakpad:dump_syms",
-@@ -658,7 +666,7 @@ group("gn_all") {
+@@ -656,7 +664,7 @@ group("gn_all") {
host_os == "win") {
deps += [ "//chrome/test/mini_installer:mini_installer_tests" ]
}
@@ -51,7 +51,7 @@
deps += [ "//third_party/breakpad:symupload($host_toolchain)" ]
}
-@@ -1086,7 +1094,7 @@ if (use_blink && !is_cronet_build) {
+@@ -1077,7 +1085,7 @@ if (use_blink && !is_cronet_build) {
]
}
@@ -60,7 +60,7 @@
script_test("chrome_wpt_tests") {
script = "//third_party/blink/tools/run_wpt_tests.py"
args = [
-@@ -1159,7 +1167,7 @@ if (use_blink && !is_cronet_build) {
+@@ -1150,7 +1158,7 @@ if (use_blink && !is_cronet_build) {
data_deps += [ "//content/web_test:web_test_common_mojom_js_data_deps" ]
}
@@ -69,7 +69,7 @@
data_deps +=
[ "//third_party/breakpad:minidump_stackwalk($host_toolchain)" ]
}
-@@ -1168,7 +1176,7 @@ if (use_blink && !is_cronet_build) {
+@@ -1159,7 +1167,7 @@ if (use_blink && !is_cronet_build) {
data_deps += [ "//third_party/breakpad:dump_syms($host_toolchain)" ]
}
@@ -78,7 +78,7 @@
data_deps += [ "//third_party/breakpad:dump_syms($host_toolchain)" ]
}
-@@ -1629,7 +1637,7 @@ group("chromium_builder_perf") {
+@@ -1624,7 +1632,7 @@ group("chromium_builder_perf") {
data_deps += [ "//chrome/test:performance_browser_tests" ]
}
diff --git a/www/ungoogled-chromium/files/patch-apps_ui_views_app__window__frame__view.cc b/www/ungoogled-chromium/files/patch-apps_ui_views_app__window__frame__view.cc
index 14f75eb0a3ae..91ad45d16a5a 100644
--- a/www/ungoogled-chromium/files/patch-apps_ui_views_app__window__frame__view.cc
+++ b/www/ungoogled-chromium/files/patch-apps_ui_views_app__window__frame__view.cc
@@ -1,6 +1,6 @@
---- apps/ui/views/app_window_frame_view.cc.orig 2022-10-01 07:40:07 UTC
+--- apps/ui/views/app_window_frame_view.cc.orig 2023-12-23 12:33:28 UTC
+++ apps/ui/views/app_window_frame_view.cc
-@@ -137,7 +137,7 @@ gfx::Rect AppWindowFrameView::GetWindowBoundsForClient
+@@ -149,7 +149,7 @@ gfx::Rect AppWindowFrameView::GetWindowBoundsForClient
gfx::Rect window_bounds = client_bounds;
// TODO(crbug.com/1052397): Revisit once build flag switch of lacros-chrome is
// complete.
diff --git a/www/ungoogled-chromium/files/patch-base_BUILD.gn b/www/ungoogled-chromium/files/patch-base_BUILD.gn
index 17a30c32de0c..a9c528e7a0b8 100644
--- a/www/ungoogled-chromium/files/patch-base_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-base_BUILD.gn
@@ -1,4 +1,4 @@
---- base/BUILD.gn.orig 2023-11-04 07:08:51 UTC
+--- base/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ base/BUILD.gn
@@ -186,7 +186,7 @@ buildflag_header("ios_cronet_buildflags") {
flags = [ "CRONET_BUILD=$is_cronet_build" ]
@@ -9,7 +9,7 @@
buildflag_header("message_pump_buildflags") {
header = "message_pump_buildflags.h"
header_dir = "base/message_loop"
-@@ -1101,11 +1101,23 @@ component("base") {
+@@ -1105,11 +1105,23 @@ component("base") {
# Needed for <atomic> if using newer C++ library than sysroot, except if
# building inside the cros_sdk environment - use host_toolchain as a
# more robust check for this.
@@ -34,7 +34,7 @@
if (use_allocator_shim) {
if (is_apple) {
sources += [ "allocator/early_zone_registration_apple.h" ]
-@@ -1125,7 +1137,7 @@ component("base") {
+@@ -1129,7 +1141,7 @@ component("base") {
# Allow more direct string conversions on platforms with native utf8
# strings
@@ -43,7 +43,7 @@
defines += [ "SYSTEM_NATIVE_UTF8" ]
}
-@@ -2077,6 +2089,22 @@ component("base") {
+@@ -2088,6 +2100,22 @@ component("base") {
]
}
@@ -66,7 +66,7 @@
# iOS
if (is_ios) {
sources += [
-@@ -2209,6 +2237,29 @@ component("base") {
+@@ -2220,6 +2248,29 @@ component("base") {
}
}
@@ -96,7 +96,7 @@
if (use_blink) {
sources += [
"files/file_path_watcher.cc",
-@@ -2219,7 +2270,7 @@ component("base") {
+@@ -2230,7 +2281,7 @@ component("base") {
}
if (dep_libevent) {
@@ -105,7 +105,7 @@
}
if (use_libevent) {
-@@ -3599,7 +3650,7 @@ test("base_unittests") {
+@@ -3619,7 +3670,7 @@ test("base_unittests") {
]
}
@@ -114,7 +114,7 @@
sources += [
"debug/proc_maps_linux_unittest.cc",
"files/scoped_file_linux_unittest.cc",
-@@ -3620,7 +3671,7 @@ test("base_unittests") {
+@@ -3640,7 +3691,7 @@ test("base_unittests") {
"posix/file_descriptor_shuffle_unittest.cc",
"posix/unix_domain_socket_unittest.cc",
]
@@ -123,7 +123,7 @@
sources += [
"profiler/stack_base_address_posix_unittest.cc",
"profiler/stack_copier_signal_unittest.cc",
-@@ -3631,7 +3682,7 @@ test("base_unittests") {
+@@ -3651,7 +3702,7 @@ test("base_unittests") {
# Allow more direct string conversions on platforms with native utf8
# strings
@@ -132,7 +132,7 @@
defines += [ "SYSTEM_NATIVE_UTF8" ]
}
-@@ -3892,7 +3943,7 @@ test("base_unittests") {
+@@ -3913,7 +3964,7 @@ test("base_unittests") {
}
}
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__base_threading_platform__thread__posix.cc b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__base_threading_platform__thread__posix.cc
deleted file mode 100644
index 3b22be9a746d..000000000000
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__base_threading_platform__thread__posix.cc
+++ /dev/null
@@ -1,11 +0,0 @@
---- base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_posix.cc.orig 2022-10-01 07:40:07 UTC
-+++ base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_posix.cc
-@@ -17,7 +17,7 @@
- #include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_internal_posix.h"
- #include "build/build_config.h"
-
--#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
- #include <sys/syscall.h>
- #include <atomic>
- #endif
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_BUILD.gn b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_BUILD.gn
index 6fad70ce0381..1ea41bbd5f5f 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_BUILD.gn
@@ -1,6 +1,6 @@
---- base/allocator/partition_allocator/BUILD.gn.orig 2023-11-04 07:08:51 UTC
-+++ base/allocator/partition_allocator/BUILD.gn
-@@ -569,7 +569,7 @@ source_set("allocator_shim") {
+--- base/allocator/partition_allocator/src/partition_alloc/BUILD.gn.orig 2023-12-23 12:33:28 UTC
++++ base/allocator/partition_allocator/src/partition_alloc/BUILD.gn
+@@ -582,7 +582,7 @@ source_set("allocator_shim") {
]
configs += [ ":mac_no_default_new_delete_symbols" ]
}
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_address__space__randomization.h b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_address__space__randomization.h
index 23aea4819f3c..71c8c2e23589 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_address__space__randomization.h
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_address__space__randomization.h
@@ -1,5 +1,5 @@
---- base/allocator/partition_allocator/address_space_randomization.h.orig 2023-09-17 07:59:53 UTC
-+++ base/allocator/partition_allocator/address_space_randomization.h
+--- base/allocator/partition_allocator/src/partition_alloc/address_space_randomization.h.orig 2023-12-23 12:33:28 UTC
++++ base/allocator/partition_allocator/src/partition_alloc/address_space_randomization.h
@@ -38,7 +38,7 @@ AslrMask(uintptr_t bits) {
#if defined(ARCH_CPU_64_BITS)
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_page__allocator.h b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_page__allocator.h
index ffa78e1733ff..65dd9d3e690c 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_page__allocator.h
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_page__allocator.h
@@ -1,5 +1,5 @@
---- base/allocator/partition_allocator/page_allocator.h.orig 2023-09-17 07:59:53 UTC
-+++ base/allocator/partition_allocator/page_allocator.h
+--- base/allocator/partition_allocator/src/partition_alloc/page_allocator.h.orig 2023-12-23 12:33:28 UTC
++++ base/allocator/partition_allocator/src/partition_alloc/page_allocator.h
@@ -258,7 +258,7 @@ void DecommitAndZeroSystemPages(void* address,
// recommitted. Do not assume that this will not change over time.
constexpr PA_COMPONENT_EXPORT(
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_page__allocator__constants.h b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_page__allocator__constants.h
index c763287c00b9..175e765df3a7 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_page__allocator__constants.h
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_page__allocator__constants.h
@@ -1,5 +1,5 @@
---- base/allocator/partition_allocator/page_allocator_constants.h.orig 2023-09-17 07:59:53 UTC
-+++ base/allocator/partition_allocator/page_allocator_constants.h
+--- base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h.orig 2023-12-23 12:33:28 UTC
++++ base/allocator/partition_allocator/src/partition_alloc/page_allocator_constants.h
@@ -25,7 +25,7 @@
// elimination.
#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR __attribute__((const))
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_page__allocator__internals__posix.h b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_page__allocator__internals__posix.h
index 3db16f722ee9..7e52a7b9473f 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_page__allocator__internals__posix.h
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_page__allocator__internals__posix.h
@@ -1,5 +1,5 @@
---- base/allocator/partition_allocator/page_allocator_internals_posix.h.orig 2023-09-17 07:59:53 UTC
-+++ base/allocator/partition_allocator/page_allocator_internals_posix.h
+--- base/allocator/partition_allocator/src/partition_alloc/page_allocator_internals_posix.h.orig 2023-12-23 12:33:28 UTC
++++ base/allocator/partition_allocator/src/partition_alloc/page_allocator_internals_posix.h
@@ -403,8 +403,12 @@ bool TryRecommitSystemPagesInternal(
void DiscardSystemPagesInternal(uintptr_t address, size_t length) {
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__address__space.cc b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__address__space.cc
index 3ba7e8d20ab3..150fce78566c 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__address__space.cc
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__address__space.cc
@@ -1,5 +1,5 @@
---- base/allocator/partition_allocator/partition_address_space.cc.orig 2023-07-21 09:49:17 UTC
-+++ base/allocator/partition_allocator/partition_address_space.cc
+--- base/allocator/partition_allocator/src/partition_alloc/partition_address_space.cc.orig 2023-12-23 12:33:28 UTC
++++ base/allocator/partition_allocator/src/partition_alloc/partition_address_space.cc
@@ -420,7 +420,7 @@ void PartitionAddressSpace::UninitThreadIsolatedPoolFo
}
#endif
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__base_compiler__specific.h b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_compiler__specific.h
index 2f6869ec88fd..4431128e9ba9 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__base_compiler__specific.h
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_compiler__specific.h
@@ -1,5 +1,5 @@
---- base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h.orig 2023-05-05 12:12:41 UTC
-+++ base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h
+--- base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h.orig 2023-12-23 12:33:28 UTC
++++ base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/compiler_specific.h
@@ -24,9 +24,9 @@
// Annotate a function indicating it should not be inlined.
// Use like:
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__base_debug_stack__trace__posix.cc b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_debug_stack__trace__posix.cc
index 10b74b577ee3..7a76584417bf 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__base_debug_stack__trace__posix.cc
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_debug_stack__trace__posix.cc
@@ -1,5 +1,5 @@
---- base/allocator/partition_allocator/partition_alloc_base/debug/stack_trace_posix.cc.orig 2023-11-04 07:08:51 UTC
-+++ base/allocator/partition_allocator/partition_alloc_base/debug/stack_trace_posix.cc
+--- base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace_posix.cc.orig 2023-12-23 12:33:28 UTC
++++ base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace_posix.cc
@@ -12,11 +12,11 @@
#include <string.h>
#include <unistd.h>
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__base_rand__util__posix.cc b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_rand__util__posix.cc
index 1401638abc1b..a9500c88ce31 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__base_rand__util__posix.cc
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_rand__util__posix.cc
@@ -1,5 +1,5 @@
---- base/allocator/partition_allocator/partition_alloc_base/rand_util_posix.cc.orig 2023-09-17 07:59:53 UTC
-+++ base/allocator/partition_allocator/partition_alloc_base/rand_util_posix.cc
+--- base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util_posix.cc.orig 2023-12-23 12:33:28 UTC
++++ base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/rand_util_posix.cc
@@ -90,6 +90,10 @@ void RandBytes(void* output, size_t output_length) {
if (getentropy(output, output_length) == 0) {
return;
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__base_threading_platform__thread__internal__posix.h b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_threading_platform__thread__internal__posix.h
index 025d6762fd2f..649410c3c32c 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__base_threading_platform__thread__internal__posix.h
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_threading_platform__thread__internal__posix.h
@@ -1,5 +1,5 @@
---- base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_internal_posix.h.orig 2022-10-01 07:40:07 UTC
-+++ base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_internal_posix.h
+--- base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_internal_posix.h.orig 2023-12-23 12:33:28 UTC
++++ base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_internal_posix.h
@@ -10,7 +10,7 @@
namespace partition_alloc::internal::base::internal {
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_threading_platform__thread__posix.cc b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_threading_platform__thread__posix.cc
new file mode 100644
index 000000000000..4dc47dee29c3
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_threading_platform__thread__posix.cc
@@ -0,0 +1,11 @@
+--- base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_posix.cc.orig 2023-12-23 12:33:28 UTC
++++ base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_posix.cc
+@@ -17,7 +17,7 @@
+ #include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_internal_posix.h"
+ #include "build/build_config.h"
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
+ #include <sys/syscall.h>
+ #include <atomic>
+ #endif
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__config.h b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__config.h
index 1ffbccb4d744..85a99bacad9e 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__config.h
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__config.h
@@ -1,5 +1,5 @@
---- base/allocator/partition_allocator/partition_alloc_config.h.orig 2023-10-13 13:20:35 UTC
-+++ base/allocator/partition_allocator/partition_alloc_config.h
+--- base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h.orig 2023-12-23 12:33:28 UTC
++++ base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h
@@ -94,7 +94,7 @@ static_assert(sizeof(void*) != 8, "");
// POSIX is not only UNIX, e.g. macOS and other OSes. We do use Linux-specific
// features such as futex(2).
@@ -9,7 +9,7 @@
// On some platforms, we implement locking by spinning in userspace, then going
// into the kernel only if there is contention. This requires platform support,
-@@ -237,7 +237,7 @@ constexpr bool kUseLazyCommit = false;
+@@ -241,7 +241,7 @@ constexpr bool kUseLazyCommit = false;
// On these platforms, lock all the partitions before fork(), and unlock after.
// This may be required on more platforms in the future.
#define PA_CONFIG_HAS_ATFORK_HANDLER() \
@@ -18,7 +18,7 @@
// PartitionAlloc uses PartitionRootEnumerator to acquire all
// PartitionRoots at BeforeFork and to release at AfterFork.
-@@ -284,7 +284,7 @@ constexpr bool kUseLazyCommit = false;
+@@ -288,7 +288,7 @@ constexpr bool kUseLazyCommit = false;
// Also enabled on ARM64 macOS, as the 16kiB pages on this platform lead to
// larger slot spans.
#define PA_CONFIG_PREFER_SMALLER_SLOT_SPANS() \
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__constants.h b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__constants.h
index 9545b1bfbd47..b2aeb5832373 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__constants.h
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__constants.h
@@ -1,5 +1,5 @@
---- base/allocator/partition_allocator/partition_alloc_constants.h.orig 2023-11-04 07:08:51 UTC
-+++ base/allocator/partition_allocator/partition_alloc_constants.h
+--- base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h.orig 2023-12-23 12:33:28 UTC
++++ base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h
@@ -107,7 +107,7 @@ PartitionPageShift() {
return 18; // 256 KiB
}
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__forward.h b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__forward.h
index 2f04050c0162..aa779bc7f124 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc__forward.h
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__forward.h
@@ -1,5 +1,5 @@
---- base/allocator/partition_allocator/partition_alloc_forward.h.orig 2023-08-18 10:26:52 UTC
-+++ base/allocator/partition_allocator/partition_alloc_forward.h
+--- base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h.orig 2023-12-23 12:33:28 UTC
++++ base/allocator/partition_allocator/src/partition_alloc/partition_alloc_forward.h
@@ -28,9 +28,13 @@ namespace internal {
// the second one 16. We could technically return something different for
// malloc() and operator new(), but this would complicate things, and most of
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__page__constants.h b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__page__constants.h
index 165058c129c5..41a6e87ea5f6 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__page__constants.h
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__page__constants.h
@@ -1,5 +1,5 @@
---- base/allocator/partition_allocator/partition_page_constants.h.orig 2023-08-18 10:26:52 UTC
-+++ base/allocator/partition_allocator/partition_page_constants.h
+--- base/allocator/partition_allocator/src/partition_alloc/partition_page_constants.h.orig 2023-12-23 12:33:28 UTC
++++ base/allocator/partition_allocator/src/partition_alloc/partition_page_constants.h
@@ -16,7 +16,7 @@ namespace partition_alloc::internal {
// (1 << 12 or 1 << 14), as checked in PartitionRoot::Init(). And
// PartitionPageSize() is 4 times the OS page size.
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__root.cc b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__root.cc
index 502f4226c974..d4c8f066a440 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__root.cc
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__root.cc
@@ -1,5 +1,5 @@
---- base/allocator/partition_allocator/partition_root.cc.orig 2023-10-13 13:20:35 UTC
-+++ base/allocator/partition_allocator/partition_root.cc
+--- base/allocator/partition_allocator/src/partition_alloc/partition_root.cc.orig 2023-12-23 12:33:28 UTC
++++ base/allocator/partition_allocator/src/partition_alloc/partition_root.cc
@@ -47,7 +47,7 @@
#include "wow64apiset.h"
#endif
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_spinning__mutex.cc b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_spinning__mutex.cc
index 2b6b22d395d5..63bfe6e25db8 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_spinning__mutex.cc
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_spinning__mutex.cc
@@ -1,5 +1,5 @@
---- base/allocator/partition_allocator/spinning_mutex.cc.orig 2023-05-05 12:12:41 UTC
-+++ base/allocator/partition_allocator/spinning_mutex.cc
+--- base/allocator/partition_allocator/src/partition_alloc/spinning_mutex.cc.orig 2023-12-23 12:33:28 UTC
++++ base/allocator/partition_allocator/src/partition_alloc/spinning_mutex.cc
@@ -18,7 +18,16 @@
#if PA_CONFIG(HAS_LINUX_KERNEL)
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_starscan_stack_stack.cc b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_starscan_stack_stack.cc
index e5d3e9bca303..17c7c31f634f 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_starscan_stack_stack.cc
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_starscan_stack_stack.cc
@@ -1,5 +1,5 @@
---- base/allocator/partition_allocator/starscan/stack/stack.cc.orig 2023-09-17 07:59:53 UTC
-+++ base/allocator/partition_allocator/starscan/stack/stack.cc
+--- base/allocator/partition_allocator/src/partition_alloc/starscan/stack/stack.cc.orig 2023-12-23 12:33:28 UTC
++++ base/allocator/partition_allocator/src/partition_alloc/starscan/stack/stack.cc
@@ -18,6 +18,10 @@
#include <pthread.h>
#endif
diff --git a/www/ungoogled-chromium/files/patch-base_debug_stack__trace.cc b/www/ungoogled-chromium/files/patch-base_debug_stack__trace.cc
index e7e599c9e9ca..18f30776b747 100644
--- a/www/ungoogled-chromium/files/patch-base_debug_stack__trace.cc
+++ b/www/ungoogled-chromium/files/patch-base_debug_stack__trace.cc
@@ -1,6 +1,6 @@
---- base/debug/stack_trace.cc.orig 2023-03-10 11:01:21 UTC
+--- base/debug/stack_trace.cc.orig 2023-12-23 12:33:28 UTC
+++ base/debug/stack_trace.cc
-@@ -273,7 +273,9 @@ void StackTrace::Print() const {
+@@ -266,7 +266,9 @@ void StackTrace::Print() const {
}
void StackTrace::OutputToStream(std::ostream* os) const {
@@ -10,7 +10,7 @@
}
std::string StackTrace::ToString() const {
-@@ -281,7 +283,7 @@ std::string StackTrace::ToString() const {
+@@ -274,7 +276,7 @@ std::string StackTrace::ToString() const {
}
std::string StackTrace::ToStringWithPrefix(const char* prefix_string) const {
std::stringstream stream;
diff --git a/www/ungoogled-chromium/files/patch-base_files_file__path__watcher__unittest.cc b/www/ungoogled-chromium/files/patch-base_files_file__path__watcher__unittest.cc
index d2e585447ba4..ae01847cfb6b 100644
--- a/www/ungoogled-chromium/files/patch-base_files_file__path__watcher__unittest.cc
+++ b/www/ungoogled-chromium/files/patch-base_files_file__path__watcher__unittest.cc
@@ -1,6 +1,6 @@
---- base/files/file_path_watcher_unittest.cc.orig 2023-11-04 07:08:51 UTC
+--- base/files/file_path_watcher_unittest.cc.orig 2023-12-23 12:33:28 UTC
+++ base/files/file_path_watcher_unittest.cc
-@@ -705,7 +705,7 @@ TEST_F(FilePathWatcherTest, WatchDirectory) {
+@@ -703,7 +703,7 @@ TEST_F(FilePathWatcherTest, WatchDirectory) {
}
delegate.RunUntilEventsMatch(event_expecter);
@@ -9,7 +9,7 @@
ASSERT_TRUE(WriteFile(file1, "content v2"));
// Mac implementation does not detect files modified in a directory.
// TODO(https://crbug.com/1432064): Expect that no events are fired on Mac.
-@@ -1641,7 +1641,7 @@ namespace {
+@@ -1639,7 +1639,7 @@ namespace {
enum Permission { Read, Write, Execute };
@@ -18,7 +18,7 @@
bool ChangeFilePermissions(const FilePath& path, Permission perm, bool allow) {
struct stat stat_buf;
-@@ -1675,7 +1675,7 @@ bool ChangeFilePermissions(const FilePath& path, Permi
+@@ -1673,7 +1673,7 @@ bool ChangeFilePermissions(const FilePath& path, Permi
} // namespace
diff --git a/www/ungoogled-chromium/files/patch-base_files_file__util__unittest.cc b/www/ungoogled-chromium/files/patch-base_files_file__util__unittest.cc
index a7388b6af925..02e88eb5efc5 100644
--- a/www/ungoogled-chromium/files/patch-base_files_file__util__unittest.cc
+++ b/www/ungoogled-chromium/files/patch-base_files_file__util__unittest.cc
@@ -1,6 +1,6 @@
---- base/files/file_util_unittest.cc.orig 2023-06-05 19:39:05 UTC
+--- base/files/file_util_unittest.cc.orig 2023-12-23 12:33:28 UTC
+++ base/files/file_util_unittest.cc
-@@ -3871,7 +3871,7 @@ TEST_F(FileUtilTest, ReadFileToStringWithNamedPipe) {
+@@ -3878,7 +3878,7 @@ TEST_F(FileUtilTest, ReadFileToStringWithNamedPipe) {
}
#endif // BUILDFLAG(IS_WIN)
@@ -9,7 +9,7 @@
TEST_F(FileUtilTest, ReadFileToStringWithProcFileSystem) {
FilePath file_path("/proc/cpuinfo");
std::string data = "temp";
-@@ -4594,7 +4594,7 @@ TEST(FileUtilMultiThreadedTest, MultiThreadedTempFiles
+@@ -4601,7 +4601,7 @@ TEST(FileUtilMultiThreadedTest, MultiThreadedTempFiles
NULL);
#else
size_t bytes_written =
diff --git a/www/ungoogled-chromium/files/patch-base_linux__util.cc b/www/ungoogled-chromium/files/patch-base_linux__util.cc
index 1a5c4ad1d1f3..447a0d78838a 100644
--- a/www/ungoogled-chromium/files/patch-base_linux__util.cc
+++ b/www/ungoogled-chromium/files/patch-base_linux__util.cc
@@ -1,4 +1,4 @@
---- base/linux_util.cc.orig 2023-09-17 07:59:53 UTC
+--- base/linux_util.cc.orig 2023-12-23 12:33:28 UTC
+++ base/linux_util.cc
@@ -15,6 +15,7 @@
@@ -8,7 +8,7 @@
#include "base/base_export.h"
#include "base/files/dir_reader_posix.h"
-@@ -135,6 +136,9 @@ void SetLinuxDistro(const std::string& distro) {
+@@ -153,10 +154,14 @@ void SetLinuxDistro(const std::string& distro) {
}
bool GetThreadsForProcess(pid_t pid, std::vector<pid_t>* tids) {
@@ -18,11 +18,8 @@
// 25 > strlen("/proc//task") + strlen(std::to_string(INT_MAX)) + 1 = 22
char buf[25];
strings::SafeSPrintf(buf, "/proc/%d/task", pid);
-@@ -152,6 +156,7 @@ bool GetThreadsForProcess(pid_t pid, std::vector<pid_t
- }
-
- return true;
+ return GetThreadsFromProcessDir(buf, tids);
+#endif
}
- pid_t FindThreadIDWithSyscall(pid_t pid, const std::string& expected_data,
+ bool GetThreadsForCurrentProcess(std::vector<pid_t>* tids) {
diff --git a/www/ungoogled-chromium/files/patch-base_process_process__metrics.h b/www/ungoogled-chromium/files/patch-base_process_process__metrics.h
index aa9bcf58307a..48e5ff32c708 100644
--- a/www/ungoogled-chromium/files/patch-base_process_process__metrics.h
+++ b/www/ungoogled-chromium/files/patch-base_process_process__metrics.h
@@ -1,4 +1,4 @@
---- base/process/process_metrics.h.orig 2023-09-17 07:59:53 UTC
+--- base/process/process_metrics.h.orig 2023-12-23 12:33:28 UTC
+++ base/process/process_metrics.h
@@ -37,7 +37,7 @@
#endif
@@ -36,7 +36,7 @@
// Emits the cumulative CPU usage for all currently active threads since they
// were started into the output parameter (replacing its current contents).
// Threads that have already terminated will not be reported. Thus, the sum of
-@@ -223,7 +223,7 @@ class BASE_EXPORT ProcessMetrics {
+@@ -219,7 +219,7 @@ class BASE_EXPORT ProcessMetrics {
int GetOpenFdSoftLimit() const;
#endif // BUILDFLAG(IS_POSIX)
@@ -45,7 +45,7 @@
// Bytes of swap as reported by /proc/[pid]/status.
uint64_t GetVmSwapBytes() const;
-@@ -244,7 +244,7 @@ class BASE_EXPORT ProcessMetrics {
+@@ -240,7 +240,7 @@ class BASE_EXPORT ProcessMetrics {
#endif // !BUILDFLAG(IS_MAC)
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
@@ -54,7 +54,7 @@
int CalculateIdleWakeupsPerSecond(uint64_t absolute_idle_wakeups);
#endif
#if BUILDFLAG(IS_APPLE)
-@@ -266,9 +266,7 @@ class BASE_EXPORT ProcessMetrics {
+@@ -262,9 +262,7 @@ class BASE_EXPORT ProcessMetrics {
// Used to store the previous times and CPU usage counts so we can
// compute the CPU usage between calls.
TimeTicks last_cpu_time_;
@@ -64,7 +64,7 @@
#if BUILDFLAG(IS_WIN)
TimeTicks last_cpu_time_for_precise_cpu_usage_;
-@@ -276,7 +274,7 @@ class BASE_EXPORT ProcessMetrics {
+@@ -272,7 +270,7 @@ class BASE_EXPORT ProcessMetrics {
#endif
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
@@ -73,7 +73,7 @@
// Same thing for idle wakeups.
TimeTicks last_idle_wakeups_time_;
uint64_t last_absolute_idle_wakeups_;
-@@ -316,7 +314,7 @@ BASE_EXPORT void IncreaseFdLimitTo(unsigned int max_de
+@@ -313,7 +311,7 @@ BASE_EXPORT void IncreaseFdLimitTo(unsigned int max_de
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || \
BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_AIX) || \
@@ -82,7 +82,7 @@
// Data about system-wide memory consumption. Values are in KB. Available on
// Windows, Mac, Linux, Android and Chrome OS.
//
-@@ -351,7 +349,7 @@ struct BASE_EXPORT SystemMemoryInfoKB {
+@@ -348,7 +346,7 @@ struct BASE_EXPORT SystemMemoryInfoKB {
#endif
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || \
@@ -91,7 +91,7 @@
// This provides an estimate of available memory as described here:
// https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
// NOTE: this is ONLY valid in kernels 3.14 and up. Its value will always
-@@ -366,7 +364,7 @@ struct BASE_EXPORT SystemMemoryInfoKB {
+@@ -363,7 +361,7 @@ struct BASE_EXPORT SystemMemoryInfoKB {
#endif
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
@@ -100,7 +100,7 @@
int buffers = 0;
int cached = 0;
int active_anon = 0;
-@@ -403,7 +401,7 @@ BASE_EXPORT bool GetSystemMemoryInfo(SystemMemoryInfoK
+@@ -400,7 +398,7 @@ BASE_EXPORT bool GetSystemMemoryInfo(SystemMemoryInfoK
// BUILDFLAG(IS_FUCHSIA)
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || \
@@ -109,7 +109,7 @@
// Parse the data found in /proc/<pid>/stat and return the sum of the
// CPU-related ticks. Returns -1 on parse error.
// Exposed for testing.
-@@ -591,7 +589,7 @@ class BASE_EXPORT SystemMetrics {
+@@ -588,7 +586,7 @@ class BASE_EXPORT SystemMetrics {
FRIEND_TEST_ALL_PREFIXES(SystemMetricsTest, SystemMetrics);
size_t committed_memory_;
diff --git a/www/ungoogled-chromium/files/patch-base_process_process__metrics__unittest.cc b/www/ungoogled-chromium/files/patch-base_process_process__metrics__unittest.cc
index 70839ae96f8e..c20469935b12 100644
--- a/www/ungoogled-chromium/files/patch-base_process_process__metrics__unittest.cc
+++ b/www/ungoogled-chromium/files/patch-base_process_process__metrics__unittest.cc
@@ -1,20 +1,18 @@
---- base/process/process_metrics_unittest.cc.orig 2022-10-01 07:40:07 UTC
+--- base/process/process_metrics_unittest.cc.orig 2023-12-23 12:33:28 UTC
+++ base/process/process_metrics_unittest.cc
-@@ -44,7 +44,7 @@ namespace debug {
+@@ -35,13 +35,13 @@
+ #include <sys/mman.h>
+ #endif
- #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
- BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_WIN) || \
-- BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA)
-+ BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_BSD)
-
- namespace {
-
-@@ -347,7 +347,7 @@ TEST_F(SystemMetricsTest, ParseVmstat) {
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_BSD)
+ #include "base/process/internal_linux.h"
+ #endif
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_WIN) || \
-- BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA)
-+ BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_BSD)
-
- // Test that ProcessMetrics::GetPlatformIndependentCPUUsage() doesn't return
- // negative values when the number of threads running on the process decreases
+- BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_APPLE)
++ BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_BSD)
+ #define ENABLE_CPU_TESTS 1
+ #else
+ #define ENABLE_CPU_TESTS 0
diff --git a/www/ungoogled-chromium/files/patch-base_profiler_module__cache.cc b/www/ungoogled-chromium/files/patch-base_profiler_module__cache.cc
index cfa487a2b276..3952155ee7a0 100644
--- a/www/ungoogled-chromium/files/patch-base_profiler_module__cache.cc
+++ b/www/ungoogled-chromium/files/patch-base_profiler_module__cache.cc
@@ -1,6 +1,6 @@
---- base/profiler/module_cache.cc.orig 2023-09-17 07:59:53 UTC
+--- base/profiler/module_cache.cc.orig 2023-12-23 12:33:28 UTC
+++ base/profiler/module_cache.cc
-@@ -52,7 +52,7 @@ std::string TransformModuleIDToSymbolServerFormat(Stri
+@@ -37,7 +37,7 @@ std::string TransformModuleIDToSymbolServerFormat(Stri
// Android and Linux Chrome builds use the "breakpad" format to index their
// build id, so we transform the build id for these platforms. All other
// platforms keep their symbols indexed by the original build ID.
diff --git a/www/ungoogled-chromium/files/patch-base_rand__util.h b/www/ungoogled-chromium/files/patch-base_rand__util.h
index 412bf8de4730..8588ca910ef5 100644
--- a/www/ungoogled-chromium/files/patch-base_rand__util.h
+++ b/www/ungoogled-chromium/files/patch-base_rand__util.h
@@ -1,6 +1,6 @@
---- base/rand_util.h.orig 2023-06-05 19:39:05 UTC
+--- base/rand_util.h.orig 2023-12-23 12:33:28 UTC
+++ base/rand_util.h
-@@ -121,7 +121,7 @@ void RandomShuffle(Itr first, Itr last) {
+@@ -136,7 +136,7 @@ void RandomShuffle(Itr first, Itr last) {
std::shuffle(first, last, RandomBitGenerator());
}
diff --git a/www/ungoogled-chromium/files/patch-base_system_sys__info.cc b/www/ungoogled-chromium/files/patch-base_system_sys__info.cc
index c25d697b7d4f..1565f1241649 100644
--- a/www/ungoogled-chromium/files/patch-base_system_sys__info.cc
+++ b/www/ungoogled-chromium/files/patch-base_system_sys__info.cc
@@ -1,6 +1,6 @@
---- base/system/sys_info.cc.orig 2023-09-17 07:59:53 UTC
+--- base/system/sys_info.cc.orig 2023-12-23 12:33:28 UTC
+++ base/system/sys_info.cc
-@@ -166,7 +166,7 @@ std::string SysInfo::HardwareModelName() {
+@@ -225,7 +225,7 @@ std::string SysInfo::HardwareModelName() {
#endif
void SysInfo::GetHardwareInfo(base::OnceCallback<void(HardwareInfo)> callback) {
diff --git a/www/ungoogled-chromium/files/patch-base_system_sys__info.h b/www/ungoogled-chromium/files/patch-base_system_sys__info.h
index 8cf23c746362..e25e3466169b 100644
--- a/www/ungoogled-chromium/files/patch-base_system_sys__info.h
+++ b/www/ungoogled-chromium/files/patch-base_system_sys__info.h
@@ -1,6 +1,6 @@
---- base/system/sys_info.h.orig 2023-11-04 07:08:51 UTC
+--- base/system/sys_info.h.orig 2023-12-23 12:33:28 UTC
+++ base/system/sys_info.h
-@@ -304,6 +304,8 @@ class BASE_EXPORT SysInfo {
+@@ -321,6 +321,8 @@ class BASE_EXPORT SysInfo {
static void ResetCpuSecurityMitigationsEnabledForTesting();
#endif
@@ -9,7 +9,7 @@
private:
friend class test::ScopedAmountOfPhysicalMemoryOverride;
FRIEND_TEST_ALL_PREFIXES(SysInfoTest, AmountOfAvailablePhysicalMemory);
-@@ -316,7 +318,7 @@ class BASE_EXPORT SysInfo {
+@@ -333,7 +335,7 @@ class BASE_EXPORT SysInfo {
static HardwareInfo GetHardwareInfoSync();
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || \
diff --git a/www/ungoogled-chromium/files/patch-base_system_sys__info__posix.cc b/www/ungoogled-chromium/files/patch-base_system_sys__info__posix.cc
index 41027f3cdff0..152e9454d826 100644
--- a/www/ungoogled-chromium/files/patch-base_system_sys__info__posix.cc
+++ b/www/ungoogled-chromium/files/patch-base_system_sys__info__posix.cc
@@ -1,4 +1,4 @@
---- base/system/sys_info_posix.cc.orig 2023-08-18 10:26:52 UTC
+--- base/system/sys_info_posix.cc.orig 2023-12-23 12:33:28 UTC
+++ base/system/sys_info_posix.cc
@@ -117,7 +117,7 @@ bool GetDiskSpaceInfo(const base::FilePath& path,
@@ -6,19 +6,19 @@
-#if !BUILDFLAG(IS_OPENBSD)
+#if !BUILDFLAG(IS_BSD)
+ // static
int SysInfo::NumberOfProcessors() {
#if BUILDFLAG(IS_MAC)
- absl::optional<int> number_of_physical_cores =
-@@ -161,7 +161,7 @@ int SysInfo::NumberOfProcessors() {
+@@ -174,7 +174,7 @@ int SysInfo::NumberOfProcessors() {
- return num_cpus;
+ return cached_num_cpus;
}
-#endif // !BUILDFLAG(IS_OPENBSD)
+#endif // !BUILDFLAG(IS_BSD)
// static
uint64_t SysInfo::AmountOfVirtualMemory() {
-@@ -251,6 +251,8 @@ std::string SysInfo::OperatingSystemArchitecture() {
+@@ -264,6 +264,8 @@ std::string SysInfo::OperatingSystemArchitecture() {
arch = "x86";
} else if (arch == "amd64") {
arch = "x86_64";
diff --git a/www/ungoogled-chromium/files/patch-build_config_compiler_BUILD.gn b/www/ungoogled-chromium/files/patch-build_config_compiler_BUILD.gn
index f3a9eb61ac8c..e34084854863 100644
--- a/www/ungoogled-chromium/files/patch-build_config_compiler_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-build_config_compiler_BUILD.gn
@@ -1,6 +1,6 @@
---- build/config/compiler/BUILD.gn.orig 2023-11-04 07:08:51 UTC
+--- build/config/compiler/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ build/config/compiler/BUILD.gn
-@@ -196,7 +196,7 @@ declare_args() {
+@@ -202,7 +202,7 @@ declare_args() {
# This greatly reduces the size of debug builds, at the cost of
# debugging information which is required by some specialized
# debugging tools.
@@ -9,7 +9,7 @@
}
declare_args() {
-@@ -260,13 +260,16 @@ config("default_include_dirs") {
+@@ -266,13 +266,16 @@ config("default_include_dirs") {
# Compiler instrumentation can introduce dependencies in DSOs to symbols in
# the executable they are loaded into, so they are unresolved at link-time.
config("no_unresolved_symbols") {
@@ -27,7 +27,7 @@
}
# compiler ---------------------------------------------------------------------
-@@ -507,6 +510,10 @@ config("compiler") {
+@@ -518,6 +521,10 @@ config("compiler") {
}
}
@@ -38,7 +38,7 @@
# Linux-specific compiler flags setup.
# ------------------------------------
if (use_gold) {
-@@ -582,7 +589,7 @@ config("compiler") {
+@@ -593,7 +600,7 @@ config("compiler") {
ldflags += [ "-Wl,-z,keep-text-section-prefix" ]
}
@@ -47,16 +47,16 @@
cflags += [ "-fcrash-diagnostics-dir=" + clang_diagnostic_dir ]
if (save_reproducers_on_lld_crash && use_lld) {
ldflags += [
-@@ -768,7 +775,7 @@ config("compiler") {
-
- ldflags += [ "-Wl,-mllvm,-import-instr-limit=$import_instr_limit" ]
-
+@@ -800,7 +807,7 @@ config("compiler") {
+ if (is_apple) {
+ ldflags += [ "-Wcrl,object_path_lto" ]
+ }
- if (!is_chromeos) {
+ if (!is_chromeos && !is_bsd) {
# TODO(https://crbug.com/972449): turn on for ChromeOS when that
# toolchain has this flag.
# We only use one version of LLVM within a build so there's no need to
-@@ -1133,7 +1140,7 @@ config("compiler_cpu_abi") {
+@@ -1173,7 +1180,7 @@ config("compiler_cpu_abi") {
]
}
} else if (current_cpu == "arm") {
@@ -65,7 +65,7 @@
!(is_chromeos_lacros && is_chromeos_device)) {
cflags += [ "--target=arm-linux-gnueabihf" ]
ldflags += [ "--target=arm-linux-gnueabihf" ]
-@@ -1148,7 +1155,7 @@ config("compiler_cpu_abi") {
+@@ -1188,7 +1195,7 @@ config("compiler_cpu_abi") {
cflags += [ "-mtune=$arm_tune" ]
}
} else if (current_cpu == "arm64") {
@@ -74,7 +74,7 @@
!(is_chromeos_lacros && is_chromeos_device)) {
cflags += [ "--target=aarch64-linux-gnu" ]
ldflags += [ "--target=aarch64-linux-gnu" ]
-@@ -1483,7 +1490,7 @@ config("compiler_deterministic") {
+@@ -1523,7 +1530,7 @@ config("compiler_deterministic") {
# different build directory like "out/feature_a" and "out/feature_b" if
# we build same files with same compile flag.
# Other paths are already given in relative, no need to normalize them.
@@ -83,7 +83,7 @@
# TODO(https://crbug.com/1231236): Use -ffile-compilation-dir= here.
cflags += [
"-Xclang",
-@@ -1535,7 +1542,7 @@ config("compiler_deterministic") {
+@@ -1575,7 +1582,7 @@ config("compiler_deterministic") {
}
config("clang_revision") {
@@ -92,16 +92,16 @@
update_args = [
"--print-revision",
"--verify-version=$clang_version",
-@@ -1828,7 +1835,7 @@ config("default_warnings") {
- ]
- }
+@@ -1860,7 +1867,7 @@ config("default_warnings") {
+ "-Wno-ignored-pragma-optimize",
+ ]
- if (!is_nacl) {
+ if (!is_nacl && !is_bsd) {
cflags += [
# TODO(crbug.com/1343975) Evaluate and possibly enable.
"-Wno-deprecated-builtins",
-@@ -2021,7 +2028,7 @@ config("no_chromium_code") {
+@@ -2066,7 +2073,7 @@ config("no_chromium_code") {
# third-party libraries.
"-Wno-c++11-narrowing",
]
@@ -110,7 +110,7 @@
cflags += [
# Disabled for similar reasons as -Wunused-variable.
"-Wno-unused-but-set-variable",
-@@ -2550,7 +2557,7 @@ config("afdo_optimize_size") {
+@@ -2595,7 +2602,7 @@ config("afdo_optimize_size") {
# There are some targeted places that AFDO regresses, so we provide a separate
# config to allow AFDO to be disabled per-target.
config("afdo") {
@@ -119,7 +119,7 @@
cflags = []
if (clang_emit_debug_info_for_profiling) {
# Add the following flags to generate debug info for profiling.
-@@ -2577,7 +2584,7 @@ config("afdo") {
+@@ -2622,7 +2629,7 @@ config("afdo") {
cflags += [ "-Wno-backend-plugin" ]
inputs = [ _clang_sample_profile ]
}
@@ -128,7 +128,7 @@
cflags = [ "-fauto-profile=${auto_profile_path}" ]
inputs = [ auto_profile_path ]
}
-@@ -2741,7 +2748,8 @@ config("symbols") {
+@@ -2786,7 +2793,8 @@ config("symbols") {
configs += [ "//build/config:compress_debug_sections" ]
}
diff --git a/www/ungoogled-chromium/files/patch-build_linux_unbundle_libusb.gn b/www/ungoogled-chromium/files/patch-build_linux_unbundle_libusb.gn
index 2144bd3922d1..1a7ba94027e8 100644
--- a/www/ungoogled-chromium/files/patch-build_linux_unbundle_libusb.gn
+++ b/www/ungoogled-chromium/files/patch-build_linux_unbundle_libusb.gn
@@ -1,6 +1,6 @@
---- build/linux/unbundle/libusb.gn.orig 2022-10-01 07:40:07 UTC
+--- build/linux/unbundle/libusb.gn.orig 2023-12-23 12:33:28 UTC
+++ build/linux/unbundle/libusb.gn
-@@ -0,0 +1,24 @@
+@@ -1,3 +1,27 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
@@ -25,3 +25,6 @@
+ ]
+ public_configs = [ ":system_libusb" ]
+}
+ import("//build/config/linux/pkg_config.gni")
+ import("//build/shim_headers.gni")
+
diff --git a/www/ungoogled-chromium/files/patch-build_linux_unbundle_replace__gn__files.py b/www/ungoogled-chromium/files/patch-build_linux_unbundle_replace__gn__files.py
deleted file mode 100644
index 95dd8efda0f0..000000000000
--- a/www/ungoogled-chromium/files/patch-build_linux_unbundle_replace__gn__files.py
+++ /dev/null
@@ -1,10 +0,0 @@
---- build/linux/unbundle/replace_gn_files.py.orig 2023-02-11 09:11:04 UTC
-+++ build/linux/unbundle/replace_gn_files.py
-@@ -54,6 +54,7 @@ REPLACEMENTS = {
- 'libevent': 'third_party/libevent/BUILD.gn',
- 'libjpeg': 'third_party/libjpeg.gni',
- 'libpng': 'third_party/libpng/BUILD.gn',
-+ 'libusb': 'third_party/libusb/BUILD.gn',
- 'libvpx': 'third_party/libvpx/BUILD.gn',
- 'libwebp': 'third_party/libwebp/BUILD.gn',
- 'libxml': 'third_party/libxml/BUILD.gn',
diff --git a/www/ungoogled-chromium/files/patch-build_toolchain_gcc__toolchain.gni b/www/ungoogled-chromium/files/patch-build_toolchain_gcc__toolchain.gni
index d5ac2463d0c3..ac927c6bd503 100644
--- a/www/ungoogled-chromium/files/patch-build_toolchain_gcc__toolchain.gni
+++ b/www/ungoogled-chromium/files/patch-build_toolchain_gcc__toolchain.gni
@@ -1,4 +1,4 @@
---- build/toolchain/gcc_toolchain.gni.orig 2023-11-04 07:08:51 UTC
+--- build/toolchain/gcc_toolchain.gni.orig 2023-12-23 12:33:28 UTC
+++ build/toolchain/gcc_toolchain.gni
@@ -53,6 +53,13 @@ if (enable_resource_allowlist_generation) {
"enable_resource_allowlist_generation=true does not work for target_os=$target_os")
@@ -14,7 +14,7 @@
# This template defines a toolchain for something that works like gcc
# (including clang).
#
-@@ -875,22 +882,12 @@ template("gcc_toolchain") {
+@@ -878,22 +885,12 @@ template("gcc_toolchain") {
# use_gold too.
template("clang_toolchain") {
gcc_toolchain(target_name) {
diff --git a/www/ungoogled-chromium/files/patch-cc_BUILD.gn b/www/ungoogled-chromium/files/patch-cc_BUILD.gn
index 11d0d7c8c032..a000d3ec36d6 100644
--- a/www/ungoogled-chromium/files/patch-cc_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-cc_BUILD.gn
@@ -1,6 +1,6 @@
---- cc/BUILD.gn.orig 2023-11-04 07:08:51 UTC
+--- cc/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ cc/BUILD.gn
-@@ -675,7 +675,7 @@ cc_test_static_library("test_support") {
+@@ -676,7 +676,7 @@ cc_test_static_library("test_support") {
if (enable_vulkan) {
deps += [ "//gpu/vulkan/init" ]
}
@@ -9,7 +9,7 @@
data_deps = [ "//third_party/mesa_headers" ]
}
if (skia_use_dawn) {
-@@ -930,7 +930,6 @@ cc_test("cc_unittests") {
+@@ -931,7 +931,6 @@ cc_test("cc_unittests") {
data = [ "//components/test/data/viz/" ]
data_deps = [
"//testing/buildbot/filters:cc_unittests_filters",
diff --git a/www/ungoogled-chromium/files/patch-cc_paint_paint__op__writer.h b/www/ungoogled-chromium/files/patch-cc_paint_paint__op__writer.h
index c7f9d3cd3b20..da0334b9c4b9 100644
--- a/www/ungoogled-chromium/files/patch-cc_paint_paint__op__writer.h
+++ b/www/ungoogled-chromium/files/patch-cc_paint_paint__op__writer.h
@@ -1,6 +1,6 @@
---- cc/paint/paint_op_writer.h.orig 2023-08-18 10:26:52 UTC
+--- cc/paint/paint_op_writer.h.orig 2023-12-23 12:33:28 UTC
+++ cc/paint/paint_op_writer.h
-@@ -114,10 +114,17 @@ class CC_PAINT_EXPORT PaintOpWriter {
+@@ -122,10 +122,17 @@ class CC_PAINT_EXPORT PaintOpWriter {
// easier to keep serialized size calculation in sync with serialization and
// deserialization, and make it possible to allow dynamic sizing for some
// data types (see the specialized/overloaded functions).
@@ -18,7 +18,7 @@
static size_t SerializedSize(const PaintImage& image);
static size_t SerializedSize(const PaintRecord& record);
static size_t SerializedSize(const SkHighContrastConfig& config);
-@@ -386,12 +393,20 @@ constexpr size_t PaintOpWriter::SerializedSize<SkGainm
+@@ -463,12 +470,20 @@ constexpr size_t PaintOpWriter::SerializedSize<SkGainm
}
template <typename T>
diff --git a/www/ungoogled-chromium/files/patch-chrome_app_app__management__strings.grdp b/www/ungoogled-chromium/files/patch-chrome_app_app__management__strings.grdp
index 12aa7968e9ce..414d9b737109 100644
--- a/www/ungoogled-chromium/files/patch-chrome_app_app__management__strings.grdp
+++ b/www/ungoogled-chromium/files/patch-chrome_app_app__management__strings.grdp
@@ -1,6 +1,6 @@
---- chrome/app/app_management_strings.grdp.orig 2023-09-17 07:59:53 UTC
+--- chrome/app/app_management_strings.grdp.orig 2023-12-23 12:33:28 UTC
+++ chrome/app/app_management_strings.grdp
-@@ -188,7 +188,7 @@
+@@ -194,7 +194,7 @@
You can open and edit supported files with this app from Finder or other apps. To control which files open this app by default, <ph name="BEGIN_LINK">&lt;a href="#"&gt;</ph>learn how to set default apps on your device<ph name="END_LINK">&lt;/a&gt;</ph>.
</message>
</if>
diff --git a/www/ungoogled-chromium/files/patch-chrome_app_chrome__main__delegate.cc b/www/ungoogled-chromium/files/patch-chrome_app_chrome__main__delegate.cc
index 762c65ae80be..10fa79af0ded 100644
--- a/www/ungoogled-chromium/files/patch-chrome_app_chrome__main__delegate.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_app_chrome__main__delegate.cc
@@ -1,4 +1,4 @@
---- chrome/app/chrome_main_delegate.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/app/chrome_main_delegate.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/app/chrome_main_delegate.cc
@@ -145,7 +145,7 @@
#include "components/about_ui/credit_utils.h"
@@ -9,7 +9,7 @@
#include "components/nacl/common/nacl_paths.h"
#include "components/nacl/zygote/nacl_fork_delegate_linux.h"
#endif
-@@ -188,16 +188,16 @@
+@@ -189,16 +189,16 @@
#include "v8/include/v8.h"
#endif
@@ -29,7 +29,7 @@
#include "chrome/browser/policy/policy_path_parser.h"
#include "components/crash/core/app/crashpad.h"
#endif
-@@ -336,7 +336,7 @@ void AdjustLinuxOOMScore(const std::string& process_ty
+@@ -337,7 +337,7 @@ void AdjustLinuxOOMScore(const std::string& process_ty
// and resources loaded.
bool SubprocessNeedsResourceBundle(const std::string& process_type) {
return
@@ -38,7 +38,7 @@
// The zygote process opens the resources for the renderers.
process_type == switches::kZygoteProcess ||
#endif
-@@ -421,7 +421,7 @@ bool HandleVersionSwitches(const base::CommandLine& co
+@@ -422,7 +422,7 @@ bool HandleVersionSwitches(const base::CommandLine& co
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
@@ -47,7 +47,7 @@
// Show the man page if --help or -h is on the command line.
void HandleHelpSwitches(const base::CommandLine& command_line) {
if (command_line.HasSwitch(switches::kHelp) ||
-@@ -593,7 +593,7 @@ void InitializeUserDataDir(base::CommandLine* command_
+@@ -600,7 +600,7 @@ void InitializeUserDataDir(base::CommandLine* command_
std::string process_type =
command_line->GetSwitchValueASCII(switches::kProcessType);
@@ -56,7 +56,7 @@
// On Linux, Chrome does not support running multiple copies under different
// DISPLAYs, so the profile directory can be specified in the environment to
// support the virtual desktop use-case.
-@@ -683,7 +683,7 @@ void RecordMainStartupMetrics(base::TimeTicks applicat
+@@ -690,7 +690,7 @@ void RecordMainStartupMetrics(base::TimeTicks applicat
#endif
#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || \
@@ -65,7 +65,7 @@
// Record the startup process creation time on supported platforms. On Android
// this is recorded in ChromeMainDelegateAndroid.
startup_metric_utils::GetCommon().RecordStartupProcessCreationTime(
-@@ -1012,7 +1012,7 @@ void ChromeMainDelegate::CommonEarlyInitialization(Inv
+@@ -1044,7 +1044,7 @@ void ChromeMainDelegate::CommonEarlyInitialization(Inv
base::InitializeCpuReductionExperiment();
base::sequence_manager::internal::SequenceManagerImpl::InitializeFeatures();
base::sequence_manager::internal::ThreadController::InitializeFeatures();
@@ -74,7 +74,7 @@
base::MessagePumpLibevent::InitializeFeatures();
#elif BUILDFLAG(IS_MAC)
base::PlatformThread::InitFeaturesPostFieldTrial();
-@@ -1159,7 +1159,7 @@ absl::optional<int> ChromeMainDelegate::BasicStartupCo
+@@ -1196,7 +1196,7 @@ absl::optional<int> ChromeMainDelegate::BasicStartupCo
// TODO(crbug.com/1052397): Revisit the macro expression once build flag
// switch of lacros-chrome is complete.
@@ -83,7 +83,7 @@
// This will directly exit if the user asked for help.
HandleHelpSwitches(command_line);
#endif
-@@ -1189,7 +1189,7 @@ absl::optional<int> ChromeMainDelegate::BasicStartupCo
+@@ -1226,7 +1226,7 @@ absl::optional<int> ChromeMainDelegate::BasicStartupCo
#if BUILDFLAG(IS_CHROMEOS)
chromeos::dbus_paths::RegisterPathProvider();
#endif
@@ -92,7 +92,7 @@
nacl::RegisterPathProvider();
#endif
-@@ -1589,7 +1589,7 @@ void ChromeMainDelegate::PreSandboxStartup() {
+@@ -1637,7 +1637,7 @@ void ChromeMainDelegate::PreSandboxStartup() {
CHECK(!loaded_locale.empty()) << "Locale could not be found for " << locale;
}
@@ -101,7 +101,7 @@
// Zygote needs to call InitCrashReporter() in RunZygote().
if (process_type != switches::kZygoteProcess) {
if (command_line.HasSwitch(switches::kPreCrashpadCrashTest)) {
-@@ -1691,7 +1691,7 @@ absl::variant<int, content::MainFunctionParams> Chrome
+@@ -1746,7 +1746,7 @@ absl::variant<int, content::MainFunctionParams> Chrome
// This entry is not needed on Linux, where the NaCl loader
// process is launched via nacl_helper instead.
diff --git a/www/ungoogled-chromium/files/patch-chrome_app_chromium__strings.grd b/www/ungoogled-chromium/files/patch-chrome_app_chromium__strings.grd
index 2aec929902cc..15677f01c8b2 100644
--- a/www/ungoogled-chromium/files/patch-chrome_app_chromium__strings.grd
+++ b/www/ungoogled-chromium/files/patch-chrome_app_chromium__strings.grd
@@ -1,4 +1,4 @@
---- chrome/app/chromium_strings.grd.orig 2023-11-04 07:08:51 UTC
+--- chrome/app/chromium_strings.grd.orig 2023-12-23 12:33:28 UTC
+++ chrome/app/chromium_strings.grd
@@ -315,7 +315,7 @@ If you update this file, be sure also to update google
Welcome to Chromium; new browser window opened
@@ -18,7 +18,7 @@
<message name="IDS_LINUX_OBSOLETE" desc="A message displayed on an at-launch infobar and about:help warning the user that the OS version they are using is no longer supported.">
Chromium may not function correctly because it is no longer supported on this Linux distribution
</message>
-@@ -896,7 +896,7 @@ Permissions you've already given to websites and apps
+@@ -892,7 +892,7 @@ Permissions you've already given to websites and apps
</message>
</if>
@@ -27,7 +27,7 @@
<message name="IDS_RELAUNCH_TO_UPDATE_ALT" desc="Alternate text label of the relaunch to update Chrome menu item" translateable="false">
Not used in Chromium. Placeholder to keep resource maps in sync.
</message>
-@@ -1272,7 +1272,7 @@ Permissions you've already given to websites and apps
+@@ -1277,7 +1277,7 @@ Permissions you've already given to websites and apps
</message>
</if>
diff --git a/www/ungoogled-chromium/files/patch-chrome_app_generated__resources.grd b/www/ungoogled-chromium/files/patch-chrome_app_generated__resources.grd
index d84988236942..dd28a6a845f5 100644
--- a/www/ungoogled-chromium/files/patch-chrome_app_generated__resources.grd
+++ b/www/ungoogled-chromium/files/patch-chrome_app_generated__resources.grd
@@ -1,4 +1,4 @@
---- chrome/app/generated_resources.grd.orig 2023-11-04 07:08:51 UTC
+--- chrome/app/generated_resources.grd.orig 2023-12-23 12:33:28 UTC
+++ chrome/app/generated_resources.grd
@@ -2,7 +2,7 @@
@@ -9,7 +9,7 @@
for making strings OS specific. Other platform defines such as use_titlecase
are declared in tools/grit/grit_rule.gni.
-->
-@@ -3605,7 +3605,7 @@ are declared in tools/grit/grit_rule.gni.
+@@ -3609,7 +3609,7 @@ are declared in tools/grit/grit_rule.gni.
</if>
<!-- Bluetooth Device Credentials (i.e. PIN/Passkey) dialog -->
@@ -18,7 +18,7 @@
<message name="IDS_BLUETOOTH_DEVICE_CREDENTIALS_TITLE" desc="Title of the Bluetooth device credentials prompt dialog.">
Device Credentials
</message>
-@@ -5445,7 +5445,7 @@ are declared in tools/grit/grit_rule.gni.
+@@ -5494,7 +5494,7 @@ are declared in tools/grit/grit_rule.gni.
Read information about your browser, OS, device, installed software, registry values and files
</message>
</if>
@@ -27,7 +27,7 @@
<message name="IDS_EXTENSION_PROMPT_WARNING_ENTERPRISE_REPORTING_PRIVATE_ENABLED_LINUX_AND_MACOS" desc="Permission string for enterprise private reporting permission on Linux and MacOS.">
Read information about your browser, OS, device, installed software and files
</message>
-@@ -6158,7 +6158,7 @@ Keep your key file in a safe place. You will need it t
+@@ -6207,7 +6207,7 @@ Keep your key file in a safe place. You will need it t
Old versions of Chrome Apps won't open on Windows devices after December 2022. Contact your administrator to update to a new version or remove this app.
</message>
</if>
@@ -36,7 +36,7 @@
<message name="IDS_FORCE_INSTALLED_DEPRECATED_APPS_CONTENT" desc="Content of the force installed deprecated app dialog">
Old versions of Chrome Apps won't open on Linux devices after December 2022. Contact your administrator to update to a new version or remove this app.
</message>
-@@ -6204,7 +6204,7 @@ Keep your key file in a safe place. You will need it t
+@@ -6253,7 +6253,7 @@ Keep your key file in a safe place. You will need it t
Old versions of Chrome apps won't open on Windows devices after December 2022. You can check if there's a new version available.
</message>
</if>
@@ -45,7 +45,7 @@
<message name="IDS_DEPRECATED_APPS_MONITOR_RENDERER" desc="Dialog content that educates users that Chrome Apps will soon no longer launch.">
Old versions of Chrome apps won't open on Linux devices after December 2022. You can check if there's a new version available.
</message>
-@@ -10396,7 +10396,7 @@ Check your passwords anytime in <ph name="GOOGLE_PASSW
+@@ -10611,7 +10611,7 @@ Check your passwords anytime in <ph name="GOOGLE_PASSW
<message name="IDS_APP_MENU_BUTTON_UPDATE" desc="Short label next to app-menu button when an update is available.">
Update
</message>
@@ -54,7 +54,7 @@
<message name="IDS_APP_MENU_BUTTON_UPDATE_ALT1" desc="Alternate short label next to app-menu button when an update is available.">
Finish update
</message>
-@@ -10743,7 +10743,7 @@ Check your passwords anytime in <ph name="GOOGLE_PASSW
+@@ -10958,7 +10958,7 @@ Check your passwords anytime in <ph name="GOOGLE_PASSW
Google Pay
</message>
@@ -63,7 +63,7 @@
<message name="IDS_SHOW_WINDOW_DECORATIONS" desc="The label of a radio button in the options dialog for using the system title bar and borders.">
Use system title bar and borders
</message>
-@@ -11734,7 +11734,7 @@ Check your passwords anytime in <ph name="GOOGLE_PASSW
+@@ -11956,7 +11956,7 @@ Check your passwords anytime in <ph name="GOOGLE_PASSW
</message>
<!-- Device Trust Consent dialog -->
@@ -72,7 +72,7 @@
<message name="IDS_DEVICE_SIGNALS_CONSENT_DIALOG_TITLE" desc="Title of the dialog shown when user consent is required to share device signals.">
Share information about your device?
</message>
-@@ -12142,7 +12142,7 @@ Please help our engineers fix this problem. Tell us wh
+@@ -12364,7 +12364,7 @@ Please help our engineers fix this problem. Tell us wh
Set as default
</message>
@@ -81,7 +81,7 @@
<message name="IDS_MINIMIZE_WINDOW_MENU" desc="The Linux browser window menu item text for minimizing the window.">
Minimize
</message>
-@@ -14349,7 +14349,7 @@ Please help our engineers fix this problem. Tell us wh
+@@ -14571,7 +14571,7 @@ Please help our engineers fix this problem. Tell us wh
Open Anyway
</message>
diff --git a/www/ungoogled-chromium/files/patch-chrome_app_google__chrome__strings.grd b/www/ungoogled-chromium/files/patch-chrome_app_google__chrome__strings.grd
index 5543890f4f2d..ebcc1f209c09 100644
--- a/www/ungoogled-chromium/files/patch-chrome_app_google__chrome__strings.grd
+++ b/www/ungoogled-chromium/files/patch-chrome_app_google__chrome__strings.grd
@@ -1,4 +1,4 @@
---- chrome/app/google_chrome_strings.grd.orig 2023-11-04 07:08:51 UTC
+--- chrome/app/google_chrome_strings.grd.orig 2023-12-23 12:33:28 UTC
+++ chrome/app/google_chrome_strings.grd
@@ -301,7 +301,7 @@ chromium_strings.grd. -->
Welcome to Chrome; new browser window opened
@@ -18,7 +18,7 @@
<message name="IDS_LINUX_OBSOLETE" desc="A message displayed on an at-launch infobar and about:help warning the user that the OS version they are using is no longer supported.">
Google Chrome may not function correctly because it is no longer supported on this Linux distribution
</message>
-@@ -881,7 +881,7 @@ Permissions you've already given to websites and apps
+@@ -877,7 +877,7 @@ Permissions you've already given to websites and apps
</if>
</if>
@@ -27,7 +27,7 @@
<if expr="use_titlecase">
<message name="IDS_RELAUNCH_TO_UPDATE_ALT" desc="Alternate text label of the relaunch to update Chrome menu item">
Relaunch to Update - Your tabs will reopen
-@@ -1297,7 +1297,7 @@ Permissions you've already given to websites and apps
+@@ -1302,7 +1302,7 @@ Permissions you've already given to websites and apps
</message>
</if>
diff --git a/www/ungoogled-chromium/files/patch-chrome_app_theme_chrome__unscaled__resources.grd b/www/ungoogled-chromium/files/patch-chrome_app_theme_chrome__unscaled__resources.grd
index 29a1e713a68b..196d15c1de10 100644
--- a/www/ungoogled-chromium/files/patch-chrome_app_theme_chrome__unscaled__resources.grd
+++ b/www/ungoogled-chromium/files/patch-chrome_app_theme_chrome__unscaled__resources.grd
@@ -1,4 +1,4 @@
---- chrome/app/theme/chrome_unscaled_resources.grd.orig 2023-07-21 09:49:17 UTC
+--- chrome/app/theme/chrome_unscaled_resources.grd.orig 2023-12-23 12:33:28 UTC
+++ chrome/app/theme/chrome_unscaled_resources.grd
@@ -16,7 +16,7 @@
<includes>
@@ -9,7 +9,7 @@
<then>
<include name="IDR_PRODUCT_LOGO_64" file="google_chrome/linux/product_logo_64.png" type="BINDATA" />
<include name="IDR_PRODUCT_LOGO_128" file="google_chrome/linux/product_logo_128.png" type="BINDATA" />
-@@ -61,7 +61,7 @@
+@@ -63,7 +63,7 @@
<else> <!-- not _google_chrome -->
<if expr="_is_chrome_for_testing_branded">
<then>
@@ -18,7 +18,7 @@
<then>
<include name="IDR_PRODUCT_LOGO_64" file="google_chrome/google_chrome_for_testing/linux/product_logo_64.png" type="BINDATA" />
<include name="IDR_PRODUCT_LOGO_128" file="google_chrome/google_chrome_for_testing/linux/product_logo_128.png" type="BINDATA" />
-@@ -75,7 +75,7 @@
+@@ -77,7 +77,7 @@
</if>
</then>
<else> <!-- not _is_chrome_for_testing_branded -->
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_BUILD.gn b/www/ungoogled-chromium/files/patch-chrome_browser_BUILD.gn
index 7b4ddb342f93..faa19f3b3f7f 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_BUILD.gn
@@ -1,6 +1,6 @@
---- chrome/browser/BUILD.gn.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/BUILD.gn
-@@ -6462,6 +6462,13 @@ static_library("browser") {
+@@ -6508,6 +6508,13 @@ static_library("browser") {
}
}
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_about__flags.cc b/www/ungoogled-chromium/files/patch-chrome_browser_about__flags.cc
index 28e28f4b6a23..d0fd06a31585 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_about__flags.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_about__flags.cc
@@ -1,6 +1,6 @@
---- chrome/browser/about_flags.cc.orig 2023-11-11 14:10:41 UTC
+--- chrome/browser/about_flags.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/about_flags.cc
-@@ -227,7 +227,7 @@
+@@ -232,7 +232,7 @@
#include "ui/ui_features.h"
#include "url/url_features.h"
@@ -9,7 +9,7 @@
#include "base/allocator/buildflags.h"
#endif
-@@ -327,7 +327,7 @@
+@@ -331,7 +331,7 @@
#include "device/vr/public/cpp/features.h"
#endif
@@ -18,7 +18,7 @@
#include "ui/ozone/buildflags.h"
#include "ui/ozone/public/ozone_switches.h"
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH)
-@@ -341,7 +341,7 @@
+@@ -345,7 +345,7 @@
#include "chrome/browser/win/titlebar_config.h"
#endif
@@ -27,7 +27,7 @@
#include "chrome/browser/enterprise/profile_management/profile_management_features.h"
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
-@@ -471,7 +471,7 @@ const FeatureEntry::FeatureVariation kDXGIWaitableSwap
+@@ -475,7 +475,7 @@ const FeatureEntry::FeatureVariation kDXGIWaitableSwap
{"Max 3 Frames", &kDXGIWaitableSwapChain3Frames, 1, nullptr}};
#endif
@@ -36,7 +36,7 @@
const FeatureEntry::Choice kOzonePlatformHintRuntimeChoices[] = {
{flag_descriptions::kOzonePlatformHintChoiceDefault, "", ""},
{flag_descriptions::kOzonePlatformHintChoiceAuto,
-@@ -1563,7 +1563,7 @@ const FeatureEntry::FeatureVariation kChromeRefresh202
+@@ -1464,7 +1464,7 @@ const FeatureEntry::FeatureVariation kChromeRefresh202
nullptr}};
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC) || \
@@ -45,7 +45,7 @@
const FeatureEntry::FeatureParam kShortcutBoostSearchAndUrl1414[] = {
{"ShortcutBoostSearchScore", "1414"},
{"ShortcutBoostUrlScore", "1414"}};
-@@ -5321,13 +5321,13 @@ const FeatureEntry kFeatureEntries[] = {
+@@ -5304,13 +5304,13 @@ const FeatureEntry kFeatureEntries[] = {
FEATURE_VALUE_TYPE(features::kWebShare)},
#endif // BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC)
@@ -61,7 +61,7 @@
{"skip-undecryptable-passwords",
flag_descriptions::kSkipUndecryptablePasswordsName,
flag_descriptions::kSkipUndecryptablePasswordsDescription,
-@@ -5638,7 +5638,7 @@ const FeatureEntry kFeatureEntries[] = {
+@@ -5621,7 +5621,7 @@ const FeatureEntry kFeatureEntries[] = {
FEATURE_VALUE_TYPE(feed::kFeedSportsCard)},
#endif // BUILDFLAG(IS_ANDROID)
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC) || \
@@ -70,7 +70,7 @@
{"following-feed-sidepanel", flag_descriptions::kFollowingFeedSidepanelName,
flag_descriptions::kFollowingFeedSidepanelDescription, kOsDesktop,
FEATURE_VALUE_TYPE(feed::kWebUiFeed)},
-@@ -6305,7 +6305,7 @@ const FeatureEntry kFeatureEntries[] = {
+@@ -6260,7 +6260,7 @@ const FeatureEntry kFeatureEntries[] = {
FEATURE_VALUE_TYPE(omnibox::kZeroSuggestInMemoryCaching)},
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC) || \
@@ -79,7 +79,7 @@
{"omnibox-actions-ui-simplification",
flag_descriptions::kOmniboxActionsUISimplificationName,
flag_descriptions::kOmniboxActionsUISimplificationDescription, kOsDesktop,
-@@ -7140,7 +7140,7 @@ const FeatureEntry kFeatureEntries[] = {
+@@ -7096,7 +7096,7 @@ const FeatureEntry kFeatureEntries[] = {
flag_descriptions::kParallelDownloadingDescription, kOsAll,
FEATURE_VALUE_TYPE(download::features::kParallelDownloading)},
@@ -88,7 +88,7 @@
{"enable-async-dns", flag_descriptions::kAsyncDnsName,
flag_descriptions::kAsyncDnsDescription, kOsWin | kOsLinux,
FEATURE_VALUE_TYPE(features::kAsyncDns)},
-@@ -8152,7 +8152,7 @@ const FeatureEntry kFeatureEntries[] = {
+@@ -8131,7 +8131,7 @@ const FeatureEntry kFeatureEntries[] = {
FEATURE_VALUE_TYPE(supervised_user::kEnableProtoApiForClassifyUrl)},
#endif // BUILDFLAG(ENABLE_SUPERVISED_USERS)
@@ -97,7 +97,16 @@
{"enable-network-service-sandbox",
flag_descriptions::kEnableNetworkServiceSandboxName,
flag_descriptions::kEnableNetworkServiceSandboxDescription,
-@@ -8845,7 +8845,7 @@ const FeatureEntry kFeatureEntries[] = {
+@@ -8156,7 +8156,7 @@ const FeatureEntry kFeatureEntries[] = {
+ FEATURE_VALUE_TYPE(
+ supervised_user::kFilterWebsitesForSupervisedUsersOnDesktopAndIOS)},
+
+-#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN)
++#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
+ {"enable-family-link-extensions-permissions",
+ flag_descriptions::
+ kEnableExtensionsPermissionsForSupervisedUsersOnDesktopName,
+@@ -8826,7 +8826,7 @@ const FeatureEntry kFeatureEntries[] = {
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -106,8 +115,8 @@
{"quick-commands", flag_descriptions::kQuickCommandsName,
flag_descriptions::kQuickCommandsDescription, kOsDesktop,
FEATURE_VALUE_TYPE(features::kQuickCommands)},
-@@ -9149,7 +9149,7 @@ const FeatureEntry kFeatureEntries[] = {
- FEATURE_VALUE_TYPE(ash::features::kWallpaperRefreshRevamp)},
+@@ -9097,7 +9097,7 @@ const FeatureEntry kFeatureEntries[] = {
+ FEATURE_VALUE_TYPE(ash::features::kWallpaperPerDesk)},
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
-#if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)
@@ -115,7 +124,7 @@
{"enable-get-all-screens-media", flag_descriptions::kGetAllScreensMediaName,
flag_descriptions::kGetAllScreensMediaDescription,
kOsCrOS | kOsLacros | kOsLinux,
-@@ -9205,7 +9205,7 @@ const FeatureEntry kFeatureEntries[] = {
+@@ -9139,7 +9139,7 @@ const FeatureEntry kFeatureEntries[] = {
#if BUILDFLAG(IS_WIN) || \
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)) || \
@@ -124,7 +133,7 @@
{
"ui-debug-tools",
flag_descriptions::kUIDebugToolsName,
-@@ -9769,7 +9769,7 @@ const FeatureEntry kFeatureEntries[] = {
+@@ -9697,7 +9697,7 @@ const FeatureEntry kFeatureEntries[] = {
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || \
@@ -133,25 +142,7 @@
{"document-picture-in-picture-api",
flag_descriptions::kDocumentPictureInPictureApiName,
flag_descriptions::kDocumentPictureInPictureApiDescription,
-@@ -10368,7 +10368,7 @@ const FeatureEntry kFeatureEntries[] = {
- flag_descriptions::kWebUIOmniboxPopupDescription, kOsDesktop,
- FEATURE_VALUE_TYPE(omnibox::kWebUIOmniboxPopup)},
-
--#if !BUILDFLAG(IS_LINUX)
-+#if !BUILDFLAG(IS_LINUX) && !BUILDFLAG(IS_BSD)
- {"webui-system-font", flag_descriptions::kWebUiSystemFontName,
- flag_descriptions::kWebUiSystemFontDescription, kOsAll,
- FEATURE_VALUE_TYPE(features::kWebUiSystemFont)},
-@@ -10569,7 +10569,7 @@ const FeatureEntry kFeatureEntries[] = {
- #endif
-
- #if BUILDFLAG(IS_WIN) || (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) || \
-- BUILDFLAG(IS_MAC) || BUILDFLAG(IS_ANDROID)
-+ BUILDFLAG(IS_MAC) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_BSD)
- {"data-retention-policies-disable-sync-types-needed",
- flag_descriptions::kDataRetentionPoliciesDisableSyncTypesNeededName,
- flag_descriptions::kDataRetentionPoliciesDisableSyncTypesNeededDescription,
-@@ -10975,7 +10975,7 @@ const FeatureEntry kFeatureEntries[] = {
+@@ -10804,7 +10804,7 @@ const FeatureEntry kFeatureEntries[] = {
FEATURE_VALUE_TYPE(features::kProcessPerSiteUpToMainFrameThreshold)},
#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || \
@@ -160,12 +151,30 @@
{"camera-mic-effects", flag_descriptions::kCameraMicEffectsName,
flag_descriptions::kCameraMicEffectsDescription,
static_cast<unsigned short>(kOsMac | kOsWin | kOsLinux | kOsFuchsia),
-@@ -11162,7 +11162,7 @@ const FeatureEntry kFeatureEntries[] = {
- flag_descriptions::kDigestAuthEnableSecureAlgorithmsDescription, kOsAll,
- FEATURE_VALUE_TYPE(net::features::kDigestAuthEnableSecureAlgorithms)},
+@@ -10986,7 +10986,7 @@ const FeatureEntry kFeatureEntries[] = {
+ password_manager::features::kFillingAcrossAffiliatedWebsitesAndroid)},
+ #endif
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
{"third-party-profile-management",
flag_descriptions::kThirdPartyProfileManagementName,
flag_descriptions::kThirdPartyProfileManagementDescription,
+@@ -11300,7 +11300,7 @@ const FeatureEntry kFeatureEntries[] = {
+ kOsDesktop, FEATURE_VALUE_TYPE(blink::features::kPasswordStrongLabel)},
+ #endif
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
+ {"attach-logs-to-autofill-rater-extentsion-report",
+ flag_descriptions::kAttachLogsToAutofillRaterExtensionReportName,
+ flag_descriptions::kAttachLogsToAutofillRaterExtensionReportDescription,
+@@ -11309,7 +11309,7 @@ const FeatureEntry kFeatureEntries[] = {
+ kAttachLogsToAutofillRaterExtensionReport)},
+ #endif
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
+ {"fill-multi-line", flag_descriptions::kFillMultiLineName,
+ flag_descriptions::kFillMultiLineDescription, kOsWin | kOsLinux | kOsMac,
+ FEATURE_VALUE_TYPE(compose::features::kFillMultiLine)},
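Nearly every hunk in this commit follows one pattern: widening an upstream platform guard so the BSDs take the same code path as desktop Linux. A minimal sketch of the before/after (illustrative only, not a hunk from this commit):

#include "build/build_config.h"  // provides the BUILDFLAG(IS_*) platform flags

// Upstream gates desktop-only code like this:
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
// ... desktop-only code ...
#endif

// The port's patches append the BSD flag so the same block also builds
// on FreeBSD/OpenBSD:
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
// ... desktop-only code, now also compiled on the BSDs ...
#endif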
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_background_background__mode__manager.cc b/www/ungoogled-chromium/files/patch-chrome_browser_background_background__mode__manager.cc
index 32587d7dbc35..36cbc3931f1a 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_background_background__mode__manager.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_background_background__mode__manager.cc
@@ -1,6 +1,6 @@
---- chrome/browser/background/background_mode_manager.cc.orig 2023-08-18 10:26:52 UTC
+--- chrome/browser/background/background_mode_manager.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/background/background_mode_manager.cc
-@@ -870,7 +870,7 @@ gfx::ImageSkia GetStatusTrayIcon() {
+@@ -869,7 +869,7 @@ gfx::ImageSkia GetStatusTrayIcon() {
return gfx::ImageSkia();
return family->CreateExact(size).AsImageSkia();
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_browser__features.cc b/www/ungoogled-chromium/files/patch-chrome_browser_browser__features.cc
index 90714d356239..96dce6508dba 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_browser__features.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_browser__features.cc
@@ -1,4 +1,4 @@
---- chrome/browser/browser_features.cc.orig 2023-09-17 07:59:53 UTC
+--- chrome/browser/browser_features.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/browser_features.cc
@@ -28,7 +28,7 @@ BASE_FEATURE(kClosedTabCache,
BASE_FEATURE(kDestroyProfileOnBrowserClose,
@@ -9,3 +9,12 @@
base::FEATURE_ENABLED_BY_DEFAULT);
#else
base::FEATURE_DISABLED_BY_DEFAULT);
+@@ -292,7 +292,7 @@ BASE_FEATURE(kOmniboxTriggerForNoStatePrefetch,
+ "OmniboxTriggerForNoStatePrefetch",
+ base::FEATURE_DISABLED_BY_DEFAULT);
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
+ BASE_FEATURE(kPayloadTestComponent,
+ "PayloadTestComponent",
+ base::FEATURE_DISABLED_BY_DEFAULT);
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_browser__features.h b/www/ungoogled-chromium/files/patch-chrome_browser_browser__features.h
new file mode 100644
index 000000000000..5fa408811eee
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_browser__features.h
@@ -0,0 +1,11 @@
+--- chrome/browser/browser_features.h.orig 2023-12-23 12:33:28 UTC
++++ chrome/browser/browser_features.h
+@@ -121,7 +121,7 @@ BASE_DECLARE_FEATURE(kAutocompleteActionPredictorConfi
+
+ BASE_DECLARE_FEATURE(kOmniboxTriggerForNoStatePrefetch);
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
+ BASE_DECLARE_FEATURE(kPayloadTestComponent);
+ #endif
+
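The paired browser_features.cc/.h hunks above gate a feature's definition and its declaration behind the same widened guard. A sketch of how the two macros pair up, using the names taken from the hunks (the namespace wrapper is assumed Chromium convention, not shown in the hunks):

// browser_features.h — declaration visible to other translation units.
#include "base/feature_list.h"
#include "build/build_config.h"

namespace features {
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
BASE_DECLARE_FEATURE(kPayloadTestComponent);
#endif
}  // namespace features

// browser_features.cc — definition, disabled by default.
namespace features {
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
BASE_FEATURE(kPayloadTestComponent,
             "PayloadTestComponent",
             base::FEATURE_DISABLED_BY_DEFAULT);
#endif
}  // namespace features

If the guards in the two files ever diverge, BSD builds fail with an undeclared-identifier error at the call sites, which is why the port patches both files in lockstep.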
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_browser__process__impl.cc b/www/ungoogled-chromium/files/patch-chrome_browser_browser__process__impl.cc
index 3c62a8141ec6..1ead4591c186 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_browser__process__impl.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_browser__process__impl.cc
@@ -1,6 +1,6 @@
---- chrome/browser/browser_process_impl.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/browser_process_impl.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/browser_process_impl.cc
-@@ -219,7 +219,7 @@
+@@ -218,7 +218,7 @@
#include "components/enterprise/browser/controller/chrome_browser_cloud_management_controller.h"
#endif
@@ -9,7 +9,7 @@
#include "chrome/browser/error_reporting/chrome_js_error_report_processor.h" // nogncheck
#endif
-@@ -1213,7 +1213,7 @@ void BrowserProcessImpl::PreMainMessageLoopRun() {
+@@ -1217,7 +1217,7 @@ void BrowserProcessImpl::PreMainMessageLoopRun() {
ApplyMetricsReportingPolicy();
@@ -18,7 +18,7 @@
ChromeJsErrorReportProcessor::Create();
#endif
-@@ -1447,7 +1447,7 @@ void BrowserProcessImpl::Unpin() {
+@@ -1462,7 +1462,7 @@ void BrowserProcessImpl::Unpin() {
// Mac is currently not supported.
// TODO(crbug.com/1052397): Revisit once build flag switch of lacros-chrome is
// complete.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_browser__process__impl.h b/www/ungoogled-chromium/files/patch-chrome_browser_browser__process__impl.h
index 038b97878e57..e348b2a30c7e 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_browser__process__impl.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_browser__process__impl.h
@@ -1,6 +1,6 @@
---- chrome/browser/browser_process_impl.h.orig 2023-09-17 07:59:53 UTC
+--- chrome/browser/browser_process_impl.h.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/browser_process_impl.h
-@@ -378,7 +378,7 @@ class BrowserProcessImpl : public BrowserProcess,
+@@ -384,7 +384,7 @@ class BrowserProcessImpl : public BrowserProcess,
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__interface__binders.cc b/www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__interface__binders.cc
index 15266bef8ec3..bb2823ce451a 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__interface__binders.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__interface__binders.cc
@@ -1,6 +1,6 @@
---- chrome/browser/chrome_browser_interface_binders.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/chrome_browser_interface_binders.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/chrome_browser_interface_binders.cc
-@@ -129,13 +129,13 @@
+@@ -130,13 +130,13 @@
#endif // BUILDFLAG(FULL_SAFE_BROWSING)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -16,7 +16,7 @@
#include "chrome/browser/ui/webui/app_settings/web_app_settings_ui.h"
#include "ui/webui/resources/cr_components/app_management/app_management.mojom.h"
#endif
-@@ -218,7 +218,7 @@
+@@ -224,7 +224,7 @@
#endif // BUILDFLAG(IS_ANDROID)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -25,7 +25,7 @@
#include "chrome/browser/companion/visual_search/visual_search_suggestions_service_factory.h"
#include "chrome/browser/ui/web_applications/sub_apps_service_impl.h"
#include "chrome/browser/ui/webui/discards/discards.mojom.h"
-@@ -841,7 +841,7 @@ void BindScreen2xMainContentExtractor(
+@@ -863,7 +863,7 @@ void BindScreen2xMainContentExtractor(
#endif
#if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || \
@@ -34,7 +34,7 @@
void BindVisualSuggestionsModelProvider(
content::RenderFrameHost* frame_host,
mojo::PendingReceiver<
-@@ -988,7 +988,7 @@ void PopulateChromeFrameBinders(
+@@ -1009,7 +1009,7 @@ void PopulateChromeFrameBinders(
#endif // BUILDFLAG(ENABLE_SPEECH_SERVICE)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -43,7 +43,7 @@
if (!render_frame_host->GetParent()) {
map->Add<chrome::mojom::DraggableRegions>(
base::BindRepeating(&DraggableRegionsHostImpl::CreateIfAllowed));
-@@ -996,7 +996,7 @@ void PopulateChromeFrameBinders(
+@@ -1017,7 +1017,7 @@ void PopulateChromeFrameBinders(
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -52,7 +52,7 @@
if (base::FeatureList::IsEnabled(blink::features::kDesktopPWAsSubApps) &&
!render_frame_host->GetParentOrOuterDocument()) {
// The service binder will reject non-primary main frames, but we still need
-@@ -1076,7 +1076,7 @@ void PopulateChromeWebUIFrameBinders(
+@@ -1097,7 +1097,7 @@ void PopulateChromeWebUIFrameBinders(
commerce::CommerceInternalsUI>(map);
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -61,7 +61,7 @@
RegisterWebUIControllerInterfaceBinder<
connectors_internals::mojom::PageHandler,
enterprise_connectors::ConnectorsInternalsUI>(map);
-@@ -1092,7 +1092,7 @@ void PopulateChromeWebUIFrameBinders(
+@@ -1118,7 +1118,7 @@ void PopulateChromeWebUIFrameBinders(
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -70,7 +70,7 @@
RegisterWebUIControllerInterfaceBinder<
app_management::mojom::PageHandlerFactory, WebAppSettingsUI>(map);
#endif
-@@ -1601,7 +1601,7 @@ void PopulateChromeWebUIFrameBinders(
+@@ -1651,7 +1651,7 @@ void PopulateChromeWebUIFrameBinders(
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__main.cc b/www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__main.cc
index 44f8b30e4c72..2609d971e6d9 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__main.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__main.cc
@@ -1,6 +1,6 @@
---- chrome/browser/chrome_browser_main.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/chrome_browser_main.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/chrome_browser_main.cc
-@@ -248,11 +248,11 @@
+@@ -246,11 +246,11 @@
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
@@ -14,7 +14,7 @@
#include "components/crash/core/app/crashpad.h"
#endif
-@@ -287,14 +287,14 @@
+@@ -284,14 +284,14 @@
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -31,7 +31,7 @@
#include "chrome/browser/headless/headless_mode_metrics.h" // nogncheck
#include "chrome/browser/headless/headless_mode_util.h" // nogncheck
#include "components/headless/select_file_dialog/headless_select_file_dialog.h"
-@@ -1041,7 +1041,7 @@ int ChromeBrowserMainParts::PreCreateThreadsImpl() {
+@@ -1042,7 +1042,7 @@ int ChromeBrowserMainParts::PreCreateThreadsImpl() {
browser_creator_->AddFirstRunTabs(master_prefs_->new_tabs);
}
@@ -40,7 +40,7 @@
// Create directory for user-level Native Messaging manifest files. This
// makes it less likely that the directory will be created by third-party
// software with incorrect owner or permission. See crbug.com/725513 .
-@@ -1097,7 +1097,7 @@ int ChromeBrowserMainParts::PreCreateThreadsImpl() {
+@@ -1098,7 +1098,7 @@ int ChromeBrowserMainParts::PreCreateThreadsImpl() {
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -49,7 +49,7 @@
metrics::DesktopSessionDurationTracker::Initialize();
ProfileActivityMetricsRecorder::Initialize();
TouchModeStatsTracker::Initialize(
-@@ -1333,7 +1333,7 @@ void ChromeBrowserMainParts::PostProfileInit(Profile*
+@@ -1334,7 +1334,7 @@ void ChromeBrowserMainParts::PostProfileInit(Profile*
*UrlLanguageHistogramFactory::GetForBrowserContext(profile));
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
@@ -58,7 +58,7 @@
if (headless::IsHeadlessMode()) {
headless::ReportHeadlessActionMetrics();
}
-@@ -1439,7 +1439,7 @@ int ChromeBrowserMainParts::PreMainMessageLoopRunImpl(
+@@ -1440,7 +1440,7 @@ int ChromeBrowserMainParts::PreMainMessageLoopRunImpl(
// In headless mode provide alternate SelectFileDialog factory overriding
// any platform specific SelectFileDialog implementation that may have been
// set.
@@ -67,7 +67,7 @@
if (headless::IsHeadlessMode()) {
headless::HeadlessSelectFileDialogFactory::SetUp();
}
-@@ -2020,7 +2020,7 @@ bool ChromeBrowserMainParts::ProcessSingletonNotificat
+@@ -1967,7 +1967,7 @@ bool ChromeBrowserMainParts::ProcessSingletonNotificat
// Drop the request if headless mode is in effect or the request is from
// a headless Chrome process.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__main__linux.cc b/www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__main__linux.cc
index 7d9d9923e7e5..1927f31d7d51 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__main__linux.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__main__linux.cc
@@ -1,13 +1,6 @@
---- chrome/browser/chrome_browser_main_linux.cc.orig 2023-08-18 10:26:52 UTC
+--- chrome/browser/chrome_browser_main_linux.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/chrome_browser_main_linux.cc
-@@ -53,13 +53,15 @@ ChromeBrowserMainPartsLinux::~ChromeBrowserMainPartsLi
- }
-
- void ChromeBrowserMainPartsLinux::PostCreateMainMessageLoop() {
--#if BUILDFLAG(IS_CHROMEOS)
-+#if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
- // No-op: Ash and Lacros Bluetooth DBusManager initialization depend on
- // FeatureList, and is done elsewhere.
+@@ -67,7 +67,9 @@ void ChromeBrowserMainPartsLinux::PostCreateMainMessag
#endif // BUILDFLAG(IS_CHROMEOS)
#if !BUILDFLAG(IS_CHROMEOS)
@@ -17,7 +10,7 @@
// Set up crypt config. This needs to be done before anything starts the
// network service, as the raw encryption key needs to be shared with the
-@@ -117,7 +119,7 @@ void ChromeBrowserMainPartsLinux::PostBrowserStart() {
+@@ -124,7 +126,7 @@ void ChromeBrowserMainPartsLinux::PostBrowserStart() {
#endif // defined(USE_DBUS) && !BUILDFLAG(IS_CHROMEOS)
void ChromeBrowserMainPartsLinux::PostDestroyThreads() {
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_chrome__content__browser__client.cc b/www/ungoogled-chromium/files/patch-chrome_browser_chrome__content__browser__client.cc
index 67ba36a57ab1..250a74c43658 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_chrome__content__browser__client.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_chrome__content__browser__client.cc
@@ -1,6 +1,6 @@
---- chrome/browser/chrome_content_browser_client.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/chrome_content_browser_client.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/chrome_content_browser_client.cc
-@@ -442,7 +442,7 @@
+@@ -446,7 +446,7 @@
#include "storage/browser/file_system/external_mount_points.h"
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
@@ -9,7 +9,7 @@
#include "chrome/browser/chrome_browser_main_linux.h"
#include "chrome/browser/ui/views/chrome_browser_main_extra_parts_views_linux.h"
#elif BUILDFLAG(IS_ANDROID)
-@@ -542,12 +542,12 @@
+@@ -547,12 +547,12 @@
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
@@ -24,7 +24,7 @@
#include "components/crash/core/app/crash_switches.h"
#include "components/crash/core/app/crashpad.h"
#endif
-@@ -558,14 +558,14 @@
+@@ -563,14 +563,14 @@
#include "chrome/browser/apps/link_capturing/web_app_link_capturing_delegate.h"
#endif
@@ -41,7 +41,7 @@
#include "chrome/browser/enterprise/connectors/device_trust/navigation_throttle.h"
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) ||
// BUILDFLAG(IS_CHROMEOS_ASH)
-@@ -581,7 +581,7 @@
+@@ -586,7 +586,7 @@
#include "components/lens/lens_features.h"
#endif
@@ -50,7 +50,7 @@
#include "chrome/browser/chrome_browser_main_extra_parts_linux.h"
#elif BUILDFLAG(IS_OZONE)
#include "chrome/browser/chrome_browser_main_extra_parts_ozone.h"
-@@ -1573,7 +1573,7 @@ void ChromeContentBrowserClient::RegisterLocalStatePre
+@@ -1566,7 +1566,7 @@ void ChromeContentBrowserClient::RegisterLocalStatePre
registry->RegisterBooleanPref(prefs::kNativeClientForceAllowed, false);
registry->RegisterBooleanPref(
policy::policy_prefs::kPPAPISharedImagesForVideoDecoderAllowed, true);
@@ -59,7 +59,7 @@
registry->RegisterBooleanPref(prefs::kOutOfProcessSystemDnsResolutionEnabled,
true);
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID)
-@@ -1695,7 +1695,7 @@ ChromeContentBrowserClient::CreateBrowserMainParts(boo
+@@ -1690,7 +1690,7 @@ ChromeContentBrowserClient::CreateBrowserMainParts(boo
#elif BUILDFLAG(IS_CHROMEOS_LACROS)
main_parts = std::make_unique<ChromeBrowserMainPartsLacros>(
is_integration_test, &startup_data_);
@@ -68,7 +68,7 @@
main_parts = std::make_unique<ChromeBrowserMainPartsLinux>(
is_integration_test, &startup_data_);
#elif BUILDFLAG(IS_ANDROID)
-@@ -1732,7 +1732,7 @@ ChromeContentBrowserClient::CreateBrowserMainParts(boo
+@@ -1727,7 +1727,7 @@ ChromeContentBrowserClient::CreateBrowserMainParts(boo
std::make_unique<ChromeBrowserMainExtraPartsViewsLacros>());
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
@@ -77,7 +77,7 @@
main_parts->AddParts(
std::make_unique<ChromeBrowserMainExtraPartsViewsLinux>());
#else
-@@ -1753,7 +1753,7 @@ ChromeContentBrowserClient::CreateBrowserMainParts(boo
+@@ -1748,7 +1748,7 @@ ChromeContentBrowserClient::CreateBrowserMainParts(boo
main_parts->AddParts(std::make_unique<ChromeBrowserMainExtraPartsLacros>());
#endif
@@ -86,7 +86,7 @@
main_parts->AddParts(std::make_unique<ChromeBrowserMainExtraPartsLinux>());
#elif BUILDFLAG(IS_OZONE)
main_parts->AddParts(std::make_unique<ChromeBrowserMainExtraPartsOzone>());
-@@ -1772,7 +1772,7 @@ ChromeContentBrowserClient::CreateBrowserMainParts(boo
+@@ -1767,7 +1767,7 @@ ChromeContentBrowserClient::CreateBrowserMainParts(boo
chrome::AddMetricsExtraParts(main_parts.get());
@@ -95,7 +95,7 @@
main_parts->AddParts(
std::make_unique<
chrome::enterprise_util::ChromeBrowserMainExtraPartsEnterprise>());
-@@ -2603,6 +2603,8 @@ void ChromeContentBrowserClient::AppendExtraCommandLin
+@@ -2601,6 +2601,8 @@ void ChromeContentBrowserClient::AppendExtraCommandLin
#if(0)
#if BUILDFLAG(IS_ANDROID)
bool enable_crash_reporter = true;
@@ -104,7 +104,7 @@
#elif BUILDFLAG(IS_CHROMEOS)
bool enable_crash_reporter = false;
if (crash_reporter::IsCrashpadEnabled()) {
-@@ -2986,7 +2988,7 @@ void ChromeContentBrowserClient::AppendExtraCommandLin
+@@ -2966,7 +2968,7 @@ void ChromeContentBrowserClient::AppendExtraCommandLin
ThreadProfilerConfiguration::Get()->AppendCommandLineSwitchForChildProcess(
command_line);
@@ -113,7 +113,7 @@
// Opt into a hardened stack canary mitigation if it hasn't already been
// force-disabled.
if (!browser_command_line.HasSwitch(switches::kChangeStackGuardOnFork)) {
-@@ -4617,7 +4619,7 @@ void ChromeContentBrowserClient::GetAdditionalFileSyst
+@@ -4637,7 +4639,7 @@ void ChromeContentBrowserClient::GetAdditionalFileSyst
}
}
@@ -122,7 +122,7 @@
void ChromeContentBrowserClient::GetAdditionalMappedFilesForChildProcess(
const base::CommandLine& command_line,
int child_process_id,
-@@ -5159,7 +5161,7 @@ ChromeContentBrowserClient::CreateThrottlesForNavigati
+@@ -5192,7 +5194,7 @@ ChromeContentBrowserClient::CreateThrottlesForNavigati
&throttles);
}
@@ -131,7 +131,7 @@
MaybeAddThrottle(
WebAppSettingsNavigationThrottle::MaybeCreateThrottleFor(handle),
&throttles);
-@@ -5169,7 +5171,7 @@ ChromeContentBrowserClient::CreateThrottlesForNavigati
+@@ -5202,7 +5204,7 @@ ChromeContentBrowserClient::CreateThrottlesForNavigati
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || \
@@ -140,7 +140,7 @@
MaybeAddThrottle(enterprise_connectors::DeviceTrustNavigationThrottle::
MaybeCreateThrottleFor(handle),
&throttles);
-@@ -5200,7 +5202,7 @@ ChromeContentBrowserClient::CreateThrottlesForNavigati
+@@ -5233,7 +5235,7 @@ ChromeContentBrowserClient::CreateThrottlesForNavigati
}
#endif
@@ -149,10 +149,10 @@
MaybeAddThrottle(browser_switcher::BrowserSwitcherNavigationThrottle::
MaybeCreateThrottleFor(handle),
&throttles);
-@@ -7061,7 +7063,7 @@ bool ChromeContentBrowserClient::ShouldSandboxNetworkS
- }
-
+@@ -7101,7 +7103,7 @@ bool ChromeContentBrowserClient::ShouldSandboxNetworkS
bool ChromeContentBrowserClient::ShouldRunOutOfProcessSystemDnsResolution() {
+ // This enterprise policy is supported on Android, but the feature will not be
+ // launched there.
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_BSD)
// This is possibly called before `g_browser_process` is initialized.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_chrome__content__browser__client.h b/www/ungoogled-chromium/files/patch-chrome_browser_chrome__content__browser__client.h
index 2d24d8108e40..75b592168657 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_chrome__content__browser__client.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_chrome__content__browser__client.h
@@ -1,6 +1,6 @@
---- chrome/browser/chrome_content_browser_client.h.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/chrome_content_browser_client.h.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/chrome_content_browser_client.h
-@@ -482,7 +482,7 @@ class ChromeContentBrowserClient : public content::Con
+@@ -484,7 +484,7 @@ class ChromeContentBrowserClient : public content::Con
void OverridePageVisibilityState(
content::RenderFrameHost* render_frame_host,
content::PageVisibilityState* visibility_state) override;
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_component__updater_registration.cc b/www/ungoogled-chromium/files/patch-chrome_browser_component__updater_registration.cc
new file mode 100644
index 000000000000..fdcdb8513898
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_component__updater_registration.cc
@@ -0,0 +1,11 @@
+--- chrome/browser/component_updater/registration.cc.orig 2023-12-23 12:33:28 UTC
++++ chrome/browser/component_updater/registration.cc
+@@ -223,7 +223,7 @@ void RegisterComponentsForUpdate() {
+
+ RegisterTpcdMetadataComponent(cus);
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
+ // TODO(crbug.com/1490685): Remove this test component once the
+ // experiment has concluded.
+ if (base::FeatureList::IsEnabled(features::kPayloadTestComponent)) {
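Defining the feature is only half the story; the registration.cc hunk above shows the call site that consumes it. A sketch of that pattern — cus and the IsEnabled() check come from the hunk, while the surrounding function and RegisterPayloadTestComponent() are hypothetical stand-ins:

#include "base/feature_list.h"
#include "build/build_config.h"
#include "chrome/browser/browser_features.h"
#include "components/component_updater/component_updater_service.h"

void MaybeRegisterPayloadTest(component_updater::ComponentUpdateService* cus) {
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
  // kPayloadTestComponent is FEATURE_DISABLED_BY_DEFAULT, so this only
  // runs when enabled explicitly, e.g. --enable-features=PayloadTestComponent.
  if (base::FeatureList::IsEnabled(features::kPayloadTestComponent)) {
    RegisterPayloadTestComponent(cus);  // hypothetical helper
  }
#endif
}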
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_download_bubble_download__bubble__update__service.cc b/www/ungoogled-chromium/files/patch-chrome_browser_download_bubble_download__bubble__update__service.cc
index a056c38ce009..d0aca2998b82 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_download_bubble_download__bubble__update__service.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_download_bubble_download__bubble__update__service.cc
@@ -1,4 +1,4 @@
---- chrome/browser/download/bubble/download_bubble_update_service.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/download/bubble/download_bubble_update_service.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/download/bubble/download_bubble_update_service.cc
@@ -89,7 +89,11 @@ ItemSortKey GetSortKey(const Item& item) {
// Helper to get an iterator to the last element in the cache. The cache
@@ -12,7 +12,7 @@
CHECK(!cache.empty());
auto it = cache.end();
return std::prev(it);
-@@ -1029,9 +1033,17 @@ bool DownloadBubbleUpdateService::CacheManager::Remove
+@@ -1089,9 +1093,17 @@ bool DownloadBubbleUpdateService::CacheManager::Remove
}
template <typename Id, typename Item>
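For context on the download_bubble_update_service.cc hunks above: the renumbered helper's visible body simply returns an iterator to the cache's last element. A standalone sketch under the assumptions that the helper is named GetLastIter and the cache is an ordered map (Chromium uses CHECK() where this uses assert()):

#include <cassert>
#include <iterator>
#include <map>

template <typename Key, typename Item>
typename std::map<Key, Item>::iterator GetLastIter(std::map<Key, Item>& cache) {
  assert(!cache.empty());  // mirrors the CHECK(!cache.empty()) in the patch
  auto it = cache.end();
  return std::prev(it);    // end() is one past the last element
}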
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_download_chrome__download__manager__delegate.cc b/www/ungoogled-chromium/files/patch-chrome_browser_download_chrome__download__manager__delegate.cc
index c7d4eef00158..18c7c76ed788 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_download_chrome__download__manager__delegate.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_download_chrome__download__manager__delegate.cc
@@ -1,6 +1,6 @@
---- chrome/browser/download/chrome_download_manager_delegate.cc.orig 2023-10-13 13:20:35 UTC
+--- chrome/browser/download/chrome_download_manager_delegate.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/download/chrome_download_manager_delegate.cc
-@@ -1582,7 +1582,7 @@ void ChromeDownloadManagerDelegate::OnDownloadTargetDe
+@@ -1594,7 +1594,7 @@ void ChromeDownloadManagerDelegate::OnDownloadTargetDe
bool ChromeDownloadManagerDelegate::IsOpenInBrowserPreferreredForFile(
const base::FilePath& path) {
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
@@ -9,7 +9,7 @@
if (path.MatchesExtension(FILE_PATH_LITERAL(".pdf"))) {
return !download_prefs_->ShouldOpenPdfInSystemReader();
}
-@@ -1648,7 +1648,7 @@ void ChromeDownloadManagerDelegate::CheckDownloadAllow
+@@ -1660,7 +1660,7 @@ void ChromeDownloadManagerDelegate::CheckDownloadAllow
content::CheckDownloadAllowedCallback check_download_allowed_cb) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_download_download__commands.h b/www/ungoogled-chromium/files/patch-chrome_browser_download_download__commands.h
index b8ff00ad4adf..c87159b2e0c4 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_download_download__commands.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_download_download__commands.h
@@ -1,6 +1,6 @@
---- chrome/browser/download/download_commands.h.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/download/download_commands.h.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/download/download_commands.h
-@@ -62,7 +62,7 @@ class DownloadCommands {
+@@ -63,7 +63,7 @@ class DownloadCommands {
void ExecuteCommand(Command command);
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_download_download__item__model.cc b/www/ungoogled-chromium/files/patch-chrome_browser_download_download__item__model.cc
index c026265805b8..896bea41fc9d 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_download_download__item__model.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_download_download__item__model.cc
@@ -1,6 +1,6 @@
---- chrome/browser/download/download_item_model.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/download/download_item_model.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/download/download_item_model.cc
-@@ -754,7 +754,7 @@ bool DownloadItemModel::IsCommandChecked(
+@@ -752,7 +752,7 @@ bool DownloadItemModel::IsCommandChecked(
download_crx_util::IsExtensionDownload(*download_);
case DownloadCommands::ALWAYS_OPEN_TYPE:
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
@@ -9,7 +9,7 @@
if (download_commands->CanOpenPdfInSystemViewer()) {
DownloadPrefs* prefs = DownloadPrefs::FromBrowserContext(profile());
return prefs->ShouldOpenPdfInSystemReader();
-@@ -800,7 +800,7 @@ void DownloadItemModel::ExecuteCommand(DownloadCommand
+@@ -798,7 +798,7 @@ void DownloadItemModel::ExecuteCommand(DownloadCommand
DownloadCommands::ALWAYS_OPEN_TYPE);
DownloadPrefs* prefs = DownloadPrefs::FromBrowserContext(profile());
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
@@ -18,7 +18,7 @@
if (download_commands->CanOpenPdfInSystemViewer()) {
prefs->SetShouldOpenPdfInSystemReader(!is_checked);
SetShouldPreferOpeningInBrowser(is_checked);
-@@ -1082,7 +1082,7 @@ void DownloadItemModel::DetermineAndSetShouldPreferOpe
+@@ -1078,7 +1078,7 @@ void DownloadItemModel::DetermineAndSetShouldPreferOpe
return;
}
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_download_download__prefs.cc b/www/ungoogled-chromium/files/patch-chrome_browser_download_download__prefs.cc
index de18029a0e40..3b01f73d9a1f 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_download_download__prefs.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_download_download__prefs.cc
@@ -1,4 +1,4 @@
---- chrome/browser/download/download_prefs.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/download/download_prefs.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/download/download_prefs.cc
@@ -11,6 +11,7 @@
#include <vector>
@@ -37,16 +37,16 @@
should_open_pdf_in_system_reader_ =
prefs->GetBoolean(prefs::kOpenPdfDownloadInSystemReader);
#endif
-@@ -305,7 +310,7 @@ void DownloadPrefs::RegisterProfilePrefs(
- registry->RegisterTimePref(prefs::kDownloadLastCompleteTime,
- /*default_value=*/base::Time());
+@@ -303,7 +308,7 @@ void DownloadPrefs::RegisterProfilePrefs(
+ registry->RegisterFilePathPref(prefs::kSaveFileDefaultDirectory,
+ default_download_path);
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
- BUILDFLAG(IS_MAC)
+ BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD)
registry->RegisterBooleanPref(prefs::kOpenPdfDownloadInSystemReader, false);
#endif
#if BUILDFLAG(IS_ANDROID)
-@@ -474,7 +479,7 @@ void DownloadPrefs::DisableAutoOpenByUserBasedOnExtens
+@@ -463,7 +468,7 @@ void DownloadPrefs::DisableAutoOpenByUserBasedOnExtens
}
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
@@ -55,7 +55,7 @@
void DownloadPrefs::SetShouldOpenPdfInSystemReader(bool should_open) {
if (should_open_pdf_in_system_reader_ == should_open)
return;
-@@ -506,7 +511,7 @@ bool DownloadPrefs::ShouldOpenPdfInSystemReader() cons
+@@ -495,7 +500,7 @@ bool DownloadPrefs::ShouldOpenPdfInSystemReader() cons
void DownloadPrefs::ResetAutoOpenByUser() {
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
@@ -64,7 +64,7 @@
SetShouldOpenPdfInSystemReader(false);
#endif
auto_open_by_user_.clear();
-@@ -537,7 +542,7 @@ void DownloadPrefs::SaveAutoOpenState() {
+@@ -526,7 +531,7 @@ void DownloadPrefs::SaveAutoOpenState() {
bool DownloadPrefs::CanPlatformEnableAutoOpenForPdf() const {
#if BUILDFLAG(IS_CHROMEOS)
return false; // There is no UI for auto-open on ChromeOS.
@@ -73,7 +73,7 @@
return ShouldOpenPdfInSystemReader();
#else
return false;
-@@ -661,7 +666,14 @@ base::FilePath DownloadPrefs::SanitizeDownloadTargetPa
+@@ -650,7 +655,14 @@ base::FilePath DownloadPrefs::SanitizeDownloadTargetPa
#else
// If the stored download directory is an absolute path, we presume it's
// correct; there's not really much more validation we can do here.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_download_download__prefs.h b/www/ungoogled-chromium/files/patch-chrome_browser_download_download__prefs.h
index f156c2dda185..278af77a6e69 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_download_download__prefs.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_download_download__prefs.h
@@ -1,6 +1,6 @@
---- chrome/browser/download/download_prefs.h.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/download/download_prefs.h.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/download/download_prefs.h
-@@ -118,7 +118,7 @@ class DownloadPrefs {
+@@ -115,7 +115,7 @@ class DownloadPrefs {
void DisableAutoOpenByUserBasedOnExtension(const base::FilePath& file_name);
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
@@ -9,7 +9,7 @@
// Store the user preference to disk. If |should_open| is true, also disable
// the built-in PDF plugin. If |should_open| is false, enable the PDF plugin.
void SetShouldOpenPdfInSystemReader(bool should_open);
-@@ -182,7 +182,7 @@ class DownloadPrefs {
+@@ -179,7 +179,7 @@ class DownloadPrefs {
std::unique_ptr<policy::URLBlocklist> auto_open_allowed_by_urls_;
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_connectors__service.cc b/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_connectors__service.cc
index a9e4bbf111e2..21aafa6d8464 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_connectors__service.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_connectors__service.cc
@@ -1,6 +1,6 @@
---- chrome/browser/enterprise/connectors/connectors_service.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/enterprise/connectors/connectors_service.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/enterprise/connectors/connectors_service.cc
-@@ -519,7 +519,7 @@ bool ConnectorsService::ConnectorsEnabled() const {
+@@ -531,7 +531,7 @@ bool ConnectorsService::ConnectorsEnabled() const {
Profile* profile = Profile::FromBrowserContext(context_);
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_device__trust_device__trust__connector__service__factory.cc b/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_device__trust_device__trust__connector__service__factory.cc
index bf783f206e34..eab5649000b8 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_device__trust_device__trust__connector__service__factory.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_device__trust_device__trust__connector__service__factory.cc
@@ -1,6 +1,6 @@
---- chrome/browser/enterprise/connectors/device_trust/device_trust_connector_service_factory.cc.orig 2023-10-13 13:20:35 UTC
+--- chrome/browser/enterprise/connectors/device_trust/device_trust_connector_service_factory.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/enterprise/connectors/device_trust/device_trust_connector_service_factory.cc
-@@ -11,7 +11,7 @@
+@@ -10,7 +10,7 @@
#include "chrome/browser/profiles/profile.h"
#include "components/keyed_service/core/keyed_service.h"
@@ -9,21 +9,21 @@
#include "chrome/browser/browser_process.h"
#include "chrome/browser/enterprise/connectors/device_trust/browser/signing_key_policy_observer.h"
#include "chrome/browser/policy/chrome_browser_policy_connector.h"
-@@ -42,7 +42,7 @@ DeviceTrustConnectorService* DeviceTrustConnectorServi
+@@ -40,7 +40,7 @@ DeviceTrustConnectorService* DeviceTrustConnectorServi
bool DeviceTrustConnectorServiceFactory::ServiceIsCreatedWithBrowserContext()
const {
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD)
- return IsDeviceTrustConnectorFeatureEnabled();
+ return true;
#else
return false;
-@@ -80,7 +80,7 @@ DeviceTrustConnectorServiceFactory::BuildServiceInstan
+@@ -79,7 +79,7 @@ DeviceTrustConnectorServiceFactory::BuildServiceInstan
std::unique_ptr<DeviceTrustConnectorService> service =
std::make_unique<DeviceTrustConnectorService>(profile->GetPrefs());
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD)
- if (IsDeviceTrustConnectorFeatureEnabled()) {
- auto* key_manager = g_browser_process->browser_policy_connector()
- ->chrome_browser_cloud_management_controller()
+ auto* key_manager = g_browser_process->browser_policy_connector()
+ ->chrome_browser_cloud_management_controller()
+ ->GetDeviceTrustKeyManager();
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_device__trust_device__trust__service__factory.cc b/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_device__trust_device__trust__service__factory.cc
index e3632bfaae31..782e19a6bd35 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_device__trust_device__trust__service__factory.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_device__trust_device__trust__service__factory.cc
@@ -1,4 +1,4 @@
---- chrome/browser/enterprise/connectors/device_trust/device_trust_service_factory.cc.orig 2023-09-17 07:59:53 UTC
+--- chrome/browser/enterprise/connectors/device_trust/device_trust_service_factory.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/enterprise/connectors/device_trust/device_trust_service_factory.cc
@@ -23,7 +23,7 @@
#include "components/policy/core/common/management/management_service.h"
@@ -9,7 +9,7 @@
#include "chrome/browser/browser_process.h"
#include "chrome/browser/enterprise/connectors/device_trust/attestation/browser/browser_attestation_service.h"
#include "chrome/browser/enterprise/connectors/device_trust/attestation/browser/device_attester.h"
-@@ -56,7 +56,7 @@ bool IsProfileManaged(Profile* profile) {
+@@ -54,7 +54,7 @@ bool IsProfileManaged(Profile* profile) {
return management_service && management_service->IsManaged();
}
@@ -18,7 +18,7 @@
policy::CloudPolicyStore* GetUserCloudPolicyStore(Profile* profile) {
policy::CloudPolicyManager* user_policy_manager =
profile->GetUserCloudPolicyManager();
-@@ -106,7 +106,7 @@ DeviceTrustServiceFactory::DeviceTrustServiceFactory()
+@@ -104,7 +104,7 @@ DeviceTrustServiceFactory::DeviceTrustServiceFactory()
DependsOn(DeviceTrustConnectorServiceFactory::GetInstance());
DependsOn(policy::ManagementServiceFactory::GetInstance());
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_reporting_realtime__reporting__client.cc b/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_reporting_realtime__reporting__client.cc
index 00fb17e0abff..5e6539a670ea 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_reporting_realtime__reporting__client.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_reporting_realtime__reporting__client.cc
@@ -1,6 +1,6 @@
---- chrome/browser/enterprise/connectors/reporting/realtime_reporting_client.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/enterprise/connectors/reporting/realtime_reporting_client.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/enterprise/connectors/reporting/realtime_reporting_client.cc
-@@ -55,7 +55,7 @@
+@@ -58,7 +58,7 @@
#include "base/strings/utf_string_conversions.h"
#endif
@@ -9,7 +9,7 @@
#include "chrome/browser/enterprise/signals/signals_aggregator_factory.h"
#include "components/device_signals/core/browser/signals_aggregator.h"
#include "components/device_signals/core/common/signals_constants.h"
-@@ -129,7 +129,7 @@ void UploadSecurityEventReport(base::Value::Dict event
+@@ -132,7 +132,7 @@ void UploadSecurityEventReport(base::Value::Dict event
std::move(upload_callback));
}
@@ -18,7 +18,7 @@
void PopulateSignals(base::Value::Dict event,
policy::CloudPolicyClient* client,
std::string name,
-@@ -416,7 +416,7 @@ void RealtimeReportingClient::ReportPastEvent(const st
+@@ -427,7 +427,7 @@ void RealtimeReportingClient::ReportPastEvent(const st
/*include_profile_user_name=*/false);
}
@@ -27,7 +27,7 @@
void AddCrowdstrikeSignalsToEvent(
base::Value::Dict& event,
-@@ -475,7 +475,7 @@ void RealtimeReportingClient::ReportEventWithTimestamp
+@@ -486,7 +486,7 @@ void RealtimeReportingClient::ReportEventWithTimestamp
if (include_profile_user_name) {
event.Set(kKeyProfileUserName, GetProfileUserName());
}
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_remote__commands_cbcm__remote__commands__factory.cc b/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_remote__commands_cbcm__remote__commands__factory.cc
index 653057a9d750..21452be3684a 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_remote__commands_cbcm__remote__commands__factory.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_remote__commands_cbcm__remote__commands__factory.cc
@@ -1,4 +1,4 @@
---- chrome/browser/enterprise/remote_commands/cbcm_remote_commands_factory.cc.orig 2023-08-18 10:26:52 UTC
+--- chrome/browser/enterprise/remote_commands/cbcm_remote_commands_factory.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/enterprise/remote_commands/cbcm_remote_commands_factory.cc
@@ -11,7 +11,7 @@
#include "chrome/browser/profiles/profile_manager.h"
@@ -6,15 +6,15 @@
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD)
- #include "chrome/browser/enterprise/connectors/device_trust/device_trust_features.h" // nogncheck
#include "chrome/browser/enterprise/remote_commands/rotate_attestation_credential_job.h"
#include "chrome/browser/policy/chrome_browser_policy_connector.h"
-@@ -31,7 +31,7 @@ CBCMRemoteCommandsFactory::BuildJobForType(
+ #include "components/enterprise/browser/controller/chrome_browser_cloud_management_controller.h"
+@@ -30,7 +30,7 @@ CBCMRemoteCommandsFactory::BuildJobForType(
g_browser_process->profile_manager());
}
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD)
- if (enterprise_connectors::IsDeviceTrustConnectorFeatureEnabled() &&
- type == enterprise_management::
+ if (type == enterprise_management::
RemoteCommand_Type_BROWSER_ROTATE_ATTESTATION_CREDENTIAL) {
+ return std::make_unique<RotateAttestationCredentialJob>(
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_BUILD.gn b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_BUILD.gn
index 15956fc13ef9..013de666cc0d 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_BUILD.gn
@@ -1,6 +1,6 @@
---- chrome/browser/extensions/BUILD.gn.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/extensions/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/extensions/BUILD.gn
-@@ -1369,6 +1369,10 @@ static_library("extensions") {
+@@ -1366,6 +1366,10 @@ static_library("extensions") {
deps += [ "//chrome/services/printing/public/mojom" ]
}
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_api__browser__context__keyed__service__factories.cc b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_api__browser__context__keyed__service__factories.cc
index 27f2b66606d3..58ff5224bd5c 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_api__browser__context__keyed__service__factories.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_api__browser__context__keyed__service__factories.cc
@@ -1,4 +1,4 @@
---- chrome/browser/extensions/api/api_browser_context_keyed_service_factories.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/extensions/api/api_browser_context_keyed_service_factories.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/extensions/api/api_browser_context_keyed_service_factories.cc
@@ -43,7 +43,7 @@
#include "extensions/browser/api/bluetooth_low_energy/bluetooth_low_energy_api.h"
@@ -9,7 +9,7 @@
#include "chrome/browser/extensions/api/system_indicator/system_indicator_manager_factory.h"
#endif
-@@ -114,7 +114,7 @@ void EnsureApiBrowserContextKeyedServiceFactoriesBuilt
+@@ -118,7 +118,7 @@ void EnsureApiBrowserContextKeyedServiceFactoriesBuilt
extensions::SettingsPrivateEventRouterFactory::GetInstance();
extensions::SettingsOverridesAPI::GetFactoryInstance();
extensions::SidePanelService::GetFactoryInstance();
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_messaging_native__process__launcher__posix.cc b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_messaging_native__process__launcher__posix.cc
index e10be280890f..1bcd3ded4a62 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_messaging_native__process__launcher__posix.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_messaging_native__process__launcher__posix.cc
@@ -1,6 +1,6 @@
---- chrome/browser/extensions/api/messaging/native_process_launcher_posix.cc.orig 2022-10-01 07:40:07 UTC
+--- chrome/browser/extensions/api/messaging/native_process_launcher_posix.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/extensions/api/messaging/native_process_launcher_posix.cc
-@@ -82,7 +82,7 @@ bool NativeProcessLauncher::LaunchNativeProcess(
+@@ -84,7 +84,7 @@ bool NativeProcessLauncher::LaunchNativeProcess(
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_passwords__private_passwords__private__delegate__impl.cc b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_passwords__private_passwords__private__delegate__impl.cc
index 685771ed062d..650059c88b6b 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_passwords__private_passwords__private__delegate__impl.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_passwords__private_passwords__private__delegate__impl.cc
@@ -1,6 +1,6 @@
---- chrome/browser/extensions/api/passwords_private/passwords_private_delegate_impl.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/extensions/api/passwords_private/passwords_private_delegate_impl.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/extensions/api/passwords_private/passwords_private_delegate_impl.cc
-@@ -605,7 +605,7 @@ void PasswordsPrivateDelegateImpl::OnFetchingFamilyMem
+@@ -601,7 +601,7 @@ void PasswordsPrivateDelegateImpl::OnFetchingFamilyMem
}
void PasswordsPrivateDelegateImpl::OsReauthTimeoutCall() {
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_runtime_chrome__runtime__api__delegate.cc b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_runtime_chrome__runtime__api__delegate.cc
index c86dc93edaed..833bb47a67e4 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_runtime_chrome__runtime__api__delegate.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_runtime_chrome__runtime__api__delegate.cc
@@ -1,6 +1,6 @@
---- chrome/browser/extensions/api/runtime/chrome_runtime_api_delegate.cc.orig 2023-09-29 10:30:11 UTC
+--- chrome/browser/extensions/api/runtime/chrome_runtime_api_delegate.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/extensions/api/runtime/chrome_runtime_api_delegate.cc
-@@ -280,7 +280,9 @@ bool ChromeRuntimeAPIDelegate::GetPlatformInfo(Platfor
+@@ -279,7 +279,9 @@ bool ChromeRuntimeAPIDelegate::GetPlatformInfo(Platfor
} else if (strcmp(os, "linux") == 0) {
info->os = extensions::api::runtime::PlatformOs::kLinux;
} else if (strcmp(os, "openbsd") == 0) {
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_external__provider__impl.cc b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_external__provider__impl.cc
index 94f0aa50790b..e02b215d1206 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_external__provider__impl.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_external__provider__impl.cc
@@ -1,6 +1,6 @@
---- chrome/browser/extensions/external_provider_impl.cc.orig 2023-07-21 09:49:17 UTC
+--- chrome/browser/extensions/external_provider_impl.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/extensions/external_provider_impl.cc
-@@ -829,7 +829,7 @@ void ExternalProviderImpl::CreateExternalProviders(
+@@ -830,7 +830,7 @@ void ExternalProviderImpl::CreateExternalProviders(
if (!profile->GetPrefs()->GetBoolean(pref_names::kBlockExternalExtensions)) {
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
@@ -9,7 +9,7 @@
provider_list->push_back(std::make_unique<ExternalProviderImpl>(
service,
base::MakeRefCounted<ExternalPrefLoader>(
-@@ -857,7 +857,7 @@ void ExternalProviderImpl::CreateExternalProviders(
+@@ -858,7 +858,7 @@ void ExternalProviderImpl::CreateExternalProviders(
bundled_extension_creation_flags));
// Define a per-user source of external extensions.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_file__system__access_chrome__file__system__access__permission__context.cc b/www/ungoogled-chromium/files/patch-chrome_browser_file__system__access_chrome__file__system__access__permission__context.cc
index 79e779823929..a38aaf39990d 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_file__system__access_chrome__file__system__access__permission__context.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_file__system__access_chrome__file__system__access__permission__context.cc
@@ -1,6 +1,6 @@
---- chrome/browser/file_system_access/chrome_file_system_access_permission_context.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/file_system_access/chrome_file_system_access_permission_context.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/file_system_access/chrome_file_system_access_permission_context.cc
-@@ -323,7 +323,7 @@ const struct {
+@@ -321,7 +321,7 @@ const struct {
FILE_PATH_LITERAL("Library/Mobile Documents/com~apple~CloudDocs"),
kDontBlockChildren},
#endif
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_flag__descriptions.cc b/www/ungoogled-chromium/files/patch-chrome_browser_flag__descriptions.cc
index 41e8c1e0b9d5..355357c4f623 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_flag__descriptions.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_flag__descriptions.cc
@@ -1,15 +1,6 @@
---- chrome/browser/flag_descriptions.cc.orig 2023-11-11 14:10:41 UTC
+--- chrome/browser/flag_descriptions.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/flag_descriptions.cc
-@@ -2603,7 +2603,7 @@ const char kWebUIOmniboxPopupName[] = "WebUI Omnibox P
- const char kWebUIOmniboxPopupDescription[] =
- "If enabled, shows the omnibox suggestions popup in WebUI.";
-
--#if !BUILDFLAG(IS_LINUX)
-+#if !BUILDFLAG(IS_LINUX) && !BUILDFLAG(IS_BSD)
- const char kWebUiSystemFontName[] = "WebUI System font";
- const char kWebUiSystemFontDescription[] =
- "If enabled, all WebUI surfaces will use the default UI font of the "
-@@ -7268,7 +7268,7 @@ const char kLacrosMergeIcuDataFileDescription[] =
+@@ -7255,7 +7255,7 @@ const char kLacrosMergeIcuDataFileDescription[] =
"Enables sharing common areas of icudtl.dat between Ash and Lacros.";
#endif // #if BUILDFLAG(IS_CHROMEOS_LACROS)
@@ -18,7 +9,7 @@
const char kGetAllScreensMediaName[] = "GetAllScreensMedia API";
const char kGetAllScreensMediaDescription[] =
"When enabled, the getAllScreensMedia API for capturing multiple screens "
-@@ -7485,7 +7485,7 @@ const char kSearchWebInSidePanelDescription[] =
+@@ -7494,7 +7494,7 @@ const char kSearchWebInSidePanelDescription[] =
// Random platform combinations -----------------------------------------------
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -27,7 +18,7 @@
const char kQuickCommandsName[] = "Quick Commands";
const char kQuickCommandsDescription[] =
"Enable a text interface to browser features. Invoke with Ctrl-Space.";
-@@ -7494,7 +7494,7 @@ const char kQuickCommandsDescription[] =
+@@ -7503,7 +7503,7 @@ const char kQuickCommandsDescription[] =
// BUILDFLAG(IS_FUCHSIA)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -36,7 +27,7 @@
const char kFollowingFeedSidepanelName[] = "Following feed in the sidepanel";
const char kFollowingFeedSidepanelDescription[] =
"Enables the following feed in the sidepanel.";
-@@ -7509,7 +7509,7 @@ const char kEnableProtoApiForClassifyUrlDescription[]
+@@ -7518,7 +7518,7 @@ const char kEnableProtoApiForClassifyUrlDescription[]
"instead of JSON.";
#endif
@@ -45,7 +36,7 @@
const char kEnableNetworkServiceSandboxName[] =
"Enable the network service sandbox.";
const char kEnableNetworkServiceSandboxDescription[] =
-@@ -7533,7 +7533,7 @@ const char kWebShareDescription[] =
+@@ -7542,7 +7542,7 @@ const char kWebShareDescription[] =
"platforms.";
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC)
@@ -54,7 +45,7 @@
const char kOzonePlatformHintChoiceDefault[] = "Default";
const char kOzonePlatformHintChoiceAuto[] = "Auto";
const char kOzonePlatformHintChoiceX11[] = "X11";
-@@ -7553,7 +7553,7 @@ const char kWebBluetoothConfirmPairingSupportDescripti
+@@ -7562,7 +7562,7 @@ const char kWebBluetoothConfirmPairingSupportDescripti
"Bluetooth";
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX)
@@ -63,7 +54,7 @@
const char kSkipUndecryptablePasswordsName[] =
"Skip undecryptable passwords to use the available decryptable "
"passwords.";
-@@ -7567,7 +7567,7 @@ const char kForcePasswordInitialSyncWhenDecryptionFail
+@@ -7576,7 +7576,7 @@ const char kForcePasswordInitialSyncWhenDecryptionFail
"storage and requests initial sync.";
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC)
@@ -72,7 +63,7 @@
const char kAsyncDnsName[] = "Async DNS resolver";
const char kAsyncDnsDescription[] = "Enables the built-in DNS resolver.";
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX)
-@@ -7666,7 +7666,7 @@ const char kElasticOverscrollDescription[] =
+@@ -7675,7 +7675,7 @@ const char kElasticOverscrollDescription[] =
#if BUILDFLAG(IS_WIN) || \
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)) || \
@@ -81,16 +72,7 @@
const char kUIDebugToolsName[] = "Debugging tools for UI";
const char kUIDebugToolsDescription[] =
"Enables additional keyboard shortcuts to help debugging.";
-@@ -7678,7 +7678,7 @@ const char kSyncPollImmediatelyOnEveryStartupDescripti
- #endif
-
- #if BUILDFLAG(IS_WIN) || (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) || \
-- BUILDFLAG(IS_MAC) || BUILDFLAG(IS_ANDROID)
-+ BUILDFLAG(IS_MAC) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_BSD)
- const char kDataRetentionPoliciesDisableSyncTypesNeededName[] =
- "Data Retention Policies Disable Sync Types";
- const char kDataRetentionPoliciesDisableSyncTypesNeededDescription[] =
-@@ -7723,7 +7723,7 @@ const char kEnableAudioFocusEnforcementDescription[] =
+@@ -7721,7 +7721,7 @@ const char kEnableAudioFocusEnforcementDescription[] =
"any one time. Requires #enable-media-session-service to be enabled too.";
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_flag__descriptions.h b/www/ungoogled-chromium/files/patch-chrome_browser_flag__descriptions.h
index bd504b6c4f86..096de91b0cff 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_flag__descriptions.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_flag__descriptions.h
@@ -1,15 +1,6 @@
---- chrome/browser/flag_descriptions.h.orig 2023-11-11 14:10:41 UTC
+--- chrome/browser/flag_descriptions.h.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/flag_descriptions.h
-@@ -1488,7 +1488,7 @@ extern const char kOmnibox2023RefreshConnectionSecurit
- extern const char kWebUIOmniboxPopupName[];
- extern const char kWebUIOmniboxPopupDescription[];
-
--#if !BUILDFLAG(IS_LINUX)
-+#if !BUILDFLAG(IS_LINUX) && !BUILDFLAG(IS_BSD)
- extern const char kWebUiSystemFontName[];
- extern const char kWebUiSystemFontDescription[];
- #endif
-@@ -4175,7 +4175,7 @@ extern const char kLacrosMergeIcuDataFileName[];
+@@ -4172,7 +4172,7 @@ extern const char kLacrosMergeIcuDataFileName[];
extern const char kLacrosMergeIcuDataFileDescription[];
#endif // #if BUILDFLAG(IS_CHROMEOS_LACROS)
@@ -18,7 +9,7 @@
extern const char kGetAllScreensMediaName[];
extern const char kGetAllScreensMediaDescription[];
#endif // BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)
-@@ -4305,14 +4305,14 @@ extern const char kSearchWebInSidePanelDescription[];
+@@ -4312,14 +4312,14 @@ extern const char kSearchWebInSidePanelDescription[];
// Random platform combinations -----------------------------------------------
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -35,7 +26,7 @@
extern const char kWebShareName[];
extern const char kWebShareDescription[];
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC)
-@@ -4322,7 +4322,7 @@ extern const char kWebBluetoothConfirmPairingSupportNa
+@@ -4329,7 +4329,7 @@ extern const char kWebBluetoothConfirmPairingSupportNa
extern const char kWebBluetoothConfirmPairingSupportDescription[];
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX)
@@ -44,7 +35,7 @@
extern const char kOzonePlatformHintChoiceDefault[];
extern const char kOzonePlatformHintChoiceAuto[];
extern const char kOzonePlatformHintChoiceX11[];
-@@ -4332,7 +4332,7 @@ extern const char kOzonePlatformHintName[];
+@@ -4339,7 +4339,7 @@ extern const char kOzonePlatformHintName[];
extern const char kOzonePlatformHintDescription[];
#endif // BUILDFLAG(IS_LINUX)
@@ -53,7 +44,7 @@
extern const char kSkipUndecryptablePasswordsName[];
extern const char kSkipUndecryptablePasswordsDescription[];
-@@ -4340,13 +4340,13 @@ extern const char kForcePasswordInitialSyncWhenDecrypt
+@@ -4347,13 +4347,13 @@ extern const char kForcePasswordInitialSyncWhenDecrypt
extern const char kForcePasswordInitialSyncWhenDecryptionFailsDescription[];
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC)
@@ -69,7 +60,7 @@
extern const char kFollowingFeedSidepanelName[];
extern const char kFollowingFeedSidepanelDescription[];
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) ||
-@@ -4357,7 +4357,7 @@ extern const char kEnableProtoApiForClassifyUrlName[];
+@@ -4364,7 +4364,7 @@ extern const char kEnableProtoApiForClassifyUrlName[];
extern const char kEnableProtoApiForClassifyUrlDescription[];
#endif // BUILDFLAG(ENABLE_SUPERVISED_USERS)
@@ -78,7 +69,7 @@
extern const char kEnableNetworkServiceSandboxName[];
extern const char kEnableNetworkServiceSandboxDescription[];
-@@ -4434,7 +4434,7 @@ extern const char kElasticOverscrollDescription[];
+@@ -4441,7 +4441,7 @@ extern const char kElasticOverscrollDescription[];
#if BUILDFLAG(IS_WIN) || \
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)) || \
@@ -87,16 +78,7 @@
extern const char kUIDebugToolsName[];
extern const char kUIDebugToolsDescription[];
-@@ -4443,7 +4443,7 @@ extern const char kSyncPollImmediatelyOnEveryStartupDe
- #endif
-
- #if BUILDFLAG(IS_WIN) || (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) || \
-- BUILDFLAG(IS_MAC) || BUILDFLAG(IS_ANDROID)
-+ BUILDFLAG(IS_MAC) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_BSD)
- extern const char kDataRetentionPoliciesDisableSyncTypesNeededName[];
- extern const char kDataRetentionPoliciesDisableSyncTypesNeededDescription[];
- #endif
-@@ -4470,7 +4470,7 @@ extern const char kEnableAudioFocusEnforcementName[];
+@@ -4471,7 +4471,7 @@ extern const char kEnableAudioFocusEnforcementName[];
extern const char kEnableAudioFocusEnforcementDescription[];
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
@@ -104,4 +86,4 @@
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
extern const char kThirdPartyProfileManagementName[];
extern const char kThirdPartyProfileManagementDescription[];
- #endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
+
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_intranet__redirect__detector.h b/www/ungoogled-chromium/files/patch-chrome_browser_intranet__redirect__detector.h
index 7777bae4ded8..bce307a1df1c 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_intranet__redirect__detector.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_intranet__redirect__detector.h
@@ -1,6 +1,6 @@
---- chrome/browser/intranet_redirect_detector.h.orig 2022-10-01 07:40:07 UTC
+--- chrome/browser/intranet_redirect_detector.h.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/intranet_redirect_detector.h
-@@ -27,7 +27,7 @@ class SimpleURLLoader;
+@@ -25,7 +25,7 @@ class SimpleURLLoader;
class PrefRegistrySimple;
#if !(BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_media_router_discovery_BUILD.gn b/www/ungoogled-chromium/files/patch-chrome_browser_media_router_discovery_BUILD.gn
index 5fdee1512276..bc8f263380d1 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_media_router_discovery_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_media_router_discovery_BUILD.gn
@@ -1,8 +1,8 @@
---- chrome/browser/media/router/discovery/BUILD.gn.orig 2022-10-29 17:50:56 UTC
+--- chrome/browser/media/router/discovery/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/media/router/discovery/BUILD.gn
-@@ -80,7 +80,7 @@ static_library("discovery") {
- "media_sink_discovery_metrics.h",
+@@ -81,7 +81,7 @@ static_library("discovery") {
]
+ configs += [ "//build/config/compiler:wexit_time_destructors" ]
- if (is_linux || is_chromeos) {
+ if ((is_linux || is_chromeos) && !is_bsd) {
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_metrics_chrome__metrics__service__client.cc b/www/ungoogled-chromium/files/patch-chrome_browser_metrics_chrome__metrics__service__client.cc
index ecdcdb5275b9..765b133fbd40 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_metrics_chrome__metrics__service__client.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_metrics_chrome__metrics__service__client.cc
@@ -1,4 +1,4 @@
---- chrome/browser/metrics/chrome_metrics_service_client.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/metrics/chrome_metrics_service_client.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/metrics/chrome_metrics_service_client.cc
@@ -184,7 +184,7 @@
#include "chrome/notification_helper/notification_helper_constants.h"
@@ -27,7 +27,7 @@
metrics_service_->RegisterMetricsProvider(
std::make_unique<metrics::MotherboardMetricsProvider>());
#endif
-@@ -892,7 +892,7 @@ void ChromeMetricsServiceClient::RegisterMetricsServic
+@@ -894,7 +894,7 @@ void ChromeMetricsServiceClient::RegisterMetricsServic
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
@@ -36,7 +36,7 @@
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS))
metrics_service_->RegisterMetricsProvider(
std::make_unique<DesktopPlatformFeaturesMetricsProvider>());
-@@ -1011,7 +1011,7 @@ void ChromeMetricsServiceClient::RegisterMetricsServic
+@@ -1013,7 +1013,7 @@ void ChromeMetricsServiceClient::RegisterMetricsServic
std::make_unique<PowerMetricsProvider>());
#endif
@@ -45,7 +45,7 @@
metrics_service_->RegisterMetricsProvider(
metrics::CreateDesktopSessionMetricsProvider());
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || (BUILDFLAG(IS_LINUX)
-@@ -1210,7 +1210,7 @@ bool ChromeMetricsServiceClient::RegisterForProfileEve
+@@ -1212,7 +1212,7 @@ bool ChromeMetricsServiceClient::RegisterForProfileEve
#endif
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_metrics_power_process__monitor.cc b/www/ungoogled-chromium/files/patch-chrome_browser_metrics_power_process__monitor.cc
index 37efe59ba9e5..daf5d12a2891 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_metrics_power_process__monitor.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_metrics_power_process__monitor.cc
@@ -1,4 +1,4 @@
---- chrome/browser/metrics/power/process_monitor.cc.orig 2023-09-17 07:59:53 UTC
+--- chrome/browser/metrics/power/process_monitor.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/metrics/power/process_monitor.cc
@@ -65,7 +65,7 @@ ProcessMonitor::Metrics SampleMetrics(base::ProcessMet
#endif
@@ -9,7 +9,7 @@
metrics.idle_wakeups = process_metrics.GetIdleWakeupsPerSecond();
#endif
#if BUILDFLAG(IS_MAC)
-@@ -82,7 +82,7 @@ void ScaleMetrics(ProcessMonitor::Metrics* metrics, do
+@@ -81,7 +81,7 @@ void ScaleMetrics(ProcessMonitor::Metrics* metrics, do
metrics->cpu_usage *= factor;
#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
@@ -18,7 +18,7 @@
metrics->idle_wakeups *= factor;
#endif
-@@ -174,7 +174,7 @@ ProcessMonitor::Metrics& operator+=(ProcessMonitor::Me
+@@ -172,7 +172,7 @@ ProcessMonitor::Metrics& operator+=(ProcessMonitor::Me
lhs.cpu_usage += rhs.cpu_usage;
#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_net_system__network__context__manager.cc b/www/ungoogled-chromium/files/patch-chrome_browser_net_system__network__context__manager.cc
index 75c744fd7a58..40e8b406f606 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_net_system__network__context__manager.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_net_system__network__context__manager.cc
@@ -1,4 +1,4 @@
---- chrome/browser/net/system_network_context_manager.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/net/system_network_context_manager.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/net/system_network_context_manager.cc
@@ -92,7 +92,7 @@
@@ -68,7 +68,7 @@
// The network service sandbox and the kerberos library are incompatible.
// If kerberos is enabled by policy, disable the network service sandbox.
if (g_network_service_will_allow_gssapi_library_load ||
-@@ -278,7 +278,7 @@ NetworkSandboxState IsNetworkSandboxEnabledInternal()
+@@ -277,7 +277,7 @@ NetworkSandboxState IsNetworkSandboxEnabledInternal()
}
#endif // BUILDFLAG(IS_WIN)
@@ -77,7 +77,7 @@
if (local_state &&
local_state->HasPrefPath(prefs::kNetworkServiceSandboxEnabled)) {
return local_state->GetBoolean(prefs::kNetworkServiceSandboxEnabled)
-@@ -473,7 +473,7 @@ void SystemNetworkContextManager::DeleteInstance() {
+@@ -472,7 +472,7 @@ void SystemNetworkContextManager::DeleteInstance() {
g_system_network_context_manager = nullptr;
}
@@ -86,7 +86,7 @@
SystemNetworkContextManager::GssapiLibraryLoadObserver::
GssapiLibraryLoadObserver(SystemNetworkContextManager* owner)
: owner_(owner) {}
-@@ -531,7 +531,7 @@ SystemNetworkContextManager::SystemNetworkContextManag
+@@ -530,7 +530,7 @@ SystemNetworkContextManager::SystemNetworkContextManag
pref_change_registrar_.Add(prefs::kAllHttpAuthSchemesAllowedForOrigins,
auth_pref_callback);
@@ -95,7 +95,16 @@
pref_change_registrar_.Add(prefs::kAuthNegotiateDelegateByKdcPolicy,
auth_pref_callback);
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_CHROMEOS)
-@@ -578,7 +578,7 @@ SystemNetworkContextManager::SystemNetworkContextManag
+@@ -544,7 +544,7 @@ SystemNetworkContextManager::SystemNetworkContextManag
+ auth_pref_callback);
+ #endif // BUILDFLAG(IS_ANDROID)
+
+-#if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ pref_change_registrar_.Add(kGssapiDesiredPref, auth_pref_callback);
+ #endif // BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)
+
+@@ -577,7 +577,7 @@ SystemNetworkContextManager::SystemNetworkContextManag
#endif // BUILDFLAG(CHROME_ROOT_STORE_POLICY_SUPPORTED)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -104,7 +113,7 @@
pref_change_registrar_.Add(
prefs::kEnforceLocalAnchorConstraintsEnabled,
base::BindRepeating(&SystemNetworkContextManager::
-@@ -627,7 +627,7 @@ void SystemNetworkContextManager::RegisterPrefs(PrefRe
+@@ -632,7 +632,7 @@ void SystemNetworkContextManager::RegisterPrefs(PrefRe
registry->RegisterBooleanPref(prefs::kKerberosEnabled, false);
#endif // BUILDFLAG(IS_CHROMEOS_LACROS)
@@ -113,7 +122,7 @@
registry->RegisterBooleanPref(prefs::kAuthNegotiateDelegateByKdcPolicy,
false);
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_CHROMEOS)
-@@ -656,7 +656,7 @@ void SystemNetworkContextManager::RegisterPrefs(PrefRe
+@@ -661,7 +661,7 @@ void SystemNetworkContextManager::RegisterPrefs(PrefRe
registry->RegisterBooleanPref(prefs::kChromeRootStoreEnabled, false);
#endif // BUILDFLAG(CHROME_ROOT_STORE_POLICY_SUPPORTED)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -122,7 +131,7 @@
// Note that the default value is not relevant because the pref is only
// evaluated when it is managed.
registry->RegisterBooleanPref(prefs::kEnforceLocalAnchorConstraintsEnabled,
-@@ -665,11 +665,11 @@ void SystemNetworkContextManager::RegisterPrefs(PrefRe
+@@ -670,11 +670,11 @@ void SystemNetworkContextManager::RegisterPrefs(PrefRe
registry->RegisterListPref(prefs::kExplicitlyAllowedNetworkPorts);
@@ -136,7 +145,7 @@
registry->RegisterBooleanPref(prefs::kReceivedHttpAuthNegotiateHeader, false);
#endif // BUILDFLAG(IS_LINUX)
-@@ -720,7 +720,7 @@ void SystemNetworkContextManager::OnNetworkServiceCrea
+@@ -727,7 +727,7 @@ void SystemNetworkContextManager::OnNetworkServiceCrea
OnNewHttpAuthDynamicParams(http_auth_dynamic_params);
network_service->ConfigureHttpAuthPrefs(std::move(http_auth_dynamic_params));
@@ -145,7 +154,16 @@
gssapi_library_loader_observer_.Install(network_service);
#endif // BUILDFLAG(IS_LINUX)
-@@ -1028,7 +1028,7 @@ void SystemNetworkContextManager::UpdateChromeRootStor
+@@ -938,7 +938,7 @@ bool SystemNetworkContextManager::IsNetworkSandboxEnab
+ break;
+ }
+
+-#if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ if (!enabled) {
+ g_network_service_will_allow_gssapi_library_load = true;
+ }
+@@ -1052,7 +1052,7 @@ void SystemNetworkContextManager::UpdateChromeRootStor
#endif // BUILDFLAG(CHROME_ROOT_STORE_POLICY_SUPPORTED)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_net_system__network__context__manager.h b/www/ungoogled-chromium/files/patch-chrome_browser_net_system__network__context__manager.h
index ad6ea7969768..e6d3afbe29e6 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_net_system__network__context__manager.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_net_system__network__context__manager.h
@@ -1,4 +1,4 @@
---- chrome/browser/net/system_network_context_manager.h.orig 2023-10-13 13:20:35 UTC
+--- chrome/browser/net/system_network_context_manager.h.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/net/system_network_context_manager.h
@@ -185,7 +185,7 @@ class SystemNetworkContextManager {
class URLLoaderFactoryForSystem;
@@ -18,7 +18,7 @@
// Applies the current value of the kEnforceLocalAnchorConstraintsEnabled
// pref to the enforcement state.
void UpdateEnforceLocalAnchorConstraintsEnabled();
-@@ -275,7 +275,7 @@ class SystemNetworkContextManager {
+@@ -277,7 +277,7 @@ class SystemNetworkContextManager {
static absl::optional<bool> certificate_transparency_enabled_for_testing_;
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_performance__manager_metrics_cpu__probe_cpu__probe.cc b/www/ungoogled-chromium/files/patch-chrome_browser_performance__manager_metrics_cpu__probe_cpu__probe.cc
new file mode 100644
index 000000000000..9596cba31f3f
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_performance__manager_metrics_cpu__probe_cpu__probe.cc
@@ -0,0 +1,12 @@
+--- chrome/browser/performance_manager/metrics/cpu_probe/cpu_probe.cc.orig 2023-12-23 12:33:28 UTC
++++ chrome/browser/performance_manager/metrics/cpu_probe/cpu_probe.cc
+@@ -30,6 +30,9 @@ std::unique_ptr<CpuProbe> CpuProbe::Create() {
+ return CpuProbeWin::Create();
+ #elif BUILDFLAG(IS_MAC)
+ return CpuProbeMac::Create();
++#elif BUILDFLAG(IS_BSD)
++ NOTIMPLEMENTED();
++ return nullptr;
+ #else
+ return nullptr;
+ #endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_policy_chrome__browser__cloud__management__controller__desktop.cc b/www/ungoogled-chromium/files/patch-chrome_browser_policy_chrome__browser__cloud__management__controller__desktop.cc
index 5a1e4f07fc96..22b95bef86c3 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_policy_chrome__browser__cloud__management__controller__desktop.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_policy_chrome__browser__cloud__management__controller__desktop.cc
@@ -1,4 +1,4 @@
---- chrome/browser/policy/chrome_browser_cloud_management_controller_desktop.cc.orig 2023-10-13 13:20:35 UTC
+--- chrome/browser/policy/chrome_browser_cloud_management_controller_desktop.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/policy/chrome_browser_cloud_management_controller_desktop.cc
@@ -49,7 +49,7 @@
#include "chrome/browser/policy/browser_dm_token_storage_mac.h"
@@ -15,10 +15,10 @@
-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC)
+#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD)
- #include "chrome/browser/enterprise/connectors/device_trust/device_trust_features.h" // nogncheck
#include "chrome/browser/enterprise/connectors/device_trust/key_management/browser/device_trust_key_manager_impl.h"
#include "chrome/browser/enterprise/connectors/device_trust/key_management/browser/key_rotation_launcher.h"
-@@ -91,7 +91,7 @@ void ChromeBrowserCloudManagementControllerDesktop::
+ #endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC)
+@@ -90,7 +90,7 @@ void ChromeBrowserCloudManagementControllerDesktop::
#if BUILDFLAG(IS_MAC)
storage_delegate = std::make_unique<BrowserDMTokenStorageMac>();
@@ -27,12 +27,12 @@
storage_delegate = std::make_unique<BrowserDMTokenStorageLinux>();
#elif BUILDFLAG(IS_WIN)
storage_delegate = std::make_unique<BrowserDMTokenStorageWin>();
-@@ -246,7 +246,7 @@ ChromeBrowserCloudManagementControllerDesktop::CreateC
+@@ -245,7 +245,7 @@ ChromeBrowserCloudManagementControllerDesktop::CreateC
std::unique_ptr<enterprise_connectors::DeviceTrustKeyManager>
ChromeBrowserCloudManagementControllerDesktop::CreateDeviceTrustKeyManager() {
-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC)
+#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD)
- if (enterprise_connectors::IsDeviceTrustConnectorFeatureEnabled()) {
- auto* browser_dm_token_storage = BrowserDMTokenStorage::Get();
- auto* device_management_service = GetDeviceManagementService();
+ auto* browser_dm_token_storage = BrowserDMTokenStorage::Get();
+ auto* device_management_service = GetDeviceManagementService();
+ auto shared_url_loader_factory = GetSharedURLLoaderFactory();
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_policy_configuration__policy__handler__list__factory.cc b/www/ungoogled-chromium/files/patch-chrome_browser_policy_configuration__policy__handler__list__factory.cc
index 0e0af62fcc4d..7fe4326af89e 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_policy_configuration__policy__handler__list__factory.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_policy_configuration__policy__handler__list__factory.cc
@@ -1,4 +1,4 @@
---- chrome/browser/policy/configuration_policy_handler_list_factory.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/policy/configuration_policy_handler_list_factory.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/policy/configuration_policy_handler_list_factory.cc
@@ -216,19 +216,20 @@
#include "components/spellcheck/browser/pref_names.h"
@@ -24,7 +24,7 @@
#include "chrome/browser/privacy_sandbox/privacy_sandbox_policy_handler.h"
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) ||
// BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA) ||
-@@ -787,14 +788,14 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+@@ -798,14 +799,14 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
prefs::kManagedDefaultGeolocationSetting,
base::Value::Type::INTEGER },
#if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN) \
@@ -41,8 +41,8 @@
{ key::kFullscreenAllowed,
prefs::kFullscreenAllowed,
base::Value::Type::BOOLEAN },
-@@ -1514,7 +1515,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
- base::Value::Type::INTEGER },
+@@ -1537,7 +1538,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+ base::Value::Type::LIST },
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
-#if BUILDFLAG(IS_LINUX)
@@ -50,7 +50,7 @@
{ key::kGSSAPILibraryName,
prefs::kGSSAPILibraryName,
base::Value::Type::STRING },
-@@ -1556,7 +1557,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+@@ -1582,7 +1583,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
base::Value::Type::BOOLEAN },
#endif // BUILDFLAG(IS_WIN)
@@ -59,7 +59,7 @@
{ key::kNetworkServiceSandboxEnabled,
prefs::kNetworkServiceSandboxEnabled,
base::Value::Type::BOOLEAN },
-@@ -1582,18 +1583,18 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+@@ -1608,18 +1609,18 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
prefs::kTotalMemoryLimitMb,
base::Value::Type::INTEGER },
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC)
@@ -81,7 +81,7 @@
{ key::kDefaultBrowserSettingEnabled,
prefs::kDefaultBrowserSettingEnabled,
base::Value::Type::BOOLEAN },
-@@ -1606,7 +1607,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+@@ -1632,7 +1633,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
// || BUILDFLAG(IS_FUCHSIA)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) \
@@ -90,7 +90,7 @@
{ key::kAutoplayAllowed,
prefs::kAutoplayAllowed,
base::Value::Type::BOOLEAN },
-@@ -1707,7 +1708,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+@@ -1730,7 +1731,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
base::Value::Type::BOOLEAN },
#endif // !BUILDFLAG(IS_ANDROID) && !BUILDFLAG(IS_CHROMEOS)
@@ -99,7 +99,7 @@
{ key::kAlternativeBrowserPath,
browser_switcher::prefs::kAlternativeBrowserPath,
base::Value::Type::STRING },
-@@ -1794,7 +1795,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+@@ -1823,7 +1824,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
base::Value::Type::BOOLEAN },
#endif // BUILDFLAG(IS_CHROMEOS)
@@ -108,7 +108,7 @@
// TODO(crbug.com/1454054): replace the
// kGetDisplayMediaSetSelectAllScreensAllowedForUrls policy by a policy that
// matches the name of the new `getAllScreensMedia` API.
-@@ -1803,7 +1804,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+@@ -1832,7 +1833,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
base::Value::Type::LIST },
#endif // BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)
@@ -117,7 +117,7 @@
{ key::kAuthNegotiateDelegateByKdcPolicy,
prefs::kAuthNegotiateDelegateByKdcPolicy,
base::Value::Type::BOOLEAN },
-@@ -1834,7 +1835,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+@@ -1863,7 +1864,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
#endif // BUILDFLAG(CHROME_ROOT_STORE_POLICY_SUPPORTED)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -126,7 +126,7 @@
{ key::kEnforceLocalAnchorConstraintsEnabled,
prefs::kEnforceLocalAnchorConstraintsEnabled,
base::Value::Type::BOOLEAN },
-@@ -1910,7 +1911,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+@@ -1939,7 +1940,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
ash::prefs::kUrlParameterToAutofillSAMLUsername,
base::Value::Type::STRING },
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
@@ -135,7 +135,7 @@
{ key::kBatterySaverModeAvailability,
performance_manager::user_tuning::prefs::kBatterySaverModeState,
base::Value::Type::INTEGER },
-@@ -1941,7 +1942,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+@@ -1970,7 +1971,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
prefs::kCloudApAuthEnabled,
base::Value::Type::INTEGER },
#endif // BUILDFLAG(IS_WIN)
@@ -144,7 +144,16 @@
{ key::kOutOfProcessSystemDnsResolutionEnabled,
prefs::kOutOfProcessSystemDnsResolutionEnabled,
base::Value::Type::BOOLEAN },
-@@ -2060,7 +2061,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
+@@ -2007,7 +2008,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+ prefs::kManagedPrivateNetworkAccessRestrictionsEnabled,
+ base::Value::Type::BOOLEAN },
+ #if BUILDFLAG(ENABLE_EXTENSIONS)
+-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ { key::kExtensionInstallTypeBlocklist,
+ extensions::pref_names::kExtensionInstallTypeBlocklist,
+ base::Value::Type::LIST},
+@@ -2102,7 +2103,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
// Policies for all platforms - End
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -153,7 +162,7 @@
handlers->AddHandler(
std::make_unique<performance_manager::HighEfficiencyPolicyHandler>());
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) ||
-@@ -2242,7 +2243,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
+@@ -2278,7 +2279,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
#endif // BUILDFLAG(IS_ANDROID)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -162,7 +171,7 @@
handlers->AddHandler(
std::make_unique<enterprise_idle::IdleTimeoutPolicyHandler>());
handlers->AddHandler(
-@@ -2300,7 +2301,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
+@@ -2336,7 +2337,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
key::kBrowsingDataLifetime, browsing_data::prefs::kBrowsingDataLifetime,
chrome_schema));
@@ -171,7 +180,7 @@
handlers->AddHandler(std::make_unique<LocalSyncPolicyHandler>());
handlers->AddHandler(std::make_unique<ThemeColorPolicyHandler>());
handlers->AddHandler(
-@@ -2694,7 +2695,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
+@@ -2730,7 +2731,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
#endif
#if BUILDFLAG(ENABLE_SPELLCHECK)
@@ -180,7 +189,7 @@
handlers->AddHandler(std::make_unique<SpellcheckLanguagePolicyHandler>());
handlers->AddHandler(
std::make_unique<SpellcheckLanguageBlocklistPolicyHandler>(
-@@ -2702,7 +2703,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
+@@ -2738,7 +2739,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
#endif // BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN)
#endif // BUILDFLAG(ENABLE_SPELLCHECK)
@@ -189,10 +198,10 @@
handlers->AddHandler(std::make_unique<SimplePolicyHandler>(
key::kAllowSystemNotifications, prefs::kAllowSystemNotifications,
base::Value::Type::BOOLEAN));
-@@ -2715,7 +2716,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
- handlers->AddHandler(
- std::make_unique<first_party_sets::FirstPartySetsOverridesPolicyHandler>(
- chrome_schema));
+@@ -2755,7 +2756,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
+ /*new_policy_handler=*/std::make_unique<
+ first_party_sets::FirstPartySetsOverridesPolicyHandler>(
+ policy::key::kRelatedWebsiteSetsOverrides, chrome_schema)));
-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
+#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD) || \
BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_ANDROID)
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_prefs_browser__prefs.cc b/www/ungoogled-chromium/files/patch-chrome_browser_prefs_browser__prefs.cc
index 0898164b532a..6b9205a044d7 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_prefs_browser__prefs.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_prefs_browser__prefs.cc
@@ -1,4 +1,4 @@
---- chrome/browser/prefs/browser_prefs.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/prefs/browser_prefs.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/prefs/browser_prefs.cc
@@ -482,18 +482,18 @@
#endif
@@ -31,7 +31,7 @@
#include "ui/color/system_theme.h"
#endif
-@@ -671,7 +671,7 @@ const char kPluginsPluginsList[] = "plugins.plugins_li
+@@ -649,7 +649,7 @@ const char kPluginsPluginsList[] = "plugins.plugins_li
const char kPluginsShowDetails[] = "plugins.show_details";
// Deprecated 02/2023.
@@ -40,7 +40,7 @@
const char kWebAppsUrlHandlerInfo[] = "web_apps.url_handler_info";
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
-@@ -992,7 +992,7 @@ void RegisterLocalStatePrefsForMigration(PrefRegistryS
+@@ -979,7 +979,7 @@ void RegisterLocalStatePrefsForMigration(PrefRegistryS
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
// Deprecated 02/2023.
@@ -49,7 +49,7 @@
registry->RegisterDictionaryPref(kWebAppsUrlHandlerInfo);
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
-@@ -1943,12 +1943,12 @@ void RegisterProfilePrefs(user_prefs::PrefRegistrySync
+@@ -1952,12 +1952,12 @@ void RegisterProfilePrefs(user_prefs::PrefRegistrySync
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -64,7 +64,7 @@
browser_switcher::BrowserSwitcherPrefs::RegisterProfilePrefs(registry);
enterprise_signin::RegisterProfilePrefs(registry);
#endif
-@@ -2103,7 +2103,7 @@ void MigrateObsoleteLocalStatePrefs(PrefService* local
+@@ -2114,7 +2114,7 @@ void MigrateObsoleteLocalStatePrefs(PrefService* local
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
// Added 02/2023
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_printing_print__backend__service__manager.cc b/www/ungoogled-chromium/files/patch-chrome_browser_printing_print__backend__service__manager.cc
index 9a5066bfbbdb..11426053cb7a 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_printing_print__backend__service__manager.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_printing_print__backend__service__manager.cc
@@ -1,4 +1,4 @@
---- chrome/browser/printing/print_backend_service_manager.cc.orig 2023-09-17 07:59:53 UTC
+--- chrome/browser/printing/print_backend_service_manager.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/printing/print_backend_service_manager.cc
@@ -33,7 +33,7 @@
#include "printing/buildflags/buildflags.h"
@@ -9,7 +9,7 @@
#include "content/public/common/content_switches.h"
#endif
-@@ -851,7 +851,7 @@ PrintBackendServiceManager::GetServiceFromBundle(
+@@ -849,7 +849,7 @@ PrintBackendServiceManager::GetServiceFromBundle(
host.BindNewPipeAndPassReceiver(),
content::ServiceProcessHost::Options()
.WithDisplayName(IDS_UTILITY_PROCESS_PRINT_BACKEND_SERVICE_NAME)
@@ -18,7 +18,7 @@
.WithExtraCommandLineSwitches({switches::kMessageLoopTypeUi})
#endif
.Pass());
-@@ -1024,7 +1024,7 @@ PrintBackendServiceManager::DetermineIdleTimeoutUpdate
+@@ -1022,7 +1022,7 @@ PrintBackendServiceManager::DetermineIdleTimeoutUpdate
return kNoClientsRegisteredResetOnIdleTimeout;
case ClientType::kQueryWithUi:
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_printing_printer__query.cc b/www/ungoogled-chromium/files/patch-chrome_browser_printing_printer__query.cc
index 1dcc9b394046..cced7ce1f221 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_printing_printer__query.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_printing_printer__query.cc
@@ -1,6 +1,6 @@
---- chrome/browser/printing/printer_query.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/printing/printer_query.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/printing/printer_query.cc
-@@ -339,7 +339,7 @@ void PrinterQuery::UpdatePrintSettings(base::Value::Di
+@@ -341,7 +341,7 @@ void PrinterQuery::UpdatePrintSettings(base::Value::Di
crash_key = std::make_unique<crash_keys::ScopedPrinterInfo>(
print_backend->GetPrinterDriverInfo(printer_name));
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_profiles_chrome__browser__main__extra__parts__profiles.cc b/www/ungoogled-chromium/files/patch-chrome_browser_profiles_chrome__browser__main__extra__parts__profiles.cc
index 3df05d67d6a6..75f5eee09229 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_profiles_chrome__browser__main__extra__parts__profiles.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_profiles_chrome__browser__main__extra__parts__profiles.cc
@@ -1,6 +1,6 @@
---- chrome/browser/profiles/chrome_browser_main_extra_parts_profiles.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/profiles/chrome_browser_main_extra_parts_profiles.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/profiles/chrome_browser_main_extra_parts_profiles.cc
-@@ -392,18 +392,18 @@
+@@ -400,18 +400,18 @@
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -22,7 +22,7 @@
#include "chrome/browser/browser_switcher/browser_switcher_service_factory.h"
#include "chrome/browser/enterprise/connectors/analysis/local_binary_upload_service_factory.h"
#include "chrome/browser/enterprise/signals/signals_aggregator_factory.h"
-@@ -606,7 +606,7 @@ void ChromeBrowserMainExtraPartsProfiles::
+@@ -620,7 +620,7 @@ void ChromeBrowserMainExtraPartsProfiles::
if (breadcrumbs::IsEnabled()) {
BreadcrumbManagerKeyedServiceFactory::GetInstance();
}
@@ -31,7 +31,7 @@
browser_switcher::BrowserSwitcherServiceFactory::GetInstance();
#endif
browser_sync::UserEventServiceFactory::GetInstance();
-@@ -706,26 +706,26 @@ void ChromeBrowserMainExtraPartsProfiles::
+@@ -726,26 +726,26 @@ void ChromeBrowserMainExtraPartsProfiles::
enterprise_commands::UserRemoteCommandsServiceFactory::GetInstance();
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -63,7 +63,7 @@
enterprise_signin::EnterpriseSigninServiceFactory::GetInstance();
#endif
#if BUILDFLAG(ENABLE_SESSION_SERVICE)
-@@ -840,7 +840,7 @@ void ChromeBrowserMainExtraPartsProfiles::
+@@ -861,7 +861,7 @@ void ChromeBrowserMainExtraPartsProfiles::
#endif
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
@@ -72,7 +72,7 @@
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS))
metrics::DesktopProfileSessionDurationsServiceFactory::GetInstance();
#endif
-@@ -935,7 +935,7 @@ void ChromeBrowserMainExtraPartsProfiles::
+@@ -958,7 +958,7 @@ void ChromeBrowserMainExtraPartsProfiles::
#if BUILDFLAG(IS_CHROMEOS)
policy::PolicyCertServiceFactory::GetInstance();
#endif
@@ -81,7 +81,7 @@
policy::ProfileTokenPolicyWebSigninServiceFactory::GetInstance();
#endif
policy::UserCloudPolicyInvalidatorFactory::GetInstance();
-@@ -976,7 +976,7 @@ void ChromeBrowserMainExtraPartsProfiles::
+@@ -1002,7 +1002,7 @@ void ChromeBrowserMainExtraPartsProfiles::
#if !BUILDFLAG(IS_ANDROID)
ProfileThemeUpdateServiceFactory::GetInstance();
#endif
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_profiles_profile__impl.cc b/www/ungoogled-chromium/files/patch-chrome_browser_profiles_profile__impl.cc
index ca6e25fa359d..3cd115e60e00 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_profiles_profile__impl.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_profiles_profile__impl.cc
@@ -1,6 +1,6 @@
---- chrome/browser/profiles/profile_impl.cc.orig 2023-09-17 07:59:53 UTC
+--- chrome/browser/profiles/profile_impl.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/profiles/profile_impl.cc
-@@ -256,6 +256,10 @@
+@@ -258,6 +258,10 @@
#include "chrome/browser/spellchecker/spellcheck_service.h"
#endif
@@ -11,7 +11,7 @@
using bookmarks::BookmarkModel;
using content::BrowserThread;
using content::DownloadManagerDelegate;
-@@ -598,7 +602,7 @@ void ProfileImpl::LoadPrefsForNormalStartup(bool async
+@@ -603,7 +607,7 @@ void ProfileImpl::LoadPrefsForNormalStartup(bool async
#else
{
#endif // BUILDFLAG(IS_CHROMEOS_LACROS)
@@ -20,7 +20,7 @@
ProfileManager* profile_manager = g_browser_process->profile_manager();
ProfileAttributesEntry* entry =
profile_manager->GetProfileAttributesStorage()
-@@ -860,7 +864,17 @@ void ProfileImpl::DoFinalInit(CreateMode create_mode)
+@@ -871,7 +875,17 @@ void ProfileImpl::DoFinalInit(CreateMode create_mode)
}
base::FilePath ProfileImpl::last_selected_directory() {
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_resources_settings_route.ts b/www/ungoogled-chromium/files/patch-chrome_browser_resources_settings_route.ts
index 544dea3f8546..576d1db37c95 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_resources_settings_route.ts
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_resources_settings_route.ts
@@ -1,6 +1,6 @@
---- chrome/browser/resources/settings/route.ts.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/resources/settings/route.ts.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/resources/settings/route.ts
-@@ -240,7 +240,7 @@ function createBrowserSettingsRoutes(): SettingsRoutes
+@@ -242,7 +242,7 @@ function createBrowserSettingsRoutes(): SettingsRoutes
r.ACCESSIBILITY = r.ADVANCED.createSection(
'/accessibility', 'a11y', loadTimeData.getString('a11yPageTitle'));
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_download__protection_file__analyzer.cc b/www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_download__protection_file__analyzer.cc
index d3894401bde9..b01714448ca9 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_download__protection_file__analyzer.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_download__protection_file__analyzer.cc
@@ -1,4 +1,4 @@
---- chrome/browser/safe_browsing/download_protection/file_analyzer.cc.orig 2023-10-13 13:20:35 UTC
+--- chrome/browser/safe_browsing/download_protection/file_analyzer.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/safe_browsing/download_protection/file_analyzer.cc
@@ -21,7 +21,7 @@
#include "content/public/browser/browser_thread.h"
@@ -9,7 +9,7 @@
#include "chrome/browser/safe_browsing/download_protection/document_analysis_service.h"
#endif
-@@ -83,7 +83,7 @@ void FileAnalyzer::Start(const base::FilePath& target_
+@@ -85,7 +85,7 @@ void FileAnalyzer::Start(const base::FilePath& target_
} else if (inspection_type == DownloadFileType::DMG) {
StartExtractDmgFeatures();
#endif
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_download__protection_file__analyzer.h b/www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_download__protection_file__analyzer.h
index 52d837393964..bda364a4812b 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_download__protection_file__analyzer.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_download__protection_file__analyzer.h
@@ -1,4 +1,4 @@
---- chrome/browser/safe_browsing/download_protection/file_analyzer.h.orig 2023-10-13 13:20:35 UTC
+--- chrome/browser/safe_browsing/download_protection/file_analyzer.h.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/safe_browsing/download_protection/file_analyzer.h
@@ -18,7 +18,7 @@
#include "components/safe_browsing/core/common/proto/csd.pb.h"
@@ -9,7 +9,7 @@
#include "chrome/services/file_util/public/cpp/sandboxed_document_analyzer.h"
#endif
-@@ -109,7 +109,7 @@ class FileAnalyzer {
+@@ -110,7 +110,7 @@ class FileAnalyzer {
const safe_browsing::ArchiveAnalyzerResults& archive_results);
#endif
@@ -18,7 +18,7 @@
void StartExtractDocumentFeatures();
void OnDocumentAnalysisFinished(
const DocumentAnalyzerResults& document_results);
-@@ -139,7 +139,7 @@ class FileAnalyzer {
+@@ -141,7 +141,7 @@ class FileAnalyzer {
dmg_analyzer_{nullptr, base::OnTaskRunnerDeleter(nullptr)};
#endif
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_incident__reporting_incident__reporting__service.cc b/www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_incident__reporting_incident__reporting__service.cc
index 702b58abb80c..718e3bafe705 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_incident__reporting_incident__reporting__service.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_incident__reporting_incident__reporting__service.cc
@@ -1,6 +1,6 @@
---- chrome/browser/safe_browsing/incident_reporting/incident_reporting_service.cc.orig 2023-02-11 09:11:04 UTC
+--- chrome/browser/safe_browsing/incident_reporting/incident_reporting_service.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/safe_browsing/incident_reporting/incident_reporting_service.cc
-@@ -707,7 +707,7 @@ void IncidentReportingService::OnEnvironmentDataCollec
+@@ -696,7 +696,7 @@ void IncidentReportingService::OnEnvironmentDataCollec
// Process::Current().CreationTime() is missing on some platforms.
#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_sync_chrome__sync__client.cc b/www/ungoogled-chromium/files/patch-chrome_browser_sync_chrome__sync__client.cc
index c0180e8b4c5a..6d268aaf32e9 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_sync_chrome__sync__client.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_sync_chrome__sync__client.cc
@@ -1,6 +1,6 @@
---- chrome/browser/sync/chrome_sync_client.cc.orig 2023-09-17 07:59:53 UTC
+--- chrome/browser/sync/chrome_sync_client.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/sync/chrome_sync_client.cc
-@@ -110,7 +110,7 @@
+@@ -111,7 +111,7 @@
#endif // BUILDFLAG(ENABLE_SPELLCHECK)
#if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || \
@@ -9,7 +9,7 @@
#include "chrome/browser/ui/tabs/saved_tab_groups/saved_tab_group_keyed_service.h"
#include "chrome/browser/ui/tabs/saved_tab_groups/saved_tab_group_service_factory.h"
#endif // BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) ||
-@@ -440,7 +440,7 @@ ChromeSyncClient::CreateDataTypeControllers(syncer::Sy
+@@ -441,7 +441,7 @@ ChromeSyncClient::CreateDataTypeControllers(syncer::Sy
#endif // !BUILDFLAG(IS_ANDROID)
#if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || \
@@ -18,7 +18,7 @@
if (base::FeatureList::IsEnabled(features::kTabGroupsSave)) {
controllers.push_back(std::make_unique<syncer::ModelTypeController>(
syncer::SAVED_TAB_GROUP,
-@@ -453,7 +453,7 @@ ChromeSyncClient::CreateDataTypeControllers(syncer::Sy
+@@ -454,7 +454,7 @@ ChromeSyncClient::CreateDataTypeControllers(syncer::Sy
// Chrome prefers OS provided spell checkers where they exist. So only sync the
// custom dictionary on platforms that typically don't provide one.
@@ -27,7 +27,7 @@
// Dictionary sync is enabled by default.
if (GetPrefService()->GetBoolean(spellcheck::prefs::kSpellCheckEnable)) {
controllers.push_back(
-@@ -609,7 +609,7 @@ base::WeakPtr<syncer::ModelTypeControllerDelegate>
+@@ -610,7 +610,7 @@ base::WeakPtr<syncer::ModelTypeControllerDelegate>
ChromeSyncClient::GetControllerDelegateForModelType(syncer::ModelType type) {
switch (type) {
#if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_sync_sync__service__factory.cc b/www/ungoogled-chromium/files/patch-chrome_browser_sync_sync__service__factory.cc
index 0c98e16ac64a..faeb56efbd2d 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_sync_sync__service__factory.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_sync_sync__service__factory.cc
@@ -1,6 +1,6 @@
---- chrome/browser/sync/sync_service_factory.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/sync/sync_service_factory.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/sync/sync_service_factory.cc
-@@ -81,7 +81,7 @@
+@@ -84,7 +84,7 @@
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
#if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || \
@@ -9,7 +9,7 @@
#include "chrome/browser/ui/tabs/saved_tab_groups/saved_tab_group_service_factory.h"
#endif // BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) ||
// BUILDFLAG(IS_WIN)
-@@ -126,7 +126,7 @@ std::unique_ptr<KeyedService> BuildSyncService(
+@@ -131,7 +131,7 @@ std::unique_ptr<KeyedService> BuildSyncService(
// TODO(crbug.com/1052397): Reassess whether the following block needs to be
// included in lacros-chrome once build flag switch of lacros-chrome is
// complete.
@@ -18,8 +18,8 @@
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS))
syncer::SyncPrefs prefs(profile->GetPrefs());
local_sync_backend_enabled = prefs.IsLocalSyncEnabled();
-@@ -252,7 +252,7 @@ SyncServiceFactory::SyncServiceFactory()
- DependsOn(PasswordStoreFactory::GetInstance());
+@@ -258,7 +258,7 @@ SyncServiceFactory::SyncServiceFactory()
+ DependsOn(ProfilePasswordStoreFactory::GetInstance());
DependsOn(PowerBookmarkServiceFactory::GetInstance());
#if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || \
- BUILDFLAG(IS_WIN)
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_sync_sync__service__util.cc b/www/ungoogled-chromium/files/patch-chrome_browser_sync_sync__service__util.cc
new file mode 100644
index 000000000000..1550b617bd48
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_sync_sync__service__util.cc
@@ -0,0 +1,20 @@
+--- chrome/browser/sync/sync_service_util.cc.orig 2023-12-23 12:33:28 UTC
++++ chrome/browser/sync/sync_service_util.cc
+@@ -9,7 +9,7 @@
+ #include "components/sync/base/features.h"
+
+ #if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || \
+- BUILDFLAG(IS_WIN)
++ BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
+ #include "components/variations/service/variations_service.h"
+ #endif // BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) ||
+ // BUILDFLAG(IS_WIN)
+@@ -22,7 +22,7 @@ bool IsDesktopEnUSLocaleOnlySyncPollFeatureEnabled() {
+ }
+
+ #if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || \
+- BUILDFLAG(IS_WIN)
++ BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
+ std::string country_code;
+ auto* variations_service = g_browser_process->variations_service();
+ if (variations_service) {
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_themes_theme__service.cc b/www/ungoogled-chromium/files/patch-chrome_browser_themes_theme__service.cc
index d2be10113968..0bf3e1aff349 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_themes_theme__service.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_themes_theme__service.cc
@@ -1,6 +1,6 @@
---- chrome/browser/themes/theme_service.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/themes/theme_service.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/themes/theme_service.cc
-@@ -75,7 +75,7 @@
+@@ -74,7 +74,7 @@
#include "extensions/browser/extension_registry_observer.h"
#endif
@@ -9,7 +9,7 @@
#include "ui/linux/linux_ui.h"
#include "ui/ozone/public/ozone_platform.h"
#endif
-@@ -331,7 +331,7 @@ CustomThemeSupplier* ThemeService::GetThemeSupplier()
+@@ -330,7 +330,7 @@ CustomThemeSupplier* ThemeService::GetThemeSupplier()
}
bool ThemeService::ShouldUseCustomFrame() const {
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_actions_chrome__action__id.h b/www/ungoogled-chromium/files/patch-chrome_browser_ui_actions_chrome__action__id.h
index 9863c48e38a9..bf6b24583c88 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_actions_chrome__action__id.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_actions_chrome__action__id.h
@@ -1,6 +1,6 @@
---- chrome/browser/ui/actions/chrome_action_id.h.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/ui/actions/chrome_action_id.h.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/ui/actions/chrome_action_id.h
-@@ -542,7 +542,7 @@
+@@ -526,7 +526,7 @@
IDC_CONTENT_CONTEXT_QUICK_ANSWERS_INLINE_ANSWER) \
E(kActionContentContextQuickAnswersInlineQuery, \
IDC_CONTENT_CONTEXT_QUICK_ANSWERS_INLINE_QUERY)
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_browser__command__controller.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_browser__command__controller.cc
index dcec51996681..6ffe85e11a64 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_browser__command__controller.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_browser__command__controller.cc
@@ -1,15 +1,15 @@
---- chrome/browser/ui/browser_command_controller.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/ui/browser_command_controller.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/ui/browser_command_controller.cc
-@@ -124,7 +124,7 @@
+@@ -120,7 +120,7 @@
#include "components/user_manager/user_manager.h"
#endif
-#if BUILDFLAG(IS_LINUX)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ #include "ui/base/ime/text_input_flags.h"
#include "ui/linux/linux_ui.h"
#endif
-
-@@ -305,7 +305,7 @@ bool BrowserCommandController::IsReservedCommandOrKey(
+@@ -302,7 +302,7 @@ bool BrowserCommandController::IsReservedCommandOrKey(
#endif
}
@@ -18,7 +18,7 @@
// If this key was registered by the user as a content editing hotkey, then
// it is not reserved.
auto* linux_ui = ui::LinuxUi::instance();
-@@ -556,7 +556,7 @@ bool BrowserCommandController::ExecuteCommandWithDispo
+@@ -554,7 +554,7 @@ bool BrowserCommandController::ExecuteCommandWithDispo
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
@@ -27,7 +27,7 @@
case IDC_MINIMIZE_WINDOW:
browser_->window()->Minimize();
break;
-@@ -568,7 +568,7 @@ bool BrowserCommandController::ExecuteCommandWithDispo
+@@ -566,7 +566,7 @@ bool BrowserCommandController::ExecuteCommandWithDispo
break;
#endif
@@ -36,7 +36,7 @@
case IDC_USE_SYSTEM_TITLE_BAR: {
PrefService* prefs = profile()->GetPrefs();
prefs->SetBoolean(prefs::kUseCustomChromeFrame,
-@@ -1214,12 +1214,12 @@ void BrowserCommandController::InitCommandState() {
+@@ -1233,12 +1233,12 @@ void BrowserCommandController::InitCommandState() {
#endif
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_browser__dialogs.h b/www/ungoogled-chromium/files/patch-chrome_browser_ui_browser__dialogs.h
deleted file mode 100644
index dbe747d092a2..000000000000
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_browser__dialogs.h
+++ /dev/null
@@ -1,29 +0,0 @@
---- chrome/browser/ui/browser_dialogs.h.orig 2023-11-04 07:08:51 UTC
-+++ chrome/browser/ui/browser_dialogs.h
-@@ -26,7 +26,7 @@
- #include "ui/gfx/native_widget_types.h"
-
- #if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
-- BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA)
-+ BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_BSD)
- #include "chrome/browser/web_applications/web_app_callback_app_identity.h"
- #include "chrome/browser/web_applications/web_app_id.h"
- #include "chrome/browser/web_applications/web_app_install_info.h"
-@@ -84,7 +84,7 @@ class Widget;
- } // namespace views
-
- #if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
-- BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA)
-+ BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_BSD)
- namespace webapps {
- class MlInstallOperationTracker;
- struct Screenshot;
-@@ -162,7 +162,7 @@ void ShowBluetoothDevicePairConfirmDialog(
- #endif // PAIR_BLUETOOTH_ON_DEMAND()
-
- #if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
-- BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA)
-+ BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_BSD)
- // Callback used to indicate whether a user has accepted the installation of a
- // web app. The boolean parameter is true when the user accepts the dialog. The
- // WebAppInstallInfo parameter contains the information about the app,
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_chrome__pages.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_chrome__pages.cc
index 2130a1951baa..de286bc517b7 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_chrome__pages.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_chrome__pages.cc
@@ -1,4 +1,4 @@
---- chrome/browser/ui/chrome_pages.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/ui/chrome_pages.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/ui/chrome_pages.cc
@@ -78,7 +78,7 @@
#endif
@@ -9,7 +9,7 @@
#include "chrome/browser/web_applications/web_app_utils.h"
#endif
-@@ -673,7 +673,7 @@ void ShowShortcutCustomizationApp(Profile* profile,
+@@ -671,7 +671,7 @@ void ShowShortcutCustomizationApp(Profile* profile,
}
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_chrome__pages.h b/www/ungoogled-chromium/files/patch-chrome_browser_ui_chrome__pages.h
index 331389fea8e3..b455b16f2562 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_chrome__pages.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_chrome__pages.h
@@ -1,4 +1,4 @@
---- chrome/browser/ui/chrome_pages.h.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/ui/chrome_pages.h.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/ui/chrome_pages.h
@@ -38,7 +38,7 @@ enum class ConsentLevel;
} // namespace signin
@@ -9,7 +9,7 @@
namespace web_app {
enum class AppSettingsPageEntryPoint;
} // namespace web_app
-@@ -250,7 +250,7 @@ void ShowShortcutCustomizationApp(Profile* profile,
+@@ -255,7 +255,7 @@ void ShowShortcutCustomizationApp(Profile* profile,
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_omnibox_omnibox__pedal__implementations.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_omnibox_omnibox__pedal__implementations.cc
index 54d2979b20b8..4ee9a186c7c1 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_omnibox_omnibox__pedal__implementations.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_omnibox_omnibox__pedal__implementations.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/omnibox/omnibox_pedal_implementations.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/ui/omnibox/omnibox_pedal_implementations.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/ui/omnibox/omnibox_pedal_implementations.cc
-@@ -1981,7 +1981,7 @@ const gfx::VectorIcon& GetSharingHubVectorIcon() {
+@@ -1995,7 +1995,7 @@ const gfx::VectorIcon& GetSharingHubVectorIcon() {
OmniboxFieldTrial::IsChromeRefreshActionChipIconsEnabled()
? omnibox::kShareWinChromeRefreshIcon
: omnibox::kShareWinIcon;
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_startup_startup__browser__creator__impl.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_startup_startup__browser__creator__impl.cc
index f8121c4e4f7a..0f2c9684d013 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_startup_startup__browser__creator__impl.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_startup_startup__browser__creator__impl.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/startup/startup_browser_creator_impl.cc.orig 2023-07-21 09:49:17 UTC
+--- chrome/browser/ui/startup/startup_browser_creator_impl.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/ui/startup/startup_browser_creator_impl.cc
-@@ -260,7 +260,7 @@ Browser* StartupBrowserCreatorImpl::OpenTabsInBrowser(
+@@ -255,7 +255,7 @@ Browser* StartupBrowserCreatorImpl::OpenTabsInBrowser(
// at the state of the MessageLoop.
Browser::CreateParams params = Browser::CreateParams(profile_, false);
params.creation_source = Browser::CreationSource::kStartupCreator;
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_tab__helpers.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_tab__helpers.cc
index 94792d1016c5..b427cf4831a4 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_tab__helpers.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_tab__helpers.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/tab_helpers.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/ui/tab_helpers.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/ui/tab_helpers.cc
-@@ -224,7 +224,7 @@
+@@ -225,7 +225,7 @@
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -9,7 +9,7 @@
#include "chrome/browser/ui/blocked_content/framebust_block_tab_helper.h"
#include "chrome/browser/ui/browser_finder.h"
#include "chrome/browser/ui/hats/hats_helper.h"
-@@ -612,12 +612,12 @@ void TabHelpers::AttachTabHelpers(WebContents* web_con
+@@ -641,12 +641,12 @@ void TabHelpers::AttachTabHelpers(WebContents* web_con
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_tabs_organization_trigger__observer.h b/www/ungoogled-chromium/files/patch-chrome_browser_ui_tabs_organization_trigger__observer.h
new file mode 100644
index 000000000000..0d37fd48d927
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_tabs_organization_trigger__observer.h
@@ -0,0 +1,10 @@
+--- chrome/browser/ui/tabs/organization/trigger_observer.h.orig 2023-12-23 12:33:28 UTC
++++ chrome/browser/ui/tabs/organization/trigger_observer.h
+@@ -6,6 +6,7 @@
+ #define CHROME_BROWSER_UI_TABS_ORGANIZATION_TRIGGER_OBSERVER_H_
+
+ #include <memory>
++#include <unordered_map>
+
+ #include "base/functional/callback.h"
+ #include "chrome/browser/ui/browser_list_observer.h"
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_task__manager_task__manager__table__model.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_task__manager_task__manager__table__model.cc
index 04aab332edaa..79b7169ca6cb 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_task__manager_task__manager__table__model.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_task__manager_task__manager__table__model.cc
@@ -1,4 +1,4 @@
---- chrome/browser/ui/task_manager/task_manager_table_model.cc.orig 2023-03-10 11:01:21 UTC
+--- chrome/browser/ui/task_manager/task_manager_table_model.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/ui/task_manager/task_manager_table_model.cc
@@ -454,7 +454,7 @@ std::u16string TaskManagerTableModel::GetText(size_t r
? stringifier_->backgrounded_string()
@@ -18,7 +18,7 @@
case IDS_TASK_MANAGER_OPEN_FD_COUNT_COLUMN: {
const int proc1_fd_count =
observed_task_manager()->GetOpenFdCount(tasks_[row1]);
-@@ -795,7 +795,7 @@ void TaskManagerTableModel::UpdateRefreshTypes(int col
+@@ -799,7 +799,7 @@ void TaskManagerTableModel::UpdateRefreshTypes(int col
type = REFRESH_TYPE_KEEPALIVE_COUNT;
break;
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_ui__features.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_ui__features.cc
index 3dd828e91a2b..49b1b836aadf 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_ui__features.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_ui__features.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/ui_features.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/ui/ui_features.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/ui/ui_features.cc
-@@ -311,7 +311,7 @@ BASE_FEATURE(kTopChromeWebUIUsesSpareRenderer,
+@@ -315,7 +315,7 @@ BASE_FEATURE(kTopChromeWebUIUsesSpareRenderer,
"TopChromeWebUIUsesSpareRenderer",
base::FEATURE_ENABLED_BY_DEFAULT);
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_browser__view.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_browser__view.cc
index 022b35cd7225..5f908d6172bf 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_browser__view.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_browser__view.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/views/frame/browser_view.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/ui/views/frame/browser_view.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/ui/views/frame/browser_view.cc
-@@ -2138,7 +2138,7 @@ void BrowserView::TabDraggingStatusChanged(bool is_dra
+@@ -2171,7 +2171,7 @@ void BrowserView::TabDraggingStatusChanged(bool is_dra
// CrOS cleanup is done.
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_picture__in__picture__browser__frame__view.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_picture__in__picture__browser__frame__view.cc
index 5db5a176b32a..1628cdf0e82b 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_picture__in__picture__browser__frame__view.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_picture__in__picture__browser__frame__view.cc
@@ -1,4 +1,4 @@
---- chrome/browser/ui/views/frame/picture_in_picture_browser_frame_view.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/ui/views/frame/picture_in_picture_browser_frame_view.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/ui/views/frame/picture_in_picture_browser_frame_view.cc
@@ -55,7 +55,7 @@
#include "ui/aura/window.h"
@@ -9,7 +9,7 @@
#include "chrome/browser/ui/views/frame/browser_frame_view_paint_utils_linux.h"
#include "chrome/browser/ui/views/frame/desktop_browser_frame_aura_linux.h"
#endif
-@@ -82,7 +82,7 @@ constexpr int kBackToTabImageSize = 16;
+@@ -83,7 +83,7 @@ constexpr int kContentSettingIconSize = 16;
// The height of the controls bar at the top of the window.
constexpr int kTopControlsHeight = 30;
@@ -18,7 +18,7 @@
// Frame border when window shadow is not drawn.
constexpr int kFrameBorderThickness = 4;
#endif
-@@ -177,7 +177,7 @@ class WindowEventObserver : public ui::EventObserver {
+@@ -178,7 +178,7 @@ class WindowEventObserver : public ui::EventObserver {
gfx::Rect input_bounds = pip_browser_frame_view_->GetLocalBounds();
@@ -27,7 +27,7 @@
// Calculate input bounds for Linux. This is needed because the input bounds
// is not necessary the same as the local bounds on Linux.
if (pip_browser_frame_view_->ShouldDrawFrameShadow()) {
-@@ -558,7 +558,7 @@ PictureInPictureBrowserFrameView::PictureInPictureBrow
+@@ -560,7 +560,7 @@ PictureInPictureBrowserFrameView::PictureInPictureBrow
AddChildView(std::move(auto_pip_setting_overlay));
}
@@ -36,7 +36,7 @@
frame_background_ = std::make_unique<views::FrameBackground>();
#endif
-@@ -734,7 +734,7 @@ void PictureInPictureBrowserFrameView::OnThemeChanged(
+@@ -736,7 +736,7 @@ void PictureInPictureBrowserFrameView::OnThemeChanged(
for (ContentSettingImageView* view : content_setting_views_)
view->SetIconColor(color_provider->GetColor(kColorPipWindowForeground));
@@ -45,7 +45,7 @@
// On Linux the top bar background will be drawn in OnPaint().
top_bar_container_view_->SetBackground(views::CreateSolidBackground(
color_provider->GetColor(kColorPipWindowTopBarBackground)));
-@@ -803,7 +803,7 @@ void PictureInPictureBrowserFrameView::RemovedFromWidg
+@@ -811,7 +811,7 @@ void PictureInPictureBrowserFrameView::RemovedFromWidg
BrowserNonClientFrameView::RemovedFromWidget();
}
@@ -54,7 +54,7 @@
gfx::Insets PictureInPictureBrowserFrameView::MirroredFrameBorderInsets()
const {
auto border = FrameBorderInsets();
-@@ -1052,7 +1052,7 @@ void PictureInPictureBrowserFrameView::AnimationProgre
+@@ -1058,7 +1058,7 @@ void PictureInPictureBrowserFrameView::AnimationProgre
// views::View implementations:
void PictureInPictureBrowserFrameView::OnPaint(gfx::Canvas* canvas) {
@@ -63,7 +63,7 @@
// Draw the PiP window frame borders and shadows, including the top bar
// background.
if (window_frame_provider_) {
-@@ -1176,7 +1176,7 @@ void PictureInPictureBrowserFrameView::UpdateTopBarVie
+@@ -1182,7 +1182,7 @@ void PictureInPictureBrowserFrameView::UpdateTopBarVie
}
gfx::Insets PictureInPictureBrowserFrameView::FrameBorderInsets() const {
@@ -72,7 +72,7 @@
if (window_frame_provider_) {
const auto insets = window_frame_provider_->GetFrameThicknessDip();
const auto tiled_edges = frame()->tiled_edges();
-@@ -1197,7 +1197,7 @@ gfx::Insets PictureInPictureBrowserFrameView::FrameBor
+@@ -1203,7 +1203,7 @@ gfx::Insets PictureInPictureBrowserFrameView::FrameBor
}
gfx::Insets PictureInPictureBrowserFrameView::ResizeBorderInsets() const {
@@ -81,7 +81,7 @@
return FrameBorderInsets();
#elif BUILDFLAG(IS_CHROMEOS_ASH)
return gfx::Insets(chromeos::kResizeInsideBoundsSize);
-@@ -1218,7 +1218,7 @@ gfx::Size PictureInPictureBrowserFrameView::GetNonClie
+@@ -1224,7 +1224,7 @@ gfx::Size PictureInPictureBrowserFrameView::GetNonClie
top_height + border_thickness.bottom());
}
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_picture__in__picture__browser__frame__view.h b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_picture__in__picture__browser__frame__view.h
index 4c5d8419bc16..0c2ef1cda6a4 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_picture__in__picture__browser__frame__view.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_picture__in__picture__browser__frame__view.h
@@ -1,4 +1,4 @@
---- chrome/browser/ui/views/frame/picture_in_picture_browser_frame_view.h.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/ui/views/frame/picture_in_picture_browser_frame_view.h.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/ui/views/frame/picture_in_picture_browser_frame_view.h
@@ -24,7 +24,7 @@
#include "ui/views/controls/image_view.h"
@@ -27,16 +27,16 @@
gfx::Insets MirroredFrameBorderInsets() const override;
gfx::Insets GetInputInsets() const override;
SkRRect GetRestoredClipRegion() const override;
-@@ -188,7 +188,7 @@ class PictureInPictureBrowserFrameView
- // Called when mouse entered or exited the pip window.
- void OnMouseEnteredOrExitedWindow(bool entered);
+@@ -191,7 +191,7 @@ class PictureInPictureBrowserFrameView
+ // Returns true if there's an overlay view that's currently shown.
+ bool IsOverlayViewVisible() const;
-#if BUILDFLAG(IS_LINUX)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
// Sets the window frame provider so that it will be used for drawing.
void SetWindowFrameProvider(ui::WindowFrameProvider* window_frame_provider);
-@@ -355,7 +355,7 @@ class PictureInPictureBrowserFrameView
+@@ -358,7 +358,7 @@ class PictureInPictureBrowserFrameView
// `top_bar_color_animation_`.
absl::optional<SkColor> current_foreground_color_;
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_hung__renderer__view.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_hung__renderer__view.cc
index 53c5bc754d29..dda4f4cd7ab0 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_hung__renderer__view.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_hung__renderer__view.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/views/hung_renderer_view.cc.orig 2023-02-11 09:11:04 UTC
+--- chrome/browser/ui/views/hung_renderer_view.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/ui/views/hung_renderer_view.cc
-@@ -404,7 +404,7 @@ void HungRendererDialogView::ForceCrashHungRenderer()
+@@ -405,7 +405,7 @@ void HungRendererDialogView::ForceCrashHungRenderer()
content::RenderProcessHost* rph =
hung_pages_table_model_->GetRenderWidgetHost()->GetProcess();
if (rph) {
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_profiles_profile__menu__view__base.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_profiles_profile__menu__view__base.cc
index b9f1fd19687a..7bf20a0225aa 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_profiles_profile__menu__view__base.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_profiles_profile__menu__view__base.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/views/profiles/profile_menu_view_base.cc.orig 2023-10-13 13:20:35 UTC
+--- chrome/browser/ui/views/profiles/profile_menu_view_base.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/ui/views/profiles/profile_menu_view_base.cc
-@@ -652,7 +652,7 @@ void ProfileMenuViewBase::SetProfileIdentityInfo(
+@@ -660,7 +660,7 @@ void ProfileMenuViewBase::SetProfileIdentityInfo(
// TODO(crbug.com/1052397): Revisit once build flag switch of lacros-chrome is
// complete.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tab__search__bubble__host.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tab__search__bubble__host.cc
index e2cdb481571d..8990ee8f867f 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tab__search__bubble__host.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tab__search__bubble__host.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/views/tab_search_bubble_host.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/ui/views/tab_search_bubble_host.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/ui/views/tab_search_bubble_host.cc
-@@ -186,7 +186,7 @@ bool TabSearchBubbleHost::ShouldTabSearchRenderBeforeT
+@@ -222,7 +222,7 @@ bool TabSearchBubbleHost::ShouldTabSearchRenderBeforeT
// Mac should have tabsearch on the right side. Windows >= Win10 has the
// Tab Search button as a FrameCaptionButton, but it still needs to be on the
// left if it exists.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tabs_tab.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tabs_tab.cc
index 0e6898a5ae29..fa2848338a2b 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tabs_tab.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tabs_tab.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/views/tabs/tab.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/ui/views/tabs/tab.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/ui/views/tabs/tab.cc
-@@ -611,7 +611,7 @@ void Tab::MaybeUpdateHoverStatus(const ui::MouseEvent&
+@@ -612,7 +612,7 @@ void Tab::MaybeUpdateHoverStatus(const ui::MouseEvent&
if (mouse_hovered_ || !GetWidget()->IsMouseEventsEnabled())
return;
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tabs_tab__drag__controller.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tabs_tab__drag__controller.cc
index 4ef7c7697ab5..133d7a0c587c 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tabs_tab__drag__controller.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tabs_tab__drag__controller.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/views/tabs/tab_drag_controller.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/ui/views/tabs/tab_drag_controller.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/ui/views/tabs/tab_drag_controller.cc
-@@ -91,7 +91,7 @@
+@@ -89,7 +89,7 @@
#include "components/remote_cocoa/browser/window.h"
#endif
@@ -9,7 +9,7 @@
#include "ui/aura/client/drag_drop_client.h"
#endif
-@@ -208,7 +208,7 @@ bool IsWindowDragUsingSystemDragDropAllowed() {
+@@ -206,7 +206,7 @@ bool IsWindowDragUsingSystemDragDropAllowed() {
void UpdateSystemDnDDragImage(TabDragContext* attached_context,
const gfx::ImageSkia& image) {
@@ -18,7 +18,7 @@
aura::Window* root_window =
attached_context->GetWidget()->GetNativeWindow()->GetRootWindow();
if (aura::client::GetDragDropClient(root_window)) {
-@@ -388,7 +388,7 @@ void TabDragController::Init(TabDragContext* source_co
+@@ -386,7 +386,7 @@ void TabDragController::Init(TabDragContext* source_co
// synchronous on desktop Linux, so use that.
// - ChromeOS Ash
// Releasing capture on Ash cancels gestures so avoid it.
@@ -27,7 +27,7 @@
can_release_capture_ = false;
#endif
start_point_in_screen_ = gfx::Point(source_view_offset, mouse_offset.y());
-@@ -1013,7 +1013,7 @@ TabDragController::DragBrowserToNewTabStrip(TabDragCon
+@@ -1011,7 +1011,7 @@ TabDragController::DragBrowserToNewTabStrip(TabDragCon
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
@@ -36,7 +36,7 @@
// EndMoveLoop is going to snap the window back to its original location.
// Hide it so users don't see this. Hiding a window in Linux aura causes
// it to lose capture so skip it.
-@@ -2053,7 +2053,7 @@ void TabDragController::CompleteDrag() {
+@@ -2059,7 +2059,7 @@ void TabDragController::CompleteDrag() {
}
// If source window was maximized - maximize the new window as well.
@@ -45,7 +45,7 @@
// Keeping maximized state breaks snap to Grid on Windows when dragging
// tabs from maximized windows. TODO:(crbug.com/727051) Explore doing this
// for other desktop OS's. kMaximizedStateRetainedOnTabDrag in
-@@ -2485,7 +2485,7 @@ TabDragController::Liveness TabDragController::GetLoca
+@@ -2483,7 +2483,7 @@ TabDragController::Liveness TabDragController::GetLoca
}
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tabs_tab__hover__card__bubble__view.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tabs_tab__hover__card__bubble__view.cc
index 0d77a8d75afa..c6c58a59291d 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tabs_tab__hover__card__bubble__view.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tabs_tab__hover__card__bubble__view.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/views/tabs/tab_hover_card_bubble_view.cc.orig 2023-09-17 07:59:53 UTC
+--- chrome/browser/ui/views/tabs/tab_hover_card_bubble_view.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/ui/views/tabs/tab_hover_card_bubble_view.cc
-@@ -372,7 +372,7 @@ TabHoverCardBubbleView::TabHoverCardBubbleView(Tab* ta
+@@ -376,7 +376,7 @@ TabHoverCardBubbleView::TabHoverCardBubbleView(Tab* ta
// not become active. Setting this to false creates the need to explicitly
// hide the hovercard on press, touch, and keyboard events.
SetCanActivate(false);
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_web__apps_web__app__integration__test__driver.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_web__apps_web__app__integration__test__driver.cc
index fd24663e9f5f..dafb76b07a2b 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_web__apps_web__app__integration__test__driver.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_web__apps_web__app__integration__test__driver.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/views/web_apps/web_app_integration_test_driver.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/ui/views/web_apps/web_app_integration_test_driver.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/ui/views/web_apps/web_app_integration_test_driver.cc
-@@ -464,7 +464,7 @@ std::string GetFileExtension(FileExtension file_extens
+@@ -463,7 +463,7 @@ std::string GetFileExtension(FileExtension file_extens
}
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -9,7 +9,7 @@
SiteConfig GetSiteConfigurationFromAppName(const std::string& app_name) {
SiteConfig config;
bool is_app_found = false;
-@@ -1936,7 +1936,7 @@ void WebAppIntegrationTestDriver::DeletePlatformShortc
+@@ -1924,7 +1924,7 @@ void WebAppIntegrationTestDriver::DeletePlatformShortc
if (app_name.empty()) {
app_name = GetSiteConfiguration(site).app_name;
}
@@ -18,7 +18,7 @@
ASSERT_TRUE(override_registration_->test_override->IsShortcutCreated(
profile(), app_id, app_name));
ASSERT_TRUE(
-@@ -3253,7 +3253,7 @@ void WebAppIntegrationTestDriver::CheckRunOnOsLoginEna
+@@ -3243,7 +3243,7 @@ void WebAppIntegrationTestDriver::CheckRunOnOsLoginEna
app_state->id, app_state->name);
ASSERT_TRUE(icon_color.has_value());
ASSERT_THAT(site_config.icon_color, testing::Eq(icon_color.value()));
@@ -27,7 +27,7 @@
ASSERT_TRUE(override_registration_->test_override->IsRunOnOsLoginEnabled(
profile(), app_state->id, app_state->name));
#endif
-@@ -3268,7 +3268,7 @@ void WebAppIntegrationTestDriver::CheckRunOnOsLoginDis
+@@ -3258,7 +3258,7 @@ void WebAppIntegrationTestDriver::CheckRunOnOsLoginDis
GetAppBySiteMode(after_state_change_action_state_.get(), profile(), site);
ASSERT_TRUE(app_state);
base::ScopedAllowBlockingForTesting allow_blocking;
@@ -36,7 +36,7 @@
ASSERT_FALSE(override_registration_->test_override->IsRunOnOsLoginEnabled(
profile(), app_state->id, app_state->name));
#endif
-@@ -3278,7 +3278,7 @@ void WebAppIntegrationTestDriver::CheckRunOnOsLoginDis
+@@ -3268,7 +3268,7 @@ void WebAppIntegrationTestDriver::CheckRunOnOsLoginDis
void WebAppIntegrationTestDriver::CheckSiteHandlesFile(
Site site,
FileExtension file_extension) {
@@ -45,7 +45,7 @@
if (!BeforeStateCheckAction(__FUNCTION__)) {
return;
}
-@@ -3294,7 +3294,7 @@ void WebAppIntegrationTestDriver::CheckSiteHandlesFile
+@@ -3284,7 +3284,7 @@ void WebAppIntegrationTestDriver::CheckSiteHandlesFile
void WebAppIntegrationTestDriver::CheckSiteNotHandlesFile(
Site site,
FileExtension file_extension) {
@@ -54,7 +54,7 @@
if (!BeforeStateCheckAction(__FUNCTION__)) {
return;
}
-@@ -4079,7 +4079,7 @@ base::FilePath WebAppIntegrationTestDriver::GetShortcu
+@@ -4077,7 +4077,7 @@ base::FilePath WebAppIntegrationTestDriver::GetShortcu
base::FilePath shortcut_dir,
const std::string& app_name,
const webapps::AppId& app_id) {
@@ -63,7 +63,7 @@
return override_registration_->test_override->GetShortcutPath(
profile(), shortcut_dir, app_id, app_name);
#else
-@@ -4273,7 +4273,7 @@ bool WebAppIntegrationTestDriver::IsShortcutAndIconCre
+@@ -4271,7 +4271,7 @@ bool WebAppIntegrationTestDriver::IsShortcutAndIconCre
const webapps::AppId& id) {
base::ScopedAllowBlockingForTesting allow_blocking;
bool is_shortcut_and_icon_correct = false;
@@ -72,7 +72,7 @@
bool is_shortcut_correct =
override_registration_->test_override->IsShortcutCreated(profile, id,
name);
-@@ -4317,7 +4317,7 @@ bool WebAppIntegrationTestDriver::DoIconColorsMatch(Pr
+@@ -4315,7 +4315,7 @@ bool WebAppIntegrationTestDriver::DoIconColorsMatch(Pr
do_icon_colors_match =
(expected_icon_pixel_color == shortcut_pixel_color_apps_folder.value());
}
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_web__applications_web__app__dialogs.h b/www/ungoogled-chromium/files/patch-chrome_browser_ui_web__applications_web__app__dialogs.h
new file mode 100644
index 000000000000..f5c30e1db9b6
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_web__applications_web__app__dialogs.h
@@ -0,0 +1,11 @@
+--- chrome/browser/ui/web_applications/web_app_dialogs.h.orig 2023-12-23 12:33:28 UTC
++++ chrome/browser/ui/web_applications/web_app_dialogs.h
+@@ -20,7 +20,7 @@
+ #include "ui/gfx/native_widget_types.h"
+
+ static_assert(BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) ||
+- BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA));
++ BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_BSD));
+
+ class GURL;
+ class Profile;
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_chrome__web__ui__controller__factory.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_chrome__web__ui__controller__factory.cc
index 3e5314662fa0..6ef87ebe30ac 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_chrome__web__ui__controller__factory.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_chrome__web__ui__controller__factory.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/webui/chrome_web_ui_controller_factory.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/ui/webui/chrome_web_ui_controller_factory.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/ui/webui/chrome_web_ui_controller_factory.cc
-@@ -213,7 +213,7 @@
+@@ -211,7 +211,7 @@
#include "chrome/browser/ui/webui/chromeos/chrome_url_disabled/chrome_url_disabled_ui.h"
#endif
@@ -9,7 +9,7 @@
#include "chrome/browser/ui/webui/webui_js_error/webui_js_error_ui.h"
#endif
-@@ -239,17 +239,17 @@
+@@ -237,17 +237,17 @@
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -30,7 +30,7 @@
#include "chrome/browser/ui/webui/connectors_internals/connectors_internals_ui.h"
#endif
-@@ -402,7 +402,7 @@ bool IsAboutUI(const GURL& url) {
+@@ -404,7 +404,7 @@ bool IsAboutUI(const GURL& url) {
#if !BUILDFLAG(IS_ANDROID)
|| url.host_piece() == chrome::kChromeUITermsHost
#endif
@@ -39,7 +39,7 @@
|| url.host_piece() == chrome::kChromeUILinuxProxyConfigHost
#endif
#if BUILDFLAG(IS_CHROMEOS_ASH)
-@@ -661,7 +661,7 @@ WebUIFactoryFunction GetWebUIFactoryFunction(WebUI* we
+@@ -668,7 +668,7 @@ WebUIFactoryFunction GetWebUIFactoryFunction(WebUI* we
if (url.host_piece() == chrome::kChromeUIMobileSetupHost)
return &NewWebUI<ash::cellular_setup::MobileSetupUI>;
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
@@ -48,7 +48,7 @@
if (url.host_piece() == chrome::kChromeUIWebUIJsErrorHost)
return &NewWebUI<WebUIJsErrorUI>;
#endif
-@@ -724,7 +724,7 @@ WebUIFactoryFunction GetWebUIFactoryFunction(WebUI* we
+@@ -731,7 +731,7 @@ WebUIFactoryFunction GetWebUIFactoryFunction(WebUI* we
if (url.host_piece() == chrome::kChromeUINaClHost)
return &NewWebUI<NaClUI>;
#endif
@@ -57,7 +57,7 @@
defined(TOOLKIT_VIEWS)) || \
defined(USE_AURA)
if (url.host_piece() == chrome::kChromeUITabModalConfirmDialogHost)
-@@ -795,27 +795,27 @@ WebUIFactoryFunction GetWebUIFactoryFunction(WebUI* we
+@@ -792,27 +792,27 @@ WebUIFactoryFunction GetWebUIFactoryFunction(WebUI* we
}
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_ntp_app__launcher__handler.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_ntp_app__launcher__handler.cc
index 46e3ec55b378..754698015ec3 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_ntp_app__launcher__handler.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_ntp_app__launcher__handler.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/webui/ntp/app_launcher_handler.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/ui/webui/ntp/app_launcher_handler.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/ui/webui/ntp/app_launcher_handler.cc
-@@ -312,7 +312,7 @@ base::Value::Dict AppLauncherHandler::CreateExtensionI
+@@ -311,7 +311,7 @@ base::Value::Dict AppLauncherHandler::CreateExtensionI
bool is_deprecated_app = false;
auto* context = extension_service_->GetBrowserContext();
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_web__applications_os__integration_os__integration__test__override.h b/www/ungoogled-chromium/files/patch-chrome_browser_web__applications_os__integration_os__integration__test__override.h
index 5b8d44672ebc..97c97794e89e 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_web__applications_os__integration_os__integration__test__override.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_web__applications_os__integration_os__integration__test__override.h
@@ -1,6 +1,6 @@
---- chrome/browser/web_applications/os_integration/os_integration_test_override.h.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/web_applications/os_integration/os_integration_test_override.h.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/web_applications/os_integration/os_integration_test_override.h
-@@ -93,7 +93,7 @@ class OsIntegrationTestOverride
+@@ -92,7 +92,7 @@ class OsIntegrationTestOverride
virtual const base::FilePath& chrome_apps_folder() = 0;
virtual void EnableOrDisablePathOnLogin(const base::FilePath& file_path,
bool enable_on_login) = 0;
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_web__applications_os__integration_web__app__file__handler__registration.h b/www/ungoogled-chromium/files/patch-chrome_browser_web__applications_os__integration_web__app__file__handler__registration.h
index ad4fc6be2453..ef1673bbacb1 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_web__applications_os__integration_web__app__file__handler__registration.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_web__applications_os__integration_web__app__file__handler__registration.h
@@ -1,6 +1,6 @@
---- chrome/browser/web_applications/os_integration/web_app_file_handler_registration.h.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/web_applications/os_integration/web_app_file_handler_registration.h.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/web_applications/os_integration/web_app_file_handler_registration.h
-@@ -44,7 +44,7 @@ void UnregisterFileHandlersWithOs(const webapps::AppId
+@@ -43,7 +43,7 @@ void UnregisterFileHandlersWithOs(const webapps::AppId
const base::FilePath& profile_path,
ResultCallback callback);
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_web__applications_os__integration_web__app__shortcut.h b/www/ungoogled-chromium/files/patch-chrome_browser_web__applications_os__integration_web__app__shortcut.h
index e563d909b0f4..d8a2431f2ad5 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_web__applications_os__integration_web__app__shortcut.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_web__applications_os__integration_web__app__shortcut.h
@@ -1,6 +1,6 @@
---- chrome/browser/web_applications/os_integration/web_app_shortcut.h.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/web_applications/os_integration/web_app_shortcut.h.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/web_applications/os_integration/web_app_shortcut.h
-@@ -22,7 +22,7 @@
+@@ -21,7 +21,7 @@
#include "ui/gfx/image/image_family.h"
#include "url/gurl.h"
@@ -9,7 +9,7 @@
#include "chrome/browser/web_applications/os_integration/web_app_shortcut_linux.h"
#endif // BUILDFLAG(IS_LINUX)
-@@ -71,7 +71,7 @@ struct ShortcutInfo {
+@@ -70,7 +70,7 @@ struct ShortcutInfo {
std::set<std::string> file_handler_extensions;
std::set<std::string> file_handler_mime_types;
std::set<std::string> protocol_handlers;
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_web__applications_test_os__integration__test__override__impl.h b/www/ungoogled-chromium/files/patch-chrome_browser_web__applications_test_os__integration__test__override__impl.h
index 4e2ef9eb6fb4..2181408dbff8 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_web__applications_test_os__integration__test__override__impl.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_web__applications_test_os__integration__test__override__impl.h
@@ -1,6 +1,6 @@
---- chrome/browser/web_applications/test/os_integration_test_override_impl.h.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/web_applications/test/os_integration_test_override_impl.h.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/web_applications/test/os_integration_test_override_impl.h
-@@ -39,7 +39,7 @@ class ShellLinkItem;
+@@ -38,7 +38,7 @@ class ShellLinkItem;
namespace web_app {
@@ -9,7 +9,7 @@
struct LinuxFileRegistration {
base::FilePath file_name;
std::string xdg_command;
-@@ -113,7 +113,7 @@ class OsIntegrationTestOverrideImpl : public OsIntegra
+@@ -112,7 +112,7 @@ class OsIntegrationTestOverrideImpl : public OsIntegra
bool DeleteApplicationMenuDirOnWin();
#endif // BUILDFLAG(IS_WIN)
@@ -18,7 +18,7 @@
bool DeleteDesktopDirOnLinux();
#endif // BUILDFLAG(IS_LINUX)
-@@ -232,7 +232,7 @@ class OsIntegrationTestOverrideImpl : public OsIntegra
+@@ -231,7 +231,7 @@ class OsIntegrationTestOverrideImpl : public OsIntegra
const base::FilePath& chrome_apps_folder() override;
void EnableOrDisablePathOnLogin(const base::FilePath& file_path,
bool enable_on_login) override;
@@ -27,7 +27,7 @@
const base::FilePath& desktop() override;
const base::FilePath& startup() override;
const base::FilePath& applications_dir() override;
-@@ -279,7 +279,7 @@ class OsIntegrationTestOverrideImpl : public OsIntegra
+@@ -278,7 +278,7 @@ class OsIntegrationTestOverrideImpl : public OsIntegra
base::ScopedTempDir chrome_apps_folder_;
std::map<base::FilePath, bool> startup_enabled_;
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_web__applications_web__app__install__info.h b/www/ungoogled-chromium/files/patch-chrome_browser_web__applications_web__app__install__info.h
index dde3815f165f..2774381c0487 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_web__applications_web__app__install__info.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_web__applications_web__app__install__info.h
@@ -1,11 +1,11 @@
---- chrome/browser/web_applications/web_app_install_info.h.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/web_applications/web_app_install_info.h.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/web_applications/web_app_install_info.h
-@@ -36,7 +36,7 @@
+@@ -35,7 +35,7 @@
#include "url/gurl.h"
static_assert(BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) ||
- BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA));
+ BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_BSD));
- class SkBitmap;
+ namespace web_app {
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_webauthn_chrome__authenticator__request__delegate.cc b/www/ungoogled-chromium/files/patch-chrome_browser_webauthn_chrome__authenticator__request__delegate.cc
index 52d7b1b0cbe8..f69ac2c627df 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_webauthn_chrome__authenticator__request__delegate.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_webauthn_chrome__authenticator__request__delegate.cc
@@ -1,6 +1,6 @@
---- chrome/browser/webauthn/chrome_authenticator_request_delegate.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/webauthn/chrome_authenticator_request_delegate.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/browser/webauthn/chrome_authenticator_request_delegate.cc
-@@ -672,7 +672,7 @@ void ChromeAuthenticatorRequestDelegate::ConfigureDisc
+@@ -677,7 +677,7 @@ void ChromeAuthenticatorRequestDelegate::ConfigureDisc
g_observer->ConfiguringCable(request_type);
}
@@ -9,12 +9,3 @@
// No caBLEv1 on Linux. It tends to crash bluez.
if (base::Contains(pairings_from_extension,
device::CableDiscoveryData::Version::V1,
-@@ -879,7 +879,7 @@ void ChromeAuthenticatorRequestDelegate::OnTransportAv
- device::FidoRequestHandlerBase::TransportAvailabilityInfo data) {
- if (base::FeatureList::IsEnabled(device::kWebAuthnFilterGooglePasskeys) &&
- dialog_model()->relying_party_id() == kGoogleRpId &&
-- std::ranges::any_of(data.recognized_credentials,
-+ base::ranges::any_of(data.recognized_credentials,
- IsCredentialFromPlatformAuthenticator)) {
- // Regrettably, Chrome will create webauthn credentials for things other
- // than authentication (e.g. credit card autofill auth) under the rp id
diff --git a/www/ungoogled-chromium/files/patch-chrome_common_chrome__features.cc b/www/ungoogled-chromium/files/patch-chrome_common_chrome__features.cc
index 20b55e66adf6..e3a49b249012 100644
--- a/www/ungoogled-chromium/files/patch-chrome_common_chrome__features.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_common_chrome__features.cc
@@ -1,4 +1,4 @@
---- chrome/common/chrome_features.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/common/chrome_features.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/common/chrome_features.cc
@@ -76,7 +76,7 @@ BASE_FEATURE(kAppShimNotificationAttribution,
BASE_FEATURE(kAsyncDns,
@@ -45,7 +45,7 @@
// Controls whether Chrome Apps are supported. See https://crbug.com/1221251.
// If the feature is disabled, Chrome Apps continue to work. If enabled, Chrome
// Apps will not launch and will be marked in the UI as deprecated.
-@@ -382,7 +382,7 @@ const base::FeatureParam<bool> kDnsOverHttpsFallbackPa
+@@ -377,7 +377,7 @@ const base::FeatureParam<bool> kDnsOverHttpsFallbackPa
const base::FeatureParam<bool> kDnsOverHttpsShowUiParam {
&kDnsOverHttps, "ShowUi",
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC) || \
@@ -54,8 +54,8 @@
true
#else
false
-@@ -889,7 +889,7 @@ BASE_FEATURE(kLacrosSharedComponentsDir,
- base::FEATURE_DISABLED_BY_DEFAULT);
+@@ -907,7 +907,7 @@ BASE_FEATURE(kLacrosSharedComponentsDir,
+ base::FEATURE_ENABLED_BY_DEFAULT);
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
-#if BUILDFLAG(IS_LINUX) && !BUILDFLAG(IS_CHROMEOS)
@@ -63,7 +63,7 @@
BASE_FEATURE(kLinuxLowMemoryMonitor,
"LinuxLowMemoryMonitor",
base::FEATURE_DISABLED_BY_DEFAULT);
-@@ -902,7 +902,7 @@ constexpr base::FeatureParam<int> kLinuxLowMemoryMonit
+@@ -920,7 +920,7 @@ constexpr base::FeatureParam<int> kLinuxLowMemoryMonit
&kLinuxLowMemoryMonitor, "critical_level", 255};
#endif // BUILDFLAG(IS_LINUX) && !BUILDFLAG(IS_CHROMEOS)
diff --git a/www/ungoogled-chromium/files/patch-chrome_common_chrome__features.h b/www/ungoogled-chromium/files/patch-chrome_common_chrome__features.h
index 7bf820e488dc..73773b5bcd79 100644
--- a/www/ungoogled-chromium/files/patch-chrome_common_chrome__features.h
+++ b/www/ungoogled-chromium/files/patch-chrome_common_chrome__features.h
@@ -1,4 +1,4 @@
---- chrome/common/chrome_features.h.orig 2023-11-04 07:08:51 UTC
+--- chrome/common/chrome_features.h.orig 2023-12-23 12:33:28 UTC
+++ chrome/common/chrome_features.h
@@ -61,13 +61,13 @@ BASE_DECLARE_FEATURE(kAppShimNotificationAttribution);
COMPONENT_EXPORT(CHROME_FEATURES) BASE_DECLARE_FEATURE(kAsyncDns);
@@ -25,7 +25,7 @@
COMPONENT_EXPORT(CHROME_FEATURES) BASE_DECLARE_FEATURE(kChromeAppsDeprecation);
COMPONENT_EXPORT(CHROME_FEATURES)
BASE_DECLARE_FEATURE(kKeepForceInstalledPreinstalledApps);
-@@ -508,7 +508,7 @@ COMPONENT_EXPORT(CHROME_FEATURES)
+@@ -523,7 +523,7 @@ COMPONENT_EXPORT(CHROME_FEATURES)
BASE_DECLARE_FEATURE(kLacrosSharedComponentsDir);
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
@@ -34,7 +34,7 @@
COMPONENT_EXPORT(CHROME_FEATURES) BASE_DECLARE_FEATURE(kLinuxLowMemoryMonitor);
COMPONENT_EXPORT(CHROME_FEATURES)
extern const base::FeatureParam<int> kLinuxLowMemoryMonitorModerateLevel;
-@@ -516,7 +516,7 @@ COMPONENT_EXPORT(CHROME_FEATURES)
+@@ -531,7 +531,7 @@ COMPONENT_EXPORT(CHROME_FEATURES)
extern const base::FeatureParam<int> kLinuxLowMemoryMonitorCriticalLevel;
#endif // BUILDFLAG(IS_LINUX) && !BUILDFLAG(IS_CHROMEOS)
diff --git a/www/ungoogled-chromium/files/patch-chrome_common_chrome__paths.cc b/www/ungoogled-chromium/files/patch-chrome_common_chrome__paths.cc
index 845581679afd..24b0a5573cb2 100644
--- a/www/ungoogled-chromium/files/patch-chrome_common_chrome__paths.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_common_chrome__paths.cc
@@ -1,4 +1,4 @@
---- chrome/common/chrome_paths.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/common/chrome_paths.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/common/chrome_paths.cc
@@ -30,7 +30,7 @@
#include "base/apple/foundation_util.h"
@@ -9,7 +9,7 @@
#include "components/policy/core/common/policy_paths.h"
#endif
-@@ -51,14 +51,14 @@
+@@ -52,14 +52,14 @@
namespace {
@@ -27,7 +27,7 @@
#endif // BUILDFLAG(GOOGLE_CHROME_BRANDING)
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-@@ -213,7 +213,7 @@ bool PathProvider(int key, base::FilePath* result) {
+@@ -220,7 +220,7 @@ bool PathProvider(int key, base::FilePath* result) {
}
break;
case chrome::DIR_DEFAULT_DOWNLOADS_SAFE:
@@ -36,7 +36,7 @@
if (!GetUserDownloadsDirectorySafe(&cur)) {
return false;
}
-@@ -516,7 +516,7 @@ bool PathProvider(int key, base::FilePath* result) {
+@@ -523,7 +523,7 @@ bool PathProvider(int key, base::FilePath* result) {
return false;
}
break;
@@ -45,7 +45,7 @@
case chrome::DIR_POLICY_FILES: {
cur = base::FilePath(policy::kPolicyPath);
break;
-@@ -527,7 +527,7 @@ bool PathProvider(int key, base::FilePath* result) {
+@@ -534,7 +534,7 @@ bool PathProvider(int key, base::FilePath* result) {
#if BUILDFLAG(IS_CHROMEOS_ASH) || \
((BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)) && \
BUILDFLAG(CHROMIUM_BRANDING)) || \
@@ -54,7 +54,7 @@
case chrome::DIR_USER_EXTERNAL_EXTENSIONS: {
if (!base::PathService::Get(chrome::DIR_USER_DATA, &cur)) {
return false;
-@@ -536,7 +536,7 @@ bool PathProvider(int key, base::FilePath* result) {
+@@ -543,7 +543,7 @@ bool PathProvider(int key, base::FilePath* result) {
break;
}
#endif
@@ -63,7 +63,7 @@
case chrome::DIR_STANDALONE_EXTERNAL_EXTENSIONS: {
cur = base::FilePath(kFilepathSinglePrefExtensions);
break;
-@@ -583,7 +583,7 @@ bool PathProvider(int key, base::FilePath* result) {
+@@ -590,7 +590,7 @@ bool PathProvider(int key, base::FilePath* result) {
break;
#endif
@@ -72,7 +72,7 @@
case chrome::DIR_NATIVE_MESSAGING:
#if BUILDFLAG(IS_MAC)
#if BUILDFLAG(GOOGLE_CHROME_BRANDING)
-@@ -597,9 +597,12 @@ bool PathProvider(int key, base::FilePath* result) {
+@@ -604,9 +604,12 @@ bool PathProvider(int key, base::FilePath* result) {
#if BUILDFLAG(GOOGLE_CHROME_BRANDING)
cur = base::FilePath(
FILE_PATH_LITERAL("/etc/opt/chrome/native-messaging-hosts"));
diff --git a/www/ungoogled-chromium/files/patch-chrome_common_chrome__switches.cc b/www/ungoogled-chromium/files/patch-chrome_common_chrome__switches.cc
index 37ce56410ad1..a89942245543 100644
--- a/www/ungoogled-chromium/files/patch-chrome_common_chrome__switches.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_common_chrome__switches.cc
@@ -1,6 +1,6 @@
---- chrome/common/chrome_switches.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/common/chrome_switches.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/common/chrome_switches.cc
-@@ -861,14 +861,14 @@ const char kAllowNaClSocketAPI[] = "allow-nacl-socket-
+@@ -857,14 +857,14 @@ const char kAllowNaClSocketAPI[] = "allow-nacl-socket-
#endif
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_common_chrome__switches.h b/www/ungoogled-chromium/files/patch-chrome_common_chrome__switches.h
index b0f3211db6ec..940a5e2a47d4 100644
--- a/www/ungoogled-chromium/files/patch-chrome_common_chrome__switches.h
+++ b/www/ungoogled-chromium/files/patch-chrome_common_chrome__switches.h
@@ -1,6 +1,6 @@
---- chrome/common/chrome_switches.h.orig 2023-11-04 07:08:51 UTC
+--- chrome/common/chrome_switches.h.orig 2023-12-23 12:33:28 UTC
+++ chrome/common/chrome_switches.h
-@@ -273,12 +273,12 @@ extern const char kAllowNaClSocketAPI[];
+@@ -272,12 +272,12 @@ extern const char kAllowNaClSocketAPI[];
#endif
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_common_media_cdm__registration.cc b/www/ungoogled-chromium/files/patch-chrome_common_media_cdm__registration.cc
index 09aa43c53fc8..dbb16ddeef4b 100644
--- a/www/ungoogled-chromium/files/patch-chrome_common_media_cdm__registration.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_common_media_cdm__registration.cc
@@ -1,6 +1,6 @@
---- chrome/common/media/cdm_registration.cc.orig 2023-10-13 13:20:35 UTC
+--- chrome/common/media/cdm_registration.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/common/media/cdm_registration.cc
-@@ -25,11 +25,11 @@
+@@ -26,11 +26,11 @@
#if BUILDFLAG(ENABLE_WIDEVINE)
#include "third_party/widevine/cdm/widevine_cdm_common.h" // nogncheck
@@ -13,7 +13,7 @@
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
#include "base/no_destructor.h"
#include "chrome/common/media/component_widevine_cdm_hint_file_linux.h"
- #include "components/cdm/common/cdm_manifest.h"
+ #include "media/cdm/supported_audio_codecs.h"
@@ -56,7 +56,7 @@ using Robustness = content::CdmInfo::Robustness;
#if BUILDFLAG(ENABLE_WIDEVINE)
#if (BUILDFLAG(BUNDLE_WIDEVINE_CDM) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_common_pref__names.h b/www/ungoogled-chromium/files/patch-chrome_common_pref__names.h
index bf46f6f3f9a5..543263c9af41 100644
--- a/www/ungoogled-chromium/files/patch-chrome_common_pref__names.h
+++ b/www/ungoogled-chromium/files/patch-chrome_common_pref__names.h
@@ -1,6 +1,6 @@
---- chrome/common/pref_names.h.orig 2023-11-04 07:08:51 UTC
+--- chrome/common/pref_names.h.orig 2023-12-23 12:33:28 UTC
+++ chrome/common/pref_names.h
-@@ -1302,7 +1302,7 @@ inline constexpr char kUseAshProxy[] = "lacros.proxy.u
+@@ -1315,7 +1315,7 @@ inline constexpr char kUseAshProxy[] = "lacros.proxy.u
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
@@ -9,7 +9,7 @@
// Linux specific preference on whether we should match the system theme.
inline constexpr char kSystemTheme[] = "extensions.theme.system_theme";
#endif
-@@ -1428,7 +1428,7 @@ inline constexpr char kShowUpdatePromotionInfoBar[] =
+@@ -1441,7 +1441,7 @@ inline constexpr char kShowUpdatePromotionInfoBar[] =
"browser.show_update_promotion_info_bar";
#endif
@@ -18,8 +18,8 @@
// Boolean that is false if we should show window manager decorations. If
// true, we draw a custom chrome frame (thicker title bar and blue border).
inline constexpr char kUseCustomChromeFrame[] = "browser.custom_chrome_frame";
-@@ -1969,7 +1969,7 @@ inline constexpr char kDownloadLastCompleteTime[] =
- "download.last_complete_time";
+@@ -1989,7 +1989,7 @@ inline constexpr char kDownloadDefaultDirectory[] =
+ inline constexpr char kDownloadDirUpgraded[] = "download.directory_upgrade";
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
- BUILDFLAG(IS_MAC)
@@ -27,7 +27,7 @@
inline constexpr char kOpenPdfDownloadInSystemReader[] =
"download.open_pdf_in_system_reader";
#endif
-@@ -2380,14 +2380,14 @@ inline constexpr char kMediaStorageIdSalt[] = "media.s
+@@ -2407,14 +2407,14 @@ inline constexpr char kMediaStorageIdSalt[] = "media.s
inline constexpr char kMediaCdmOriginData[] = "media.cdm.origin_data";
#endif // BUILDFLAG(IS_WIN)
@@ -44,7 +44,7 @@
// Records whether the user has seen an HTTP auth "negotiate" header.
inline constexpr char kReceivedHttpAuthNegotiateHeader[] =
"net.received_http_auth_negotiate_headers";
-@@ -2465,7 +2465,7 @@ inline constexpr char kAmbientAuthenticationInPrivateM
+@@ -2492,7 +2492,7 @@ inline constexpr char kAmbientAuthenticationInPrivateM
inline constexpr char kBasicAuthOverHttpEnabled[] =
"auth.basic_over_http_enabled";
@@ -53,7 +53,7 @@
// Boolean that specifies whether OK-AS-DELEGATE flag from KDC is respected
// along with kAuthNegotiateDelegateAllowlist.
inline constexpr char kAuthNegotiateDelegateByKdcPolicy[] =
-@@ -3494,7 +3494,7 @@ inline constexpr char kFileOrDirectoryPickerWithoutGes
+@@ -3533,7 +3533,7 @@ inline constexpr char kFileOrDirectoryPickerWithoutGes
inline constexpr char kSandboxExternalProtocolBlocked[] =
"profile.sandbox_external_protocol_blocked";
@@ -62,7 +62,7 @@
// Boolean that indicates if system notifications are allowed to be used in
// place of Chrome notifications.
inline constexpr char kAllowSystemNotifications[] =
-@@ -3550,7 +3550,7 @@ inline constexpr char kCACertificateManagementAllowed[
+@@ -3589,7 +3589,7 @@ inline constexpr char kCACertificateManagementAllowed[
inline constexpr char kChromeRootStoreEnabled[] = "chrome_root_store_enabled";
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -71,7 +71,7 @@
inline constexpr char kEnforceLocalAnchorConstraintsEnabled[] =
"enforce_local_anchor_constraints_enabled";
#endif
-@@ -3864,7 +3864,7 @@ inline constexpr char kThrottleNonVisibleCrossOriginIf
+@@ -3903,7 +3903,7 @@ inline constexpr char kThrottleNonVisibleCrossOriginIf
inline constexpr char kNewBaseUrlInheritanceBehaviorAllowed[] =
"new_base_url_inheritance_behavior_allowed";
diff --git a/www/ungoogled-chromium/files/patch-chrome_common_url__constants.cc b/www/ungoogled-chromium/files/patch-chrome_common_url__constants.cc
deleted file mode 100644
index 9d7537447efa..000000000000
--- a/www/ungoogled-chromium/files/patch-chrome_common_url__constants.cc
+++ /dev/null
@@ -1,11 +0,0 @@
---- chrome/common/url_constants.cc.orig 2023-11-04 07:08:51 UTC
-+++ chrome/common/url_constants.cc
-@@ -592,7 +592,7 @@ const char kPhoneHubPermissionLearnMoreURL[] =
- "https://support.9oo91e.qjz9zk/chromebook/?p=multidevice";
-
- #if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
-- BUILDFLAG(IS_FUCHSIA)
-+ BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_BSD)
- const char kChromeAppsDeprecationLearnMoreURL[] =
- "https://support.9oo91e.qjz9zk/chrome/?p=chrome_app_deprecation";
- #endif
diff --git a/www/ungoogled-chromium/files/patch-chrome_common_url__constants.h b/www/ungoogled-chromium/files/patch-chrome_common_url__constants.h
index 49dd76750136..993a4c2de308 100644
--- a/www/ungoogled-chromium/files/patch-chrome_common_url__constants.h
+++ b/www/ungoogled-chromium/files/patch-chrome_common_url__constants.h
@@ -1,11 +1,11 @@
---- chrome/common/url_constants.h.orig 2023-11-04 07:08:51 UTC
+--- chrome/common/url_constants.h.orig 2023-12-23 12:33:28 UTC
+++ chrome/common/url_constants.h
-@@ -567,7 +567,7 @@ extern const char kOutdatedPluginLearnMoreURL[];
- extern const char kPhoneHubPermissionLearnMoreURL[];
+@@ -824,7 +824,7 @@ inline constexpr char kPhoneHubPermissionLearnMoreURL[
+ "https://support.9oo91e.qjz9zk/chromebook?p=multidevice";
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
- BUILDFLAG(IS_FUCHSIA)
+ BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_BSD)
-
// "Learn more" URL for the chrome apps deprecation dialog.
- extern const char kChromeAppsDeprecationLearnMoreURL[];
+ inline constexpr char kChromeAppsDeprecationLearnMoreURL[] =
+ "https://support.9oo91e.qjz9zk/chrome?p=chrome_app_deprecation";
diff --git a/www/ungoogled-chromium/files/patch-chrome_common_webui__url__constants.cc b/www/ungoogled-chromium/files/patch-chrome_common_webui__url__constants.cc
index 1f96669755a4..94aebdf4f32c 100644
--- a/www/ungoogled-chromium/files/patch-chrome_common_webui__url__constants.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_common_webui__url__constants.cc
@@ -1,6 +1,6 @@
---- chrome/common/webui_url_constants.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/common/webui_url_constants.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/common/webui_url_constants.cc
-@@ -499,18 +499,18 @@ const char kOsUIShortcutCustomizationAppURL[] = "os://
+@@ -512,18 +512,18 @@ const char kOsUIShortcutCustomizationAppURL[] = "os://
const char kOsUIVersionURL[] = "os://version";
#endif
@@ -22,7 +22,7 @@
const char kChromeUIDiscardsHost[] = "discards";
const char kChromeUIDiscardsURL[] = "chrome://discards/";
#endif
-@@ -525,14 +525,14 @@ const char kChromeUILinuxProxyConfigHost[] = "linux-pr
+@@ -538,14 +538,14 @@ const char kChromeUILinuxProxyConfigHost[] = "linux-pr
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
@@ -39,7 +39,7 @@
const char kChromeUIBrowserSwitchHost[] = "browser-switch";
const char kChromeUIBrowserSwitchURL[] = "chrome://browser-switch/";
const char kChromeUIEnterpriseProfileWelcomeHost[] =
-@@ -551,7 +551,7 @@ const char kChromeUIProfilePickerUrl[] = "chrome://pro
+@@ -564,7 +564,7 @@ const char kChromeUIProfilePickerUrl[] = "chrome://pro
const char kChromeUIProfilePickerStartupQuery[] = "startup";
#endif
@@ -48,7 +48,7 @@
defined(TOOLKIT_VIEWS)) || \
defined(USE_AURA)
const char kChromeUITabModalConfirmDialogHost[] = "tab-modal-confirm-dialog";
-@@ -636,7 +636,7 @@ const char kCookiesSubPagePath[] = "/cookies";
+@@ -650,7 +650,7 @@ const char kCookiesSubPagePath[] = "/cookies";
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -57,7 +57,7 @@
const char kChromeUIWebAppSettingsURL[] = "chrome://app-settings/";
const char kChromeUIWebAppSettingsHost[] = "app-settings";
#endif
-@@ -870,7 +870,7 @@ const char* const kChromeDebugURLs[] = {
+@@ -890,7 +890,7 @@ const char* const kChromeDebugURLs[] = {
blink::kChromeUIGpuJavaCrashURL,
kChromeUIJavaCrashURL,
#endif
diff --git a/www/ungoogled-chromium/files/patch-chrome_common_webui__url__constants.h b/www/ungoogled-chromium/files/patch-chrome_common_webui__url__constants.h
index f87de4d1c752..6da30d328a94 100644
--- a/www/ungoogled-chromium/files/patch-chrome_common_webui__url__constants.h
+++ b/www/ungoogled-chromium/files/patch-chrome_common_webui__url__constants.h
@@ -1,6 +1,6 @@
---- chrome/common/webui_url_constants.h.orig 2023-11-04 07:08:51 UTC
+--- chrome/common/webui_url_constants.h.orig 2023-12-23 12:33:28 UTC
+++ chrome/common/webui_url_constants.h
-@@ -418,24 +418,24 @@ extern const char kOsUIShortcutCustomizationAppURL[];
+@@ -426,24 +426,24 @@ extern const char kOsUIShortcutCustomizationAppURL[];
extern const char kOsUIVersionURL[];
#endif
@@ -29,7 +29,7 @@
extern const char kChromeUIWebAppSettingsURL[];
extern const char kChromeUIWebAppSettingsHost[];
#endif
-@@ -450,7 +450,7 @@ extern const char kChromeUILinuxProxyConfigHost[];
+@@ -458,7 +458,7 @@ extern const char kChromeUILinuxProxyConfigHost[];
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
@@ -38,7 +38,7 @@
extern const char kChromeUISandboxHost[];
#endif
-@@ -462,7 +462,7 @@ extern const char kChromeUISearchEngineChoiceHost[];
+@@ -470,7 +470,7 @@ extern const char kChromeUISearchEngineChoiceHost[];
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_FUCHSIA) || \
@@ -47,7 +47,7 @@
extern const char kChromeUIBrowserSwitchHost[];
extern const char kChromeUIBrowserSwitchURL[];
extern const char kChromeUIEnterpriseProfileWelcomeHost[];
-@@ -478,7 +478,7 @@ extern const char kChromeUIProfilePickerUrl[];
+@@ -486,7 +486,7 @@ extern const char kChromeUIProfilePickerUrl[];
extern const char kChromeUIProfilePickerStartupQuery[];
#endif
diff --git a/www/ungoogled-chromium/files/patch-chrome_services_printing_print__backend__service__impl.cc b/www/ungoogled-chromium/files/patch-chrome_services_printing_print__backend__service__impl.cc
index 7806d175db0d..1d31d0fb8ce0 100644
--- a/www/ungoogled-chromium/files/patch-chrome_services_printing_print__backend__service__impl.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_services_printing_print__backend__service__impl.cc
@@ -1,4 +1,4 @@
---- chrome/services/printing/print_backend_service_impl.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/services/printing/print_backend_service_impl.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/services/printing/print_backend_service_impl.cc
@@ -46,7 +46,7 @@
#include "printing/backend/cups_connection_pool.h"
@@ -36,7 +36,7 @@
// Test framework already initializes the UI, so this should not go in
// `InitCommon()`. Additionally, low-level Linux UI is not needed when tests
// are using `TestPrintingContext`.
-@@ -680,7 +680,7 @@ void PrintBackendServiceImpl::UpdatePrintSettings(
+@@ -681,7 +681,7 @@ void PrintBackendServiceImpl::UpdatePrintSettings(
crash_keys_ = std::make_unique<crash_keys::ScopedPrinterInfo>(
print_backend_->GetPrinterDriverInfo(*printer_name));
diff --git a/www/ungoogled-chromium/files/patch-chrome_test_BUILD.gn b/www/ungoogled-chromium/files/patch-chrome_test_BUILD.gn
index ff007a7aa0b2..17f16dee3f92 100644
--- a/www/ungoogled-chromium/files/patch-chrome_test_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-chrome_test_BUILD.gn
@@ -1,6 +1,6 @@
---- chrome/test/BUILD.gn.orig 2023-11-04 07:08:51 UTC
+--- chrome/test/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ chrome/test/BUILD.gn
-@@ -11050,7 +11050,7 @@ test("chrome_app_unittests") {
+@@ -11236,7 +11236,7 @@ test("chrome_app_unittests") {
"//components/heap_profiling/in_process",
"//components/safe_browsing:buildflags",
]
diff --git a/www/ungoogled-chromium/files/patch-chrome_test_chromedriver_chrome__launcher.cc b/www/ungoogled-chromium/files/patch-chrome_test_chromedriver_chrome__launcher.cc
index d7afbadaad7c..0a914f8feef1 100644
--- a/www/ungoogled-chromium/files/patch-chrome_test_chromedriver_chrome__launcher.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_test_chromedriver_chrome__launcher.cc
@@ -1,6 +1,6 @@
---- chrome/test/chromedriver/chrome_launcher.cc.orig 2023-09-17 07:59:53 UTC
+--- chrome/test/chromedriver/chrome_launcher.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/test/chromedriver/chrome_launcher.cc
-@@ -69,6 +69,7 @@
+@@ -73,6 +73,7 @@
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
diff --git a/www/ungoogled-chromium/files/patch-chrome_test_chromedriver_chrome_chrome__finder.cc b/www/ungoogled-chromium/files/patch-chrome_test_chromedriver_chrome_chrome__finder.cc
index f5fe40c84bdf..bc1ef57596ed 100644
--- a/www/ungoogled-chromium/files/patch-chrome_test_chromedriver_chrome_chrome__finder.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_test_chromedriver_chrome_chrome__finder.cc
@@ -1,6 +1,6 @@
---- chrome/test/chromedriver/chrome/chrome_finder.cc.orig 2023-08-04 12:10:50 UTC
+--- chrome/test/chromedriver/chrome/chrome_finder.cc.orig 2023-12-23 12:33:28 UTC
+++ chrome/test/chromedriver/chrome/chrome_finder.cc
-@@ -57,7 +57,7 @@ void GetApplicationDirs(std::vector<base::FilePath>* l
+@@ -58,7 +58,7 @@ void GetApplicationDirs(std::vector<base::FilePath>* l
installation_locations[i].Append(L"Chromium\\Application"));
}
}
@@ -9,12 +9,21 @@
void GetApplicationDirs(std::vector<base::FilePath>* locations) {
// TODO: Respect users' PATH variables.
// Until then, we use an approximation of the most common defaults.
-@@ -157,7 +157,7 @@ bool FindChrome(base::FilePath* browser_exe) {
- base::FilePath(chrome::kGoogleChromeForTestingBrowserProcessExecutablePath),
- base::FilePath(chrome::kGoogleChromeBrowserProcessExecutablePath),
- base::FilePath(chrome::kChromiumBrowserProcessExecutablePath),
+@@ -125,7 +125,7 @@ std::vector<base::FilePath> GetChromeProgramNames() {
+ chrome::kGoogleChromeForTestingBrowserProcessExecutablePath),
+ base::FilePath(chrome::kGoogleChromeBrowserProcessExecutablePath),
+ base::FilePath(chrome::kChromiumBrowserProcessExecutablePath),
-#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
base::FilePath(chrome::kBrowserProcessExecutablePath),
- base::FilePath("chrome"), // Chrome for Testing or Google Chrome
- base::FilePath("google-chrome"),
+ base::FilePath("chrome"), // Chrome for Testing or Google Chrome
+ base::FilePath("google-chrome"), base::FilePath("chromium"),
+@@ -141,7 +141,7 @@ std::vector<base::FilePath> GetHeadlessShellProgramNam
+ return {
+ #if BUILDFLAG(IS_WIN)
+ base::FilePath(FILE_PATH_LITERAL("chrome-headless-shell.exe")),
+-#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_CHROMEOS)
++#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
+ base::FilePath("chrome-headless-shell"),
+ #else
+ // it will compile but won't work on other OSes
diff --git a/www/ungoogled-chromium/files/patch-components_autofill_core_browser_data__model_autofill__i18n__api.h b/www/ungoogled-chromium/files/patch-components_autofill_core_browser_data__model_autofill__i18n__api.h
new file mode 100644
index 000000000000..05ca4c2bc4c1
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-components_autofill_core_browser_data__model_autofill__i18n__api.h
@@ -0,0 +1,16 @@
+--- components/autofill/core/browser/data_model/autofill_i18n_api.h.orig 2023-12-23 12:33:28 UTC
++++ components/autofill/core/browser/data_model/autofill_i18n_api.h
+@@ -15,9 +15,13 @@ namespace autofill::i18n_model_definition {
+ // Country code that represents autofill's legacy address hierarchy model as
+ // stored `kAutofillModelRules`. As a workaround for GCC we declare the
+ // std::string constexpr first.
++// XXX
++#if 0
+ constexpr inline std::string kLegacyHierarchyCountryCodeString{"XX"};
+ constexpr AddressCountryCode kLegacyHierarchyCountryCode =
+ AddressCountryCode(kLegacyHierarchyCountryCodeString);
++#endif
++inline AddressCountryCode kLegacyHierarchyCountryCode = AddressCountryCode("XX");
+
+ // Creates an instance of the address hierarchy model corresponding to the
+ // provided country. All the nodes have empty values, except for the country
diff --git a/www/ungoogled-chromium/files/patch-components_autofill_core_browser_payments_iban__save__manager.cc b/www/ungoogled-chromium/files/patch-components_autofill_core_browser_payments_iban__save__manager.cc
index e22d0f05900f..d47301736a88 100644
--- a/www/ungoogled-chromium/files/patch-components_autofill_core_browser_payments_iban__save__manager.cc
+++ b/www/ungoogled-chromium/files/patch-components_autofill_core_browser_payments_iban__save__manager.cc
@@ -1,14 +1,14 @@
---- components/autofill/core/browser/payments/iban_save_manager.cc.orig 2023-11-04 07:08:51 UTC
+--- components/autofill/core/browser/payments/iban_save_manager.cc.orig 2023-12-23 12:33:28 UTC
+++ components/autofill/core/browser/payments/iban_save_manager.cc
-@@ -63,7 +63,11 @@ bool IbanSaveManager::AttemptToOfferIbanLocalSave(
- bool IbanSaveManager::ShouldOfferLocalSave(const Iban& iban_import_candidate) {
- // Only offer to save new IBANs. Users can go to the payment methods settings
- // page to update existing IBANs if desired.
+@@ -123,7 +123,11 @@ bool IbanSaveManager::ShouldOfferUploadSave(
+
+ // Offer server save for this IBAN if it doesn't already match an existing
+ // server IBAN.
+#if (_LIBCPP_VERSION >= 160000)
return std::ranges::none_of(
+#else
+ return base::ranges::none_of(
+#endif
- personal_data_manager_->GetLocalIbans(), [&](const auto& iban) {
- return iban->value() == iban_import_candidate.value();
- });
+ personal_data_manager_->GetServerIbans(),
+ [&iban_import_candidate](const auto& iban) {
+ return iban->MatchesPrefixSuffixAndLength(iban_import_candidate);
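
The reworked hunk shows the second recurring workaround in this update: gate on `_LIBCPP_VERSION` and fall back to `base::ranges::none_of` (Chromium's backfill for the C++20 range overloads) when the system libc++ predates release 16, encoded as 160000, which the check treats as the first version with usable std::ranges algorithms. The same gate in standalone form, with the classic iterator algorithm standing in for the Chromium shim:

    #include <algorithm>
    #include <vector>

    bool ContainsNoZeros(const std::vector<int>& values) {
    #if defined(_LIBCPP_VERSION) && _LIBCPP_VERSION >= 160000
      // libc++ 16+: the C++20 ranges algorithm is available.
      return std::ranges::none_of(values, [](int v) { return v == 0; });
    #else
      // Older libc++ (or another stdlib): classic iterator form; the real
      // patch calls base::ranges::none_of here instead.
      return std::none_of(values.begin(), values.end(),
                          [](int v) { return v == 0; });
    #endif
    }

    int main() { return ContainsNoZeros({1, 2, 3}) ? 0 : 1; }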
diff --git a/www/ungoogled-chromium/files/patch-components_autofill_core_browser_personal__data__manager.cc b/www/ungoogled-chromium/files/patch-components_autofill_core_browser_personal__data__manager.cc
index 141a7d3f9218..52b6b0d2a2db 100644
--- a/www/ungoogled-chromium/files/patch-components_autofill_core_browser_personal__data__manager.cc
+++ b/www/ungoogled-chromium/files/patch-components_autofill_core_browser_personal__data__manager.cc
@@ -1,6 +1,6 @@
---- components/autofill/core/browser/personal_data_manager.cc.orig 2023-11-04 07:08:51 UTC
+--- components/autofill/core/browser/personal_data_manager.cc.orig 2023-12-23 12:33:28 UTC
+++ components/autofill/core/browser/personal_data_manager.cc
-@@ -2384,7 +2384,8 @@ bool PersonalDataManager::ShouldShowCardsFromAccountOp
+@@ -2397,7 +2397,8 @@ bool PersonalDataManager::ShouldShowCardsFromAccountOp
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS) || \
diff --git a/www/ungoogled-chromium/files/patch-components_autofill_core_common_autofill__payments__features.cc b/www/ungoogled-chromium/files/patch-components_autofill_core_common_autofill__payments__features.cc
index 2d1ddf848021..fe704330166f 100644
--- a/www/ungoogled-chromium/files/patch-components_autofill_core_common_autofill__payments__features.cc
+++ b/www/ungoogled-chromium/files/patch-components_autofill_core_common_autofill__payments__features.cc
@@ -1,6 +1,6 @@
---- components/autofill/core/common/autofill_payments_features.cc.orig 2023-11-04 07:08:51 UTC
+--- components/autofill/core/common/autofill_payments_features.cc.orig 2023-12-23 12:33:28 UTC
+++ components/autofill/core/common/autofill_payments_features.cc
-@@ -292,7 +292,7 @@ BASE_FEATURE(kEnablePixPayments,
+@@ -261,7 +261,7 @@ BASE_FEATURE(kEnablePixPayments,
bool ShouldShowImprovedUserConsentForCreditCardSave() {
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
diff --git a/www/ungoogled-chromium/files/patch-components_autofill_core_common_autofill__util.cc b/www/ungoogled-chromium/files/patch-components_autofill_core_common_autofill__util.cc
index 4cc1038d08e9..867c199e0b4d 100644
--- a/www/ungoogled-chromium/files/patch-components_autofill_core_common_autofill__util.cc
+++ b/www/ungoogled-chromium/files/patch-components_autofill_core_common_autofill__util.cc
@@ -1,6 +1,6 @@
---- components/autofill/core/common/autofill_util.cc.orig 2023-11-04 07:08:51 UTC
+--- components/autofill/core/common/autofill_util.cc.orig 2023-12-23 12:33:28 UTC
+++ components/autofill/core/common/autofill_util.cc
-@@ -133,7 +133,7 @@ bool SanitizedFieldIsEmpty(const std::u16string& value
+@@ -207,7 +207,7 @@ size_t LevenshteinDistance(std::u16string_view a,
bool ShouldAutoselectFirstSuggestionOnArrowDown() {
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || \
diff --git a/www/ungoogled-chromium/files/patch-components_commerce_core_commerce__feature__list.cc b/www/ungoogled-chromium/files/patch-components_commerce_core_commerce__feature__list.cc
new file mode 100644
index 000000000000..a2e7b9dec04b
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-components_commerce_core_commerce__feature__list.cc
@@ -0,0 +1,11 @@
+--- components/commerce/core/commerce_feature_list.cc.orig 2023-12-23 12:33:28 UTC
++++ components/commerce/core/commerce_feature_list.cc
+@@ -225,7 +225,7 @@ BASE_FEATURE(kShoppingCollection,
+
+ BASE_FEATURE(kShoppingList, "ShoppingList", base::FEATURE_DISABLED_BY_DEFAULT);
+ #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || \
+- BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_IOS)
++ BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_IOS) || BUILDFLAG(IS_BSD)
+ BASE_FEATURE(kShoppingListRegionLaunched,
+ "ShoppingListRegionLaunched",
+ base::FEATURE_ENABLED_BY_DEFAULT);
diff --git a/www/ungoogled-chromium/files/patch-components_crash_core_app_BUILD.gn b/www/ungoogled-chromium/files/patch-components_crash_core_app_BUILD.gn
index 0dbf38f882d3..244f0a27dc6c 100644
--- a/www/ungoogled-chromium/files/patch-components_crash_core_app_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-components_crash_core_app_BUILD.gn
@@ -1,6 +1,6 @@
---- components/crash/core/app/BUILD.gn.orig 2023-09-17 07:59:53 UTC
+--- components/crash/core/app/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ components/crash/core/app/BUILD.gn
-@@ -72,7 +72,7 @@ static_library("app") {
+@@ -76,7 +76,7 @@ static_library("app") {
"crashpad.h",
]
@@ -9,7 +9,7 @@
sources += [ "crashpad_linux.cc" ]
}
-@@ -85,6 +85,10 @@ static_library("app") {
+@@ -89,6 +89,10 @@ static_library("app") {
]
}
@@ -20,7 +20,7 @@
defines = [ "CRASH_IMPLEMENTATION" ]
public_deps = [ ":lib" ]
-@@ -113,7 +117,7 @@ static_library("app") {
+@@ -117,7 +121,7 @@ static_library("app") {
libs = [ "log" ]
}
@@ -29,7 +29,7 @@
deps += [
"//base:base_static",
"//components/crash/core/common",
-@@ -241,11 +245,6 @@ if (is_mac || is_android || is_linux || is_chromeos) {
+@@ -245,11 +249,6 @@ if (is_mac || is_android || is_linux || is_chromeos) {
# define custom UserStreamDataSources.
executable("chrome_crashpad_handler") {
sources = [ "chrome_crashpad_handler.cc" ]
diff --git a/www/ungoogled-chromium/files/patch-components_discardable__memory_service_discardable__shared__memory__manager.cc b/www/ungoogled-chromium/files/patch-components_discardable__memory_service_discardable__shared__memory__manager.cc
index 3cf12b8ef80a..c275c1f059b2 100644
--- a/www/ungoogled-chromium/files/patch-components_discardable__memory_service_discardable__shared__memory__manager.cc
+++ b/www/ungoogled-chromium/files/patch-components_discardable__memory_service_discardable__shared__memory__manager.cc
@@ -1,6 +1,6 @@
---- components/discardable_memory/service/discardable_shared_memory_manager.cc.orig 2022-10-01 07:40:07 UTC
+--- components/discardable_memory/service/discardable_shared_memory_manager.cc.orig 2023-12-23 12:33:28 UTC
+++ components/discardable_memory/service/discardable_shared_memory_manager.cc
-@@ -171,7 +171,7 @@ uint64_t GetDefaultMemoryLimit() {
+@@ -172,7 +172,7 @@ uint64_t GetDefaultMemoryLimit() {
// Limits the number of FDs used to 32, assuming a 4MB allocation size.
uint64_t max_default_memory_limit = 128 * kMegabyte;
#else
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_eye__dropper_eye__dropper__view.cc b/www/ungoogled-chromium/files/patch-components_eye__dropper_eye__dropper__view.cc
index f8a59eb2b2b9..e782b45df690 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_eye__dropper_eye__dropper__view.cc
+++ b/www/ungoogled-chromium/files/patch-components_eye__dropper_eye__dropper__view.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/views/eye_dropper/eye_dropper_view.cc.orig 2023-10-13 13:20:35 UTC
-+++ chrome/browser/ui/views/eye_dropper/eye_dropper_view.cc
-@@ -178,7 +178,7 @@ EyeDropperView::EyeDropperView(content::RenderFrameHos
+--- components/eye_dropper/eye_dropper_view.cc.orig 2023-12-23 12:33:28 UTC
++++ components/eye_dropper/eye_dropper_view.cc
+@@ -196,7 +196,7 @@ EyeDropperView::EyeDropperView(gfx::NativeView parent,
// EyeDropper/WidgetDelegate.
set_owned_by_client();
SetPreferredSize(GetSize());
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_eye__dropper_eye__dropper__view__aura.cc b/www/ungoogled-chromium/files/patch-components_eye__dropper_eye__dropper__view__aura.cc
index 49432a61fa25..659f4f175b2e 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_eye__dropper_eye__dropper__view__aura.cc
+++ b/www/ungoogled-chromium/files/patch-components_eye__dropper_eye__dropper__view__aura.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/views/eye_dropper/eye_dropper_view_aura.cc.orig 2023-10-13 13:20:35 UTC
-+++ chrome/browser/ui/views/eye_dropper/eye_dropper_view_aura.cc
-@@ -127,7 +127,7 @@ void EyeDropperView::MoveViewToFront() {
+--- components/eye_dropper/eye_dropper_view_aura.cc.orig 2023-12-23 12:33:28 UTC
++++ components/eye_dropper/eye_dropper_view_aura.cc
+@@ -129,7 +129,7 @@ void EyeDropperView::MoveViewToFront() {
}
void EyeDropperView::CaptureInputIfNeeded() {
diff --git a/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__configurations.cc b/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__configurations.cc
index 3ebb8f5ffde3..356461cb3726 100644
--- a/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__configurations.cc
+++ b/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__configurations.cc
@@ -1,6 +1,6 @@
---- components/feature_engagement/public/feature_configurations.cc.orig 2023-11-04 07:08:51 UTC
+--- components/feature_engagement/public/feature_configurations.cc.orig 2023-12-23 12:33:28 UTC
+++ components/feature_engagement/public/feature_configurations.cc
-@@ -46,7 +46,7 @@ FeatureConfig CreateAlwaysTriggerConfig(const base::Fe
+@@ -49,7 +49,7 @@ FeatureConfig CreateAlwaysTriggerConfig(const base::Fe
absl::optional<FeatureConfig> GetClientSideFeatureConfig(
const base::Feature* feature) {
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || \
@@ -9,7 +9,7 @@
if (kIPHPasswordsAccountStorageFeature.name == feature->name) {
absl::optional<FeatureConfig> config = FeatureConfig();
config->valid = true;
-@@ -1399,7 +1399,8 @@ absl::optional<FeatureConfig> GetClientSideFeatureConf
+@@ -1440,7 +1440,8 @@ absl::optional<FeatureConfig> GetClientSideFeatureConf
#endif // BUILDFLAG(IS_ANDROID)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || \
diff --git a/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__constants.cc b/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__constants.cc
index 505a4391a27d..24f3ebbfb923 100644
--- a/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__constants.cc
+++ b/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__constants.cc
@@ -1,4 +1,4 @@
---- components/feature_engagement/public/feature_constants.cc.orig 2023-11-04 07:08:51 UTC
+--- components/feature_engagement/public/feature_constants.cc.orig 2023-12-23 12:33:28 UTC
+++ components/feature_engagement/public/feature_constants.cc
@@ -21,7 +21,7 @@ BASE_FEATURE(kUseClientConfigIPH,
BASE_FEATURE(kIPHDummyFeature, "IPH_Dummy", base::FEATURE_DISABLED_BY_DEFAULT);
@@ -9,7 +9,7 @@
BASE_FEATURE(kIPHBatterySaverModeFeature,
"IPH_BatterySaverMode",
base::FEATURE_ENABLED_BY_DEFAULT);
-@@ -539,7 +539,7 @@ constexpr base::FeatureParam<int> kDefaultBrowserEligi
+@@ -560,7 +560,7 @@ constexpr base::FeatureParam<int> kDefaultBrowserEligi
/*default_value=*/365};
#endif // BUILDFLAG(IS_IOS)
diff --git a/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__constants.h b/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__constants.h
index 2a14dfd76bf3..c4632760c9e7 100644
--- a/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__constants.h
+++ b/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__constants.h
@@ -1,4 +1,4 @@
---- components/feature_engagement/public/feature_constants.h.orig 2023-11-04 07:08:51 UTC
+--- components/feature_engagement/public/feature_constants.h.orig 2023-12-23 12:33:28 UTC
+++ components/feature_engagement/public/feature_constants.h
@@ -24,7 +24,7 @@ BASE_DECLARE_FEATURE(kUseClientConfigIPH);
BASE_DECLARE_FEATURE(kIPHDummyFeature);
@@ -9,7 +9,7 @@
BASE_DECLARE_FEATURE(kIPHBatterySaverModeFeature);
BASE_DECLARE_FEATURE(kIPHCompanionSidePanelFeature);
BASE_DECLARE_FEATURE(kIPHCompanionSidePanelRegionSearchFeature);
-@@ -226,7 +226,7 @@ extern const base::FeatureParam<int>
+@@ -233,7 +233,7 @@ extern const base::FeatureParam<int>
kDefaultBrowserEligibilitySlidingWindowParam;
#endif // BUILDFLAG(IS_IOS)
diff --git a/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__list.cc b/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__list.cc
index b75766a36223..525645b64e51 100644
--- a/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__list.cc
+++ b/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__list.cc
@@ -1,7 +1,7 @@
---- components/feature_engagement/public/feature_list.cc.orig 2023-11-04 07:08:51 UTC
+--- components/feature_engagement/public/feature_list.cc.orig 2023-12-23 12:33:28 UTC
+++ components/feature_engagement/public/feature_list.cc
-@@ -143,7 +143,7 @@ const base::Feature* const kAllFeatures[] = {
- &kIPHiOSChoiceScreenFeature,
+@@ -146,7 +146,7 @@ const base::Feature* const kAllFeatures[] = {
+ &kIPHiOSParcelTrackingFeature,
#endif // BUILDFLAG(IS_IOS)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || \
- BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA)
@@ -9,7 +9,7 @@
&kIPHBatterySaverModeFeature,
&kIPHCompanionSidePanelFeature,
&kIPHCompanionSidePanelRegionSearchFeature,
-@@ -189,7 +189,7 @@ const base::Feature* const kAllFeatures[] = {
+@@ -196,7 +196,7 @@ const base::Feature* const kAllFeatures[] = {
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) ||
// BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA)
diff --git a/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__list.h b/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__list.h
index 43ccdebd8796..6106a2312f83 100644
--- a/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__list.h
+++ b/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__list.h
@@ -1,6 +1,6 @@
---- components/feature_engagement/public/feature_list.h.orig 2023-11-04 07:08:51 UTC
+--- components/feature_engagement/public/feature_list.h.orig 2023-12-23 12:33:28 UTC
+++ components/feature_engagement/public/feature_list.h
-@@ -265,7 +265,7 @@ DEFINE_VARIATION_PARAM(kIPHiOSChoiceScreenFeature,
+@@ -269,7 +269,7 @@ DEFINE_VARIATION_PARAM(kIPHiOSParcelTrackingFeature,
#endif // BUILDFLAG(IS_IOS)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || \
@@ -9,7 +9,7 @@
DEFINE_VARIATION_PARAM(kIPHBatterySaverModeFeature, "IPH_BatterySaverMode");
DEFINE_VARIATION_PARAM(kIPHCompanionSidePanelFeature, "IPH_CompanionSidePanel");
DEFINE_VARIATION_PARAM(kIPHCompanionSidePanelRegionSearchFeature,
-@@ -340,7 +340,7 @@ DEFINE_VARIATION_PARAM(kIPHBackNavigationMenuFeature,
+@@ -352,7 +352,7 @@ DEFINE_VARIATION_PARAM(kIPHBackNavigationMenuFeature,
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) ||
// BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA)
@@ -18,16 +18,16 @@
BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA)
DEFINE_VARIATION_PARAM(kIPHAutofillExternalAccountProfileSuggestionFeature,
"IPH_AutofillExternalAccountProfileSuggestion");
-@@ -552,7 +552,7 @@ constexpr flags_ui::FeatureEntry::FeatureVariation
- VARIATION_ENTRY(kIPHiOSPromoPasswordManagerWidgetFeature),
+@@ -567,7 +567,7 @@ constexpr flags_ui::FeatureEntry::FeatureVariation
VARIATION_ENTRY(kIPHiOSChoiceScreenFeature),
+ VARIATION_ENTRY(kIPHiOSParcelTrackingFeature),
#elif BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
- BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA)
+ BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_BSD)
VARIATION_ENTRY(kIPHBatterySaverModeFeature),
VARIATION_ENTRY(kIPHCompanionSidePanelFeature),
VARIATION_ENTRY(kIPHCompanionSidePanelRegionSearchFeature),
-@@ -599,7 +599,7 @@ constexpr flags_ui::FeatureEntry::FeatureVariation
+@@ -617,7 +617,7 @@ constexpr flags_ui::FeatureEntry::FeatureVariation
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) ||
// BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA)
diff --git a/www/ungoogled-chromium/files/patch-components_feed_core_v2_feed__network__impl__unittest.cc b/www/ungoogled-chromium/files/patch-components_feed_core_v2_feed__network__impl__unittest.cc
index aea00b6d588e..ad28195004ea 100644
--- a/www/ungoogled-chromium/files/patch-components_feed_core_v2_feed__network__impl__unittest.cc
+++ b/www/ungoogled-chromium/files/patch-components_feed_core_v2_feed__network__impl__unittest.cc
@@ -1,6 +1,6 @@
---- components/feed/core/v2/feed_network_impl_unittest.cc.orig 2023-10-13 13:20:35 UTC
+--- components/feed/core/v2/feed_network_impl_unittest.cc.orig 2023-12-23 12:33:28 UTC
+++ components/feed/core/v2/feed_network_impl_unittest.cc
-@@ -753,8 +753,8 @@ TEST_F(FeedNetworkTest, SendApiRequest_DecodesClientIn
+@@ -781,8 +781,8 @@ TEST_F(FeedNetworkTest, SendApiRequest_DecodesClientIn
EXPECT_EQ(feedwire::ClientInfo::CHROME_ANDROID, client_info.app_type());
EXPECT_EQ(feedwire::Version::RELEASE, client_info.app_version().build_type());
diff --git a/www/ungoogled-chromium/files/patch-components_miracle__parameter_common_public_miracle__parameter.h b/www/ungoogled-chromium/files/patch-components_miracle__parameter_common_public_miracle__parameter.h
deleted file mode 100644
index 7b8cff66cc02..000000000000
--- a/www/ungoogled-chromium/files/patch-components_miracle__parameter_common_public_miracle__parameter.h
+++ /dev/null
@@ -1,12 +0,0 @@
---- components/miracle_parameter/common/public/miracle_parameter.h.orig 2023-11-04 07:08:51 UTC
-+++ components/miracle_parameter/common/public/miracle_parameter.h
-@@ -93,7 +93,8 @@ class MiracleParameter {
- template <>
- class MiracleParameter<std::string> : public MiracleParameterBase<std::string> {
- public:
-- constexpr MiracleParameter(const base::Feature* feature,
-+ // XXXROBERT
-+ MiracleParameter(const base::Feature* feature,
- const char* param_name,
- std::string default_value)
- : MiracleParameterBase(feature, param_name, std::move(default_value)) {}
diff --git a/www/ungoogled-chromium/files/patch-components_neterror_resources_neterror.js b/www/ungoogled-chromium/files/patch-components_neterror_resources_neterror.js
index 58705c0a3222..a15572eb9d3f 100644
--- a/www/ungoogled-chromium/files/patch-components_neterror_resources_neterror.js
+++ b/www/ungoogled-chromium/files/patch-components_neterror_resources_neterror.js
@@ -1,6 +1,6 @@
---- components/neterror/resources/neterror.js.orig 2023-02-11 09:11:04 UTC
+--- components/neterror/resources/neterror.js.orig 2023-12-23 12:33:28 UTC
+++ components/neterror/resources/neterror.js
-@@ -137,7 +137,7 @@ function detailsButtonClick() {
+@@ -141,7 +141,7 @@ function detailsButtonClick() {
let primaryControlOnLeft = true;
// clang-format off
diff --git a/www/ungoogled-chromium/files/patch-components_optimization__guide_core_optimization__guide__util.cc b/www/ungoogled-chromium/files/patch-components_optimization__guide_core_optimization__guide__util.cc
index 45efd8ec833a..77d43e6cdc4e 100644
--- a/www/ungoogled-chromium/files/patch-components_optimization__guide_core_optimization__guide__util.cc
+++ b/www/ungoogled-chromium/files/patch-components_optimization__guide_core_optimization__guide__util.cc
@@ -1,4 +1,4 @@
---- components/optimization_guide/core/optimization_guide_util.cc.orig 2023-11-11 14:10:41 UTC
+--- components/optimization_guide/core/optimization_guide_util.cc.orig 2023-12-23 12:33:28 UTC
+++ components/optimization_guide/core/optimization_guide_util.cc
@@ -34,7 +34,7 @@ optimization_guide::proto::Platform GetPlatform() {
return optimization_guide::proto::PLATFORM_CHROMEOS;
@@ -8,4 +8,4 @@
+#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
return optimization_guide::proto::PLATFORM_LINUX;
#else
- return optimization_guide::proto::PLATFORM_UNKNOWN;
+ return optimization_guide::proto::PLATFORM_UNDEFINED;
diff --git a/www/ungoogled-chromium/files/patch-components_password__manager_core_browser_features_password__features.cc b/www/ungoogled-chromium/files/patch-components_password__manager_core_browser_features_password__features.cc
index 59ecb2079014..3ae8b65fbabd 100644
--- a/www/ungoogled-chromium/files/patch-components_password__manager_core_browser_features_password__features.cc
+++ b/www/ungoogled-chromium/files/patch-components_password__manager_core_browser_features_password__features.cc
@@ -1,11 +1,20 @@
---- components/password_manager/core/browser/features/password_features.cc.orig 2023-11-04 07:08:51 UTC
+--- components/password_manager/core/browser/features/password_features.cc.orig 2023-12-23 12:33:28 UTC
+++ components/password_manager/core/browser/features/password_features.cc
-@@ -20,7 +20,7 @@ BASE_FEATURE(kBiometricTouchToFill,
+@@ -8,7 +8,7 @@
+
+ namespace password_manager::features {
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
+ // Enables attaching password manager and autofill internals logs to an Autofill
+ // Rater Extension Report.
+ BASE_FEATURE(kAttachLogsToAutofillRaterExtensionReport,
+@@ -30,7 +30,7 @@ BASE_FEATURE(kBiometricTouchToFill,
// Delete undecryptable passwords from the store when Sync is active.
BASE_FEATURE(kClearUndecryptablePasswordsOnSync,
"ClearUndecryptablePasswordsInSync",
--#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
-+#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+-#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_IOS)
++#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_IOS) || BUILDFLAG(IS_BSD)
base::FEATURE_ENABLED_BY_DEFAULT
#else
base::FEATURE_DISABLED_BY_DEFAULT
diff --git a/www/ungoogled-chromium/files/patch-components_password__manager_core_browser_features_password__features.h b/www/ungoogled-chromium/files/patch-components_password__manager_core_browser_features_password__features.h
new file mode 100644
index 000000000000..7ab44347a8cd
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-components_password__manager_core_browser_features_password__features.h
@@ -0,0 +1,11 @@
+--- components/password_manager/core/browser/features/password_features.h.orig 2023-12-23 12:33:28 UTC
++++ components/password_manager/core/browser/features/password_features.h
+@@ -15,7 +15,7 @@ namespace password_manager::features {
+ // All features in alphabetical order. The features should be documented
+ // alongside the definition of their values in the .cc file.
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
+ BASE_DECLARE_FEATURE(kAttachLogsToAutofillRaterExtensionReport);
+ #endif
+
diff --git a/www/ungoogled-chromium/files/patch-components_password__manager_core_browser_login__database__unittest.cc b/www/ungoogled-chromium/files/patch-components_password__manager_core_browser_login__database__unittest.cc
index 828cbf6eecab..82b928d178d8 100644
--- a/www/ungoogled-chromium/files/patch-components_password__manager_core_browser_login__database__unittest.cc
+++ b/www/ungoogled-chromium/files/patch-components_password__manager_core_browser_login__database__unittest.cc
@@ -1,6 +1,6 @@
---- components/password_manager/core/browser/login_database_unittest.cc.orig 2023-11-04 07:08:51 UTC
+--- components/password_manager/core/browser/login_database_unittest.cc.orig 2023-12-23 12:33:28 UTC
+++ components/password_manager/core/browser/login_database_unittest.cc
-@@ -2100,7 +2100,7 @@ INSTANTIATE_TEST_SUITE_P(MigrationToVCurrent,
+@@ -2102,7 +2102,7 @@ INSTANTIATE_TEST_SUITE_P(MigrationToVCurrent,
LoginDatabaseMigrationTestBroken,
testing::Values(1, 2, 3, 24));
diff --git a/www/ungoogled-chromium/files/patch-components_password__manager_core_common_password__manager__features.h b/www/ungoogled-chromium/files/patch-components_password__manager_core_common_password__manager__features.h
index 6cd1e06da6a5..ab14ed4a5471 100644
--- a/www/ungoogled-chromium/files/patch-components_password__manager_core_common_password__manager__features.h
+++ b/www/ungoogled-chromium/files/patch-components_password__manager_core_common_password__manager__features.h
@@ -1,6 +1,6 @@
---- components/password_manager/core/common/password_manager_features.h.orig 2023-10-27 05:38:38 UTC
+--- components/password_manager/core/common/password_manager_features.h.orig 2023-12-23 12:33:28 UTC
+++ components/password_manager/core/common/password_manager_features.h
-@@ -24,7 +24,7 @@ namespace password_manager::features {
+@@ -19,7 +19,7 @@ namespace password_manager::features {
// alongside the definition of their values in the .cc file.
BASE_DECLARE_FEATURE(kEnableOverwritingPlaceholderUsernames);
diff --git a/www/ungoogled-chromium/files/patch-components_policy_core_common_cloud_cloud__policy__client.cc b/www/ungoogled-chromium/files/patch-components_policy_core_common_cloud_cloud__policy__client.cc
index ef98bb6876fa..f4519d4f45e6 100644
--- a/www/ungoogled-chromium/files/patch-components_policy_core_common_cloud_cloud__policy__client.cc
+++ b/www/ungoogled-chromium/files/patch-components_policy_core_common_cloud_cloud__policy__client.cc
@@ -1,6 +1,6 @@
---- components/policy/core/common/cloud/cloud_policy_client.cc.orig 2023-11-04 07:08:51 UTC
+--- components/policy/core/common/cloud/cloud_policy_client.cc.orig 2023-12-23 12:33:28 UTC
+++ components/policy/core/common/cloud/cloud_policy_client.cc
-@@ -507,7 +507,7 @@ void CloudPolicyClient::FetchPolicy(PolicyFetchReason
+@@ -508,7 +508,7 @@ void CloudPolicyClient::FetchPolicy(PolicyFetchReason
fetch_request->set_invalidation_payload(invalidation_payload_);
}
}
diff --git a/www/ungoogled-chromium/files/patch-components_safe__browsing_core_browser_db_v4__protocol__manager__util.cc b/www/ungoogled-chromium/files/patch-components_safe__browsing_core_browser_db_v4__protocol__manager__util.cc
index 4c7662e124e1..45ff508a433a 100644
--- a/www/ungoogled-chromium/files/patch-components_safe__browsing_core_browser_db_v4__protocol__manager__util.cc
+++ b/www/ungoogled-chromium/files/patch-components_safe__browsing_core_browser_db_v4__protocol__manager__util.cc
@@ -1,6 +1,6 @@
---- components/safe_browsing/core/browser/db/v4_protocol_manager_util.cc.orig 2023-05-05 12:12:41 UTC
+--- components/safe_browsing/core/browser/db/v4_protocol_manager_util.cc.orig 2023-12-23 12:33:28 UTC
+++ components/safe_browsing/core/browser/db/v4_protocol_manager_util.cc
-@@ -114,7 +114,7 @@ std::ostream& operator<<(std::ostream& os, const ListI
+@@ -113,7 +113,7 @@ std::ostream& operator<<(std::ostream& os, const ListI
PlatformType GetCurrentPlatformType() {
#if BUILDFLAG(IS_WIN)
return WINDOWS_PLATFORM;
diff --git a/www/ungoogled-chromium/files/patch-components_supervised__user_core_browser_supervised__user__service.cc b/www/ungoogled-chromium/files/patch-components_supervised__user_core_browser_supervised__user__service.cc
index b3185b4644ae..4b7b22ad7ea2 100644
--- a/www/ungoogled-chromium/files/patch-components_supervised__user_core_browser_supervised__user__service.cc
+++ b/www/ungoogled-chromium/files/patch-components_supervised__user_core_browser_supervised__user__service.cc
@@ -1,6 +1,6 @@
---- components/supervised_user/core/browser/supervised_user_service.cc.orig 2023-10-13 13:20:35 UTC
+--- components/supervised_user/core/browser/supervised_user_service.cc.orig 2023-12-23 12:33:28 UTC
+++ components/supervised_user/core/browser/supervised_user_service.cc
-@@ -206,7 +206,7 @@ FirstTimeInterstitialBannerState SupervisedUserService
+@@ -212,7 +212,7 @@ FirstTimeInterstitialBannerState SupervisedUserService
const FirstTimeInterstitialBannerState original_state) {
FirstTimeInterstitialBannerState target_state = original_state;
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
diff --git a/www/ungoogled-chromium/files/patch-components_supervised__user_core_common_features.cc b/www/ungoogled-chromium/files/patch-components_supervised__user_core_common_features.cc
new file mode 100644
index 000000000000..7297e0f0d14e
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-components_supervised__user_core_common_features.cc
@@ -0,0 +1,20 @@
+--- components/supervised_user/core/common/features.cc.orig 2023-12-23 12:33:28 UTC
++++ components/supervised_user/core/common/features.cc
+@@ -91,7 +91,7 @@ BASE_FEATURE(kEnableManagedByParentUi,
+ "EnableManagedByParentUi",
+ base::FEATURE_DISABLED_BY_DEFAULT);
+
+-#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN)
++#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
+ BASE_FEATURE(kEnableExtensionsPermissionsForSupervisedUsersOnDesktop,
+ "EnableExtensionsPermissionsForSupervisedUsersOnDesktop",
+ base::FEATURE_DISABLED_BY_DEFAULT);
+@@ -149,7 +149,7 @@ bool IsChildAccountSupervisionEnabled() {
+ return base::FeatureList::IsEnabled(
+ supervised_user::
+ kFilterWebsitesForSupervisedUsersOnDesktopAndIOS) ||
+-#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN)
++#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
+ base::FeatureList::IsEnabled(
+ supervised_user::
+ kEnableExtensionsPermissionsForSupervisedUsersOnDesktop) ||
diff --git a/www/ungoogled-chromium/files/patch-components_supervised__user_core_common_features.h b/www/ungoogled-chromium/files/patch-components_supervised__user_core_common_features.h
new file mode 100644
index 000000000000..3a6d91fb3891
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-components_supervised__user_core_common_features.h
@@ -0,0 +1,11 @@
+--- components/supervised_user/core/common/features.h.orig 2023-12-23 12:33:28 UTC
++++ components/supervised_user/core/common/features.h
+@@ -24,7 +24,7 @@ BASE_DECLARE_FEATURE(kEnableManagedByParentUi);
+ extern const base::FeatureParam<std::string> kManagedByParentUiMoreInfoUrl;
+ BASE_DECLARE_FEATURE(kClearingCookiesKeepsSupervisedUsersSignedIn);
+
+-#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN)
++#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
+ BASE_DECLARE_FEATURE(kEnableExtensionsPermissionsForSupervisedUsersOnDesktop);
+ #endif
+
diff --git a/www/ungoogled-chromium/files/patch-components_sync_base_features.cc b/www/ungoogled-chromium/files/patch-components_sync_base_features.cc
new file mode 100644
index 000000000000..d9b1b79eda88
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-components_sync_base_features.cc
@@ -0,0 +1,11 @@
+--- components/sync/base/features.cc.orig 2023-12-23 12:33:28 UTC
++++ components/sync/base/features.cc
+@@ -101,7 +101,7 @@ BASE_FEATURE(kEnablePreferencesAccountStorage,
+ BASE_FEATURE(kSyncPollImmediatelyOnEveryStartup,
+ "SyncPollImmediatelyOnEveryStartup",
+ #if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || \
+- BUILDFLAG(IS_WIN)
++ BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
+ base::FEATURE_ENABLED_BY_DEFAULT
+ #else
+ base::FEATURE_DISABLED_BY_DEFAULT
diff --git a/www/ungoogled-chromium/files/patch-components_user__education_views_help__bubble__view.cc b/www/ungoogled-chromium/files/patch-components_user__education_views_help__bubble__view.cc
index 59dfaa62b82c..f38a157a54bc 100644
--- a/www/ungoogled-chromium/files/patch-components_user__education_views_help__bubble__view.cc
+++ b/www/ungoogled-chromium/files/patch-components_user__education_views_help__bubble__view.cc
@@ -1,6 +1,6 @@
---- components/user_education/views/help_bubble_view.cc.orig 2023-11-04 07:08:51 UTC
+--- components/user_education/views/help_bubble_view.cc.orig 2023-12-23 12:33:28 UTC
+++ components/user_education/views/help_bubble_view.cc
-@@ -997,7 +997,7 @@ gfx::Rect HelpBubbleView::GetAnchorRect() const {
+@@ -1003,7 +1003,7 @@ gfx::Rect HelpBubbleView::GetAnchorRect() const {
void HelpBubbleView::OnBeforeBubbleWidgetInit(views::Widget::InitParams* params,
views::Widget* widget) const {
BubbleDialogDelegateView::OnBeforeBubbleWidgetInit(params, widget);
diff --git a/www/ungoogled-chromium/files/patch-components_variations_service_variations__service.cc b/www/ungoogled-chromium/files/patch-components_variations_service_variations__service.cc
index d98f5cd7eafc..3e4943eff886 100644
--- a/www/ungoogled-chromium/files/patch-components_variations_service_variations__service.cc
+++ b/www/ungoogled-chromium/files/patch-components_variations_service_variations__service.cc
@@ -1,6 +1,6 @@
---- components/variations/service/variations_service.cc.orig 2023-06-05 19:39:05 UTC
+--- components/variations/service/variations_service.cc.orig 2023-12-23 12:33:28 UTC
+++ components/variations/service/variations_service.cc
-@@ -95,7 +95,7 @@ std::string GetPlatformString() {
+@@ -96,7 +96,7 @@ std::string GetPlatformString() {
return "android";
#elif BUILDFLAG(IS_FUCHSIA)
return "fuchsia";
diff --git a/www/ungoogled-chromium/files/patch-components_viz_host_host__display__client.cc b/www/ungoogled-chromium/files/patch-components_viz_host_host__display__client.cc
deleted file mode 100644
index 9760f6191cb6..000000000000
--- a/www/ungoogled-chromium/files/patch-components_viz_host_host__display__client.cc
+++ /dev/null
@@ -1,11 +0,0 @@
---- components/viz/host/host_display_client.cc.orig 2023-03-10 11:01:21 UTC
-+++ components/viz/host/host_display_client.cc
-@@ -67,7 +67,7 @@ void HostDisplayClient::AddChildWindowToBrowser(
-
- // TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
- // of lacros-chrome is complete.
--#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_BSD)
- void HostDisplayClient::DidCompleteSwapWithNewSize(const gfx::Size& size) {
- NOTIMPLEMENTED();
- }
diff --git a/www/ungoogled-chromium/files/patch-components_viz_host_host__display__client.h b/www/ungoogled-chromium/files/patch-components_viz_host_host__display__client.h
deleted file mode 100644
index ad49f99aca39..000000000000
--- a/www/ungoogled-chromium/files/patch-components_viz_host_host__display__client.h
+++ /dev/null
@@ -1,11 +0,0 @@
---- components/viz/host/host_display_client.h.orig 2023-01-13 08:56:02 UTC
-+++ components/viz/host/host_display_client.h
-@@ -54,7 +54,7 @@ class VIZ_HOST_EXPORT HostDisplayClient : public mojom
-
- // TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
- // of lacros-chrome is complete.
--#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_BSD)
- void DidCompleteSwapWithNewSize(const gfx::Size& size) override;
- #endif
-
diff --git a/www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_skia__output__surface__impl__on__gpu.cc b/www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_skia__output__surface__impl__on__gpu.cc
index 1fba711c311f..18a956045818 100644
--- a/www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_skia__output__surface__impl__on__gpu.cc
+++ b/www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_skia__output__surface__impl__on__gpu.cc
@@ -1,6 +1,6 @@
---- components/viz/service/display_embedder/skia_output_surface_impl_on_gpu.cc.orig 2023-11-04 07:08:51 UTC
+--- components/viz/service/display_embedder/skia_output_surface_impl_on_gpu.cc.orig 2023-12-23 12:33:28 UTC
+++ components/viz/service/display_embedder/skia_output_surface_impl_on_gpu.cc
-@@ -1530,7 +1530,12 @@ void SkiaOutputSurfaceImplOnGpu::CopyOutputNV12(
+@@ -1567,7 +1567,12 @@ void SkiaOutputSurfaceImplOnGpu::CopyOutputNV12(
// Issue readbacks from the surfaces:
for (size_t i = 0; i < CopyOutputResult::kNV12MaxPlanes; ++i) {
diff --git a/www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_software__output__surface.cc b/www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_software__output__surface.cc
index 8e7c4f4e1396..e13e70b70b20 100644
--- a/www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_software__output__surface.cc
+++ b/www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_software__output__surface.cc
@@ -1,6 +1,6 @@
---- components/viz/service/display_embedder/software_output_surface.cc.orig 2023-02-11 09:11:04 UTC
+--- components/viz/service/display_embedder/software_output_surface.cc.orig 2023-12-23 12:33:28 UTC
+++ components/viz/service/display_embedder/software_output_surface.cc
-@@ -98,7 +98,7 @@ void SoftwareOutputSurface::SwapBuffersCallback(base::
+@@ -124,7 +124,7 @@ void SoftwareOutputSurface::SwapBuffersCallback(base::
now.SnappedToNextTick(refresh_timebase_, refresh_interval_) - now;
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
@@ -9,7 +9,7 @@
if (needs_swap_size_notifications_)
client_->DidSwapWithSize(pixel_size);
#endif
-@@ -125,7 +125,7 @@ gfx::OverlayTransform SoftwareOutputSurface::GetDispla
+@@ -151,7 +151,7 @@ gfx::OverlayTransform SoftwareOutputSurface::GetDispla
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
diff --git a/www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_software__output__surface.h b/www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_software__output__surface.h
index 7fffb4339ed9..fca5bb1a540b 100644
--- a/www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_software__output__surface.h
+++ b/www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_software__output__surface.h
@@ -1,4 +1,4 @@
---- components/viz/service/display_embedder/software_output_surface.h.orig 2022-10-01 07:40:07 UTC
+--- components/viz/service/display_embedder/software_output_surface.h.orig 2023-12-23 12:33:28 UTC
+++ components/viz/service/display_embedder/software_output_surface.h
@@ -47,7 +47,7 @@ class VIZ_SERVICE_EXPORT SoftwareOutputSurface : publi
gfx::OverlayTransform GetDisplayTransform() override;
@@ -9,7 +9,7 @@
void SetNeedsSwapSizeNotifications(
bool needs_swap_size_notifications) override;
#endif
-@@ -69,7 +69,7 @@ class VIZ_SERVICE_EXPORT SoftwareOutputSurface : publi
+@@ -70,7 +70,7 @@ class VIZ_SERVICE_EXPORT SoftwareOutputSurface : publi
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
diff --git a/www/ungoogled-chromium/files/patch-components_viz_service_frame__sinks_root__compositor__frame__sink__impl.cc b/www/ungoogled-chromium/files/patch-components_viz_service_frame__sinks_root__compositor__frame__sink__impl.cc
deleted file mode 100644
index a4267ef3f72a..000000000000
--- a/www/ungoogled-chromium/files/patch-components_viz_service_frame__sinks_root__compositor__frame__sink__impl.cc
+++ /dev/null
@@ -1,20 +0,0 @@
---- components/viz/service/frame_sinks/root_compositor_frame_sink_impl.cc.orig 2023-11-04 07:08:51 UTC
-+++ components/viz/service/frame_sinks/root_compositor_frame_sink_impl.cc
-@@ -110,7 +110,7 @@ RootCompositorFrameSinkImpl::Create(
-
- // TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
- // of lacros-chrome is complete.
--#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_BSD)
- // For X11, we need notify client about swap completion after resizing, so the
- // client can use it for synchronize with X11 WM.
- output_surface->SetNeedsSwapSizeNotifications(true);
-@@ -714,7 +714,7 @@ void RootCompositorFrameSinkImpl::DisplayDidCompleteSw
- display_client_->DidCompleteSwapWithSize(pixel_size);
- // TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
- // of lacros-chrome is complete.
--#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)
-+#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_BSD)
- if (display_client_ && pixel_size != last_swap_pixel_size_) {
- last_swap_pixel_size_ = pixel_size;
- display_client_->DidCompleteSwapWithNewSize(last_swap_pixel_size_);
diff --git a/www/ungoogled-chromium/files/patch-components_viz_service_frame__sinks_root__compositor__frame__sink__impl.h b/www/ungoogled-chromium/files/patch-components_viz_service_frame__sinks_root__compositor__frame__sink__impl.h
deleted file mode 100644
index a13d54327b82..000000000000
--- a/www/ungoogled-chromium/files/patch-components_viz_service_frame__sinks_root__compositor__frame__sink__impl.h
+++ /dev/null
@@ -1,11 +0,0 @@
---- components/viz/service/frame_sinks/root_compositor_frame_sink_impl.h.orig 2023-11-04 07:08:51 UTC
-+++ components/viz/service/frame_sinks/root_compositor_frame_sink_impl.h
-@@ -212,7 +212,7 @@ class VIZ_SERVICE_EXPORT RootCompositorFrameSinkImpl
-
- // TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
- // of lacros-chrome is complete.
--#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_BSD)
- gfx::Size last_swap_pixel_size_;
- #endif
-
diff --git a/www/ungoogled-chromium/files/patch-components_viz_test_fake__display__client.cc b/www/ungoogled-chromium/files/patch-components_viz_test_fake__display__client.cc
deleted file mode 100644
index dd8233188cc5..000000000000
--- a/www/ungoogled-chromium/files/patch-components_viz_test_fake__display__client.cc
+++ /dev/null
@@ -1,11 +0,0 @@
---- components/viz/test/fake_display_client.cc.orig 2023-01-13 08:56:02 UTC
-+++ components/viz/test/fake_display_client.cc
-@@ -27,7 +27,7 @@ void FakeDisplayClient::AddChildWindowToBrowser(
- gpu::SurfaceHandle child_window) {}
- #endif
-
--#if BUILDFLAG(IS_LINUX)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
- void FakeDisplayClient::DidCompleteSwapWithNewSize(const gfx::Size& size) {}
- #endif
-
diff --git a/www/ungoogled-chromium/files/patch-components_viz_test_fake__display__client.h b/www/ungoogled-chromium/files/patch-components_viz_test_fake__display__client.h
deleted file mode 100644
index 57fa9561825b..000000000000
--- a/www/ungoogled-chromium/files/patch-components_viz_test_fake__display__client.h
+++ /dev/null
@@ -1,11 +0,0 @@
---- components/viz/test/fake_display_client.h.orig 2023-01-13 08:56:02 UTC
-+++ components/viz/test/fake_display_client.h
-@@ -35,7 +35,7 @@ class FakeDisplayClient : public mojom::DisplayClient
- void AddChildWindowToBrowser(gpu::SurfaceHandle child_window) override;
- #endif
-
--#if BUILDFLAG(IS_LINUX)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
- void DidCompleteSwapWithNewSize(const gfx::Size& size) override;
- #endif
-
diff --git a/www/ungoogled-chromium/files/patch-components_viz_test_mock__display__client.h b/www/ungoogled-chromium/files/patch-components_viz_test_mock__display__client.h
deleted file mode 100644
index b52cbf14967a..000000000000
--- a/www/ungoogled-chromium/files/patch-components_viz_test_mock__display__client.h
+++ /dev/null
@@ -1,11 +0,0 @@
---- components/viz/test/mock_display_client.h.orig 2023-01-13 08:56:02 UTC
-+++ components/viz/test/mock_display_client.h
-@@ -45,7 +45,7 @@ class MockDisplayClient : public mojom::DisplayClient
- #endif
- // TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
- // of lacros-chrome is complete.
--#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_BSD)
- MOCK_METHOD1(DidCompleteSwapWithNewSize, void(const gfx::Size&));
- #endif
-
diff --git a/www/ungoogled-chromium/files/patch-content_browser_BUILD.gn b/www/ungoogled-chromium/files/patch-content_browser_BUILD.gn
index 36627d735018..fe6bdde7874d 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-content_browser_BUILD.gn
@@ -1,6 +1,6 @@
---- content/browser/BUILD.gn.orig 2023-11-04 07:08:51 UTC
+--- content/browser/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ content/browser/BUILD.gn
-@@ -2462,6 +2462,13 @@ source_set("browser") {
+@@ -2474,6 +2474,13 @@ source_set("browser") {
deps += [ "//media/mojo/mojom/stable:stable_video_decoder" ]
}
diff --git a/www/ungoogled-chromium/files/patch-content_browser_browser__child__process__host__impl.cc b/www/ungoogled-chromium/files/patch-content_browser_browser__child__process__host__impl.cc
index 40e169b181d8..4ca42a709162 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_browser__child__process__host__impl.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_browser__child__process__host__impl.cc
@@ -1,4 +1,4 @@
---- content/browser/browser_child_process_host_impl.cc.orig 2023-07-21 09:49:17 UTC
+--- content/browser/browser_child_process_host_impl.cc.orig 2023-12-23 12:33:28 UTC
+++ content/browser/browser_child_process_host_impl.cc
@@ -321,6 +321,7 @@ void BrowserChildProcessHostImpl::LaunchWithoutExtraCo
switches::kDisableBestEffortTasks,
@@ -8,3 +8,12 @@
switches::kIPCConnectionTimeout,
switches::kLogBestEffortTasks,
switches::kLogFile,
+@@ -630,7 +631,7 @@ void BrowserChildProcessHostImpl::OnProcessLaunched()
+ ->child_process());
+ #endif
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
+ child_thread_type_switcher_.SetPid(process.Pid());
+ #endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+
diff --git a/www/ungoogled-chromium/files/patch-content_browser_browser__child__process__host__impl.h b/www/ungoogled-chromium/files/patch-content_browser_browser__child__process__host__impl.h
new file mode 100644
index 000000000000..8941fb61f88e
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-content_browser_browser__child__process__host__impl.h
@@ -0,0 +1,20 @@
+--- content/browser/browser_child_process_host_impl.h.orig 2023-12-23 12:33:28 UTC
++++ content/browser/browser_child_process_host_impl.h
+@@ -33,7 +33,7 @@
+ #include "base/win/object_watcher.h"
+ #endif
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
+ #include "content/browser/child_thread_type_switcher_linux.h"
+ #endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+
+@@ -261,7 +261,7 @@ class BrowserChildProcessHostImpl
+ std::unique_ptr<tracing::SystemTracingService> system_tracing_service_;
+ #endif
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
+ ChildThreadTypeSwitcher child_thread_type_switcher_;
+ #endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+
diff --git a/www/ungoogled-chromium/files/patch-content_browser_browser__child__process__host__impl__receiver__bindings.cc b/www/ungoogled-chromium/files/patch-content_browser_browser__child__process__host__impl__receiver__bindings.cc
new file mode 100644
index 000000000000..5a02013dd7d9
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-content_browser_browser__child__process__host__impl__receiver__bindings.cc
@@ -0,0 +1,11 @@
+--- content/browser/browser_child_process_host_impl_receiver_bindings.cc.orig 2023-12-23 12:33:28 UTC
++++ content/browser/browser_child_process_host_impl_receiver_bindings.cc
+@@ -62,7 +62,7 @@ void BrowserChildProcessHostImpl::BindHostReceiver(
+ }
+ }
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
+ if (auto r = receiver.As<mojom::ThreadTypeSwitcher>()) {
+ child_thread_type_switcher_.Bind(std::move(r));
+ return;
diff --git a/www/ungoogled-chromium/files/patch-content_browser_browser__main__loop.cc b/www/ungoogled-chromium/files/patch-content_browser_browser__main__loop.cc
index 32d269de3527..af79fb42ce12 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_browser__main__loop.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_browser__main__loop.cc
@@ -1,4 +1,4 @@
---- content/browser/browser_main_loop.cc.orig 2023-11-04 07:08:51 UTC
+--- content/browser/browser_main_loop.cc.orig 2023-12-23 12:33:28 UTC
+++ content/browser/browser_main_loop.cc
@@ -247,6 +247,12 @@
#include "mojo/public/cpp/bindings/lib/test_random_mojo_delays.h"
@@ -13,7 +13,7 @@
// One of the linux specific headers defines this as a macro.
#ifdef DestroyAll
#undef DestroyAll
-@@ -540,6 +546,12 @@ int BrowserMainLoop::EarlyInitialization() {
+@@ -552,6 +558,12 @@ int BrowserMainLoop::EarlyInitialization() {
// by now since a thread to start the ServiceManager has been created
// before the browser main loop starts.
DCHECK(SandboxHostLinux::GetInstance()->IsInitialized());
@@ -26,7 +26,7 @@
#endif
// GLib's spawning of new processes is buggy, so it's important that at this
-@@ -577,7 +589,7 @@ int BrowserMainLoop::EarlyInitialization() {
+@@ -589,7 +601,7 @@ int BrowserMainLoop::EarlyInitialization() {
base::PlatformThread::SetCurrentThreadType(base::ThreadType::kCompositing);
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
diff --git a/www/ungoogled-chromium/files/patch-content_browser_child__thread__type__switcher__linux.cc b/www/ungoogled-chromium/files/patch-content_browser_child__thread__type__switcher__linux.cc
new file mode 100644
index 000000000000..9f1fb40dca25
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-content_browser_child__thread__type__switcher__linux.cc
@@ -0,0 +1,20 @@
+--- content/browser/child_thread_type_switcher_linux.cc.orig 2023-12-23 12:33:28 UTC
++++ content/browser/child_thread_type_switcher_linux.cc
+@@ -20,6 +20,9 @@ void SetThreadTypeOnLauncherThread(base::ProcessId pee
+ base::ThreadType thread_type) {
+ DCHECK(CurrentlyOnProcessLauncherTaskRunner());
+
++#if BUILDFLAG(IS_BSD)
++ NOTIMPLEMENTED();
++#else
+ bool ns_pid_supported = false;
+ pid_t peer_tid = base::FindThreadID(peer_pid, ns_tid, &ns_pid_supported);
+ if (peer_tid == -1) {
+@@ -37,6 +40,7 @@ void SetThreadTypeOnLauncherThread(base::ProcessId pee
+
+ base::PlatformThread::SetThreadType(peer_pid, peer_tid, thread_type,
+ base::IsViaIPC(true));
++#endif
+ }
+
+ } // namespace
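
Here the port stubs a Linux-only mechanism instead of redirecting it: base::FindThreadID() resolves the peer thread through Linux-specific process state, so on BSD the function body collapses to NOTIMPLEMENTED(). A reduced sketch of the stub-out pattern; both macros below are stand-ins for Chromium's real ones, assumed only for illustration:

    #include <cstdio>

    #define IS_BSD 1  // assumption: stand-in for BUILDFLAG(IS_BSD)
    #define NOTIMPLEMENTED() \
      std::fprintf(stderr, "%s not implemented\n", __func__)

    void SetThreadTypeOnLauncherThread(int peer_pid, int ns_tid) {
    #if IS_BSD
      NOTIMPLEMENTED();  // no BSD equivalent of the Linux thread lookup yet
      (void)peer_pid;
      (void)ns_tid;
    #else
      // Linux path: resolve the peer thread and adjust its scheduling type.
    #endif
    }

    int main() { SetThreadTypeOnLauncherThread(1234, 5678); }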
diff --git a/www/ungoogled-chromium/files/patch-content_browser_compositor_viz__process__transport__factory.cc b/www/ungoogled-chromium/files/patch-content_browser_compositor_viz__process__transport__factory.cc
deleted file mode 100644
index a724f7f95aef..000000000000
--- a/www/ungoogled-chromium/files/patch-content_browser_compositor_viz__process__transport__factory.cc
+++ /dev/null
@@ -1,11 +0,0 @@
---- content/browser/compositor/viz_process_transport_factory.cc.orig 2023-10-13 13:20:35 UTC
-+++ content/browser/compositor/viz_process_transport_factory.cc
-@@ -108,7 +108,7 @@ class HostDisplayClient : public viz::HostDisplayClien
- // viz::HostDisplayClient:
- // TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
- // of lacros-chrome is complete.
--#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_BSD)
- void DidCompleteSwapWithNewSize(const gfx::Size& size) override {
- compositor_->OnCompleteSwapWithNewSize(size);
- }
diff --git a/www/ungoogled-chromium/files/patch-content_browser_download_save__package.cc b/www/ungoogled-chromium/files/patch-content_browser_download_save__package.cc
index 6066c249e6f5..1982f5229960 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_download_save__package.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_download_save__package.cc
@@ -1,6 +1,6 @@
---- content/browser/download/save_package.cc.orig 2023-11-04 07:08:51 UTC
+--- content/browser/download/save_package.cc.orig 2023-12-23 12:33:28 UTC
+++ content/browser/download/save_package.cc
-@@ -769,8 +769,13 @@ void SavePackage::Finish() {
+@@ -767,8 +767,13 @@ void SavePackage::Finish() {
if (download_) {
std::vector<download::DownloadSaveItemData::ItemInfo> files;
for (auto& item : saved_success_items_) {
diff --git a/www/ungoogled-chromium/files/patch-content_browser_generic__sensor_frame__sensor__provider__proxy.cc b/www/ungoogled-chromium/files/patch-content_browser_generic__sensor_frame__sensor__provider__proxy.cc
new file mode 100644
index 000000000000..920a631de1a7
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-content_browser_generic__sensor_frame__sensor__provider__proxy.cc
@@ -0,0 +1,14 @@
+--- content/browser/generic_sensor/frame_sensor_provider_proxy.cc.orig 2023-12-23 12:33:28 UTC
++++ content/browser/generic_sensor/frame_sensor_provider_proxy.cc
+@@ -20,7 +20,11 @@ namespace content {
+
+ namespace {
+
++#if (_LIBCPP_VERSION >= 160000)
+ constexpr std::vector<blink::mojom::PermissionsPolicyFeature>
++#else
++std::vector<blink::mojom::PermissionsPolicyFeature>
++#endif
+ SensorTypeToPermissionsPolicyFeatures(SensorType type) {
+ switch (type) {
+ case SensorType::AMBIENT_LIGHT:
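
The same libc++ version gate appears once more, this time on a `constexpr` function that returns a std::vector: constexpr allocation is a C++20 feature the check assumes is usable from libc++ 16 on, so older toolchains simply lose the qualifier. Reduced to a standalone example:

    #include <vector>

    #if defined(_LIBCPP_VERSION) && _LIBCPP_VERSION >= 160000
    constexpr std::vector<int>  // constexpr allocation available
    #else
    std::vector<int>            // older libc++: plain runtime function
    #endif
    FirstN(int n) {
      std::vector<int> out;
      for (int i = 0; i < n; ++i)
        out.push_back(i);
      return out;
    }

    int main() { return FirstN(3).size() == 3 ? 0 : 1; }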
diff --git a/www/ungoogled-chromium/files/patch-content_browser_gpu_gpu__process__host.cc b/www/ungoogled-chromium/files/patch-content_browser_gpu_gpu__process__host.cc
index 615918da9d23..51060dcfd030 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_gpu_gpu__process__host.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_gpu_gpu__process__host.cc
@@ -1,14 +1,14 @@
---- content/browser/gpu/gpu_process_host.cc.orig 2023-11-04 07:08:51 UTC
+--- content/browser/gpu/gpu_process_host.cc.orig 2023-12-23 12:33:28 UTC
+++ content/browser/gpu/gpu_process_host.cc
-@@ -253,6 +253,7 @@ static const char* const kSwitchNames[] = {
- switches::kEnableBackgroundThreadPool,
+@@ -254,6 +254,7 @@ static const char* const kSwitchNames[] = {
switches::kEnableGpuRasterization,
+ switches::kEnableSkiaGraphite,
switches::kEnableLogging,
+ switches::kDisableUnveil,
switches::kDoubleBufferCompositing,
switches::kHeadless,
switches::kLoggingLevel,
-@@ -287,7 +288,7 @@ static const char* const kSwitchNames[] = {
+@@ -289,7 +290,7 @@ static const char* const kSwitchNames[] = {
switches::kOzoneDumpFile,
switches::kDisableBufferBWCompression,
#endif
diff --git a/www/ungoogled-chromium/files/patch-content_browser_media_media__keys__listener__manager__impl.cc b/www/ungoogled-chromium/files/patch-content_browser_media_media__keys__listener__manager__impl.cc
index 0d2c22ae4350..e33e979c7b2b 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_media_media__keys__listener__manager__impl.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_media_media__keys__listener__manager__impl.cc
@@ -1,6 +1,6 @@
---- content/browser/media/media_keys_listener_manager_impl.cc.orig 2023-08-18 10:26:52 UTC
+--- content/browser/media/media_keys_listener_manager_impl.cc.orig 2023-12-23 12:33:28 UTC
+++ content/browser/media/media_keys_listener_manager_impl.cc
-@@ -234,7 +234,7 @@ void MediaKeysListenerManagerImpl::StartListeningForMe
+@@ -252,7 +252,7 @@ void MediaKeysListenerManagerImpl::StartListeningForMe
// TODO(crbug.com/1052397): Revisit once build flag switch of lacros-chrome is
// complete.
#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)) || \
diff --git a/www/ungoogled-chromium/files/patch-content_browser_network__service__instance__impl.cc b/www/ungoogled-chromium/files/patch-content_browser_network__service__instance__impl.cc
index 692d7ced7c2d..53453e580e24 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_network__service__instance__impl.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_network__service__instance__impl.cc
@@ -1,6 +1,6 @@
---- content/browser/network_service_instance_impl.cc.orig 2023-11-04 07:08:51 UTC
+--- content/browser/network_service_instance_impl.cc.orig 2023-12-23 12:33:28 UTC
+++ content/browser/network_service_instance_impl.cc
-@@ -81,7 +81,7 @@
+@@ -80,7 +80,7 @@
#include "content/browser/network/network_service_process_tracker_win.h"
#endif
@@ -9,7 +9,7 @@
#include "content/browser/system_dns_resolution/system_dns_resolver.h"
#include "services/network/public/mojom/system_dns_resolution.mojom-forward.h"
#endif
-@@ -373,7 +373,7 @@ void CreateInProcessNetworkService(
+@@ -357,7 +357,7 @@ void CreateInProcessNetworkService(
std::move(receiver)));
}
@@ -18,7 +18,7 @@
// Runs a self-owned SystemDnsResolverMojoImpl. This is meant to run on a
// high-priority thread pool.
void RunSystemDnsResolverOnThreadPool(
-@@ -440,7 +440,7 @@ network::mojom::NetworkServiceParamsPtr CreateNetworkS
+@@ -426,7 +426,7 @@ network::mojom::NetworkServiceParamsPtr CreateNetworkS
}
#endif // BUILDFLAG(IS_POSIX)
diff --git a/www/ungoogled-chromium/files/patch-content_browser_renderer__host_media_service__video__capture__device__launcher.cc b/www/ungoogled-chromium/files/patch-content_browser_renderer__host_media_service__video__capture__device__launcher.cc
index 46243cf33420..e4baaad86c4a 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_renderer__host_media_service__video__capture__device__launcher.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_renderer__host_media_service__video__capture__device__launcher.cc
@@ -1,6 +1,6 @@
---- content/browser/renderer_host/media/service_video_capture_device_launcher.cc.orig 2023-09-17 07:59:53 UTC
+--- content/browser/renderer_host/media/service_video_capture_device_launcher.cc.orig 2023-12-23 12:33:28 UTC
+++ content/browser/renderer_host/media/service_video_capture_device_launcher.cc
-@@ -24,7 +24,7 @@
+@@ -25,7 +25,7 @@
#include "media/base/media_switches.h"
#endif
@@ -9,7 +9,7 @@
#include "content/browser/gpu/gpu_data_manager_impl.h"
#endif
-@@ -166,7 +166,7 @@ void ServiceVideoCaptureDeviceLauncher::LaunchDeviceAs
+@@ -172,7 +172,7 @@ void ServiceVideoCaptureDeviceLauncher::LaunchDeviceAs
}
#else
if (switches::IsVideoCaptureUseGpuMemoryBufferEnabled()) {
diff --git a/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__process__host__impl.cc b/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__process__host__impl.cc
index 171ef9706704..818762f9fa59 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__process__host__impl.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__process__host__impl.cc
@@ -1,6 +1,6 @@
---- content/browser/renderer_host/render_process_host_impl.cc.orig 2023-11-04 07:08:51 UTC
+--- content/browser/renderer_host/render_process_host_impl.cc.orig 2023-12-23 12:33:28 UTC
+++ content/browser/renderer_host/render_process_host_impl.cc
-@@ -221,7 +221,7 @@
+@@ -223,7 +223,7 @@
#include "third_party/blink/public/mojom/android_font_lookup/android_font_lookup.mojom.h"
#endif
@@ -9,7 +9,7 @@
#include <sys/resource.h>
#include "components/services/font/public/mojom/font_service.mojom.h" // nogncheck
-@@ -952,7 +952,7 @@ static constexpr size_t kUnknownPlatformProcessLimit =
+@@ -953,7 +953,7 @@ static constexpr size_t kUnknownPlatformProcessLimit =
// to indicate failure and std::numeric_limits<size_t>::max() to indicate
// unlimited.
size_t GetPlatformProcessLimit() {
@@ -18,7 +18,7 @@
struct rlimit limit;
if (getrlimit(RLIMIT_NPROC, &limit) != 0)
return kUnknownPlatformProcessLimit;
-@@ -1095,7 +1095,7 @@ class RenderProcessHostImpl::IOThreadHostImpl : public
+@@ -1160,7 +1160,7 @@ class RenderProcessHostImpl::IOThreadHostImpl : public
return;
}
@@ -27,7 +27,7 @@
if (auto font_receiver = receiver.As<font_service::mojom::FontService>()) {
ConnectToFontService(std::move(font_receiver));
return;
-@@ -1184,7 +1184,7 @@ class RenderProcessHostImpl::IOThreadHostImpl : public
+@@ -1254,7 +1254,7 @@ class RenderProcessHostImpl::IOThreadHostImpl : public
std::unique_ptr<service_manager::BinderRegistry> binders_;
mojo::Receiver<mojom::ChildProcessHost> receiver_{this};
@@ -35,8 +35,8 @@
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
mojo::Remote<media::mojom::VideoEncodeAcceleratorProviderFactory>
video_encode_accelerator_factory_remote_;
- #endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-@@ -3257,7 +3257,7 @@ void RenderProcessHostImpl::AppendRendererCommandLine(
+ ChildThreadTypeSwitcher child_thread_type_switcher_;
+@@ -3331,7 +3331,7 @@ void RenderProcessHostImpl::AppendRendererCommandLine(
base::TimeTicks::UnixEpoch().since_origin().InMicroseconds()));
}
@@ -45,7 +45,7 @@
// Append `kDisableVideoCaptureUseGpuMemoryBuffer` flag if there is no support
// for NV12 GPU memory buffer.
if (switches::IsVideoCaptureUseGpuMemoryBufferEnabled() &&
-@@ -3317,6 +3317,7 @@ void RenderProcessHostImpl::PropagateBrowserCommandLin
+@@ -3391,6 +3391,7 @@ void RenderProcessHostImpl::PropagateBrowserCommandLin
switches::kDisableSpeechAPI,
switches::kDisableThreadedCompositing,
switches::kDisableTouchDragDrop,
diff --git a/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__process__host__impl.h b/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__process__host__impl.h
index dee03c8f2674..ea1facc7053c 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__process__host__impl.h
+++ b/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__process__host__impl.h
@@ -1,6 +1,6 @@
---- content/browser/renderer_host/render_process_host_impl.h.orig 2023-11-04 07:08:51 UTC
+--- content/browser/renderer_host/render_process_host_impl.h.orig 2023-12-23 12:33:28 UTC
+++ content/browser/renderer_host/render_process_host_impl.h
-@@ -541,7 +541,7 @@ class CONTENT_EXPORT RenderProcessHostImpl
+@@ -540,7 +540,7 @@ class CONTENT_EXPORT RenderProcessHostImpl
// Sets this RenderProcessHost to be guest only. For Testing only.
void SetForGuestsOnlyForTesting();
diff --git a/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__view__host__impl.cc b/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__view__host__impl.cc
index 329c17ba7e57..ab5b633dd811 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__view__host__impl.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__view__host__impl.cc
@@ -1,6 +1,6 @@
---- content/browser/renderer_host/render_view_host_impl.cc.orig 2023-10-13 13:20:35 UTC
+--- content/browser/renderer_host/render_view_host_impl.cc.orig 2023-12-23 12:33:28 UTC
+++ content/browser/renderer_host/render_view_host_impl.cc
-@@ -273,7 +273,7 @@ void RenderViewHostImpl::GetPlatformSpecificPrefs(
+@@ -271,7 +271,7 @@ void RenderViewHostImpl::GetPlatformSpecificPrefs(
display::win::ScreenWin::GetSystemMetricsInDIP(SM_CYVSCROLL);
prefs->arrow_bitmap_width_horizontal_scroll_bar_in_dips =
display::win::ScreenWin::GetSystemMetricsInDIP(SM_CXHSCROLL);
diff --git a/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__widget__host__view__aura.cc b/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__widget__host__view__aura.cc
index 5b3fad481f6b..616ec7e61f28 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__widget__host__view__aura.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__widget__host__view__aura.cc
@@ -1,4 +1,4 @@
---- content/browser/renderer_host/render_widget_host_view_aura.cc.orig 2023-11-04 07:08:51 UTC
+--- content/browser/renderer_host/render_widget_host_view_aura.cc.orig 2023-12-23 12:33:28 UTC
+++ content/browser/renderer_host/render_widget_host_view_aura.cc
@@ -121,7 +121,7 @@
#include "ui/gfx/gdi_util.h"
@@ -8,8 +8,8 @@
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
#include "content/browser/accessibility/browser_accessibility_auralinux.h"
#include "ui/base/ime/linux/text_edit_command_auralinux.h"
- #include "ui/linux/linux_ui.h"
-@@ -462,7 +462,7 @@ gfx::NativeViewAccessible RenderWidgetHostViewAura::Ge
+ #include "ui/base/ime/text_input_flags.h"
+@@ -463,7 +463,7 @@ gfx::NativeViewAccessible RenderWidgetHostViewAura::Ge
return ToBrowserAccessibilityWin(manager->GetBrowserAccessibilityRoot())
->GetCOM();
@@ -18,7 +18,7 @@
BrowserAccessibilityManager* manager =
host()->GetOrCreateRootBrowserAccessibilityManager();
if (manager && manager->GetBrowserAccessibilityRoot())
-@@ -1721,7 +1721,7 @@ bool RenderWidgetHostViewAura::ShouldDoLearning() {
+@@ -1724,7 +1724,7 @@ bool RenderWidgetHostViewAura::ShouldDoLearning() {
return GetTextInputManager() && GetTextInputManager()->should_do_learning();
}
@@ -27,7 +27,7 @@
bool RenderWidgetHostViewAura::SetCompositionFromExistingText(
const gfx::Range& range,
const std::vector<ui::ImeTextSpan>& ui_ime_text_spans) {
-@@ -2566,7 +2566,7 @@ bool RenderWidgetHostViewAura::NeedsInputGrab() {
+@@ -2562,7 +2562,7 @@ bool RenderWidgetHostViewAura::NeedsInputGrab() {
}
bool RenderWidgetHostViewAura::NeedsMouseCapture() {
@@ -36,7 +36,7 @@
return NeedsInputGrab();
#else
return false;
-@@ -2749,7 +2749,7 @@ void RenderWidgetHostViewAura::ForwardKeyboardEventWit
+@@ -2745,7 +2745,7 @@ void RenderWidgetHostViewAura::ForwardKeyboardEventWit
if (!target_host)
return;
diff --git a/www/ungoogled-chromium/files/patch-content_browser_service__worker_service__worker__context__wrapper.cc b/www/ungoogled-chromium/files/patch-content_browser_service__worker_service__worker__context__wrapper.cc
index c43c4fd9b2f2..834e2f0d24eb 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_service__worker_service__worker__context__wrapper.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_service__worker_service__worker__context__wrapper.cc
@@ -1,6 +1,6 @@
---- content/browser/service_worker/service_worker_context_wrapper.cc.orig 2023-11-04 07:08:51 UTC
+--- content/browser/service_worker/service_worker_context_wrapper.cc.orig 2023-12-23 12:33:28 UTC
+++ content/browser/service_worker/service_worker_context_wrapper.cc
-@@ -1440,7 +1440,12 @@ void ServiceWorkerContextWrapper::MaybeProcessPendingW
+@@ -1470,7 +1470,12 @@ void ServiceWorkerContextWrapper::MaybeProcessPendingW
return;
}
diff --git a/www/ungoogled-chromium/files/patch-content_browser_utility__process__host.cc b/www/ungoogled-chromium/files/patch-content_browser_utility__process__host.cc
index 9d9a6932a806..327f900e4c80 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_utility__process__host.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_utility__process__host.cc
@@ -1,4 +1,4 @@
---- content/browser/utility_process_host.cc.orig 2023-11-04 07:08:51 UTC
+--- content/browser/utility_process_host.cc.orig 2023-12-23 12:33:28 UTC
+++ content/browser/utility_process_host.cc
@@ -60,7 +60,7 @@
#include "content/browser/v8_snapshot_files.h"
@@ -36,7 +36,7 @@
gpu_client_(nullptr, base::OnTaskRunnerDeleter(nullptr)),
#endif
client_(std::move(client)) {
-@@ -419,7 +419,7 @@ bool UtilityProcessHost::StartProcess() {
+@@ -417,7 +417,7 @@ bool UtilityProcessHost::StartProcess() {
file_data_->files_to_preload.merge(GetV8SnapshotFilesToPreload());
#endif // BUILDFLAG(IS_POSIX)
@@ -45,7 +45,7 @@
// The network service should have access to the parent directories
// necessary for its usage.
if (sandbox_type_ == sandbox::mojom::Sandbox::kNetwork) {
-@@ -430,7 +430,7 @@ bool UtilityProcessHost::StartProcess() {
+@@ -428,7 +428,7 @@ bool UtilityProcessHost::StartProcess() {
}
#endif // BUILDFLAG(IS_LINUX)
diff --git a/www/ungoogled-chromium/files/patch-content_browser_utility__sandbox__delegate.cc b/www/ungoogled-chromium/files/patch-content_browser_utility__sandbox__delegate.cc
index bca3b89a2fe7..4a08305ce7d5 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_utility__sandbox__delegate.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_utility__sandbox__delegate.cc
@@ -1,6 +1,6 @@
---- content/browser/utility_sandbox_delegate.cc.orig 2023-07-21 09:49:17 UTC
+--- content/browser/utility_sandbox_delegate.cc.orig 2023-12-23 12:33:28 UTC
+++ content/browser/utility_sandbox_delegate.cc
-@@ -63,13 +63,13 @@ UtilitySandboxedProcessLauncherDelegate::
+@@ -64,13 +64,13 @@ UtilitySandboxedProcessLauncherDelegate::
#if BUILDFLAG(ENABLE_PPAPI)
sandbox_type_ == sandbox::mojom::Sandbox::kPpapi ||
#endif
@@ -17,7 +17,7 @@
sandbox_type_ == sandbox::mojom::Sandbox::kHardwareVideoEncoding ||
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
#if BUILDFLAG(IS_CHROMEOS_ASH)
-@@ -124,10 +124,10 @@ ZygoteCommunication* UtilitySandboxedProcessLauncherDe
+@@ -125,10 +125,10 @@ ZygoteCommunication* UtilitySandboxedProcessLauncherDe
// unsandboxed zygote and then apply their actual sandboxes in the forked
// process upon startup.
if (sandbox_type_ == sandbox::mojom::Sandbox::kNetwork ||
diff --git a/www/ungoogled-chromium/files/patch-content_browser_web__contents_web__contents__view__aura.cc b/www/ungoogled-chromium/files/patch-content_browser_web__contents_web__contents__view__aura.cc
index 93749c75cea8..50cc97f2df0c 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_web__contents_web__contents__view__aura.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_web__contents_web__contents__view__aura.cc
@@ -1,6 +1,6 @@
---- content/browser/web_contents/web_contents_view_aura.cc.orig 2023-11-04 07:08:51 UTC
+--- content/browser/web_contents/web_contents_view_aura.cc.orig 2023-12-23 12:33:28 UTC
+++ content/browser/web_contents/web_contents_view_aura.cc
-@@ -178,7 +178,7 @@ class WebDragSourceAura : public content::WebContentsO
+@@ -174,7 +174,7 @@ class WebDragSourceAura : public content::WebContentsO
raw_ptr<aura::Window> window_;
};
@@ -9,7 +9,7 @@
// Fill out the OSExchangeData with a file contents, synthesizing a name if
// necessary.
void PrepareDragForFileContents(const DropData& drop_data,
-@@ -262,7 +262,7 @@ void PrepareDragData(const DropData& drop_data,
+@@ -258,7 +258,7 @@ void PrepareDragData(const DropData& drop_data,
if (!drop_data.download_metadata.empty())
PrepareDragForDownload(drop_data, provider, web_contents);
#endif
@@ -18,7 +18,7 @@
// We set the file contents before the URL because the URL also sets file
// contents (to a .URL shortcut). We want to prefer file content data over
// a shortcut so we add it first.
-@@ -1380,7 +1380,7 @@ void WebContentsViewAura::OnMouseEvent(ui::MouseEvent*
+@@ -1319,7 +1319,7 @@ void WebContentsViewAura::OnMouseEvent(ui::MouseEvent*
// Linux window managers like to handle raise-on-click themselves. If we
// raise-on-click manually, this may override user settings that prevent
// focus-stealing.
diff --git a/www/ungoogled-chromium/files/patch-content_child_BUILD.gn b/www/ungoogled-chromium/files/patch-content_child_BUILD.gn
new file mode 100644
index 000000000000..81140ab6a872
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-content_child_BUILD.gn
@@ -0,0 +1,16 @@
+--- content/child/BUILD.gn.orig 2023-12-23 12:33:28 UTC
++++ content/child/BUILD.gn
+@@ -129,6 +129,13 @@ target(link_target_type, "child") {
+ ]
+ }
+
++ if (is_bsd) {
++ sources -= [
++ "sandboxed_process_thread_type_handler.cc",
++ "sandboxed_process_thread_type_handler.h",
++ ]
++ }
++
+ if (is_win) {
+ sources += [
+ "dwrite_font_proxy/dwrite_font_proxy_init_impl_win.cc",
diff --git a/www/ungoogled-chromium/files/patch-content_common_BUILD.gn b/www/ungoogled-chromium/files/patch-content_common_BUILD.gn
index 1f3f0249510b..c7e5e189815c 100644
--- a/www/ungoogled-chromium/files/patch-content_common_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-content_common_BUILD.gn
@@ -1,6 +1,6 @@
---- content/common/BUILD.gn.orig 2023-11-04 07:08:51 UTC
+--- content/common/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ content/common/BUILD.gn
-@@ -441,6 +441,9 @@ if (is_linux || is_chromeos) {
+@@ -438,6 +438,9 @@ if (is_linux || is_chromeos) {
public = [ "set_process_title_linux.h" ]
sources = [ "set_process_title_linux.cc" ]
deps = [ "//base" ]
@@ -10,12 +10,3 @@
}
}
-@@ -486,7 +489,7 @@ mojom("mojo_bindings") {
- if (enable_ipc_logging) {
- enabled_features += [ "ipc_logging" ]
- }
-- if (is_linux || is_chromeos) {
-+ if (!is_bsd && (is_linux || is_chromeos)) {
- enabled_features += [ "supports_thread_types" ]
- }
- if (use_clang_profiling_inside_sandbox) {
diff --git a/www/ungoogled-chromium/files/patch-content_common_features.cc b/www/ungoogled-chromium/files/patch-content_common_features.cc
index 3e501198592a..60982604be55 100644
--- a/www/ungoogled-chromium/files/patch-content_common_features.cc
+++ b/www/ungoogled-chromium/files/patch-content_common_features.cc
@@ -1,6 +1,6 @@
---- content/common/features.cc.orig 2023-11-04 07:08:51 UTC
+--- content/common/features.cc.orig 2023-12-23 12:33:28 UTC
+++ content/common/features.cc
-@@ -147,7 +147,7 @@ BASE_FEATURE(kEnableBackForwardCacheForScreenReader,
+@@ -154,7 +154,7 @@ BASE_FEATURE(kEnableBackForwardCacheForScreenReader,
base::FEATURE_ENABLED_BY_DEFAULT);
// Enables error reporting for JS errors inside DevTools frontend host
@@ -9,12 +9,12 @@
BASE_FEATURE(kEnableDevToolsJsErrorReporting,
"EnableDevToolsJsErrorReporting",
base::FEATURE_DISABLED_BY_DEFAULT);
-@@ -219,7 +219,7 @@ BASE_FEATURE(kGpuInfoCollectionSeparatePrefetch,
+@@ -246,7 +246,7 @@ BASE_FEATURE(kGroupNIKByJoiningOrigin,
// process and having browser process handle adjusting thread properties (nice
- // value, c-group, latency sensitivity...) for renderers which have sandbox
+ // value, c-group, latency sensitivity...) for children which have sandbox
// restrictions.
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
- BASE_FEATURE(kHandleRendererThreadTypeChangesInBrowser,
- "HandleRendererThreadTypeChangesInBrowser",
+ BASE_FEATURE(kHandleChildThreadTypeChangesInBrowser,
+ "HandleChildThreadTypeChangesInBrowser",
base::FEATURE_ENABLED_BY_DEFAULT);
diff --git a/www/ungoogled-chromium/files/patch-content_common_features.h b/www/ungoogled-chromium/files/patch-content_common_features.h
index 584cd8719726..3dca36b0de1f 100644
--- a/www/ungoogled-chromium/files/patch-content_common_features.h
+++ b/www/ungoogled-chromium/files/patch-content_common_features.h
@@ -1,4 +1,4 @@
---- content/common/features.h.orig 2023-11-04 07:08:51 UTC
+--- content/common/features.h.orig 2023-12-23 12:33:28 UTC
+++ content/common/features.h
@@ -37,7 +37,7 @@ CONTENT_EXPORT BASE_DECLARE_FEATURE(kDeviceMonitorMac)
CONTENT_EXPORT BASE_DECLARE_FEATURE(kDocumentPolicyNegotiation);
@@ -9,12 +9,12 @@
CONTENT_EXPORT BASE_DECLARE_FEATURE(kEnableDevToolsJsErrorReporting);
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
CONTENT_EXPORT BASE_DECLARE_FEATURE(kEnsureAllowBindingsIsAlwaysForWebUI);
-@@ -55,7 +55,7 @@ CONTENT_EXPORT BASE_DECLARE_FEATURE(kForwardMemoryPres
- #if BUILDFLAG(IS_WIN)
+@@ -58,7 +58,7 @@ CONTENT_EXPORT BASE_DECLARE_FEATURE(kForwardMemoryPres
CONTENT_EXPORT BASE_DECLARE_FEATURE(kGpuInfoCollectionSeparatePrefetch);
#endif
+ CONTENT_EXPORT BASE_DECLARE_FEATURE(kGroupNIKByJoiningOrigin);
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
- CONTENT_EXPORT BASE_DECLARE_FEATURE(kHandleRendererThreadTypeChangesInBrowser);
+ CONTENT_EXPORT BASE_DECLARE_FEATURE(kHandleChildThreadTypeChangesInBrowser);
#endif
CONTENT_EXPORT BASE_DECLARE_FEATURE(kHighPriorityBeforeUnload);
diff --git a/www/ungoogled-chromium/files/patch-content_gpu_gpu__child__thread.cc b/www/ungoogled-chromium/files/patch-content_gpu_gpu__child__thread.cc
new file mode 100644
index 000000000000..f7c8e6a75f03
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-content_gpu_gpu__child__thread.cc
@@ -0,0 +1,21 @@
+--- content/gpu/gpu_child_thread.cc.orig 2023-12-23 12:33:28 UTC
++++ content/gpu/gpu_child_thread.cc
+@@ -59,7 +59,7 @@
+ #include "third_party/skia/include/ports/SkFontConfigInterface.h"
+ #endif
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
+ #include "content/child/sandboxed_process_thread_type_handler.h"
+ #endif
+
+@@ -143,7 +143,8 @@ void GpuChildThread::Init(const base::TimeTicks& proce
+
+ viz_main_.gpu_service()->set_start_time(process_start_time);
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
++// XXX BSD
++#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) && !BUILDFLAG(IS_BSD)
+ SandboxedProcessThreadTypeHandler::NotifyMainChildThreadCreated();
+ #endif
+
diff --git a/www/ungoogled-chromium/files/patch-content_gpu_gpu__main.cc b/www/ungoogled-chromium/files/patch-content_gpu_gpu__main.cc
index 218b768ce6da..91ec5755fa97 100644
--- a/www/ungoogled-chromium/files/patch-content_gpu_gpu__main.cc
+++ b/www/ungoogled-chromium/files/patch-content_gpu_gpu__main.cc
@@ -1,15 +1,15 @@
---- content/gpu/gpu_main.cc.orig 2023-08-18 10:26:52 UTC
+--- content/gpu/gpu_main.cc.orig 2023-12-23 12:33:28 UTC
+++ content/gpu/gpu_main.cc
-@@ -90,7 +90,7 @@
+@@ -91,7 +91,7 @@
#include "sandbox/win/src/sandbox.h"
#endif
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
+ #include "content/child/sandboxed_process_thread_type_handler.h"
#include "content/gpu/gpu_sandbox_hook_linux.h"
#include "sandbox/policy/linux/sandbox_linux.h"
- #include "sandbox/policy/sandbox_type.h"
-@@ -112,7 +112,7 @@ namespace content {
+@@ -114,7 +114,7 @@ namespace content {
namespace {
@@ -18,7 +18,7 @@
bool StartSandboxLinux(gpu::GpuWatchdogThread*,
const gpu::GPUInfo*,
const gpu::GpuPreferences&);
-@@ -177,7 +177,7 @@ class ContentSandboxHelper : public gpu::GpuSandboxHel
+@@ -179,7 +179,7 @@ class ContentSandboxHelper : public gpu::GpuSandboxHel
bool EnsureSandboxInitialized(gpu::GpuWatchdogThread* watchdog_thread,
const gpu::GPUInfo* gpu_info,
const gpu::GpuPreferences& gpu_prefs) override {
@@ -27,7 +27,7 @@
return StartSandboxLinux(watchdog_thread, gpu_info, gpu_prefs);
#elif BUILDFLAG(IS_WIN)
return StartSandboxWindows(sandbox_info_);
-@@ -287,7 +287,7 @@ int GpuMain(MainFunctionParams parameters) {
+@@ -289,7 +289,7 @@ int GpuMain(MainFunctionParams parameters) {
std::make_unique<base::SingleThreadTaskExecutor>(
gpu_preferences.message_pump_type);
}
@@ -36,7 +36,17 @@
#error "Unsupported Linux platform."
#elif BUILDFLAG(IS_MAC)
// Cross-process CoreAnimation requires a CFRunLoop to function at all, and
-@@ -422,7 +422,7 @@ int GpuMain(MainFunctionParams parameters) {
+@@ -328,7 +328,8 @@ int GpuMain(MainFunctionParams parameters) {
+ // before it.
+ InitializeSkia();
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
++// XXX BSD
++#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) && !BUILDFLAG(IS_BSD)
+ // Thread type delegate of the process should be registered before
+ // first thread type change in ChildProcess constructor.
+ // It also needs to be registered before the process has multiple threads,
+@@ -436,7 +437,7 @@ int GpuMain(MainFunctionParams parameters) {
namespace {
@@ -45,7 +55,7 @@
bool StartSandboxLinux(gpu::GpuWatchdogThread* watchdog_thread,
const gpu::GPUInfo* gpu_info,
const gpu::GpuPreferences& gpu_prefs) {
-@@ -462,7 +462,7 @@ bool StartSandboxLinux(gpu::GpuWatchdogThread* watchdo
+@@ -476,7 +477,7 @@ bool StartSandboxLinux(gpu::GpuWatchdogThread* watchdo
sandbox_options.accelerated_video_encode_enabled =
!gpu_prefs.disable_accelerated_video_encode;
diff --git a/www/ungoogled-chromium/files/patch-content_public_browser_content__browser__client.cc b/www/ungoogled-chromium/files/patch-content_public_browser_content__browser__client.cc
new file mode 100644
index 000000000000..76b11df472f9
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-content_public_browser_content__browser__client.cc
@@ -0,0 +1,11 @@
+--- content/public/browser/content_browser_client.cc.orig 2023-12-23 12:33:28 UTC
++++ content/public/browser/content_browser_client.cc
+@@ -1273,7 +1273,7 @@ bool ContentBrowserClient::ShouldRunOutOfProcessSystem
+ // that can be adequately sandboxed.
+ // Currently Android's network service will not run out of process or sandboxed,
+ // so OutOfProcessSystemDnsResolution is not currently enabled on Android.
+-#if BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ return true;
+ #else
+ return false;
diff --git a/www/ungoogled-chromium/files/patch-content_public_common_content__features.cc b/www/ungoogled-chromium/files/patch-content_public_common_content__features.cc
index 7ae123b66cb4..2a98684ad6fe 100644
--- a/www/ungoogled-chromium/files/patch-content_public_common_content__features.cc
+++ b/www/ungoogled-chromium/files/patch-content_public_common_content__features.cc
@@ -1,4 +1,4 @@
---- content/public/common/content_features.cc.orig 2023-11-04 07:08:51 UTC
+--- content/public/common/content_features.cc.orig 2023-12-23 12:33:28 UTC
+++ content/public/common/content_features.cc
@@ -40,7 +40,7 @@ BASE_FEATURE(kAudioServiceOutOfProcess,
"AudioServiceOutOfProcess",
@@ -18,7 +18,7 @@
base::FEATURE_ENABLED_BY_DEFAULT
#else
base::FEATURE_DISABLED_BY_DEFAULT
-@@ -1131,7 +1131,7 @@ BASE_FEATURE(kWebAssemblyTiering,
+@@ -1170,7 +1170,7 @@ BASE_FEATURE(kWebAssemblyTiering,
BASE_FEATURE(kWebAssemblyTrapHandler,
"WebAssemblyTrapHandler",
#if ((BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_WIN) || \
@@ -27,7 +27,7 @@
defined(ARCH_CPU_X86_64)) || \
(BUILDFLAG(IS_MAC) && defined(ARCH_CPU_ARM64))
base::FEATURE_ENABLED_BY_DEFAULT
-@@ -1172,7 +1172,11 @@ BASE_FEATURE(kWebUICodeCache,
+@@ -1212,7 +1212,11 @@ BASE_FEATURE(kWebUICodeCache,
// Controls whether the WebUSB API is enabled:
// https://wicg.github.io/webusb
diff --git a/www/ungoogled-chromium/files/patch-content_public_common_content__switches.cc b/www/ungoogled-chromium/files/patch-content_public_common_content__switches.cc
index 0dea48e5a732..8e88023a0de9 100644
--- a/www/ungoogled-chromium/files/patch-content_public_common_content__switches.cc
+++ b/www/ungoogled-chromium/files/patch-content_public_common_content__switches.cc
@@ -1,6 +1,6 @@
---- content/public/common/content_switches.cc.orig 2023-10-13 13:20:35 UTC
+--- content/public/common/content_switches.cc.orig 2023-12-23 12:33:28 UTC
+++ content/public/common/content_switches.cc
-@@ -367,6 +367,8 @@ const char kEnableIsolatedWebAppsInRenderer[] =
+@@ -364,6 +364,8 @@ const char kEnableIsolatedWebAppsInRenderer[] =
// builds.
const char kEnableLogging[] = "enable-logging";
@@ -9,7 +9,7 @@
// Enables the type, downlinkMax attributes of the NetInfo API. Also, enables
// triggering of change attribute of the NetInfo API when there is a change in
// the connection type.
-@@ -992,7 +994,7 @@ const char kEnableAutomation[] = "enable-automation";
+@@ -994,7 +996,7 @@ const char kEnableAutomation[] = "enable-automation";
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
diff --git a/www/ungoogled-chromium/files/patch-content_public_common_content__switches.h b/www/ungoogled-chromium/files/patch-content_public_common_content__switches.h
index 2405fe238779..c9ceac0379ff 100644
--- a/www/ungoogled-chromium/files/patch-content_public_common_content__switches.h
+++ b/www/ungoogled-chromium/files/patch-content_public_common_content__switches.h
@@ -1,6 +1,6 @@
---- content/public/common/content_switches.h.orig 2023-10-13 13:20:35 UTC
+--- content/public/common/content_switches.h.orig 2023-12-23 12:33:28 UTC
+++ content/public/common/content_switches.h
-@@ -116,6 +116,7 @@ CONTENT_EXPORT extern const char kEnableGpuMemoryBuffe
+@@ -115,6 +115,7 @@ CONTENT_EXPORT extern const char kEnableGpuMemoryBuffe
CONTENT_EXPORT extern const char kEnableIsolatedWebAppsInRenderer[];
CONTENT_EXPORT extern const char kEnableLCDText[];
CONTENT_EXPORT extern const char kEnableLogging[];
diff --git a/www/ungoogled-chromium/files/patch-content_public_test_mock__render__thread.cc b/www/ungoogled-chromium/files/patch-content_public_test_mock__render__thread.cc
deleted file mode 100644
index b414f4d576e8..000000000000
--- a/www/ungoogled-chromium/files/patch-content_public_test_mock__render__thread.cc
+++ /dev/null
@@ -1,11 +0,0 @@
---- content/public/test/mock_render_thread.cc.orig 2023-03-10 11:01:21 UTC
-+++ content/public/test/mock_render_thread.cc
-@@ -64,7 +64,7 @@ class MockRenderMessageFilterImpl : public mojom::Rend
- std::move(callback).Run(false);
- }
-
--#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
- void SetThreadType(int32_t platform_thread_id,
- base::ThreadType thread_type) override {}
- #endif
diff --git a/www/ungoogled-chromium/files/patch-content_renderer_BUILD.gn b/www/ungoogled-chromium/files/patch-content_renderer_BUILD.gn
deleted file mode 100644
index 49456c5fc4d8..000000000000
--- a/www/ungoogled-chromium/files/patch-content_renderer_BUILD.gn
+++ /dev/null
@@ -1,16 +0,0 @@
---- content/renderer/BUILD.gn.orig 2023-10-13 13:20:35 UTC
-+++ content/renderer/BUILD.gn
-@@ -201,6 +201,13 @@ target(link_target_type, "renderer") {
- ]
- }
-
-+ if (is_bsd) {
-+ sources -= [
-+ "renderer_thread_type_handler.cc",
-+ "renderer_thread_type_handler.h",
-+ ]
-+ }
-+
- if (is_mac) {
- sources += [
- "renderer_main_platform_delegate_mac.mm",
diff --git a/www/ungoogled-chromium/files/patch-content_renderer_render__process__impl.cc b/www/ungoogled-chromium/files/patch-content_renderer_render__process__impl.cc
index 664dcb3e3015..d3b1d49d98f7 100644
--- a/www/ungoogled-chromium/files/patch-content_renderer_render__process__impl.cc
+++ b/www/ungoogled-chromium/files/patch-content_renderer_render__process__impl.cc
@@ -1,6 +1,6 @@
---- content/renderer/render_process_impl.cc.orig 2023-11-04 07:08:51 UTC
+--- content/renderer/render_process_impl.cc.orig 2023-12-23 12:33:28 UTC
+++ content/renderer/render_process_impl.cc
-@@ -46,7 +46,7 @@
+@@ -47,7 +47,7 @@
#if BUILDFLAG(IS_WIN)
#include "base/win/win_util.h"
#endif
@@ -9,7 +9,7 @@
#include "v8/include/v8-wasm-trap-handler-posix.h"
#endif
-@@ -212,7 +212,7 @@ RenderProcessImpl::RenderProcessImpl()
+@@ -224,7 +224,7 @@ RenderProcessImpl::RenderProcessImpl()
v8::V8::SetFlagsFromString(kSABPerContextFlag, sizeof(kSABPerContextFlag));
}
diff --git a/www/ungoogled-chromium/files/patch-content_renderer_render__thread__impl.cc b/www/ungoogled-chromium/files/patch-content_renderer_render__thread__impl.cc
index 03608b5daf7f..a1872fb8a085 100644
--- a/www/ungoogled-chromium/files/patch-content_renderer_render__thread__impl.cc
+++ b/www/ungoogled-chromium/files/patch-content_renderer_render__thread__impl.cc
@@ -1,4 +1,4 @@
---- content/renderer/render_thread_impl.cc.orig 2023-11-04 07:08:51 UTC
+--- content/renderer/render_thread_impl.cc.orig 2023-12-23 12:33:28 UTC
+++ content/renderer/render_thread_impl.cc
@@ -205,6 +205,8 @@
@@ -9,7 +9,7 @@
#else
#include <malloc.h>
#endif
-@@ -1016,7 +1018,7 @@ media::GpuVideoAcceleratorFactories* RenderThreadImpl:
+@@ -1020,7 +1022,7 @@ media::GpuVideoAcceleratorFactories* RenderThreadImpl:
kGpuStreamIdMedia, kGpuStreamPriorityMedia);
const bool enable_video_decode_accelerator =
@@ -18,7 +18,7 @@
base::FeatureList::IsEnabled(media::kVaapiVideoDecodeLinux) &&
#endif // BUILDFLAG(IS_LINUX)
!cmd_line->HasSwitch(switches::kDisableAcceleratedVideoDecode) &&
-@@ -1025,7 +1027,7 @@ media::GpuVideoAcceleratorFactories* RenderThreadImpl:
+@@ -1029,7 +1031,7 @@ media::GpuVideoAcceleratorFactories* RenderThreadImpl:
gpu::kGpuFeatureStatusEnabled);
const bool enable_video_encode_accelerator =
@@ -27,7 +27,7 @@
base::FeatureList::IsEnabled(media::kVaapiVideoEncodeLinux) &&
#else
!cmd_line->HasSwitch(switches::kDisableAcceleratedVideoEncode) &&
-@@ -1799,7 +1801,7 @@ std::unique_ptr<CodecFactory> RenderThreadImpl::Create
+@@ -1807,7 +1809,7 @@ std::unique_ptr<CodecFactory> RenderThreadImpl::Create
bool enable_video_encode_accelerator) {
mojo::PendingRemote<media::mojom::VideoEncodeAcceleratorProvider>
vea_provider;
diff --git a/www/ungoogled-chromium/files/patch-content_renderer_renderer__blink__platform__impl.cc b/www/ungoogled-chromium/files/patch-content_renderer_renderer__blink__platform__impl.cc
index 53853e151723..582dac289c64 100644
--- a/www/ungoogled-chromium/files/patch-content_renderer_renderer__blink__platform__impl.cc
+++ b/www/ungoogled-chromium/files/patch-content_renderer_renderer__blink__platform__impl.cc
@@ -1,4 +1,4 @@
---- content/renderer/renderer_blink_platform_impl.cc.orig 2023-11-04 07:08:51 UTC
+--- content/renderer/renderer_blink_platform_impl.cc.orig 2023-12-23 12:33:28 UTC
+++ content/renderer/renderer_blink_platform_impl.cc
@@ -113,7 +113,7 @@
@@ -7,9 +7,9 @@
-#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
#include "content/child/child_process_sandbox_support_impl_linux.h"
+ #include "content/child/sandboxed_process_thread_type_handler.h"
#endif
-
-@@ -181,13 +181,13 @@ RendererBlinkPlatformImpl::RendererBlinkPlatformImpl(
+@@ -182,13 +182,13 @@ RendererBlinkPlatformImpl::RendererBlinkPlatformImpl(
sudden_termination_disables_(0),
is_locked_to_site_(false),
main_thread_scheduler_(main_thread_scheduler) {
@@ -25,7 +25,7 @@
mojo::PendingRemote<font_service::mojom::FontService> font_service;
RenderThreadImpl::current()->BindHostReceiver(
font_service.InitWithNewPipeAndPassReceiver());
-@@ -196,7 +196,7 @@ RendererBlinkPlatformImpl::RendererBlinkPlatformImpl(
+@@ -197,7 +197,7 @@ RendererBlinkPlatformImpl::RendererBlinkPlatformImpl(
#endif
}
@@ -34,7 +34,7 @@
if (sandboxEnabled()) {
#if BUILDFLAG(IS_MAC)
sandbox_support_ = std::make_unique<WebSandboxSupportMac>();
-@@ -254,7 +254,7 @@ void RendererBlinkPlatformImpl::SetThreadType(base::Pl
+@@ -258,7 +258,7 @@ void RendererBlinkPlatformImpl::SetThreadType(base::Pl
#endif
blink::WebSandboxSupport* RendererBlinkPlatformImpl::GetSandboxSupport() {
diff --git a/www/ungoogled-chromium/files/patch-content_renderer_renderer__blink__platform__impl.h b/www/ungoogled-chromium/files/patch-content_renderer_renderer__blink__platform__impl.h
index 4854b74fc422..933e6c843204 100644
--- a/www/ungoogled-chromium/files/patch-content_renderer_renderer__blink__platform__impl.h
+++ b/www/ungoogled-chromium/files/patch-content_renderer_renderer__blink__platform__impl.h
@@ -1,6 +1,6 @@
---- content/renderer/renderer_blink_platform_impl.h.orig 2023-11-04 07:08:51 UTC
+--- content/renderer/renderer_blink_platform_impl.h.orig 2023-12-23 12:33:28 UTC
+++ content/renderer/renderer_blink_platform_impl.h
-@@ -235,7 +235,7 @@ class CONTENT_EXPORT RendererBlinkPlatformImpl : publi
+@@ -234,7 +234,7 @@ class CONTENT_EXPORT RendererBlinkPlatformImpl : publi
void Collect3DContextInformation(blink::Platform::GraphicsInfo* gl_info,
const gpu::GPUInfo& gpu_info) const;
diff --git a/www/ungoogled-chromium/files/patch-content_shell_BUILD.gn b/www/ungoogled-chromium/files/patch-content_shell_BUILD.gn
index a4c070ffa0e1..228195a4f068 100644
--- a/www/ungoogled-chromium/files/patch-content_shell_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-content_shell_BUILD.gn
@@ -1,6 +1,6 @@
---- content/shell/BUILD.gn.orig 2023-11-04 07:08:51 UTC
+--- content/shell/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ content/shell/BUILD.gn
-@@ -1025,7 +1025,7 @@ group("content_shell_crash_test") {
+@@ -1023,7 +1023,7 @@ group("content_shell_crash_test") {
if (is_win) {
data_deps += [ "//build/win:copy_cdb_to_output" ]
}
diff --git a/www/ungoogled-chromium/files/patch-content_shell_app_shell__main__delegate.cc b/www/ungoogled-chromium/files/patch-content_shell_app_shell__main__delegate.cc
index ee8115209db3..11b01dbb41f7 100644
--- a/www/ungoogled-chromium/files/patch-content_shell_app_shell__main__delegate.cc
+++ b/www/ungoogled-chromium/files/patch-content_shell_app_shell__main__delegate.cc
@@ -1,6 +1,6 @@
---- content/shell/app/shell_main_delegate.cc.orig 2023-05-05 12:12:41 UTC
+--- content/shell/app/shell_main_delegate.cc.orig 2023-12-23 12:33:28 UTC
+++ content/shell/app/shell_main_delegate.cc
-@@ -223,7 +223,7 @@ void ShellMainDelegate::PreSandboxStartup() {
+@@ -227,7 +227,7 @@ void ShellMainDelegate::PreSandboxStartup() {
// Reporting for sub-processes will be initialized in ZygoteForked.
if (process_type != switches::kZygoteProcess) {
crash_reporter::InitializeCrashpad(process_type.empty(), process_type);
diff --git a/www/ungoogled-chromium/files/patch-content_test_BUILD.gn b/www/ungoogled-chromium/files/patch-content_test_BUILD.gn
index f06b689d18e9..24262177f7b3 100644
--- a/www/ungoogled-chromium/files/patch-content_test_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-content_test_BUILD.gn
@@ -1,6 +1,6 @@
---- content/test/BUILD.gn.orig 2023-11-04 07:08:51 UTC
+--- content/test/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ content/test/BUILD.gn
-@@ -2237,7 +2237,7 @@ static_library("run_all_unittests") {
+@@ -2269,7 +2269,7 @@ static_library("run_all_unittests") {
":test_support",
"//base/test:test_support",
]
@@ -9,7 +9,7 @@
deps += [ "//content/common:set_process_title_linux" ]
}
}
-@@ -3322,7 +3322,7 @@ test("content_unittests") {
+@@ -3367,7 +3367,7 @@ test("content_unittests") {
deps += [ "//third_party/boringssl" ]
}
diff --git a/www/ungoogled-chromium/files/patch-content_utility_services.cc b/www/ungoogled-chromium/files/patch-content_utility_services.cc
index 7de2e83d6ea5..9ee56dc42d26 100644
--- a/www/ungoogled-chromium/files/patch-content_utility_services.cc
+++ b/www/ungoogled-chromium/files/patch-content_utility_services.cc
@@ -1,4 +1,4 @@
---- content/utility/services.cc.orig 2023-11-04 07:08:51 UTC
+--- content/utility/services.cc.orig 2023-12-23 12:33:28 UTC
+++ content/utility/services.cc
@@ -67,7 +67,7 @@
extern sandbox::TargetServices* g_utility_target_services;
@@ -16,9 +16,9 @@
-#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH)) && \
+#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_BSD)) && \
(BUILDFLAG(USE_VAAPI) || BUILDFLAG(USE_V4L2_CODEC))
+ #include "content/common/features.h"
#include "media/mojo/services/stable_video_decoder_factory_process_service.h" // nogncheck
- #endif // (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH)) &&
-@@ -106,7 +106,7 @@ extern sandbox::TargetServices* g_utility_target_servi
+@@ -107,7 +107,7 @@ extern sandbox::TargetServices* g_utility_target_servi
#include "ui/accessibility/accessibility_features.h"
#endif // BUILDFLAG(ENABLE_ACCESSIBILITY_SERVICE)
@@ -27,7 +27,7 @@
#include "media/capture/capture_switches.h"
#include "services/viz/public/cpp/gpu/gpu.h"
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN)
-@@ -223,7 +223,7 @@ auto RunAudio(mojo::PendingReceiver<audio::mojom::Audi
+@@ -224,7 +224,7 @@ auto RunAudio(mojo::PendingReceiver<audio::mojom::Audi
<< "task_policy_set TASK_QOS_POLICY";
#endif
@@ -36,7 +36,7 @@
auto* command_line = base::CommandLine::ForCurrentProcess();
if (sandbox::policy::SandboxTypeFromCommandLine(*command_line) ==
sandbox::mojom::Sandbox::kNoSandbox) {
-@@ -305,7 +305,7 @@ auto RunVideoCapture(
+@@ -306,7 +306,7 @@ auto RunVideoCapture(
mojo::PendingReceiver<video_capture::mojom::VideoCaptureService> receiver) {
auto service = std::make_unique<UtilityThreadVideoCaptureServiceImpl>(
std::move(receiver), base::SingleThreadTaskRunner::GetCurrentDefault());
@@ -45,7 +45,7 @@
if (switches::IsVideoCaptureUseGpuMemoryBufferEnabled()) {
mojo::PendingRemote<viz::mojom::Gpu> remote_gpu;
content::UtilityThread::Get()->BindHostReceiver(
-@@ -344,7 +344,7 @@ auto RunOOPArcVideoAcceleratorFactoryService(
+@@ -345,7 +345,7 @@ auto RunOOPArcVideoAcceleratorFactoryService(
#endif // BUILDFLAG(IS_CHROMEOS_ASH) && (BUILDFLAG(USE_VAAPI) ||
// BUILDFLAG(USE_V4L2_CODEC))
@@ -54,7 +54,7 @@
(BUILDFLAG(USE_VAAPI) || BUILDFLAG(USE_V4L2_CODEC))
auto RunStableVideoDecoderFactoryProcessService(
mojo::PendingReceiver<
-@@ -355,7 +355,7 @@ auto RunStableVideoDecoderFactoryProcessService(
+@@ -356,7 +356,7 @@ auto RunStableVideoDecoderFactoryProcessService(
#endif // (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH)) &&
// (BUILDFLAG(USE_VAAPI) || BUILDFLAG(USE_V4L2_CODEC))
@@ -63,14 +63,25 @@
auto RunVideoEncodeAcceleratorProviderFactory(
mojo::PendingReceiver<media::mojom::VideoEncodeAcceleratorProviderFactory>
receiver) {
-@@ -418,13 +418,13 @@ void RegisterMainThreadServices(mojo::ServiceFactory&
+@@ -379,7 +379,7 @@ void RegisterIOThreadServices(mojo::ServiceFactory& se
+ // loop of type IO that can get notified when pipes have data.
+ services.Add(RunNetworkService);
+
+-#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH)) && \
++#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_BSD)) && \
+ (BUILDFLAG(USE_VAAPI) || BUILDFLAG(USE_V4L2_CODEC))
+ if (base::FeatureList::IsEnabled(
+ features::kRunStableVideoDecoderFactoryProcessServiceOnIOThread)) {
+@@ -428,7 +428,7 @@ void RegisterMainThreadServices(mojo::ServiceFactory&
#endif // BUILDFLAG(IS_CHROMEOS_ASH) && (BUILDFLAG(USE_VAAPI) ||
// BUILDFLAG(USE_V4L2_CODEC))
-#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH)) && \
+#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_BSD)) && \
(BUILDFLAG(USE_VAAPI) || BUILDFLAG(USE_V4L2_CODEC))
- services.Add(RunStableVideoDecoderFactoryProcessService);
+ if (!base::FeatureList::IsEnabled(
+ features::kRunStableVideoDecoderFactoryProcessServiceOnIOThread)) {
+@@ -437,7 +437,7 @@ void RegisterMainThreadServices(mojo::ServiceFactory&
#endif // (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH)) &&
// (BUILDFLAG(USE_VAAPI) || BUILDFLAG(USE_V4L2_CODEC))
diff --git a/www/ungoogled-chromium/files/patch-content_utility_utility__main.cc b/www/ungoogled-chromium/files/patch-content_utility_utility__main.cc
index 1adb7c259825..4239a3025a30 100644
--- a/www/ungoogled-chromium/files/patch-content_utility_utility__main.cc
+++ b/www/ungoogled-chromium/files/patch-content_utility_utility__main.cc
@@ -1,6 +1,6 @@
---- content/utility/utility_main.cc.orig 2023-10-13 13:20:35 UTC
+--- content/utility/utility_main.cc.orig 2023-12-23 12:33:28 UTC
+++ content/utility/utility_main.cc
-@@ -34,7 +34,7 @@
+@@ -37,7 +37,7 @@
#include "third_party/icu/source/common/unicode/unistr.h"
#include "third_party/icu/source/i18n/unicode/timezone.h"
@@ -9,7 +9,7 @@
#include "base/file_descriptor_store.h"
#include "base/files/file_util.h"
#include "base/pickle.h"
-@@ -42,7 +42,9 @@
+@@ -46,7 +46,9 @@
#include "content/utility/speech/speech_recognition_sandbox_hook_linux.h"
#include "gpu/config/gpu_info_collector.h"
#include "media/gpu/sandbox/hardware_video_encoding_sandbox_hook_linux.h"
@@ -19,7 +19,7 @@
#include "services/audio/audio_sandbox_hook_linux.h"
#include "services/network/network_sandbox_hook_linux.h"
// gn check is not smart enough to realize that this include only applies to
-@@ -54,10 +56,14 @@
+@@ -58,10 +60,14 @@
#endif
#endif
@@ -35,7 +35,7 @@
#if BUILDFLAG(IS_CHROMEOS_ASH)
#include "chromeos/ash/components/assistant/buildflags.h"
#include "chromeos/ash/services/ime/ime_sandbox_hook.h"
-@@ -69,7 +75,7 @@
+@@ -73,7 +79,7 @@
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
#if (BUILDFLAG(ENABLE_SCREEN_AI_SERVICE) && \
@@ -44,7 +44,7 @@
#include "components/services/screen_ai/sandbox/screen_ai_sandbox_hook_linux.h" // nogncheck
#endif
-@@ -95,7 +101,7 @@ namespace content {
+@@ -99,7 +105,7 @@ namespace content {
namespace {
@@ -53,7 +53,7 @@
std::vector<std::string> GetNetworkContextsParentDirectories() {
base::MemoryMappedFile::Region region;
base::ScopedFD read_pipe_fd = base::FileDescriptorStore::GetInstance().TakeFD(
-@@ -123,7 +129,7 @@ std::vector<std::string> GetNetworkContextsParentDirec
+@@ -127,7 +133,7 @@ std::vector<std::string> GetNetworkContextsParentDirec
bool ShouldUseAmdGpuPolicy(sandbox::mojom::Sandbox sandbox_type) {
const bool obtain_gpu_info =
@@ -62,16 +62,26 @@
sandbox_type == sandbox::mojom::Sandbox::kHardwareVideoDecoding ||
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH)
sandbox_type == sandbox::mojom::Sandbox::kHardwareVideoEncoding;
-@@ -239,7 +245,7 @@ int UtilityMain(MainFunctionParams parameters) {
+@@ -248,7 +254,8 @@ int UtilityMain(MainFunctionParams parameters) {
}
}
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
++// XXX BSD
++#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) && !BUILDFLAG(IS_BSD)
+ // Thread type delegate of the process should be registered before
+ // first thread type change in ChildProcess constructor.
+ // It also needs to be registered before the process has multiple threads,
+@@ -259,7 +266,7 @@ int UtilityMain(MainFunctionParams parameters) {
+ }
+ #endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
// Initializes the sandbox before any threads are created.
// TODO(jorgelo): move this after GTK initialization when we enable a strict
// Seccomp-BPF policy.
-@@ -268,7 +274,7 @@ int UtilityMain(MainFunctionParams parameters) {
+@@ -288,7 +295,7 @@ int UtilityMain(MainFunctionParams parameters) {
pre_sandbox_hook = base::BindOnce(&screen_ai::ScreenAIPreSandboxHook);
break;
#endif
@@ -80,7 +90,7 @@
case sandbox::mojom::Sandbox::kHardwareVideoDecoding:
pre_sandbox_hook =
base::BindOnce(&media::HardwareVideoDecodingPreSandboxHook);
-@@ -295,6 +301,7 @@ int UtilityMain(MainFunctionParams parameters) {
+@@ -315,6 +322,7 @@ int UtilityMain(MainFunctionParams parameters) {
default:
break;
}
@@ -88,7 +98,7 @@
if (!sandbox::policy::IsUnsandboxedSandboxType(sandbox_type) &&
(parameters.zygote_child || !pre_sandbox_hook.is_null())) {
sandbox::policy::SandboxLinux::Options sandbox_options;
-@@ -303,6 +310,11 @@ int UtilityMain(MainFunctionParams parameters) {
+@@ -323,6 +331,11 @@ int UtilityMain(MainFunctionParams parameters) {
sandbox::policy::Sandbox::Initialize(
sandbox_type, std::move(pre_sandbox_hook), sandbox_options);
}
diff --git a/www/ungoogled-chromium/files/patch-content_utility_utility__thread__impl.cc b/www/ungoogled-chromium/files/patch-content_utility_utility__thread__impl.cc
new file mode 100644
index 000000000000..567a13af016e
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-content_utility_utility__thread__impl.cc
@@ -0,0 +1,21 @@
+--- content/utility/utility_thread_impl.cc.orig 2023-12-23 12:33:28 UTC
++++ content/utility/utility_thread_impl.cc
+@@ -32,7 +32,7 @@
+ #include "mojo/public/cpp/bindings/service_factory.h"
+ #include "third_party/abseil-cpp/absl/types/optional.h"
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
+ #include "content/child/sandboxed_process_thread_type_handler.h"
+ #endif
+
+@@ -252,7 +252,8 @@ void UtilityThreadImpl::Init() {
+
+ GetContentClient()->utility()->UtilityThreadStarted();
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
++// XXX BSD
++#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) && !BUILDFLAG(IS_BSD)
+ SandboxedProcessThreadTypeHandler::NotifyMainChildThreadCreated();
+ #endif
+
diff --git a/www/ungoogled-chromium/files/patch-extensions_browser_api_api__browser__context__keyed__service__factories.cc b/www/ungoogled-chromium/files/patch-extensions_browser_api_api__browser__context__keyed__service__factories.cc
index 542e7c7ce960..9c7e55cd423e 100644
--- a/www/ungoogled-chromium/files/patch-extensions_browser_api_api__browser__context__keyed__service__factories.cc
+++ b/www/ungoogled-chromium/files/patch-extensions_browser_api_api__browser__context__keyed__service__factories.cc
@@ -1,8 +1,8 @@
---- extensions/browser/api/api_browser_context_keyed_service_factories.cc.orig 2023-07-21 09:49:17 UTC
+--- extensions/browser/api/api_browser_context_keyed_service_factories.cc.orig 2023-12-23 12:33:28 UTC
+++ extensions/browser/api/api_browser_context_keyed_service_factories.cc
-@@ -104,7 +104,7 @@ void EnsureApiBrowserContextKeyedServiceFactoriesBuilt
- MessageService::GetFactoryInstance();
+@@ -107,7 +107,7 @@ void EnsureApiBrowserContextKeyedServiceFactoriesBuilt
MessagingAPIMessageFilter::EnsureAssociatedFactoryBuilt();
+ #endif
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_WIN) || \
- BUILDFLAG(IS_MAC)
+ BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD)
diff --git a/www/ungoogled-chromium/files/patch-extensions_browser_api_management_management__api.cc b/www/ungoogled-chromium/files/patch-extensions_browser_api_management_management__api.cc
new file mode 100644
index 000000000000..ea57431dd7fc
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-extensions_browser_api_management_management__api.cc
@@ -0,0 +1,11 @@
+--- extensions/browser/api/management/management_api.cc.orig 2023-12-23 12:33:28 UTC
++++ extensions/browser/api/management/management_api.cc
+@@ -278,7 +278,7 @@ bool PlatformSupportsApprovalFlowForExtensions() {
+ #if BUILDFLAG(IS_CHROMEOS)
+ // ChromeOS devices have this feature already shipped.
+ return true;
+-#elif BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN)
++#elif BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
+ return base::FeatureList::IsEnabled(
+ supervised_user::kEnableExtensionsPermissionsForSupervisedUsersOnDesktop);
+ #else
diff --git a/www/ungoogled-chromium/files/patch-extensions_browser_api_messaging_message__service.cc b/www/ungoogled-chromium/files/patch-extensions_browser_api_messaging_message__service.cc
index f6405c2ed801..c6435d551fef 100644
--- a/www/ungoogled-chromium/files/patch-extensions_browser_api_messaging_message__service.cc
+++ b/www/ungoogled-chromium/files/patch-extensions_browser_api_messaging_message__service.cc
@@ -1,6 +1,6 @@
---- extensions/browser/api/messaging/message_service.cc.orig 2023-09-17 07:59:53 UTC
+--- extensions/browser/api/messaging/message_service.cc.orig 2023-12-23 12:33:28 UTC
+++ extensions/browser/api/messaging/message_service.cc
-@@ -71,7 +71,7 @@ namespace {
+@@ -70,7 +70,7 @@ namespace {
const char kReceivingEndDoesntExistError[] =
"Could not establish connection. Receiving end does not exist.";
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -9,7 +9,7 @@
const char kMissingPermissionError[] =
"Access to native messaging requires nativeMessaging permission.";
const char kProhibitedByPoliciesError[] =
-@@ -486,7 +486,7 @@ void MessageService::OpenChannelToNativeApp(
+@@ -541,7 +541,7 @@ void MessageService::OpenChannelToNativeAppImpl(
return;
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
diff --git a/www/ungoogled-chromium/files/patch-extensions_common_api___permission__features.json b/www/ungoogled-chromium/files/patch-extensions_common_api___permission__features.json
index d7d8e4dabdff..7171d41b6956 100644
--- a/www/ungoogled-chromium/files/patch-extensions_common_api___permission__features.json
+++ b/www/ungoogled-chromium/files/patch-extensions_common_api___permission__features.json
@@ -1,4 +1,4 @@
---- extensions/common/api/_permission_features.json.orig 2023-07-21 09:49:17 UTC
+--- extensions/common/api/_permission_features.json.orig 2023-12-23 12:33:28 UTC
+++ extensions/common/api/_permission_features.json
@@ -138,7 +138,7 @@
{
@@ -18,7 +18,7 @@
"allowlist": [
"0DE0F05680A4A056BCEC864ED8DDA84296F82B40", // http://crbug.com/434651
"1C93BD3CF875F4A73C0B2A163BB8FBDA8B8B3D80", // http://crbug.com/293683
-@@ -474,7 +474,7 @@
+@@ -476,7 +476,7 @@
"networkingPrivate": {
"channel": "stable",
"extension_types": ["extension", "legacy_packaged_app", "platform_app"],
diff --git a/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_dawn__context__provider.cc b/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_dawn__context__provider.cc
index 6c7407be3496..0665bdab9d2d 100644
--- a/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_dawn__context__provider.cc
+++ b/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_dawn__context__provider.cc
@@ -1,6 +1,6 @@
---- gpu/command_buffer/service/dawn_context_provider.cc.orig 2023-11-04 07:08:51 UTC
+--- gpu/command_buffer/service/dawn_context_provider.cc.orig 2023-12-23 12:33:28 UTC
+++ gpu/command_buffer/service/dawn_context_provider.cc
-@@ -151,7 +151,7 @@ wgpu::BackendType DawnContextProvider::GetDefaultBacke
+@@ -178,7 +178,7 @@ wgpu::BackendType DawnContextProvider::GetDefaultBacke
return base::FeatureList::IsEnabled(features::kSkiaGraphiteDawnUseD3D12)
? wgpu::BackendType::D3D12
: wgpu::BackendType::D3D11;
diff --git a/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_gles2__cmd__decoder.cc b/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_gles2__cmd__decoder.cc
index 1ba866a97535..0623337df2b0 100644
--- a/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_gles2__cmd__decoder.cc
+++ b/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_gles2__cmd__decoder.cc
@@ -1,6 +1,6 @@
---- gpu/command_buffer/service/gles2_cmd_decoder.cc.orig 2023-11-04 07:08:51 UTC
+--- gpu/command_buffer/service/gles2_cmd_decoder.cc.orig 2023-12-23 12:33:28 UTC
+++ gpu/command_buffer/service/gles2_cmd_decoder.cc
-@@ -2986,7 +2986,7 @@ GLES2Decoder* GLES2Decoder::Create(
+@@ -2999,7 +2999,7 @@ GLES2Decoder* GLES2Decoder::Create(
}
// Allow linux to run fuzzers.
diff --git a/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__context__state.cc b/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__context__state.cc
deleted file mode 100644
index f4c9a421be45..000000000000
--- a/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__context__state.cc
+++ /dev/null
@@ -1,32 +0,0 @@
---- gpu/command_buffer/service/shared_context_state.cc.orig 2023-11-04 07:08:51 UTC
-+++ gpu/command_buffer/service/shared_context_state.cc
-@@ -4,6 +4,7 @@
-
- #include "gpu/command_buffer/service/shared_context_state.h"
-
-+#include "base/immediate_crash.h"
- #include "base/observer_list.h"
- #include "base/strings/stringprintf.h"
- #include "base/system/sys_info.h"
-@@ -101,6 +102,13 @@ void SharedContextState::compileError(const char* shad
- << "------------------------\n"
- << shader << "\nErrors:\n"
- << errors;
-+
-+ // Increase shader cache shm count and crash the GPU process so that the
-+ // browser process would clear the cache.
-+ GpuProcessShmCount::ScopedIncrement increment(
-+ use_shader_cache_shm_count_.get());
-+
-+ base::ImmediateCrash();
- }
- }
-
-@@ -305,6 +313,7 @@ bool SharedContextState::InitializeGanesh(
- gl::ProgressReporter* progress_reporter) {
- progress_reporter_ = progress_reporter;
- gr_shader_cache_ = cache;
-+ use_shader_cache_shm_count_ = use_shader_cache_shm_count;
-
- size_t max_resource_cache_bytes;
- size_t glyph_cache_max_texture_bytes;
diff --git a/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__context__state.h b/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__context__state.h
deleted file mode 100644
index 536a1dc76127..000000000000
--- a/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__context__state.h
+++ /dev/null
@@ -1,11 +0,0 @@
---- gpu/command_buffer/service/shared_context_state.h.orig 2023-11-04 07:08:51 UTC
-+++ gpu/command_buffer/service/shared_context_state.h
-@@ -392,6 +392,8 @@ class GPU_GLES2_EXPORT SharedContextState
- std::vector<uint8_t> scratch_deserialization_buffer_;
- raw_ptr<gpu::raster::GrShaderCache, DanglingUntriaged> gr_shader_cache_ =
- nullptr;
-+ raw_ptr<GpuProcessShmCount, DanglingUntriaged> use_shader_cache_shm_count_ =
-+ nullptr;
-
- // |need_context_state_reset| is set whenever Skia may have altered the
- // driver's GL state.
diff --git a/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__image_ozone__image__gl__textures__holder.h b/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__image_ozone__image__gl__textures__holder.h
new file mode 100644
index 000000000000..2b6021a98a96
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__image_ozone__image__gl__textures__holder.h
@@ -0,0 +1,10 @@
+--- gpu/command_buffer/service/shared_image/ozone_image_gl_textures_holder.h.orig 2023-12-23 12:33:28 UTC
++++ gpu/command_buffer/service/shared_image/ozone_image_gl_textures_holder.h
+@@ -6,6 +6,7 @@
+ #define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_OZONE_IMAGE_GL_TEXTURES_HOLDER_H_
+
+ #include <memory>
++#include <vector>
+
+ #include "base/memory/raw_ptr.h"
+ #include "base/memory/ref_counted.h"
diff --git a/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_webgpu__decoder__impl.cc b/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_webgpu__decoder__impl.cc
index 5c1b342a3b20..b9a8f49ecae2 100644
--- a/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_webgpu__decoder__impl.cc
+++ b/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_webgpu__decoder__impl.cc
@@ -1,6 +1,6 @@
---- gpu/command_buffer/service/webgpu_decoder_impl.cc.orig 2023-11-04 07:08:51 UTC
+--- gpu/command_buffer/service/webgpu_decoder_impl.cc.orig 2023-12-23 12:33:28 UTC
+++ gpu/command_buffer/service/webgpu_decoder_impl.cc
-@@ -1193,7 +1193,7 @@ void WebGPUDecoderImpl::RequestAdapterImpl(
+@@ -1252,7 +1252,7 @@ void WebGPUDecoderImpl::RequestAdapterImpl(
force_fallback_adapter = true;
}
@@ -9,7 +9,7 @@
if (!shared_context_state_->GrContextIsVulkan() &&
!shared_context_state_->IsGraphiteDawnVulkan() &&
use_webgpu_adapter_ != WebGPUAdapterName::kOpenGLES) {
-@@ -1848,7 +1848,7 @@ WebGPUDecoderImpl::AssociateMailboxDawn(
+@@ -1879,7 +1879,7 @@ WebGPUDecoderImpl::AssociateMailboxDawn(
}
#if !BUILDFLAG(IS_WIN) && !BUILDFLAG(IS_CHROMEOS) && !BUILDFLAG(IS_APPLE) && \
diff --git a/www/ungoogled-chromium/files/patch-gpu_ipc_service_gpu__init.cc b/www/ungoogled-chromium/files/patch-gpu_ipc_service_gpu__init.cc
index 0310cae9bedf..d2e24a50017e 100644
--- a/www/ungoogled-chromium/files/patch-gpu_ipc_service_gpu__init.cc
+++ b/www/ungoogled-chromium/files/patch-gpu_ipc_service_gpu__init.cc
@@ -1,4 +1,4 @@
---- gpu/ipc/service/gpu_init.cc.orig 2023-11-04 07:08:51 UTC
+--- gpu/ipc/service/gpu_init.cc.orig 2023-12-23 12:33:28 UTC
+++ gpu/ipc/service/gpu_init.cc
@@ -357,7 +357,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandL
enable_watchdog = false;
@@ -18,8 +18,8 @@
// On Chrome OS ARM Mali, GPU driver userspace creates threads when
// initializing a GL context, so start the sandbox early.
// TODO(zmo): Need to collect OS version before this.
-@@ -451,7 +451,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandL
- }
+@@ -491,7 +491,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandL
+ gpu_preferences_.gr_context_type = GrContextType::kGL;
}
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
@@ -27,7 +27,7 @@
// The ContentSandboxHelper is currently the only one implementation of
// GpuSandboxHelper and it has no dependency. Except on Linux where
// VaapiWrapper checks the GL implementation to determine which display
-@@ -533,7 +533,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandL
+@@ -573,7 +573,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandL
command_line, gpu_feature_info_,
gpu_preferences_.disable_software_rasterizer, false);
if (gl_use_swiftshader_) {
@@ -36,7 +36,7 @@
VLOG(1) << "Quit GPU process launch to fallback to SwiftShader cleanly "
<< "on Linux";
return false;
-@@ -686,7 +686,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandL
+@@ -726,7 +726,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandL
InitializePlatformOverlaySettings(&gpu_info_, gpu_feature_info_);
@@ -45,7 +45,7 @@
// Driver may create a compatibility profile context when collect graphics
// information on Linux platform. Try to collect graphics information
// based on core profile context after disabling platform extensions.
-@@ -741,7 +741,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandL
+@@ -781,7 +781,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandL
}
}
}
@@ -54,7 +54,7 @@
(BUILDFLAG(IS_CHROMEOS) && !BUILDFLAG(IS_CHROMEOS_DEVICE))
if (!gl_disabled && !gl_use_swiftshader_ && std::getenv("RUNNING_UNDER_RR")) {
// https://rr-project.org/ is a Linux-only record-and-replay debugger that
-@@ -895,7 +895,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* c
+@@ -935,7 +935,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* c
}
bool gl_disabled = gl::GetGLImplementation() == gl::kGLImplementationDisabled;
@@ -63,7 +63,7 @@
(BUILDFLAG(IS_CHROMEOS) && !BUILDFLAG(IS_CHROMEOS_DEVICE))
if (!gl_disabled && !gl_use_swiftshader_ && std::getenv("RUNNING_UNDER_RR")) {
// https://rr-project.org/ is a Linux-only record-and-replay debugger that
-@@ -965,7 +965,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* c
+@@ -1005,7 +1005,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* c
}
}
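
[editor's note] Nearly every hunk in this commit applies the same one-line widening: OS checks written as BUILDFLAG(IS_LINUX) (or IS_LINUX || IS_CHROMEOS) gain "|| BUILDFLAG(IS_BSD)" so the FreeBSD/OpenBSD build takes the Linux code path instead of the unported default branch. A minimal sketch of the mechanism, with a hypothetical function name; the OS flags come from build/build_config.h and expand to 0 or 1 at preprocessing time:

    #include "build/build_config.h"

    void ConfigureSandboxStartup() {  // hypothetical name, not a Chromium symbol
    #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
      // The widened condition keeps BSD on the POSIX/Linux branch rather
      // than forking the logic into a separate implementation file.
    #endif
    }
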
diff --git a/www/ungoogled-chromium/files/patch-gpu_vulkan_vulkan__util.cc b/www/ungoogled-chromium/files/patch-gpu_vulkan_vulkan__util.cc
index e59997633cfd..3f9277110939 100644
--- a/www/ungoogled-chromium/files/patch-gpu_vulkan_vulkan__util.cc
+++ b/www/ungoogled-chromium/files/patch-gpu_vulkan_vulkan__util.cc
@@ -1,6 +1,6 @@
---- gpu/vulkan/vulkan_util.cc.orig 2023-08-18 10:26:52 UTC
+--- gpu/vulkan/vulkan_util.cc.orig 2023-12-23 12:33:28 UTC
+++ gpu/vulkan/vulkan_util.cc
-@@ -343,7 +343,7 @@ bool CheckVulkanCompabilities(const VulkanInfo& vulkan
+@@ -433,7 +433,7 @@ bool CheckVulkanCompabilities(const VulkanInfo& vulkan
}
}
diff --git a/www/ungoogled-chromium/files/patch-headless_lib_headless__content__main__delegate.cc b/www/ungoogled-chromium/files/patch-headless_lib_headless__content__main__delegate.cc
index b78e023e78cc..36d6880cddb8 100644
--- a/www/ungoogled-chromium/files/patch-headless_lib_headless__content__main__delegate.cc
+++ b/www/ungoogled-chromium/files/patch-headless_lib_headless__content__main__delegate.cc
@@ -1,6 +1,6 @@
---- headless/lib/headless_content_main_delegate.cc.orig 2023-09-17 07:59:53 UTC
+--- headless/lib/headless_content_main_delegate.cc.orig 2023-12-23 12:33:28 UTC
+++ headless/lib/headless_content_main_delegate.cc
-@@ -344,7 +344,7 @@ void HeadlessContentMainDelegate::InitCrashReporter(
+@@ -362,7 +362,7 @@ void HeadlessContentMainDelegate::InitCrashReporter(
if (process_type != ::switches::kZygoteProcess) {
g_headless_crash_client.Pointer()->set_crash_dumps_dir(
command_line.GetSwitchValuePath(switches::kCrashDumpsDir));
diff --git a/www/ungoogled-chromium/files/patch-media_BUILD.gn b/www/ungoogled-chromium/files/patch-media_BUILD.gn
index a72c49aebb95..d899a5ad064a 100644
--- a/www/ungoogled-chromium/files/patch-media_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-media_BUILD.gn
@@ -1,6 +1,6 @@
---- media/BUILD.gn.orig 2023-09-17 07:59:53 UTC
+--- media/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ media/BUILD.gn
-@@ -94,6 +94,9 @@ config("media_config") {
+@@ -95,6 +95,9 @@ config("media_config") {
defines += [ "DLOPEN_PULSEAUDIO" ]
}
}
diff --git a/www/ungoogled-chromium/files/patch-media_base_media__switches.cc b/www/ungoogled-chromium/files/patch-media_base_media__switches.cc
index 311409cdefd3..cadcdde513ae 100644
--- a/www/ungoogled-chromium/files/patch-media_base_media__switches.cc
+++ b/www/ungoogled-chromium/files/patch-media_base_media__switches.cc
@@ -1,4 +1,4 @@
---- media/base/media_switches.cc.orig 2023-11-16 20:38:16 UTC
+--- media/base/media_switches.cc.orig 2023-12-23 12:33:28 UTC
+++ media/base/media_switches.cc
@@ -17,7 +17,7 @@
#include "ui/gl/gl_features.h"
@@ -9,7 +9,7 @@
#include "base/cpu.h"
#endif
-@@ -698,7 +698,7 @@ BASE_FEATURE(kFallbackAfterDecodeError,
+@@ -702,7 +702,7 @@ BASE_FEATURE(kFallbackAfterDecodeError,
// Show toolbar button that opens dialog for controlling media sessions.
BASE_FEATURE(kGlobalMediaControls,
"GlobalMediaControls",
@@ -18,7 +18,7 @@
base::FEATURE_ENABLED_BY_DEFAULT
#else
base::FEATURE_DISABLED_BY_DEFAULT
-@@ -721,7 +721,7 @@ BASE_FEATURE(kGlobalMediaControlsCrOSUpdatedUI,
+@@ -725,7 +725,7 @@ BASE_FEATURE(kGlobalMediaControlsCrOSUpdatedUI,
// If enabled, users can request Media Remoting without fullscreen-in-tab.
BASE_FEATURE(kMediaRemotingWithoutFullscreen,
"MediaRemotingWithoutFullscreen",
@@ -27,7 +27,7 @@
base::FEATURE_ENABLED_BY_DEFAULT
#else
base::FEATURE_DISABLED_BY_DEFAULT
-@@ -733,7 +733,7 @@ BASE_FEATURE(kMediaRemotingWithoutFullscreen,
+@@ -737,7 +737,7 @@ BASE_FEATURE(kMediaRemotingWithoutFullscreen,
BASE_FEATURE(kGlobalMediaControlsPictureInPicture,
"GlobalMediaControlsPictureInPicture",
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -36,7 +36,7 @@
base::FEATURE_ENABLED_BY_DEFAULT
#else
base::FEATURE_DISABLED_BY_DEFAULT
-@@ -777,7 +777,7 @@ BASE_FEATURE(kUnifiedAutoplay,
+@@ -776,7 +776,7 @@ BASE_FEATURE(kUnifiedAutoplay,
"UnifiedAutoplay",
base::FEATURE_ENABLED_BY_DEFAULT);
@@ -45,7 +45,7 @@
// Enable vaapi video decoding on linux. This is already enabled by default on
// chromeos, but needs an experiment on linux.
BASE_FEATURE(kVaapiVideoDecodeLinux,
-@@ -861,7 +861,7 @@ BASE_FEATURE(kVaapiVp9SModeHWEncoding,
+@@ -856,7 +856,7 @@ BASE_FEATURE(kVaapiVp9SModeHWEncoding,
"VaapiVp9SModeHWEncoding",
base::FEATURE_DISABLED_BY_DEFAULT);
#endif // defined(ARCH_CPU_X86_FAMILY) && BUILDFLAG(IS_CHROMEOS)
@@ -54,7 +54,7 @@
// Enables the new V4L2StatefulVideoDecoder instead of V4L2VideoDecoder.
BASE_FEATURE(kV4L2FlatStatelessVideoDecoder,
"V4L2FlatStatelessVideoDecoder",
-@@ -1396,7 +1396,7 @@ const base::Feature MEDIA_EXPORT kUseOutOfProcessVideo
+@@ -1384,7 +1384,7 @@ const base::Feature MEDIA_EXPORT kUseOutOfProcessVideo
};
#endif // BUILDFLAG(ALLOW_OOP_VIDEO_DECODER)
diff --git a/www/ungoogled-chromium/files/patch-media_base_media__switches.h b/www/ungoogled-chromium/files/patch-media_base_media__switches.h
index e49c32fe42a6..097363ff628a 100644
--- a/www/ungoogled-chromium/files/patch-media_base_media__switches.h
+++ b/www/ungoogled-chromium/files/patch-media_base_media__switches.h
@@ -1,16 +1,16 @@
---- media/base/media_switches.h.orig 2023-11-11 14:10:41 UTC
+--- media/base/media_switches.h.orig 2023-12-23 12:33:28 UTC
+++ media/base/media_switches.h
-@@ -320,7 +320,7 @@ MEDIA_EXPORT BASE_DECLARE_FEATURE(kUseElementInsteadOf
+@@ -322,7 +322,7 @@ MEDIA_EXPORT BASE_DECLARE_FEATURE(kUseDecoderStreamFor
+ MEDIA_EXPORT BASE_DECLARE_FEATURE(kUseElementInsteadOfRegionCapture);
MEDIA_EXPORT BASE_DECLARE_FEATURE(kUseFakeDeviceForMediaStream);
MEDIA_EXPORT BASE_DECLARE_FEATURE(kUseMediaHistoryStore);
- MEDIA_EXPORT BASE_DECLARE_FEATURE(kUseR16Texture);
-#if BUILDFLAG(IS_LINUX)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
MEDIA_EXPORT BASE_DECLARE_FEATURE(kVaapiVideoDecodeLinux);
MEDIA_EXPORT BASE_DECLARE_FEATURE(kVaapiVideoDecodeLinuxGL);
MEDIA_EXPORT BASE_DECLARE_FEATURE(kVaapiVideoEncodeLinux);
-@@ -339,7 +339,7 @@ MEDIA_EXPORT BASE_DECLARE_FEATURE(kVaapiVp8TemporalLay
- MEDIA_EXPORT BASE_DECLARE_FEATURE(kVaapiVp9kSVCHWEncoding);
+@@ -340,7 +340,7 @@ MEDIA_EXPORT BASE_DECLARE_FEATURE(kVaapiH264TemporalLa
+ MEDIA_EXPORT BASE_DECLARE_FEATURE(kVaapiVp8TemporalLayerHWEncoding);
MEDIA_EXPORT BASE_DECLARE_FEATURE(kVaapiVp9SModeHWEncoding);
#endif // defined(ARCH_CPU_X86_FAMILY) && BUILDFLAG(IS_CHROMEOS)
-#if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)
@@ -18,7 +18,7 @@
MEDIA_EXPORT BASE_DECLARE_FEATURE(kV4L2FlatStatelessVideoDecoder);
MEDIA_EXPORT BASE_DECLARE_FEATURE(kV4L2FlatStatefulVideoDecoder);
#endif // BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)
-@@ -459,7 +459,7 @@ MEDIA_EXPORT BASE_DECLARE_FEATURE(kExposeOutOfProcessV
+@@ -461,7 +461,7 @@ MEDIA_EXPORT BASE_DECLARE_FEATURE(kExposeOutOfProcessV
MEDIA_EXPORT BASE_DECLARE_FEATURE(kUseOutOfProcessVideoDecoding);
#endif // BUILDFLAG(ALLOW_OOP_VIDEO_DECODER)
diff --git a/www/ungoogled-chromium/files/patch-media_capture_video_video__capture__buffer__tracker__factory__impl.cc b/www/ungoogled-chromium/files/patch-media_capture_video_video__capture__buffer__tracker__factory__impl.cc
index 01f4fa62a75b..5313324008ed 100644
--- a/www/ungoogled-chromium/files/patch-media_capture_video_video__capture__buffer__tracker__factory__impl.cc
+++ b/www/ungoogled-chromium/files/patch-media_capture_video_video__capture__buffer__tracker__factory__impl.cc
@@ -1,4 +1,4 @@
---- media/capture/video/video_capture_buffer_tracker_factory_impl.cc.orig 2023-09-17 07:59:53 UTC
+--- media/capture/video/video_capture_buffer_tracker_factory_impl.cc.orig 2023-12-23 12:33:28 UTC
+++ media/capture/video/video_capture_buffer_tracker_factory_impl.cc
@@ -14,7 +14,7 @@
#include "media/capture/video/chromeos/gpu_memory_buffer_tracker_cros.h"
@@ -8,8 +8,8 @@
+#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
#include "media/capture/video/linux/v4l2_gpu_memory_buffer_tracker.h"
#elif BUILDFLAG(IS_WIN)
- #include "media/capture/video/shared_memory_buffer_tracker.h"
-@@ -43,7 +43,7 @@ VideoCaptureBufferTrackerFactoryImpl::CreateTracker(
+ #include "media/capture/video/win/gpu_memory_buffer_tracker_win.h"
+@@ -42,7 +42,7 @@ VideoCaptureBufferTrackerFactoryImpl::CreateTracker(
return std::make_unique<GpuMemoryBufferTrackerCros>();
#elif BUILDFLAG(IS_APPLE)
return std::make_unique<GpuMemoryBufferTrackerApple>();
diff --git a/www/ungoogled-chromium/files/patch-media_capture_video_video__capture__device__client.cc b/www/ungoogled-chromium/files/patch-media_capture_video_video__capture__device__client.cc
index 8c25f3a5f0ce..bc180a2015c1 100644
--- a/www/ungoogled-chromium/files/patch-media_capture_video_video__capture__device__client.cc
+++ b/www/ungoogled-chromium/files/patch-media_capture_video_video__capture__device__client.cc
@@ -1,6 +1,6 @@
---- media/capture/video/video_capture_device_client.cc.orig 2023-07-21 09:49:17 UTC
+--- media/capture/video/video_capture_device_client.cc.orig 2023-12-23 12:33:28 UTC
+++ media/capture/video/video_capture_device_client.cc
-@@ -149,7 +149,7 @@ FourccAndFlip GetFourccAndFlipFromPixelFormat(
+@@ -150,7 +150,7 @@ FourccAndFlip GetFourccAndFlipFromPixelFormat(
CHECK(!is_width_odd && !is_height_odd);
return {libyuv::FOURCC_UYVY};
case media::PIXEL_FORMAT_RGB24:
diff --git a/www/ungoogled-chromium/files/patch-media_gpu_chromeos_video__decoder__pipeline.cc b/www/ungoogled-chromium/files/patch-media_gpu_chromeos_video__decoder__pipeline.cc
index b53ed64ba188..7145be00c95f 100644
--- a/www/ungoogled-chromium/files/patch-media_gpu_chromeos_video__decoder__pipeline.cc
+++ b/www/ungoogled-chromium/files/patch-media_gpu_chromeos_video__decoder__pipeline.cc
@@ -1,8 +1,8 @@
---- media/gpu/chromeos/video_decoder_pipeline.cc.orig 2023-11-11 14:10:41 UTC
+--- media/gpu/chromeos/video_decoder_pipeline.cc.orig 2023-12-23 12:33:28 UTC
+++ media/gpu/chromeos/video_decoder_pipeline.cc
-@@ -1033,14 +1033,14 @@ VideoDecoderPipeline::PickDecoderOutputFormat(
- }
+@@ -1048,14 +1048,14 @@ VideoDecoderPipeline::PickDecoderOutputFormat(
}
+ #endif
-#if BUILDFLAG(IS_LINUX) && BUILDFLAG(USE_VAAPI)
+#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)) && BUILDFLAG(USE_VAAPI)
@@ -17,7 +17,7 @@
// Linux w/ V4L2 should not use a custom allocator
// Only tested with video_decode_accelerator_tests
// TODO(wenst@) Test with full Chromium Browser
-@@ -1177,7 +1177,7 @@ VideoDecoderPipeline::PickDecoderOutputFormat(
+@@ -1192,7 +1192,7 @@ VideoDecoderPipeline::PickDecoderOutputFormat(
<< " VideoFrames";
auxiliary_frame_pool_->set_parent_task_runner(decoder_task_runner_);
diff --git a/www/ungoogled-chromium/files/patch-media_gpu_vaapi_vaapi__video__decoder.cc b/www/ungoogled-chromium/files/patch-media_gpu_vaapi_vaapi__video__decoder.cc
index b54f53652d25..6ae70645f1f4 100644
--- a/www/ungoogled-chromium/files/patch-media_gpu_vaapi_vaapi__video__decoder.cc
+++ b/www/ungoogled-chromium/files/patch-media_gpu_vaapi_vaapi__video__decoder.cc
@@ -1,6 +1,6 @@
---- media/gpu/vaapi/vaapi_video_decoder.cc.orig 2023-08-18 10:26:52 UTC
+--- media/gpu/vaapi/vaapi_video_decoder.cc.orig 2023-12-23 12:33:28 UTC
+++ media/gpu/vaapi/vaapi_video_decoder.cc
-@@ -779,7 +779,7 @@ void VaapiVideoDecoder::ApplyResolutionChangeWithScree
+@@ -776,7 +776,7 @@ void VaapiVideoDecoder::ApplyResolutionChangeWithScree
const gfx::Size decoder_natural_size =
aspect_ratio_.GetNaturalSize(decoder_visible_rect);
diff --git a/www/ungoogled-chromium/files/patch-media_gpu_vaapi_vaapi__wrapper.cc b/www/ungoogled-chromium/files/patch-media_gpu_vaapi_vaapi__wrapper.cc
index 896887e8d04e..a2df0937a157 100644
--- a/www/ungoogled-chromium/files/patch-media_gpu_vaapi_vaapi__wrapper.cc
+++ b/www/ungoogled-chromium/files/patch-media_gpu_vaapi_vaapi__wrapper.cc
@@ -1,6 +1,6 @@
---- media/gpu/vaapi/vaapi_wrapper.cc.orig 2023-11-04 07:08:51 UTC
+--- media/gpu/vaapi/vaapi_wrapper.cc.orig 2023-12-23 12:33:28 UTC
+++ media/gpu/vaapi/vaapi_wrapper.cc
-@@ -82,7 +82,7 @@ extern "C" {
+@@ -71,7 +71,7 @@
using media_gpu_vaapi::kModuleVa_prot;
#endif
@@ -9,7 +9,7 @@
#include "base/files/file_util.h"
#include "base/strings/string_split.h"
#endif
-@@ -1506,7 +1506,7 @@ bool IsVBREncodingSupported(VAProfile va_profile) {
+@@ -1445,7 +1445,7 @@ bool IsVBREncodingSupported(VAProfile va_profile) {
return VASupportedProfiles::Get().IsProfileSupported(mode, va_profile);
}
@@ -18,7 +18,7 @@
// Some VA-API drivers (vdpau-va-driver) will crash if used with VA/DRM on
// NVIDIA GPUs. This function checks if such drivers are present.
bool IsBrokenNvidiaVaapiDriverPresent() {
-@@ -1567,7 +1567,7 @@ void VADisplayStateSingleton::PreSandboxInitialization
+@@ -1506,7 +1506,7 @@ void VADisplayStateSingleton::PreSandboxInitialization
VADisplayStateSingleton& va_display_state = GetInstance();
base::AutoLock lock(va_display_state.lock_);
@@ -27,7 +27,7 @@
std::string va_driver_name;
auto env = base::Environment::Create();
if (env->GetVar("LIBVA_DRIVER_NAME", &va_driver_name) &&
-@@ -1604,7 +1604,7 @@ void VADisplayStateSingleton::PreSandboxInitialization
+@@ -1543,7 +1543,7 @@ void VADisplayStateSingleton::PreSandboxInitialization
if (base::EqualsCaseInsensitiveASCII(version_name, "vgem")) {
continue;
}
@@ -36,7 +36,7 @@
// Skip NVIDIA GPUs if the VA-API driver used for them is known for crashing
// with VA/DRM.
if (is_nvidia_va_drm_broken &&
-@@ -1637,7 +1637,7 @@ VADisplayStateHandle VADisplayStateSingleton::GetHandl
+@@ -1576,7 +1576,7 @@ VADisplayStateHandle VADisplayStateSingleton::GetHandl
return {};
}
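
[editor's note] The vaapi_wrapper.cc hunks extend an NVIDIA workaround to BSD: per the quoted comment, the code inspects the environment and skips initialization when the crash-prone vdpau-va-driver would be loaded with VA/DRM. Roughly, as a sketch (the helper name and the driver-name value checked here are assumptions, not copied from the source):

    #include <memory>
    #include <string>
    #include "base/environment.h"

    bool IsBrokenVaapiDriverRequested() {  // illustrative, not the tree's symbol
      std::unique_ptr<base::Environment> env = base::Environment::Create();
      std::string name;
      // "vdpau" stands in for whatever name vdpau-va-driver registers under.
      return env->GetVar("LIBVA_DRIVER_NAME", &name) && name == "vdpau";
    }
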
diff --git a/www/ungoogled-chromium/files/patch-media_media__options.gni b/www/ungoogled-chromium/files/patch-media_media__options.gni
index aa6c5ac3fde2..2c1fc6bcd671 100644
--- a/www/ungoogled-chromium/files/patch-media_media__options.gni
+++ b/www/ungoogled-chromium/files/patch-media_media__options.gni
@@ -1,6 +1,6 @@
---- media/media_options.gni.orig 2023-10-13 13:20:35 UTC
+--- media/media_options.gni.orig 2023-12-23 12:33:28 UTC
+++ media/media_options.gni
-@@ -181,12 +181,15 @@ declare_args() {
+@@ -187,12 +187,15 @@ declare_args() {
# Enables runtime selection of ALSA library for audio.
use_alsa = false
@@ -17,7 +17,7 @@
(!is_castos || (target_cpu == "x86" || target_cpu == "x64") ||
is_cast_audio_only)) {
use_alsa = true
-@@ -202,6 +205,10 @@ declare_args() {
+@@ -208,6 +211,10 @@ declare_args() {
if (!use_cras && !is_castos && !is_asan && !is_tsan) {
use_pulseaudio = true
}
diff --git a/www/ungoogled-chromium/files/patch-media_video_gpu__memory__buffer__video__frame__pool.cc b/www/ungoogled-chromium/files/patch-media_video_gpu__memory__buffer__video__frame__pool.cc
index 414326c81986..ea8532f595de 100644
--- a/www/ungoogled-chromium/files/patch-media_video_gpu__memory__buffer__video__frame__pool.cc
+++ b/www/ungoogled-chromium/files/patch-media_video_gpu__memory__buffer__video__frame__pool.cc
@@ -1,6 +1,6 @@
---- media/video/gpu_memory_buffer_video_frame_pool.cc.orig 2023-11-04 07:08:51 UTC
+--- media/video/gpu_memory_buffer_video_frame_pool.cc.orig 2023-12-23 12:33:28 UTC
+++ media/video/gpu_memory_buffer_video_frame_pool.cc
-@@ -773,7 +773,7 @@ void GpuMemoryBufferVideoFramePool::PoolImpl::CreateHa
+@@ -774,7 +774,7 @@ void GpuMemoryBufferVideoFramePool::PoolImpl::CreateHa
}
bool is_software_backed_video_frame = !video_frame->HasTextures();
@@ -9,7 +9,7 @@
is_software_backed_video_frame &= !video_frame->HasDmaBufs();
#endif
-@@ -1223,7 +1223,7 @@ scoped_refptr<VideoFrame> GpuMemoryBufferVideoFramePoo
+@@ -1224,7 +1224,7 @@ scoped_refptr<VideoFrame> GpuMemoryBufferVideoFramePoo
}
#endif
@@ -18,7 +18,7 @@
is_webgpu_compatible = (gpu_memory_buffer != nullptr);
if (is_webgpu_compatible) {
is_webgpu_compatible &=
-@@ -1242,7 +1242,7 @@ scoped_refptr<VideoFrame> GpuMemoryBufferVideoFramePoo
+@@ -1243,7 +1243,7 @@ scoped_refptr<VideoFrame> GpuMemoryBufferVideoFramePoo
gpu::SHARED_IMAGE_USAGE_DISPLAY_READ |
gpu::SHARED_IMAGE_USAGE_SCANOUT;
diff --git a/www/ungoogled-chromium/files/patch-media_video_video__encode__accelerator__adapter.cc b/www/ungoogled-chromium/files/patch-media_video_video__encode__accelerator__adapter.cc
index 06d59f708a80..1529957e7ab9 100644
--- a/www/ungoogled-chromium/files/patch-media_video_video__encode__accelerator__adapter.cc
+++ b/www/ungoogled-chromium/files/patch-media_video_video__encode__accelerator__adapter.cc
@@ -1,6 +1,6 @@
---- media/video/video_encode_accelerator_adapter.cc.orig 2023-09-17 07:59:53 UTC
+--- media/video/video_encode_accelerator_adapter.cc.orig 2023-12-23 12:33:28 UTC
+++ media/video/video_encode_accelerator_adapter.cc
-@@ -140,7 +140,7 @@ VideoEncodeAccelerator::Config SetUpVeaConfig(
+@@ -153,7 +153,7 @@ VideoEncodeAccelerator::Config SetUpVeaConfig(
if (is_rgb)
config.input_format = PIXEL_FORMAT_I420;
@@ -9,7 +9,7 @@
if (format != PIXEL_FORMAT_I420 ||
!VideoFrame::IsStorageTypeMappable(storage_type)) {
// ChromeOS/Linux hardware video encoders supports I420 on-memory
-@@ -465,7 +465,7 @@ void VideoEncodeAcceleratorAdapter::InitializeInternal
+@@ -478,7 +478,7 @@ void VideoEncodeAcceleratorAdapter::InitializeInternal
SetUpVeaConfig(profile_, options_, format, first_frame->storage_type(),
supported_rc_modes_, required_encoder_type_);
diff --git a/www/ungoogled-chromium/files/patch-mojo_public_tools_bindings_mojom.gni b/www/ungoogled-chromium/files/patch-mojo_public_tools_bindings_mojom.gni
index 40253193d9e2..d99980f5fddf 100644
--- a/www/ungoogled-chromium/files/patch-mojo_public_tools_bindings_mojom.gni
+++ b/www/ungoogled-chromium/files/patch-mojo_public_tools_bindings_mojom.gni
@@ -1,6 +1,6 @@
---- mojo/public/tools/bindings/mojom.gni.orig 2023-09-17 07:59:53 UTC
+--- mojo/public/tools/bindings/mojom.gni.orig 2023-12-23 12:33:28 UTC
+++ mojo/public/tools/bindings/mojom.gni
-@@ -743,6 +743,16 @@ template("mojom") {
+@@ -758,6 +758,16 @@ template("mojom") {
enabled_features += [ "is_apple" ]
}
diff --git a/www/ungoogled-chromium/files/patch-net_BUILD.gn b/www/ungoogled-chromium/files/patch-net_BUILD.gn
index a720dd59132b..3c23ef8bb76b 100644
--- a/www/ungoogled-chromium/files/patch-net_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-net_BUILD.gn
@@ -1,4 +1,4 @@
---- net/BUILD.gn.orig 2023-11-04 07:08:51 UTC
+--- net/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ net/BUILD.gn
@@ -122,7 +122,7 @@ net_configs = [
"//build/config/compiler:wexit_time_destructors",
@@ -9,8 +9,8 @@
net_configs += [ "//build/config/linux:libresolv" ]
}
-@@ -1244,6 +1244,19 @@ component("net") {
- deps += [ "//third_party/xdg_shared_mime_info" ]
+@@ -1242,6 +1242,19 @@ component("net") {
+ ]
}
+ if (is_bsd) {
@@ -29,7 +29,7 @@
if (is_mac) {
sources += [
"base/network_notification_thread_mac.cc",
-@@ -1400,7 +1413,7 @@ component("net") {
+@@ -1398,7 +1411,7 @@ component("net") {
}
# Use getifaddrs() on POSIX platforms, except Linux.
@@ -38,7 +38,7 @@
sources += [
"base/network_interfaces_getifaddrs.cc",
"base/network_interfaces_getifaddrs.h",
-@@ -2893,7 +2906,7 @@ test("net_unittests") {
+@@ -2888,7 +2901,7 @@ test("net_unittests") {
]
}
@@ -47,9 +47,9 @@
sources += [
"base/address_tracker_linux_unittest.cc",
"base/network_interfaces_linux_unittest.cc",
-@@ -2965,6 +2978,10 @@ test("net_unittests") {
- "//url:buildflags",
- ]
+@@ -2976,6 +2989,10 @@ test("net_unittests") {
+ ]
+ }
+ if (is_bsd) {
+ deps += [ "//sandbox/policy" ]
diff --git a/www/ungoogled-chromium/files/patch-net_base_features.cc b/www/ungoogled-chromium/files/patch-net_base_features.cc
index adfeba5868bf..cd4a602cc266 100644
--- a/www/ungoogled-chromium/files/patch-net_base_features.cc
+++ b/www/ungoogled-chromium/files/patch-net_base_features.cc
@@ -1,11 +1,15 @@
---- net/base/features.cc.orig 2023-11-04 07:08:51 UTC
+--- net/base/features.cc.orig 2023-12-23 12:33:28 UTC
+++ net/base/features.cc
-@@ -171,7 +171,7 @@ BASE_FEATURE(kSameSiteDefaultChecksMethodRigorously,
- #if BUILDFLAG(CHROME_ROOT_STORE_OPTIONAL)
- BASE_FEATURE(kChromeRootStoreUsed,
- "ChromeRootStoreUsed",
--#if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID)
-+#if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_BSD)
- base::FEATURE_ENABLED_BY_DEFAULT
- #else
- base::FEATURE_DISABLED_BY_DEFAULT
+@@ -470,7 +470,12 @@ BASE_FEATURE(kSpdyHeadersToHttpResponseUseBuilder,
+ "SpdyHeadersToHttpResponseUseBuilder",
+ base::FEATURE_DISABLED_BY_DEFAULT);
+
++#if BUILDFLAG(IS_OPENBSD)
++// No IP_RECVTOS support
+ BASE_FEATURE(kReceiveEcn, "ReceiveEcn", base::FEATURE_DISABLED_BY_DEFAULT);
++#else
++BASE_FEATURE(kReceiveEcn, "ReceiveEcn", base::FEATURE_DISABLED_BY_DEFAULT);
++#endif
+
+ // TODO(crbug.com/634470): Remove this feature flag in January 2024 if the new
+ // limit sticks.
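
[editor's note] The features.cc hunk deserves a closer look: both arms of the new "#if BUILDFLAG(IS_OPENBSD)" currently expand to the same disabled default, so the guard's value is the comment (OpenBSD lacks IP_RECVTOS) plus a place to diverge if upstream ever flips ReceiveEcn on by default. For reference, this is how such a feature is declared and consumed; the declaration mirrors the hunk, while the call site is illustrative:

    #include "base/feature_list.h"

    namespace net::features {
    BASE_FEATURE(kReceiveEcn, "ReceiveEcn", base::FEATURE_DISABLED_BY_DEFAULT);
    }  // namespace net::features

    bool ShouldRequestEcn() {  // illustrative helper, not a Chromium symbol
      // False unless flipped at runtime, e.g. --enable-features=ReceiveEcn.
      return base::FeatureList::IsEnabled(net::features::kReceiveEcn);
    }
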
diff --git a/www/ungoogled-chromium/files/patch-net_cert_cert__verifier.cc b/www/ungoogled-chromium/files/patch-net_cert_cert__verifier.cc
deleted file mode 100644
index 49d2c4238998..000000000000
--- a/www/ungoogled-chromium/files/patch-net_cert_cert__verifier.cc
+++ /dev/null
@@ -1,11 +0,0 @@
---- net/cert/cert_verifier.cc.orig 2023-06-05 19:39:05 UTC
-+++ net/cert/cert_verifier.cc
-@@ -40,7 +40,7 @@ class DefaultCertVerifyProcFactory : public net::CertV
- return CertVerifyProc::CreateBuiltinWithChromeRootStore(
- std::move(cert_net_fetcher), impl_params.crl_set,
- base::OptionalToPtr(impl_params.root_store_data));
--#elif BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-+#elif BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
- return CertVerifyProc::CreateBuiltinVerifyProc(std::move(cert_net_fetcher),
- impl_params.crl_set);
- #else
diff --git a/www/ungoogled-chromium/files/patch-net_cert_cert__verify__proc.cc b/www/ungoogled-chromium/files/patch-net_cert_cert__verify__proc.cc
deleted file mode 100644
index cb956b32f394..000000000000
--- a/www/ungoogled-chromium/files/patch-net_cert_cert__verify__proc.cc
+++ /dev/null
@@ -1,11 +0,0 @@
---- net/cert/cert_verify_proc.cc.orig 2023-06-05 19:39:05 UTC
-+++ net/cert/cert_verify_proc.cc
-@@ -409,7 +409,7 @@ base::Value::Dict CertVerifyParams(
-
- } // namespace
-
--#if !(BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_LINUX) || \
-+#if !(BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD) || \
- BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(CHROME_ROOT_STORE_ONLY))
- // static
- scoped_refptr<CertVerifyProc> CertVerifyProc::CreateSystemVerifyProc(
diff --git a/www/ungoogled-chromium/files/patch-net_cert_cert__verify__proc__unittest.cc b/www/ungoogled-chromium/files/patch-net_cert_cert__verify__proc__unittest.cc
deleted file mode 100644
index 904b57c1fb63..000000000000
--- a/www/ungoogled-chromium/files/patch-net_cert_cert__verify__proc__unittest.cc
+++ /dev/null
@@ -1,20 +0,0 @@
---- net/cert/cert_verify_proc_unittest.cc.orig 2023-11-04 07:08:51 UTC
-+++ net/cert/cert_verify_proc_unittest.cc
-@@ -213,7 +213,7 @@ scoped_refptr<CertVerifyProc> CreateCertVerifyProc(
- case CERT_VERIFY_PROC_IOS:
- return base::MakeRefCounted<CertVerifyProcIOS>(std::move(crl_set));
- #endif
--#if BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-+#if BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
- case CERT_VERIFY_PROC_BUILTIN:
- return CreateCertVerifyProcBuiltin(std::move(cert_net_fetcher),
- std::move(crl_set),
-@@ -242,7 +242,7 @@ constexpr CertVerifyProcType kAllCertVerifiers[] = {
- CERT_VERIFY_PROC_ANDROID,
- #elif BUILDFLAG(IS_IOS)
- CERT_VERIFY_PROC_IOS,
--#elif BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-+#elif BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
- CERT_VERIFY_PROC_BUILTIN,
- #endif
- #if BUILDFLAG(CHROME_ROOT_STORE_SUPPORTED)
diff --git a/www/ungoogled-chromium/files/patch-net_cert_test__root__certs__unittest.cc b/www/ungoogled-chromium/files/patch-net_cert_test__root__certs__unittest.cc
deleted file mode 100644
index 98da2e2ee384..000000000000
--- a/www/ungoogled-chromium/files/patch-net_cert_test__root__certs__unittest.cc
+++ /dev/null
@@ -1,11 +0,0 @@
---- net/cert/test_root_certs_unittest.cc.orig 2023-06-05 19:39:05 UTC
-+++ net/cert/test_root_certs_unittest.cc
-@@ -46,7 +46,7 @@ scoped_refptr<CertVerifyProc> CreateCertVerifyProc() {
- return CertVerifyProc::CreateBuiltinWithChromeRootStore(
- /*cert_net_fetcher=*/nullptr, CRLSet::BuiltinCRLSet().get(),
- /*root_store_data=*/nullptr);
--#elif BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-+#elif BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
- return CertVerifyProc::CreateBuiltinVerifyProc(/*cert_net_fetcher=*/nullptr,
- CRLSet::BuiltinCRLSet().get());
- #else
diff --git a/www/ungoogled-chromium/files/patch-net_socket_udp__socket__posix.cc b/www/ungoogled-chromium/files/patch-net_socket_udp__socket__posix.cc
index 4a30fa453f5c..f46d960787b4 100644
--- a/www/ungoogled-chromium/files/patch-net_socket_udp__socket__posix.cc
+++ b/www/ungoogled-chromium/files/patch-net_socket_udp__socket__posix.cc
@@ -1,6 +1,16 @@
---- net/socket/udp_socket_posix.cc.orig 2023-07-21 09:49:17 UTC
+--- net/socket/udp_socket_posix.cc.orig 2023-12-23 12:33:28 UTC
+++ net/socket/udp_socket_posix.cc
-@@ -591,7 +591,7 @@ int UDPSocketPosix::SetDoNotFragment() {
+@@ -612,12 +612,17 @@ int UDPSocketPosix::SetRecvEcn() {
+ }
+ }
+
++#ifdef IP_RECVTOS
+ int rv = setsockopt(socket_, IPPROTO_IP, IP_RECVTOS, &ecn, sizeof(ecn));
++#else
++ int rv = -1;
++ errno = EOPNOTSUPP;
++#endif
+ return rv == 0 ? OK : MapSystemError(errno);
}
void UDPSocketPosix::SetMsgConfirm(bool confirm) {
@@ -9,7 +19,7 @@
if (confirm) {
sendto_flags_ |= MSG_CONFIRM;
} else {
-@@ -612,7 +612,7 @@ int UDPSocketPosix::SetBroadcast(bool broadcast) {
+@@ -638,7 +643,7 @@ int UDPSocketPosix::SetBroadcast(bool broadcast) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
int value = broadcast ? 1 : 0;
int rv;
@@ -18,7 +28,7 @@
// SO_REUSEPORT on OSX permits multiple processes to each receive
// UDP multicast or broadcast datagrams destined for the bound
// port.
-@@ -925,7 +925,7 @@ int UDPSocketPosix::DoBind(const IPEndPoint& address)
+@@ -951,7 +956,7 @@ int UDPSocketPosix::DoBind(const IPEndPoint& address)
#if BUILDFLAG(IS_CHROMEOS_ASH)
if (last_error == EINVAL)
return ERR_ADDRESS_IN_USE;
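
[editor's note] The SetRecvEcn() hunk above shows the portability idiom for optional socket options: probe for the macro at compile time and fail with EOPNOTSUPP where the platform (OpenBSD, per the features.cc comment) does not define IP_RECVTOS. A standalone sketch of the same guard, assuming nothing beyond POSIX headers and a hypothetical helper name:

    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <cerrno>

    int EnableRecvTos(int fd) {  // hypothetical helper
    #ifdef IP_RECVTOS
      int on = 1;
      return setsockopt(fd, IPPROTO_IP, IP_RECVTOS, &on, sizeof(on));
    #else
      (void)fd;
      errno = EOPNOTSUPP;  // fail cleanly instead of breaking the build
      return -1;
    #endif
    }
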
diff --git a/www/ungoogled-chromium/files/patch-net_tools_cert__verify__tool_cert__verify__tool.cc b/www/ungoogled-chromium/files/patch-net_tools_cert__verify__tool_cert__verify__tool.cc
index 13ea3098199c..3555c12e8bfc 100644
--- a/www/ungoogled-chromium/files/patch-net_tools_cert__verify__tool_cert__verify__tool.cc
+++ b/www/ungoogled-chromium/files/patch-net_tools_cert__verify__tool_cert__verify__tool.cc
@@ -1,4 +1,4 @@
---- net/tools/cert_verify_tool/cert_verify_tool.cc.orig 2023-06-05 19:39:05 UTC
+--- net/tools/cert_verify_tool/cert_verify_tool.cc.orig 2023-12-23 12:33:28 UTC
+++ net/tools/cert_verify_tool/cert_verify_tool.cc
@@ -31,7 +31,7 @@
#include "net/url_request/url_request_context_builder.h"
@@ -9,7 +9,7 @@
#include "net/proxy_resolution/proxy_config.h"
#include "net/proxy_resolution/proxy_config_service_fixed.h"
#endif
-@@ -61,7 +61,7 @@ void SetUpOnNetworkThread(
+@@ -63,7 +63,7 @@ void SetUpOnNetworkThread(
base::WaitableEvent* initialization_complete_event) {
net::URLRequestContextBuilder url_request_context_builder;
url_request_context_builder.set_user_agent(GetUserAgent());
@@ -18,16 +18,7 @@
// On Linux, use a fixed ProxyConfigService, since the default one
// depends on glib.
//
-@@ -223,7 +223,7 @@ std::unique_ptr<CertVerifyImpl> CreateCertVerifyImplFr
- scoped_refptr<net::CertNetFetcher> cert_net_fetcher,
- scoped_refptr<net::CRLSet> crl_set,
- RootStoreType root_store_type) {
--#if !(BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_LINUX) || \
-+#if !(BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD) || \
- BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(CHROME_ROOT_STORE_ONLY))
- if (impl_name == "platform") {
- if (root_store_type != RootStoreType::kSystem) {
-@@ -537,7 +537,7 @@ int main(int argc, char** argv) {
+@@ -545,7 +545,7 @@ int main(int argc, char** argv) {
std::string impls_str = command_line.GetSwitchValueASCII("impls");
if (impls_str.empty()) {
// Default value.
diff --git a/www/ungoogled-chromium/files/patch-printing_printing__context__linux.cc b/www/ungoogled-chromium/files/patch-printing_printing__context__linux.cc
index 8d258b310328..75c7a9945b89 100644
--- a/www/ungoogled-chromium/files/patch-printing_printing__context__linux.cc
+++ b/www/ungoogled-chromium/files/patch-printing_printing__context__linux.cc
@@ -1,4 +1,4 @@
---- printing/printing_context_linux.cc.orig 2023-10-13 13:20:35 UTC
+--- printing/printing_context_linux.cc.orig 2023-12-23 12:33:28 UTC
+++ printing/printing_context_linux.cc
@@ -23,7 +23,7 @@
#endif
@@ -9,7 +9,7 @@
#include "ui/linux/linux_ui.h"
#endif
-@@ -72,7 +72,7 @@ mojom::ResultCode PrintingContextLinux::UseDefaultSett
+@@ -68,7 +68,7 @@ mojom::ResultCode PrintingContextLinux::UseDefaultSett
ResetSettings();
@@ -18,7 +18,7 @@
if (!ui::LinuxUi::instance())
return mojom::ResultCode::kSuccess;
-@@ -88,7 +88,7 @@ mojom::ResultCode PrintingContextLinux::UseDefaultSett
+@@ -84,7 +84,7 @@ mojom::ResultCode PrintingContextLinux::UseDefaultSett
}
gfx::Size PrintingContextLinux::GetPdfPaperSizeDeviceUnits() {
@@ -27,7 +27,7 @@
if (ui::LinuxUi::instance())
return ui::LinuxUi::instance()->GetPdfPaperSize(this);
#endif
-@@ -101,7 +101,7 @@ mojom::ResultCode PrintingContextLinux::UpdatePrinterS
+@@ -97,7 +97,7 @@ mojom::ResultCode PrintingContextLinux::UpdatePrinterS
DCHECK(!printer_settings.show_system_dialog);
DCHECK(!in_print_job_);
diff --git a/www/ungoogled-chromium/files/patch-remoting_host_heartbeat__sender.cc b/www/ungoogled-chromium/files/patch-remoting_host_heartbeat__sender.cc
deleted file mode 100644
index b36c1e52ea2a..000000000000
--- a/www/ungoogled-chromium/files/patch-remoting_host_heartbeat__sender.cc
+++ /dev/null
@@ -1,11 +0,0 @@
---- remoting/host/heartbeat_sender.cc.orig 2023-04-08 11:38:38 UTC
-+++ remoting/host/heartbeat_sender.cc
-@@ -111,7 +111,7 @@ const net::BackoffEntry::Policy kBackoffPolicy = {
- std::string GetHostname() {
- // TODO(crbug.com/1052397): Revisit the macro expression once build flag
- // switch of lacros-chrome is complete.
--#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_CHROMEOS_LACROS)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_BSD)
- return net::GetHostName();
- #elif BUILDFLAG(IS_WIN)
- wchar_t buffer[MAX_PATH] = {0};
diff --git a/www/ungoogled-chromium/files/patch-remoting_host_me2me__desktop__environment.cc b/www/ungoogled-chromium/files/patch-remoting_host_me2me__desktop__environment.cc
index 3e6f5db5a61c..9be23b1853e9 100644
--- a/www/ungoogled-chromium/files/patch-remoting_host_me2me__desktop__environment.cc
+++ b/www/ungoogled-chromium/files/patch-remoting_host_me2me__desktop__environment.cc
@@ -1,4 +1,4 @@
---- remoting/host/me2me_desktop_environment.cc.orig 2023-03-10 11:01:21 UTC
+--- remoting/host/me2me_desktop_environment.cc.orig 2023-12-23 12:33:28 UTC
+++ remoting/host/me2me_desktop_environment.cc
@@ -125,7 +125,7 @@ std::string Me2MeDesktopEnvironment::GetCapabilities()
capabilities += protocol::kRemoteWebAuthnCapability;
@@ -9,7 +9,7 @@
if (!IsRunningWayland()) {
capabilities += " ";
capabilities += protocol::kMultiStreamCapability;
-@@ -164,7 +164,7 @@ Me2MeDesktopEnvironment::Me2MeDesktopEnvironment(
+@@ -171,7 +171,7 @@ Me2MeDesktopEnvironment::Me2MeDesktopEnvironment(
// properly under Xvfb.
mutable_desktop_capture_options()->set_use_update_notifications(true);
@@ -18,7 +18,7 @@
// Setting this option to false means that the capture differ wrapper will not
// be used when the X11 capturer is selected. This reduces the X11 capture
// time by a few milliseconds per frame and is safe because we can rely on
-@@ -173,7 +173,7 @@ Me2MeDesktopEnvironment::Me2MeDesktopEnvironment(
+@@ -180,7 +180,7 @@ Me2MeDesktopEnvironment::Me2MeDesktopEnvironment(
mutable_desktop_capture_options()->set_detect_updated_region(false);
#endif
@@ -27,7 +27,7 @@
if (IsRunningWayland()) {
mutable_desktop_capture_options()->set_prefer_cursor_embedded(false);
}
-@@ -198,7 +198,7 @@ bool Me2MeDesktopEnvironment::InitializeSecurity(
+@@ -205,7 +205,7 @@ bool Me2MeDesktopEnvironment::InitializeSecurity(
// Otherwise, if the session is shared with the local user start monitoring
// the local input and create the in-session UI.
diff --git a/www/ungoogled-chromium/files/patch-remoting_host_policy__watcher.cc b/www/ungoogled-chromium/files/patch-remoting_host_policy__watcher.cc
index 87a8c4949708..41132c516f8f 100644
--- a/www/ungoogled-chromium/files/patch-remoting_host_policy__watcher.cc
+++ b/www/ungoogled-chromium/files/patch-remoting_host_policy__watcher.cc
@@ -1,6 +1,6 @@
---- remoting/host/policy_watcher.cc.orig 2023-09-17 07:59:53 UTC
+--- remoting/host/policy_watcher.cc.orig 2023-12-23 12:33:28 UTC
+++ remoting/host/policy_watcher.cc
-@@ -184,7 +184,7 @@ base::Value::Dict PolicyWatcher::GetDefaultPolicies()
+@@ -191,7 +191,7 @@ base::Value::Dict PolicyWatcher::GetDefaultPolicies()
true);
result.Set(key::kRemoteAccessHostAllowEnterpriseFileTransfer, false);
#endif
diff --git a/www/ungoogled-chromium/files/patch-remoting_host_remoting__me2me__host.cc b/www/ungoogled-chromium/files/patch-remoting_host_remoting__me2me__host.cc
index d60722a6aa84..02d2503311f7 100644
--- a/www/ungoogled-chromium/files/patch-remoting_host_remoting__me2me__host.cc
+++ b/www/ungoogled-chromium/files/patch-remoting_host_remoting__me2me__host.cc
@@ -1,6 +1,6 @@
---- remoting/host/remoting_me2me_host.cc.orig 2023-09-17 07:59:53 UTC
+--- remoting/host/remoting_me2me_host.cc.orig 2023-12-23 12:33:28 UTC
+++ remoting/host/remoting_me2me_host.cc
-@@ -126,7 +126,7 @@
+@@ -127,7 +127,7 @@
#include "remoting/host/mac/permission_utils.h"
#endif // BUILDFLAG(IS_APPLE)
@@ -9,7 +9,7 @@
#if defined(REMOTING_USE_X11)
#include <gtk/gtk.h>
#endif // defined(REMOTING_USE_X11)
-@@ -137,13 +137,13 @@
+@@ -138,13 +138,13 @@
#endif // defined(REMOTING_USE_X11)
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
@@ -25,7 +25,7 @@
#include "remoting/host/host_utmp_logger.h"
#endif
-@@ -156,7 +156,7 @@
+@@ -157,7 +157,7 @@
#include "remoting/host/win/session_desktop_environment.h"
#endif // BUILDFLAG(IS_WIN)
@@ -34,7 +34,7 @@
#include "remoting/host/linux/wayland_manager.h"
#include "remoting/host/linux/wayland_utils.h"
#endif // BUILDFLAG(IS_LINUX)
-@@ -186,7 +186,7 @@ const char kApplicationName[] = "chromoting";
+@@ -187,7 +187,7 @@ const char kApplicationName[] = "chromoting";
const char kStdinConfigPath[] = "-";
#endif // !defined(REMOTING_MULTI_PROCESS)
@@ -43,7 +43,7 @@
// The command line switch used to pass name of the pipe to capture audio on
// linux.
const char kAudioPipeSwitchName[] = "audio-pipe-name";
-@@ -397,7 +397,7 @@ class HostProcess : public ConfigWatcher::Delegate,
+@@ -394,7 +394,7 @@ class HostProcess : public ConfigWatcher::Delegate,
std::unique_ptr<ChromotingHostContext> context_;
@@ -52,7 +52,7 @@
// Watch for certificate changes and kill the host when changes occur
std::unique_ptr<CertificateWatcher> cert_watcher_;
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-@@ -466,7 +466,7 @@ class HostProcess : public ConfigWatcher::Delegate,
+@@ -463,7 +463,7 @@ class HostProcess : public ConfigWatcher::Delegate,
std::unique_ptr<HostStatusLogger> host_status_logger_;
std::unique_ptr<HostEventLogger> host_event_logger_;
@@ -61,7 +61,7 @@
std::unique_ptr<HostUTMPLogger> host_utmp_logger_;
#endif
std::unique_ptr<HostPowerSaveBlocker> power_save_blocker_;
-@@ -775,7 +775,7 @@ void HostProcess::StartOnNetworkThread() {
+@@ -772,7 +772,7 @@ void HostProcess::StartOnNetworkThread() {
void HostProcess::ShutdownOnNetworkThread() {
DCHECK(context_->network_task_runner()->BelongsToCurrentThread());
config_watcher_.reset();
@@ -70,7 +70,7 @@
cert_watcher_.reset();
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
}
-@@ -836,7 +836,7 @@ void HostProcess::CreateAuthenticatorFactory() {
+@@ -833,7 +833,7 @@ void HostProcess::CreateAuthenticatorFactory() {
DCHECK(third_party_auth_config_.token_url.is_valid());
DCHECK(third_party_auth_config_.token_validation_url.is_valid());
@@ -79,7 +79,7 @@
if (!cert_watcher_) {
cert_watcher_ = std::make_unique<CertificateWatcher>(
base::BindRepeating(&HostProcess::ShutdownHost,
-@@ -955,13 +955,13 @@ void HostProcess::StartOnUiThread() {
+@@ -952,13 +952,13 @@ void HostProcess::StartOnUiThread() {
base::BindRepeating(&HostProcess::OnPolicyUpdate, base::Unretained(this)),
base::BindRepeating(&HostProcess::OnPolicyError, base::Unretained(this)));
@@ -95,7 +95,7 @@
// If an audio pipe is specific on the command-line then initialize
// AudioCapturerLinux to capture from it.
base::FilePath audio_pipe_name =
-@@ -1033,7 +1033,7 @@ void HostProcess::ShutdownOnUiThread() {
+@@ -1030,7 +1030,7 @@ void HostProcess::ShutdownOnUiThread() {
// It is now safe for the HostProcess to be deleted.
self_ = nullptr;
@@ -104,7 +104,7 @@
// Cause the global AudioPipeReader to be freed, otherwise the audio
// thread will remain in-use and prevent the process from exiting.
// TODO(wez): DesktopEnvironmentFactory should own the pipe reader.
-@@ -1041,7 +1041,7 @@ void HostProcess::ShutdownOnUiThread() {
+@@ -1038,7 +1038,7 @@ void HostProcess::ShutdownOnUiThread() {
AudioCapturerLinux::InitializePipeReader(nullptr, base::FilePath());
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
@@ -113,7 +113,7 @@
context_->input_task_runner()->PostTask(
FROM_HERE,
base::BindOnce([]() { delete ui::X11EventSource::GetInstance(); }));
-@@ -1392,7 +1392,7 @@ bool HostProcess::OnUsernamePolicyUpdate(const base::V
+@@ -1391,7 +1391,7 @@ bool HostProcess::OnUsernamePolicyUpdate(const base::V
// Returns false: never restart the host after this policy update.
DCHECK(context_->network_task_runner()->BelongsToCurrentThread());
@@ -122,7 +122,7 @@
absl::optional<bool> host_username_match_required =
policies.FindBool(policy::key::kRemoteAccessHostMatchUsername);
if (!host_username_match_required.has_value()) {
-@@ -1801,7 +1801,7 @@ void HostProcess::StartHost() {
+@@ -1800,7 +1800,7 @@ void HostProcess::StartHost() {
// won't be advertised if it's missing a registry key or something.
desktop_environment_options_.set_enable_remote_open_url(true);
@@ -131,7 +131,7 @@
desktop_environment_options_.set_enable_remote_webauthn(is_googler_);
#endif
-@@ -1834,7 +1834,7 @@ void HostProcess::StartHost() {
+@@ -1833,7 +1833,7 @@ void HostProcess::StartHost() {
host_status_logger_ = std::make_unique<HostStatusLogger>(
host_->status_monitor(), log_to_server_.get());
@@ -140,7 +140,7 @@
const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
if (cmd_line->HasSwitch(kEnableUtempter)) {
host_utmp_logger_ =
-@@ -1866,7 +1866,7 @@ void HostProcess::StartHost() {
+@@ -1865,7 +1865,7 @@ void HostProcess::StartHost() {
host_->Start(host_owner_);
@@ -149,7 +149,7 @@
// For Windows, ChromotingHostServices connections are handled by the daemon
// process, then the message pipe is forwarded to the network process.
host_->StartChromotingHostServices();
-@@ -1999,7 +1999,7 @@ int HostProcessMain() {
+@@ -1998,7 +1998,7 @@ int HostProcessMain() {
HOST_LOG << "Starting host process: version " << STRINGIZE(VERSION);
const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
@@ -158,7 +158,7 @@
#if defined(REMOTING_USE_X11)
// Initialize Xlib for multi-threaded use, allowing non-Chromium code to
// use X11 safely (such as the WebRTC capturer, GTK ...)
-@@ -2048,7 +2048,7 @@ int HostProcessMain() {
+@@ -2047,7 +2047,7 @@ int HostProcessMain() {
std::unique_ptr<net::NetworkChangeNotifier> network_change_notifier(
net::NetworkChangeNotifier::CreateIfNeeded());
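
[editor's note] The HostProcessMain() context above quotes the Xlib threading comment: under REMOTING_USE_X11 the host initializes Xlib for multi-threaded use before any other Xlib call, so non-Chromium code such as the WebRTC capturer and GTK can safely share the display connection. A guarded sketch with a hypothetical wrapper name:

    #if defined(REMOTING_USE_X11)
    #include <X11/Xlib.h>
    #endif

    void InitXlibThreading() {  // hypothetical wrapper
    #if defined(REMOTING_USE_X11)
      // Must precede every other Xlib call when multiple threads touch X11.
      XInitThreads();
    #endif
    }
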
diff --git a/www/ungoogled-chromium/files/patch-remoting_host_setup_start__host__main.cc b/www/ungoogled-chromium/files/patch-remoting_host_setup_start__host__main.cc
index 065ec8f10ce6..5a51c6e6955d 100644
--- a/www/ungoogled-chromium/files/patch-remoting_host_setup_start__host__main.cc
+++ b/www/ungoogled-chromium/files/patch-remoting_host_setup_start__host__main.cc
@@ -1,6 +1,6 @@
---- remoting/host/setup/start_host_main.cc.orig 2023-03-10 11:01:21 UTC
+--- remoting/host/setup/start_host_main.cc.orig 2023-12-23 12:33:28 UTC
+++ remoting/host/setup/start_host_main.cc
-@@ -33,7 +33,7 @@
+@@ -36,7 +36,7 @@
#include <unistd.h>
#endif // BUILDFLAG(IS_POSIX)
@@ -9,7 +9,7 @@
#include "remoting/host/setup/daemon_controller_delegate_linux.h"
#include "remoting/host/setup/start_host_as_root.h"
#endif // BUILDFLAG(IS_LINUX)
-@@ -130,7 +130,7 @@ void OnDone(HostStarter::Result result) {
+@@ -242,7 +242,7 @@ bool InitializeCorpMachineParams(HostStarter::Params&
} // namespace
int StartHostMain(int argc, char** argv) {
@@ -18,9 +18,9 @@
// Minimize the amount of code that runs as root on Posix systems.
if (getuid() == 0) {
return remoting::StartHostAsRoot(argc, argv);
-@@ -173,7 +173,7 @@ int StartHostMain(int argc, char** argv) {
- // for the account which generated |code|.
- std::string host_owner = command_line->GetSwitchValueASCII("host-owner");
+@@ -274,7 +274,7 @@ int StartHostMain(int argc, char** argv) {
+
+ mojo::core::Init();
-#if BUILDFLAG(IS_LINUX)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
diff --git a/www/ungoogled-chromium/files/patch-remoting_protocol_webrtc__video__stream.cc b/www/ungoogled-chromium/files/patch-remoting_protocol_webrtc__video__stream.cc
index 41370122eb2e..7b835d23c384 100644
--- a/www/ungoogled-chromium/files/patch-remoting_protocol_webrtc__video__stream.cc
+++ b/www/ungoogled-chromium/files/patch-remoting_protocol_webrtc__video__stream.cc
@@ -1,6 +1,6 @@
---- remoting/protocol/webrtc_video_stream.cc.orig 2023-04-08 11:38:38 UTC
+--- remoting/protocol/webrtc_video_stream.cc.orig 2023-12-23 12:33:28 UTC
+++ remoting/protocol/webrtc_video_stream.cc
-@@ -260,7 +260,7 @@ WebrtcVideoStream::WebrtcVideoStream(const std::string
+@@ -263,7 +263,7 @@ WebrtcVideoStream::WebrtcVideoStream(const std::string
: stream_name_(stream_name), session_options_(session_options) {
// TODO(joedow): Dig into the threading model on other platforms to see if they
// can also be updated to run on a dedicated thread.
diff --git a/www/ungoogled-chromium/files/patch-sandbox_policy_BUILD.gn b/www/ungoogled-chromium/files/patch-sandbox_policy_BUILD.gn
index 9ee1b6d5276b..efba7a903001 100644
--- a/www/ungoogled-chromium/files/patch-sandbox_policy_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-sandbox_policy_BUILD.gn
@@ -1,15 +1,15 @@
---- sandbox/policy/BUILD.gn.orig 2023-08-18 10:26:52 UTC
+--- sandbox/policy/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ sandbox/policy/BUILD.gn
-@@ -35,7 +35,7 @@ component("policy") {
- "//sandbox/policy/mojom",
- ]
- public_deps = [ "//sandbox:common" ]
+@@ -48,7 +48,7 @@ component("policy") {
+ "//sandbox/linux:seccomp_bpf",
+ ]
+ }
- if (is_linux || is_chromeos) {
+ if ((is_linux || is_chromeos) && !is_bsd) {
sources += [
"linux/bpf_audio_policy_linux.cc",
"linux/bpf_audio_policy_linux.h",
-@@ -106,6 +106,27 @@ component("policy") {
+@@ -119,6 +119,27 @@ component("policy") {
"//sandbox/linux:sandbox_services",
"//sandbox/linux:seccomp_bpf",
"//sandbox/linux:suid_sandbox_client",
diff --git a/www/ungoogled-chromium/files/patch-sandbox_policy_mojom_sandbox.mojom b/www/ungoogled-chromium/files/patch-sandbox_policy_mojom_sandbox.mojom
index 3d0ac4f087f4..4787ff2538b2 100644
--- a/www/ungoogled-chromium/files/patch-sandbox_policy_mojom_sandbox.mojom
+++ b/www/ungoogled-chromium/files/patch-sandbox_policy_mojom_sandbox.mojom
@@ -1,6 +1,6 @@
---- sandbox/policy/mojom/sandbox.mojom.orig 2023-07-21 09:49:17 UTC
+--- sandbox/policy/mojom/sandbox.mojom.orig 2023-12-23 12:33:28 UTC
+++ sandbox/policy/mojom/sandbox.mojom
-@@ -72,6 +72,12 @@ enum Sandbox {
+@@ -77,6 +77,12 @@ enum Sandbox {
[EnableIf=is_fuchsia]
kVideoCapture,
diff --git a/www/ungoogled-chromium/files/patch-sandbox_policy_sandbox__type.cc b/www/ungoogled-chromium/files/patch-sandbox_policy_sandbox__type.cc
index 096e3aa2890f..10025f1738f0 100644
--- a/www/ungoogled-chromium/files/patch-sandbox_policy_sandbox__type.cc
+++ b/www/ungoogled-chromium/files/patch-sandbox_policy_sandbox__type.cc
@@ -1,4 +1,4 @@
---- sandbox/policy/sandbox_type.cc.orig 2023-07-21 09:49:17 UTC
+--- sandbox/policy/sandbox_type.cc.orig 2023-12-23 12:33:28 UTC
+++ sandbox/policy/sandbox_type.cc
@@ -38,7 +38,7 @@ bool IsUnsandboxedSandboxType(Sandbox sandbox_type) {
#endif
@@ -9,7 +9,7 @@
case Sandbox::kVideoCapture:
return false;
#endif
-@@ -61,7 +61,7 @@ bool IsUnsandboxedSandboxType(Sandbox sandbox_type) {
+@@ -63,7 +63,7 @@ bool IsUnsandboxedSandboxType(Sandbox sandbox_type) {
case Sandbox::kMirroring:
case Sandbox::kNaClLoader:
#endif
@@ -18,7 +18,7 @@
case Sandbox::kHardwareVideoDecoding:
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH)
#if BUILDFLAG(IS_CHROMEOS_ASH)
-@@ -71,7 +71,7 @@ bool IsUnsandboxedSandboxType(Sandbox sandbox_type) {
+@@ -73,7 +73,7 @@ bool IsUnsandboxedSandboxType(Sandbox sandbox_type) {
case Sandbox::kLibassistant:
#endif // BUILDFLAG(ENABLE_CROS_LIBASSISTANT)
#endif // // BUILDFLAG(IS_CHROMEOS_ASH)
@@ -27,7 +27,7 @@
case Sandbox::kZygoteIntermediateSandbox:
case Sandbox::kHardwareVideoEncoding:
#endif
-@@ -127,7 +127,7 @@ void SetCommandLineFlagsForSandboxType(base::CommandLi
+@@ -130,7 +130,7 @@ void SetCommandLineFlagsForSandboxType(base::CommandLi
#endif
case Sandbox::kPrintCompositor:
case Sandbox::kAudio:
@@ -36,7 +36,7 @@
case Sandbox::kVideoCapture:
#endif
#if BUILDFLAG(IS_WIN)
-@@ -138,10 +138,10 @@ void SetCommandLineFlagsForSandboxType(base::CommandLi
+@@ -141,10 +141,10 @@ void SetCommandLineFlagsForSandboxType(base::CommandLi
case Sandbox::kMediaFoundationCdm:
case Sandbox::kWindowsSystemProxyResolver:
#endif // BUILDFLAG(IS_WIN)
@@ -49,7 +49,7 @@
case Sandbox::kHardwareVideoEncoding:
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
#if BUILDFLAG(IS_CHROMEOS_ASH)
-@@ -169,7 +169,7 @@ void SetCommandLineFlagsForSandboxType(base::CommandLi
+@@ -172,7 +172,7 @@ void SetCommandLineFlagsForSandboxType(base::CommandLi
case Sandbox::kNaClLoader:
break;
#endif // BUILDFLAG(IS_MAC)
@@ -59,15 +59,15 @@
break;
#endif
@@ -216,7 +216,7 @@ sandbox::mojom::Sandbox SandboxTypeFromCommandLine(
- if (process_type == switches::kNaClBrokerProcess)
- return Sandbox::kNoSandbox;
+ #endif
+ }
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
// Intermediate process gains a sandbox later.
if (process_type == switches::kZygoteProcessType)
return Sandbox::kZygoteIntermediateSandbox;
-@@ -260,7 +260,7 @@ std::string StringFromUtilitySandboxType(Sandbox sandb
+@@ -262,7 +262,7 @@ std::string StringFromUtilitySandboxType(Sandbox sandb
return switches::kUtilitySandbox;
case Sandbox::kAudio:
return switches::kAudioSandbox;
@@ -76,7 +76,7 @@
case Sandbox::kVideoCapture:
return switches::kVideoCaptureSandbox;
#endif
-@@ -290,11 +290,11 @@ std::string StringFromUtilitySandboxType(Sandbox sandb
+@@ -292,11 +292,11 @@ std::string StringFromUtilitySandboxType(Sandbox sandb
case Sandbox::kMirroring:
return switches::kMirroringSandbox;
#endif
@@ -90,7 +90,7 @@
case Sandbox::kHardwareVideoEncoding:
return switches::kHardwareVideoEncodingSandbox;
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-@@ -314,7 +314,7 @@ std::string StringFromUtilitySandboxType(Sandbox sandb
+@@ -316,7 +316,7 @@ std::string StringFromUtilitySandboxType(Sandbox sandb
#if BUILDFLAG(IS_MAC)
case Sandbox::kNaClLoader:
#endif // BUILDFLAG(IS_MAC)
@@ -99,7 +99,7 @@
case Sandbox::kZygoteIntermediateSandbox:
#endif
NOTREACHED();
-@@ -382,11 +382,11 @@ sandbox::mojom::Sandbox UtilitySandboxTypeFromString(
+@@ -388,11 +388,11 @@ sandbox::mojom::Sandbox UtilitySandboxTypeFromString(
if (sandbox_string == switches::kScreenAISandbox)
return Sandbox::kScreenAI;
#endif
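
[editor's note] sandbox_type.cc is the densest instance of the BSD-widening pattern: a switch over an enum whose members only exist under certain BUILDFLAGs, so every case must be wrapped in the same condition as its declaration in sandbox.mojom or the build breaks on exactly one platform. Reduced to essentials (the enum and function below are stand-ins, not the real mojom-generated type):

    #include "build/build_config.h"

    enum class Sandbox {
      kNoSandbox,
    #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
      kZygoteIntermediateSandbox,  // only declared where zygotes exist
    #endif
    };

    bool IsUnsandboxed(Sandbox type) {
      switch (type) {
        case Sandbox::kNoSandbox:
          return true;
    #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
        case Sandbox::kZygoteIntermediateSandbox:
          return false;  // gains a sandbox later; cf. SandboxTypeFromCommandLine
    #endif
      }
      return false;
    }
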
diff --git a/www/ungoogled-chromium/files/patch-sandbox_policy_switches.cc b/www/ungoogled-chromium/files/patch-sandbox_policy_switches.cc
index 4a703d0c0fdf..76da058cfd0b 100644
--- a/www/ungoogled-chromium/files/patch-sandbox_policy_switches.cc
+++ b/www/ungoogled-chromium/files/patch-sandbox_policy_switches.cc
@@ -1,6 +1,6 @@
---- sandbox/policy/switches.cc.orig 2023-07-21 09:49:17 UTC
+--- sandbox/policy/switches.cc.orig 2023-12-23 12:33:28 UTC
+++ sandbox/policy/switches.cc
-@@ -53,10 +53,10 @@ const char kWindowsSystemProxyResolverSandbox[] = "pro
+@@ -54,10 +54,10 @@ const char kWindowsSystemProxyResolverSandbox[] = "pro
const char kMirroringSandbox[] = "mirroring";
#endif // BUILDFLAG(IS_MAC)
@@ -13,7 +13,7 @@
const char kHardwareVideoEncodingSandbox[] = "hardware_video_encoding";
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-@@ -95,7 +95,9 @@ const char kGpuSandboxFailuresFatal[] = "gpu-sandbox-f
+@@ -96,7 +96,9 @@ const char kGpuSandboxFailuresFatal[] = "gpu-sandbox-f
// Meant to be used as a browser-level switch for testing purposes only.
const char kNoSandbox[] = "no-sandbox";
diff --git a/www/ungoogled-chromium/files/patch-sandbox_policy_switches.h b/www/ungoogled-chromium/files/patch-sandbox_policy_switches.h
index 9e9eca15e0fb..4d27ab1e73ba 100644
--- a/www/ungoogled-chromium/files/patch-sandbox_policy_switches.h
+++ b/www/ungoogled-chromium/files/patch-sandbox_policy_switches.h
@@ -1,6 +1,6 @@
---- sandbox/policy/switches.h.orig 2023-07-21 09:49:17 UTC
+--- sandbox/policy/switches.h.orig 2023-12-23 12:33:28 UTC
+++ sandbox/policy/switches.h
-@@ -56,10 +56,10 @@ SANDBOX_POLICY_EXPORT extern const char kWindowsSystem
+@@ -57,10 +57,10 @@ SANDBOX_POLICY_EXPORT extern const char kWindowsSystem
SANDBOX_POLICY_EXPORT extern const char kMirroringSandbox[];
#endif // BUILDFLAG(IS_MAC)
@@ -13,7 +13,7 @@
SANDBOX_POLICY_EXPORT extern const char kHardwareVideoEncodingSandbox[];
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-@@ -80,7 +80,8 @@ SANDBOX_POLICY_EXPORT extern const char kDisableSetuid
+@@ -81,7 +81,8 @@ SANDBOX_POLICY_EXPORT extern const char kDisableSetuid
SANDBOX_POLICY_EXPORT extern const char kGpuSandboxAllowSysVShm[];
SANDBOX_POLICY_EXPORT extern const char kGpuSandboxFailuresFatal[];
SANDBOX_POLICY_EXPORT extern const char kNoSandbox[];
diff --git a/www/ungoogled-chromium/files/patch-services_cert__verifier_cert__verifier__creation.cc b/www/ungoogled-chromium/files/patch-services_cert__verifier_cert__verifier__creation.cc
deleted file mode 100644
index b9bb6a8b0eb0..000000000000
--- a/www/ungoogled-chromium/files/patch-services_cert__verifier_cert__verifier__creation.cc
+++ /dev/null
@@ -1,28 +0,0 @@
---- services/cert_verifier/cert_verifier_creation.cc.orig 2023-11-04 07:08:51 UTC
-+++ services/cert_verifier/cert_verifier_creation.cc
-@@ -13,7 +13,7 @@
- #include "net/cert/multi_threaded_cert_verifier.h"
- #include "net/net_buildflags.h"
-
--#if BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-+#if BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
- #include "net/cert/cert_verify_proc_builtin.h"
- #include "net/cert/internal/system_trust_store.h"
- #endif
-@@ -108,7 +108,7 @@ class CertVerifyProcFactoryImpl : public net::CertVeri
- user_slot_restriction_ ? crypto::ScopedPK11Slot(PK11_ReferenceSlot(
- user_slot_restriction_.get()))
- : nullptr));
--#elif BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_LINUX)
-+#elif BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
- return net::CreateCertVerifyProcBuiltin(std::move(cert_net_fetcher),
- std::move(crl_set),
- net::CreateSslSystemTrustStore());
-@@ -174,6 +174,7 @@ class CertVerifyProcFactoryImpl : public net::CertVeri
- bool IsUsingCertNetFetcher() {
- #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA) || \
- BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || \
-+ BUILDFLAG(IS_BSD) || \
- BUILDFLAG(CHROME_ROOT_STORE_SUPPORTED)
- return true;
- #else
diff --git a/www/ungoogled-chromium/files/patch-services_device_geolocation_location__arbitrator.cc b/www/ungoogled-chromium/files/patch-services_device_geolocation_location__arbitrator.cc
index a7b0ece35371..5930f534ca7a 100644
--- a/www/ungoogled-chromium/files/patch-services_device_geolocation_location__arbitrator.cc
+++ b/www/ungoogled-chromium/files/patch-services_device_geolocation_location__arbitrator.cc
@@ -1,6 +1,6 @@
---- services/device/geolocation/location_arbitrator.cc.orig 2023-10-13 13:20:35 UTC
+--- services/device/geolocation/location_arbitrator.cc.orig 2023-12-23 12:33:28 UTC
+++ services/device/geolocation/location_arbitrator.cc
-@@ -188,7 +188,7 @@ LocationArbitrator::NewNetworkLocationProvider(
+@@ -193,7 +193,7 @@ LocationArbitrator::NewNetworkLocationProvider(
std::unique_ptr<LocationProvider>
LocationArbitrator::NewSystemLocationProvider() {
diff --git a/www/ungoogled-chromium/files/patch-services_network_BUILD.gn b/www/ungoogled-chromium/files/patch-services_network_BUILD.gn
index 23d3fc68e3a1..0cdba42b2519 100644
--- a/www/ungoogled-chromium/files/patch-services_network_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-services_network_BUILD.gn
@@ -1,10 +1,10 @@
---- services/network/BUILD.gn.orig 2023-11-04 07:08:51 UTC
+--- services/network/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ services/network/BUILD.gn
-@@ -391,7 +391,6 @@ if (is_linux || is_chromeos) {
+@@ -394,7 +394,6 @@ if (is_linux || is_chromeos) {
]
deps = [
"//base:base",
- "//sandbox/linux:sandbox_services",
"//sandbox/policy:policy",
]
- }
+ configs += [ "//build/config/compiler:wexit_time_destructors" ]
diff --git a/www/ungoogled-chromium/files/patch-services_network_network__context.cc b/www/ungoogled-chromium/files/patch-services_network_network__context.cc
index 7233b57f5c72..e1250b4ada0a 100644
--- a/www/ungoogled-chromium/files/patch-services_network_network__context.cc
+++ b/www/ungoogled-chromium/files/patch-services_network_network__context.cc
@@ -1,6 +1,6 @@
---- services/network/network_context.cc.orig 2023-11-04 07:08:51 UTC
+--- services/network/network_context.cc.orig 2023-12-23 12:33:28 UTC
+++ services/network/network_context.cc
-@@ -446,7 +446,7 @@ NetworkContext::NetworkContextHttpAuthPreferences::
+@@ -470,7 +470,7 @@ NetworkContext::NetworkContextHttpAuthPreferences::
NetworkContext::NetworkContextHttpAuthPreferences::
~NetworkContextHttpAuthPreferences() = default;
@@ -9,7 +9,7 @@
bool NetworkContext::NetworkContextHttpAuthPreferences::AllowGssapiLibraryLoad()
const {
if (network_service_) {
-@@ -2364,7 +2364,7 @@ void NetworkContext::OnHttpAuthDynamicParamsChanged(
+@@ -2385,7 +2385,7 @@ void NetworkContext::OnHttpAuthDynamicParamsChanged(
http_auth_dynamic_network_service_params->android_negotiate_account_type);
#endif // BUILDFLAG(IS_ANDROID)
diff --git a/www/ungoogled-chromium/files/patch-services_network_network__context.h b/www/ungoogled-chromium/files/patch-services_network_network__context.h
index 34b1e7850ef8..753099c096d1 100644
--- a/www/ungoogled-chromium/files/patch-services_network_network__context.h
+++ b/www/ungoogled-chromium/files/patch-services_network_network__context.h
@@ -1,6 +1,6 @@
---- services/network/network_context.h.orig 2023-11-04 07:08:51 UTC
+--- services/network/network_context.h.orig 2023-12-23 12:33:28 UTC
+++ services/network/network_context.h
-@@ -675,7 +675,7 @@ class COMPONENT_EXPORT(NETWORK_SERVICE) NetworkContext
+@@ -677,7 +677,7 @@ class COMPONENT_EXPORT(NETWORK_SERVICE) NetworkContext
public:
explicit NetworkContextHttpAuthPreferences(NetworkService* network_service);
~NetworkContextHttpAuthPreferences() override;
diff --git a/www/ungoogled-chromium/files/patch-services_network_network__service.cc b/www/ungoogled-chromium/files/patch-services_network_network__service.cc
index 1661475fd30b..075b8ca407fb 100644
--- a/www/ungoogled-chromium/files/patch-services_network_network__service.cc
+++ b/www/ungoogled-chromium/files/patch-services_network_network__service.cc
@@ -1,4 +1,4 @@
---- services/network/network_service.cc.orig 2023-10-13 13:20:35 UTC
+--- services/network/network_service.cc.orig 2023-12-23 12:33:28 UTC
+++ services/network/network_service.cc
@@ -96,7 +96,7 @@
#include "third_party/boringssl/src/include/openssl/cpu.h"
@@ -9,7 +9,7 @@
BUILDFLAG(IS_CHROMEOS_LACROS)
#include "components/os_crypt/sync/key_storage_config_linux.h"
-@@ -1003,7 +1003,7 @@ void NetworkService::SetExplicitlyAllowedPorts(
+@@ -998,7 +998,7 @@ void NetworkService::SetExplicitlyAllowedPorts(
net::SetExplicitlyAllowedPorts(ports);
}
@@ -18,7 +18,7 @@
void NetworkService::SetGssapiLibraryLoadObserver(
mojo::PendingRemote<mojom::GssapiLibraryLoadObserver>
gssapi_library_load_observer) {
-@@ -1085,7 +1085,7 @@ NetworkService::CreateHttpAuthHandlerFactory(NetworkCo
+@@ -1080,7 +1080,7 @@ NetworkService::CreateHttpAuthHandlerFactory(NetworkCo
);
}
diff --git a/www/ungoogled-chromium/files/patch-services_network_network__service.h b/www/ungoogled-chromium/files/patch-services_network_network__service.h
index 5e843a5fa9f7..6ee082caae20 100644
--- a/www/ungoogled-chromium/files/patch-services_network_network__service.h
+++ b/www/ungoogled-chromium/files/patch-services_network_network__service.h
@@ -1,6 +1,6 @@
---- services/network/network_service.h.orig 2023-10-13 13:20:35 UTC
+--- services/network/network_service.h.orig 2023-12-23 12:33:28 UTC
+++ services/network/network_service.h
-@@ -235,7 +235,7 @@ class COMPONENT_EXPORT(NETWORK_SERVICE) NetworkService
+@@ -225,7 +225,7 @@ class COMPONENT_EXPORT(NETWORK_SERVICE) NetworkService
mojo::PendingReceiver<mojom::NetworkServiceTest> receiver) override;
void SetFirstPartySets(net::GlobalFirstPartySets sets) override;
void SetExplicitlyAllowedPorts(const std::vector<uint16_t>& ports) override;
@@ -9,7 +9,7 @@
void SetGssapiLibraryLoadObserver(
mojo::PendingRemote<mojom::GssapiLibraryLoadObserver>
gssapi_library_load_observer) override;
-@@ -266,7 +266,7 @@ class COMPONENT_EXPORT(NETWORK_SERVICE) NetworkService
+@@ -252,7 +252,7 @@ class COMPONENT_EXPORT(NETWORK_SERVICE) NetworkService
std::unique_ptr<net::HttpAuthHandlerFactory> CreateHttpAuthHandlerFactory(
NetworkContext* network_context);
@@ -18,7 +18,7 @@
// This is called just before a GSSAPI library may be loaded.
void OnBeforeGssapiLibraryLoad();
#endif // BUILDFLAG(IS_LINUX)
-@@ -507,7 +507,7 @@ class COMPONENT_EXPORT(NETWORK_SERVICE) NetworkService
+@@ -494,7 +494,7 @@ class COMPONENT_EXPORT(NETWORK_SERVICE) NetworkService
// leaking stale listeners between tests.
std::unique_ptr<net::NetworkChangeNotifier> mock_network_change_notifier_;
diff --git a/www/ungoogled-chromium/files/patch-services_network_public_cpp_BUILD.gn b/www/ungoogled-chromium/files/patch-services_network_public_cpp_BUILD.gn
index 2260c73cf9f6..6890b14497bf 100644
--- a/www/ungoogled-chromium/files/patch-services_network_public_cpp_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-services_network_public_cpp_BUILD.gn
@@ -1,6 +1,6 @@
---- services/network/public/cpp/BUILD.gn.orig 2023-11-04 07:08:51 UTC
+--- services/network/public/cpp/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ services/network/public/cpp/BUILD.gn
-@@ -471,7 +471,7 @@ component("cpp_base") {
+@@ -485,7 +485,7 @@ component("cpp_base") {
sources += [ "transferable_directory_fuchsia.cc" ]
}
diff --git a/www/ungoogled-chromium/files/patch-services_network_public_cpp_features.cc b/www/ungoogled-chromium/files/patch-services_network_public_cpp_features.cc
deleted file mode 100644
index 1f21f629cb8f..000000000000
--- a/www/ungoogled-chromium/files/patch-services_network_public_cpp_features.cc
+++ /dev/null
@@ -1,11 +0,0 @@
---- services/network/public/cpp/features.cc.orig 2023-11-04 07:08:51 UTC
-+++ services/network/public/cpp/features.cc
-@@ -371,7 +371,7 @@ BASE_FEATURE(kPrivateNetworkAccessPermissionPrompt,
- // that can be adequately sandboxed.
- BASE_FEATURE(kOutOfProcessSystemDnsResolution,
- "OutOfProcessSystemDnsResolution",
--#if BUILDFLAG(IS_LINUX)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
- base::FEATURE_ENABLED_BY_DEFAULT
- #else
- base::FEATURE_DISABLED_BY_DEFAULT
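The deleted hunk above flipped the default state of a base::Feature on BSD. As a rough illustration of the mechanism only — this stub imitates Chromium's base/feature_list.h rather than reproducing it, and tests compiler-predefined macros where the real code tests GN-generated BUILDFLAG() values — the per-platform default is chosen by the preprocessor at the definition site:

#include <cstdio>

namespace base {
enum FeatureState { FEATURE_DISABLED_BY_DEFAULT, FEATURE_ENABLED_BY_DEFAULT };
struct Feature {
  const char* name;
  FeatureState default_state;
};
}  // namespace base

// Pick the platform-dependent default once, at compile time.
#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__)
constexpr base::FeatureState kOopDnsDefault = base::FEATURE_ENABLED_BY_DEFAULT;
#else
constexpr base::FeatureState kOopDnsDefault = base::FEATURE_DISABLED_BY_DEFAULT;
#endif

const base::Feature kOutOfProcessSystemDnsResolution{
    "OutOfProcessSystemDnsResolution", kOopDnsDefault};

int main() {
  std::printf("%s default: %s\n", kOutOfProcessSystemDnsResolution.name,
              kOutOfProcessSystemDnsResolution.default_state ==
                      base::FEATURE_ENABLED_BY_DEFAULT
                  ? "enabled" : "disabled");
}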
diff --git a/www/ungoogled-chromium/files/patch-services_network_public_mojom_BUILD.gn b/www/ungoogled-chromium/files/patch-services_network_public_mojom_BUILD.gn
index b2907ddb58a8..5c9ef60e4ac0 100644
--- a/www/ungoogled-chromium/files/patch-services_network_public_mojom_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-services_network_public_mojom_BUILD.gn
@@ -1,6 +1,6 @@
---- services/network/public/mojom/BUILD.gn.orig 2023-11-04 07:08:51 UTC
+--- services/network/public/mojom/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ services/network/public/mojom/BUILD.gn
-@@ -515,11 +515,11 @@ mojom("url_loader_base") {
+@@ -521,11 +521,11 @@ mojom("url_loader_base") {
}
enabled_features = []
@@ -14,7 +14,7 @@
# TODO(crbug.com/1431866): Remove this once is_linux in the mojom IDL does
# not include lacros.
enabled_features += [ "use_network_interface_change_listener" ]
-@@ -1431,7 +1431,7 @@ mojom("mojom") {
+@@ -1442,7 +1442,7 @@ mojom("mojom") {
}
}
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_controller_blink__initializer.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_controller_blink__initializer.cc
index a932813dd36c..ae38a7b88e74 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_controller_blink__initializer.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_controller_blink__initializer.cc
@@ -1,4 +1,4 @@
---- third_party/blink/renderer/controller/blink_initializer.cc.orig 2023-10-13 13:20:35 UTC
+--- third_party/blink/renderer/controller/blink_initializer.cc.orig 2023-12-23 12:33:28 UTC
+++ third_party/blink/renderer/controller/blink_initializer.cc
@@ -78,12 +78,12 @@
#include "third_party/blink/renderer/controller/private_memory_footprint_provider.h"
@@ -15,7 +15,7 @@
#include "third_party/blink/renderer/controller/highest_pmf_reporter.h"
#include "third_party/blink/renderer/controller/user_level_memory_pressure_signal_generator.h"
#endif
-@@ -227,7 +227,7 @@ void BlinkInitializer::RegisterInterfaces(mojo::Binder
+@@ -232,7 +232,7 @@ void BlinkInitializer::RegisterInterfaces(mojo::Binder
main_thread_task_runner);
#endif
@@ -24,7 +24,7 @@
binders.Add<mojom::blink::MemoryUsageMonitorLinux>(
ConvertToBaseRepeatingCallback(
CrossThreadBindRepeating(&MemoryUsageMonitorPosix::Bind)),
-@@ -266,7 +266,7 @@ void BlinkInitializer::RegisterMemoryWatchers(Platform
+@@ -271,7 +271,7 @@ void BlinkInitializer::RegisterMemoryWatchers(Platform
#endif
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || \
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_editing_editing__behavior.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_editing_editing__behavior.cc
index 5118d4554e1d..300f66b29bde 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_editing_editing__behavior.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_editing_editing__behavior.cc
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/core/editing/editing_behavior.cc.orig 2023-11-04 07:08:51 UTC
+--- third_party/blink/renderer/core/editing/editing_behavior.cc.orig 2023-12-23 12:33:28 UTC
+++ third_party/blink/renderer/core/editing/editing_behavior.cc
-@@ -279,7 +279,7 @@ bool EditingBehavior::ShouldInsertCharacter(const Keyb
+@@ -324,7 +324,7 @@ bool EditingBehavior::ShouldInsertCharacter(const Keyb
// unexpected behaviour
if (ch < ' ')
return false;
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_exported_web__view__impl.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_exported_web__view__impl.cc
index d776b4c86469..bf36a9571a5b 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_exported_web__view__impl.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_exported_web__view__impl.cc
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/core/exported/web_view_impl.cc.orig 2023-11-04 07:08:51 UTC
+--- third_party/blink/renderer/core/exported/web_view_impl.cc.orig 2023-12-23 12:33:28 UTC
+++ third_party/blink/renderer/core/exported/web_view_impl.cc
-@@ -421,7 +421,7 @@ SkFontHinting RendererPreferencesToSkiaHinting(
+@@ -424,7 +424,7 @@ SkFontHinting RendererPreferencesToSkiaHinting(
const blink::RendererPreferences& prefs) {
// TODO(crbug.com/1052397): Revisit once build flag switch of lacros-chrome is
// complete.
@@ -9,7 +9,7 @@
if (!prefs.should_antialias_text) {
// When anti-aliasing is off, GTK maps all non-zero hinting settings to
// 'Normal' hinting so we do the same. Otherwise, folks who have 'Slight'
-@@ -3327,7 +3327,7 @@ void WebViewImpl::UpdateFontRenderingFromRendererPrefs
+@@ -3351,7 +3351,7 @@ void WebViewImpl::UpdateFontRenderingFromRendererPrefs
renderer_preferences_.use_subpixel_positioning);
// TODO(crbug.com/1052397): Revisit once build flag switch of lacros-chrome is
// complete.
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_frame_web__frame__test.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_frame_web__frame__test.cc
index 54389507a927..a0baeb575a87 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_frame_web__frame__test.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_frame_web__frame__test.cc
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/core/frame/web_frame_test.cc.orig 2023-11-04 07:08:51 UTC
+--- third_party/blink/renderer/core/frame/web_frame_test.cc.orig 2023-12-23 12:33:28 UTC
+++ third_party/blink/renderer/core/frame/web_frame_test.cc
-@@ -6461,7 +6461,7 @@ TEST_F(WebFrameTest, DISABLED_PositionForPointTest) {
+@@ -6468,7 +6468,7 @@ TEST_F(WebFrameTest, DISABLED_PositionForPointTest) {
}
#if BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || \
@@ -9,7 +9,7 @@
// TODO(crbug.com/1090246): Fix these tests on Fuchsia and re-enable.
// TODO(crbug.com/1317375): Build these tests on all platforms.
#define MAYBE_SelectRangeStaysHorizontallyAlignedWhenMoved \
-@@ -6870,7 +6870,7 @@ TEST_F(CompositedSelectionBoundsTest, LargeSelectionSc
+@@ -6877,7 +6877,7 @@ TEST_F(CompositedSelectionBoundsTest, LargeSelectionSc
TEST_F(CompositedSelectionBoundsTest, LargeSelectionNoScroll) {
RunTest("composited_selection_bounds_large_selection_noscroll.html");
}
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_html_parser_html__document__parser__fastpath.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_html_parser_html__document__parser__fastpath.cc
index cacb9138d62c..33c943a32788 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_html_parser_html__document__parser__fastpath.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_html_parser_html__document__parser__fastpath.cc
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/core/html/parser/html_document_parser_fastpath.cc.orig 2023-07-21 09:49:17 UTC
+--- third_party/blink/renderer/core/html/parser/html_document_parser_fastpath.cc.orig 2023-12-23 12:33:28 UTC
+++ third_party/blink/renderer/core/html/parser/html_document_parser_fastpath.cc
-@@ -169,7 +169,11 @@ class HTMLFastPathParser {
+@@ -172,7 +172,11 @@ class HTMLFastPathParser {
using Span = base::span<const Char>;
using USpan = base::span<const UChar>;
// 32 matches that used by HTMLToken::Attribute.
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_layout_ng_layout__ng__view.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_layout_layout__view.cc
index 3c77fedb6433..f019344f4cdd 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_layout_ng_layout__ng__view.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_layout_layout__view.cc
@@ -1,15 +1,15 @@
---- third_party/blink/renderer/core/layout/ng/layout_ng_view.cc.orig 2023-11-04 07:08:51 UTC
-+++ third_party/blink/renderer/core/layout/ng/layout_ng_view.cc
-@@ -15,7 +15,7 @@
- #include "third_party/blink/renderer/core/svg/svg_document_extensions.h"
- #include "ui/display/screen_info.h"
+--- third_party/blink/renderer/core/layout/layout_view.cc.orig 2023-12-23 12:33:28 UTC
++++ third_party/blink/renderer/core/layout/layout_view.cc
+@@ -73,7 +73,7 @@
+ #include "ui/gfx/geometry/quad_f.h"
+ #include "ui/gfx/geometry/size_conversions.h"
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
#include "third_party/blink/renderer/platform/fonts/font_cache.h"
#endif
-@@ -51,7 +51,7 @@ void LayoutNGView::UpdateLayout() {
+@@ -808,7 +808,7 @@ void LayoutView::UpdateLayout() {
fragmentation_context_.Clear();
}
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_layout_ng_grid_ng__grid__layout__algorithm.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_layout_ng_grid_ng__grid__layout__algorithm.cc
deleted file mode 100644
index 1c501f0b192d..000000000000
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_layout_ng_grid_ng__grid__layout__algorithm.cc
+++ /dev/null
@@ -1,30 +0,0 @@
---- third_party/blink/renderer/core/layout/ng/grid/ng_grid_layout_algorithm.cc.orig 2023-10-13 13:20:35 UTC
-+++ third_party/blink/renderer/core/layout/ng/grid/ng_grid_layout_algorithm.cc
-@@ -3452,7 +3452,13 @@ void NGGridLayoutAlgorithm::PlaceGridItems(
- DCHECK(out_row_break_between);
-
- const auto& container_space = ConstraintSpace();
-+#if defined(__clang__) && (__clang_major__ >= 16)
- const auto& [grid_items, layout_data, tree_size] = sizing_tree.TreeRootData();
-+#else
-+ const auto& [g_i, l_d, t_s] = sizing_tree.TreeRootData();
-+ const auto& grid_items = g_i;
-+ const auto& layout_data = l_d;
-+#endif
-
- const auto* cached_layout_subtree = container_space.GridLayoutSubtree();
- const auto container_writing_direction =
-@@ -3616,7 +3622,13 @@ void NGGridLayoutAlgorithm::PlaceGridItemsForFragmenta
-
- // TODO(ikilpatrick): Update |SetHasSeenAllChildren| and early exit if true.
- const auto& constraint_space = ConstraintSpace();
-+#if defined(__clang__) && (__clang_major__ >= 16)
- const auto& [grid_items, layout_data, tree_size] = sizing_tree.TreeRootData();
-+#else
-+ const auto& [g_i, l_d, t_s] = sizing_tree.TreeRootData();
-+ const auto& grid_items = g_i;
-+ const auto& layout_data = l_d;
-+#endif
-
- const auto* cached_layout_subtree = constraint_space.GridLayoutSubtree();
- const auto container_writing_direction =
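This patch could be dropped presumably because it only papered over a pre-16 clang limitation: capturing structured bindings in lambdas became legal with C++20 (P1091R3/P1381R1), which clang implemented in release 16. The workaround bound the tuple members to throwaway names and re-exposed them through ordinary references, which any compiler can capture. A standalone sketch of the same trick; TreeRootData() is a hypothetical stand-in for the real sizing-tree accessor:

#include <cstdio>
#include <tuple>

// Hypothetical stand-in for the sizing tree's root-data accessor.
std::tuple<int, double, long> TreeRootData() { return {42, 1.5, 7L}; }

int main() {
#if defined(__clang__) && (__clang_major__ >= 16)
  const auto& [grid_items, layout_data, tree_size] = TreeRootData();
  (void)tree_size;
#else
  // Pre-16 clang rejects lambda captures of structured bindings, so bind
  // to throwaway names and alias only the members that are used.
  const auto& [g_i, l_d, t_s] = TreeRootData();
  (void)t_s;
  const auto& grid_items = g_i;
  const auto& layout_data = l_d;
#endif
  // Ordinary references are capturable everywhere; the direct bindings in
  // the first branch only are since C++20.
  auto dump = [&] { std::printf("%d items, %.1f\n", grid_items, layout_data); };
  dump();
}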
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_scroll_scrollbar__theme__aura.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_scroll_scrollbar__theme__aura.cc
index b7465f75f783..138df21d9064 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_scroll_scrollbar__theme__aura.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_scroll_scrollbar__theme__aura.cc
@@ -1,4 +1,4 @@
---- third_party/blink/renderer/core/scroll/scrollbar_theme_aura.cc.orig 2023-11-04 07:08:51 UTC
+--- third_party/blink/renderer/core/scroll/scrollbar_theme_aura.cc.orig 2023-12-23 12:33:28 UTC
+++ third_party/blink/renderer/core/scroll/scrollbar_theme_aura.cc
@@ -157,7 +157,7 @@ bool ScrollbarThemeAura::SupportsDragSnapBack() const
// is true for at least GTK and QT apps).
@@ -9,7 +9,7 @@
return false;
#else
return true;
-@@ -357,7 +357,7 @@ bool ScrollbarThemeAura::ShouldCenterOnThumb(const Scr
+@@ -360,7 +360,7 @@ bool ScrollbarThemeAura::ShouldCenterOnThumb(const Scr
const WebMouseEvent& event) {
// TODO(crbug.com/1052397): Revisit once build flag switch of lacros-chrome is
// complete.
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_canvas_canvas2d_canvas__style.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_canvas_canvas2d_canvas__style.cc
index 1185fffaf190..956868774350 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_canvas_canvas2d_canvas__style.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_canvas_canvas2d_canvas__style.cc
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/modules/canvas/canvas2d/canvas_style.cc.orig 2023-08-18 10:26:52 UTC
+--- third_party/blink/renderer/modules/canvas/canvas2d/canvas_style.cc.orig 2023-12-23 12:33:28 UTC
+++ third_party/blink/renderer/modules/canvas/canvas2d/canvas_style.cc
-@@ -76,6 +76,9 @@ bool ParseCanvasColorString(const String& color_string
+@@ -83,6 +83,9 @@ bool ParseCanvasColorString(const String& color_string
void CanvasStyle::ApplyToFlags(cc::PaintFlags& flags,
float global_alpha) const {
@@ -10,7 +10,7 @@
switch (type_) {
case kColor:
ApplyColorToFlags(flags, global_alpha);
-@@ -83,12 +86,20 @@ void CanvasStyle::ApplyToFlags(cc::PaintFlags& flags,
+@@ -90,12 +93,20 @@ void CanvasStyle::ApplyToFlags(cc::PaintFlags& flags,
case kGradient:
GetCanvasGradient()->GetGradient()->ApplyToFlags(flags, SkMatrix::I(),
ImageDrawOptions());
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_ml_webnn_ml__graph__xnnpack.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_ml_webnn_ml__graph__xnnpack.cc
index 085cc3dc15d8..656405787501 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_ml_webnn_ml__graph__xnnpack.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_ml_webnn_ml__graph__xnnpack.cc
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/modules/ml/webnn/ml_graph_xnnpack.cc.orig 2023-11-04 07:08:51 UTC
+--- third_party/blink/renderer/modules/ml/webnn/ml_graph_xnnpack.cc.orig 2023-12-23 12:33:28 UTC
+++ third_party/blink/renderer/modules/ml/webnn/ml_graph_xnnpack.cc
-@@ -195,7 +195,7 @@ class SharedXnnpackContext : public ThreadSafeRefCount
+@@ -196,7 +196,7 @@ class SharedXnnpackContext : public ThreadSafeRefCount
~SharedXnnpackContext() {
base::AutoLock auto_lock(SharedXnnpackContextLock());
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_webgl_webgl__rendering__context__base.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_webgl_webgl__rendering__context__base.cc
index 1c7018445f2e..ee261f45ca31 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_webgl_webgl__rendering__context__base.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_webgl_webgl__rendering__context__base.cc
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/modules/webgl/webgl_rendering_context_base.cc.orig 2023-11-04 07:08:51 UTC
+--- third_party/blink/renderer/modules/webgl/webgl_rendering_context_base.cc.orig 2023-12-23 12:33:28 UTC
+++ third_party/blink/renderer/modules/webgl/webgl_rendering_context_base.cc
-@@ -6328,7 +6328,7 @@ void WebGLRenderingContextBase::TexImageHelperMediaVid
+@@ -6312,7 +6312,7 @@ void WebGLRenderingContextBase::TexImageHelperMediaVid
constexpr bool kAllowZeroCopyImages = true;
#endif
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_webgpu_gpu__queue.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_webgpu_gpu__queue.cc
index 18f2b7322bdc..a90e004c9d99 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_webgpu_gpu__queue.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_webgpu_gpu__queue.cc
@@ -1,4 +1,4 @@
---- third_party/blink/renderer/modules/webgpu/gpu_queue.cc.orig 2023-10-13 13:20:35 UTC
+--- third_party/blink/renderer/modules/webgpu/gpu_queue.cc.orig 2023-12-23 12:33:28 UTC
+++ third_party/blink/renderer/modules/webgpu/gpu_queue.cc
@@ -746,7 +746,7 @@ bool GPUQueue::CopyFromCanvasSourceImage(
// on linux platform.
@@ -7,5 +7,5 @@
-#if BUILDFLAG(IS_LINUX)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
bool forceReadback = true;
- #elif BUILDFLAG(IS_WIN)
- bool forceReadback =
+ #elif BUILDFLAG(IS_ANDROID)
+ // TODO(crbug.com/dawn/1969): Some Android devices don't fail to copy from
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_BUILD.gn b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_BUILD.gn
index 9118f18ad922..f67ecc10916a 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_BUILD.gn
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/platform/BUILD.gn.orig 2023-11-04 07:08:51 UTC
+--- third_party/blink/renderer/platform/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ third_party/blink/renderer/platform/BUILD.gn
-@@ -1919,7 +1919,7 @@ static_library("test_support") {
+@@ -1928,7 +1928,7 @@ static_library("test_support") {
]
# fuzzed_data_provider may not work with a custom toolchain.
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_fonts_font__description.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_fonts_font__description.cc
index 89c34b6ebdce..225e306e37f6 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_fonts_font__description.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_fonts_font__description.cc
@@ -1,4 +1,4 @@
---- third_party/blink/renderer/platform/fonts/font_description.cc.orig 2023-11-04 07:08:51 UTC
+--- third_party/blink/renderer/platform/fonts/font_description.cc.orig 2023-12-23 12:33:28 UTC
+++ third_party/blink/renderer/platform/fonts/font_description.cc
@@ -39,7 +39,7 @@
#include "third_party/blink/renderer/platform/wtf/text/string_hash.h"
@@ -9,9 +9,9 @@
#include "third_party/blink/renderer/platform/fonts/font_cache.h"
#endif
-@@ -275,7 +275,7 @@ FontCacheKey FontDescription::CacheKey(
- static_cast<unsigned>(fields_.orientation_) << 1 | // bit 2-3
- static_cast<unsigned>(fields_.subpixel_text_position_); // bit 1
+@@ -277,7 +277,7 @@ FontCacheKey FontDescription::CacheKey(
+ static_cast<unsigned>(fields_.orientation_) << 1 | // bit 2-3
+ static_cast<unsigned>(fields_.subpixel_text_position_); // bit 1
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_runtime__enabled__features.json5 b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_runtime__enabled__features.json5
index f69635ea896a..fa96b4e88617 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_runtime__enabled__features.json5
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_runtime__enabled__features.json5
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/platform/runtime_enabled_features.json5.orig 2023-11-04 07:08:51 UTC
+--- third_party/blink/renderer/platform/runtime_enabled_features.json5.orig 2023-12-23 12:33:28 UTC
+++ third_party/blink/renderer/platform/runtime_enabled_features.json5
-@@ -1936,7 +1936,7 @@
+@@ -2006,7 +2006,7 @@
base_feature_status: "enabled",
copied_from_base_feature_if: "overridden",
origin_trial_feature_name: "FullscreenPopupWindows",
@@ -9,7 +9,7 @@
},
{
name: "GamepadButtonAxisEvents",
-@@ -2849,7 +2849,7 @@
+@@ -2929,7 +2929,7 @@
name: "PaymentHandlerMinimalHeaderUX",
origin_trial_feature_name: "PaymentHandlerMinimalHeaderUX",
origin_trial_allows_third_party: true,
@@ -18,7 +18,7 @@
status: "stable",
},
{
-@@ -3874,7 +3874,7 @@
+@@ -3976,7 +3976,7 @@
name: "UnrestrictedSharedArrayBuffer",
base_feature: "none",
origin_trial_feature_name: "UnrestrictedSharedArrayBuffer",
@@ -26,8 +26,8 @@
+ origin_trial_os: ["win", "mac", "linux", "fuchsia", "chromeos", "openbsd", "freebsd"],
},
{
- name: "URLPatternCompareComponent",
-@@ -4077,12 +4077,12 @@
+ // This flag makes IDL reflected attributes with the "URL" IDL attribute
+@@ -4194,12 +4194,12 @@
status: "experimental",
base_feature: "none",
origin_trial_feature_name: "WebAppUrlHandling",
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_video__capture_video__capture__impl.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_video__capture_video__capture__impl.cc
index a7fa08607084..c55a414a47d9 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_video__capture_video__capture__impl.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_video__capture_video__capture__impl.cc
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/platform/video_capture/video_capture_impl.cc.orig 2023-11-04 07:08:51 UTC
+--- third_party/blink/renderer/platform/video_capture/video_capture_impl.cc.orig 2023-12-23 12:33:28 UTC
+++ third_party/blink/renderer/platform/video_capture/video_capture_impl.cc
-@@ -623,7 +623,7 @@ bool VideoCaptureImpl::VideoFrameBufferPreparer::BindV
+@@ -634,7 +634,7 @@ bool VideoCaptureImpl::VideoFrameBufferPreparer::BindV
}
const unsigned texture_target =
diff --git a/www/ungoogled-chromium/files/patch-third__party_boringssl_src_util_generate__build__files.py b/www/ungoogled-chromium/files/patch-third__party_boringssl_src_util_generate__build__files.py
index 6afebae5df60..b9b5f5b0237c 100644
--- a/www/ungoogled-chromium/files/patch-third__party_boringssl_src_util_generate__build__files.py
+++ b/www/ungoogled-chromium/files/patch-third__party_boringssl_src_util_generate__build__files.py
@@ -1,6 +1,6 @@
---- third_party/boringssl/src/util/generate_build_files.py.orig 2023-10-13 13:20:35 UTC
+--- third_party/boringssl/src/util/generate_build_files.py.orig 2023-12-23 12:33:28 UTC
+++ third_party/boringssl/src/util/generate_build_files.py
-@@ -766,10 +766,10 @@ def main(platforms):
+@@ -772,10 +772,10 @@ def main(platforms):
]
# Generate err_data.c
@@ -15,7 +15,7 @@
crypto_c_files.append('err_data.c')
crypto_c_files.sort()
-@@ -780,11 +780,11 @@ def main(platforms):
+@@ -786,11 +786,11 @@ def main(platforms):
crypto_test_files = []
if EMBED_TEST_DATA:
# Generate crypto_test_data.cc
diff --git a/www/ungoogled-chromium/files/patch-third__party_crashpad_crashpad_util_posix_close__multiple.cc b/www/ungoogled-chromium/files/patch-third__party_crashpad_crashpad_util_posix_close__multiple.cc
index 8533fc01858e..c792384c035a 100644
--- a/www/ungoogled-chromium/files/patch-third__party_crashpad_crashpad_util_posix_close__multiple.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_crashpad_crashpad_util_posix_close__multiple.cc
@@ -1,6 +1,6 @@
---- third_party/crashpad/crashpad/util/posix/close_multiple.cc.orig 2022-10-01 07:40:07 UTC
+--- third_party/crashpad/crashpad/util/posix/close_multiple.cc.orig 2023-12-23 12:33:28 UTC
+++ third_party/crashpad/crashpad/util/posix/close_multiple.cc
-@@ -72,7 +72,7 @@ void CloseNowOrOnExec(int fd, bool ebadf_ok) {
+@@ -73,7 +73,7 @@ void CloseNowOrOnExec(int fd, bool ebadf_ok) {
// This is an advantage over looping over all possible file descriptors, because
// no attempt needs to be made to close file descriptors that are not open.
bool CloseMultipleNowOrOnExecUsingFDDir(int min_fd, int preserve_fd) {
diff --git a/www/ungoogled-chromium/files/patch-third__party_dawn_include_dawn_native_VulkanBackend.h b/www/ungoogled-chromium/files/patch-third__party_dawn_include_dawn_native_VulkanBackend.h
index 4c6003b37fc0..de1b386bf721 100644
--- a/www/ungoogled-chromium/files/patch-third__party_dawn_include_dawn_native_VulkanBackend.h
+++ b/www/ungoogled-chromium/files/patch-third__party_dawn_include_dawn_native_VulkanBackend.h
@@ -1,6 +1,6 @@
---- third_party/dawn/include/dawn/native/VulkanBackend.h.orig 2023-08-18 10:26:52 UTC
+--- third_party/dawn/include/dawn/native/VulkanBackend.h.orig 2023-12-23 12:33:28 UTC
+++ third_party/dawn/include/dawn/native/VulkanBackend.h
-@@ -80,7 +80,7 @@ struct ExternalImageExportInfoVk : ExternalImageExport
+@@ -83,7 +83,7 @@ struct ExternalImageExportInfoVk : ExternalImageExport
};
// Can't use DAWN_PLATFORM_IS(LINUX) since header included in both Dawn and Chrome
diff --git a/www/ungoogled-chromium/files/patch-third__party_dawn_src_dawn_common_Platform.h b/www/ungoogled-chromium/files/patch-third__party_dawn_src_dawn_common_Platform.h
index 5c54a67cf6c3..553f4c200915 100644
--- a/www/ungoogled-chromium/files/patch-third__party_dawn_src_dawn_common_Platform.h
+++ b/www/ungoogled-chromium/files/patch-third__party_dawn_src_dawn_common_Platform.h
@@ -1,6 +1,6 @@
---- third_party/dawn/src/dawn/common/Platform.h.orig 2023-04-08 11:38:38 UTC
+--- third_party/dawn/src/dawn/common/Platform.h.orig 2023-12-23 12:33:28 UTC
+++ third_party/dawn/src/dawn/common/Platform.h
-@@ -46,6 +46,11 @@
+@@ -59,6 +59,11 @@
#error "Unsupported Windows platform."
#endif
diff --git a/www/ungoogled-chromium/files/patch-third__party_dawn_src_dawn_native_vulkan_BackendVk.cpp b/www/ungoogled-chromium/files/patch-third__party_dawn_src_dawn_native_vulkan_BackendVk.cpp
index d0d7e5d2bd34..e53819b71028 100644
--- a/www/ungoogled-chromium/files/patch-third__party_dawn_src_dawn_native_vulkan_BackendVk.cpp
+++ b/www/ungoogled-chromium/files/patch-third__party_dawn_src_dawn_native_vulkan_BackendVk.cpp
@@ -1,6 +1,6 @@
---- third_party/dawn/src/dawn/native/vulkan/BackendVk.cpp.orig 2022-10-01 07:40:07 UTC
+--- third_party/dawn/src/dawn/native/vulkan/BackendVk.cpp.orig 2023-12-23 12:33:28 UTC
+++ third_party/dawn/src/dawn/native/vulkan/BackendVk.cpp
-@@ -42,7 +42,7 @@ constexpr char kSwiftshaderLibName[] = "libvk_swiftsha
+@@ -55,7 +55,7 @@ constexpr char kSwiftshaderLibName[] = "libvk_swiftsha
#endif
#if DAWN_PLATFORM_IS(LINUX)
diff --git a/www/ungoogled-chromium/files/patch-third__party_ffmpeg_chromium_scripts_build__ffmpeg.py b/www/ungoogled-chromium/files/patch-third__party_ffmpeg_chromium_scripts_build__ffmpeg.py
index 87731313ef77..a07a8a242d01 100644
--- a/www/ungoogled-chromium/files/patch-third__party_ffmpeg_chromium_scripts_build__ffmpeg.py
+++ b/www/ungoogled-chromium/files/patch-third__party_ffmpeg_chromium_scripts_build__ffmpeg.py
@@ -1,4 +1,4 @@
---- third_party/ffmpeg/chromium/scripts/build_ffmpeg.py.orig 2023-09-17 07:59:53 UTC
+--- third_party/ffmpeg/chromium/scripts/build_ffmpeg.py.orig 2023-12-23 12:33:28 UTC
+++ third_party/ffmpeg/chromium/scripts/build_ffmpeg.py
@@ -32,7 +32,7 @@ NDK_ROOT_DIR = os.path.abspath(
SUCCESS_TOKEN = 'THIS_BUILD_WORKED'
@@ -74,7 +74,7 @@
parser.print_help()
return 1
-@@ -744,7 +752,7 @@ def ConfigureAndBuild(target_arch, target_os, host_os,
+@@ -742,7 +750,7 @@ def ConfigureAndBuild(target_arch, target_os, host_os,
'--enable-parser=vp3,vp8',
])
@@ -83,7 +83,7 @@
if target_arch == 'x64':
if target_os == 'android':
configure_flags['Common'].extend([
-@@ -754,7 +762,7 @@ def ConfigureAndBuild(target_arch, target_os, host_os,
+@@ -752,7 +760,7 @@ def ConfigureAndBuild(target_arch, target_os, host_os,
configure_flags['Common'].extend([
'--enable-lto',
'--arch=x86_64',
@@ -92,7 +92,7 @@
])
if host_arch != 'x64':
-@@ -845,7 +853,7 @@ def ConfigureAndBuild(target_arch, target_os, host_os,
+@@ -843,7 +851,7 @@ def ConfigureAndBuild(target_arch, target_os, host_os,
'--extra-cflags=-mfpu=vfpv3-d16',
])
elif target_arch == 'arm64':
@@ -101,7 +101,7 @@
if host_arch != 'arm64':
configure_flags['Common'].extend([
'--enable-cross-compile',
-@@ -910,7 +918,7 @@ def ConfigureAndBuild(target_arch, target_os, host_os,
+@@ -908,7 +916,7 @@ def ConfigureAndBuild(target_arch, target_os, host_os,
'--disable-mips64r2',
'--enable-msa',
])
@@ -110,7 +110,7 @@
configure_flags['Common'].extend([
'--enable-cross-compile',
'--target-os=linux',
-@@ -1061,7 +1069,7 @@ def ConfigureAndBuild(target_arch, target_os, host_os,
+@@ -1059,7 +1067,7 @@ def ConfigureAndBuild(target_arch, target_os, host_os,
'Chrome', configure_flags['Common'] + configure_flags['ChromeAndroid'] +
configure_args)
diff --git a/www/ungoogled-chromium/files/patch-third__party_ffmpeg_configure b/www/ungoogled-chromium/files/patch-third__party_ffmpeg_configure
index ab02947fec9f..cf279d0d359f 100644
--- a/www/ungoogled-chromium/files/patch-third__party_ffmpeg_configure
+++ b/www/ungoogled-chromium/files/patch-third__party_ffmpeg_configure
@@ -1,6 +1,6 @@
---- third_party/ffmpeg/configure.orig 2023-09-17 07:59:53 UTC
+--- third_party/ffmpeg/configure.orig 2023-12-23 12:33:28 UTC
+++ third_party/ffmpeg/configure
-@@ -5613,6 +5613,7 @@ case $target_os in
+@@ -5604,6 +5604,7 @@ case $target_os in
disable symver
;;
freebsd)
diff --git a/www/ungoogled-chromium/files/patch-third__party_ffmpeg_libavcodec_x86_fft.asm b/www/ungoogled-chromium/files/patch-third__party_ffmpeg_libavcodec_x86_fft.asm
deleted file mode 100644
index 6c81a6b1d8cf..000000000000
--- a/www/ungoogled-chromium/files/patch-third__party_ffmpeg_libavcodec_x86_fft.asm
+++ /dev/null
@@ -1,108 +0,0 @@
---- third_party/ffmpeg/libavcodec/x86/fft.asm.orig 2023-07-21 09:49:17 UTC
-+++ third_party/ffmpeg/libavcodec/x86/fft.asm
-@@ -296,6 +296,7 @@ INIT_YMM avx
- %if HAVE_AVX_EXTERNAL
- align 16
- fft8_avx:
-+ _CET_ENDBR
- mova m0, Z(0)
- mova m1, Z(1)
- T8_AVX m0, m1, m2, m3, m4
-@@ -306,6 +307,7 @@ fft8_avx:
-
- align 16
- fft16_avx:
-+ _CET_ENDBR
- mova m2, Z(2)
- mova m3, Z(3)
- T4_SSE m2, m3, m7
-@@ -343,6 +345,7 @@ fft16_avx:
-
- align 16
- fft32_avx:
-+ _CET_ENDBR
- call fft16_avx
-
- mova m0, Z(4)
-@@ -367,6 +370,7 @@ fft32_avx:
- ret
-
- fft32_interleave_avx:
-+ _CET_ENDBR
- call fft32_avx
- mov r2d, 32
- .deint_loop:
-@@ -390,6 +394,7 @@ INIT_XMM sse
- align 16
- fft4_avx:
- fft4_sse:
-+ _CET_ENDBR
- mova m0, Z(0)
- mova m1, Z(1)
- T4_SSE m0, m1, m2
-@@ -399,6 +404,7 @@ fft4_sse:
-
- align 16
- fft8_sse:
-+ _CET_ENDBR
- mova m0, Z(0)
- mova m1, Z(1)
- T4_SSE m0, m1, m2
-@@ -413,6 +419,7 @@ fft8_sse:
-
- align 16
- fft16_sse:
-+ _CET_ENDBR
- mova m0, Z(0)
- mova m1, Z(1)
- T4_SSE m0, m1, m2
-@@ -441,6 +448,7 @@ fft16_sse:
- %macro DECL_PASS 2+ ; name, payload
- align 16
- %1:
-+ _CET_ENDBR
- DEFINE_ARGS zc, w, n, o1, o3
- lea o3q, [nq*3]
- lea o1q, [nq*8]
-@@ -457,10 +465,6 @@ DEFINE_ARGS zc, w, n, o1, o3
- %macro FFT_DISPATCH 2; clobbers 5 GPRs, 8 XMMs
- lea r2, [dispatch_tab%1]
- mov r2, [r2 + (%2q-2)*gprsize]
--%ifdef PIC
-- lea r3, [$$]
-- add r2, r3
--%endif
- call r2
- %endmacro ; FFT_DISPATCH
-
-@@ -585,11 +589,7 @@ cglobal imdct_calc, 3,5,3
- jl .loop
- RET
-
--%ifdef PIC
--%define SECTION_REL - $$
--%else
- %define SECTION_REL
--%endif
-
- %macro DECL_FFT 1-2 ; nbits, suffix
- %ifidn %0, 1
-@@ -613,6 +613,7 @@ cglobal imdct_calc, 3,5,3
-
- align 16
- fft %+ n %+ fullsuffix:
-+ _CET_ENDBR
- call fft %+ n2 %+ SUFFIX
- add r0, n*4 - (n&(-2<<%1))
- call fft %+ n4 %+ SUFFIX
-@@ -627,8 +628,10 @@ fft %+ n %+ fullsuffix:
- %endrep
- %undef n
-
-+[SECTION .data.rel.ro write]
- align 8
- dispatch_tab %+ fullsuffix: pointer list_of_fft
-+__?SECT?__
- %endmacro ; DECL_FFT
-
- %if HAVE_AVX_EXTERNAL
diff --git a/www/ungoogled-chromium/files/patch-third__party_node_node.gni b/www/ungoogled-chromium/files/patch-third__party_node_node.gni
new file mode 100644
index 000000000000..f7392cb91e6d
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-third__party_node_node.gni
@@ -0,0 +1,11 @@
+--- third_party/node/node.gni.orig 2023-12-23 12:33:28 UTC
++++ third_party/node/node.gni
+@@ -19,7 +19,7 @@ template("node") {
+
+ # When use_remoteexec=true or use_siso=true, node actions run on remote
+ # Linux worker. So it should include linux node binary in inputs.
+- if (is_linux || is_chromeos || use_remoteexec || use_siso) {
++ if ((is_linux || is_chromeos || use_remoteexec || use_siso) && !is_bsd) {
+ inputs += [
+ "//third_party/node/linux/node-linux-x64.tar.gz.sha1",
+ "//third_party/node/linux/node-linux-x64/bin/node",
diff --git a/www/ungoogled-chromium/files/patch-third__party_pdfium_pdfium.gni b/www/ungoogled-chromium/files/patch-third__party_pdfium_pdfium.gni
index b077a823c94a..d15263e8116f 100644
--- a/www/ungoogled-chromium/files/patch-third__party_pdfium_pdfium.gni
+++ b/www/ungoogled-chromium/files/patch-third__party_pdfium_pdfium.gni
@@ -1,11 +1,11 @@
---- third_party/pdfium/pdfium.gni.orig 2023-10-13 13:20:35 UTC
+--- third_party/pdfium/pdfium.gni.orig 2023-12-23 12:33:28 UTC
+++ third_party/pdfium/pdfium.gni
-@@ -46,7 +46,7 @@ declare_args() {
+@@ -41,7 +41,7 @@ declare_args() {
# PDFium will use PartitionAlloc partitions to separate strings, scalars,
# etc. from other allocations. However, the use of PartitionAlloc for new or
# malloc is controlled by args in build_overrides/partition_alloc.gni.
- pdf_use_partition_alloc = pdf_use_partition_alloc_override
+ pdf_use_partition_alloc = pdf_use_partition_alloc_override && use_partition_alloc_as_malloc
- # Temporary config allowing chromium to switch its location of the
- # partition_alloc library. https://crbug.com/1467773
+ # Build PDFium to use Skia (experimental) for all PDFium graphics.
+ # If enabled, coexists in build with AGG graphics and the default
diff --git a/www/ungoogled-chromium/files/patch-third__party_perfetto_include_perfetto_base_time.h b/www/ungoogled-chromium/files/patch-third__party_perfetto_include_perfetto_base_time.h
index 61e3831a1fe5..780047685cde 100644
--- a/www/ungoogled-chromium/files/patch-third__party_perfetto_include_perfetto_base_time.h
+++ b/www/ungoogled-chromium/files/patch-third__party_perfetto_include_perfetto_base_time.h
@@ -1,6 +1,6 @@
---- third_party/perfetto/include/perfetto/base/time.h.orig 2023-11-04 07:08:51 UTC
+--- third_party/perfetto/include/perfetto/base/time.h.orig 2023-12-23 12:33:28 UTC
+++ third_party/perfetto/include/perfetto/base/time.h
-@@ -166,6 +166,9 @@ inline TimeNanos GetTimeInternalNs(clockid_t clk_id) {
+@@ -167,6 +167,9 @@ inline TimeNanos GetTimeInternalNs(clockid_t clk_id) {
// Return ns from boot. Conversely to GetWallTimeNs, this clock counts also time
// during suspend (when supported).
inline TimeNanos GetBootTimeNs() {
@@ -10,7 +10,7 @@
// Determine if CLOCK_BOOTTIME is available on the first call.
static const clockid_t kBootTimeClockSource = [] {
struct timespec ts = {};
-@@ -173,6 +176,7 @@ inline TimeNanos GetBootTimeNs() {
+@@ -174,6 +177,7 @@ inline TimeNanos GetBootTimeNs() {
return res == 0 ? CLOCK_BOOTTIME : kWallTimeClockSource;
}();
return GetTimeInternalNs(kBootTimeClockSource);
@@ -18,7 +18,7 @@
}
inline TimeNanos GetWallTimeNs() {
-@@ -180,7 +184,13 @@ inline TimeNanos GetWallTimeNs() {
+@@ -181,7 +185,13 @@ inline TimeNanos GetWallTimeNs() {
}
inline TimeNanos GetWallTimeRawNs() {
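For context, the Linux code this hunk bypasses probes CLOCK_BOOTTIME exactly once, through a function-local static initialized by a lambda (visible in the surrounding context lines), and falls back to another clock source when the kernel lacks it; the BSDs have no CLOCK_BOOTTIME at all, so the patch short-circuits GetBootTimeNs() instead. A rough Linux-only sketch of that probe-once idiom, with CLOCK_MONOTONIC standing in for perfetto's actual fallback source:

#include <cstdio>
#include <ctime>

// Linux-only: CLOCK_BOOTTIME does not exist on the BSDs, which is exactly
// why the patch skips this probe there.
static clockid_t BootClockSource() {
  // The lambda runs once; later calls reuse the cached clock id.
  static const clockid_t kSource = [] {
    timespec ts{};
    return clock_gettime(CLOCK_BOOTTIME, &ts) == 0 ? CLOCK_BOOTTIME
                                                   : CLOCK_MONOTONIC;
  }();
  return kSource;
}

int main() {
  timespec ts{};
  clock_gettime(BootClockSource(), &ts);
  std::printf("%lld.%09ld s since boot\n",
              static_cast<long long>(ts.tv_sec), ts.tv_nsec);
}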
diff --git a/www/ungoogled-chromium/files/patch-third__party_perfetto_include_perfetto_ext_base_thread__utils.h b/www/ungoogled-chromium/files/patch-third__party_perfetto_include_perfetto_ext_base_thread__utils.h
index e2ce5e40d538..e1754836b082 100644
--- a/www/ungoogled-chromium/files/patch-third__party_perfetto_include_perfetto_ext_base_thread__utils.h
+++ b/www/ungoogled-chromium/files/patch-third__party_perfetto_include_perfetto_ext_base_thread__utils.h
@@ -1,6 +1,6 @@
---- third_party/perfetto/include/perfetto/ext/base/thread_utils.h.orig 2022-10-01 07:40:07 UTC
+--- third_party/perfetto/include/perfetto/ext/base/thread_utils.h.orig 2023-12-23 12:33:28 UTC
+++ third_party/perfetto/include/perfetto/ext/base/thread_utils.h
-@@ -40,9 +40,10 @@
+@@ -41,9 +41,10 @@
namespace perfetto {
namespace base {
diff --git a/www/ungoogled-chromium/files/patch-third__party_perfetto_src_base_string__utils.cc b/www/ungoogled-chromium/files/patch-third__party_perfetto_src_base_string__utils.cc
index fe53f317b466..df0f040f788e 100644
--- a/www/ungoogled-chromium/files/patch-third__party_perfetto_src_base_string__utils.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_perfetto_src_base_string__utils.cc
@@ -1,6 +1,6 @@
---- third_party/perfetto/src/base/string_utils.cc.orig 2022-10-01 07:40:07 UTC
+--- third_party/perfetto/src/base/string_utils.cc.orig 2023-12-23 12:33:28 UTC
+++ third_party/perfetto/src/base/string_utils.cc
-@@ -36,9 +36,10 @@ namespace base {
+@@ -38,9 +38,10 @@ namespace base {
// Locale-independant as possible version of strtod.
double StrToD(const char* nptr, char** endptr) {
diff --git a/www/ungoogled-chromium/files/patch-third__party_perfetto_src_trace__processor_db_storage_numeric__storage.cc b/www/ungoogled-chromium/files/patch-third__party_perfetto_src_trace__processor_db_storage_numeric__storage.cc
index 7e6744d9db1c..a948ded56936 100644
--- a/www/ungoogled-chromium/files/patch-third__party_perfetto_src_trace__processor_db_storage_numeric__storage.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_perfetto_src_trace__processor_db_storage_numeric__storage.cc
@@ -1,6 +1,6 @@
---- third_party/perfetto/src/trace_processor/db/storage/numeric_storage.cc.orig 2023-10-14 11:56:57 UTC
+--- third_party/perfetto/src/trace_processor/db/storage/numeric_storage.cc.orig 2023-12-23 12:33:28 UTC
+++ third_party/perfetto/src/trace_processor/db/storage/numeric_storage.cc
-@@ -245,8 +245,13 @@ BitVector NumericStorage::LinearSearch(FilterOp op,
+@@ -251,8 +251,13 @@ BitVector NumericStorage::LinearSearchInternal(FilterO
} else if (const auto* i32 = std::get_if<int32_t>(&*val)) {
auto* start = static_cast<const int32_t*>(data_) + range.start;
TypedLinearSearch(*i32, start, op, builder);
diff --git a/www/ungoogled-chromium/files/patch-third__party_speech-dispatcher_libspeechd__version.h b/www/ungoogled-chromium/files/patch-third__party_speech-dispatcher_libspeechd__version.h
new file mode 100644
index 000000000000..6b0aebb7e6fe
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-third__party_speech-dispatcher_libspeechd__version.h
@@ -0,0 +1,32 @@
+--- third_party/speech-dispatcher/libspeechd_version.h.orig 2023-12-23 12:33:28 UTC
++++ third_party/speech-dispatcher/libspeechd_version.h
+@@ -0,0 +1,29 @@
++/*
++ * libspeechd_version.h - Shared library for easy access to Speech Dispatcher functions (header)
++ *
++ * Copyright (C) 2001, 2002, 2003, 2004 Brailcom, o.p.s.
++ *
++ * This is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU Lesser General Public License as published by
++ * the Free Software Foundation; either version 2.1, or (at your option)
++ * any later version.
++ *
++ * This software is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public License
++ * along with this program. If not, see <https://www.gnu.org/licenses/>.
++ *
++ * $Id: patch-third_party_speech-dispatcher_libspeechd_version_h,v 1.1 2023/12/11 14:26:54 robert Exp $
++ */
++
++#ifndef _LIBSPEECHD_VERSION_H
++#define _LIBSPEECHD_VERSION_H
++
++#define LIBSPEECHD_MAJOR_VERSION 0
++#define LIBSPEECHD_MINOR_VERSION 11
++#define LIBSPEECHD_MICRO_VERSION 5
++
++#endif /* ifndef _LIBSPEECHD_VERSION_H */
diff --git a/www/ungoogled-chromium/files/patch-third__party_speech-dispatcher_speechd__types.h b/www/ungoogled-chromium/files/patch-third__party_speech-dispatcher_speechd__types.h
new file mode 100644
index 000000000000..6bb099075b9d
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-third__party_speech-dispatcher_speechd__types.h
@@ -0,0 +1,142 @@
+--- third_party/speech-dispatcher/speechd_types.h.orig 2023-12-23 12:33:28 UTC
++++ third_party/speech-dispatcher/speechd_types.h
+@@ -119,4 +119,139 @@ typedef struct {
+ SPDVoice voice;
+ } SPDMsgSettings;
+
++/* TEMP_FAILURE_RETRY seems to be available only on Linux. For systems that
++ * don't have this macro we provide our own version. This code was taken from
++ * file "/usr/include/unistd.h" from Debian package "libc6-dev"
++ * version 2.3.2.ds1-20. */
++#ifndef TEMP_FAILURE_RETRY
++#define TEMP_FAILURE_RETRY(expression) \
++ (__extension__ \
++ ({ long int __result; \
++ do __result = (long int) (expression); \
++ while (__result == -1L && errno == EINTR); \
++ __result; }))
++#endif
++
++#endif /* not ifndef SPEECHD_TYPES */
++
++/*
++ * speechd_types.h - types for Speech Dispatcher
++ *
++ * Copyright (C) 2001, 2002, 2003 Brailcom, o.p.s.
++ *
++ * This is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU Lesser General Public License as published by
++ * the Free Software Foundation; either version 2.1, or (at your option)
++ * any later version.
++ *
++ * This software is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public License
++ * along with this program. If not, see <https://www.gnu.org/licenses/>.
++ */
++
++#ifndef SPEECHD_TYPES_H
++#define SPEECHD_TYPES_H
++
++typedef enum {
++ SPD_PUNCT_ALL = 0,
++ SPD_PUNCT_NONE = 1,
++ SPD_PUNCT_SOME = 2,
++ SPD_PUNCT_MOST = 3
++} SPDPunctuation;
++
++typedef enum {
++ SPD_CAP_NONE = 0,
++ SPD_CAP_SPELL = 1,
++ SPD_CAP_ICON = 2
++} SPDCapitalLetters;
++
++typedef enum {
++ SPD_SPELL_OFF = 0,
++ SPD_SPELL_ON = 1
++} SPDSpelling;
++
++typedef enum {
++ SPD_MALE1 = 1,
++ SPD_MALE2 = 2,
++ SPD_MALE3 = 3,
++ SPD_FEMALE1 = 4,
++ SPD_FEMALE2 = 5,
++ SPD_FEMALE3 = 6,
++ SPD_CHILD_MALE = 7,
++ SPD_CHILD_FEMALE = 8,
++ SPD_UNSPECIFIED = -1
++} SPDVoiceType;
++
++typedef struct {
++ char *name; /* Name of the voice (id) */
++ char *language; /* 2/3-letter ISO language code,
++ * possibly followed by 2/3-letter ISO region code,
++ * e.g. en-US */
++ char *variant; /* a not-well defined string describing dialect etc. */
++} SPDVoice;
++
++typedef enum {
++ SPD_DATA_TEXT = 0,
++ SPD_DATA_SSML = 1
++} SPDDataMode;
++
++typedef enum {
++ SPD_IMPORTANT = 1,
++ SPD_MESSAGE = 2,
++ SPD_TEXT = 3,
++ SPD_NOTIFICATION = 4,
++ SPD_PROGRESS = 5
++} SPDPriority;
++
++typedef enum {
++ SPD_BEGIN = 1,
++ SPD_END = 2,
++ SPD_INDEX_MARKS = 4,
++ SPD_CANCEL = 8,
++ SPD_PAUSE = 16,
++ SPD_RESUME = 32,
++
++ SPD_ALL = 0x3f
++} SPDNotification;
++
++typedef enum {
++ SPD_EVENT_BEGIN,
++ SPD_EVENT_END,
++ SPD_EVENT_INDEX_MARK,
++ SPD_EVENT_CANCEL,
++ SPD_EVENT_PAUSE,
++ SPD_EVENT_RESUME
++} SPDNotificationType;
++
++typedef enum {
++ SORT_BY_TIME = 0,
++ SORT_BY_ALPHABET = 1
++} ESort;
++
++typedef enum {
++ SPD_MSGTYPE_TEXT = 0,
++ SPD_MSGTYPE_SOUND_ICON = 1,
++ SPD_MSGTYPE_CHAR = 2,
++ SPD_MSGTYPE_KEY = 3,
++ SPD_MSGTYPE_SPELL = 99
++} SPDMessageType;
++
++typedef struct {
++ signed int rate;
++ signed int pitch;
++ signed int pitch_range;
++ signed int volume;
++
++ SPDPunctuation punctuation_mode;
++ SPDSpelling spelling_mode;
++ SPDCapitalLetters cap_let_recogn;
++
++ SPDVoiceType voice_type;
++ SPDVoice voice;
++} SPDMsgSettings;
++
+ #endif /* not ifndef SPEECHD_TYPES */
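The TEMP_FAILURE_RETRY fallback added above exists because glibc provides that macro and the BSDs do not: it restarts a syscall that failed with EINTR, so a signal arriving mid-call does not surface as a spurious error. A small usage sketch; like the macro itself, it relies on the GNU statement-expression extension, so it builds with gcc/clang only:

#include <cerrno>
#include <cstdio>
#include <unistd.h>

#ifndef TEMP_FAILURE_RETRY
#define TEMP_FAILURE_RETRY(expression)            \
  (__extension__({                                \
    long int __result;                            \
    do __result = (long int)(expression);         \
    while (__result == -1L && errno == EINTR);    \
    __result;                                     \
  }))
#endif

int main() {
  char buf[64];
  // Without the wrapper, a handled signal could make read() return -1
  // with errno == EINTR even though no real error occurred.
  ssize_t n = TEMP_FAILURE_RETRY(read(STDIN_FILENO, buf, sizeof buf));
  if (n < 0) {
    std::perror("read");
    return 1;
  }
  std::printf("read %zd bytes\n", n);
  return 0;
}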
diff --git a/www/ungoogled-chromium/files/patch-third__party_sqlite_src_amalgamation_sqlite3.c b/www/ungoogled-chromium/files/patch-third__party_sqlite_src_amalgamation_sqlite3.c
index f4ff2edb1b37..fd8a00bf5591 100644
--- a/www/ungoogled-chromium/files/patch-third__party_sqlite_src_amalgamation_sqlite3.c
+++ b/www/ungoogled-chromium/files/patch-third__party_sqlite_src_amalgamation_sqlite3.c
@@ -1,6 +1,6 @@
---- third_party/sqlite/src/amalgamation/sqlite3.c.orig 2023-08-18 10:26:52 UTC
+--- third_party/sqlite/src/amalgamation/sqlite3.c.orig 2023-12-23 12:33:28 UTC
+++ third_party/sqlite/src/amalgamation/sqlite3.c
-@@ -43843,7 +43843,12 @@ static int unixRandomness(sqlite3_vfs *NotUsed, int nB
+@@ -44312,7 +44312,12 @@ static int unixRandomness(sqlite3_vfs *NotUsed, int nB
memset(zBuf, 0, nBuf);
randomnessPid = osGetpid(0);
#if !defined(SQLITE_TEST) && !defined(SQLITE_OMIT_RANDOMNESS)
@@ -13,7 +13,7 @@
int fd, got;
fd = robust_open("/dev/urandom", O_RDONLY, 0);
if( fd<0 ){
-@@ -43858,6 +43863,7 @@ static int unixRandomness(sqlite3_vfs *NotUsed, int nB
+@@ -44327,6 +44332,7 @@ static int unixRandomness(sqlite3_vfs *NotUsed, int nB
robust_close(0, fd, __LINE__);
}
}
diff --git a/www/ungoogled-chromium/files/patch-third__party_tflite_features.gni b/www/ungoogled-chromium/files/patch-third__party_tflite_features.gni
index c6c8251f3286..7fbcbdc8c5c1 100644
--- a/www/ungoogled-chromium/files/patch-third__party_tflite_features.gni
+++ b/www/ungoogled-chromium/files/patch-third__party_tflite_features.gni
@@ -1,11 +1,14 @@
---- third_party/tflite/features.gni.orig 2022-12-02 17:56:32 UTC
+--- third_party/tflite/features.gni.orig 2023-12-23 12:33:28 UTC
+++ third_party/tflite/features.gni
-@@ -7,7 +7,7 @@ import("//build/config/chrome_build.gni")
- declare_args() {
+@@ -8,9 +8,9 @@ declare_args() {
# This enables building TFLite with XNNPACK. Currently only available for
- # Linux or Windows on x64.
-- build_tflite_with_xnnpack = (is_win || is_linux) && current_cpu == "x64"
-+ build_tflite_with_xnnpack = ((is_win || is_linux) && current_cpu == "x64") && !is_bsd
+ # Linux or macOS arm64/x64 and Windows x64 targets.
+ build_tflite_with_xnnpack =
+- ((is_linux || is_mac) &&
++ (((is_linux || is_mac) &&
+ (current_cpu == "arm64" || current_cpu == "x64")) ||
+- (is_win && current_cpu == "x64")
++ (is_win && current_cpu == "x64")) && !is_bsd
# Turns on TFLITE_WITH_RUY, using ruy as the gemm backend instead of gemmlowp.
build_tflite_with_ruy = true
diff --git a/www/ungoogled-chromium/files/patch-third__party_vulkan__memory__allocator_include_vk__mem__alloc.h b/www/ungoogled-chromium/files/patch-third__party_vulkan__memory__allocator_include_vk__mem__alloc.h
index 9faee003ae78..1ceb82d0ae31 100644
--- a/www/ungoogled-chromium/files/patch-third__party_vulkan__memory__allocator_include_vk__mem__alloc.h
+++ b/www/ungoogled-chromium/files/patch-third__party_vulkan__memory__allocator_include_vk__mem__alloc.h
@@ -1,11 +1,39383 @@
---- third_party/vulkan_memory_allocator/include/vk_mem_alloc.h.orig 2022-02-28 16:54:41 UTC
+--- third_party/vulkan_memory_allocator/include/vk_mem_alloc.h.orig 2023-12-23 12:33:28 UTC
+++ third_party/vulkan_memory_allocator/include/vk_mem_alloc.h
-@@ -2503,7 +2503,7 @@ void *vma_aligned_alloc(size_t alignment, size_t size)
-
- return memalign(alignment, size);
- }
--#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
+@@ -1,19690 +1,19690 @@
+-//
+-// Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved.
+-//
+-// Permission is hereby granted, free of charge, to any person obtaining a copy
+-// of this software and associated documentation files (the "Software"), to deal
+-// in the Software without restriction, including without limitation the rights
+-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+-// copies of the Software, and to permit persons to whom the Software is
+-// furnished to do so, subject to the following conditions:
+-//
+-// The above copyright notice and this permission notice shall be included in
+-// all copies or substantial portions of the Software.
+-//
+-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+-// THE SOFTWARE.
+-//
+-
+-#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
+-#define AMD_VULKAN_MEMORY_ALLOCATOR_H
+-
+-/** \mainpage Vulkan Memory Allocator
+-
+-<b>Version 3.1.0-development</b>
+-
+-Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved. \n
+-License: MIT
+-
+-<b>API documentation divided into groups:</b> [Modules](modules.html)
+-
+-\section main_table_of_contents Table of contents
+-
+-- <b>User guide</b>
+- - \subpage quick_start
+- - [Project setup](@ref quick_start_project_setup)
+- - [Initialization](@ref quick_start_initialization)
+- - [Resource allocation](@ref quick_start_resource_allocation)
+- - \subpage choosing_memory_type
+- - [Usage](@ref choosing_memory_type_usage)
+- - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags)
+- - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types)
+- - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools)
+- - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations)
+- - \subpage memory_mapping
+- - [Mapping functions](@ref memory_mapping_mapping_functions)
+- - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory)
+- - [Cache flush and invalidate](@ref memory_mapping_cache_control)
+- - \subpage staying_within_budget
+- - [Querying for budget](@ref staying_within_budget_querying_for_budget)
+- - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage)
+- - \subpage resource_aliasing
+- - \subpage custom_memory_pools
+- - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex)
+- - [Linear allocation algorithm](@ref linear_algorithm)
+- - [Free-at-once](@ref linear_algorithm_free_at_once)
+- - [Stack](@ref linear_algorithm_stack)
+- - [Double stack](@ref linear_algorithm_double_stack)
+- - [Ring buffer](@ref linear_algorithm_ring_buffer)
+- - \subpage defragmentation
+- - \subpage statistics
+- - [Numeric statistics](@ref statistics_numeric_statistics)
+- - [JSON dump](@ref statistics_json_dump)
+- - \subpage allocation_annotation
+- - [Allocation user data](@ref allocation_user_data)
+- - [Allocation names](@ref allocation_names)
+- - \subpage virtual_allocator
+- - \subpage debugging_memory_usage
+- - [Memory initialization](@ref debugging_memory_usage_initialization)
+- - [Margins](@ref debugging_memory_usage_margins)
+- - [Corruption detection](@ref debugging_memory_usage_corruption_detection)
+- - \subpage opengl_interop
+-- \subpage usage_patterns
+- - [GPU-only resource](@ref usage_patterns_gpu_only)
+- - [Staging copy for upload](@ref usage_patterns_staging_copy_upload)
+- - [Readback](@ref usage_patterns_readback)
+- - [Advanced data uploading](@ref usage_patterns_advanced_data_uploading)
+- - [Other use cases](@ref usage_patterns_other_use_cases)
+-- \subpage configuration
+- - [Pointers to Vulkan functions](@ref config_Vulkan_functions)
+- - [Custom host memory allocator](@ref custom_memory_allocator)
+- - [Device memory allocation callbacks](@ref allocation_callbacks)
+- - [Device heap memory limit](@ref heap_memory_limit)
+-- <b>Extension support</b>
+- - \subpage vk_khr_dedicated_allocation
+- - \subpage enabling_buffer_device_address
+- - \subpage vk_ext_memory_priority
+- - \subpage vk_amd_device_coherent_memory
+-- \subpage general_considerations
+- - [Thread safety](@ref general_considerations_thread_safety)
+- - [Versioning and compatibility](@ref general_considerations_versioning_and_compatibility)
+- - [Validation layer warnings](@ref general_considerations_validation_layer_warnings)
+- - [Allocation algorithm](@ref general_considerations_allocation_algorithm)
+- - [Features not supported](@ref general_considerations_features_not_supported)
+-
+-\section main_see_also See also
+-
+-- [**Product page on GPUOpen**](https://gpuopen.com/gaming-product/vulkan-memory-allocator/)
+-- [**Source repository on GitHub**](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator)
+-
+-\defgroup group_init Library initialization
+-
+-\brief API elements related to the initialization and management of the entire library, especially the #VmaAllocator object.
+-
+-\defgroup group_alloc Memory allocation
+-
+-\brief API elements related to the allocation, deallocation, and management of Vulkan memory, buffers, and images.
+-The most basic ones are vmaCreateBuffer() and vmaCreateImage().
+-
+-\defgroup group_virtual Virtual allocator
+-
+-\brief API elements related to the mechanism of \ref virtual_allocator - using the core allocation algorithm
+-for user-defined purposes without allocating any real GPU memory.
+-
+-\defgroup group_stats Statistics
+-
+-\brief API elements that query the current status of the allocator, from memory usage and budget to a full dump of the internal state in JSON format.
+-See documentation chapter: \ref statistics.
+-*/
+-
+-
+-#ifdef __cplusplus
+-extern "C" {
+-#endif
+-
+-#include <vulkan/vulkan.h>
+-
+-#if !defined(VMA_VULKAN_VERSION)
+- #if defined(VK_VERSION_1_3)
+- #define VMA_VULKAN_VERSION 1003000
+- #elif defined(VK_VERSION_1_2)
+- #define VMA_VULKAN_VERSION 1002000
+- #elif defined(VK_VERSION_1_1)
+- #define VMA_VULKAN_VERSION 1001000
+- #else
+- #define VMA_VULKAN_VERSION 1000000
+- #endif
+-#endif
+-
+-#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
+- extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
+- extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
+- extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
+- extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
+- extern PFN_vkAllocateMemory vkAllocateMemory;
+- extern PFN_vkFreeMemory vkFreeMemory;
+- extern PFN_vkMapMemory vkMapMemory;
+- extern PFN_vkUnmapMemory vkUnmapMemory;
+- extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
+- extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
+- extern PFN_vkBindBufferMemory vkBindBufferMemory;
+- extern PFN_vkBindImageMemory vkBindImageMemory;
+- extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
+- extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
+- extern PFN_vkCreateBuffer vkCreateBuffer;
+- extern PFN_vkDestroyBuffer vkDestroyBuffer;
+- extern PFN_vkCreateImage vkCreateImage;
+- extern PFN_vkDestroyImage vkDestroyImage;
+- extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
+- #if VMA_VULKAN_VERSION >= 1001000
+- extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
+- extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
+- extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
+- extern PFN_vkBindImageMemory2 vkBindImageMemory2;
+- extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
+- #endif // #if VMA_VULKAN_VERSION >= 1001000
+-#endif // #if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
+-
+-#if !defined(VMA_DEDICATED_ALLOCATION)
+- #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
+- #define VMA_DEDICATED_ALLOCATION 1
+- #else
+- #define VMA_DEDICATED_ALLOCATION 0
+- #endif
+-#endif
+-
+-#if !defined(VMA_BIND_MEMORY2)
+- #if VK_KHR_bind_memory2
+- #define VMA_BIND_MEMORY2 1
+- #else
+- #define VMA_BIND_MEMORY2 0
+- #endif
+-#endif
+-
+-#if !defined(VMA_MEMORY_BUDGET)
+- #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
+- #define VMA_MEMORY_BUDGET 1
+- #else
+- #define VMA_MEMORY_BUDGET 0
+- #endif
+-#endif
+-
+-// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in Vulkan headers.
+-#if !defined(VMA_BUFFER_DEVICE_ADDRESS)
+- #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
+- #define VMA_BUFFER_DEVICE_ADDRESS 1
+- #else
+- #define VMA_BUFFER_DEVICE_ADDRESS 0
+- #endif
+-#endif
+-
+-// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers.
+-#if !defined(VMA_MEMORY_PRIORITY)
+- #if VK_EXT_memory_priority
+- #define VMA_MEMORY_PRIORITY 1
+- #else
+- #define VMA_MEMORY_PRIORITY 0
+- #endif
+-#endif
+-
+-// Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers.
+-#if !defined(VMA_EXTERNAL_MEMORY)
+- #if VK_KHR_external_memory
+- #define VMA_EXTERNAL_MEMORY 1
+- #else
+- #define VMA_EXTERNAL_MEMORY 0
+- #endif
+-#endif
+-
+-// Define these macros to decorate all public functions with additional code,
+-// before and after the returned type, appropriately. This may be useful for
+-// exporting the functions when compiling VMA as a separate library. Example:
+-// #define VMA_CALL_PRE __declspec(dllexport)
+-// #define VMA_CALL_POST __cdecl
+-#ifndef VMA_CALL_PRE
+- #define VMA_CALL_PRE
+-#endif
+-#ifndef VMA_CALL_POST
+- #define VMA_CALL_POST
+-#endif
+-
+-// Define this macro to decorate pNext pointers with an attribute specifying the Vulkan
+-// structure that will be extended via the pNext chain.
+-#ifndef VMA_EXTENDS_VK_STRUCT
+- #define VMA_EXTENDS_VK_STRUCT(vkStruct)
+-#endif
+-
+-// Define this macro to decorate pointers with an attribute specifying the
+-// length of the array they point to if they are not null.
+-//
+-// The length may be one of
+-// - The name of another parameter in the argument list where the pointer is declared
+-// - The name of another member in the struct where the pointer is declared
+-// - The name of a member of a struct type, meaning the value of that member in
+-// the context of the call. For example,
+-// VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount")
+-// means the number of memory heaps available in the device associated
+-// with the VmaAllocator being dealt with.
+-#ifndef VMA_LEN_IF_NOT_NULL
+- #define VMA_LEN_IF_NOT_NULL(len)
+-#endif
+-
+-// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
+-// See: https://clang.llvm.org/docs/AttributeReference.html#nullable
+-#ifndef VMA_NULLABLE
+- #ifdef __clang__
+- #define VMA_NULLABLE _Nullable
+- #else
+- #define VMA_NULLABLE
+- #endif
+-#endif
+-
+-// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
+-// See: https://clang.llvm.org/docs/AttributeReference.html#nonnull
+-#ifndef VMA_NOT_NULL
+- #ifdef __clang__
+- #define VMA_NOT_NULL _Nonnull
+- #else
+- #define VMA_NOT_NULL
+- #endif
+-#endif
+-
+-// If non-dispatchable handles are represented as pointers, then we can give
+-// them nullability annotations.
+-#ifndef VMA_NOT_NULL_NON_DISPATCHABLE
+- #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
+- #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
+- #else
+- #define VMA_NOT_NULL_NON_DISPATCHABLE
+- #endif
+-#endif
+-
+-#ifndef VMA_NULLABLE_NON_DISPATCHABLE
+- #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
+- #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
+- #else
+- #define VMA_NULLABLE_NON_DISPATCHABLE
+- #endif
+-#endif
+-
+-#ifndef VMA_STATS_STRING_ENABLED
+- #define VMA_STATS_STRING_ENABLED 1
+-#endif
+-
+-////////////////////////////////////////////////////////////////////////////////
+-////////////////////////////////////////////////////////////////////////////////
+-//
+-// INTERFACE
+-//
+-////////////////////////////////////////////////////////////////////////////////
+-////////////////////////////////////////////////////////////////////////////////
+-
+-// Sections for managing code placement in the file, only for development purposes, e.g. for convenient folding inside an IDE.
+-#ifndef _VMA_ENUM_DECLARATIONS
+-
+-/**
+-\addtogroup group_init
+-@{
+-*/
+-
+-/// Flags for created #VmaAllocator.
+-typedef enum VmaAllocatorCreateFlagBits
+-{
+- /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you.
+-
+- Using this flag may increase performance because internal mutexes are not used.
+- */
+- VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
+- /** \brief Enables usage of VK_KHR_dedicated_allocation extension.
+-
+- The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
+- When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
+-
+- Using this extension will automatically allocate dedicated blocks of memory for
+- some buffers and images instead of suballocating space for them out of bigger
+- memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
+- flag) when it is recommended by the driver. It may improve performance on some
+- GPUs.
+-
+- You may set this flag only if you found out that the following device extensions are
+- supported, you enabled them while creating the Vulkan device passed as
+- VmaAllocatorCreateInfo::device, and you want them to be used internally by this
+- library:
+-
+- - VK_KHR_get_memory_requirements2 (device extension)
+- - VK_KHR_dedicated_allocation (device extension)
+-
+- When this flag is set, you can experience the following warnings reported by the Vulkan
+- validation layer. You can safely ignore them.
+-
+- > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer.
+- */
+- VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
+- /**
+- Enables usage of VK_KHR_bind_memory2 extension.
+-
+- The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
+- When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
+-
+- You may set this flag only if you found out that this device extension is supported,
+- you enabled it while creating the Vulkan device passed as VmaAllocatorCreateInfo::device,
+- and you want it to be used internally by this library.
+-
+- The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`,
+- which allow passing a chain of `pNext` structures while binding.
+- This flag is required if you use the `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2().
+- */
+- VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004,
+- /**
+- Enables usage of VK_EXT_memory_budget extension.
+-
+- You may set this flag only if you found out that this device extension is supported,
+- you enabled it while creating the Vulkan device passed as VmaAllocatorCreateInfo::device,
+- and you want it to be used internally by this library, along with another instance extension
+- VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted).
+-
+- The extension provides a query for current memory usage and budget, which will probably
+- be more accurate than the estimation used by the library otherwise.
+- */
+- VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008,
+- /**
+- Enables usage of VK_AMD_device_coherent_memory extension.
+-
+- You may set this flag only if you:
+-
+- - found out that this device extension is supported and enabled it while creating the Vulkan device passed as VmaAllocatorCreateInfo::device,
+- - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device,
+- - want it to be used internally by this library.
+-
+- The extension and accompanying device feature provide access to memory types with
+- `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags.
+- They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR.
+-
+- When the extension is not enabled, such memory types are still enumerated, but their usage is illegal.
+- To protect against this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such a memory type,
+- returning `VK_ERROR_FEATURE_NOT_PRESENT`.
+- */
+- VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010,
+- /**
+- Enables usage of "buffer device address" feature, which allows you to use function
+- `vkGetBufferDeviceAddress*` to get raw GPU pointer to a buffer and pass it for usage inside a shader.
+-
+- You may set this flag only if you:
+-
+- 1. (For Vulkan version < 1.2) Found the device extension
+- VK_KHR_buffer_device_address available and enabled it.
+- This extension is promoted to core Vulkan 1.2.
+- 2. Found the device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress` available and enabled it.
+-
+- When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA.
+- The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to
+- allocated memory blocks wherever it might be needed.
+-
+- For more information, see documentation chapter \ref enabling_buffer_device_address.
+- */
+- VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020,
+- /**
+- Enables usage of VK_EXT_memory_priority extension in the library.
+-
+- You may set this flag only if you found this device extension available and enabled it,
+- along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`,
+- while creating the Vulkan device passed as VmaAllocatorCreateInfo::device.
+-
+- When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority
+- are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored.
+-
+- A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
+- Larger values are higher priority. The granularity of the priorities is implementation-dependent.
+- It is automatically passed to every call to `vkAllocateMemory` done by the library using structure `VkMemoryPriorityAllocateInfoEXT`.
+- The value to be used for default priority is 0.5.
+- For more details, see the documentation of the VK_EXT_memory_priority extension.
+- */
+- VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040,
+-
+- VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+-} VmaAllocatorCreateFlagBits;
+-/// See #VmaAllocatorCreateFlagBits.
+-typedef VkFlags VmaAllocatorCreateFlags;
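+-
+-/* Example (an illustrative sketch, not exhaustive): creating and destroying an
+-allocator with one of the flags above, assuming `instance`, `physicalDevice`,
+-and `device` were created elsewhere and that the VK_KHR_get_memory_requirements2
+-and VK_KHR_dedicated_allocation device extensions were found supported and enabled:
+-
+-\code
+-VmaAllocatorCreateInfo allocatorCreateInfo = {};
+-allocatorCreateInfo.flags = VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
+-allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_0;
+-allocatorCreateInfo.physicalDevice = physicalDevice;
+-allocatorCreateInfo.device = device;
+-allocatorCreateInfo.instance = instance;
+-
+-VmaAllocator allocator;
+-VkResult res = vmaCreateAllocator(&allocatorCreateInfo, &allocator);
+-// ... use the allocator ...
+-vmaDestroyAllocator(allocator);
+-\endcode
+-*/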
+-
+-/** @} */
+-
+-/**
+-\addtogroup group_alloc
+-@{
+-*/
+-
+-/// \brief Intended usage of the allocated memory.
+-typedef enum VmaMemoryUsage
+-{
+- /** No intended memory usage specified.
+- Use other members of VmaAllocationCreateInfo to specify your requirements.
+- */
+- VMA_MEMORY_USAGE_UNKNOWN = 0,
+- /**
+- \deprecated Obsolete, preserved for backward compatibility.
+- Prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
+- */
+- VMA_MEMORY_USAGE_GPU_ONLY = 1,
+- /**
+- \deprecated Obsolete, preserved for backward compatibility.
+- Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`.
+- */
+- VMA_MEMORY_USAGE_CPU_ONLY = 2,
+- /**
+- \deprecated Obsolete, preserved for backward compatibility.
+- Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
+- */
+- VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
+- /**
+- \deprecated Obsolete, preserved for backward compatibility.
+- Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
+- */
+- VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
+- /**
+- \deprecated Obsolete, preserved for backward compatibility.
+- Prefers not `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
+- */
+- VMA_MEMORY_USAGE_CPU_COPY = 5,
+- /**
+- Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`.
+- Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation.
+-
+- Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`.
+-
+- Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+- */
+- VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,
+- /**
+- Selects the best memory type automatically.
+- This flag is recommended for most common use cases.
+-
+- When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
+- you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
+- in VmaAllocationCreateInfo::flags.
+-
+- It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
+- vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
+- and not with generic memory allocation functions.
+- */
+- VMA_MEMORY_USAGE_AUTO = 7,
+- /**
+- Selects the best memory type automatically, with preference for GPU (device) memory.
+-
+- When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
+- you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
+- in VmaAllocationCreateInfo::flags.
+-
+- It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
+- vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
+- and not with generic memory allocation functions.
+- */
+- VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE = 8,
+- /**
+- Selects the best memory type automatically, with preference for CPU (host) memory.
+-
+- When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
+- you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
+- in VmaAllocationCreateInfo::flags.
+-
+- It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
+- vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
+- and not with generic memory allocation functions.
+- */
+- VMA_MEMORY_USAGE_AUTO_PREFER_HOST = 9,
+-
+- VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
+-} VmaMemoryUsage;
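+-
+-/* Example (an illustrative sketch): the recommended pattern with
+-#VMA_MEMORY_USAGE_AUTO, assuming an existing #VmaAllocator named `allocator`:
+-
+-\code
+-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+-bufCreateInfo.size = 65536;
+-bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+-
+-VmaAllocationCreateInfo allocCreateInfo = {};
+-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+-
+-VkBuffer buf;
+-VmaAllocation alloc;
+-VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, NULL);
+-// ... use the buffer ...
+-vmaDestroyBuffer(allocator, buf, alloc);
+-\endcode
+-*/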
+-
+-/// Flags to be passed as VmaAllocationCreateInfo::flags.
+-typedef enum VmaAllocationCreateFlagBits
+-{
+- /** \brief Set this flag if the allocation should have its own memory block.
+-
+- Use it for special, big resources, like fullscreen images used as attachments.
+- */
+- VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
+-
+- /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create a new block.
+-
+- If a new allocation cannot be placed in any of the existing blocks, the allocation
+- fails with the `VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
+-
+- You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and
+- #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense.
+- */
+- VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
+- /** \brief Set this flag to use memory that will be persistently mapped, and to retrieve a pointer to it.
+-
+- A pointer to the mapped memory will be returned through VmaAllocationInfo::pMappedData.
+-
+- It is valid to use this flag for an allocation made from a memory type that is not
+- `HOST_VISIBLE`. The flag is then ignored and the memory is not mapped. This is
+- useful if you need an allocation that is efficient to use on the GPU
+- (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that
+- support it (e.g. Intel GPUs).
+- */
+- VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
+- /** \deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead.
+-
+- Set this flag to treat VmaAllocationCreateInfo::pUserData as a pointer to a
+- null-terminated string. Instead of copying the pointer value, a local copy of the
+- string is made and stored in the allocation's `pName`. The string is automatically
+- freed together with the allocation. It is also used in vmaBuildStatsString().
+- */
+- VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
+- /** Allocation will be created from the upper stack in a double stack pool.
+-
+- This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag.
+- */
+- VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
+- /** Create both buffer/image and allocation, but don't bind them together.
+- It is useful when you want to perform the binding yourself, e.g. to do some more advanced binding using some extensions.
+- The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage().
+- Otherwise it is ignored.
+-
+- If you want to make sure the new buffer/image is not tied to the new memory allocation
+- through the `VkMemoryDedicatedAllocateInfoKHR` structure in case the allocation ends up in its own memory block,
+- also use the flag #VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT.
+- */
+- VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,
+- /** Create the allocation only if the additional device memory required for it, if any, won't exceed
+- the memory budget. Otherwise `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned.
+- */
+- VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100,
+- /** \brief Set this flag if the allocated memory will have aliasing resources.
+-
+- Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified.
+- Otherwise, the created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan validation layer errors.
+- */
+- VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT = 0x00000200,
+- /**
+- Requests the possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).
+-
+- - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` values,
+- you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
+- - If you use another value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
+- This includes allocations created in \ref custom_memory_pools.
+-
+- Declares that mapped memory will only be written sequentially, e.g. using `memcpy()` or a loop writing number-by-number,
+- never read or accessed randomly, so a memory type can be selected that is uncached and write-combined.
+-
+- \warning Violating this declaration may work correctly, but will likely be very slow.
+- Watch out for implicit reads introduced by doing e.g. `pMappedData[i] += x;`.
+- It is better to prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once.
+- */
+- VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT = 0x00000400,
+- /**
+- Requests the possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).
+-
+- - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` values,
+- you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
+- - If you use another value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
+- This includes allocations created in \ref custom_memory_pools.
+-
+- Declares that mapped memory can be read, written, and accessed in random order,
+- so a `HOST_CACHED` memory type is required.
+- */
+- VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT = 0x00000800,
+- /**
+- Together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT,
+- it says that despite the request for host access, a non-`HOST_VISIBLE` memory type can be selected
+- if it may improve performance.
+-
+- By using this flag, you declare that you will check if the allocation ended up in a `HOST_VISIBLE` memory type
+- (e.g. using vmaGetAllocationMemoryProperties()) and if not, you will create some "staging" buffer and
+- issue an explicit transfer to write/read your data.
+- To prepare for this possibility, don't forget to add appropriate flags like
+- `VK_BUFFER_USAGE_TRANSFER_DST_BIT`, `VK_BUFFER_USAGE_TRANSFER_SRC_BIT` to the parameters of the created buffer or image.
+- */
+- VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT = 0x00001000,
+- /** Allocation strategy that chooses the smallest possible free range for the allocation
+- to minimize memory usage and fragmentation, possibly at the expense of allocation time.
+- */
+- VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = 0x00010000,
+- /** Allocation strategy that chooses the first suitable free range for the allocation -
+- not necessarily in terms of the smallest offset but the one that is easiest and fastest to find,
+- to minimize allocation time, possibly at the expense of allocation quality.
+- */
+- VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = 0x00020000,
+- /** Allocation strategy that always chooses the lowest offset in available space.
+- This is not the most efficient strategy, but it achieves highly packed data.
+- Used internally by defragmentation; not recommended in typical usage.
+- */
+- VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = 0x00040000,
+- /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT.
+- */
+- VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,
+- /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT.
+- */
+- VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT,
+- /** A bit mask to extract only `STRATEGY` bits from the entire set of flags.
+- */
+- VMA_ALLOCATION_CREATE_STRATEGY_MASK =
+- VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT |
+- VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT |
+- VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
+-
+- VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+-} VmaAllocationCreateFlagBits;
+-/// See #VmaAllocationCreateFlagBits.
+-typedef VkFlags VmaAllocationCreateFlags;
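+-
+-/* Example (an illustrative sketch): a persistently mapped staging buffer using
+-the host-access flags above; `allocator` is an existing #VmaAllocator, and
+-`myData`/`myDataSize` stand for application-provided data (error handling
+-omitted):
+-
+-\code
+-VkBufferCreateInfo stagingBufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+-stagingBufInfo.size = 65536;
+-stagingBufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+-
+-VmaAllocationCreateInfo stagingAllocCreateInfo = {};
+-stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+-stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+-    VMA_ALLOCATION_CREATE_MAPPED_BIT;
+-
+-VkBuffer stagingBuf;
+-VmaAllocation stagingAlloc;
+-VmaAllocationInfo stagingAllocInfo;
+-VkResult res = vmaCreateBuffer(allocator, &stagingBufInfo, &stagingAllocCreateInfo,
+-    &stagingBuf, &stagingAlloc, &stagingAllocInfo);
+-
+-// The memory is persistently mapped - write sequentially, e.g. with memcpy().
+-memcpy(stagingAllocInfo.pMappedData, myData, myDataSize);
+-\endcode
+-*/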
+-
+-/// Flags to be passed as VmaPoolCreateInfo::flags.
+-typedef enum VmaPoolCreateFlagBits
+-{
+- /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool, so that Buffer-Image Granularity can be ignored.
+-
+- This is an optional optimization flag.
+-
+- If you always allocate using vmaCreateBuffer(), vmaCreateImage(),
+- vmaAllocateMemoryForBuffer(), then you don't need to use it because the allocator
+- knows the exact type of your allocations, so it can handle Buffer-Image Granularity
+- in the optimal way.
+-
+- If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(),
+- the exact type of such allocations is not known, so the allocator must be conservative
+- in handling Buffer-Image Granularity, which can lead to suboptimal allocation
+- (wasted memory). In that case, if you can make sure you always allocate only
+- buffers and linear images or only optimal images out of this pool, use this flag
+- to make the allocator disregard Buffer-Image Granularity and thus make allocations
+- faster and more optimal.
+- */
+- VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
+-
+- /** \brief Enables alternative, linear allocation algorithm in this pool.
+-
+- Specify this flag to enable the linear allocation algorithm, which always creates
+- new allocations after the last one and doesn't reuse space from allocations freed in
+- between. It trades memory consumption for a simplified algorithm and data
+- structure, which has better performance and uses less memory for metadata.
+-
+- By using this flag, you can achieve the behavior of a free-at-once, stack,
+- ring buffer, or double stack.
+- For details, see documentation chapter \ref linear_algorithm.
+- */
+- VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
+-
+- /** Bit mask to extract only `ALGORITHM` bits from the entire set of flags.
+- */
+- VMA_POOL_CREATE_ALGORITHM_MASK =
+- VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT,
+-
+- VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+-} VmaPoolCreateFlagBits;
+-/// Flags to be passed as VmaPoolCreateInfo::flags. See #VmaPoolCreateFlagBits.
+-typedef VkFlags VmaPoolCreateFlags;
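+-
+-/* Example (an illustrative sketch): a custom pool using the linear algorithm,
+-e.g. as a ring buffer of staging uploads; `allocator` is an existing
+-#VmaAllocator and `sampleBufCreateInfo` is a `VkBufferCreateInfo` describing
+-the kind of buffers the pool is meant for:
+-
+-\code
+-VmaAllocationCreateInfo sampleAllocCreateInfo = {};
+-sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+-
+-uint32_t memTypeIndex;
+-VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator,
+-    &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex);
+-
+-VmaPoolCreateInfo poolCreateInfo = {};
+-poolCreateInfo.memoryTypeIndex = memTypeIndex;
+-poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
+-
+-VmaPool pool;
+-res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
+-// ... allocate with VmaAllocationCreateInfo::pool = pool ...
+-vmaDestroyPool(allocator, pool);
+-\endcode
+-*/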
+-
+-/// Flags to be passed as VmaDefragmentationInfo::flags.
+-typedef enum VmaDefragmentationFlagBits
+-{
+- /** \brief Use a simple but fast algorithm for defragmentation.
+- May not achieve the best results, but will require the least time to compute and the fewest allocations to copy.
+- */
+- VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT = 0x1,
+- /** \brief Default defragmentation algorithm, also applied when no `ALGORITHM` flag is specified.
+- Offers a balance between defragmentation quality and the number of allocations and bytes that need to be moved.
+- */
+- VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT = 0x2,
+- /** \brief Perform full defragmentation of memory.
+- Can result in notably more time to compute and more allocations to copy, but will achieve the best memory packing.
+- */
+- VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT = 0x4,
+- /** \brief Use the most robust algorithm at the cost of time to compute and the number of copies to make.
+- Only available when bufferImageGranularity is greater than 1, since it aims to reduce
+- alignment issues between different types of resources.
+- Otherwise falls back to the same behavior as #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT.
+- */
+- VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT = 0x8,
+-
+- /// A bit mask to extract only `ALGORITHM` bits from the entire set of flags.
+- VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK =
+- VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT |
+- VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT |
+- VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT |
+- VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT,
+-
+- VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+-} VmaDefragmentationFlagBits;
+-/// See #VmaDefragmentationFlagBits.
+-typedef VkFlags VmaDefragmentationFlags;
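+-
+-/* Example (an illustrative sketch of the overall shape of a defragmentation
+-run; recreating the moved buffers/images and copying their data inside the
+-loop is application-specific and only hinted at in comments):
+-
+-\code
+-VmaDefragmentationInfo defragInfo = {};
+-defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT;
+-
+-VmaDefragmentationContext defragCtx;
+-VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
+-
+-for(;;)
+-{
+-    VmaDefragmentationPassMoveInfo pass;
+-    res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
+-    if(res == VK_SUCCESS)
+-        break; // Nothing left to move.
+-    // For each element of pass.pMoves[0..pass.moveCount): recreate the
+-    // buffer/image at dstTmpAllocation, copy the data, destroy the old
+-    // resource - or set the move's operation to IGNORE or DESTROY.
+-    res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
+-    if(res == VK_SUCCESS)
+-        break;
+-}
+-
+-vmaEndDefragmentation(allocator, defragCtx, NULL);
+-\endcode
+-*/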
+-
+-/// Operation performed on a single defragmentation move. See structure #VmaDefragmentationMove.
+-typedef enum VmaDefragmentationMoveOperation
+-{
+- /// Buffer/image has been recreated at `dstTmpAllocation`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass().
+- VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY = 0,
+- /// Set this value if you cannot move the allocation. New place reserved at `dstTmpAllocation` will be freed. `srcAllocation` will remain unchanged.
+- VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE = 1,
+- /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved at `dstTmpAllocation` will be freed, along with `srcAllocation`, which will be destroyed.
+- VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY = 2,
+-} VmaDefragmentationMoveOperation;
+-
+-/** @} */
+-
+-/**
+-\addtogroup group_virtual
+-@{
+-*/
+-
+-/// Flags to be passed as VmaVirtualBlockCreateInfo::flags.
+-typedef enum VmaVirtualBlockCreateFlagBits
+-{
+- /** \brief Enables alternative, linear allocation algorithm in this virtual block.
+-
+- Specify this flag to enable the linear allocation algorithm, which always creates
+- new allocations after the last one and doesn't reuse space from allocations freed in
+- between. It trades memory consumption for a simplified algorithm and data
+- structure, which has better performance and uses less memory for metadata.
+-
+- By using this flag, you can achieve the behavior of a free-at-once, stack,
+- ring buffer, or double stack.
+- For details, see documentation chapter \ref linear_algorithm.
+- */
+- VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT = 0x00000001,
+-
+- /** \brief Bit mask to extract only `ALGORITHM` bits from the entire set of flags.
+- */
+- VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK =
+- VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT,
+-
+- VMA_VIRTUAL_BLOCK_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+-} VmaVirtualBlockCreateFlagBits;
+-/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. See #VmaVirtualBlockCreateFlagBits.
+-typedef VkFlags VmaVirtualBlockCreateFlags;
+-
+-/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags.
+-typedef enum VmaVirtualAllocationCreateFlagBits
+-{
+- /** \brief Allocation will be created from the upper stack in a double stack pool.
+-
+- This flag is only allowed for virtual blocks created with #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT flag.
+- */
+- VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT,
+- /** \brief Allocation strategy that tries to minimize memory usage.
+- */
+- VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,
+- /** \brief Allocation strategy that tries to minimize allocation time.
+- */
+- VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT,
+- /** Allocation strategy that always chooses the lowest offset in available space.
+- This is not the most efficient strategy, but it achieves highly packed data.
+- */
+- VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
+- /** \brief A bit mask to extract only `STRATEGY` bits from the entire set of flags.
+-
+- These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits.
+- */
+- VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_MASK,
+-
+- VMA_VIRTUAL_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+-} VmaVirtualAllocationCreateFlagBits;
+-/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See #VmaVirtualAllocationCreateFlagBits.
+-typedef VkFlags VmaVirtualAllocationCreateFlags;
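+-
+-/* Example (an illustrative sketch): a basic virtual block round trip:
+-
+-\code
+-VmaVirtualBlockCreateInfo blockCreateInfo = {};
+-blockCreateInfo.size = 1048576; // 1 MiB of "virtual" space.
+-
+-VmaVirtualBlock block;
+-VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
+-
+-VmaVirtualAllocationCreateInfo allocCreateInfo = {};
+-allocCreateInfo.size = 4096;
+-
+-VmaVirtualAllocation alloc;
+-VkDeviceSize offset;
+-res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
+-// `offset` is the place of this allocation inside the 1 MiB space.
+-
+-vmaVirtualFree(block, alloc);
+-vmaDestroyVirtualBlock(block);
+-\endcode
+-*/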
+-
+-/** @} */
+-
+-#endif // _VMA_ENUM_DECLARATIONS
+-
+-#ifndef _VMA_DATA_TYPES_DECLARATIONS
+-
+-/**
+-\addtogroup group_init
+-@{ */
+-
+-/** \struct VmaAllocator
+-\brief Represents the main object of this library, in an initialized state.
+-
+-Fill the structure #VmaAllocatorCreateInfo and call vmaCreateAllocator() to create it.
+-Call vmaDestroyAllocator() to destroy it.
+-
+-It is recommended to create just one object of this type per `VkDevice` object,
+-right after Vulkan is initialized, and to keep it alive until just before the Vulkan device is destroyed.
+-*/
+-VK_DEFINE_HANDLE(VmaAllocator)
+-
+-/** @} */
+-
+-/**
+-\addtogroup group_alloc
+-@{
+-*/
+-
+-/** \struct VmaPool
+-\brief Represents a custom memory pool.
+-
+-Fill the structure VmaPoolCreateInfo and call vmaCreatePool() to create it.
+-Call vmaDestroyPool() to destroy it.
+-
+-For more information, see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools).
+-*/
+-VK_DEFINE_HANDLE(VmaPool)
+-
+-/** \struct VmaAllocation
+-\brief Represents a single memory allocation.
+-
+-It may be either a dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type
+-plus a unique offset.
+-
+-There are multiple ways to create such an object.
+-You need to fill the structure VmaAllocationCreateInfo.
+-For more information, see [Choosing memory type](@ref choosing_memory_type).
+-
+-Although the library provides convenience functions that create a Vulkan buffer or image,
+-allocate memory for it, and bind them together,
+-binding of the allocation to a buffer or an image is out of scope of the allocation itself.
+-An allocation object can exist without a buffer/image bound to it;
+-binding can be done manually by the user, and the buffer/image can be destroyed
+-independently of the allocation.
+-
+-The object also remembers its size and some other information.
+-To retrieve this information, use the function vmaGetAllocationInfo() and inspect
+-the returned structure VmaAllocationInfo.
+-*/
+-VK_DEFINE_HANDLE(VmaAllocation)
+-
+-/** \struct VmaDefragmentationContext
+-\brief An opaque object that represents a started defragmentation process.
+-
+-Fill the structure #VmaDefragmentationInfo and call vmaBeginDefragmentation() to create it.
+-Call vmaEndDefragmentation() to destroy it.
+-*/
+-VK_DEFINE_HANDLE(VmaDefragmentationContext)
+-
+-/** @} */
+-
+-/**
+-\addtogroup group_virtual
+-@{
+-*/
+-
+-/** \struct VmaVirtualAllocation
+-\brief Represents a single memory allocation made inside a VmaVirtualBlock.
+-
+-Use it as a unique identifier of a virtual allocation within a single block.
+-
+-Use the value `VK_NULL_HANDLE` to represent a null/invalid allocation.
+-*/
+-VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaVirtualAllocation)
+-
+-/** @} */
+-
+-/**
+-\addtogroup group_virtual
+-@{
+-*/
+-
+-/** \struct VmaVirtualBlock
+-\brief Handle to a virtual block object that allows using the core allocation algorithm without allocating any real GPU memory.
+-
+-Fill in the #VmaVirtualBlockCreateInfo structure and use vmaCreateVirtualBlock() to create it. Use vmaDestroyVirtualBlock() to destroy it.
+-For more information, see documentation chapter \ref virtual_allocator.
+-
+-This object is not thread-safe: it should not be used from multiple threads simultaneously and must be synchronized externally.
+-*/
+-VK_DEFINE_HANDLE(VmaVirtualBlock)
+-
+-/** @} */
+-
+-/**
+-\addtogroup group_init
+-@{
+-*/
+-
+-/// Callback function called after a successful vkAllocateMemory.
+-typedef void (VKAPI_PTR* PFN_vmaAllocateDeviceMemoryFunction)(
+- VmaAllocator VMA_NOT_NULL allocator,
+- uint32_t memoryType,
+- VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
+- VkDeviceSize size,
+- void* VMA_NULLABLE pUserData);
+-
+-/// Callback function called before vkFreeMemory.
+-typedef void (VKAPI_PTR* PFN_vmaFreeDeviceMemoryFunction)(
+- VmaAllocator VMA_NOT_NULL allocator,
+- uint32_t memoryType,
+- VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
+- VkDeviceSize size,
+- void* VMA_NULLABLE pUserData);
+-
+-/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`.
+-
+-Provided for informative purposes, e.g. to gather statistics about the number of
+-allocations or the total amount of memory allocated in Vulkan.
+-
+-Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
+-*/
+-typedef struct VmaDeviceMemoryCallbacks
+-{
+- /// Optional, can be null.
+- PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate;
+- /// Optional, can be null.
+- PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree;
+- /// Optional, can be null.
+- void* VMA_NULLABLE pUserData;
+-} VmaDeviceMemoryCallbacks;
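+-
+-/* Example (an illustrative sketch): callbacks counting live `VkDeviceMemory`
+-objects; `g_DeviceMemoryCount` is a hypothetical application-side counter:
+-
+-\code
+-static uint32_t g_DeviceMemoryCount = 0;
+-
+-static void VKAPI_PTR MyAllocateCallback(VmaAllocator allocator, uint32_t memoryType,
+-    VkDeviceMemory memory, VkDeviceSize size, void* pUserData)
+-{
+-    ++g_DeviceMemoryCount;
+-}
+-
+-static void VKAPI_PTR MyFreeCallback(VmaAllocator allocator, uint32_t memoryType,
+-    VkDeviceMemory memory, VkDeviceSize size, void* pUserData)
+-{
+-    --g_DeviceMemoryCount;
+-}
+-
+-// Then, in VmaAllocatorCreateInfo:
+-// VmaDeviceMemoryCallbacks callbacks = { MyAllocateCallback, MyFreeCallback, NULL };
+-// allocatorCreateInfo.pDeviceMemoryCallbacks = &callbacks;
+-\endcode
+-*/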
+-
+-/** \brief Pointers to some Vulkan functions - a subset used by the library.
+-
+-Used in VmaAllocatorCreateInfo::pVulkanFunctions.
+-*/
+-typedef struct VmaVulkanFunctions
+-{
+- /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.
+- PFN_vkGetInstanceProcAddr VMA_NULLABLE vkGetInstanceProcAddr;
+- /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.
+- PFN_vkGetDeviceProcAddr VMA_NULLABLE vkGetDeviceProcAddr;
+- PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties;
+- PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties;
+- PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory;
+- PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory;
+- PFN_vkMapMemory VMA_NULLABLE vkMapMemory;
+- PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory;
+- PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges;
+- PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges;
+- PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory;
+- PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory;
+- PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements;
+- PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements;
+- PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer;
+- PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer;
+- PFN_vkCreateImage VMA_NULLABLE vkCreateImage;
+- PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage;
+- PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer;
+-#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+- /// Fetch "vkGetBufferMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetBufferMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension.
+- PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR;
+- /// Fetch "vkGetImageMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetImageMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension.
+- PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR;
+-#endif
+-#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
+- /// Fetch "vkBindBufferMemory2" on Vulkan >= 1.1, fetch "vkBindBufferMemory2KHR" when using VK_KHR_bind_memory2 extension.
+- PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR;
+- /// Fetch "vkBindImageMemory2" on Vulkan >= 1.1, fetch "vkBindImageMemory2KHR" when using VK_KHR_bind_memory2 extension.
+- PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
+-#endif
+-#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
+- PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
+-#endif
+-#if VMA_VULKAN_VERSION >= 1003000
+- /// Fetch from "vkGetDeviceBufferMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceBufferMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
+- PFN_vkGetDeviceBufferMemoryRequirements VMA_NULLABLE vkGetDeviceBufferMemoryRequirements;
+- /// Fetch from "vkGetDeviceImageMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceImageMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
+- PFN_vkGetDeviceImageMemoryRequirements VMA_NULLABLE vkGetDeviceImageMemoryRequirements;
+-#endif
+-} VmaVulkanFunctions;
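+-
+-/* Example (an illustrative sketch): when the library is compiled with
+-VMA_DYNAMIC_VULKAN_FUNCTIONS, providing the two loader entry points is enough
+-and the remaining pointers are fetched internally:
+-
+-\code
+-VmaVulkanFunctions vulkanFunctions = {};
+-vulkanFunctions.vkGetInstanceProcAddr = vkGetInstanceProcAddr;
+-vulkanFunctions.vkGetDeviceProcAddr = vkGetDeviceProcAddr;
+-
+-// Then, in VmaAllocatorCreateInfo:
+-// allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
+-\endcode
+-*/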
+-
+-/// Description of an Allocator to be created.
+-typedef struct VmaAllocatorCreateInfo
+-{
+- /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
+- VmaAllocatorCreateFlags flags;
+- /// Vulkan physical device.
+- /** It must be valid throughout the whole lifetime of the created allocator. */
+- VkPhysicalDevice VMA_NOT_NULL physicalDevice;
+- /// Vulkan device.
+- /** It must be valid throughout the whole lifetime of the created allocator. */
+- VkDevice VMA_NOT_NULL device;
+- /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.
+- /** Set to 0 to use the default, which is currently 256 MiB. */
+- VkDeviceSize preferredLargeHeapBlockSize;
+- /// Custom CPU memory allocation callbacks. Optional.
+- /** Optional, can be null. When specified, they will also be used for all CPU-side memory allocations. */
+- const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
+- /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.
+- /** Optional, can be null. */
+- const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;
+- /** \brief Either null or a pointer to an array of limits on the maximum number of bytes that can be allocated out of each Vulkan memory heap.
+-
+- If not NULL, it must be a pointer to an array of
+- `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining a limit on the
+- maximum number of bytes that can be allocated out of a particular Vulkan memory
+- heap.
+-
+- Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that
+- heap. This is also the default in case of `pHeapSizeLimit` = NULL.
+-
+- If there is a limit defined for a heap:
+-
+- - If the user tries to allocate more memory from that heap using this allocator,
+- the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+- - If the limit is smaller than the heap size reported in `VkMemoryHeap::size`, the
+- value of this limit will be reported instead when using vmaGetMemoryProperties().
+-
+- Warning! Using this feature may not be equivalent to installing a GPU with a
+- smaller amount of memory, because the graphics driver doesn't necessarily fail new
+- allocations with the `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when the memory capacity is
+- exceeded. It may return success and just silently migrate some device memory
+- blocks to system RAM. This driver behavior can also be controlled using the
+- VK_AMD_memory_overallocation_behavior extension.
+- */
+- const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;
+-
+- /** \brief Pointers to Vulkan functions. Can be null.
+-
+- For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions).
+- */
+- const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions;
+- /** \brief Handle to Vulkan instance object.
+-
+- Starting from version 3.0.0, this member is no longer optional; it must be set!
+- */
+- VkInstance VMA_NOT_NULL instance;
+- /** \brief Optional. The highest version of Vulkan that the application is designed to use.
+-
+- It must be a value in the format created by the macro `VK_MAKE_VERSION`, or a constant like `VK_API_VERSION_1_1` or `VK_API_VERSION_1_0`.
+- The patch version number specified is ignored. Only the major and minor versions are considered.
+- It must be less than or equal (preferably equal) to the value passed to `vkCreateInstance` as `VkApplicationInfo::apiVersion`.
+- Only versions 1.0, 1.1, 1.2, and 1.3 are supported by the current implementation.
+- Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`.
+- */
+- uint32_t vulkanApiVersion;
+-#if VMA_EXTERNAL_MEMORY
+- /** \brief Either null or a pointer to an array of external memory handle types for each Vulkan memory type.
+-
+- If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryTypeCount`
+- elements, defining the external memory handle types of a particular Vulkan memory type,
+- to be passed using `VkExportMemoryAllocateInfoKHR`.
+-
+- Any of the elements may be equal to 0, which means not to use `VkExportMemoryAllocateInfoKHR` on this memory type.
+- This is also the default in case of `pTypeExternalMemoryHandleTypes` = NULL.
+- */
+- const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes;
+-#endif // #if VMA_EXTERNAL_MEMORY
+-} VmaAllocatorCreateInfo;
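+-
+-/* Example (an illustrative sketch): capping allocations from heap 0 while
+-leaving the remaining heaps unlimited; `memProps` is assumed to have been
+-queried with vkGetPhysicalDeviceMemoryProperties():
+-
+-\code
+-VkDeviceSize heapSizeLimits[VK_MAX_MEMORY_HEAPS];
+-for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
+-    heapSizeLimits[i] = VK_WHOLE_SIZE; // No limit on this heap.
+-heapSizeLimits[0] = 256ull * 1024 * 1024; // Cap heap 0 at 256 MiB.
+-
+-// Then, in VmaAllocatorCreateInfo:
+-// allocatorCreateInfo.pHeapSizeLimit = heapSizeLimits;
+-\endcode
+-*/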
+-
+-/// Information about existing #VmaAllocator object.
+-typedef struct VmaAllocatorInfo
+-{
+- /** \brief Handle to Vulkan instance object.
+-
+- This is the same value as has been passed through VmaAllocatorCreateInfo::instance.
+- */
+- VkInstance VMA_NOT_NULL instance;
+- /** \brief Handle to Vulkan physical device object.
+-
+- This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice.
+- */
+- VkPhysicalDevice VMA_NOT_NULL physicalDevice;
+- /** \brief Handle to Vulkan device object.
+-
+- This is the same value as has been passed through VmaAllocatorCreateInfo::device.
+- */
+- VkDevice VMA_NOT_NULL device;
+-} VmaAllocatorInfo;
+-
+-/** @} */
+-
+-/**
+-\addtogroup group_stats
+-@{
+-*/
+-
+-/** \brief Calculated statistics of memory usage e.g. in a specific memory type, heap, custom pool, or total.
+-
+-These are fast to calculate.
+-See functions: vmaGetHeapBudgets(), vmaGetPoolStatistics().
+-*/
+-typedef struct VmaStatistics
+-{
+- /** \brief Number of `VkDeviceMemory` objects - Vulkan memory blocks allocated.
+- */
+- uint32_t blockCount;
+- /** \brief Number of #VmaAllocation objects allocated.
+-
+- Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`.
+- */
+- uint32_t allocationCount;
+- /** \brief Number of bytes allocated in `VkDeviceMemory` blocks.
+-
+- \note To avoid confusion, please be aware that what Vulkan calls an "allocation" - a whole `VkDeviceMemory` object
+- (e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a "block" in VMA, while VMA calls
+- "allocation" a #VmaAllocation object that represents a memory region sub-allocated from such block, usually for a single buffer or image.
+- */
+- VkDeviceSize blockBytes;
+- /** \brief Total number of bytes occupied by all #VmaAllocation objects.
+-
+- Always less than or equal to `blockBytes`.
+- The difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan
+- but unused by any #VmaAllocation.
+- */
+- VkDeviceSize allocationBytes;
+-} VmaStatistics;
+-
+-/** \brief More detailed statistics than #VmaStatistics.
+-
+-These are slower to calculate. Use for debugging purposes.
+-See functions: vmaCalculateStatistics(), vmaCalculatePoolStatistics().
+-
+-A previous version of the statistics API provided averages, but they have been removed
+-because they can be easily calculated as follows:
+-
+-\code
+-VkDeviceSize allocationSizeAvg = detailedStats.statistics.allocationBytes / detailedStats.statistics.allocationCount;
+-VkDeviceSize unusedBytes = detailedStats.statistics.blockBytes - detailedStats.statistics.allocationBytes;
+-VkDeviceSize unusedRangeSizeAvg = unusedBytes / detailedStats.unusedRangeCount;
+-\endcode
+-*/
+-typedef struct VmaDetailedStatistics
+-{
+- /// Basic statistics.
+- VmaStatistics statistics;
+- /// Number of free ranges of memory between allocations.
+- uint32_t unusedRangeCount;
+- /// Smallest allocation size. `VK_WHOLE_SIZE` if there are 0 allocations.
+- VkDeviceSize allocationSizeMin;
+- /// Largest allocation size. 0 if there are 0 allocations.
+- VkDeviceSize allocationSizeMax;
+- /// Smallest empty range size. `VK_WHOLE_SIZE` if there are 0 empty ranges.
+- VkDeviceSize unusedRangeSizeMin;
+- /// Largest empty range size. 0 if there are 0 empty ranges.
+- VkDeviceSize unusedRangeSizeMax;
+-} VmaDetailedStatistics;
+-
+-/** \brief General statistics from the current state of the Allocator -
+-total memory usage across all memory heaps and types.
+-
+-These are slower to calculate. Use for debugging purposes.
+-See function vmaCalculateStatistics().
+-*/
+-typedef struct VmaTotalStatistics
+-{
+- VmaDetailedStatistics memoryType[VK_MAX_MEMORY_TYPES];
+- VmaDetailedStatistics memoryHeap[VK_MAX_MEMORY_HEAPS];
+- VmaDetailedStatistics total;
+-} VmaTotalStatistics;
+-
+-/** \brief Statistics of current memory usage and available budget for a specific memory heap.
+-
+-These are fast to calculate.
+-See function vmaGetHeapBudgets().
+-*/
+-typedef struct VmaBudget
+-{
+- /** \brief Statistics fetched from the library.
+- */
+- VmaStatistics statistics;
+- /** \brief Estimated current memory usage of the program, in bytes.
+-
+- Fetched from system using VK_EXT_memory_budget extension if enabled.
+-
+- It might be different from `statistics.blockBytes` (usually higher) due to additional implicit objects
+- also occupying the memory, like swapchains, pipelines, descriptor heaps, command buffers, or
+- `VkDeviceMemory` blocks allocated outside of this library, if any.
+- */
+- VkDeviceSize usage;
+- /** \brief Estimated amount of memory available to the program, in bytes.
+-
+- Fetched from system using VK_EXT_memory_budget extension if enabled.
+-
+- It might be different from (most probably smaller than) `VkMemoryHeap::size[heapIndex]` due to factors
+- external to the program, decided by the operating system.
+- The difference `budget - usage` is the amount of additional memory that can probably
+- be allocated without problems. Exceeding the budget may result in various problems.
+- */
+- VkDeviceSize budget;
+-} VmaBudget;
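+-
+-/* Example (an illustrative sketch): estimating how much more can probably be
+-allocated from the heap with index `heapIndex` (note that `usage` may already
+-exceed `budget`, which a real application should handle):
+-
+-\code
+-VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
+-vmaGetHeapBudgets(allocator, budgets);
+-
+-VkDeviceSize available = 0;
+-if(budgets[heapIndex].budget > budgets[heapIndex].usage)
+-    available = budgets[heapIndex].budget - budgets[heapIndex].usage;
+-\endcode
+-*/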
+-
+-/** @} */
+-
+-/**
+-\addtogroup group_alloc
+-@{
+-*/
+-
+-/** \brief Parameters of new #VmaAllocation.
+-
+-To be used with functions like vmaCreateBuffer(), vmaCreateImage(), and many others.
+-*/
+-typedef struct VmaAllocationCreateInfo
+-{
+- /// Use #VmaAllocationCreateFlagBits enum.
+- VmaAllocationCreateFlags flags;
+- /** \brief Intended usage of memory.
+-
+- You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in another way. \n
+- If `pool` is not null, this member is ignored.
+- */
+- VmaMemoryUsage usage;
+- /** \brief Flags that must be set in a memory type chosen for an allocation.
+-
+- Leave 0 if you specify memory requirements in another way. \n
+- If `pool` is not null, this member is ignored.*/
+- VkMemoryPropertyFlags requiredFlags;
+- /** \brief Flags that preferably should be set in a memory type chosen for an allocation.
+-
+- Set to 0 if no additional flags are preferred. \n
+- If `pool` is not null, this member is ignored. */
+- VkMemoryPropertyFlags preferredFlags;
+- /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation.
+-
+- Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if
+- it meets other requirements specified by this structure, with no further
+- restrictions on memory type index. \n
+- If `pool` is not null, this member is ignored.
+- */
+- uint32_t memoryTypeBits;
+- /** \brief Pool that this allocation should be created in.
+-
+- Leave `VK_NULL_HANDLE` to allocate from the default pool. If not null, the members
+- `usage`, `requiredFlags`, `preferredFlags`, and `memoryTypeBits` are ignored.
+- */
+- VmaPool VMA_NULLABLE pool;
+- /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().
+-
+- If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either
+- null or a pointer to a null-terminated string. The string will then be copied to an
+- internal buffer, so it doesn't need to be valid after the allocation call.
+- */
+- void* VMA_NULLABLE pUserData;
+- /** \brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
+-
+- It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object
+- and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+- Otherwise, it has the priority of a memory block where it is placed and this variable is ignored.
+- */
+- float priority;
+-} VmaAllocationCreateInfo;
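+-
+-/* Example (an illustrative sketch): combining required and preferred flags as
+-an explicit alternative to the `usage` member:
+-
+-\code
+-VmaAllocationCreateInfo allocCreateInfo = {};
+-allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+-allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
+-    VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
+-// Pass to e.g. vmaCreateBuffer() together with a VkBufferCreateInfo.
+-\endcode
+-*/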
+-
+-/// Describes parameters of a #VmaPool to be created.
+-typedef struct VmaPoolCreateInfo
+-{
+- /** \brief Vulkan memory type index to allocate this pool from.
+- */
+- uint32_t memoryTypeIndex;
+- /** \brief Use combination of #VmaPoolCreateFlagBits.
+- */
+- VmaPoolCreateFlags flags;
+- /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional.
+-
+- Specify a nonzero value to set an explicit, constant size of memory blocks used by this
+- pool.
+-
+- Leave 0 to use the default and let the library manage block sizes automatically.
+- Sizes of particular blocks may vary.
+- In this case, the pool will also support dedicated allocations.
+- */
+- VkDeviceSize blockSize;
+- /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty.
+-
+- Set to 0 to have no preallocated blocks and allow the pool to be completely empty.
+- */
+- size_t minBlockCount;
+- /** \brief Maximum number of blocks that can be allocated in this pool. Optional.
+-
+- Set to 0 to use the default, which is `SIZE_MAX`, meaning no limit.
+-
+- Set to the same value as VmaPoolCreateInfo::minBlockCount to have a fixed amount of memory allocated
+- throughout the whole lifetime of this pool.
+- */
+- size_t maxBlockCount;
+- /** \brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations.
+-
+- It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object.
+- Otherwise, this variable is ignored.
+- */
+- float priority;
+- /** \brief Additional minimum alignment to be used for all allocations created from this pool. Can be 0.
+-
+- Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two.
+- It can be useful in cases where the alignment returned by Vulkan from functions like `vkGetBufferMemoryRequirements` is not enough,
+- e.g. when doing interop with OpenGL.
+- */
+- VkDeviceSize minAllocationAlignment;
+- /** \brief Additional `pNext` chain to be attached to `VkMemoryAllocateInfo` used for every allocation made by this pool. Optional.
+-
+- Optional, can be null. If not null, it must point to a `pNext` chain of structures that can be attached to `VkMemoryAllocateInfo`.
+- It can be useful for special needs such as adding `VkExportMemoryAllocateInfoKHR`.
+- Structures pointed to by this member must remain alive and unchanged for the whole lifetime of the custom pool.
+-
+- Please note that some structures, e.g. `VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`,
+- can be attached automatically by this library when using its other, more convenient features.
+- */
+- void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkMemoryAllocateInfo) pMemoryAllocateNext;
+-} VmaPoolCreateInfo;
+-
+-/** @} */
+-
+-/**
+-\addtogroup group_alloc
+-@{
+-*/
+-
+-/// Parameters of #VmaAllocation objects that can be retrieved using function vmaGetAllocationInfo().
+-typedef struct VmaAllocationInfo
+-{
+- /** \brief Memory type index that this allocation was allocated from.
+-
+- It never changes.
+- */
+- uint32_t memoryType;
+- /** \brief Handle to Vulkan memory object.
+-
+- Same memory object can be shared by multiple allocations.
+-
+- It can change after the allocation is moved during \ref defragmentation.
+- */
+- VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory;
+- /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation.
+-
+- You usually don't need to use this offset. If you create a buffer or an image together with the allocation
+- using e.g. vmaCreateBuffer() or vmaCreateImage(), functions that operate on these resources refer to the
+- beginning of the buffer or image, not the entire device memory block. Functions like vmaMapMemory() and
+- vmaBindBufferMemory() also refer to the beginning of the allocation and apply this offset automatically.
+-
+- It can change after the allocation is moved during \ref defragmentation.
+- */
+- VkDeviceSize offset;
+- /** \brief Size of this allocation, in bytes.
+-
+- It never changes.
+-
+- \note The allocation size returned in this variable may be greater than the size
+- requested for the resource, e.g. as `VkBufferCreateInfo::size`. The whole size of the
+- allocation is accessible for operations on the memory, e.g. using a pointer after
+- mapping with vmaMapMemory(), but operations on the resource, e.g. using
+- `vkCmdCopyBuffer`, must be limited to the size of the resource.
+- */
+- VkDeviceSize size;
+- /** \brief Pointer to the beginning of this allocation as mapped data.
+-
+- If the allocation hasn't been mapped using vmaMapMemory() and hasn't been
+- created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null.
+-
+- It can change after call to vmaMapMemory(), vmaUnmapMemory().
+- It can also change after the allocation is moved during \ref defragmentation.
+- */
+- void* VMA_NULLABLE pMappedData;
+- /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
+-
+- It can change after call to vmaSetAllocationUserData() for this allocation.
+- */
+- void* VMA_NULLABLE pUserData;
+- /** \brief Custom allocation name that was set with vmaSetAllocationName().
+-
+- It can change after call to vmaSetAllocationName() for this allocation.
+-
+- Another way to set a custom name is to pass it in VmaAllocationCreateInfo::pUserData with
+- the additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED].
+- */
+- const char* VMA_NULLABLE pName;
+-} VmaAllocationInfo;
+-
+-/** Callback function called during vmaBeginDefragmentation() to check a custom criterion for ending the current defragmentation pass.
+-
+-Should return true if the defragmentation needs to stop the current pass.
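+-
+-A minimal sketch of such a callback (the time-budget logic and the name `myShouldStop` are
+-hypothetical, shown only for illustration):
+-
+-\code
+-static VkBool32 myShouldStop(void* pUserData)
+-{
+-    // pUserData is the pointer passed as VmaDefragmentationInfo::pBreakCallbackUserData.
+-    const VkBool32* pBudgetExhausted = (const VkBool32*)pUserData;
+-    return *pBudgetExhausted; // VK_TRUE stops the current pass.
+-}
+-\endcode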
+-*/
+-typedef VkBool32 (VKAPI_PTR* PFN_vmaCheckDefragmentationBreakFunction)(void* VMA_NULLABLE pUserData);
+-
+-/** \brief Parameters for defragmentation.
+-
+-To be used with function vmaBeginDefragmentation().
+-*/
+-typedef struct VmaDefragmentationInfo
+-{
+- /// \brief Use combination of #VmaDefragmentationFlagBits.
+- VmaDefragmentationFlags flags;
+- /** \brief Custom pool to be defragmented.
+-
+- If null, then the default pools will undergo the defragmentation process.
+- */
+- VmaPool VMA_NULLABLE pool;
+- /** \brief Maximum number of bytes that can be copied during a single pass, while moving allocations to different places.
+-
+- `0` means no limit.
+- */
+- VkDeviceSize maxBytesPerPass;
+- /** \brief Maximum number of allocations that can be moved during a single pass to a different place.
+-
+- `0` means no limit.
+- */
+- uint32_t maxAllocationsPerPass;
+- /** \brief Optional custom callback for stopping vmaBeginDefragmentation().
+-
+- It has to return true to break the current defragmentation pass.
+- */
+- PFN_vmaCheckDefragmentationBreakFunction VMA_NULLABLE pfnBreakCallback;
+- /// \brief Optional data to pass to custom callback for stopping pass of defragmentation.
+- void* VMA_NULLABLE pBreakCallbackUserData;
+-} VmaDefragmentationInfo;
+-
+-/// Single move of an allocation to be done for defragmentation.
+-typedef struct VmaDefragmentationMove
+-{
+- /// Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY. You can modify it.
+- VmaDefragmentationMoveOperation operation;
+- /// Allocation that should be moved.
+- VmaAllocation VMA_NOT_NULL srcAllocation;
+- /** \brief Temporary allocation pointing to destination memory that will replace `srcAllocation`.
+-
+- \warning Do not store this allocation in your data structures! It exists only temporarily, for the duration of the defragmentation pass,
+- to be used for binding new buffer/image to the destination memory using e.g. vmaBindBufferMemory().
+- vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory.
+- */
+- VmaAllocation VMA_NOT_NULL dstTmpAllocation;
+-} VmaDefragmentationMove;
+-
+-/** \brief Parameters for incremental defragmentation steps.
+-
+-To be used with function vmaBeginDefragmentationPass().
+-*/
+-typedef struct VmaDefragmentationPassMoveInfo
+-{
+- /// Number of elements in the `pMoves` array.
+- uint32_t moveCount;
+- /** \brief Array of moves to be performed by the user in the current defragmentation pass.
+-
+- Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass().
+-
+- For each element, you should:
+-
+- 1. Create a new buffer/image and bind it to the memory of VmaDefragmentationMove::dstTmpAllocation, e.g. using vmaBindBufferMemory().
+- 2. Copy data from the VmaDefragmentationMove::srcAllocation e.g. using `vkCmdCopyBuffer`, `vkCmdCopyImage`.
+- 3. Make sure these commands finished executing on the GPU.
+- 4. Destroy the old buffer/image.
+-
+- Only then can you finish the defragmentation pass by calling vmaEndDefragmentationPass().
+- After this call, the allocation will point to the new place in memory.
+-
+- Alternatively, if you cannot move a specific allocation, you can set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
+-
+- Alternatively, if you decide you want to completely remove the allocation:
+-
+- 1. Destroy its buffer/image.
+- 2. Set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
+-
+- Then, after vmaEndDefragmentationPass() the allocation will be freed.
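+-
+- A condensed sketch of processing the moves of one pass (the check `CanMove()` and the
+- resource recreation are hypothetical, application-specific steps):
+-
+- \code
+- for(uint32_t i = 0; i < passInfo.moveCount; ++i)
+- {
+-     VmaDefragmentationMove& move = passInfo.pMoves[i];
+-     if(CanMove(move.srcAllocation))
+-     {
+-         // Create a new buffer/image, bind it to move.dstTmpAllocation
+-         // (e.g. with vmaBindBufferMemory()), copy the data on the GPU,
+-         // wait for the copy to finish, then destroy the old buffer/image.
+-     }
+-     else
+-     {
+-         move.operation = VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE;
+-     }
+- }
+- \endcode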
+- */
+- VmaDefragmentationMove* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
+-} VmaDefragmentationPassMoveInfo;
+-
+-/// Statistics returned for defragmentation process in function vmaEndDefragmentation().
+-typedef struct VmaDefragmentationStats
+-{
+- /// Total number of bytes that have been copied while moving allocations to different places.
+- VkDeviceSize bytesMoved;
+- /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects.
+- VkDeviceSize bytesFreed;
+- /// Number of allocations that have been moved to different places.
+- uint32_t allocationsMoved;
+- /// Number of empty `VkDeviceMemory` objects that have been released to the system.
+- uint32_t deviceMemoryBlocksFreed;
+-} VmaDefragmentationStats;
+-
+-/** @} */
+-
+-/**
+-\addtogroup group_virtual
+-@{
+-*/
+-
+-/// Parameters of created #VmaVirtualBlock object to be passed to vmaCreateVirtualBlock().
+-typedef struct VmaVirtualBlockCreateInfo
+-{
+- /** \brief Total size of the virtual block.
+-
+- Sizes can be expressed in bytes or any units you want as long as you are consistent in using them.
+- For example, if you allocate from some array of structures, 1 can mean a single instance of an entire structure.
+- */
+- VkDeviceSize size;
+-
+- /** \brief Use combination of #VmaVirtualBlockCreateFlagBits.
+- */
+- VmaVirtualBlockCreateFlags flags;
+-
+- /** \brief Custom CPU memory allocation callbacks. Optional.
+-
+- Optional, can be null. When specified, they will be used for all CPU-side memory allocations.
+- */
+- const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
+-} VmaVirtualBlockCreateInfo;
+-
+-/// Parameters of created virtual allocation to be passed to vmaVirtualAllocate().
+-typedef struct VmaVirtualAllocationCreateInfo
+-{
+- /** \brief Size of the allocation.
+-
+- Cannot be zero.
+- */
+- VkDeviceSize size;
+- /** \brief Required alignment of the allocation. Optional.
+-
+- Must be a power of two. The special value 0 has the same meaning as 1 - no special alignment is required, so the allocation can start at any offset.
+- */
+- VkDeviceSize alignment;
+- /** \brief Use combination of #VmaVirtualAllocationCreateFlagBits.
+- */
+- VmaVirtualAllocationCreateFlags flags;
+- /** \brief Custom pointer to be associated with the allocation. Optional.
+-
+- It can be any value and can be used for user-defined purposes. It can be fetched or changed later.
+- */
+- void* VMA_NULLABLE pUserData;
+-} VmaVirtualAllocationCreateInfo;
+-
+-/// Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo().
+-typedef struct VmaVirtualAllocationInfo
+-{
+- /** \brief Offset of the allocation.
+-
+- Offset at which the allocation was made.
+- */
+- VkDeviceSize offset;
+- /** \brief Size of the allocation.
+-
+- Same value as passed in VmaVirtualAllocationCreateInfo::size.
+- */
+- VkDeviceSize size;
+- /** \brief Custom pointer associated with the allocation.
+-
+- Same value as passed in VmaVirtualAllocationCreateInfo::pUserData or to vmaSetVirtualAllocationUserData().
+- */
+- void* VMA_NULLABLE pUserData;
+-} VmaVirtualAllocationInfo;
+-
+-/** @} */
+-
+-#endif // _VMA_DATA_TYPES_DECLARATIONS
+-
+-#ifndef _VMA_FUNCTION_HEADERS
+-
+-/**
+-\addtogroup group_init
+-@{
+-*/
+-
+-/// Creates #VmaAllocator object.
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
+- const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
+- VmaAllocator VMA_NULLABLE* VMA_NOT_NULL pAllocator);
+-
+-/// Destroys allocator object.
+-VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
+- VmaAllocator VMA_NULLABLE allocator);
+-
+-/** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc.
+-
+-It might be useful if you want to keep just the #VmaAllocator handle and fetch the other required handles to
+-`VkPhysicalDevice`, `VkDevice`, etc. using this function whenever needed.
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);
+-
+-/**
+-PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
+-You can access them here, without fetching them again on your own.
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
+- VmaAllocator VMA_NOT_NULL allocator,
+- const VkPhysicalDeviceProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceProperties);
+-
+-/**
+-PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
+-You can access them here, without fetching them again on your own.
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
+- VmaAllocator VMA_NOT_NULL allocator,
+- const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);
+-
+-/**
+-\brief Given Memory Type Index, returns Property Flags of this memory type.
+-
+-This is just a convenience function. Same information can be obtained using
+-vmaGetMemoryProperties().
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
+- VmaAllocator VMA_NOT_NULL allocator,
+- uint32_t memoryTypeIndex,
+- VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
+-
+-/** \brief Sets index of the current frame.
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
+- VmaAllocator VMA_NOT_NULL allocator,
+- uint32_t frameIndex);
+-
+-/** @} */
+-
+-/**
+-\addtogroup group_stats
+-@{
+-*/
+-
+-/** \brief Retrieves statistics from current state of the Allocator.
+-
+-This function is called "calculate" not "get" because it has to traverse all
+-internal data structures, so it may be quite slow. Use it for debugging purposes.
+-For faster but briefer statistics, suitable to be called every frame or on every allocation,
+-use vmaGetHeapBudgets().
+-
+-Note that when using the allocator from multiple threads, the returned information may immediately
+-become outdated.
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaTotalStatistics* VMA_NOT_NULL pStats);
+-
+-/** \brief Retrieves information about current memory usage and budget for all memory heaps.
+-
+-\param allocator
+-\param[out] pBudgets Must point to an array with a number of elements at least equal to the number of memory heaps in the physical device used.
+-
+-This function is called "get" not "calculate" because it is very fast, suitable to be called
+-every frame or on every allocation. For more detailed statistics, use vmaCalculateStatistics().
+-
+-Note that when using the allocator from multiple threads, the returned information may immediately
+-become outdated.
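+-
+-A minimal usage sketch (a valid `allocator` is assumed):
+-
+-\code
+-VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
+-vmaGetHeapBudgets(allocator, budgets);
+-// budgets[heapIndex].usage versus budgets[heapIndex].budget can now be
+-// compared to estimate whether further allocations are likely to succeed.
+-\endcode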
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaBudget* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pBudgets);
+-
+-/** @} */
+-
+-/**
+-\addtogroup group_alloc
+-@{
+-*/
+-
+-/**
+-\brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
+-
+-This algorithm tries to find a memory type that:
+-
+-- Is allowed by memoryTypeBits.
+-- Contains all the flags from pAllocationCreateInfo->requiredFlags.
+-- Matches intended usage.
+-- Has as many flags from pAllocationCreateInfo->preferredFlags as possible.
+-
+-\return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such a result
+-from this function or any other allocating function probably means that your
+-device doesn't support any memory type with the requested features for the specific
+-type of resource you want to use it for. Please check the parameters of your
+-resource, like image layout (OPTIMAL versus LINEAR) or mip level count.
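+-
+-A minimal usage sketch (a valid `allocator` is assumed; the flags are examples only):
+-
+-\code
+-VmaAllocationCreateInfo allocCreateInfo = {};
+-allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+-
+-uint32_t memTypeIndex;
+-VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
+-// On VK_SUCCESS, memTypeIndex can be used e.g. as VmaPoolCreateInfo::memoryTypeIndex.
+-\endcode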
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
+- VmaAllocator VMA_NOT_NULL allocator,
+- uint32_t memoryTypeBits,
+- const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+- uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
+-
+-/**
+-\brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
+-
+-It can be useful e.g. to determine the value to be used as VmaPoolCreateInfo::memoryTypeIndex.
+-It internally creates a temporary, dummy buffer that never has memory bound.
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
+- VmaAllocator VMA_NOT_NULL allocator,
+- const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+- const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+- uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
+-
+-/**
+-\brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
+-
+-It can be useful e.g. to determine the value to be used as VmaPoolCreateInfo::memoryTypeIndex.
+-It internally creates a temporary, dummy image that never has memory bound.
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
+- VmaAllocator VMA_NOT_NULL allocator,
+- const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
+- const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+- uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
+-
+-/** \brief Allocates Vulkan device memory and creates #VmaPool object.
+-
+-\param allocator Allocator object.
+-\param pCreateInfo Parameters of pool to create.
+-\param[out] pPool Handle to created pool.
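+-
+-A minimal sketch (a valid `allocator` and a `memTypeIndex` found e.g. with
+-vmaFindMemoryTypeIndexForBufferInfo() are assumed):
+-
+-\code
+-VmaPoolCreateInfo poolCreateInfo = {};
+-poolCreateInfo.memoryTypeIndex = memTypeIndex;
+-// blockSize, minBlockCount, maxBlockCount left at 0 - library defaults.
+-
+-VmaPool pool;
+-VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
+-// ... allocate via VmaAllocationCreateInfo::pool, later vmaDestroyPool(allocator, pool).
+-\endcode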
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
+- VmaAllocator VMA_NOT_NULL allocator,
+- const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,
+- VmaPool VMA_NULLABLE* VMA_NOT_NULL pPool);
+-
+-/** \brief Destroys #VmaPool object and frees Vulkan device memory.
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaPool VMA_NULLABLE pool);
+-
+-/** @} */
+-
+-/**
+-\addtogroup group_stats
+-@{
+-*/
+-
+-/** \brief Retrieves statistics of existing #VmaPool object.
+-
+-\param allocator Allocator object.
+-\param pool Pool object.
+-\param[out] pPoolStats Statistics of specified pool.
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaPool VMA_NOT_NULL pool,
+- VmaStatistics* VMA_NOT_NULL pPoolStats);
+-
+-/** \brief Retrieves detailed statistics of existing #VmaPool object.
+-
+-\param allocator Allocator object.
+-\param pool Pool object.
+-\param[out] pPoolStats Statistics of specified pool.
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaPool VMA_NOT_NULL pool,
+- VmaDetailedStatistics* VMA_NOT_NULL pPoolStats);
+-
+-/** @} */
+-
+-/**
+-\addtogroup group_alloc
+-@{
+-*/
+-
+-/** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions.
+-
+-Corruption detection is enabled only when the `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
+-`VMA_DEBUG_MARGIN` is defined to nonzero, and the pool is created in a memory type that is
+-`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
+-
+-Possible return values:
+-
+-- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for the specified pool.
+-- `VK_SUCCESS` - corruption detection has been performed and succeeded.
+-- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
+- `VMA_ASSERT` is also fired in that case.
+-- Other value: Error returned by Vulkan, e.g. memory mapping failure.
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaPool VMA_NOT_NULL pool);
+-
+-/** \brief Retrieves name of a custom pool.
+-
+-After the call `ppName` is either null or points to an internally-owned null-terminated string
+-containing the name of the pool that was previously set. The pointer becomes invalid when the pool is
+-destroyed or its name is changed using vmaSetPoolName().
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaPool VMA_NOT_NULL pool,
+- const char* VMA_NULLABLE* VMA_NOT_NULL ppName);
+-
+-/** \brief Sets name of a custom pool.
+-
+-`pName` can be either null or a pointer to a null-terminated string with the new name for the pool.
+-The function makes an internal copy of the string, so it can be changed or freed immediately after this call.
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaPool VMA_NOT_NULL pool,
+- const char* VMA_NULLABLE pName);
+-
+-/** \brief General purpose memory allocation.
+-
+-\param allocator
+-\param pVkMemoryRequirements
+-\param pCreateInfo
+-\param[out] pAllocation Handle to allocated memory.
+-\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+-
+-You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
+-
+-It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(),
+-vmaCreateBuffer(), vmaCreateImage() instead whenever possible.
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
+- VmaAllocator VMA_NOT_NULL allocator,
+- const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
+- const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+- VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+- VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+-
+-/** \brief General purpose memory allocation for multiple allocation objects at once.
+-
+-\param allocator Allocator object.
+-\param pVkMemoryRequirements Memory requirements for each allocation.
+-\param pCreateInfo Creation parameters for each allocation.
+-\param allocationCount Number of allocations to make.
+-\param[out] pAllocations Pointer to array that will be filled with handles to created allocations.
+-\param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations.
+-
+-You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
+-
+-Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding.
+-It is just a general purpose allocation function able to make multiple allocations at once.
+-It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times.
+-
+-All allocations are made using the same parameters. All of them are created out of the same memory pool and type.
+-If any allocation fails, all allocations already made within this function call are also freed, so that when the
+-returned result is not `VK_SUCCESS`, the `pAllocations` array is always entirely filled with `VK_NULL_HANDLE`.
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
+- VmaAllocator VMA_NOT_NULL allocator,
+- const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
+- const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
+- size_t allocationCount,
+- VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
+- VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);
+-
+-/** \brief Allocates memory suitable for given `VkBuffer`.
+-
+-\param allocator
+-\param buffer
+-\param pCreateInfo
+-\param[out] pAllocation Handle to allocated memory.
+-\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+-
+-It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindBufferMemory().
+-
+-This is a special-purpose function. In most cases you should use vmaCreateBuffer().
+-
+-You must free the allocation using vmaFreeMemory() when no longer needed.
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
+- const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+- VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+- VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+-
+-/** \brief Allocates memory suitable for given `VkImage`.
+-
+-\param allocator
+-\param image
+-\param pCreateInfo
+-\param[out] pAllocation Handle to allocated memory.
+-\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+-
+-It only creates #VmaAllocation. To bind the memory to the image, use vmaBindImageMemory().
+-
+-This is a special-purpose function. In most cases you should use vmaCreateImage().
+-
+-You must free the allocation using vmaFreeMemory() when no longer needed.
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
+- const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+- VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+- VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+-
+-/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
+-
+-Passing `VK_NULL_HANDLE` as `allocation` is valid. Such a function call is just skipped.
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
+- VmaAllocator VMA_NOT_NULL allocator,
+- const VmaAllocation VMA_NULLABLE allocation);
+-
+-/** \brief Frees memory and destroys multiple allocations.
+-
+-Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding.
+-It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(),
+-vmaAllocateMemoryPages() and other functions.
+-It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times.
+-
+-Allocations in `pAllocations` array can come from any memory pools and types.
+-Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped.
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
+- VmaAllocator VMA_NOT_NULL allocator,
+- size_t allocationCount,
+- const VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);
+-
+-/** \brief Returns current information about specified allocation.
+-
+-Current parameters of given allocation are returned in `pAllocationInfo`.
+-
+-This function doesn't lock any mutex, so it should be quite efficient; still, you
+-should avoid calling it too often.
+-You can retrieve the same VmaAllocationInfo structure while creating your resource, from functions
+-vmaCreateBuffer() and vmaCreateImage(). You can cache it if you are sure the parameters don't change
+-(e.g. due to defragmentation).
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocation VMA_NOT_NULL allocation,
+- VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);
+-
+-/** \brief Sets pUserData in given allocation to new value.
+-
+-The value of pointer `pUserData` is copied to allocation's `pUserData`.
+-It is opaque, so you can use it however you want - e.g.
+-as a pointer, an ordinal number, or some handle to your own data.
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocation VMA_NOT_NULL allocation,
+- void* VMA_NULLABLE pUserData);
+-
+-/** \brief Sets pName in given allocation to new value.
+-
+-`pName` must be either null or a pointer to a null-terminated string. The function
+-makes a local copy of the string and sets it as the allocation's `pName`. The string
+-passed as pName doesn't need to remain valid for the whole lifetime of the allocation -
+-you can free it after this call. The string previously pointed to by the allocation's
+-`pName` is freed from memory.
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocation VMA_NOT_NULL allocation,
+- const char* VMA_NULLABLE pName);
+-
+-/**
+-\brief Given an allocation, returns Property Flags of its memory type.
+-
+-This is just a convenience function. Same information can be obtained using
+-vmaGetAllocationInfo() + vmaGetMemoryProperties().
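+-
+-For example, it can be used to check whether an allocation is host-visible before
+-attempting to map it (a valid `allocator` and `allocation` are assumed):
+-
+-\code
+-VkMemoryPropertyFlags memFlags;
+-vmaGetAllocationMemoryProperties(allocator, allocation, &memFlags);
+-if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
+-{
+-    // The allocation can be mapped with vmaMapMemory().
+-}
+-\endcode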
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocation VMA_NOT_NULL allocation,
+- VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
+-
+-/** \brief Maps memory represented by given allocation and returns pointer to it.
+-
+-Maps memory represented by given allocation to make it accessible to CPU code.
+-When succeeded, `*ppData` contains pointer to first byte of this memory.
+-
+-\warning
+-If the allocation is part of a bigger `VkDeviceMemory` block, the returned pointer is
+-correctly offset to the beginning of the region assigned to this particular allocation.
+-Unlike the result of `vkMapMemory`, it points to the allocation, not to the beginning of the whole block.
+-You should not add VmaAllocationInfo::offset to it!
+-
+-Mapping is internally reference-counted and synchronized, so even though the raw Vulkan
+-function `vkMapMemory()` cannot be used to map the same block of `VkDeviceMemory`
+-multiple times simultaneously, it is safe to call this function on allocations
+-assigned to the same memory block. The actual Vulkan memory will be mapped on the first
+-mapping and unmapped on the last unmapping.
+-
+-If the function succeeded, you must call vmaUnmapMemory() to unmap the
+-allocation when mapping is no longer needed or before freeing the allocation, at
+-the latest.
+-
+-It is also safe to call this function multiple times on the same allocation. You
+-must call vmaUnmapMemory() the same number of times as you called vmaMapMemory().
+-
+-It is also safe to call this function on allocation created with
+-#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.
+-You must still call vmaUnmapMemory() the same number of times as you called
+-vmaMapMemory(). You must not call vmaUnmapMemory() an additional time to free the
+-"0-th" mapping made automatically due to the #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
+-
+-This function fails when used on an allocation made in a memory type that is not
+-`HOST_VISIBLE`.
+-
+-This function doesn't automatically flush or invalidate caches.
+-If the allocation is made from a memory type that is not `HOST_COHERENT`,
+-you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by the Vulkan specification.
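+-
+-A minimal map/write/unmap sketch (a valid `allocator` and a `HOST_VISIBLE` `allocation` are
+-assumed; `mySrcData`/`mySrcDataSize` are hypothetical; the flush matters only for
+-non-`HOST_COHERENT` memory):
+-
+-\code
+-void* pData;
+-if(vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
+-{
+-    memcpy(pData, mySrcData, mySrcDataSize);
+-    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
+-    vmaUnmapMemory(allocator, allocation);
+-}
+-\endcode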
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocation VMA_NOT_NULL allocation,
+- void* VMA_NULLABLE* VMA_NOT_NULL ppData);
+-
+-/** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
+-
+-For details, see description of vmaMapMemory().
+-
+-This function doesn't automatically flush or invalidate caches.
+-If the allocation is made from a memory type that is not `HOST_COHERENT`,
+-you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by the Vulkan specification.
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocation VMA_NOT_NULL allocation);
+-
+-/** \brief Flushes memory of given allocation.
+-
+-Calls `vkFlushMappedMemoryRanges()` for memory associated with given range of given allocation.
+-It needs to be called after writing to a mapped memory for memory types that are not `HOST_COHERENT`.
+-Unmap operation doesn't do that automatically.
+-
+-- `offset` must be relative to the beginning of the allocation.
+-- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
+-- `offset` and `size` don't have to be aligned.
+- They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
+-- If `size` is 0, this call is ignored.
+-- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
+- this call is ignored.
+-
+-Warning! `offset` and `size` are relative to the contents of the given `allocation`.
+-If you mean the whole allocation, pass 0 and `VK_WHOLE_SIZE`, respectively.
+-Do not pass the allocation's offset as `offset`!
+-
+-This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
+-called, otherwise `VK_SUCCESS`.
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocation VMA_NOT_NULL allocation,
+- VkDeviceSize offset,
+- VkDeviceSize size);
+-
+-/** \brief Invalidates memory of given allocation.
+-
+-Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given range of given allocation.
+-It needs to be called before reading from a mapped memory for memory types that are not `HOST_COHERENT`.
+-Map operation doesn't do that automatically.
+-
+-- `offset` must be relative to the beginning of the allocation.
+-- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
+-- `offset` and `size` don't have to be aligned.
+- They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
+-- If `size` is 0, this call is ignored.
+-- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
+- this call is ignored.
+-
+-Warning! `offset` and `size` are relative to the contents of the given `allocation`.
+-If you mean the whole allocation, pass 0 and `VK_WHOLE_SIZE`, respectively.
+-Do not pass the allocation's offset as `offset`!
+-
+-This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if
+-it is called, otherwise `VK_SUCCESS`.
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocation VMA_NOT_NULL allocation,
+- VkDeviceSize offset,
+- VkDeviceSize size);
+-
+-/** \brief Flushes memory of given set of allocations.
+-
+-Calls `vkFlushMappedMemoryRanges()` for memory associated with given ranges of given allocations.
+-For more information, see documentation of vmaFlushAllocation().
+-
+-\param allocator
+-\param allocationCount
+-\param allocations
+-\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all offsets are zero.
+-\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
+-
+-This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
+-called, otherwise `VK_SUCCESS`.
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
+- VmaAllocator VMA_NOT_NULL allocator,
+- uint32_t allocationCount,
+- const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
+- const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
+- const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
+-
+-/** \brief Invalidates memory of given set of allocations.
+-
+-Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations.
+-For more information, see documentation of vmaInvalidateAllocation().
+-
+-\param allocator
+-\param allocationCount
+-\param allocations
+-\param offsets If not null, it must point to an array of offsets of regions to invalidate, relative to the beginning of respective allocations. Null means all offsets are zero.
+-\param sizes If not null, it must point to an array of sizes of regions to invalidate in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
+-
+-This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is
+-called, otherwise `VK_SUCCESS`.
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
+- VmaAllocator VMA_NOT_NULL allocator,
+- uint32_t allocationCount,
+- const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
+- const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
+- const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
+-
+-/** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions.
+-
+-\param allocator
+-\param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked.
+-
+-Corruption detection is enabled only when the `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
+-`VMA_DEBUG_MARGIN` is defined to nonzero, and only for memory types that are
+-`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
+-
+-Possible return values:
+-
+-- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of the specified memory types.
+-- `VK_SUCCESS` - corruption detection has been performed and succeeded.
+-- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
+- `VMA_ASSERT` is also fired in that case.
+-- Other value: Error returned by Vulkan, e.g. memory mapping failure.
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(
+- VmaAllocator VMA_NOT_NULL allocator,
+- uint32_t memoryTypeBits);
+-
+-/** \brief Begins defragmentation process.
+-
+-\param allocator Allocator object.
+-\param pInfo Structure filled with parameters of defragmentation.
+-\param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation.
+-\returns
+-- `VK_SUCCESS` if defragmentation can begin.
+-- `VK_ERROR_FEATURE_NOT_PRESENT` if defragmentation is not supported.
+-
+-For more information about defragmentation, see documentation chapter:
+-[Defragmentation](@ref defragmentation).
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation(
+- VmaAllocator VMA_NOT_NULL allocator,
+- const VmaDefragmentationInfo* VMA_NOT_NULL pInfo,
+- VmaDefragmentationContext VMA_NULLABLE* VMA_NOT_NULL pContext);
+-
+-/** \brief Ends defragmentation process.
+-
+-\param allocator Allocator object.
+-\param context Context object that has been created by vmaBeginDefragmentation().
+-\param[out] pStats Optional stats for the defragmentation. Can be null.
+-
+-Use this function to finish defragmentation started by vmaBeginDefragmentation().
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaDefragmentationContext VMA_NOT_NULL context,
+- VmaDefragmentationStats* VMA_NULLABLE pStats);
+-
+-/** \brief Starts single defragmentation pass.
+-
+-\param allocator Allocator object.
+-\param context Context object that has been created by vmaBeginDefragmentation().
+-\param[out] pPassInfo Computed information for current pass.
+-\returns
+-- `VK_SUCCESS` if no more moves are possible. Then you can omit the call to vmaEndDefragmentationPass() and simply end the whole defragmentation.
+-- `VK_INCOMPLETE` if there are pending moves returned in `pPassInfo`. You need to perform them, call vmaEndDefragmentationPass(),
+- and then preferably try another pass with vmaBeginDefragmentationPass().
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaDefragmentationContext VMA_NOT_NULL context,
+- VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);
+-
+-/** \brief Ends single defragmentation pass.
+-
+-\param allocator Allocator object.
+-\param context Context object that has been created by vmaBeginDefragmentation().
+-\param pPassInfo Computed information for current pass filled by vmaBeginDefragmentationPass() and possibly modified by you.
+-
+-Returns `VK_SUCCESS` if no more moves are possible, or `VK_INCOMPLETE` if more defragmentation passes are possible.
+-
+-Ends incremental defragmentation pass and commits all defragmentation moves from `pPassInfo`.
+-After this call:
+-
+-- Allocations at `pPassInfo->pMoves[i].srcAllocation` that had `pPassInfo->pMoves[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY
+- (which is the default) will point to the new destination place.
+-- Allocations at `pPassInfo->pMoves[i].srcAllocation` that had `pPassInfo->pMoves[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY
+- will be freed.
+-
+-If no more moves are possible, you can end the whole defragmentation.
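+-
+-A condensed sketch of the whole incremental defragmentation loop (processing of the
+-moves, described at VmaDefragmentationPassMoveInfo::pMoves, is elided):
+-
+-\code
+-VmaDefragmentationInfo defragInfo = {};
+-VmaDefragmentationContext defragCtx;
+-if(vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx) == VK_SUCCESS)
+-{
+-    for(;;)
+-    {
+-        VmaDefragmentationPassMoveInfo passInfo;
+-        if(vmaBeginDefragmentationPass(allocator, defragCtx, &passInfo) == VK_SUCCESS)
+-            break; // No more moves are possible.
+-        // ... process passInfo.pMoves here ...
+-        if(vmaEndDefragmentationPass(allocator, defragCtx, &passInfo) == VK_SUCCESS)
+-            break; // No more moves are possible.
+-    }
+-    vmaEndDefragmentation(allocator, defragCtx, VMA_NULL);
+-}
+-\endcode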
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaDefragmentationContext VMA_NOT_NULL context,
+- VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);
+-
+-/** \brief Binds buffer to allocation.
+-
+-Binds specified buffer to region of memory represented by specified allocation.
+-Gets `VkDeviceMemory` handle and offset from the allocation.
+-If you want to create a buffer, allocate memory for it and bind them together separately,
+-you should use this function for binding instead of standard `vkBindBufferMemory()`,
+-because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
+-allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
+-(which is illegal in Vulkan).
+-
+-It is recommended to use function vmaCreateBuffer() instead of this one.
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocation VMA_NOT_NULL allocation,
+- VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);
+-
+-/** \brief Binds buffer to allocation with additional parameters.
+-
+-\param allocator
+-\param allocation
+-\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
+-\param buffer
+-\param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null.
+-
+-This function is similar to vmaBindBufferMemory(), but it provides additional parameters.
+-
+-If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
+-or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocation VMA_NOT_NULL allocation,
+- VkDeviceSize allocationLocalOffset,
+- VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
+- const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindBufferMemoryInfoKHR) pNext);
+-
+-/** \brief Binds image to allocation.
+-
+-Binds specified image to region of memory represented by specified allocation.
+-Gets `VkDeviceMemory` handle and offset from the allocation.
+-If you want to create an image, allocate memory for it and bind them together separately,
+-you should use this function for binding instead of standard `vkBindImageMemory()`,
+-because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
+-allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
+-(which is illegal in Vulkan).
+-
+-It is recommended to use function vmaCreateImage() instead of this one.
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocation VMA_NOT_NULL allocation,
+- VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);
+-
+-/** \brief Binds image to allocation with additional parameters.
+-
+-\param allocator
+-\param allocation
+-\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
+-\param image
+-\param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null.
+-
+-This function is similar to vmaBindImageMemory(), but it provides additional parameters.
+-
+-If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
+-or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocation VMA_NOT_NULL allocation,
+- VkDeviceSize allocationLocalOffset,
+- VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
+- const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindImageMemoryInfoKHR) pNext);
+-
+-/** \brief Creates a new `VkBuffer`, allocates and binds memory for it.
+-
+-\param allocator
+-\param pBufferCreateInfo
+-\param pAllocationCreateInfo
+-\param[out] pBuffer Buffer that was created.
+-\param[out] pAllocation Allocation that was created.
+-\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+-
+-This function automatically:
+-
+--# Creates buffer.
+--# Allocates appropriate memory for it.
+--# Binds the buffer with the memory.
+-
+-If any of these operations fail, the buffer and allocation are not created, the
+-returned value is a negative error code, and `*pBuffer` and `*pAllocation` are null.
+-
+-If the function succeeded, you must destroy both buffer and allocation when you
+-no longer need them using either convenience function vmaDestroyBuffer() or
+-separately, using `vkDestroyBuffer()` and vmaFreeMemory().
+-
+-If the #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used,
+-the VK_KHR_dedicated_allocation extension is used internally to query the driver whether
+-it requires or prefers the new buffer to have a dedicated allocation. If so,
+-and if a dedicated allocation is possible
+-(#VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates a dedicated
+-allocation for this buffer, just like when using
+-#VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+-
+-\note This function creates a new `VkBuffer`. Sub-allocation of parts of one large buffer,
+-although recommended as a good practice, is out of scope of this library and could be implemented
+-by the user as a higher-level logic on top of VMA.
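+-
+-A minimal sketch of the typical usage (a valid `allocator` is assumed; size and usage
+-flags are examples only):
+-
+-\code
+-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+-bufCreateInfo.size = 65536;
+-bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
+-
+-VmaAllocationCreateInfo allocCreateInfo = {};
+-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+-
+-VkBuffer buf;
+-VmaAllocation alloc;
+-VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
+-// ... later: vmaDestroyBuffer(allocator, buf, alloc);
+-\endcode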
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
+- VmaAllocator VMA_NOT_NULL allocator,
+- const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+- const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+- VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
+- VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+- VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+-
+-/** \brief Creates a buffer with additional minimum alignment.
+-
+-Similar to vmaCreateBuffer(), but provides an additional parameter `minAlignment`, which allows you to specify a custom
+-minimum alignment to be used when placing the buffer inside a larger memory block, which may be needed e.g.
+-for interop with OpenGL.
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
+- VmaAllocator VMA_NOT_NULL allocator,
+- const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+- const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+- VkDeviceSize minAlignment,
+- VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
+- VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+- VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+-
+-/** \brief Creates a new `VkBuffer`, binds already created memory for it.
+-
+-\param allocator
+-\param allocation Allocation that provides memory to be used for binding new buffer to it.
+-\param pBufferCreateInfo
+-\param[out] pBuffer Buffer that was created.
+-
+-This function automatically:
+-
+--# Creates buffer.
+--# Binds the buffer with the supplied memory.
+-
+-If any of these operations fail, the buffer is not created, the
+-returned value is a negative error code, and `*pBuffer` is null.
+-
+-If the function succeeded, you must destroy the buffer when you
+-no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding
+-allocation you can use convenience function vmaDestroyBuffer().
+-
+-\note There is a new version of this function augmented with parameter `allocationLocalOffset` - see vmaCreateAliasingBuffer2().
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocation VMA_NOT_NULL allocation,
+- const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+- VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer);
+-
+-/** \brief Creates a new `VkBuffer`, binds already created memory for it.
+-
+-\param allocator
+-\param allocation Allocation that provides memory to be used for binding new buffer to it.
+-\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the allocation. Normally it should be 0.
+-\param pBufferCreateInfo
+-\param[out] pBuffer Buffer that was created.
+-
+-This function automatically:
+-
+--# Creates buffer.
+--# Binds the buffer with the supplied memory.
+-
+-If any of these operations fail, the buffer is not created, the
+-returned value is a negative error code, and `*pBuffer` is null.
+-
+-If the function succeeded, you must destroy the buffer when you
+-no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding
+-allocation you can use convenience function vmaDestroyBuffer().
+-
+-\note This is a new version of the function augmented with parameter `allocationLocalOffset`.
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocation VMA_NOT_NULL allocation,
+- VkDeviceSize allocationLocalOffset,
+- const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+- VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer);
+-
+-/** \brief Destroys Vulkan buffer and frees allocated memory.
+-
+-This is just a convenience function equivalent to:
+-
+-\code
+-vkDestroyBuffer(device, buffer, allocationCallbacks);
+-vmaFreeMemory(allocator, allocation);
+-\endcode
+-
+-It is safe to pass null as buffer and/or allocation.
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
+- VmaAllocation VMA_NULLABLE allocation);
+-
+-/// Function similar to vmaCreateBuffer().
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
+- VmaAllocator VMA_NOT_NULL allocator,
+- const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
+- const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+- VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage,
+- VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+- VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+-
+-/// Function similar to vmaCreateAliasingBuffer() but for images.
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocation VMA_NOT_NULL allocation,
+- const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
+- VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage);
+-
+-/// Function similar to vmaCreateAliasingBuffer2() but for images.
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocation VMA_NOT_NULL allocation,
+- VkDeviceSize allocationLocalOffset,
+- const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
+- VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage);
+-
+-/** \brief Destroys Vulkan image and frees allocated memory.
+-
+-This is just a convenience function equivalent to:
+-
+-\code
+-vkDestroyImage(device, image, allocationCallbacks);
+-vmaFreeMemory(allocator, allocation);
+-\endcode
+-
+-It is safe to pass null as image and/or allocation.
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
+- VmaAllocation VMA_NULLABLE allocation);
+-
+-/** @} */
+-
+-/**
+-\addtogroup group_virtual
+-@{
+-*/
+-
+-/** \brief Creates new #VmaVirtualBlock object.
+-
+-\param pCreateInfo Parameters for creation.
+-\param[out] pVirtualBlock Returned virtual block object or `VMA_NULL` if creation failed.
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(
+- const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
+- VmaVirtualBlock VMA_NULLABLE* VMA_NOT_NULL pVirtualBlock);
+-
+-/** \brief Destroys #VmaVirtualBlock object.
+-
+-Please note that you should consciously handle virtual allocations that could remain unfreed in the block.
+-You should either free them individually using vmaVirtualFree() or call vmaClearVirtualBlock()
+-if you are sure this is what you want. If you do neither, an assert is called.
+-
+-If you keep pointers to some additional metadata associated with your virtual allocations in their `pUserData`,
+-don't forget to free them.
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(
+- VmaVirtualBlock VMA_NULLABLE virtualBlock);
+-
+-/** \brief Returns true if the #VmaVirtualBlock is empty - contains 0 virtual allocations and has all its space available for new allocations.
+-*/
+-VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(
+- VmaVirtualBlock VMA_NOT_NULL virtualBlock);
+-
+-/** \brief Returns information about a specific virtual allocation within a virtual block, like its size and `pUserData` pointer.
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(
+- VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+- VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo);
+-
+-/** \brief Allocates new virtual allocation inside given #VmaVirtualBlock.
+-
+-If the allocation fails because there is not enough free space available, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned
+-(even though the function doesn't ever allocate actual GPU memory).
+-`pAllocation` is then set to `VK_NULL_HANDLE` and `pOffset`, if not null, is set to `UINT64_MAX`.
+-
+-\param virtualBlock Virtual block
+-\param pCreateInfo Parameters for the allocation
+-\param[out] pAllocation Returned handle of the new allocation
+-\param[out] pOffset Returned offset of the new allocation. Optional, can be null.
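+-
+-A minimal sketch (a `virtualBlock` created earlier with vmaCreateVirtualBlock() is assumed;
+-the size is an example only):
+-
+-\code
+-VmaVirtualAllocationCreateInfo allocCreateInfo = {};
+-allocCreateInfo.size = 4096; // In whatever units the block uses.
+-
+-VmaVirtualAllocation alloc;
+-VkDeviceSize offset;
+-if(vmaVirtualAllocate(virtualBlock, &allocCreateInfo, &alloc, &offset) == VK_SUCCESS)
+-{
+-    // [offset, offset + 4096) is now reserved inside the block.
+-    // Later: vmaVirtualFree(virtualBlock, alloc);
+-}
+-\endcode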
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(
+- VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+- const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+- VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,
+- VkDeviceSize* VMA_NULLABLE pOffset);
+-
+-/** \brief Frees virtual allocation inside given #VmaVirtualBlock.
+-
+-It is correct to call this function with `allocation == VK_NULL_HANDLE` - it does nothing.
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(
+- VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+- VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation);
+-
+-/** \brief Frees all virtual allocations inside given #VmaVirtualBlock.
+-
+-You must either call this function or free each virtual allocation individually with vmaVirtualFree()
+-before destroying a virtual block. Otherwise, an assert is called.
+-
+-If you keep a pointer to some additional metadata associated with your virtual allocation in its `pUserData`,
+-don't forget to free it as well.
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(
+- VmaVirtualBlock VMA_NOT_NULL virtualBlock);
+-
+-/** \brief Changes custom pointer associated with given virtual allocation.
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(
+- VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+- VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation,
+- void* VMA_NULLABLE pUserData);
+-
+-/** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
+-
+-This function is fast to call. For more detailed statistics, see vmaCalculateVirtualBlockStatistics().
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(
+- VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+- VmaStatistics* VMA_NOT_NULL pStats);
+-
+-/** \brief Calculates and returns detailed statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
+-
+-This function is slow to call. Use for debugging purposes.
+-For less detailed statistics, see vmaGetVirtualBlockStatistics().
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(
+- VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+- VmaDetailedStatistics* VMA_NOT_NULL pStats);
+-
+-/** @} */
+-
+-#if VMA_STATS_STRING_ENABLED
+-/**
+-\addtogroup group_stats
+-@{
+-*/
+-
+-/** \brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock.
+-\param virtualBlock Virtual block.
+-\param[out] ppStatsString Returned string.
+-\param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStatistics(). Pass `VK_TRUE` to also obtain a full list of allocations and free spaces.
+-
+-Returned string must be freed using vmaFreeVirtualBlockStatsString().
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(
+- VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+- char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
+- VkBool32 detailedMap);
+-
+-/// Frees a string returned by vmaBuildVirtualBlockStatsString().
+-VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(
+- VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+- char* VMA_NULLABLE pStatsString);
+-
+-/** \brief Builds and returns statistics as a null-terminated string in JSON format.
+-\param allocator
+-\param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
+-\param detailedMap
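+-
+-A minimal sketch (a valid `allocator` is assumed):
+-
+-\code
+-char* statsString;
+-vmaBuildStatsString(allocator, &statsString, VK_TRUE);
+-// ... dump statsString to a file or log for offline inspection ...
+-vmaFreeStatsString(allocator, statsString);
+-\endcode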
+-*/
+-VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
+- VmaAllocator VMA_NOT_NULL allocator,
+- char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
+- VkBool32 detailedMap);
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
+- VmaAllocator VMA_NOT_NULL allocator,
+- char* VMA_NULLABLE pStatsString);
+-
+-/** @} */
+-
+-#endif // VMA_STATS_STRING_ENABLED
+-
+-#endif // _VMA_FUNCTION_HEADERS
+-
+-#ifdef __cplusplus
+-}
+-#endif
+-
+-#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
+-
+-////////////////////////////////////////////////////////////////////////////////
+-////////////////////////////////////////////////////////////////////////////////
+-//
+-// IMPLEMENTATION
+-//
+-////////////////////////////////////////////////////////////////////////////////
+-////////////////////////////////////////////////////////////////////////////////
+-
+-// For Visual Studio IntelliSense.
+-#if defined(__cplusplus) && defined(__INTELLISENSE__)
+-#define VMA_IMPLEMENTATION
+-#endif
+-
+-#ifdef VMA_IMPLEMENTATION
+-#undef VMA_IMPLEMENTATION
+-
+-#include <cstdint>
+-#include <cstdlib>
+-#include <cstring>
+-#include <utility>
+-#include <type_traits>
+-
+-#ifdef _MSC_VER
+- #include <intrin.h> // For functions like __popcnt, _BitScanForward etc.
+-#endif
+-#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
+- #include <bit> // For std::popcount
+-#endif
+-
+-#if VMA_STATS_STRING_ENABLED
+- #include <cstdio> // For snprintf
+-#endif
+-
+-/*******************************************************************************
+-CONFIGURATION SECTION
+-
+-Define some of these macros before each #include of this header, or change them
+-here, if you need behavior other than the default for your environment.
+-*/
+-#ifndef _VMA_CONFIGURATION
+-
+-/*
+-Define this macro to 1 to make the library fetch pointers to Vulkan functions
+-internally, like:
+-
+- vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
+-*/
+-#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
+- #define VMA_STATIC_VULKAN_FUNCTIONS 1
+-#endif
+-
+-/*
+-Define this macro to 1 to make the library fetch pointers to Vulkan functions
+-internally, like:
+-
+- vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
+-
+-To use this feature in new versions of VMA you now have to pass
+-VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as
+-VmaAllocatorCreateInfo::pVulkanFunctions. Other members can be null.
+-*/
+-#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
+- #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
+-#endif
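+-
+-/* A minimal setup sketch for the dynamic case, assuming the program links
+-the Vulkan loader so that vkGetInstanceProcAddr/vkGetDeviceProcAddr are
+-available as ordinary functions:
+-
+-    VmaVulkanFunctions vulkanFunctions = {};
+-    vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr;
+-    vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr;
+-
+-    VmaAllocatorCreateInfo allocatorCreateInfo = {};
+-    allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
+-    // ...fill the remaining members, then call vmaCreateAllocator().
+-*/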
+-
+-#ifndef VMA_USE_STL_SHARED_MUTEX
+- #if __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
+- #define VMA_USE_STL_SHARED_MUTEX 1
+-    // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
+-    // Otherwise it is always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
+- #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
+- #define VMA_USE_STL_SHARED_MUTEX 1
+- #else
+- #define VMA_USE_STL_SHARED_MUTEX 0
+- #endif
+-#endif
+-
+-/*
+-Define this macro to include custom header files without having to edit this file directly, e.g.:
+-
+- // Inside of "my_vma_configuration_user_includes.h":
+-
+- #include "my_custom_assert.h" // for MY_CUSTOM_ASSERT
+- #include "my_custom_min.h" // for my_custom_min
+- #include <algorithm>
+- #include <mutex>
+-
+- // Inside a different file, which includes "vk_mem_alloc.h":
+-
+- #define VMA_CONFIGURATION_USER_INCLUDES_H "my_vma_configuration_user_includes.h"
+- #define VMA_ASSERT(expr) MY_CUSTOM_ASSERT(expr)
+- #define VMA_MIN(v1, v2) (my_custom_min(v1, v2))
+- #include "vk_mem_alloc.h"
+- ...
+-
+-The following headers are used in this CONFIGURATION section only, so feel free to
+-remove them if not needed.
+-*/
+-#if !defined(VMA_CONFIGURATION_USER_INCLUDES_H)
+- #include <cassert> // for assert
+- #include <algorithm> // for min, max
+- #include <mutex>
+-#else
+- #include VMA_CONFIGURATION_USER_INCLUDES_H
+-#endif
+-
+-#ifndef VMA_NULL
+- // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
+- #define VMA_NULL nullptr
+-#endif
+-
+-// Used to silence warnings for implicit fallthrough.
+-#ifndef VMA_FALLTHROUGH
+- #if __has_cpp_attribute(clang::fallthrough)
+- #define VMA_FALLTHROUGH [[clang::fallthrough]];
+- #elif __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
+- #define VMA_FALLTHROUGH [[fallthrough]]
+- #else
+- #define VMA_FALLTHROUGH
+- #endif
+-#endif
+-
+-// Normal assert to check for programmer's errors, especially in Debug configuration.
+-#ifndef VMA_ASSERT
+- #ifdef NDEBUG
+- #define VMA_ASSERT(expr)
+- #else
+- #define VMA_ASSERT(expr) assert(expr)
+- #endif
+-#endif
+-
+-// Assert that will be called very often, like inside data structures e.g. operator[].
+-// Making it non-empty can make program slow.
+-#ifndef VMA_HEAVY_ASSERT
+- #ifdef NDEBUG
+- #define VMA_HEAVY_ASSERT(expr)
+- #else
+- #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
+- #endif
+-#endif
+-
+-// If your compiler is not compatible with C++17 and the definition of the
+-// aligned_alloc() function is missing, uncommenting the following line may help:
+-
+-//#include <malloc.h>
+-
+-#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
+-#include <cstdlib>
+-void* vma_aligned_alloc(size_t alignment, size_t size)
+-{
+- // alignment must be >= sizeof(void*)
+- if(alignment < sizeof(void*))
+- {
+- alignment = sizeof(void*);
+- }
+-
+- return memalign(alignment, size);
+-}
+-#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
+-#include <cstdlib>
+-
+-#if defined(__APPLE__)
+-#include <AvailabilityMacros.h>
+-#endif
+-
+-void *vma_aligned_alloc(size_t alignment, size_t size)
+-{
+-    // Unfortunately, aligned_alloc causes VMA to crash because it returns null pointers (at least under macOS 11.4).
+-    // Therefore, for now, disable this specific exception until a proper solution is found.
+- //#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
+- //#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
+- // // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc()) only
+- // // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
+- // // MAC_OS_X_VERSION_10_16), even though the function is marked
+- // // available for 10.15. That is why the preprocessor checks for 10.16 but
+- // // the __builtin_available checks for 10.15.
+- // // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
+- // if (__builtin_available(macOS 10.15, iOS 13, *))
+- // return aligned_alloc(alignment, size);
+- //#endif
+- //#endif
+-
+- // alignment must be >= sizeof(void*)
+- if(alignment < sizeof(void*))
+- {
+- alignment = sizeof(void*);
+- }
+-
+- void *pointer;
+- if(posix_memalign(&pointer, alignment, size) == 0)
+- return pointer;
+- return VMA_NULL;
+-}
+-#elif defined(_WIN32)
+-void* vma_aligned_alloc(size_t alignment, size_t size)
+-{
+- return _aligned_malloc(size, alignment);
+-}
+-#elif __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
+-void* vma_aligned_alloc(size_t alignment, size_t size)
+-{
+- return aligned_alloc(alignment, size);
+-}
+-#else
+-void* vma_aligned_alloc(size_t alignment, size_t size)
+-{
+- VMA_ASSERT(0 && "Could not implement aligned_alloc automatically. Please enable C++17 or later in your compiler or provide custom implementation of macro VMA_SYSTEM_ALIGNED_MALLOC (and VMA_SYSTEM_ALIGNED_FREE if needed) using the API of your system.");
+- return VMA_NULL;
+-}
+-#endif
+-
+-#if defined(_WIN32)
+-static void vma_aligned_free(void* ptr)
+-{
+- _aligned_free(ptr);
+-}
+-#else
+-static void vma_aligned_free(void* VMA_NULLABLE ptr)
+-{
+- free(ptr);
+-}
+-#endif
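+-
+-/* Usage sketch: all platform variants above share one contract. `alignment`
+-must be a power of two (the POSIX paths raise it to at least sizeof(void*)),
+-`size` should be a multiple of `alignment` for the C++17 aligned_alloc path,
+-and the result must be released with vma_aligned_free():
+-
+-    void* p = vma_aligned_alloc(64, 1024); // 1 KiB, 64-byte aligned
+-    if(p != VMA_NULL)
+-        vma_aligned_free(p);
+-*/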
+-
+-#ifndef VMA_ALIGN_OF
+- #define VMA_ALIGN_OF(type) (alignof(type))
+-#endif
+-
+-#ifndef VMA_SYSTEM_ALIGNED_MALLOC
+- #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
+-#endif
+-
+-#ifndef VMA_SYSTEM_ALIGNED_FREE
+- // VMA_SYSTEM_FREE is the old name, but might have been defined by the user
+- #if defined(VMA_SYSTEM_FREE)
+- #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr)
+- #else
+- #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr)
+- #endif
+-#endif
+-
+-#ifndef VMA_COUNT_BITS_SET
+- // Returns number of bits set to 1 in (v)
+- #define VMA_COUNT_BITS_SET(v) VmaCountBitsSet(v)
+-#endif
+-
+-#ifndef VMA_BITSCAN_LSB
+-    // Scans the integer for the index of the first nonzero bit, starting from the Least Significant Bit (LSB). Returns UINT8_MAX if the mask is 0.
+- #define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask)
+-#endif
+-
+-#ifndef VMA_BITSCAN_MSB
+-    // Scans the integer for the index of the first nonzero bit, starting from the Most Significant Bit (MSB). Returns UINT8_MAX if the mask is 0.
+- #define VMA_BITSCAN_MSB(mask) VmaBitScanMSB(mask)
+-#endif
+-
+-#ifndef VMA_MIN
+- #define VMA_MIN(v1, v2) ((std::min)((v1), (v2)))
+-#endif
+-
+-#ifndef VMA_MAX
+- #define VMA_MAX(v1, v2) ((std::max)((v1), (v2)))
+-#endif
+-
+-#ifndef VMA_SWAP
+- #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
+-#endif
+-
+-#ifndef VMA_SORT
+- #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
+-#endif
+-
+-#ifndef VMA_DEBUG_LOG_FORMAT
+- #define VMA_DEBUG_LOG_FORMAT(format, ...)
+- /*
+- #define VMA_DEBUG_LOG_FORMAT(format, ...) do { \
+- printf((format), __VA_ARGS__); \
+- printf("\n"); \
+- } while(false)
+- */
+-#endif
+-
+-#ifndef VMA_DEBUG_LOG
+- #define VMA_DEBUG_LOG(str) VMA_DEBUG_LOG_FORMAT("%s", (str))
+-#endif
+-
+-#ifndef VMA_CLASS_NO_COPY
+- #define VMA_CLASS_NO_COPY(className) \
+- private: \
+- className(const className&) = delete; \
+- className& operator=(const className&) = delete;
+-#endif
+-#ifndef VMA_CLASS_NO_COPY_NO_MOVE
+- #define VMA_CLASS_NO_COPY_NO_MOVE(className) \
+- private: \
+- className(const className&) = delete; \
+- className(className&&) = delete; \
+- className& operator=(const className&) = delete; \
+- className& operator=(className&&) = delete;
+-#endif
+-
+-// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
+-#if VMA_STATS_STRING_ENABLED
+- static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
+- {
+- snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
+- }
+- static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
+- {
+- snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
+- }
+- static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
+- {
+- snprintf(outStr, strLen, "%p", ptr);
+- }
+-#endif
+-
+-#ifndef VMA_MUTEX
+- class VmaMutex
+- {
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaMutex)
+- public:
+- VmaMutex() { }
+- void Lock() { m_Mutex.lock(); }
+- void Unlock() { m_Mutex.unlock(); }
+- bool TryLock() { return m_Mutex.try_lock(); }
+- private:
+- std::mutex m_Mutex;
+- };
+- #define VMA_MUTEX VmaMutex
+-#endif
+-
+-// Read-write mutex, where "read" is shared access, "write" is exclusive access.
+-#ifndef VMA_RW_MUTEX
+- #if VMA_USE_STL_SHARED_MUTEX
+- // Use std::shared_mutex from C++17.
+- #include <shared_mutex>
+- class VmaRWMutex
+- {
+- public:
+- void LockRead() { m_Mutex.lock_shared(); }
+- void UnlockRead() { m_Mutex.unlock_shared(); }
+- bool TryLockRead() { return m_Mutex.try_lock_shared(); }
+- void LockWrite() { m_Mutex.lock(); }
+- void UnlockWrite() { m_Mutex.unlock(); }
+- bool TryLockWrite() { return m_Mutex.try_lock(); }
+- private:
+- std::shared_mutex m_Mutex;
+- };
+- #define VMA_RW_MUTEX VmaRWMutex
+- #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
+- // Use SRWLOCK from WinAPI.
+- // Minimum supported client = Windows Vista, server = Windows Server 2008.
+- class VmaRWMutex
+- {
+- public:
+- VmaRWMutex() { InitializeSRWLock(&m_Lock); }
+- void LockRead() { AcquireSRWLockShared(&m_Lock); }
+- void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
+- bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
+- void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
+- void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
+- bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
+- private:
+- SRWLOCK m_Lock;
+- };
+- #define VMA_RW_MUTEX VmaRWMutex
+- #else
+- // Less efficient fallback: Use normal mutex.
+- class VmaRWMutex
+- {
+- public:
+- void LockRead() { m_Mutex.Lock(); }
+- void UnlockRead() { m_Mutex.Unlock(); }
+- bool TryLockRead() { return m_Mutex.TryLock(); }
+- void LockWrite() { m_Mutex.Lock(); }
+- void UnlockWrite() { m_Mutex.Unlock(); }
+- bool TryLockWrite() { return m_Mutex.TryLock(); }
+- private:
+- VMA_MUTEX m_Mutex;
+- };
+- #define VMA_RW_MUTEX VmaRWMutex
+- #endif // #if VMA_USE_STL_SHARED_MUTEX
+-#endif // #ifndef VMA_RW_MUTEX
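+-
+-/* Usage sketch of the common interface selected above; the same calls work
+-regardless of which implementation was picked:
+-
+-    VMA_RW_MUTEX rwMutex;
+-    rwMutex.LockRead();   // shared: multiple readers may hold it at once
+-    rwMutex.UnlockRead();
+-    rwMutex.LockWrite();  // exclusive
+-    rwMutex.UnlockWrite();
+-*/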
+-
+-/*
+-If providing your own implementation, you need to implement a subset of std::atomic.
+-*/
+-#ifndef VMA_ATOMIC_UINT32
+- #include <atomic>
+- #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
+-#endif
+-
+-#ifndef VMA_ATOMIC_UINT64
+- #include <atomic>
+- #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
+-#endif
+-
+-#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
+- /**
+- Every allocation will have its own memory block.
+- Define to 1 for debugging purposes only.
+- */
+- #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
+-#endif
+-
+-#ifndef VMA_MIN_ALIGNMENT
+- /**
+- Minimum alignment of all allocations, in bytes.
+- Set to more than 1 for debugging purposes. Must be power of two.
+- */
+- #ifdef VMA_DEBUG_ALIGNMENT // Old name
+- #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT
+- #else
+- #define VMA_MIN_ALIGNMENT (1)
+- #endif
+-#endif
+-
+-#ifndef VMA_DEBUG_MARGIN
+- /**
+- Minimum margin after every allocation, in bytes.
+- Set nonzero for debugging purposes only.
+- */
+- #define VMA_DEBUG_MARGIN (0)
+-#endif
+-
+-#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
+- /**
+- Define this macro to 1 to automatically fill new allocations and destroyed
+- allocations with some bit pattern.
+- */
+- #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
+-#endif
+-
+-#ifndef VMA_DEBUG_DETECT_CORRUPTION
+- /**
+- Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to
+- enable writing magic value to the margin after every allocation and
+- validating it, so that memory corruptions (out-of-bounds writes) are detected.
+- */
+- #define VMA_DEBUG_DETECT_CORRUPTION (0)
+-#endif
+-
+-#ifndef VMA_DEBUG_GLOBAL_MUTEX
+- /**
+-    Set this to 1 for debugging purposes only, to enable a single mutex protecting all
+- entry calls to the library. Can be useful for debugging multithreading issues.
+- */
+- #define VMA_DEBUG_GLOBAL_MUTEX (0)
+-#endif
+-
+-#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
+- /**
+- Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
+- Set to more than 1 for debugging purposes only. Must be power of two.
+- */
+- #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
+-#endif
+-
+-#ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
+- /*
+- Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount
+-    and return an error instead of leaving it up to the Vulkan implementation what to do in such cases.
+- */
+- #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0)
+-#endif
+-
+-#ifndef VMA_SMALL_HEAP_MAX_SIZE
+- /// Maximum size of a memory heap in Vulkan to consider it "small".
+- #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
+-#endif
+-
+-#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
+- /// Default size of a block allocated as single VkDeviceMemory from a "large" heap.
+- #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
+-#endif
+-
+-/*
+-Mapping hysteresis is logic that kicks in when vmaMapMemory/vmaUnmapMemory is called
+-or a persistently mapped allocation is created and destroyed several times in a row.
+-It keeps an additional +1 mapping of a device memory block to avoid calling the actual
+-vkMapMemory/vkUnmapMemory too many times, which may improve performance and help
+-tools like RenderDoc.
+-*/
+-#ifndef VMA_MAPPING_HYSTERESIS_ENABLED
+- #define VMA_MAPPING_HYSTERESIS_ENABLED 1
+-#endif
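+-
+-/* Behavioral sketch (not literal library code), assuming a valid `allocator`
+-and a host-visible allocation `alloc`:
+-
+-    void* ptr;
+-    vmaMapMemory(allocator, alloc, &ptr);  // vkMapMemory on first map
+-    vmaUnmapMemory(allocator, alloc);      // block may stay mapped (the +1)
+-    vmaMapMemory(allocator, alloc, &ptr);  // can reuse the kept mapping
+-    vmaUnmapMemory(allocator, alloc);
+-*/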
+-
+-#define VMA_VALIDATE(cond) do { if(!(cond)) { \
+- VMA_ASSERT(0 && "Validation failed: " #cond); \
+- return false; \
+- } } while(false)
+-
+-/*******************************************************************************
+-END OF CONFIGURATION
+-*/
+-#endif // _VMA_CONFIGURATION
+-
+-
+-static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
+-static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
+-// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
+-static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
+-
+-// Copy of some Vulkan definitions so we don't need to check their existence just to handle a few constants.
+-static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
+-static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
+-static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
+-static const uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200;
+-static const int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000;
+-static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
+-static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
+-static const uint32_t VMA_VENDOR_ID_AMD = 4098;
+-
+-// This one is tricky: the Vulkan specification defines this code as available since
+-// Vulkan 1.0, but Vulkan SDKs earlier than 1.2.131 don't actually define it.
+-// See pull request #207.
+-#define VK_ERROR_UNKNOWN_COPY ((VkResult)-13)
+-
+-
+-#if VMA_STATS_STRING_ENABLED
+-// Correspond to values of enum VmaSuballocationType.
+-static const char* VMA_SUBALLOCATION_TYPE_NAMES[] =
+-{
+- "FREE",
+- "UNKNOWN",
+- "BUFFER",
+- "IMAGE_UNKNOWN",
+- "IMAGE_LINEAR",
+- "IMAGE_OPTIMAL",
+-};
+-#endif
+-
+-static VkAllocationCallbacks VmaEmptyAllocationCallbacks =
+- { VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
+-
+-
+-#ifndef _VMA_ENUM_DECLARATIONS
+-
+-enum VmaSuballocationType
+-{
+- VMA_SUBALLOCATION_TYPE_FREE = 0,
+- VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
+- VMA_SUBALLOCATION_TYPE_BUFFER = 2,
+- VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
+- VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
+- VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
+- VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
+-};
+-
+-enum VMA_CACHE_OPERATION
+-{
+- VMA_CACHE_FLUSH,
+- VMA_CACHE_INVALIDATE
+-};
+-
+-enum class VmaAllocationRequestType
+-{
+- Normal,
+- TLSF,
+- // Used by "Linear" algorithm.
+- UpperAddress,
+- EndOf1st,
+- EndOf2nd,
+-};
+-
+-#endif // _VMA_ENUM_DECLARATIONS
+-
+-#ifndef _VMA_FORWARD_DECLARATIONS
+-// Opaque handle used by allocation algorithms to identify a single allocation in any conforming way.
+-VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaAllocHandle)
+-
+-struct VmaMutexLock;
+-struct VmaMutexLockRead;
+-struct VmaMutexLockWrite;
+-
+-template<typename T>
+-struct AtomicTransactionalIncrement;
+-
+-template<typename T>
+-struct VmaStlAllocator;
+-
+-template<typename T, typename AllocatorT>
+-class VmaVector;
+-
+-template<typename T, typename AllocatorT, size_t N>
+-class VmaSmallVector;
+-
+-template<typename T>
+-class VmaPoolAllocator;
+-
+-template<typename T>
+-struct VmaListItem;
+-
+-template<typename T>
+-class VmaRawList;
+-
+-template<typename T, typename AllocatorT>
+-class VmaList;
+-
+-template<typename ItemTypeTraits>
+-class VmaIntrusiveLinkedList;
+-
+-// Unused in this version
+-#if 0
+-template<typename T1, typename T2>
+-struct VmaPair;
+-template<typename FirstT, typename SecondT>
+-struct VmaPairFirstLess;
+-
+-template<typename KeyT, typename ValueT>
+-class VmaMap;
+-#endif
+-
+-#if VMA_STATS_STRING_ENABLED
+-class VmaStringBuilder;
+-class VmaJsonWriter;
+-#endif
+-
+-class VmaDeviceMemoryBlock;
+-
+-struct VmaDedicatedAllocationListItemTraits;
+-class VmaDedicatedAllocationList;
+-
+-struct VmaSuballocation;
+-struct VmaSuballocationOffsetLess;
+-struct VmaSuballocationOffsetGreater;
+-struct VmaSuballocationItemSizeLess;
+-
+-typedef VmaList<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> VmaSuballocationList;
+-
+-struct VmaAllocationRequest;
+-
+-class VmaBlockMetadata;
+-class VmaBlockMetadata_Linear;
+-class VmaBlockMetadata_TLSF;
+-
+-class VmaBlockVector;
+-
+-struct VmaPoolListItemTraits;
+-
+-struct VmaCurrentBudgetData;
+-
+-class VmaAllocationObjectAllocator;
+-
+-#endif // _VMA_FORWARD_DECLARATIONS
+-
+-
+-#ifndef _VMA_FUNCTIONS
+-
+-/*
+-Returns the number of bits set to 1 in (v).
+-
+-On specific platforms and compilers you can use intrinsics like:
+-
+-Visual Studio:
+-    return __popcnt(v);
+-GCC, Clang:
+-    return static_cast<uint32_t>(__builtin_popcount(v));
+-
+-Define the macro VMA_COUNT_BITS_SET to provide your optimized implementation,
+-but then you need to check at runtime whether the user's CPU supports these, as some old processors don't.
+-*/
+-static inline uint32_t VmaCountBitsSet(uint32_t v)
+-{
+-#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
+- return std::popcount(v);
+-#else
+- uint32_t c = v - ((v >> 1) & 0x55555555);
+- c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
+- c = ((c >> 4) + c) & 0x0F0F0F0F;
+- c = ((c >> 8) + c) & 0x00FF00FF;
+- c = ((c >> 16) + c) & 0x0000FFFF;
+- return c;
+-#endif
+-}
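+-
+-// Worked example: VmaCountBitsSet(0xF0u) == 4 and VmaCountBitsSet(0u) == 0.
+-// The fallback path computes the same result as std::popcount via parallel
+-// (SWAR) partial sums over 1-, 2-, 4-, 8- and 16-bit groups.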
+-
+-static inline uint8_t VmaBitScanLSB(uint64_t mask)
+-{
+-#if defined(_MSC_VER) && defined(_WIN64)
+- unsigned long pos;
+- if (_BitScanForward64(&pos, mask))
+- return static_cast<uint8_t>(pos);
+- return UINT8_MAX;
+-#elif defined __GNUC__ || defined __clang__
+- return static_cast<uint8_t>(__builtin_ffsll(mask)) - 1U;
+-#else
+- uint8_t pos = 0;
+- uint64_t bit = 1;
+- do
+- {
+- if (mask & bit)
+- return pos;
+- bit <<= 1;
+- } while (pos++ < 63);
+- return UINT8_MAX;
+-#endif
+-}
+-
+-static inline uint8_t VmaBitScanLSB(uint32_t mask)
+-{
+-#ifdef _MSC_VER
+- unsigned long pos;
+- if (_BitScanForward(&pos, mask))
+- return static_cast<uint8_t>(pos);
+- return UINT8_MAX;
+-#elif defined __GNUC__ || defined __clang__
+- return static_cast<uint8_t>(__builtin_ffs(mask)) - 1U;
+-#else
+- uint8_t pos = 0;
+- uint32_t bit = 1;
+- do
+- {
+- if (mask & bit)
+- return pos;
+- bit <<= 1;
+- } while (pos++ < 31);
+- return UINT8_MAX;
+-#endif
+-}
+-
+-static inline uint8_t VmaBitScanMSB(uint64_t mask)
+-{
+-#if defined(_MSC_VER) && defined(_WIN64)
+- unsigned long pos;
+- if (_BitScanReverse64(&pos, mask))
+- return static_cast<uint8_t>(pos);
+-#elif defined __GNUC__ || defined __clang__
+- if (mask)
+- return 63 - static_cast<uint8_t>(__builtin_clzll(mask));
+-#else
+- uint8_t pos = 63;
+- uint64_t bit = 1ULL << 63;
+- do
+- {
+- if (mask & bit)
+- return pos;
+- bit >>= 1;
+- } while (pos-- > 0);
+-#endif
+- return UINT8_MAX;
+-}
+-
+-static inline uint8_t VmaBitScanMSB(uint32_t mask)
+-{
+-#ifdef _MSC_VER
+- unsigned long pos;
+- if (_BitScanReverse(&pos, mask))
+- return static_cast<uint8_t>(pos);
+-#elif defined __GNUC__ || defined __clang__
+- if (mask)
+- return 31 - static_cast<uint8_t>(__builtin_clz(mask));
+-#else
+- uint8_t pos = 31;
+- uint32_t bit = 1UL << 31;
+- do
+- {
+- if (mask & bit)
+- return pos;
+- bit >>= 1;
+- } while (pos-- > 0);
+-#endif
+- return UINT8_MAX;
+-}
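+-
+-// Example values for the overloads above: VmaBitScanLSB(0x8u) == 3,
+-// VmaBitScanMSB(0x8u) == 3, VmaBitScanMSB(0x9u) == 3, and all of them
+-// return UINT8_MAX for a zero mask.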
+-
+-/*
+-Returns true if the given number is a power of two.
+-T must be an unsigned integer type, or a signed integer type whose value is always nonnegative.
+-Returns true for 0.
+-*/
+-template <typename T>
+-inline bool VmaIsPow2(T x)
+-{
+- return (x & (x - 1)) == 0;
+-}
+-
+-// Aligns the given value up to the nearest multiple of the align value. For example: VmaAlignUp(11, 8) = 16.
+-// Use types like uint32_t, uint64_t as T.
+-template <typename T>
+-static inline T VmaAlignUp(T val, T alignment)
+-{
+- VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
+- return (val + alignment - 1) & ~(alignment - 1);
+-}
+-
+-// Aligns the given value down to the nearest multiple of the align value. For example: VmaAlignDown(11, 8) = 8.
+-// Use types like uint32_t, uint64_t as T.
+-template <typename T>
+-static inline T VmaAlignDown(T val, T alignment)
+-{
+- VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
+- return val & ~(alignment - 1);
+-}
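+-
+-// Worked examples for the helpers above: VmaAlignUp<uint32_t>(11, 8) == 16,
+-// since (11 + 7) & ~7 == 16, and VmaAlignDown<uint32_t>(11, 8) == 8, since
+-// 11 & ~7 == 8. Both rely on `alignment` being a power of two.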
+-
+-// Division with mathematical rounding to nearest number.
+-template <typename T>
+-static inline T VmaRoundDiv(T x, T y)
+-{
+- return (x + (y / (T)2)) / y;
+-}
+-
+-// Divide by 'y' and round up to nearest integer.
+-template <typename T>
+-static inline T VmaDivideRoundingUp(T x, T y)
+-{
+- return (x + y - (T)1) / y;
+-}
+-
+-// Returns the smallest power of 2 greater than or equal to v.
+-static inline uint32_t VmaNextPow2(uint32_t v)
+-{
+- v--;
+- v |= v >> 1;
+- v |= v >> 2;
+- v |= v >> 4;
+- v |= v >> 8;
+- v |= v >> 16;
+- v++;
+- return v;
+-}
+-
+-static inline uint64_t VmaNextPow2(uint64_t v)
+-{
+- v--;
+- v |= v >> 1;
+- v |= v >> 2;
+- v |= v >> 4;
+- v |= v >> 8;
+- v |= v >> 16;
+- v |= v >> 32;
+- v++;
+- return v;
+-}
+-
+-// Returns the largest power of 2 less than or equal to v.
+-static inline uint32_t VmaPrevPow2(uint32_t v)
+-{
+- v |= v >> 1;
+- v |= v >> 2;
+- v |= v >> 4;
+- v |= v >> 8;
+- v |= v >> 16;
+- v = v ^ (v >> 1);
+- return v;
+-}
+-
+-static inline uint64_t VmaPrevPow2(uint64_t v)
+-{
+- v |= v >> 1;
+- v |= v >> 2;
+- v |= v >> 4;
+- v |= v >> 8;
+- v |= v >> 16;
+- v |= v >> 32;
+- v = v ^ (v >> 1);
+- return v;
+-}
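+-
+-// Worked examples: VmaNextPow2(17u) == 32 (v-- gives 16, the shifts smear it
+-// to 0b11111 == 31, then v++ gives 32), while VmaNextPow2(16u) == 16, so
+-// exact powers of two are returned unchanged. VmaPrevPow2(17u) == 16:
+-// smearing yields 31, and 31 ^ (31 >> 1) keeps only the highest set bit.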
+-
+-static inline bool VmaStrIsEmpty(const char* pStr)
+-{
+- return pStr == VMA_NULL || *pStr == '\0';
+-}
+-
+-/*
+-Returns true if two memory blocks occupy overlapping pages.
+-ResourceA must be at a lower memory offset than ResourceB.
+-
+-The algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
+-chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
+-*/
+-static inline bool VmaBlocksOnSamePage(
+- VkDeviceSize resourceAOffset,
+- VkDeviceSize resourceASize,
+- VkDeviceSize resourceBOffset,
+- VkDeviceSize pageSize)
+-{
+- VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
+- VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
+- VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
+- VkDeviceSize resourceBStart = resourceBOffset;
+- VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
+- return resourceAEndPage == resourceBStartPage;
+-}
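+-
+-// Example, assuming pageSize == 4096: a resource at offset 4000 with size 100
+-// ends at byte 4099, which lies on the page starting at 4096; a second
+-// resource at offset 4100 starts on that same page, so the function returns
+-// true. With resourceBOffset == 8192 it would return false.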
+-
+-/*
+-Returns true if the given suballocation types could conflict and must respect
+-VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
+-or linear image and the other is an optimal image. If a type is unknown, behaves
+-conservatively.
+-*/
+-static inline bool VmaIsBufferImageGranularityConflict(
+- VmaSuballocationType suballocType1,
+- VmaSuballocationType suballocType2)
+-{
+- if (suballocType1 > suballocType2)
+- {
+- VMA_SWAP(suballocType1, suballocType2);
+- }
+-
+- switch (suballocType1)
+- {
+- case VMA_SUBALLOCATION_TYPE_FREE:
+- return false;
+- case VMA_SUBALLOCATION_TYPE_UNKNOWN:
+- return true;
+- case VMA_SUBALLOCATION_TYPE_BUFFER:
+- return
+- suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
+- suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
+- case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
+- return
+- suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
+- suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
+- suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
+- case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
+- return
+- suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
+- case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
+- return false;
+- default:
+- VMA_ASSERT(0);
+- return true;
+- }
+-}
+-
+-static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
+-{
+-#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
+- uint32_t* pDst = (uint32_t*)((char*)pData + offset);
+- const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
+- for (size_t i = 0; i < numberCount; ++i, ++pDst)
+- {
+- *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
+- }
+-#else
+- // no-op
+-#endif
+-}
+-
+-static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
+-{
+-#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
+- const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
+- const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
+- for (size_t i = 0; i < numberCount; ++i, ++pSrc)
+- {
+- if (*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
+- {
+- return false;
+- }
+- }
+-#endif
+- return true;
+-}
+-
+-/*
+-Fills the structure with parameters of an example buffer to be used for transfers
+-during GPU memory defragmentation.
+-*/
+-static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
+-{
+- memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
+- outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+- outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+- outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
+-}
+-
+-
+-/*
+-Performs a binary search and returns an iterator to the first element that is
+-greater than or equal to (key), according to the comparison (cmp).
+-
+-Cmp should return true if its first argument is less than its second argument.
+-
+-The returned iterator points to the found element if it is present in the
+-collection, or to the place where a new element with value (key) should be inserted.
+-*/
+-template <typename CmpLess, typename IterT, typename KeyT>
+-static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp)
+-{
+- size_t down = 0, up = size_t(end - beg);
+- while (down < up)
+- {
+- const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation
+- if (cmp(*(beg + mid), key))
+- {
+- down = mid + 1;
+- }
+- else
+- {
+- up = mid;
+- }
+- }
+- return beg + down;
+-}
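+-
+-/* Usage sketch; the function behaves like std::lower_bound. Assuming a
+-sorted plain array:
+-
+-    const uint32_t arr[] = { 1, 3, 3, 7 };
+-    const uint32_t* it = VmaBinaryFindFirstNotLess(
+-        arr, arr + 4, 3u,
+-        [](uint32_t lhs, uint32_t rhs) { return lhs < rhs; });
+-    // it now points to arr[1] (the first 3); for key 4u it would point
+-    // to arr[3] (the 7), which is the correct insertion position.
+-*/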
+-
+-template<typename CmpLess, typename IterT, typename KeyT>
+-IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
+-{
+- IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
+- beg, end, value, cmp);
+- if (it == end ||
+- (!cmp(*it, value) && !cmp(value, *it)))
+- {
+- return it;
+- }
+- return end;
+-}
+-
+-/*
+-Returns true if all pointers in the array are non-null and unique.
+-Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
+-T must be a pointer type, e.g. VmaAllocation, VmaPool.
+-*/
+-template<typename T>
+-static bool VmaValidatePointerArray(uint32_t count, const T* arr)
+-{
+- for (uint32_t i = 0; i < count; ++i)
+- {
+- const T iPtr = arr[i];
+- if (iPtr == VMA_NULL)
+- {
+- return false;
+- }
+- for (uint32_t j = i + 1; j < count; ++j)
+- {
+- if (iPtr == arr[j])
+- {
+- return false;
+- }
+- }
+- }
+- return true;
+-}
+-
+-template<typename MainT, typename NewT>
+-static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
+-{
+- newStruct->pNext = mainStruct->pNext;
+- mainStruct->pNext = newStruct;
+-}
+-
+-// This is the main algorithm that guides the selection of the memory type best
+-// suited for an allocation - it converts usage to required/preferred/not-preferred flags.
+-static bool FindMemoryPreferences(
+- bool isIntegratedGPU,
+- const VmaAllocationCreateInfo& allocCreateInfo,
+- VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown.
+- VkMemoryPropertyFlags& outRequiredFlags,
+- VkMemoryPropertyFlags& outPreferredFlags,
+- VkMemoryPropertyFlags& outNotPreferredFlags)
+-{
+- outRequiredFlags = allocCreateInfo.requiredFlags;
+- outPreferredFlags = allocCreateInfo.preferredFlags;
+- outNotPreferredFlags = 0;
+-
+- switch(allocCreateInfo.usage)
+- {
+- case VMA_MEMORY_USAGE_UNKNOWN:
+- break;
+- case VMA_MEMORY_USAGE_GPU_ONLY:
+- if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+- {
+- outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+- }
+- break;
+- case VMA_MEMORY_USAGE_CPU_ONLY:
+- outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+- break;
+- case VMA_MEMORY_USAGE_CPU_TO_GPU:
+- outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+- if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+- {
+- outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+- }
+- break;
+- case VMA_MEMORY_USAGE_GPU_TO_CPU:
+- outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+- outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+- break;
+- case VMA_MEMORY_USAGE_CPU_COPY:
+- outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+- break;
+- case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
+- outRequiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
+- break;
+- case VMA_MEMORY_USAGE_AUTO:
+- case VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE:
+- case VMA_MEMORY_USAGE_AUTO_PREFER_HOST:
+- {
+- if(bufImgUsage == UINT32_MAX)
+- {
+- VMA_ASSERT(0 && "VMA_MEMORY_USAGE_AUTO* values can only be used with functions like vmaCreateBuffer, vmaCreateImage so that the details of the created resource are known.");
+- return false;
+- }
+-        // This relies on the values of VK_IMAGE_USAGE_TRANSFER* being the same as VK_BUFFER_USAGE_TRANSFER*.
+- const bool deviceAccess = (bufImgUsage & ~(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0;
+- const bool hostAccessSequentialWrite = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT) != 0;
+- const bool hostAccessRandom = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) != 0;
+- const bool hostAccessAllowTransferInstead = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) != 0;
+- const bool preferDevice = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
+- const bool preferHost = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST;
+-
+- // CPU random access - e.g. a buffer written to or transferred from GPU to read back on CPU.
+- if(hostAccessRandom)
+- {
+- if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
+- {
+- // Nice if it will end up in HOST_VISIBLE, but more importantly prefer DEVICE_LOCAL.
+- // Omitting HOST_VISIBLE here is intentional.
+- // In case there is DEVICE_LOCAL | HOST_VISIBLE | HOST_CACHED, it will pick that one.
+-                // Otherwise, this will give the same weight to DEVICE_LOCAL as to HOST_VISIBLE | HOST_CACHED, and select the former if it occurs first on the list.
+- outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+- }
+- else
+- {
+- // Always CPU memory, cached.
+- outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+- }
+- }
+- // CPU sequential write - may be CPU or host-visible GPU memory, uncached and write-combined.
+- else if(hostAccessSequentialWrite)
+- {
+- // Want uncached and write-combined.
+- outNotPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+-
+- if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
+- {
+- outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+- }
+- else
+- {
+- outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+- // Direct GPU access, CPU sequential write (e.g. a dynamic uniform buffer updated every frame)
+- if(deviceAccess)
+- {
+- // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose GPU memory.
+- if(preferHost)
+- outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+- else
+- outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+- }
+- // GPU no direct access, CPU sequential write (e.g. an upload buffer to be transferred to the GPU)
+- else
+- {
+- // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose CPU memory.
+- if(preferDevice)
+- outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+- else
+- outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+- }
+- }
+- }
+- // No CPU access
+- else
+- {
+- // if(deviceAccess)
+- //
+- // GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory,
+- // unless there is a clear preference from the user not to do so.
+- //
+- // else:
+- //
+- // No direct GPU access, no CPU access, just transfers.
+-            // It may be a staging copy intended for e.g. preserving an image for the next frame (then GPU memory is better) or
+-            // a "swap file" copy to free some GPU memory (then CPU memory is better).
+-            // Up to the user to decide. If no preference, assume the former and choose GPU memory.
+-
+- if(preferHost)
+- outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+- else
+- outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+- }
+- break;
+- }
+- default:
+- VMA_ASSERT(0);
+- }
+-
+- // Avoid DEVICE_COHERENT unless explicitly requested.
+- if(((allocCreateInfo.requiredFlags | allocCreateInfo.preferredFlags) &
+- (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
+- {
+- outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY;
+- }
+-
+- return true;
+-}
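+-
+-/* Minimal usage sketch, assuming a discrete GPU and a uniform buffer that the
+-CPU rewrites sequentially every frame:
+-
+-    VmaAllocationCreateInfo allocCreateInfo = {};
+-    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+-    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
+-
+-    VkMemoryPropertyFlags required, preferred, notPreferred;
+-    FindMemoryPreferences(false, allocCreateInfo,
+-        VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, required, preferred, notPreferred);
+-    // required gains HOST_VISIBLE, preferred gains DEVICE_LOCAL, and
+-    // notPreferred gains HOST_CACHED (write-combined memory is wanted)
+-    // plus the AMD DEVICE_UNCACHED bit avoided by default.
+-*/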
+-
+-////////////////////////////////////////////////////////////////////////////////
+-// Memory allocation
+-
+-static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
+-{
+- void* result = VMA_NULL;
+- if ((pAllocationCallbacks != VMA_NULL) &&
+- (pAllocationCallbacks->pfnAllocation != VMA_NULL))
+- {
+- result = (*pAllocationCallbacks->pfnAllocation)(
+- pAllocationCallbacks->pUserData,
+- size,
+- alignment,
+- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+- }
+- else
+- {
+- result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
+- }
+- VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
+- return result;
+-}
+-
+-static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
+-{
+- if ((pAllocationCallbacks != VMA_NULL) &&
+- (pAllocationCallbacks->pfnFree != VMA_NULL))
+- {
+- (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
+- }
+- else
+- {
+- VMA_SYSTEM_ALIGNED_FREE(ptr);
+- }
+-}
+-
+-template<typename T>
+-static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
+-{
+- return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
+-}
+-
+-template<typename T>
+-static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
+-{
+- return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
+-}
+-
+-#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
+-
+-#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
+-
+-template<typename T>
+-static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
+-{
+- ptr->~T();
+- VmaFree(pAllocationCallbacks, ptr);
+-}
+-
+-template<typename T>
+-static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
+-{
+- if (ptr != VMA_NULL)
+- {
+- for (size_t i = count; i--; )
+- {
+- ptr[i].~T();
+- }
+- VmaFree(pAllocationCallbacks, ptr);
+- }
+-}
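+-
+-/* Usage sketch pairing the helpers above (placement-new construction plus an
+-explicit destructor call), assuming `allocs` is a const VkAllocationCallbacks*:
+-
+-    VmaMutex* m = vma_new(allocs, VmaMutex)();
+-    vma_delete(allocs, m);
+-
+-    char* buf = vma_new_array(allocs, char, 16);
+-    vma_delete_array(allocs, buf, 16);
+-*/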
+-
+-static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
+-{
+- if (srcStr != VMA_NULL)
+- {
+- const size_t len = strlen(srcStr);
+- char* const result = vma_new_array(allocs, char, len + 1);
+- memcpy(result, srcStr, len + 1);
+- return result;
+- }
+- return VMA_NULL;
+-}
+-
+-#if VMA_STATS_STRING_ENABLED
+-static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen)
+-{
+- if (srcStr != VMA_NULL)
+- {
+- char* const result = vma_new_array(allocs, char, strLen + 1);
+- memcpy(result, srcStr, strLen);
+- result[strLen] = '\0';
+- return result;
+- }
+- return VMA_NULL;
+-}
+-#endif // VMA_STATS_STRING_ENABLED
+-
+-static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
+-{
+- if (str != VMA_NULL)
+- {
+- const size_t len = strlen(str);
+- vma_delete_array(allocs, str, len + 1);
+- }
+-}
+-
+-template<typename CmpLess, typename VectorT>
+-size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
+-{
+- const size_t indexToInsert = VmaBinaryFindFirstNotLess(
+- vector.data(),
+- vector.data() + vector.size(),
+- value,
+- CmpLess()) - vector.data();
+- VmaVectorInsert(vector, indexToInsert, value);
+- return indexToInsert;
+-}
+-
+-template<typename CmpLess, typename VectorT>
+-bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
+-{
+- CmpLess comparator;
+- typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
+- vector.begin(),
+- vector.end(),
+- value,
+- comparator);
+- if ((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
+- {
+- size_t indexToRemove = it - vector.begin();
+- VmaVectorRemove(vector, indexToRemove);
+- return true;
+- }
+- return false;
+-}
+-#endif // _VMA_FUNCTIONS
+-
+-#ifndef _VMA_STATISTICS_FUNCTIONS
+-
+-static void VmaClearStatistics(VmaStatistics& outStats)
+-{
+- outStats.blockCount = 0;
+- outStats.allocationCount = 0;
+- outStats.blockBytes = 0;
+- outStats.allocationBytes = 0;
+-}
+-
+-static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src)
+-{
+- inoutStats.blockCount += src.blockCount;
+- inoutStats.allocationCount += src.allocationCount;
+- inoutStats.blockBytes += src.blockBytes;
+- inoutStats.allocationBytes += src.allocationBytes;
+-}
+-
+-static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats)
+-{
+- VmaClearStatistics(outStats.statistics);
+- outStats.unusedRangeCount = 0;
+- outStats.allocationSizeMin = VK_WHOLE_SIZE;
+- outStats.allocationSizeMax = 0;
+- outStats.unusedRangeSizeMin = VK_WHOLE_SIZE;
+- outStats.unusedRangeSizeMax = 0;
+-}
+-
+-static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
+-{
+- inoutStats.statistics.allocationCount++;
+- inoutStats.statistics.allocationBytes += size;
+- inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, size);
+- inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size);
+-}
+-
+-static void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
+-{
+- inoutStats.unusedRangeCount++;
+- inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size);
+- inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size);
+-}
+-
+-static void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src)
+-{
+- VmaAddStatistics(inoutStats.statistics, src.statistics);
+- inoutStats.unusedRangeCount += src.unusedRangeCount;
+- inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, src.allocationSizeMin);
+- inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, src.allocationSizeMax);
+- inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, src.unusedRangeSizeMin);
+- inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, src.unusedRangeSizeMax);
+-}
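+-
+-// Note on the fold pattern above: VmaClearDetailedStatistics() seeds the mins
+-// with VK_WHOLE_SIZE and the maxes with 0, so the first add establishes real
+-// bounds; e.g. after clearing and adding a single 256-byte allocation,
+-// allocationSizeMin == allocationSizeMax == 256.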
+-
+-#endif // _VMA_STATISTICS_FUNCTIONS
+-
+-#ifndef _VMA_MUTEX_LOCK
+-// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
+-struct VmaMutexLock
+-{
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLock)
+-public:
+- VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
+- m_pMutex(useMutex ? &mutex : VMA_NULL)
+- {
+- if (m_pMutex) { m_pMutex->Lock(); }
+- }
+- ~VmaMutexLock() { if (m_pMutex) { m_pMutex->Unlock(); } }
+-
+-private:
+- VMA_MUTEX* m_pMutex;
+-};
+-
+-// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
+-struct VmaMutexLockRead
+-{
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockRead)
+-public:
+- VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
+- m_pMutex(useMutex ? &mutex : VMA_NULL)
+- {
+- if (m_pMutex) { m_pMutex->LockRead(); }
+- }
+- ~VmaMutexLockRead() { if (m_pMutex) { m_pMutex->UnlockRead(); } }
+-
+-private:
+- VMA_RW_MUTEX* m_pMutex;
+-};
+-
+-// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
+-struct VmaMutexLockWrite
+-{
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockWrite)
+-public:
+- VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex)
+- : m_pMutex(useMutex ? &mutex : VMA_NULL)
+- {
+- if (m_pMutex) { m_pMutex->LockWrite(); }
+- }
+- ~VmaMutexLockWrite() { if (m_pMutex) { m_pMutex->UnlockWrite(); } }
+-
+-private:
+- VMA_RW_MUTEX* m_pMutex;
+-};
+-
+-#if VMA_DEBUG_GLOBAL_MUTEX
+- static VMA_MUTEX gDebugGlobalMutex;
+- #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
+-#else
+- #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-#endif
+-#endif // _VMA_MUTEX_LOCK
+-
+-#ifndef _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
+-// An object that increments a given atomic but decrements it back in the destructor unless Commit() is called.
+-template<typename AtomicT>
+-struct AtomicTransactionalIncrement
+-{
+-public:
+- using T = decltype(AtomicT().load());
+-
+- ~AtomicTransactionalIncrement()
+- {
+- if(m_Atomic)
+- --(*m_Atomic);
+- }
+-
+- void Commit() { m_Atomic = nullptr; }
+- T Increment(AtomicT* atomic)
+- {
+- m_Atomic = atomic;
+- return m_Atomic->fetch_add(1);
+- }
+-
+-private:
+- AtomicT* m_Atomic = nullptr;
+-};
+-#endif // _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
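+-
+-/* Usage sketch: keep the increment only if the whole operation succeeds.
+-`m_DeviceMemoryCount` and TryAllocateInternal() are hypothetical names used
+-only for illustration:
+-
+-    AtomicTransactionalIncrement<VMA_ATOMIC_UINT32> deviceMemCountIncrement;
+-    deviceMemCountIncrement.Increment(&m_DeviceMemoryCount);
+-    if(TryAllocateInternal() != VK_SUCCESS)
+-        return VK_ERROR_OUT_OF_DEVICE_MEMORY; // destructor rolls back
+-    deviceMemCountIncrement.Commit(); // keep the increment
+-*/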
+-
+-#ifndef _VMA_STL_ALLOCATOR
+-// STL-compatible allocator.
+-template<typename T>
+-struct VmaStlAllocator
+-{
+- const VkAllocationCallbacks* const m_pCallbacks;
+- typedef T value_type;
+-
+- VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) {}
+- template<typename U>
+- VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) {}
+- VmaStlAllocator(const VmaStlAllocator&) = default;
+- VmaStlAllocator& operator=(const VmaStlAllocator&) = delete;
+-
+- T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
+- void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
+-
+- template<typename U>
+- bool operator==(const VmaStlAllocator<U>& rhs) const
+- {
+- return m_pCallbacks == rhs.m_pCallbacks;
+- }
+- template<typename U>
+- bool operator!=(const VmaStlAllocator<U>& rhs) const
+- {
+- return m_pCallbacks != rhs.m_pCallbacks;
+- }
+-};
+-#endif // _VMA_STL_ALLOCATOR
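+-
+-/* Usage sketch, assuming `allocs` is a const VkAllocationCallbacks*. The
+-allocator carries no state beyond the callbacks pointer, so it plugs cheaply
+-into the containers below:
+-
+-    VmaVector<uint32_t, VmaStlAllocator<uint32_t>> v(
+-        VmaStlAllocator<uint32_t>(allocs));
+-    v.push_back(42u);
+-*/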
+-
+-#ifndef _VMA_VECTOR
+-/* Class with an interface compatible with a subset of std::vector.
+-T must be POD because constructors and destructors are not called and memcpy is
+-used for these objects. */
+-template<typename T, typename AllocatorT>
+-class VmaVector
+-{
+-public:
+- typedef T value_type;
+- typedef T* iterator;
+- typedef const T* const_iterator;
+-
+- VmaVector(const AllocatorT& allocator);
+- VmaVector(size_t count, const AllocatorT& allocator);
+- // This version of the constructor is here for compatibility with pre-C++14 std::vector.
+- // value is unused.
+- VmaVector(size_t count, const T& value, const AllocatorT& allocator) : VmaVector(count, allocator) {}
+- VmaVector(const VmaVector<T, AllocatorT>& src);
+- VmaVector& operator=(const VmaVector& rhs);
+- ~VmaVector() { VmaFree(m_Allocator.m_pCallbacks, m_pArray); }
+-
+- bool empty() const { return m_Count == 0; }
+- size_t size() const { return m_Count; }
+- T* data() { return m_pArray; }
+- T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }
+- T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }
+- const T* data() const { return m_pArray; }
+- const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }
+- const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }
+-
+- iterator begin() { return m_pArray; }
+- iterator end() { return m_pArray + m_Count; }
+- const_iterator cbegin() const { return m_pArray; }
+- const_iterator cend() const { return m_pArray + m_Count; }
+- const_iterator begin() const { return cbegin(); }
+- const_iterator end() const { return cend(); }
+-
+- void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }
+- void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }
+- void push_front(const T& src) { insert(0, src); }
+-
+- void push_back(const T& src);
+- void reserve(size_t newCapacity, bool freeMemory = false);
+- void resize(size_t newCount);
+- void clear() { resize(0); }
+- void shrink_to_fit();
+- void insert(size_t index, const T& src);
+- void remove(size_t index);
+-
+- T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }
+- const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }
+-
+-private:
+- AllocatorT m_Allocator;
+- T* m_pArray;
+- size_t m_Count;
+- size_t m_Capacity;
+-};
+-
+-#ifndef _VMA_VECTOR_FUNCTIONS
+-template<typename T, typename AllocatorT>
+-VmaVector<T, AllocatorT>::VmaVector(const AllocatorT& allocator)
+- : m_Allocator(allocator),
+- m_pArray(VMA_NULL),
+- m_Count(0),
+- m_Capacity(0) {}
+-
+-template<typename T, typename AllocatorT>
+-VmaVector<T, AllocatorT>::VmaVector(size_t count, const AllocatorT& allocator)
+- : m_Allocator(allocator),
+- m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
+- m_Count(count),
+- m_Capacity(count) {}
+-
+-template<typename T, typename AllocatorT>
+-VmaVector<T, AllocatorT>::VmaVector(const VmaVector& src)
+- : m_Allocator(src.m_Allocator),
+- m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
+- m_Count(src.m_Count),
+- m_Capacity(src.m_Count)
+-{
+- if (m_Count != 0)
+- {
+- memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
+- }
+-}
+-
+-template<typename T, typename AllocatorT>
+-VmaVector<T, AllocatorT>& VmaVector<T, AllocatorT>::operator=(const VmaVector& rhs)
+-{
+- if (&rhs != this)
+- {
+- resize(rhs.m_Count);
+- if (m_Count != 0)
+- {
+- memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
+- }
+- }
+- return *this;
+-}
+-
+-template<typename T, typename AllocatorT>
+-void VmaVector<T, AllocatorT>::push_back(const T& src)
+-{
+- const size_t newIndex = size();
+- resize(newIndex + 1);
+- m_pArray[newIndex] = src;
+-}
+-
+-template<typename T, typename AllocatorT>
+-void VmaVector<T, AllocatorT>::reserve(size_t newCapacity, bool freeMemory)
+-{
+- newCapacity = VMA_MAX(newCapacity, m_Count);
+-
+- if ((newCapacity < m_Capacity) && !freeMemory)
+- {
+- newCapacity = m_Capacity;
+- }
+-
+- if (newCapacity != m_Capacity)
+- {
+- T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
+- if (m_Count != 0)
+- {
+- memcpy(newArray, m_pArray, m_Count * sizeof(T));
+- }
+- VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+- m_Capacity = newCapacity;
+- m_pArray = newArray;
+- }
+-}
+-
+-template<typename T, typename AllocatorT>
+-void VmaVector<T, AllocatorT>::resize(size_t newCount)
+-{
+- size_t newCapacity = m_Capacity;
+- if (newCount > m_Capacity)
+- {
+- newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
+- }
+-
+- if (newCapacity != m_Capacity)
+- {
+- T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
+- const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
+- if (elementsToCopy != 0)
+- {
+- memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
+- }
+- VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+- m_Capacity = newCapacity;
+- m_pArray = newArray;
+- }
+-
+- m_Count = newCount;
+-}
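+-
+-// Note on the growth policy above: when newCount exceeds the current capacity,
+-// the new capacity becomes max(newCount, max(capacity * 3 / 2, 8)), i.e. 1.5x
+-// geometric growth with a floor of 8 elements, keeping push_back() amortized O(1).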
+-
+-template<typename T, typename AllocatorT>
+-void VmaVector<T, AllocatorT>::shrink_to_fit()
+-{
+- if (m_Capacity > m_Count)
+- {
+- T* newArray = VMA_NULL;
+- if (m_Count > 0)
+- {
+- newArray = VmaAllocateArray<T>(m_Allocator.m_pCallbacks, m_Count);
+- memcpy(newArray, m_pArray, m_Count * sizeof(T));
+- }
+- VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+- m_Capacity = m_Count;
+- m_pArray = newArray;
+- }
+-}
+-
+-template<typename T, typename AllocatorT>
+-void VmaVector<T, AllocatorT>::insert(size_t index, const T& src)
+-{
+- VMA_HEAVY_ASSERT(index <= m_Count);
+- const size_t oldCount = size();
+- resize(oldCount + 1);
+- if (index < oldCount)
+- {
+- memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
+- }
+- m_pArray[index] = src;
+-}
+-
+-template<typename T, typename AllocatorT>
+-void VmaVector<T, AllocatorT>::remove(size_t index)
+-{
+- VMA_HEAVY_ASSERT(index < m_Count);
+- const size_t oldCount = size();
+- if (index < oldCount - 1)
+- {
+- memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
+- }
+- resize(oldCount - 1);
+-}
+-#endif // _VMA_VECTOR_FUNCTIONS
+-
+-template<typename T, typename allocatorT>
+-static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
+-{
+- vec.insert(index, item);
+-}
+-
+-template<typename T, typename allocatorT>
+-static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
+-{
+- vec.remove(index);
+-}
+-#endif // _VMA_VECTOR
+-
+-#ifndef _VMA_SMALL_VECTOR
+-/*
+-This is a vector (a variable-sized array), optimized for the case when the array is small.
+-
+-It contains some number of elements in-place, which allows it to avoid heap allocation
+-when the actual number of elements is below that threshold. This allows normal "small"
+-cases to be fast without losing generality for large inputs.
+-*/
+-template<typename T, typename AllocatorT, size_t N>
+-class VmaSmallVector
+-{
+-public:
+- typedef T value_type;
+- typedef T* iterator;
+-
+- VmaSmallVector(const AllocatorT& allocator);
+- VmaSmallVector(size_t count, const AllocatorT& allocator);
+- template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
+- VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;
+- template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
+- VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;
+- ~VmaSmallVector() = default;
+-
+- bool empty() const { return m_Count == 0; }
+- size_t size() const { return m_Count; }
+- T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
+- T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }
+- T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }
+- const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
+- const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }
+- const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }
+-
+- iterator begin() { return data(); }
+- iterator end() { return data() + m_Count; }
+-
+- void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }
+- void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }
+- void push_front(const T& src) { insert(0, src); }
+-
+- void push_back(const T& src);
+- void resize(size_t newCount, bool freeMemory = false);
+- void clear(bool freeMemory = false);
+- void insert(size_t index, const T& src);
+- void remove(size_t index);
+-
+- T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; }
+- const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; }
+-
+-private:
+- size_t m_Count;
+- T m_StaticArray[N]; // Used when m_Size <= N
+- VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Size > N
+-};
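+-
+-// Example of the in-place optimization above: a
+-// VmaSmallVector<uint32_t, VmaStlAllocator<uint32_t>, 8> keeps up to 8
+-// elements in m_StaticArray with no heap allocation; growing to a 9th element
+-// copies the contents into m_DynamicArray (see resize() below).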
+-
+-#ifndef _VMA_SMALL_VECTOR_FUNCTIONS
+-template<typename T, typename AllocatorT, size_t N>
+-VmaSmallVector<T, AllocatorT, N>::VmaSmallVector(const AllocatorT& allocator)
+- : m_Count(0),
+- m_DynamicArray(allocator) {}
+-
+-template<typename T, typename AllocatorT, size_t N>
+-VmaSmallVector<T, AllocatorT, N>::VmaSmallVector(size_t count, const AllocatorT& allocator)
+- : m_Count(count),
+- m_DynamicArray(count > N ? count : 0, allocator) {}
+-
+-template<typename T, typename AllocatorT, size_t N>
+-void VmaSmallVector<T, AllocatorT, N>::push_back(const T& src)
+-{
+- const size_t newIndex = size();
+- resize(newIndex + 1);
+- data()[newIndex] = src;
+-}
+-
+-template<typename T, typename AllocatorT, size_t N>
+-void VmaSmallVector<T, AllocatorT, N>::resize(size_t newCount, bool freeMemory)
+-{
+- if (newCount > N && m_Count > N)
+- {
+- // Any direction, staying in m_DynamicArray
+- m_DynamicArray.resize(newCount);
+- if (freeMemory)
+- {
+- m_DynamicArray.shrink_to_fit();
+- }
+- }
+- else if (newCount > N && m_Count <= N)
+- {
+- // Growing, moving from m_StaticArray to m_DynamicArray
+- m_DynamicArray.resize(newCount);
+- if (m_Count > 0)
+- {
+- memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
+- }
+- }
+- else if (newCount <= N && m_Count > N)
+- {
+- // Shrinking, moving from m_DynamicArray to m_StaticArray
+- if (newCount > 0)
+- {
+- memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
+- }
+- m_DynamicArray.resize(0);
+- if (freeMemory)
+- {
+- m_DynamicArray.shrink_to_fit();
+- }
+- }
+- else
+- {
+- // Any direction, staying in m_StaticArray - nothing to do here
+- }
+- m_Count = newCount;
+-}
+-
+-template<typename T, typename AllocatorT, size_t N>
+-void VmaSmallVector<T, AllocatorT, N>::clear(bool freeMemory)
+-{
+- m_DynamicArray.clear();
+- if (freeMemory)
+- {
+- m_DynamicArray.shrink_to_fit();
+- }
+- m_Count = 0;
+-}
+-
+-template<typename T, typename AllocatorT, size_t N>
+-void VmaSmallVector<T, AllocatorT, N>::insert(size_t index, const T& src)
+-{
+- VMA_HEAVY_ASSERT(index <= m_Count);
+- const size_t oldCount = size();
+- resize(oldCount + 1);
+- T* const dataPtr = data();
+- if (index < oldCount)
+- {
+- // Note: this could be optimized for the case where the memmove could instead be a memcpy directly from m_StaticArray to m_DynamicArray.
+- memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
+- }
+- dataPtr[index] = src;
+-}
+-
+-template<typename T, typename AllocatorT, size_t N>
+-void VmaSmallVector<T, AllocatorT, N>::remove(size_t index)
+-{
+- VMA_HEAVY_ASSERT(index < m_Count);
+- const size_t oldCount = size();
+- if (index < oldCount - 1)
+- {
+- // Note: this could be optimized for the case where the memmove could instead be a memcpy directly from m_DynamicArray to m_StaticArray.
+- T* const dataPtr = data();
+- memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
+- }
+- resize(oldCount - 1);
+-}
+-#endif // _VMA_SMALL_VECTOR_FUNCTIONS
+-#endif // _VMA_SMALL_VECTOR
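+-
+-// Usage sketch (illustrative; 'pCallbacks' is an assumed VkAllocationCallbacks
+-// pointer): elements live in the static array until the count exceeds N,
+-// after which they spill into the heap-backed VmaVector.
+-//
+-//   VmaSmallVector<uint32_t, VmaStlAllocator<uint32_t>, 8> v(
+-//       VmaStlAllocator<uint32_t>(pCallbacks));
+-//   for (uint32_t i = 0; i < 10; ++i)
+-//       v.push_back(i); // i == 8 triggers the move to m_DynamicArray
+-//   v.resize(4, true);  // moves back to m_StaticArray and frees heap memory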
+-
+-#ifndef _VMA_POOL_ALLOCATOR
+-/*
+-Allocator for objects of type T, using a list of arrays (pools) to speed up
+-allocation. The number of elements that can be allocated is not bounded,
+-because the allocator can create multiple blocks.
+-*/
+-template<typename T>
+-class VmaPoolAllocator
+-{
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaPoolAllocator)
+-public:
+- VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
+- ~VmaPoolAllocator();
+- template<typename... Types> T* Alloc(Types&&... args);
+- void Free(T* ptr);
+-
+-private:
+- union Item
+- {
+- uint32_t NextFreeIndex;
+- alignas(T) char Value[sizeof(T)];
+- };
+- struct ItemBlock
+- {
+- Item* pItems;
+- uint32_t Capacity;
+- uint32_t FirstFreeIndex;
+- };
+-
+- const VkAllocationCallbacks* m_pAllocationCallbacks;
+- const uint32_t m_FirstBlockCapacity;
+- VmaVector<ItemBlock, VmaStlAllocator<ItemBlock>> m_ItemBlocks;
+-
+- ItemBlock& CreateNewBlock();
+-};
+-
+-#ifndef _VMA_POOL_ALLOCATOR_FUNCTIONS
+-template<typename T>
+-VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity)
+- : m_pAllocationCallbacks(pAllocationCallbacks),
+- m_FirstBlockCapacity(firstBlockCapacity),
+- m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
+-{
+- VMA_ASSERT(m_FirstBlockCapacity > 1);
+-}
+-
+-template<typename T>
+-VmaPoolAllocator<T>::~VmaPoolAllocator()
+-{
+- for (size_t i = m_ItemBlocks.size(); i--;)
+- vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
+- m_ItemBlocks.clear();
+-}
+-
+-template<typename T>
+-template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types&&... args)
+-{
+- for (size_t i = m_ItemBlocks.size(); i--; )
+- {
+- ItemBlock& block = m_ItemBlocks[i];
+- // This block has some free items: use the first one.
+- if (block.FirstFreeIndex != UINT32_MAX)
+- {
+- Item* const pItem = &block.pItems[block.FirstFreeIndex];
+- block.FirstFreeIndex = pItem->NextFreeIndex;
+- T* result = (T*)&pItem->Value;
+- new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
+- return result;
+- }
+- }
+-
+- // No block has a free item: create a new one and use it.
+- ItemBlock& newBlock = CreateNewBlock();
+- Item* const pItem = &newBlock.pItems[0];
+- newBlock.FirstFreeIndex = pItem->NextFreeIndex;
+- T* result = (T*)&pItem->Value;
+- new(result) T(std::forward<Types>(args)...); // Explicit constructor call.
+- return result;
+-}
+-
+-template<typename T>
+-void VmaPoolAllocator<T>::Free(T* ptr)
+-{
+- // Search all memory blocks to find ptr.
+- for (size_t i = m_ItemBlocks.size(); i--; )
+- {
+- ItemBlock& block = m_ItemBlocks[i];
+-
+- // Casting to union.
+- Item* pItemPtr;
+- memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
+-
+- // Check if pItemPtr is in address range of this block.
+- if ((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
+- {
+- ptr->~T(); // Explicit destructor call.
+- const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
+- pItemPtr->NextFreeIndex = block.FirstFreeIndex;
+- block.FirstFreeIndex = index;
+- return;
+- }
+- }
+- VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
+-}
+-
+-template<typename T>
+-typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
+-{
+- const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
+- m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
+-
+- const ItemBlock newBlock =
+- {
+- vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
+- newBlockCapacity,
+- 0
+- };
+-
+- m_ItemBlocks.push_back(newBlock);
+-
+- // Set up a singly-linked list of all free items in this block.
+- for (uint32_t i = 0; i < newBlockCapacity - 1; ++i)
+- newBlock.pItems[i].NextFreeIndex = i + 1;
+- newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
+- return m_ItemBlocks.back();
+-}
+-#endif // _VMA_POOL_ALLOCATOR_FUNCTIONS
+-#endif // _VMA_POOL_ALLOCATOR
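+-
+-// Usage sketch (illustrative; 'pCallbacks' is an assumed VkAllocationCallbacks
+-// pointer). Blocks grow geometrically (next capacity = previous * 3 / 2), and
+-// Free() returns the slot to the owning block's free list without releasing
+-// the block itself.
+-//
+-//   VmaPoolAllocator<VmaAllocation_T> pool(pCallbacks, 32);
+-//   VmaAllocation_T* a = pool.Alloc(/*mappingAllowed=*/true);
+-//   pool.Free(a); // slot becomes the head of its block's free list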
+-
+-#ifndef _VMA_RAW_LIST
+-template<typename T>
+-struct VmaListItem
+-{
+- VmaListItem* pPrev;
+- VmaListItem* pNext;
+- T Value;
+-};
+-
+-// Doubly linked list.
+-template<typename T>
+-class VmaRawList
+-{
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaRawList)
+-public:
+- typedef VmaListItem<T> ItemType;
+-
+- VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
+- // Intentionally not calling Clear, because that would perform unnecessary
+- // work just to return all items to m_ItemAllocator as free.
+- ~VmaRawList() = default;
+-
+- size_t GetCount() const { return m_Count; }
+- bool IsEmpty() const { return m_Count == 0; }
+-
+- ItemType* Front() { return m_pFront; }
+- ItemType* Back() { return m_pBack; }
+- const ItemType* Front() const { return m_pFront; }
+- const ItemType* Back() const { return m_pBack; }
+-
+- ItemType* PushFront();
+- ItemType* PushBack();
+- ItemType* PushFront(const T& value);
+- ItemType* PushBack(const T& value);
+- void PopFront();
+- void PopBack();
+-
+- // pItem can be null, in which case this is equivalent to PushBack.
+- ItemType* InsertBefore(ItemType* pItem);
+- // pItem can be null, in which case this is equivalent to PushFront.
+- ItemType* InsertAfter(ItemType* pItem);
+- ItemType* InsertBefore(ItemType* pItem, const T& value);
+- ItemType* InsertAfter(ItemType* pItem, const T& value);
+-
+- void Clear();
+- void Remove(ItemType* pItem);
+-
+-private:
+- const VkAllocationCallbacks* const m_pAllocationCallbacks;
+- VmaPoolAllocator<ItemType> m_ItemAllocator;
+- ItemType* m_pFront;
+- ItemType* m_pBack;
+- size_t m_Count;
+-};
+-
+-#ifndef _VMA_RAW_LIST_FUNCTIONS
+-template<typename T>
+-VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks)
+- : m_pAllocationCallbacks(pAllocationCallbacks),
+- m_ItemAllocator(pAllocationCallbacks, 128),
+- m_pFront(VMA_NULL),
+- m_pBack(VMA_NULL),
+- m_Count(0) {}
+-
+-template<typename T>
+-VmaListItem<T>* VmaRawList<T>::PushFront()
+-{
+- ItemType* const pNewItem = m_ItemAllocator.Alloc();
+- pNewItem->pPrev = VMA_NULL;
+- if (IsEmpty())
+- {
+- pNewItem->pNext = VMA_NULL;
+- m_pFront = pNewItem;
+- m_pBack = pNewItem;
+- m_Count = 1;
+- }
+- else
+- {
+- pNewItem->pNext = m_pFront;
+- m_pFront->pPrev = pNewItem;
+- m_pFront = pNewItem;
+- ++m_Count;
+- }
+- return pNewItem;
+-}
+-
+-template<typename T>
+-VmaListItem<T>* VmaRawList<T>::PushBack()
+-{
+- ItemType* const pNewItem = m_ItemAllocator.Alloc();
+- pNewItem->pNext = VMA_NULL;
+- if(IsEmpty())
+- {
+- pNewItem->pPrev = VMA_NULL;
+- m_pFront = pNewItem;
+- m_pBack = pNewItem;
+- m_Count = 1;
+- }
+- else
+- {
+- pNewItem->pPrev = m_pBack;
+- m_pBack->pNext = pNewItem;
+- m_pBack = pNewItem;
+- ++m_Count;
+- }
+- return pNewItem;
+-}
+-
+-template<typename T>
+-VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
+-{
+- ItemType* const pNewItem = PushFront();
+- pNewItem->Value = value;
+- return pNewItem;
+-}
+-
+-template<typename T>
+-VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
+-{
+- ItemType* const pNewItem = PushBack();
+- pNewItem->Value = value;
+- return pNewItem;
+-}
+-
+-template<typename T>
+-void VmaRawList<T>::PopFront()
+-{
+- VMA_HEAVY_ASSERT(m_Count > 0);
+- ItemType* const pFrontItem = m_pFront;
+- ItemType* const pNextItem = pFrontItem->pNext;
+- if (pNextItem != VMA_NULL)
+- {
+- pNextItem->pPrev = VMA_NULL;
+- }
+- m_pFront = pNextItem;
+- m_ItemAllocator.Free(pFrontItem);
+- --m_Count;
+-}
+-
+-template<typename T>
+-void VmaRawList<T>::PopBack()
+-{
+- VMA_HEAVY_ASSERT(m_Count > 0);
+- ItemType* const pBackItem = m_pBack;
+- ItemType* const pPrevItem = pBackItem->pPrev;
+- if(pPrevItem != VMA_NULL)
+- {
+- pPrevItem->pNext = VMA_NULL;
+- }
+- m_pBack = pPrevItem;
+- m_ItemAllocator.Free(pBackItem);
+- --m_Count;
+-}
+-
+-template<typename T>
+-void VmaRawList<T>::Clear()
+-{
+- if (IsEmpty() == false)
+- {
+- ItemType* pItem = m_pBack;
+- while (pItem != VMA_NULL)
+- {
+- ItemType* const pPrevItem = pItem->pPrev;
+- m_ItemAllocator.Free(pItem);
+- pItem = pPrevItem;
+- }
+- m_pFront = VMA_NULL;
+- m_pBack = VMA_NULL;
+- m_Count = 0;
+- }
+-}
+-
+-template<typename T>
+-void VmaRawList<T>::Remove(ItemType* pItem)
+-{
+- VMA_HEAVY_ASSERT(pItem != VMA_NULL);
+- VMA_HEAVY_ASSERT(m_Count > 0);
+-
+- if(pItem->pPrev != VMA_NULL)
+- {
+- pItem->pPrev->pNext = pItem->pNext;
+- }
+- else
+- {
+- VMA_HEAVY_ASSERT(m_pFront == pItem);
+- m_pFront = pItem->pNext;
+- }
+-
+- if(pItem->pNext != VMA_NULL)
+- {
+- pItem->pNext->pPrev = pItem->pPrev;
+- }
+- else
+- {
+- VMA_HEAVY_ASSERT(m_pBack == pItem);
+- m_pBack = pItem->pPrev;
+- }
+-
+- m_ItemAllocator.Free(pItem);
+- --m_Count;
+-}
+-
+-template<typename T>
+-VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
+-{
+- if(pItem != VMA_NULL)
+- {
+- ItemType* const prevItem = pItem->pPrev;
+- ItemType* const newItem = m_ItemAllocator.Alloc();
+- newItem->pPrev = prevItem;
+- newItem->pNext = pItem;
+- pItem->pPrev = newItem;
+- if(prevItem != VMA_NULL)
+- {
+- prevItem->pNext = newItem;
+- }
+- else
+- {
+- VMA_HEAVY_ASSERT(m_pFront == pItem);
+- m_pFront = newItem;
+- }
+- ++m_Count;
+- return newItem;
+- }
+- else
+- return PushBack();
+-}
+-
+-template<typename T>
+-VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
+-{
+- if(pItem != VMA_NULL)
+- {
+- ItemType* const nextItem = pItem->pNext;
+- ItemType* const newItem = m_ItemAllocator.Alloc();
+- newItem->pNext = nextItem;
+- newItem->pPrev = pItem;
+- pItem->pNext = newItem;
+- if(nextItem != VMA_NULL)
+- {
+- nextItem->pPrev = newItem;
+- }
+- else
+- {
+- VMA_HEAVY_ASSERT(m_pBack == pItem);
+- m_pBack = newItem;
+- }
+- ++m_Count;
+- return newItem;
+- }
+- else
+- return PushFront();
+-}
+-
+-template<typename T>
+-VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
+-{
+- ItemType* const newItem = InsertBefore(pItem);
+- newItem->Value = value;
+- return newItem;
+-}
+-
+-template<typename T>
+-VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
+-{
+- ItemType* const newItem = InsertAfter(pItem);
+- newItem->Value = value;
+- return newItem;
+-}
+-#endif // _VMA_RAW_LIST_FUNCTIONS
+-#endif // _VMA_RAW_LIST
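+-
+-// Usage sketch (illustrative; 'pCallbacks' and 'process' are assumed):
+-// VmaRawList hands out VmaListItem<T>* directly, so callers walk pNext/pPrev
+-// themselves; items come from the embedded VmaPoolAllocator, avoiding a
+-// VkAllocationCallbacks round trip per item.
+-//
+-//   VmaRawList<int> list(pCallbacks);
+-//   list.PushBack(1);
+-//   list.PushBack(2);
+-//   for (VmaListItem<int>* it = list.Front(); it != VMA_NULL; it = it->pNext)
+-//       process(it->Value);
+-//   list.Clear();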
+-
+-#ifndef _VMA_LIST
+-template<typename T, typename AllocatorT>
+-class VmaList
+-{
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaList)
+-public:
+- class reverse_iterator;
+- class const_iterator;
+- class const_reverse_iterator;
+-
+- class iterator
+- {
+- friend class const_iterator;
+- friend class VmaList<T, AllocatorT>;
+- public:
+- iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
+- iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
+-
+- T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
+- T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
+-
+- bool operator==(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
+- bool operator!=(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
+-
+- iterator operator++(int) { iterator result = *this; ++*this; return result; }
+- iterator operator--(int) { iterator result = *this; --*this; return result; }
+-
+- iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }
+- iterator& operator--();
+-
+- private:
+- VmaRawList<T>* m_pList;
+- VmaListItem<T>* m_pItem;
+-
+- iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
+- };
+- class reverse_iterator
+- {
+- friend class const_reverse_iterator;
+- friend class VmaList<T, AllocatorT>;
+- public:
+- reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
+- reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
+-
+- T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
+- T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
+-
+- bool operator==(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
+- bool operator!=(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
+-
+- reverse_iterator operator++(int) { reverse_iterator result = *this; ++*this; return result; }
+- reverse_iterator operator--(int) { reverse_iterator result = *this; --*this; return result; }
+-
+- reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }
+- reverse_iterator& operator--();
+-
+- private:
+- VmaRawList<T>* m_pList;
+- VmaListItem<T>* m_pItem;
+-
+- reverse_iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
+- };
+- class const_iterator
+- {
+- friend class VmaList<T, AllocatorT>;
+- public:
+- const_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
+- const_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
+- const_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
+-
+- iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }
+-
+- const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
+- const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
+-
+- bool operator==(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
+- bool operator!=(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
+-
+- const_iterator operator++(int) { const_iterator result = *this; ++*this; return result; }
+- const_iterator operator--(int) { const_iterator result = *this; --*this; return result; }
+-
+- const_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }
+- const_iterator& operator--();
+-
+- private:
+- const VmaRawList<T>* m_pList;
+- const VmaListItem<T>* m_pItem;
+-
+- const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
+- };
+- class const_reverse_iterator
+- {
+- friend class VmaList<T, AllocatorT>;
+- public:
+- const_reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
+- const_reverse_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
+- const_reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
+-
+- reverse_iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }
+-
+- const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
+- const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
+-
+- bool operator==(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
+- bool operator!=(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
+-
+- const_reverse_iterator operator++(int) { const_reverse_iterator result = *this; ++*this; return result; }
+- const_reverse_iterator operator--(int) { const_reverse_iterator result = *this; --*this; return result; }
+-
+- const_reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }
+- const_reverse_iterator& operator--();
+-
+- private:
+- const VmaRawList<T>* m_pList;
+- const VmaListItem<T>* m_pItem;
+-
+- const_reverse_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
+- };
+-
+- VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) {}
+-
+- bool empty() const { return m_RawList.IsEmpty(); }
+- size_t size() const { return m_RawList.GetCount(); }
+-
+- iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
+- iterator end() { return iterator(&m_RawList, VMA_NULL); }
+-
+- const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
+- const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
+-
+- const_iterator begin() const { return cbegin(); }
+- const_iterator end() const { return cend(); }
+-
+- reverse_iterator rbegin() { return reverse_iterator(&m_RawList, m_RawList.Back()); }
+- reverse_iterator rend() { return reverse_iterator(&m_RawList, VMA_NULL); }
+-
+- const_reverse_iterator crbegin() const { return const_reverse_iterator(&m_RawList, m_RawList.Back()); }
+- const_reverse_iterator crend() const { return const_reverse_iterator(&m_RawList, VMA_NULL); }
+-
+- const_reverse_iterator rbegin() const { return crbegin(); }
+- const_reverse_iterator rend() const { return crend(); }
+-
+- void push_back(const T& value) { m_RawList.PushBack(value); }
+- iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
+-
+- void clear() { m_RawList.Clear(); }
+- void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
+-
+-private:
+- VmaRawList<T> m_RawList;
+-};
+-
+-#ifndef _VMA_LIST_FUNCTIONS
+-template<typename T, typename AllocatorT>
+-typename VmaList<T, AllocatorT>::iterator& VmaList<T, AllocatorT>::iterator::operator--()
+-{
+- if (m_pItem != VMA_NULL)
+- {
+- m_pItem = m_pItem->pPrev;
+- }
+- else
+- {
+- VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
+- m_pItem = m_pList->Back();
+- }
+- return *this;
+-}
+-
+-template<typename T, typename AllocatorT>
+-typename VmaList<T, AllocatorT>::reverse_iterator& VmaList<T, AllocatorT>::reverse_iterator::operator--()
+-{
+- if (m_pItem != VMA_NULL)
+- {
+- m_pItem = m_pItem->pNext;
+- }
+- else
+- {
+- VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
+- m_pItem = m_pList->Front();
+- }
+- return *this;
+-}
+-
+-template<typename T, typename AllocatorT>
+-typename VmaList<T, AllocatorT>::const_iterator& VmaList<T, AllocatorT>::const_iterator::operator--()
+-{
+- if (m_pItem != VMA_NULL)
+- {
+- m_pItem = m_pItem->pPrev;
+- }
+- else
+- {
+- VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
+- m_pItem = m_pList->Back();
+- }
+- return *this;
+-}
+-
+-template<typename T, typename AllocatorT>
+-typename VmaList<T, AllocatorT>::const_reverse_iterator& VmaList<T, AllocatorT>::const_reverse_iterator::operator--()
+-{
+- if (m_pItem != VMA_NULL)
+- {
+- m_pItem = m_pItem->pNext;
+- }
+- else
+- {
+- VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
+- m_pItem = m_pList->Front();
+- }
+- return *this;
+-}
+-#endif // _VMA_LIST_FUNCTIONS
+-#endif // _VMA_LIST
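+-
+-// Usage sketch (illustrative; 'pCallbacks' and 'sum' are assumed): VmaList
+-// wraps VmaRawList with STL-style iterators; end()/rend() are represented by
+-// a null m_pItem, which is why operator--() on an end iterator re-enters the
+-// list at Back()/Front().
+-//
+-//   VmaList<int, VmaStlAllocator<int>> list(VmaStlAllocator<int>(pCallbacks));
+-//   list.push_back(10);
+-//   list.push_back(20);
+-//   for (auto it = list.begin(); it != list.end(); ++it)
+-//       sum += *it;
+-//   list.clear();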
+-
+-#ifndef _VMA_INTRUSIVE_LINKED_LIST
+-/*
+-Expected interface of ItemTypeTraits:
+-struct MyItemTypeTraits
+-{
+- typedef MyItem ItemType;
+- static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; }
+- static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; }
+- static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; }
+- static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }
+-};
+-*/
+-template<typename ItemTypeTraits>
+-class VmaIntrusiveLinkedList
+-{
+-public:
+- typedef typename ItemTypeTraits::ItemType ItemType;
+- static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); }
+- static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); }
+-
+- // Movable, not copyable.
+- VmaIntrusiveLinkedList() = default;
+- VmaIntrusiveLinkedList(VmaIntrusiveLinkedList && src);
+- VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList&) = delete;
+- VmaIntrusiveLinkedList& operator=(VmaIntrusiveLinkedList&& src);
+- VmaIntrusiveLinkedList& operator=(const VmaIntrusiveLinkedList&) = delete;
+- ~VmaIntrusiveLinkedList() { VMA_HEAVY_ASSERT(IsEmpty()); }
+-
+- size_t GetCount() const { return m_Count; }
+- bool IsEmpty() const { return m_Count == 0; }
+- ItemType* Front() { return m_Front; }
+- ItemType* Back() { return m_Back; }
+- const ItemType* Front() const { return m_Front; }
+- const ItemType* Back() const { return m_Back; }
+-
+- void PushBack(ItemType* item);
+- void PushFront(ItemType* item);
+- ItemType* PopBack();
+- ItemType* PopFront();
+-
+- // existingItem can be null, in which case this is equivalent to PushBack.
+- void InsertBefore(ItemType* existingItem, ItemType* newItem);
+- // existingItem can be null, in which case this is equivalent to PushFront.
+- void InsertAfter(ItemType* existingItem, ItemType* newItem);
+- void Remove(ItemType* item);
+- void RemoveAll();
+-
+-private:
+- ItemType* m_Front = VMA_NULL;
+- ItemType* m_Back = VMA_NULL;
+- size_t m_Count = 0;
+-};
+-
+-#ifndef _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
+-template<typename ItemTypeTraits>
+-VmaIntrusiveLinkedList<ItemTypeTraits>::VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src)
+- : m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count)
+-{
+- src.m_Front = src.m_Back = VMA_NULL;
+- src.m_Count = 0;
+-}
+-
+-template<typename ItemTypeTraits>
+-VmaIntrusiveLinkedList<ItemTypeTraits>& VmaIntrusiveLinkedList<ItemTypeTraits>::operator=(VmaIntrusiveLinkedList&& src)
+-{
+- if (&src != this)
+- {
+- VMA_HEAVY_ASSERT(IsEmpty());
+- m_Front = src.m_Front;
+- m_Back = src.m_Back;
+- m_Count = src.m_Count;
+- src.m_Front = src.m_Back = VMA_NULL;
+- src.m_Count = 0;
+- }
+- return *this;
+-}
+-
+-template<typename ItemTypeTraits>
+-void VmaIntrusiveLinkedList<ItemTypeTraits>::PushBack(ItemType* item)
+-{
+- VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
+- if (IsEmpty())
+- {
+- m_Front = item;
+- m_Back = item;
+- m_Count = 1;
+- }
+- else
+- {
+- ItemTypeTraits::AccessPrev(item) = m_Back;
+- ItemTypeTraits::AccessNext(m_Back) = item;
+- m_Back = item;
+- ++m_Count;
+- }
+-}
+-
+-template<typename ItemTypeTraits>
+-void VmaIntrusiveLinkedList<ItemTypeTraits>::PushFront(ItemType* item)
+-{
+- VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
+- if (IsEmpty())
+- {
+- m_Front = item;
+- m_Back = item;
+- m_Count = 1;
+- }
+- else
+- {
+- ItemTypeTraits::AccessNext(item) = m_Front;
+- ItemTypeTraits::AccessPrev(m_Front) = item;
+- m_Front = item;
+- ++m_Count;
+- }
+-}
+-
+-template<typename ItemTypeTraits>
+-typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopBack()
+-{
+- VMA_HEAVY_ASSERT(m_Count > 0);
+- ItemType* const backItem = m_Back;
+- ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem);
+- if (prevItem != VMA_NULL)
+- {
+- ItemTypeTraits::AccessNext(prevItem) = VMA_NULL;
+- }
+- m_Back = prevItem;
+- --m_Count;
+- ItemTypeTraits::AccessPrev(backItem) = VMA_NULL;
+- ItemTypeTraits::AccessNext(backItem) = VMA_NULL;
+- return backItem;
+-}
+-
+-template<typename ItemTypeTraits>
+-typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopFront()
+-{
+- VMA_HEAVY_ASSERT(m_Count > 0);
+- ItemType* const frontItem = m_Front;
+- ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem);
+- if (nextItem != VMA_NULL)
+- {
+- ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL;
+- }
+- m_Front = nextItem;
+- --m_Count;
+- ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL;
+- ItemTypeTraits::AccessNext(frontItem) = VMA_NULL;
+- return frontItem;
+-}
+-
+-template<typename ItemTypeTraits>
+-void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertBefore(ItemType* existingItem, ItemType* newItem)
+-{
+- VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
+- if (existingItem != VMA_NULL)
+- {
+- ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem);
+- ItemTypeTraits::AccessPrev(newItem) = prevItem;
+- ItemTypeTraits::AccessNext(newItem) = existingItem;
+- ItemTypeTraits::AccessPrev(existingItem) = newItem;
+- if (prevItem != VMA_NULL)
+- {
+- ItemTypeTraits::AccessNext(prevItem) = newItem;
+- }
+- else
+- {
+- VMA_HEAVY_ASSERT(m_Front == existingItem);
+- m_Front = newItem;
+- }
+- ++m_Count;
+- }
+- else
+- PushBack(newItem);
+-}
+-
+-template<typename ItemTypeTraits>
+-void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertAfter(ItemType* existingItem, ItemType* newItem)
+-{
+- VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
+- if (existingItem != VMA_NULL)
+- {
+- ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem);
+- ItemTypeTraits::AccessNext(newItem) = nextItem;
+- ItemTypeTraits::AccessPrev(newItem) = existingItem;
+- ItemTypeTraits::AccessNext(existingItem) = newItem;
+- if (nextItem != VMA_NULL)
+- {
+- ItemTypeTraits::AccessPrev(nextItem) = newItem;
+- }
+- else
+- {
+- VMA_HEAVY_ASSERT(m_Back == existingItem);
+- m_Back = newItem;
+- }
+- ++m_Count;
+- }
+- else
+- PushFront(newItem);
+-}
+-
+-template<typename ItemTypeTraits>
+-void VmaIntrusiveLinkedList<ItemTypeTraits>::Remove(ItemType* item)
+-{
+- VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0);
+- if (ItemTypeTraits::GetPrev(item) != VMA_NULL)
+- {
+- ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item);
+- }
+- else
+- {
+- VMA_HEAVY_ASSERT(m_Front == item);
+- m_Front = ItemTypeTraits::GetNext(item);
+- }
+-
+- if (ItemTypeTraits::GetNext(item) != VMA_NULL)
+- {
+- ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item);
+- }
+- else
+- {
+- VMA_HEAVY_ASSERT(m_Back == item);
+- m_Back = ItemTypeTraits::GetPrev(item);
+- }
+- ItemTypeTraits::AccessPrev(item) = VMA_NULL;
+- ItemTypeTraits::AccessNext(item) = VMA_NULL;
+- --m_Count;
+-}
+-
+-template<typename ItemTypeTraits>
+-void VmaIntrusiveLinkedList<ItemTypeTraits>::RemoveAll()
+-{
+- if (!IsEmpty())
+- {
+- ItemType* item = m_Back;
+- while (item != VMA_NULL)
+- {
+- ItemType* const prevItem = ItemTypeTraits::AccessPrev(item);
+- ItemTypeTraits::AccessPrev(item) = VMA_NULL;
+- ItemTypeTraits::AccessNext(item) = VMA_NULL;
+- item = prevItem;
+- }
+- m_Front = VMA_NULL;
+- m_Back = VMA_NULL;
+- m_Count = 0;
+- }
+-}
+-#endif // _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
+-#endif // _VMA_INTRUSIVE_LINKED_LIST
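+-
+-// Usage sketch (illustrative): the list owns no memory; items embed their own
+-// prev/next pointers and a traits struct exposes them, as done by
+-// VmaDedicatedAllocationListItemTraits further below.
+-//
+-//   struct Node { Node* prev = VMA_NULL; Node* next = VMA_NULL; int value = 0; };
+-//   struct NodeTraits
+-//   {
+-//       typedef Node ItemType;
+-//       static Node* GetPrev(const Node* n) { return n->prev; }
+-//       static Node* GetNext(const Node* n) { return n->next; }
+-//       static Node*& AccessPrev(Node* n) { return n->prev; }
+-//       static Node*& AccessNext(Node* n) { return n->next; }
+-//   };
+-//   VmaIntrusiveLinkedList<NodeTraits> list;
+-//   Node a, b;
+-//   list.PushBack(&a);
+-//   list.PushBack(&b);
+-//   list.RemoveAll(); // items must be detached before the list is destroyed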
+-
+-// Unused in this version.
+-#if 0
+-
+-#ifndef _VMA_PAIR
+-template<typename T1, typename T2>
+-struct VmaPair
+-{
+- T1 first;
+- T2 second;
+-
+- VmaPair() : first(), second() {}
+- VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) {}
+-};
+-
+-template<typename FirstT, typename SecondT>
+-struct VmaPairFirstLess
+-{
+- bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
+- {
+- return lhs.first < rhs.first;
+- }
+- bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
+- {
+- return lhs.first < rhsFirst;
+- }
+-};
+-#endif // _VMA_PAIR
+-
+-#ifndef _VMA_MAP
+-/* Class compatible with a subset of the std::unordered_map interface.
+-KeyT and ValueT must be POD because they are stored in a VmaVector.
+-*/
+-template<typename KeyT, typename ValueT>
+-class VmaMap
+-{
+-public:
+- typedef VmaPair<KeyT, ValueT> PairType;
+- typedef PairType* iterator;
+-
+- VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) {}
+-
+- iterator begin() { return m_Vector.begin(); }
+- iterator end() { return m_Vector.end(); }
+- size_t size() { return m_Vector.size(); }
+-
+- void insert(const PairType& pair);
+- iterator find(const KeyT& key);
+- void erase(iterator it);
+-
+-private:
+- VmaVector< PairType, VmaStlAllocator<PairType>> m_Vector;
+-};
+-
+-#ifndef _VMA_MAP_FUNCTIONS
+-template<typename KeyT, typename ValueT>
+-void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
+-{
+- const size_t indexToInsert = VmaBinaryFindFirstNotLess(
+- m_Vector.data(),
+- m_Vector.data() + m_Vector.size(),
+- pair,
+- VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
+- VmaVectorInsert(m_Vector, indexToInsert, pair);
+-}
+-
+-template<typename KeyT, typename ValueT>
+-VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
+-{
+- PairType* it = VmaBinaryFindFirstNotLess(
+- m_Vector.data(),
+- m_Vector.data() + m_Vector.size(),
+- key,
+- VmaPairFirstLess<KeyT, ValueT>());
+- if ((it != m_Vector.end()) && (it->first == key))
+- {
+- return it;
+- }
+- else
+- {
+- return m_Vector.end();
+- }
+-}
+-
+-template<typename KeyT, typename ValueT>
+-void VmaMap<KeyT, ValueT>::erase(iterator it)
+-{
+- VmaVectorRemove(m_Vector, it - m_Vector.begin());
+-}
+-#endif // _VMA_MAP_FUNCTIONS
+-#endif // _VMA_MAP
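+-
+-// Usage sketch (illustrative; 'pCallbacks' is assumed): VmaMap keeps its
+-// pairs sorted by key inside a VmaVector, so insert() and find() are binary
+-// searches over contiguous storage rather than hash-bucket lookups.
+-//
+-//   VmaMap<uint32_t, float> map(
+-//       VmaStlAllocator<VmaPair<uint32_t, float>>(pCallbacks));
+-//   map.insert(VmaPair<uint32_t, float>(7, 1.5f));
+-//   auto it = map.find(7);
+-//   if (it != map.end())
+-//       map.erase(it);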
+-
+-#endif // #if 0
+-
+-#if !defined(_VMA_STRING_BUILDER) && VMA_STATS_STRING_ENABLED
+-class VmaStringBuilder
+-{
+-public:
+- VmaStringBuilder(const VkAllocationCallbacks* allocationCallbacks) : m_Data(VmaStlAllocator<char>(allocationCallbacks)) {}
+- ~VmaStringBuilder() = default;
+-
+- size_t GetLength() const { return m_Data.size(); }
+- const char* GetData() const { return m_Data.data(); }
+- void AddNewLine() { Add('\n'); }
+- void Add(char ch) { m_Data.push_back(ch); }
+-
+- void Add(const char* pStr);
+- void AddNumber(uint32_t num);
+- void AddNumber(uint64_t num);
+- void AddPointer(const void* ptr);
+-
+-private:
+- VmaVector<char, VmaStlAllocator<char>> m_Data;
+-};
+-
+-#ifndef _VMA_STRING_BUILDER_FUNCTIONS
+-void VmaStringBuilder::Add(const char* pStr)
+-{
+- const size_t strLen = strlen(pStr);
+- if (strLen > 0)
+- {
+- const size_t oldCount = m_Data.size();
+- m_Data.resize(oldCount + strLen);
+- memcpy(m_Data.data() + oldCount, pStr, strLen);
+- }
+-}
+-
+-void VmaStringBuilder::AddNumber(uint32_t num)
+-{
+- char buf[11];
+- buf[10] = '\0';
+- char* p = &buf[10];
+- do
+- {
+- *--p = '0' + (char)(num % 10);
+- num /= 10;
+- } while (num);
+- Add(p);
+-}
+-
+-void VmaStringBuilder::AddNumber(uint64_t num)
+-{
+- char buf[21];
+- buf[20] = '\0';
+- char* p = &buf[20];
+- do
+- {
+- *--p = '0' + (char)(num % 10);
+- num /= 10;
+- } while (num);
+- Add(p);
+-}
+-
+-void VmaStringBuilder::AddPointer(const void* ptr)
+-{
+- char buf[21];
+- VmaPtrToStr(buf, sizeof(buf), ptr);
+- Add(buf);
+-}
+-#endif //_VMA_STRING_BUILDER_FUNCTIONS
+-#endif // _VMA_STRING_BUILDER
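+-
+-// Usage sketch (illustrative; 'pCallbacks' is assumed): the builder appends
+-// into a VmaVector<char> without a terminating NUL, so consumers pair
+-// GetData() with GetLength().
+-//
+-//   VmaStringBuilder sb(pCallbacks);
+-//   sb.Add("blockCount = ");
+-//   sb.AddNumber(uint32_t(42));
+-//   sb.AddNewLine();
+-//   fwrite(sb.GetData(), 1, sb.GetLength(), stdout);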
+-
+-#if !defined(_VMA_JSON_WRITER) && VMA_STATS_STRING_ENABLED
+-/*
+-Allows one to conveniently build a correct JSON document, written to the
+-VmaStringBuilder passed to the constructor.
+-*/
+-class VmaJsonWriter
+-{
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaJsonWriter)
+-public:
+- // sb - string builder to write the document to. Must remain alive for the whole lifetime of this object.
+- VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
+- ~VmaJsonWriter();
+-
+- // Begins object by writing "{".
+- // Inside an object, you must write pairs of a WriteString call (the key) and a value, e.g.:
+- // j.BeginObject(true); j.WriteString("A"); j.WriteNumber(1); j.WriteString("B"); j.WriteNumber(2); j.EndObject();
+- // Will write: { "A": 1, "B": 2 }
+- void BeginObject(bool singleLine = false);
+- // Ends object by writing "}".
+- void EndObject();
+-
+- // Begins array by writing "[".
+- // Inside an array, you can write a sequence of any values.
+- void BeginArray(bool singleLine = false);
+- // Ends array by writing "]".
+- void EndArray();
+-
+- // Writes a string value inside "".
+- // pStr can contain any ANSI characters, including '"' and newline; they will be properly escaped.
+- void WriteString(const char* pStr);
+-
+- // Begins writing a string value.
+- // Call BeginString, ContinueString, ContinueString, ..., EndString instead of
+- // WriteString to build the string content incrementally from multiple
+- // parts, including numbers.
+- void BeginString(const char* pStr = VMA_NULL);
+- // Posts next part of an open string.
+- void ContinueString(const char* pStr);
+- // Posts next part of an open string. The number is converted to decimal characters.
+- void ContinueString(uint32_t n);
+- void ContinueString(uint64_t n);
+- // Posts next part of an open string. Pointer value is converted to characters
+- // using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00
+- void ContinueString_Pointer(const void* ptr);
+- // Ends writing a string value by writing '"'.
+- void EndString(const char* pStr = VMA_NULL);
+-
+- // Writes a number value.
+- void WriteNumber(uint32_t n);
+- void WriteNumber(uint64_t n);
+- // Writes a boolean value - false or true.
+- void WriteBool(bool b);
+- // Writes a null value.
+- void WriteNull();
+-
+-private:
+- enum COLLECTION_TYPE
+- {
+- COLLECTION_TYPE_OBJECT,
+- COLLECTION_TYPE_ARRAY,
+- };
+- struct StackItem
+- {
+- COLLECTION_TYPE type;
+- uint32_t valueCount;
+- bool singleLineMode;
+- };
+-
+- static const char* const INDENT;
+-
+- VmaStringBuilder& m_SB;
+- VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
+- bool m_InsideString;
+-
+- void BeginValue(bool isString);
+- void WriteIndent(bool oneLess = false);
+-};
+-const char* const VmaJsonWriter::INDENT = " ";
+-
+-#ifndef _VMA_JSON_WRITER_FUNCTIONS
+-VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb)
+- : m_SB(sb),
+- m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
+- m_InsideString(false) {}
+-
+-VmaJsonWriter::~VmaJsonWriter()
+-{
+- VMA_ASSERT(!m_InsideString);
+- VMA_ASSERT(m_Stack.empty());
+-}
+-
+-void VmaJsonWriter::BeginObject(bool singleLine)
+-{
+- VMA_ASSERT(!m_InsideString);
+-
+- BeginValue(false);
+- m_SB.Add('{');
+-
+- StackItem item;
+- item.type = COLLECTION_TYPE_OBJECT;
+- item.valueCount = 0;
+- item.singleLineMode = singleLine;
+- m_Stack.push_back(item);
+-}
+-
+-void VmaJsonWriter::EndObject()
+-{
+- VMA_ASSERT(!m_InsideString);
+-
+- WriteIndent(true);
+- m_SB.Add('}');
+-
+- VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
+- m_Stack.pop_back();
+-}
+-
+-void VmaJsonWriter::BeginArray(bool singleLine)
+-{
+- VMA_ASSERT(!m_InsideString);
+-
+- BeginValue(false);
+- m_SB.Add('[');
+-
+- StackItem item;
+- item.type = COLLECTION_TYPE_ARRAY;
+- item.valueCount = 0;
+- item.singleLineMode = singleLine;
+- m_Stack.push_back(item);
+-}
+-
+-void VmaJsonWriter::EndArray()
+-{
+- VMA_ASSERT(!m_InsideString);
+-
+- WriteIndent(true);
+- m_SB.Add(']');
+-
+- VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
+- m_Stack.pop_back();
+-}
+-
+-void VmaJsonWriter::WriteString(const char* pStr)
+-{
+- BeginString(pStr);
+- EndString();
+-}
+-
+-void VmaJsonWriter::BeginString(const char* pStr)
+-{
+- VMA_ASSERT(!m_InsideString);
+-
+- BeginValue(true);
+- m_SB.Add('"');
+- m_InsideString = true;
+- if (pStr != VMA_NULL && pStr[0] != '\0')
+- {
+- ContinueString(pStr);
+- }
+-}
+-
+-void VmaJsonWriter::ContinueString(const char* pStr)
+-{
+- VMA_ASSERT(m_InsideString);
+-
+- const size_t strLen = strlen(pStr);
+- for (size_t i = 0; i < strLen; ++i)
+- {
+- char ch = pStr[i];
+- if (ch == '\\')
+- {
+- m_SB.Add("\\\\");
+- }
+- else if (ch == '"')
+- {
+- m_SB.Add("\\\"");
+- }
+- else if (ch >= 32)
+- {
+- m_SB.Add(ch);
+- }
+- else switch (ch)
+- {
+- case '\b':
+- m_SB.Add("\\b");
+- break;
+- case '\f':
+- m_SB.Add("\\f");
+- break;
+- case '\n':
+- m_SB.Add("\\n");
+- break;
+- case '\r':
+- m_SB.Add("\\r");
+- break;
+- case '\t':
+- m_SB.Add("\\t");
+- break;
+- default:
+- VMA_ASSERT(0 && "Character not currently supported.");
+- }
+- }
+-}
+-
+-void VmaJsonWriter::ContinueString(uint32_t n)
+-{
+- VMA_ASSERT(m_InsideString);
+- m_SB.AddNumber(n);
+-}
+-
+-void VmaJsonWriter::ContinueString(uint64_t n)
+-{
+- VMA_ASSERT(m_InsideString);
+- m_SB.AddNumber(n);
+-}
+-
+-void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
+-{
+- VMA_ASSERT(m_InsideString);
+- m_SB.AddPointer(ptr);
+-}
+-
+-void VmaJsonWriter::EndString(const char* pStr)
+-{
+- VMA_ASSERT(m_InsideString);
+- if (pStr != VMA_NULL && pStr[0] != '\0')
+- {
+- ContinueString(pStr);
+- }
+- m_SB.Add('"');
+- m_InsideString = false;
+-}
+-
+-void VmaJsonWriter::WriteNumber(uint32_t n)
+-{
+- VMA_ASSERT(!m_InsideString);
+- BeginValue(false);
+- m_SB.AddNumber(n);
+-}
+-
+-void VmaJsonWriter::WriteNumber(uint64_t n)
+-{
+- VMA_ASSERT(!m_InsideString);
+- BeginValue(false);
+- m_SB.AddNumber(n);
+-}
+-
+-void VmaJsonWriter::WriteBool(bool b)
+-{
+- VMA_ASSERT(!m_InsideString);
+- BeginValue(false);
+- m_SB.Add(b ? "true" : "false");
+-}
+-
+-void VmaJsonWriter::WriteNull()
+-{
+- VMA_ASSERT(!m_InsideString);
+- BeginValue(false);
+- m_SB.Add("null");
+-}
+-
+-void VmaJsonWriter::BeginValue(bool isString)
+-{
+- if (!m_Stack.empty())
+- {
+- StackItem& currItem = m_Stack.back();
+- if (currItem.type == COLLECTION_TYPE_OBJECT &&
+- currItem.valueCount % 2 == 0)
+- {
+- VMA_ASSERT(isString);
+- }
+-
+- if (currItem.type == COLLECTION_TYPE_OBJECT &&
+- currItem.valueCount % 2 != 0)
+- {
+- m_SB.Add(": ");
+- }
+- else if (currItem.valueCount > 0)
+- {
+- m_SB.Add(", ");
+- WriteIndent();
+- }
+- else
+- {
+- WriteIndent();
+- }
+- ++currItem.valueCount;
+- }
+-}
+-
+-void VmaJsonWriter::WriteIndent(bool oneLess)
+-{
+- if (!m_Stack.empty() && !m_Stack.back().singleLineMode)
+- {
+- m_SB.AddNewLine();
+-
+- size_t count = m_Stack.size();
+- if (count > 0 && oneLess)
+- {
+- --count;
+- }
+- for (size_t i = 0; i < count; ++i)
+- {
+- m_SB.Add(INDENT);
+- }
+- }
+-}
+-#endif // _VMA_JSON_WRITER_FUNCTIONS
+-
+-static void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat)
+-{
+- json.BeginObject();
+-
+- json.WriteString("BlockCount");
+- json.WriteNumber(stat.statistics.blockCount);
+- json.WriteString("BlockBytes");
+- json.WriteNumber(stat.statistics.blockBytes);
+- json.WriteString("AllocationCount");
+- json.WriteNumber(stat.statistics.allocationCount);
+- json.WriteString("AllocationBytes");
+- json.WriteNumber(stat.statistics.allocationBytes);
+- json.WriteString("UnusedRangeCount");
+- json.WriteNumber(stat.unusedRangeCount);
+-
+- if (stat.statistics.allocationCount > 1)
+- {
+- json.WriteString("AllocationSizeMin");
+- json.WriteNumber(stat.allocationSizeMin);
+- json.WriteString("AllocationSizeMax");
+- json.WriteNumber(stat.allocationSizeMax);
+- }
+- if (stat.unusedRangeCount > 1)
+- {
+- json.WriteString("UnusedRangeSizeMin");
+- json.WriteNumber(stat.unusedRangeSizeMin);
+- json.WriteString("UnusedRangeSizeMax");
+- json.WriteNumber(stat.unusedRangeSizeMax);
+- }
+- json.EndObject();
+-}
+-#endif // _VMA_JSON_WRITER
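+-
+-// Usage sketch (illustrative; 'pCallbacks' is assumed): writer nesting is
+-// tracked on m_Stack and verified by the destructor's assertions, so
+-// Begin/End calls must balance before the writer goes out of scope.
+-//
+-//   VmaStringBuilder sb(pCallbacks);
+-//   {
+-//       VmaJsonWriter json(pCallbacks, sb);
+-//       json.BeginObject();
+-//       json.WriteString("Total");
+-//       json.WriteNumber(uint64_t(1048576));
+-//       json.WriteString("Mapped");
+-//       json.WriteBool(false);
+-//       json.EndObject();
+-//   } // ~VmaJsonWriter asserts the stack is empty here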
+-
+-#ifndef _VMA_MAPPING_HYSTERESIS
+-
+-class VmaMappingHysteresis
+-{
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaMappingHysteresis)
+-public:
+- VmaMappingHysteresis() = default;
+-
+- uint32_t GetExtraMapping() const { return m_ExtraMapping; }
+-
+- // Call when Map was called.
+- // Returns true if the block switched to keeping an extra +1 mapping reference count.
+- bool PostMap()
+- {
+-#if VMA_MAPPING_HYSTERESIS_ENABLED
+- if(m_ExtraMapping == 0)
+- {
+- ++m_MajorCounter;
+- if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING)
+- {
+- m_ExtraMapping = 1;
+- m_MajorCounter = 0;
+- m_MinorCounter = 0;
+- return true;
+- }
+- }
+- else // m_ExtraMapping == 1
+- PostMinorCounter();
+-#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
+- return false;
+- }
+-
+- // Call when Unmap was called.
+- void PostUnmap()
+- {
+-#if VMA_MAPPING_HYSTERESIS_ENABLED
+- if(m_ExtraMapping == 0)
+- ++m_MajorCounter;
+- else // m_ExtraMapping == 1
+- PostMinorCounter();
+-#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
+- }
+-
+- // Call when allocation was made from the memory block.
+- void PostAlloc()
+- {
+-#if VMA_MAPPING_HYSTERESIS_ENABLED
+- if(m_ExtraMapping == 1)
+- ++m_MajorCounter;
+- else // m_ExtraMapping == 0
+- PostMinorCounter();
+-#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
+- }
+-
+- // Call when allocation was freed from the memory block.
+- // Returns true if the extra +1 mapping reference count was dropped.
+- bool PostFree()
+- {
+-#if VMA_MAPPING_HYSTERESIS_ENABLED
+- if(m_ExtraMapping == 1)
+- {
+- ++m_MajorCounter;
+- if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING &&
+- m_MajorCounter > m_MinorCounter + 1)
+- {
+- m_ExtraMapping = 0;
+- m_MajorCounter = 0;
+- m_MinorCounter = 0;
+- return true;
+- }
+- }
+- else // m_ExtraMapping == 0
+- PostMinorCounter();
+-#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
+- return false;
+- }
+-
+-private:
+- static const int32_t COUNTER_MIN_EXTRA_MAPPING = 7;
+-
+- uint32_t m_MinorCounter = 0;
+- uint32_t m_MajorCounter = 0;
+- uint32_t m_ExtraMapping = 0; // 0 or 1.
+-
+- void PostMinorCounter()
+- {
+- if(m_MinorCounter < m_MajorCounter)
+- {
+- ++m_MinorCounter;
+- }
+- else if(m_MajorCounter > 0)
+- {
+- --m_MajorCounter;
+- --m_MinorCounter;
+- }
+- }
+-};
+-
+-#endif // _VMA_MAPPING_HYSTERESIS
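+-
+-// Behavior sketch (illustrative): once enough Map calls accumulate
+-// (COUNTER_MIN_EXTRA_MAPPING == 7), PostMap() returns true a single time and
+-// the block keeps one extra mapping reference, so short Map/Unmap bursts no
+-// longer hit vkMapMemory/vkUnmapMemory; a later streak of frees makes
+-// PostFree() return true and drop that extra reference.
+-//
+-//   VmaMappingHysteresis h;
+-//   bool gainedExtraRef = false;
+-//   for (int i = 0; i < 7; ++i)
+-//       gainedExtraRef = h.PostMap(); // returns true on the 7th call
+-//   // h.GetExtraMapping() == 1 until PostFree() decides to drop it.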
+-
+-#ifndef _VMA_DEVICE_MEMORY_BLOCK
+-/*
+-Represents a single block of device memory (`VkDeviceMemory`) with all the
+-data about its regions (aka suballocations, #VmaAllocation), both assigned and free.
+-
+-Thread-safety:
+-- Access to m_pMetadata must be externally synchronized.
+-- Map, Unmap, Bind* are synchronized internally.
+-*/
+-class VmaDeviceMemoryBlock
+-{
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaDeviceMemoryBlock)
+-public:
+- VmaBlockMetadata* m_pMetadata;
+-
+- VmaDeviceMemoryBlock(VmaAllocator hAllocator);
+- ~VmaDeviceMemoryBlock();
+-
+- // Always call after construction.
+- void Init(
+- VmaAllocator hAllocator,
+- VmaPool hParentPool,
+- uint32_t newMemoryTypeIndex,
+- VkDeviceMemory newMemory,
+- VkDeviceSize newSize,
+- uint32_t id,
+- uint32_t algorithm,
+- VkDeviceSize bufferImageGranularity);
+- // Always call before destruction.
+- void Destroy(VmaAllocator allocator);
+-
+- VmaPool GetParentPool() const { return m_hParentPool; }
+- VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
+- uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
+- uint32_t GetId() const { return m_Id; }
+- void* GetMappedData() const { return m_pMappedData; }
+- uint32_t GetMapRefCount() const { return m_MapCount; }
+-
+- // Call when allocation/free was made from m_pMetadata.
+- // Used for m_MappingHysteresis.
+- void PostAlloc(VmaAllocator hAllocator);
+- void PostFree(VmaAllocator hAllocator);
+-
+- // Validates all data structures inside this object. If not valid, returns false.
+- bool Validate() const;
+- VkResult CheckCorruption(VmaAllocator hAllocator);
+-
+- // ppData can be null.
+- VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
+- void Unmap(VmaAllocator hAllocator, uint32_t count);
+-
+- VkResult WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
+- VkResult ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
+-
+- VkResult BindBufferMemory(
+- const VmaAllocator hAllocator,
+- const VmaAllocation hAllocation,
+- VkDeviceSize allocationLocalOffset,
+- VkBuffer hBuffer,
+- const void* pNext);
+- VkResult BindImageMemory(
+- const VmaAllocator hAllocator,
+- const VmaAllocation hAllocation,
+- VkDeviceSize allocationLocalOffset,
+- VkImage hImage,
+- const void* pNext);
+-
+-private:
+- VmaPool m_hParentPool; // VK_NULL_HANDLE if it does not belong to a custom pool.
+- uint32_t m_MemoryTypeIndex;
+- uint32_t m_Id;
+- VkDeviceMemory m_hMemory;
+-
+- /*
+- Protects access to m_hMemory so it is not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
+- Also protects m_MapCount, m_pMappedData.
+- Allocations, deallocations, and any change in m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
+- */
+- VMA_MUTEX m_MapAndBindMutex;
+- VmaMappingHysteresis m_MappingHysteresis;
+- uint32_t m_MapCount;
+- void* m_pMappedData;
+-};
+-#endif // _VMA_DEVICE_MEMORY_BLOCK
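+-
+-// Usage sketch (illustrative; 'hAllocator' and an initialized 'block' are
+-// assumed): Map/Unmap are reference counted per block, so nested mappings
+-// reuse the same host pointer and only the last Unmap releases it.
+-//
+-//   void* pData = VMA_NULL;
+-//   VkResult res = block.Map(hAllocator, 1, &pData);
+-//   if (res == VK_SUCCESS)
+-//   {
+-//       // ... read/write through pData ...
+-//       block.Unmap(hAllocator, 1);
+-//   }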
+-
+-#ifndef _VMA_ALLOCATION_T
+-struct VmaAllocation_T
+-{
+- friend struct VmaDedicatedAllocationListItemTraits;
+-
+- enum FLAGS
+- {
+- FLAG_PERSISTENT_MAP = 0x01,
+- FLAG_MAPPING_ALLOWED = 0x02,
+- };
+-
+-public:
+- enum ALLOCATION_TYPE
+- {
+- ALLOCATION_TYPE_NONE,
+- ALLOCATION_TYPE_BLOCK,
+- ALLOCATION_TYPE_DEDICATED,
+- };
+-
+- // This struct is allocated using VmaPoolAllocator.
+- VmaAllocation_T(bool mappingAllowed);
+- ~VmaAllocation_T();
+-
+- void InitBlockAllocation(
+- VmaDeviceMemoryBlock* block,
+- VmaAllocHandle allocHandle,
+- VkDeviceSize alignment,
+- VkDeviceSize size,
+- uint32_t memoryTypeIndex,
+- VmaSuballocationType suballocationType,
+- bool mapped);
+- // A non-null pMappedData means the allocation was created with the MAPPED flag.
+- void InitDedicatedAllocation(
+- VmaPool hParentPool,
+- uint32_t memoryTypeIndex,
+- VkDeviceMemory hMemory,
+- VmaSuballocationType suballocationType,
+- void* pMappedData,
+- VkDeviceSize size);
+-
+- ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
+- VkDeviceSize GetAlignment() const { return m_Alignment; }
+- VkDeviceSize GetSize() const { return m_Size; }
+- void* GetUserData() const { return m_pUserData; }
+- const char* GetName() const { return m_pName; }
+- VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
+-
+- VmaDeviceMemoryBlock* GetBlock() const { VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); return m_BlockAllocation.m_Block; }
+- uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
+- bool IsPersistentMap() const { return (m_Flags & FLAG_PERSISTENT_MAP) != 0; }
+- bool IsMappingAllowed() const { return (m_Flags & FLAG_MAPPING_ALLOWED) != 0; }
+-
+- void SetUserData(VmaAllocator hAllocator, void* pUserData) { m_pUserData = pUserData; }
+- void SetName(VmaAllocator hAllocator, const char* pName);
+- void FreeName(VmaAllocator hAllocator);
+- uint8_t SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation);
+- VmaAllocHandle GetAllocHandle() const;
+- VkDeviceSize GetOffset() const;
+- VmaPool GetParentPool() const;
+- VkDeviceMemory GetMemory() const;
+- void* GetMappedData() const;
+-
+- void BlockAllocMap();
+- void BlockAllocUnmap();
+- VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
+- void DedicatedAllocUnmap(VmaAllocator hAllocator);
+-
+-#if VMA_STATS_STRING_ENABLED
+- uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
+-
+- void InitBufferImageUsage(uint32_t bufferImageUsage);
+- void PrintParameters(class VmaJsonWriter& json) const;
+-#endif
+-
+-private:
+- // Allocation out of VmaDeviceMemoryBlock.
+- struct BlockAllocation
+- {
+- VmaDeviceMemoryBlock* m_Block;
+- VmaAllocHandle m_AllocHandle;
+- };
+- // Allocation for an object that has its own private VkDeviceMemory.
+- struct DedicatedAllocation
+- {
+- VmaPool m_hParentPool; // VK_NULL_HANDLE if it does not belong to a custom pool.
+- VkDeviceMemory m_hMemory;
+- void* m_pMappedData; // Not null means memory is mapped.
+- VmaAllocation_T* m_Prev;
+- VmaAllocation_T* m_Next;
+- };
+- union
+- {
+- // Allocation out of VmaDeviceMemoryBlock.
+- BlockAllocation m_BlockAllocation;
+- // Allocation for an object that has its own private VkDeviceMemory.
+- DedicatedAllocation m_DedicatedAllocation;
+- };
+-
+- VkDeviceSize m_Alignment;
+- VkDeviceSize m_Size;
+- void* m_pUserData;
+- char* m_pName;
+- uint32_t m_MemoryTypeIndex;
+- uint8_t m_Type; // ALLOCATION_TYPE
+- uint8_t m_SuballocationType; // VmaSuballocationType
+- // Reference counter for vmaMapMemory()/vmaUnmapMemory().
+- uint8_t m_MapCount;
+- uint8_t m_Flags; // enum FLAGS
+-#if VMA_STATS_STRING_ENABLED
+- uint32_t m_BufferImageUsage; // 0 if unknown.
+-#endif
+-};
+-#endif // _VMA_ALLOCATION_T
+-
+-#ifndef _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS
+-struct VmaDedicatedAllocationListItemTraits
+-{
+- typedef VmaAllocation_T ItemType;
+-
+- static ItemType* GetPrev(const ItemType* item)
+- {
+- VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
+- return item->m_DedicatedAllocation.m_Prev;
+- }
+- static ItemType* GetNext(const ItemType* item)
+- {
+- VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
+- return item->m_DedicatedAllocation.m_Next;
+- }
+- static ItemType*& AccessPrev(ItemType* item)
+- {
+- VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
+- return item->m_DedicatedAllocation.m_Prev;
+- }
+- static ItemType*& AccessNext(ItemType* item)
+- {
+- VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
+- return item->m_DedicatedAllocation.m_Next;
+- }
+-};
+-#endif // _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS
+-
+-#ifndef _VMA_DEDICATED_ALLOCATION_LIST
+-/*
+-Stores a linked list of VmaAllocation_T objects.
+-Thread-safe, synchronized internally.
+-*/
+-class VmaDedicatedAllocationList
+-{
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaDedicatedAllocationList)
+-public:
+- VmaDedicatedAllocationList() {}
+- ~VmaDedicatedAllocationList();
+-
+- void Init(bool useMutex) { m_UseMutex = useMutex; }
+- bool Validate();
+-
+- void AddDetailedStatistics(VmaDetailedStatistics& inoutStats);
+- void AddStatistics(VmaStatistics& inoutStats);
+-#if VMA_STATS_STRING_ENABLED
+- // Writes JSON array with the list of allocations.
+- void BuildStatsString(VmaJsonWriter& json);
+-#endif
+-
+- bool IsEmpty();
+- void Register(VmaAllocation alloc);
+- void Unregister(VmaAllocation alloc);
+-
+-private:
+- typedef VmaIntrusiveLinkedList<VmaDedicatedAllocationListItemTraits> DedicatedAllocationLinkedList;
+-
+- bool m_UseMutex = true;
+- VMA_RW_MUTEX m_Mutex;
+- DedicatedAllocationLinkedList m_AllocationList;
+-};
+-
+-#ifndef _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS
+-
+-VmaDedicatedAllocationList::~VmaDedicatedAllocationList()
+-{
+- VMA_HEAVY_ASSERT(Validate());
+-
+- if (!m_AllocationList.IsEmpty())
+- {
+- VMA_ASSERT(false && "Unfreed dedicated allocations found!");
+- }
+-}
+-
+-bool VmaDedicatedAllocationList::Validate()
+-{
+- const size_t declaredCount = m_AllocationList.GetCount();
+- size_t actualCount = 0;
+- VmaMutexLockRead lock(m_Mutex, m_UseMutex);
+- for (VmaAllocation alloc = m_AllocationList.Front();
+- alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc))
+- {
+- ++actualCount;
+- }
+- VMA_VALIDATE(actualCount == declaredCount);
+-
+- return true;
+-}
+-
+-void VmaDedicatedAllocationList::AddDetailedStatistics(VmaDetailedStatistics& inoutStats)
+-{
+- for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item))
+- {
+- const VkDeviceSize size = item->GetSize();
+- inoutStats.statistics.blockCount++;
+- inoutStats.statistics.blockBytes += size;
+- VmaAddDetailedStatisticsAllocation(inoutStats, item->GetSize());
+- }
+-}
+-
+-void VmaDedicatedAllocationList::AddStatistics(VmaStatistics& inoutStats)
+-{
+- VmaMutexLockRead lock(m_Mutex, m_UseMutex);
+-
+- const uint32_t allocCount = (uint32_t)m_AllocationList.GetCount();
+- inoutStats.blockCount += allocCount;
+- inoutStats.allocationCount += allocCount;
+-
+- for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item))
+- {
+- const VkDeviceSize size = item->GetSize();
+- inoutStats.blockBytes += size;
+- inoutStats.allocationBytes += size;
+- }
+-}
+-
+-#if VMA_STATS_STRING_ENABLED
+-void VmaDedicatedAllocationList::BuildStatsString(VmaJsonWriter& json)
+-{
+- VmaMutexLockRead lock(m_Mutex, m_UseMutex);
+- json.BeginArray();
+- for (VmaAllocation alloc = m_AllocationList.Front();
+- alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc))
+- {
+- json.BeginObject(true);
+- alloc->PrintParameters(json);
+- json.EndObject();
+- }
+- json.EndArray();
+-}
+-#endif // VMA_STATS_STRING_ENABLED
+-
+-bool VmaDedicatedAllocationList::IsEmpty()
+-{
+- VmaMutexLockRead lock(m_Mutex, m_UseMutex);
+- return m_AllocationList.IsEmpty();
+-}
+-
+-void VmaDedicatedAllocationList::Register(VmaAllocation alloc)
+-{
+- VmaMutexLockWrite lock(m_Mutex, m_UseMutex);
+- m_AllocationList.PushBack(alloc);
+-}
+-
+-void VmaDedicatedAllocationList::Unregister(VmaAllocation alloc)
+-{
+- VmaMutexLockWrite lock(m_Mutex, m_UseMutex);
+- m_AllocationList.Remove(alloc);
+-}
+-#endif // _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS
+-#endif // _VMA_DEDICATED_ALLOCATION_LIST
+-
+-#ifndef _VMA_SUBALLOCATION
+-/*
+-Represents a region of VmaDeviceMemoryBlock that is either assigned (returned
+-as an allocated memory block) or free.
+-*/
+-struct VmaSuballocation
+-{
+- VkDeviceSize offset;
+- VkDeviceSize size;
+- void* userData;
+- VmaSuballocationType type;
+-};
+-
+-// Comparator for offsets.
+-struct VmaSuballocationOffsetLess
+-{
+- bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
+- {
+- return lhs.offset < rhs.offset;
+- }
+-};
+-
+-struct VmaSuballocationOffsetGreater
+-{
+- bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
+- {
+- return lhs.offset > rhs.offset;
+- }
+-};
+-
+-struct VmaSuballocationItemSizeLess
+-{
+- bool operator()(const VmaSuballocationList::iterator lhs,
+- const VmaSuballocationList::iterator rhs) const
+- {
+- return lhs->size < rhs->size;
+- }
+-
+- bool operator()(const VmaSuballocationList::iterator lhs,
+- VkDeviceSize rhsSize) const
+- {
+- return lhs->size < rhsSize;
+- }
+-};
+-#endif // _VMA_SUBALLOCATION
+-
+-#ifndef _VMA_ALLOCATION_REQUEST
+-/*
+-Parameters of planned allocation inside a VmaDeviceMemoryBlock.
+-item points to a FREE suballocation.
+-*/
+-struct VmaAllocationRequest
+-{
+- VmaAllocHandle allocHandle;
+- VkDeviceSize size;
+- VmaSuballocationList::iterator item;
+- void* customData;
+- uint64_t algorithmData;
+- VmaAllocationRequestType type;
+-};
+-#endif // _VMA_ALLOCATION_REQUEST
+-
+-#ifndef _VMA_BLOCK_METADATA
+-/*
+-Data structure used for bookkeeping of allocations and unused ranges of memory
+-in a single VkDeviceMemory block.
+-*/
+-class VmaBlockMetadata
+-{
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata)
+-public:
+- // pAllocationCallbacks, if not null, must be owned externally - alive and unchanged for the whole lifetime of this object.
+- VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks,
+- VkDeviceSize bufferImageGranularity, bool isVirtual);
+- virtual ~VmaBlockMetadata() = default;
+-
+- virtual void Init(VkDeviceSize size) { m_Size = size; }
+- bool IsVirtual() const { return m_IsVirtual; }
+- VkDeviceSize GetSize() const { return m_Size; }
+-
+- // Validates all data structures inside this object. If not valid, returns false.
+- virtual bool Validate() const = 0;
+- virtual size_t GetAllocationCount() const = 0;
+- virtual size_t GetFreeRegionsCount() const = 0;
+- virtual VkDeviceSize GetSumFreeSize() const = 0;
+- // Returns true if this block is empty - contains only a single free suballocation.
+- virtual bool IsEmpty() const = 0;
+- virtual void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) = 0;
+- virtual VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const = 0;
+- virtual void* GetAllocationUserData(VmaAllocHandle allocHandle) const = 0;
+-
+- virtual VmaAllocHandle GetAllocationListBegin() const = 0;
+- virtual VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const = 0;
+- virtual VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const = 0;
+-
+- // Shouldn't modify blockCount.
+- virtual void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const = 0;
+- virtual void AddStatistics(VmaStatistics& inoutStats) const = 0;
+-
+-#if VMA_STATS_STRING_ENABLED
+- virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
+-#endif
+-
+- // Tries to find a place for suballocation with given parameters inside this block.
+- // If succeeded, fills pAllocationRequest and returns true.
+- // If failed, returns false.
+- virtual bool CreateAllocationRequest(
+- VkDeviceSize allocSize,
+- VkDeviceSize allocAlignment,
+- bool upperAddress,
+- VmaSuballocationType allocType,
+- // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
+- uint32_t strategy,
+- VmaAllocationRequest* pAllocationRequest) = 0;
+-
+- virtual VkResult CheckCorruption(const void* pBlockData) = 0;
+-
+- // Makes actual allocation based on request. Request must already be checked and valid.
+- virtual void Alloc(
+- const VmaAllocationRequest& request,
+- VmaSuballocationType type,
+- void* userData) = 0;
+-
+- // Frees suballocation assigned to given memory region.
+- virtual void Free(VmaAllocHandle allocHandle) = 0;
+-
+- // Frees all allocations.
+- // Careful! Don't call it if there are VmaAllocation objects owned by userData of cleared allocations!
+- virtual void Clear() = 0;
+-
+- virtual void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) = 0;
+- virtual void DebugLogAllAllocations() const = 0;
+-
+-protected:
+- const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
+- VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
+- VkDeviceSize GetDebugMargin() const { return VkDeviceSize(IsVirtual() ? 0 : VMA_DEBUG_MARGIN); }
+-
+- void DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const;
+-#if VMA_STATS_STRING_ENABLED
+- void PrintDetailedMap_Begin(class VmaJsonWriter& json,
+- VkDeviceSize unusedBytes,
+- size_t allocationCount,
+- size_t unusedRangeCount) const;
+- void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
+- VkDeviceSize offset, VkDeviceSize size, void* userData) const;
+- void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
+- VkDeviceSize offset,
+- VkDeviceSize size) const;
+- void PrintDetailedMap_End(class VmaJsonWriter& json) const;
+-#endif
+-
+-private:
+- VkDeviceSize m_Size;
+- const VkAllocationCallbacks* m_pAllocationCallbacks;
+- const VkDeviceSize m_BufferImageGranularity;
+- const bool m_IsVirtual;
+-};
+-
+-#ifndef _VMA_BLOCK_METADATA_FUNCTIONS
+-VmaBlockMetadata::VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks,
+- VkDeviceSize bufferImageGranularity, bool isVirtual)
+- : m_Size(0),
+- m_pAllocationCallbacks(pAllocationCallbacks),
+- m_BufferImageGranularity(bufferImageGranularity),
+- m_IsVirtual(isVirtual) {}
+-
+-void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const
+-{
+- if (IsVirtual())
+- {
+- VMA_DEBUG_LOG_FORMAT("UNFREED VIRTUAL ALLOCATION; Offset: %llu; Size: %llu; UserData: %p", offset, size, userData);
+- }
+- else
+- {
+- VMA_ASSERT(userData != VMA_NULL);
+- VmaAllocation allocation = reinterpret_cast<VmaAllocation>(userData);
+-
+- userData = allocation->GetUserData();
+- const char* name = allocation->GetName();
+-
+-#if VMA_STATS_STRING_ENABLED
+- VMA_DEBUG_LOG_FORMAT("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %s; Usage: %u",
+- offset, size, userData, name ? name : "vma_empty",
+- VMA_SUBALLOCATION_TYPE_NAMES[allocation->GetSuballocationType()],
+- allocation->GetBufferImageUsage());
+-#else
+- VMA_DEBUG_LOG_FORMAT("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %u",
+- offset, size, userData, name ? name : "vma_empty",
+- (uint32_t)allocation->GetSuballocationType());
+-#endif // VMA_STATS_STRING_ENABLED
+- }
+-
+-}
+-
+-#if VMA_STATS_STRING_ENABLED
+-void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
+- VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount) const
+-{
+- json.WriteString("TotalBytes");
+- json.WriteNumber(GetSize());
+-
+- json.WriteString("UnusedBytes");
+- json.WriteNumber(unusedBytes);
+-
+- json.WriteString("Allocations");
+- json.WriteNumber((uint64_t)allocationCount);
+-
+- json.WriteString("UnusedRanges");
+- json.WriteNumber((uint64_t)unusedRangeCount);
+-
+- json.WriteString("Suballocations");
+- json.BeginArray();
+-}
+-
+-void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
+- VkDeviceSize offset, VkDeviceSize size, void* userData) const
+-{
+- json.BeginObject(true);
+-
+- json.WriteString("Offset");
+- json.WriteNumber(offset);
+-
+- if (IsVirtual())
+- {
+- json.WriteString("Size");
+- json.WriteNumber(size);
+- if (userData)
+- {
+- json.WriteString("CustomData");
+- json.BeginString();
+- json.ContinueString_Pointer(userData);
+- json.EndString();
+- }
+- }
+- else
+- {
+- ((VmaAllocation)userData)->PrintParameters(json);
+- }
+-
+- json.EndObject();
+-}
+-
+-void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
+- VkDeviceSize offset, VkDeviceSize size) const
+-{
+- json.BeginObject(true);
+-
+- json.WriteString("Offset");
+- json.WriteNumber(offset);
+-
+- json.WriteString("Type");
+- json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
+-
+- json.WriteString("Size");
+- json.WriteNumber(size);
+-
+- json.EndObject();
+-}
+-
+-void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
+-{
+- json.EndArray();
+-}
+-#endif // VMA_STATS_STRING_ENABLED
+-#endif // _VMA_BLOCK_METADATA_FUNCTIONS
+-#endif // _VMA_BLOCK_METADATA
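+-
+-// Illustrative sketch, not part of VMA: the intended two-phase use of the
+-// VmaBlockMetadata interface above - CreateAllocationRequest() finds a place,
+-// Alloc() commits it. `TryAllocate` is a hypothetical helper and error
+-// handling is reduced to a bool. Kept in `#if 0` like the other disabled
+-// blocks in this file.
+-#if 0
+-bool TryAllocate(VmaBlockMetadata& meta, VkDeviceSize size, VkDeviceSize align,
+-    void* userData, VmaAllocHandle* outHandle)
+-{
+-    VmaAllocationRequest req = {};
+-    // Phase 1: search only - no block state is modified on failure.
+-    if (!meta.CreateAllocationRequest(size, align, false /*upperAddress*/,
+-            VMA_SUBALLOCATION_TYPE_BUFFER, 0 /*strategy: default*/, &req))
+-        return false; // No suitable free region in this block.
+-    // Phase 2: commit the already-validated request.
+-    meta.Alloc(req, VMA_SUBALLOCATION_TYPE_BUFFER, userData);
+-    *outHandle = req.allocHandle; // Later released with meta.Free(*outHandle).
+-    return true;
+-}
+-#endif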
+-
+-#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY
+-// Before deleting an object of this class, remember to call 'Destroy()'.
+-class VmaBlockBufferImageGranularity final
+-{
+-public:
+- struct ValidationContext
+- {
+- const VkAllocationCallbacks* allocCallbacks;
+- uint16_t* pageAllocs;
+- };
+-
+- VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity);
+- ~VmaBlockBufferImageGranularity();
+-
+- bool IsEnabled() const { return m_BufferImageGranularity > MAX_LOW_BUFFER_IMAGE_GRANULARITY; }
+-
+- void Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size);
+-    // Before destroying this object, call Destroy() to free its memory.
+- void Destroy(const VkAllocationCallbacks* pAllocationCallbacks);
+-
+- void RoundupAllocRequest(VmaSuballocationType allocType,
+- VkDeviceSize& inOutAllocSize,
+- VkDeviceSize& inOutAllocAlignment) const;
+-
+- bool CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset,
+- VkDeviceSize allocSize,
+- VkDeviceSize blockOffset,
+- VkDeviceSize blockSize,
+- VmaSuballocationType allocType) const;
+-
+- void AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size);
+- void FreePages(VkDeviceSize offset, VkDeviceSize size);
+- void Clear();
+-
+- ValidationContext StartValidation(const VkAllocationCallbacks* pAllocationCallbacks,
+-        bool isVirtual) const;
+- bool Validate(ValidationContext& ctx, VkDeviceSize offset, VkDeviceSize size) const;
+- bool FinishValidation(ValidationContext& ctx) const;
+-
+-private:
+- static const uint16_t MAX_LOW_BUFFER_IMAGE_GRANULARITY = 256;
+-
+- struct RegionInfo
+- {
+- uint8_t allocType;
+- uint16_t allocCount;
+- };
+-
+- VkDeviceSize m_BufferImageGranularity;
+- uint32_t m_RegionCount;
+- RegionInfo* m_RegionInfo;
+-
+- uint32_t GetStartPage(VkDeviceSize offset) const { return OffsetToPageIndex(offset & ~(m_BufferImageGranularity - 1)); }
+- uint32_t GetEndPage(VkDeviceSize offset, VkDeviceSize size) const { return OffsetToPageIndex((offset + size - 1) & ~(m_BufferImageGranularity - 1)); }
+-
+- uint32_t OffsetToPageIndex(VkDeviceSize offset) const;
+- void AllocPage(RegionInfo& page, uint8_t allocType);
+-};
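+-
+-// Illustrative aside, not part of VMA: the page helpers above assume
+-// m_BufferImageGranularity is a power of two, so masking with
+-// ~(granularity - 1) rounds an offset down to a page boundary and
+-// OffsetToPageIndex() can divide by shifting. Worked example with
+-// m_BufferImageGranularity = 1024 (VMA_BITSCAN_MSB(1024) == 10):
+-//   offset 1500 -> 1500 & ~1023 = 1024 -> page index 1024 >> 10 = 1
+-//   offset 1023 -> 1023 & ~1023 = 0    -> page index 0
+-//   GetEndPage(1500, 600): last byte at 2099 -> 2048 -> page index 2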
+-
+-#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS
+-VmaBlockBufferImageGranularity::VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity)
+- : m_BufferImageGranularity(bufferImageGranularity),
+- m_RegionCount(0),
+- m_RegionInfo(VMA_NULL) {}
+-
+-VmaBlockBufferImageGranularity::~VmaBlockBufferImageGranularity()
+-{
+-    VMA_ASSERT(m_RegionInfo == VMA_NULL && "Destroy() not called before deleting the object!");
+-}
+-
+-void VmaBlockBufferImageGranularity::Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size)
+-{
+- if (IsEnabled())
+- {
+- m_RegionCount = static_cast<uint32_t>(VmaDivideRoundingUp(size, m_BufferImageGranularity));
+- m_RegionInfo = vma_new_array(pAllocationCallbacks, RegionInfo, m_RegionCount);
+- memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo));
+- }
+-}
+-
+-void VmaBlockBufferImageGranularity::Destroy(const VkAllocationCallbacks* pAllocationCallbacks)
+-{
+- if (m_RegionInfo)
+- {
+- vma_delete_array(pAllocationCallbacks, m_RegionInfo, m_RegionCount);
+- m_RegionInfo = VMA_NULL;
+- }
+-}
+-
+-void VmaBlockBufferImageGranularity::RoundupAllocRequest(VmaSuballocationType allocType,
+- VkDeviceSize& inOutAllocSize,
+- VkDeviceSize& inOutAllocAlignment) const
+-{
+- if (m_BufferImageGranularity > 1 &&
+- m_BufferImageGranularity <= MAX_LOW_BUFFER_IMAGE_GRANULARITY)
+- {
+- if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
+- allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
+- allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
+- {
+- inOutAllocAlignment = VMA_MAX(inOutAllocAlignment, m_BufferImageGranularity);
+- inOutAllocSize = VmaAlignUp(inOutAllocSize, m_BufferImageGranularity);
+- }
+- }
+-}
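+-
+-// Illustrative aside, not part of VMA: a worked pass through
+-// RoundupAllocRequest() above. With m_BufferImageGranularity = 64 (greater
+-// than 1 but <= MAX_LOW_BUFFER_IMAGE_GRANULARITY) and an IMAGE_OPTIMAL
+-// request of size 100 / alignment 16:
+-//   inOutAllocAlignment = VMA_MAX(16, 64)     = 64
+-//   inOutAllocSize      = VmaAlignUp(100, 64) = 128
+-// so small-granularity blocks simply pad image allocations instead of
+-// tracking per-page conflicts.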
+-
+-bool VmaBlockBufferImageGranularity::CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset,
+- VkDeviceSize allocSize,
+- VkDeviceSize blockOffset,
+- VkDeviceSize blockSize,
+- VmaSuballocationType allocType) const
+-{
+- if (IsEnabled())
+- {
+- uint32_t startPage = GetStartPage(inOutAllocOffset);
+- if (m_RegionInfo[startPage].allocCount > 0 &&
+- VmaIsBufferImageGranularityConflict(static_cast<VmaSuballocationType>(m_RegionInfo[startPage].allocType), allocType))
+- {
+- inOutAllocOffset = VmaAlignUp(inOutAllocOffset, m_BufferImageGranularity);
+- if (blockSize < allocSize + inOutAllocOffset - blockOffset)
+- return true;
+- ++startPage;
+- }
+- uint32_t endPage = GetEndPage(inOutAllocOffset, allocSize);
+- if (endPage != startPage &&
+- m_RegionInfo[endPage].allocCount > 0 &&
+- VmaIsBufferImageGranularityConflict(static_cast<VmaSuballocationType>(m_RegionInfo[endPage].allocType), allocType))
+- {
+- return true;
+- }
+- }
+- return false;
+-}
+-
+-void VmaBlockBufferImageGranularity::AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size)
+-{
+- if (IsEnabled())
+- {
+- uint32_t startPage = GetStartPage(offset);
+- AllocPage(m_RegionInfo[startPage], allocType);
+-
+- uint32_t endPage = GetEndPage(offset, size);
+- if (startPage != endPage)
+- AllocPage(m_RegionInfo[endPage], allocType);
+- }
+-}
+-
+-void VmaBlockBufferImageGranularity::FreePages(VkDeviceSize offset, VkDeviceSize size)
+-{
+- if (IsEnabled())
+- {
+- uint32_t startPage = GetStartPage(offset);
+- --m_RegionInfo[startPage].allocCount;
+- if (m_RegionInfo[startPage].allocCount == 0)
+- m_RegionInfo[startPage].allocType = VMA_SUBALLOCATION_TYPE_FREE;
+- uint32_t endPage = GetEndPage(offset, size);
+- if (startPage != endPage)
+- {
+- --m_RegionInfo[endPage].allocCount;
+- if (m_RegionInfo[endPage].allocCount == 0)
+- m_RegionInfo[endPage].allocType = VMA_SUBALLOCATION_TYPE_FREE;
+- }
+- }
+-}
+-
+-void VmaBlockBufferImageGranularity::Clear()
+-{
+- if (m_RegionInfo)
+- memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo));
+-}
+-
+-VmaBlockBufferImageGranularity::ValidationContext VmaBlockBufferImageGranularity::StartValidation(
+-    const VkAllocationCallbacks* pAllocationCallbacks, bool isVirtual) const
+-{
+- ValidationContext ctx{ pAllocationCallbacks, VMA_NULL };
+-    if (!isVirtual && IsEnabled())
+- {
+- ctx.pageAllocs = vma_new_array(pAllocationCallbacks, uint16_t, m_RegionCount);
+- memset(ctx.pageAllocs, 0, m_RegionCount * sizeof(uint16_t));
+- }
+- return ctx;
+-}
+-
+-bool VmaBlockBufferImageGranularity::Validate(ValidationContext& ctx,
+- VkDeviceSize offset, VkDeviceSize size) const
+-{
+- if (IsEnabled())
+- {
+- uint32_t start = GetStartPage(offset);
+- ++ctx.pageAllocs[start];
+- VMA_VALIDATE(m_RegionInfo[start].allocCount > 0);
+-
+- uint32_t end = GetEndPage(offset, size);
+- if (start != end)
+- {
+- ++ctx.pageAllocs[end];
+- VMA_VALIDATE(m_RegionInfo[end].allocCount > 0);
+- }
+- }
+- return true;
+-}
+-
+-bool VmaBlockBufferImageGranularity::FinishValidation(ValidationContext& ctx) const
+-{
+- // Check proper page structure
+- if (IsEnabled())
+- {
+- VMA_ASSERT(ctx.pageAllocs != VMA_NULL && "Validation context not initialized!");
+-
+- for (uint32_t page = 0; page < m_RegionCount; ++page)
+- {
+- VMA_VALIDATE(ctx.pageAllocs[page] == m_RegionInfo[page].allocCount);
+- }
+- vma_delete_array(ctx.allocCallbacks, ctx.pageAllocs, m_RegionCount);
+- ctx.pageAllocs = VMA_NULL;
+- }
+- return true;
+-}
+-
+-uint32_t VmaBlockBufferImageGranularity::OffsetToPageIndex(VkDeviceSize offset) const
+-{
+- return static_cast<uint32_t>(offset >> VMA_BITSCAN_MSB(m_BufferImageGranularity));
+-}
+-
+-void VmaBlockBufferImageGranularity::AllocPage(RegionInfo& page, uint8_t allocType)
+-{
+-    // When the current alloc type is free, it can be overridden by the new type.
+- if (page.allocCount == 0 || (page.allocCount > 0 && page.allocType == VMA_SUBALLOCATION_TYPE_FREE))
+- page.allocType = allocType;
+-
+- ++page.allocCount;
+-}
+-#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS
+-#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY
+-
+-#if 0
+-#ifndef _VMA_BLOCK_METADATA_GENERIC
+-class VmaBlockMetadata_Generic : public VmaBlockMetadata
+-{
+- friend class VmaDefragmentationAlgorithm_Generic;
+- friend class VmaDefragmentationAlgorithm_Fast;
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_Generic)
+-public:
+- VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks,
+- VkDeviceSize bufferImageGranularity, bool isVirtual);
+- virtual ~VmaBlockMetadata_Generic() = default;
+-
+- size_t GetAllocationCount() const override { return m_Suballocations.size() - m_FreeCount; }
+- VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; }
+- bool IsEmpty() const override { return (m_Suballocations.size() == 1) && (m_FreeCount == 1); }
+- void Free(VmaAllocHandle allocHandle) override { FreeSuballocation(FindAtOffset((VkDeviceSize)allocHandle - 1)); }
+- VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }
+-
+- void Init(VkDeviceSize size) override;
+- bool Validate() const override;
+-
+- void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
+- void AddStatistics(VmaStatistics& inoutStats) const override;
+-
+-#if VMA_STATS_STRING_ENABLED
+- void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override;
+-#endif
+-
+- bool CreateAllocationRequest(
+- VkDeviceSize allocSize,
+- VkDeviceSize allocAlignment,
+- bool upperAddress,
+- VmaSuballocationType allocType,
+- uint32_t strategy,
+- VmaAllocationRequest* pAllocationRequest) override;
+-
+- VkResult CheckCorruption(const void* pBlockData) override;
+-
+- void Alloc(
+- const VmaAllocationRequest& request,
+- VmaSuballocationType type,
+- void* userData) override;
+-
+- void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
+- void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
+- VmaAllocHandle GetAllocationListBegin() const override;
+- VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
+- void Clear() override;
+- void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
+- void DebugLogAllAllocations() const override;
+-
+-private:
+- uint32_t m_FreeCount;
+- VkDeviceSize m_SumFreeSize;
+- VmaSuballocationList m_Suballocations;
+- // Suballocations that are free. Sorted by size, ascending.
+- VmaVector<VmaSuballocationList::iterator, VmaStlAllocator<VmaSuballocationList::iterator>> m_FreeSuballocationsBySize;
+-
+- VkDeviceSize AlignAllocationSize(VkDeviceSize size) const { return IsVirtual() ? size : VmaAlignUp(size, (VkDeviceSize)16); }
+-
+- VmaSuballocationList::iterator FindAtOffset(VkDeviceSize offset) const;
+- bool ValidateFreeSuballocationList() const;
+-
+- // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
+- // If yes, fills pOffset and returns true. If no, returns false.
+- bool CheckAllocation(
+- VkDeviceSize allocSize,
+- VkDeviceSize allocAlignment,
+- VmaSuballocationType allocType,
+- VmaSuballocationList::const_iterator suballocItem,
+- VmaAllocHandle* pAllocHandle) const;
+-
+-    // Given a free suballocation, merges it with the following one, which must also be free.
+-    void MergeFreeWithNext(VmaSuballocationList::iterator item);
+-    // Releases the given suballocation, making it free.
+-    // Merges it with adjacent free suballocations if applicable.
+-    // Returns an iterator to the new free suballocation at this place.
+-    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
+-    // Given a free suballocation, inserts it into the sorted list
+-    // m_FreeSuballocationsBySize if it is suitable.
+-    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
+-    // Given a free suballocation, removes it from the sorted list
+-    // m_FreeSuballocationsBySize if it is suitable.
+-    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
+-};
+-
+-#ifndef _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS
+-VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks,
+- VkDeviceSize bufferImageGranularity, bool isVirtual)
+- : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
+- m_FreeCount(0),
+- m_SumFreeSize(0),
+- m_Suballocations(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
+- m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(pAllocationCallbacks)) {}
+-
+-void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
+-{
+- VmaBlockMetadata::Init(size);
+-
+- m_FreeCount = 1;
+- m_SumFreeSize = size;
+-
+- VmaSuballocation suballoc = {};
+- suballoc.offset = 0;
+- suballoc.size = size;
+- suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+-
+- m_Suballocations.push_back(suballoc);
+- m_FreeSuballocationsBySize.push_back(m_Suballocations.begin());
+-}
+-
+-bool VmaBlockMetadata_Generic::Validate() const
+-{
+- VMA_VALIDATE(!m_Suballocations.empty());
+-
+- // Expected offset of new suballocation as calculated from previous ones.
+- VkDeviceSize calculatedOffset = 0;
+- // Expected number of free suballocations as calculated from traversing their list.
+- uint32_t calculatedFreeCount = 0;
+- // Expected sum size of free suballocations as calculated from traversing their list.
+- VkDeviceSize calculatedSumFreeSize = 0;
+- // Expected number of free suballocations that should be registered in
+- // m_FreeSuballocationsBySize calculated from traversing their list.
+- size_t freeSuballocationsToRegister = 0;
+- // True if previous visited suballocation was free.
+- bool prevFree = false;
+-
+- const VkDeviceSize debugMargin = GetDebugMargin();
+-
+- for (const auto& subAlloc : m_Suballocations)
+- {
+- // Actual offset of this suballocation doesn't match expected one.
+- VMA_VALIDATE(subAlloc.offset == calculatedOffset);
+-
+- const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
+- // Two adjacent free suballocations are invalid. They should be merged.
+- VMA_VALIDATE(!prevFree || !currFree);
+-
+- VmaAllocation alloc = (VmaAllocation)subAlloc.userData;
+- if (!IsVirtual())
+- {
+- VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
+- }
+-
+- if (currFree)
+- {
+- calculatedSumFreeSize += subAlloc.size;
+- ++calculatedFreeCount;
+- ++freeSuballocationsToRegister;
+-
+-            // Margin required between allocations - every free region must be at least that large.
+- VMA_VALIDATE(subAlloc.size >= debugMargin);
+- }
+- else
+- {
+- if (!IsVirtual())
+- {
+- VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == subAlloc.offset + 1);
+- VMA_VALIDATE(alloc->GetSize() == subAlloc.size);
+- }
+-
+- // Margin required between allocations - previous allocation must be free.
+- VMA_VALIDATE(debugMargin == 0 || prevFree);
+- }
+-
+- calculatedOffset += subAlloc.size;
+- prevFree = currFree;
+- }
+-
+- // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
+- // match expected one.
+- VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
+-
+- VkDeviceSize lastSize = 0;
+- for (size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
+- {
+- VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
+-
+- // Only free suballocations can be registered in m_FreeSuballocationsBySize.
+- VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
+- // They must be sorted by size ascending.
+- VMA_VALIDATE(suballocItem->size >= lastSize);
+-
+- lastSize = suballocItem->size;
+- }
+-
+- // Check if totals match calculated values.
+- VMA_VALIDATE(ValidateFreeSuballocationList());
+- VMA_VALIDATE(calculatedOffset == GetSize());
+- VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
+- VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
+-
+- return true;
+-}
+-
+-void VmaBlockMetadata_Generic::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
+-{
+- const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
+- inoutStats.statistics.blockCount++;
+- inoutStats.statistics.blockBytes += GetSize();
+-
+- for (const auto& suballoc : m_Suballocations)
+- {
+- if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
+- VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
+- else
+- VmaAddDetailedStatisticsUnusedRange(inoutStats, suballoc.size);
+- }
+-}
+-
+-void VmaBlockMetadata_Generic::AddStatistics(VmaStatistics& inoutStats) const
+-{
+- inoutStats.blockCount++;
+- inoutStats.allocationCount += (uint32_t)m_Suballocations.size() - m_FreeCount;
+- inoutStats.blockBytes += GetSize();
+- inoutStats.allocationBytes += GetSize() - m_SumFreeSize;
+-}
+-
+-#if VMA_STATS_STRING_ENABLED
+-void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const
+-{
+- PrintDetailedMap_Begin(json,
+- m_SumFreeSize, // unusedBytes
+- m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
+- m_FreeCount, // unusedRangeCount
+- mapRefCount);
+-
+- for (const auto& suballoc : m_Suballocations)
+- {
+- if (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
+- {
+- PrintDetailedMap_UnusedRange(json, suballoc.offset, suballoc.size);
+- }
+- else
+- {
+- PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
+- }
+- }
+-
+- PrintDetailedMap_End(json);
+-}
+-#endif // VMA_STATS_STRING_ENABLED
+-
+-bool VmaBlockMetadata_Generic::CreateAllocationRequest(
+- VkDeviceSize allocSize,
+- VkDeviceSize allocAlignment,
+- bool upperAddress,
+- VmaSuballocationType allocType,
+- uint32_t strategy,
+- VmaAllocationRequest* pAllocationRequest)
+-{
+- VMA_ASSERT(allocSize > 0);
+- VMA_ASSERT(!upperAddress);
+- VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
+- VMA_ASSERT(pAllocationRequest != VMA_NULL);
+- VMA_HEAVY_ASSERT(Validate());
+-
+- allocSize = AlignAllocationSize(allocSize);
+-
+- pAllocationRequest->type = VmaAllocationRequestType::Normal;
+- pAllocationRequest->size = allocSize;
+-
+- const VkDeviceSize debugMargin = GetDebugMargin();
+-
+- // There is not enough total free space in this block to fulfill the request: Early return.
+- if (m_SumFreeSize < allocSize + debugMargin)
+- {
+- return false;
+- }
+-
+-    // Search m_FreeSuballocationsBySize efficiently (it is kept sorted by size, ascending).
+- const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
+- if (freeSuballocCount > 0)
+- {
+- if (strategy == 0 ||
+- strategy == VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT)
+- {
+- // Find first free suballocation with size not less than allocSize + debugMargin.
+- VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
+- m_FreeSuballocationsBySize.data(),
+- m_FreeSuballocationsBySize.data() + freeSuballocCount,
+- allocSize + debugMargin,
+- VmaSuballocationItemSizeLess());
+- size_t index = it - m_FreeSuballocationsBySize.data();
+- for (; index < freeSuballocCount; ++index)
+- {
+- if (CheckAllocation(
+- allocSize,
+- allocAlignment,
+- allocType,
+- m_FreeSuballocationsBySize[index],
+- &pAllocationRequest->allocHandle))
+- {
+- pAllocationRequest->item = m_FreeSuballocationsBySize[index];
+- return true;
+- }
+- }
+- }
+- else if (strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
+- {
+- for (VmaSuballocationList::iterator it = m_Suballocations.begin();
+- it != m_Suballocations.end();
+- ++it)
+- {
+- if (it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
+- allocSize,
+- allocAlignment,
+- allocType,
+- it,
+- &pAllocationRequest->allocHandle))
+- {
+- pAllocationRequest->item = it;
+- return true;
+- }
+- }
+- }
+- else
+- {
+-            VMA_ASSERT(strategy & (VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT | VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT));
+-            // Search starting from the biggest suballocations.
+- for (size_t index = freeSuballocCount; index--; )
+- {
+- if (CheckAllocation(
+- allocSize,
+- allocAlignment,
+- allocType,
+- m_FreeSuballocationsBySize[index],
+- &pAllocationRequest->allocHandle))
+- {
+- pAllocationRequest->item = m_FreeSuballocationsBySize[index];
+- return true;
+- }
+- }
+- }
+- }
+-
+- return false;
+-}
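+-
+-// Illustrative aside, not part of VMA: how the strategy dispatch above
+-// behaves on free regions of sizes {32, 64, 256} (sorted ascending in
+-// m_FreeSuballocationsBySize) for a 50-byte request with no debug margin:
+-//   strategy 0 / MIN_MEMORY_BIT: binary-search the first size >= 50 -> 64
+-//     (best fit, least wasted memory).
+-//   MIN_OFFSET (internal): walk m_Suballocations in address order and take
+-//     the first free region that fits, whatever its size.
+-//   MIN_TIME_BIT: iterate from the biggest region (256) downwards, so the
+-//     first candidate almost always fits on the first try.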
+-
+-VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
+-{
+- for (auto& suballoc : m_Suballocations)
+- {
+- if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
+- {
+- if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
+- {
+- VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
+- return VK_ERROR_UNKNOWN_COPY;
+- }
+- }
+- }
+-
+- return VK_SUCCESS;
+-}
+-
+-void VmaBlockMetadata_Generic::Alloc(
+- const VmaAllocationRequest& request,
+- VmaSuballocationType type,
+- void* userData)
+-{
+- VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
+- VMA_ASSERT(request.item != m_Suballocations.end());
+- VmaSuballocation& suballoc = *request.item;
+- // Given suballocation is a free block.
+- VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
+-
+- // Given offset is inside this suballocation.
+- VMA_ASSERT((VkDeviceSize)request.allocHandle - 1 >= suballoc.offset);
+- const VkDeviceSize paddingBegin = (VkDeviceSize)request.allocHandle - suballoc.offset - 1;
+- VMA_ASSERT(suballoc.size >= paddingBegin + request.size);
+- const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - request.size;
+-
+- // Unregister this free suballocation from m_FreeSuballocationsBySize and update
+- // it to become used.
+- UnregisterFreeSuballocation(request.item);
+-
+- suballoc.offset = (VkDeviceSize)request.allocHandle - 1;
+- suballoc.size = request.size;
+- suballoc.type = type;
+- suballoc.userData = userData;
+-
+- // If there are any free bytes remaining at the end, insert new free suballocation after current one.
+- if (paddingEnd)
+- {
+- VmaSuballocation paddingSuballoc = {};
+- paddingSuballoc.offset = suballoc.offset + suballoc.size;
+- paddingSuballoc.size = paddingEnd;
+- paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+- VmaSuballocationList::iterator next = request.item;
+- ++next;
+- const VmaSuballocationList::iterator paddingEndItem =
+- m_Suballocations.insert(next, paddingSuballoc);
+- RegisterFreeSuballocation(paddingEndItem);
+- }
+-
+- // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
+- if (paddingBegin)
+- {
+- VmaSuballocation paddingSuballoc = {};
+- paddingSuballoc.offset = suballoc.offset - paddingBegin;
+- paddingSuballoc.size = paddingBegin;
+- paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+- const VmaSuballocationList::iterator paddingBeginItem =
+- m_Suballocations.insert(request.item, paddingSuballoc);
+- RegisterFreeSuballocation(paddingBeginItem);
+- }
+-
+- // Update totals.
+- m_FreeCount = m_FreeCount - 1;
+- if (paddingBegin > 0)
+- {
+- ++m_FreeCount;
+- }
+- if (paddingEnd > 0)
+- {
+- ++m_FreeCount;
+- }
+- m_SumFreeSize -= request.size;
+-}
+-
+-void VmaBlockMetadata_Generic::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
+-{
+- outInfo.offset = (VkDeviceSize)allocHandle - 1;
+- const VmaSuballocation& suballoc = *FindAtOffset(outInfo.offset);
+- outInfo.size = suballoc.size;
+- outInfo.pUserData = suballoc.userData;
+-}
+-
+-void* VmaBlockMetadata_Generic::GetAllocationUserData(VmaAllocHandle allocHandle) const
+-{
+- return FindAtOffset((VkDeviceSize)allocHandle - 1)->userData;
+-}
+-
+-VmaAllocHandle VmaBlockMetadata_Generic::GetAllocationListBegin() const
+-{
+- if (IsEmpty())
+- return VK_NULL_HANDLE;
+-
+- for (const auto& suballoc : m_Suballocations)
+- {
+- if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
+- return (VmaAllocHandle)(suballoc.offset + 1);
+- }
+- VMA_ASSERT(false && "Should contain at least 1 allocation!");
+- return VK_NULL_HANDLE;
+-}
+-
+-VmaAllocHandle VmaBlockMetadata_Generic::GetNextAllocation(VmaAllocHandle prevAlloc) const
+-{
+- VmaSuballocationList::const_iterator prev = FindAtOffset((VkDeviceSize)prevAlloc - 1);
+-
+- for (VmaSuballocationList::const_iterator it = ++prev; it != m_Suballocations.end(); ++it)
+- {
+- if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
+- return (VmaAllocHandle)(it->offset + 1);
+- }
+- return VK_NULL_HANDLE;
+-}
+-
+-void VmaBlockMetadata_Generic::Clear()
+-{
+- const VkDeviceSize size = GetSize();
+-
+- VMA_ASSERT(IsVirtual());
+- m_FreeCount = 1;
+- m_SumFreeSize = size;
+- m_Suballocations.clear();
+- m_FreeSuballocationsBySize.clear();
+-
+- VmaSuballocation suballoc = {};
+- suballoc.offset = 0;
+- suballoc.size = size;
+- suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+- m_Suballocations.push_back(suballoc);
+-
+- m_FreeSuballocationsBySize.push_back(m_Suballocations.begin());
+-}
+-
+-void VmaBlockMetadata_Generic::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
+-{
+- VmaSuballocation& suballoc = *FindAtOffset((VkDeviceSize)allocHandle - 1);
+- suballoc.userData = userData;
+-}
+-
+-void VmaBlockMetadata_Generic::DebugLogAllAllocations() const
+-{
+- for (const auto& suballoc : m_Suballocations)
+- {
+- if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
+- DebugLogAllocation(suballoc.offset, suballoc.size, suballoc.userData);
+- }
+-}
+-
+-VmaSuballocationList::iterator VmaBlockMetadata_Generic::FindAtOffset(VkDeviceSize offset) const
+-{
+- VMA_HEAVY_ASSERT(!m_Suballocations.empty());
+- const VkDeviceSize last = m_Suballocations.rbegin()->offset;
+- if (last == offset)
+- return m_Suballocations.rbegin().drop_const();
+- const VkDeviceSize first = m_Suballocations.begin()->offset;
+- if (first == offset)
+- return m_Suballocations.begin().drop_const();
+-
+- const size_t suballocCount = m_Suballocations.size();
+- const VkDeviceSize step = (last - first + m_Suballocations.begin()->size) / suballocCount;
+- auto findSuballocation = [&](auto begin, auto end) -> VmaSuballocationList::iterator
+- {
+- for (auto suballocItem = begin;
+- suballocItem != end;
+- ++suballocItem)
+- {
+- if (suballocItem->offset == offset)
+- return suballocItem.drop_const();
+- }
+- VMA_ASSERT(false && "Not found!");
+- return m_Suballocations.end().drop_const();
+- };
+- // If requested offset is closer to the end of range, search from the end
+- if (offset - first > suballocCount * step / 2)
+- {
+- return findSuballocation(m_Suballocations.rbegin(), m_Suballocations.rend());
+- }
+- return findSuballocation(m_Suballocations.begin(), m_Suballocations.end());
+-}
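+-
+-// Illustrative aside, not part of VMA: the direction heuristic in
+-// FindAtOffset() above. `step` approximates the average suballocation size,
+-// so with 1000 suballocations spanning offsets 0..1M (step ~ 1000), a lookup
+-// at offset 900k satisfies offset - first > suballocCount * step / 2 and the
+-// list is scanned backwards from rbegin(), roughly halving the expected walk.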
+-
+-bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
+-{
+- VkDeviceSize lastSize = 0;
+- for (size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
+- {
+- const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
+-
+- VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
+- VMA_VALIDATE(it->size >= lastSize);
+- lastSize = it->size;
+- }
+- return true;
+-}
+-
+-bool VmaBlockMetadata_Generic::CheckAllocation(
+- VkDeviceSize allocSize,
+- VkDeviceSize allocAlignment,
+- VmaSuballocationType allocType,
+- VmaSuballocationList::const_iterator suballocItem,
+- VmaAllocHandle* pAllocHandle) const
+-{
+- VMA_ASSERT(allocSize > 0);
+- VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
+- VMA_ASSERT(suballocItem != m_Suballocations.cend());
+- VMA_ASSERT(pAllocHandle != VMA_NULL);
+-
+- const VkDeviceSize debugMargin = GetDebugMargin();
+- const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
+-
+- const VmaSuballocation& suballoc = *suballocItem;
+- VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
+-
+- // Size of this suballocation is too small for this request: Early return.
+- if (suballoc.size < allocSize)
+- {
+- return false;
+- }
+-
+- // Start from offset equal to beginning of this suballocation.
+- VkDeviceSize offset = suballoc.offset + (suballocItem == m_Suballocations.cbegin() ? 0 : GetDebugMargin());
+-
+- // Apply debugMargin from the end of previous alloc.
+- if (debugMargin > 0)
+- {
+- offset += debugMargin;
+- }
+-
+- // Apply alignment.
+- offset = VmaAlignUp(offset, allocAlignment);
+-
+- // Check previous suballocations for BufferImageGranularity conflicts.
+- // Make bigger alignment if necessary.
+- if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment)
+- {
+- bool bufferImageGranularityConflict = false;
+- VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
+- while (prevSuballocItem != m_Suballocations.cbegin())
+- {
+- --prevSuballocItem;
+- const VmaSuballocation& prevSuballoc = *prevSuballocItem;
+- if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, offset, bufferImageGranularity))
+- {
+- if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
+- {
+- bufferImageGranularityConflict = true;
+- break;
+- }
+- }
+- else
+- // Already on previous page.
+- break;
+- }
+- if (bufferImageGranularityConflict)
+- {
+- offset = VmaAlignUp(offset, bufferImageGranularity);
+- }
+- }
+-
+- // Calculate padding at the beginning based on current offset.
+- const VkDeviceSize paddingBegin = offset - suballoc.offset;
+-
+- // Fail if requested size plus margin after is bigger than size of this suballocation.
+- if (paddingBegin + allocSize + debugMargin > suballoc.size)
+- {
+- return false;
+- }
+-
+- // Check next suballocations for BufferImageGranularity conflicts.
+- // If conflict exists, allocation cannot be made here.
+- if (allocSize % bufferImageGranularity || offset % bufferImageGranularity)
+- {
+- VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
+- ++nextSuballocItem;
+- while (nextSuballocItem != m_Suballocations.cend())
+- {
+- const VmaSuballocation& nextSuballoc = *nextSuballocItem;
+- if (VmaBlocksOnSamePage(offset, allocSize, nextSuballoc.offset, bufferImageGranularity))
+- {
+- if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
+- {
+- return false;
+- }
+- }
+- else
+- {
+- // Already on next page.
+- break;
+- }
+- ++nextSuballocItem;
+- }
+- }
+-
+- *pAllocHandle = (VmaAllocHandle)(offset + 1);
+- // All tests passed: Success. pAllocHandle is already filled.
+- return true;
+-}
+-
+-void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
+-{
+- VMA_ASSERT(item != m_Suballocations.end());
+- VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
+-
+- VmaSuballocationList::iterator nextItem = item;
+- ++nextItem;
+- VMA_ASSERT(nextItem != m_Suballocations.end());
+- VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
+-
+- item->size += nextItem->size;
+- --m_FreeCount;
+- m_Suballocations.erase(nextItem);
+-}
+-
+-VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
+-{
+- // Change this suballocation to be marked as free.
+- VmaSuballocation& suballoc = *suballocItem;
+- suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+- suballoc.userData = VMA_NULL;
+-
+- // Update totals.
+- ++m_FreeCount;
+- m_SumFreeSize += suballoc.size;
+-
+- // Merge with previous and/or next suballocation if it's also free.
+- bool mergeWithNext = false;
+- bool mergeWithPrev = false;
+-
+- VmaSuballocationList::iterator nextItem = suballocItem;
+- ++nextItem;
+- if ((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
+- {
+- mergeWithNext = true;
+- }
+-
+- VmaSuballocationList::iterator prevItem = suballocItem;
+- if (suballocItem != m_Suballocations.begin())
+- {
+- --prevItem;
+- if (prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
+- {
+- mergeWithPrev = true;
+- }
+- }
+-
+- if (mergeWithNext)
+- {
+- UnregisterFreeSuballocation(nextItem);
+- MergeFreeWithNext(suballocItem);
+- }
+-
+- if (mergeWithPrev)
+- {
+- UnregisterFreeSuballocation(prevItem);
+- MergeFreeWithNext(prevItem);
+- RegisterFreeSuballocation(prevItem);
+- return prevItem;
+- }
+- else
+- {
+- RegisterFreeSuballocation(suballocItem);
+- return suballocItem;
+- }
+-}
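+-
+-// Illustrative aside, not part of VMA: a worked pass through
+-// FreeSuballocation() above. Starting from [FREE 32][USED X 64][FREE 16]:
+-//   1. X is marked FREE; m_FreeCount and m_SumFreeSize are updated.
+-//   2. mergeWithNext: the 16-byte neighbour is unregistered and folded into
+-//      X, giving [FREE 32][FREE 80].
+-//   3. mergeWithPrev: the 32-byte neighbour is unregistered, absorbs X via
+-//      MergeFreeWithNext(), and is re-registered, leaving a single [FREE 112]
+-//      whose iterator is returned.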
+-
+-void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
+-{
+- VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
+- VMA_ASSERT(item->size > 0);
+-
+- // You may want to enable this validation at the beginning or at the end of
+-    // this function, depending on what you want to check.
+- VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
+-
+- if (m_FreeSuballocationsBySize.empty())
+- {
+- m_FreeSuballocationsBySize.push_back(item);
+- }
+- else
+- {
+- VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
+- }
+-
+- //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
+-}
+-
+-void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
+-{
+- VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
+- VMA_ASSERT(item->size > 0);
+-
+- // You may want to enable this validation at the beginning or at the end of
+-    // this function, depending on what you want to check.
+- VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
+-
+- VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
+- m_FreeSuballocationsBySize.data(),
+- m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
+- item,
+- VmaSuballocationItemSizeLess());
+- for (size_t index = it - m_FreeSuballocationsBySize.data();
+- index < m_FreeSuballocationsBySize.size();
+- ++index)
+- {
+- if (m_FreeSuballocationsBySize[index] == item)
+- {
+- VmaVectorRemove(m_FreeSuballocationsBySize, index);
+- return;
+- }
+- VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
+- }
+- VMA_ASSERT(0 && "Not found.");
+-
+- //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
+-}
+-#endif // _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS
+-#endif // _VMA_BLOCK_METADATA_GENERIC
+-#endif // #if 0
+-
+-#ifndef _VMA_BLOCK_METADATA_LINEAR
+-/*
+-Allocations and their references in internal data structure look like this:
+-
+-if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
+-
+- 0 +-------+
+- | |
+- | |
+- | |
+- +-------+
+- | Alloc | 1st[m_1stNullItemsBeginCount]
+- +-------+
+- | Alloc | 1st[m_1stNullItemsBeginCount + 1]
+- +-------+
+- | ... |
+- +-------+
+- | Alloc | 1st[1st.size() - 1]
+- +-------+
+- | |
+- | |
+- | |
+-GetSize() +-------+
+-
+-if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
+-
+- 0 +-------+
+- | Alloc | 2nd[0]
+- +-------+
+- | Alloc | 2nd[1]
+- +-------+
+- | ... |
+- +-------+
+- | Alloc | 2nd[2nd.size() - 1]
+- +-------+
+- | |
+- | |
+- | |
+- +-------+
+- | Alloc | 1st[m_1stNullItemsBeginCount]
+- +-------+
+- | Alloc | 1st[m_1stNullItemsBeginCount + 1]
+- +-------+
+- | ... |
+- +-------+
+- | Alloc | 1st[1st.size() - 1]
+- +-------+
+- | |
+-GetSize() +-------+
+-
+-if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
+-
+- 0 +-------+
+- | |
+- | |
+- | |
+- +-------+
+- | Alloc | 1st[m_1stNullItemsBeginCount]
+- +-------+
+- | Alloc | 1st[m_1stNullItemsBeginCount + 1]
+- +-------+
+- | ... |
+- +-------+
+- | Alloc | 1st[1st.size() - 1]
+- +-------+
+- | |
+- | |
+- | |
+- +-------+
+- | Alloc | 2nd[2nd.size() - 1]
+- +-------+
+- | ... |
+- +-------+
+- | Alloc | 2nd[1]
+- +-------+
+- | Alloc | 2nd[0]
+-GetSize() +-------+
+-
+-*/
+-class VmaBlockMetadata_Linear : public VmaBlockMetadata
+-{
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_Linear)
+-public:
+- VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,
+- VkDeviceSize bufferImageGranularity, bool isVirtual);
+- virtual ~VmaBlockMetadata_Linear() = default;
+-
+- VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; }
+- bool IsEmpty() const override { return GetAllocationCount() == 0; }
+- VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }
+-
+- void Init(VkDeviceSize size) override;
+- bool Validate() const override;
+- size_t GetAllocationCount() const override;
+- size_t GetFreeRegionsCount() const override;
+-
+- void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
+- void AddStatistics(VmaStatistics& inoutStats) const override;
+-
+-#if VMA_STATS_STRING_ENABLED
+- void PrintDetailedMap(class VmaJsonWriter& json) const override;
+-#endif
+-
+- bool CreateAllocationRequest(
+- VkDeviceSize allocSize,
+- VkDeviceSize allocAlignment,
+- bool upperAddress,
+- VmaSuballocationType allocType,
+- uint32_t strategy,
+- VmaAllocationRequest* pAllocationRequest) override;
+-
+- VkResult CheckCorruption(const void* pBlockData) override;
+-
+- void Alloc(
+- const VmaAllocationRequest& request,
+- VmaSuballocationType type,
+- void* userData) override;
+-
+- void Free(VmaAllocHandle allocHandle) override;
+- void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
+- void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
+- VmaAllocHandle GetAllocationListBegin() const override;
+- VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
+- VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
+- void Clear() override;
+- void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
+- void DebugLogAllAllocations() const override;
+-
+-private:
+- /*
+-    There are two suballocation vectors, used in a ping-pong fashion.
+- The one with index m_1stVectorIndex is called 1st.
+- The one with index (m_1stVectorIndex ^ 1) is called 2nd.
+- 2nd can be non-empty only when 1st is not empty.
+- When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
+- */
+- typedef VmaVector<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> SuballocationVectorType;
+-
+- enum SECOND_VECTOR_MODE
+- {
+- SECOND_VECTOR_EMPTY,
+- /*
+- Suballocations in 2nd vector are created later than the ones in 1st, but they
+-        all have smaller offsets.
+- */
+- SECOND_VECTOR_RING_BUFFER,
+- /*
+- Suballocations in 2nd vector are upper side of double stack.
+- They all have offsets higher than those in 1st vector.
+- Top of this stack means smaller offsets, but higher indices in this vector.
+- */
+- SECOND_VECTOR_DOUBLE_STACK,
+- };
+-
+- VkDeviceSize m_SumFreeSize;
+- SuballocationVectorType m_Suballocations0, m_Suballocations1;
+- uint32_t m_1stVectorIndex;
+- SECOND_VECTOR_MODE m_2ndVectorMode;
+- // Number of items in 1st vector with hAllocation = null at the beginning.
+- size_t m_1stNullItemsBeginCount;
+- // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
+- size_t m_1stNullItemsMiddleCount;
+- // Number of items in 2nd vector with hAllocation = null.
+- size_t m_2ndNullItemsCount;
+-
+- SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
+- SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
+- const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
+- const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
+-
+- VmaSuballocation& FindSuballocation(VkDeviceSize offset) const;
+- bool ShouldCompact1st() const;
+- void CleanupAfterFree();
+-
+- bool CreateAllocationRequest_LowerAddress(
+- VkDeviceSize allocSize,
+- VkDeviceSize allocAlignment,
+- VmaSuballocationType allocType,
+- uint32_t strategy,
+- VmaAllocationRequest* pAllocationRequest);
+- bool CreateAllocationRequest_UpperAddress(
+- VkDeviceSize allocSize,
+- VkDeviceSize allocAlignment,
+- VmaSuballocationType allocType,
+- uint32_t strategy,
+- VmaAllocationRequest* pAllocationRequest);
+-};
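+-
+-// Illustrative aside, not part of VMA: a simplified timeline of the ping-pong
+-// vectors above in ring-buffer mode (equal-sized allocations assumed):
+-//   alloc A, B, C   -> 1st = [A, B, C], 2nd = empty
+-//   free A          -> A stays as a null item; m_1stNullItemsBeginCount = 1
+-//   alloc D (wraps) -> 2nd = [D], m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER
+-//   once 1st is fully freed, the vectors swap roles (m_1stVectorIndex ^= 1)
+-//   and the former 2nd becomes the new 1st.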
+-
+-#ifndef _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS
+-VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,
+- VkDeviceSize bufferImageGranularity, bool isVirtual)
+- : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
+- m_SumFreeSize(0),
+- m_Suballocations0(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
+- m_Suballocations1(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
+- m_1stVectorIndex(0),
+- m_2ndVectorMode(SECOND_VECTOR_EMPTY),
+- m_1stNullItemsBeginCount(0),
+- m_1stNullItemsMiddleCount(0),
+- m_2ndNullItemsCount(0) {}
+-
+-void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
+-{
+- VmaBlockMetadata::Init(size);
+- m_SumFreeSize = size;
+-}
+-
+-bool VmaBlockMetadata_Linear::Validate() const
+-{
+- const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+- const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+-
+- VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
+- VMA_VALIDATE(!suballocations1st.empty() ||
+- suballocations2nd.empty() ||
+- m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
+-
+- if (!suballocations1st.empty())
+- {
+- // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
+- VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].type != VMA_SUBALLOCATION_TYPE_FREE);
+- // Null item at the end should be just pop_back().
+- VMA_VALIDATE(suballocations1st.back().type != VMA_SUBALLOCATION_TYPE_FREE);
+- }
+- if (!suballocations2nd.empty())
+- {
+- // Null item at the end should be just pop_back().
+- VMA_VALIDATE(suballocations2nd.back().type != VMA_SUBALLOCATION_TYPE_FREE);
+- }
+-
+- VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
+- VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
+-
+- VkDeviceSize sumUsedSize = 0;
+- const size_t suballoc1stCount = suballocations1st.size();
+- const VkDeviceSize debugMargin = GetDebugMargin();
+- VkDeviceSize offset = 0;
+-
+- if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+- {
+- const size_t suballoc2ndCount = suballocations2nd.size();
+- size_t nullItem2ndCount = 0;
+- for (size_t i = 0; i < suballoc2ndCount; ++i)
+- {
+- const VmaSuballocation& suballoc = suballocations2nd[i];
+- const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
+-
+- VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
+- if (!IsVirtual())
+- {
+- VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
+- }
+- VMA_VALIDATE(suballoc.offset >= offset);
+-
+- if (!currFree)
+- {
+- if (!IsVirtual())
+- {
+- VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
+- VMA_VALIDATE(alloc->GetSize() == suballoc.size);
+- }
+- sumUsedSize += suballoc.size;
+- }
+- else
+- {
+- ++nullItem2ndCount;
+- }
+-
+- offset = suballoc.offset + suballoc.size + debugMargin;
+- }
+-
+- VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
+- }
+-
+- for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
+- {
+- const VmaSuballocation& suballoc = suballocations1st[i];
+- VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
+- suballoc.userData == VMA_NULL);
+- }
+-
+- size_t nullItem1stCount = m_1stNullItemsBeginCount;
+-
+- for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
+- {
+- const VmaSuballocation& suballoc = suballocations1st[i];
+- const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
+-
+- VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
+- if (!IsVirtual())
+- {
+- VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
+- }
+- VMA_VALIDATE(suballoc.offset >= offset);
+- VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
+-
+- if (!currFree)
+- {
+- if (!IsVirtual())
+- {
+- VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
+- VMA_VALIDATE(alloc->GetSize() == suballoc.size);
+- }
+- sumUsedSize += suballoc.size;
+- }
+- else
+- {
+- ++nullItem1stCount;
+- }
+-
+- offset = suballoc.offset + suballoc.size + debugMargin;
+- }
+- VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
+-
+- if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+- {
+- const size_t suballoc2ndCount = suballocations2nd.size();
+- size_t nullItem2ndCount = 0;
+- for (size_t i = suballoc2ndCount; i--; )
+- {
+- const VmaSuballocation& suballoc = suballocations2nd[i];
+- const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
+-
+- VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
+- if (!IsVirtual())
+- {
+- VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
+- }
+- VMA_VALIDATE(suballoc.offset >= offset);
+-
+- if (!currFree)
+- {
+- if (!IsVirtual())
+- {
+- VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
+- VMA_VALIDATE(alloc->GetSize() == suballoc.size);
+- }
+- sumUsedSize += suballoc.size;
+- }
+- else
+- {
+- ++nullItem2ndCount;
+- }
+-
+- offset = suballoc.offset + suballoc.size + debugMargin;
+- }
+-
+- VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
+- }
+-
+- VMA_VALIDATE(offset <= GetSize());
+- VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
+-
+- return true;
+-}
+-
+-size_t VmaBlockMetadata_Linear::GetAllocationCount() const
+-{
+- return AccessSuballocations1st().size() - m_1stNullItemsBeginCount - m_1stNullItemsMiddleCount +
+- AccessSuballocations2nd().size() - m_2ndNullItemsCount;
+-}
+-
+-size_t VmaBlockMetadata_Linear::GetFreeRegionsCount() const
+-{
+- // Function only used for defragmentation, which is disabled for this algorithm
+- VMA_ASSERT(0);
+- return SIZE_MAX;
+-}
+-
+-void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
+-{
+- const VkDeviceSize size = GetSize();
+- const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+- const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+- const size_t suballoc1stCount = suballocations1st.size();
+- const size_t suballoc2ndCount = suballocations2nd.size();
+-
+- inoutStats.statistics.blockCount++;
+- inoutStats.statistics.blockBytes += size;
+-
+- VkDeviceSize lastOffset = 0;
+-
+- if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+- {
+- const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
+- size_t nextAlloc2ndIndex = 0;
+- while (lastOffset < freeSpace2ndTo1stEnd)
+- {
+-            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+- while (nextAlloc2ndIndex < suballoc2ndCount &&
+- suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
+- {
+- ++nextAlloc2ndIndex;
+- }
+-
+- // Found non-null allocation.
+- if (nextAlloc2ndIndex < suballoc2ndCount)
+- {
+- const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+-
+- // 1. Process free space before this allocation.
+- if (lastOffset < suballoc.offset)
+- {
+- // There is free space from lastOffset to suballoc.offset.
+- const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+- VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
+- }
+-
+- // 2. Process this allocation.
+- // There is allocation with suballoc.offset, suballoc.size.
+- VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
+-
+- // 3. Prepare for next iteration.
+- lastOffset = suballoc.offset + suballoc.size;
+- ++nextAlloc2ndIndex;
+- }
+- // We are at the end.
+- else
+- {
+- // There is free space from lastOffset to freeSpace2ndTo1stEnd.
+- if (lastOffset < freeSpace2ndTo1stEnd)
+- {
+- const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
+- VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
+- }
+-
+- // End of loop.
+- lastOffset = freeSpace2ndTo1stEnd;
+- }
+- }
+- }
+-
+- size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
+- const VkDeviceSize freeSpace1stTo2ndEnd =
+- m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
+- while (lastOffset < freeSpace1stTo2ndEnd)
+- {
+-        // Find next non-null allocation or move nextAlloc1stIndex to the end.
+- while (nextAlloc1stIndex < suballoc1stCount &&
+- suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
+- {
+- ++nextAlloc1stIndex;
+- }
+-
+- // Found non-null allocation.
+- if (nextAlloc1stIndex < suballoc1stCount)
+- {
+- const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
+-
+- // 1. Process free space before this allocation.
+- if (lastOffset < suballoc.offset)
+- {
+- // There is free space from lastOffset to suballoc.offset.
+- const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+- VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
+- }
+-
+- // 2. Process this allocation.
+- // There is allocation with suballoc.offset, suballoc.size.
+- VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
+-
+- // 3. Prepare for next iteration.
+- lastOffset = suballoc.offset + suballoc.size;
+- ++nextAlloc1stIndex;
+- }
+- // We are at the end.
+- else
+- {
+- // There is free space from lastOffset to freeSpace1stTo2ndEnd.
+- if (lastOffset < freeSpace1stTo2ndEnd)
+- {
+- const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
+- VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
+- }
+-
+- // End of loop.
+- lastOffset = freeSpace1stTo2ndEnd;
+- }
+- }
+-
+- if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+- {
+- size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
+- while (lastOffset < size)
+- {
+-            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+- while (nextAlloc2ndIndex != SIZE_MAX &&
+- suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
+- {
+- --nextAlloc2ndIndex;
+- }
+-
+- // Found non-null allocation.
+- if (nextAlloc2ndIndex != SIZE_MAX)
+- {
+- const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+-
+- // 1. Process free space before this allocation.
+- if (lastOffset < suballoc.offset)
+- {
+- // There is free space from lastOffset to suballoc.offset.
+- const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+- VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
+- }
+-
+- // 2. Process this allocation.
+- // There is allocation with suballoc.offset, suballoc.size.
+- VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
+-
+- // 3. Prepare for next iteration.
+- lastOffset = suballoc.offset + suballoc.size;
+- --nextAlloc2ndIndex;
+- }
+- // We are at the end.
+- else
+- {
+- // There is free space from lastOffset to size.
+- if (lastOffset < size)
+- {
+- const VkDeviceSize unusedRangeSize = size - lastOffset;
+- VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
+- }
+-
+- // End of loop.
+- lastOffset = size;
+- }
+- }
+- }
+-}
+-
+-void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const
+-{
+- const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+- const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+- const VkDeviceSize size = GetSize();
+- const size_t suballoc1stCount = suballocations1st.size();
+- const size_t suballoc2ndCount = suballocations2nd.size();
+-
+- inoutStats.blockCount++;
+- inoutStats.blockBytes += size;
+- inoutStats.allocationBytes += size - m_SumFreeSize;
+-
+- VkDeviceSize lastOffset = 0;
+-
+- if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+- {
+- const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
+- size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
+- while (lastOffset < freeSpace2ndTo1stEnd)
+- {
+- // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+- while (nextAlloc2ndIndex < suballoc2ndCount &&
+- suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
+- {
+- ++nextAlloc2ndIndex;
+- }
+-
+- // Found non-null allocation.
+- if (nextAlloc2ndIndex < suballoc2ndCount)
+- {
+- const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+-
+- // Process this allocation.
+- // There is allocation with suballoc.offset, suballoc.size.
+- ++inoutStats.allocationCount;
+-
+- // Prepare for next iteration.
+- lastOffset = suballoc.offset + suballoc.size;
+- ++nextAlloc2ndIndex;
+- }
+- // We are at the end.
+- else
+- {
+- // End of loop.
+- lastOffset = freeSpace2ndTo1stEnd;
+- }
+- }
+- }
+-
+- size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
+- const VkDeviceSize freeSpace1stTo2ndEnd =
+- m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
+- while (lastOffset < freeSpace1stTo2ndEnd)
+- {
+-        // Find next non-null allocation or move nextAlloc1stIndex to the end.
+- while (nextAlloc1stIndex < suballoc1stCount &&
+- suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
+- {
+- ++nextAlloc1stIndex;
+- }
+-
+- // Found non-null allocation.
+- if (nextAlloc1stIndex < suballoc1stCount)
+- {
+- const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
+-
+- // Process this allocation.
+- // There is allocation with suballoc.offset, suballoc.size.
+- ++inoutStats.allocationCount;
+-
+- // Prepare for next iteration.
+- lastOffset = suballoc.offset + suballoc.size;
+- ++nextAlloc1stIndex;
+- }
+- // We are at the end.
+- else
+- {
+- // End of loop.
+- lastOffset = freeSpace1stTo2ndEnd;
+- }
+- }
+-
+- if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+- {
+- size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
+- while (lastOffset < size)
+- {
+- // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+- while (nextAlloc2ndIndex != SIZE_MAX &&
+- suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
+- {
+- --nextAlloc2ndIndex;
+- }
+-
+- // Found non-null allocation.
+- if (nextAlloc2ndIndex != SIZE_MAX)
+- {
+- const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+-
+- // Process this allocation.
+- // There is allocation with suballoc.offset, suballoc.size.
+- ++inoutStats.allocationCount;
+-
+- // Prepare for next iteration.
+- lastOffset = suballoc.offset + suballoc.size;
+- --nextAlloc2ndIndex;
+- }
+- // We are at the end.
+- else
+- {
+- // End of loop.
+- lastOffset = size;
+- }
+- }
+- }
+-}
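+-
+-/*
+-Layout traversed by the statistics functions above (a sketch derived from the
+-loops themselves, not from an original comment):
+-
+-  SECOND_VECTOR_RING_BUFFER:  | 2nd vector (ascending) | 1st vector (ascending) |
+-  SECOND_VECTOR_DOUBLE_STACK: | 1st vector (ascending) ->   <- 2nd vector (from back()) |
+-
+-In ring-buffer mode the 2nd vector occupies the space before the first live
+-item of the 1st vector; in double-stack mode it grows down from the block end,
+-so suballocations2nd.back().offset bounds the free space after the 1st vector.
+-*/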
+-
+-#if VMA_STATS_STRING_ENABLED
+-void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
+-{
+- const VkDeviceSize size = GetSize();
+- const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+- const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+- const size_t suballoc1stCount = suballocations1st.size();
+- const size_t suballoc2ndCount = suballocations2nd.size();
+-
+- // FIRST PASS
+-
+- size_t unusedRangeCount = 0;
+- VkDeviceSize usedBytes = 0;
+-
+- VkDeviceSize lastOffset = 0;
+-
+- size_t alloc2ndCount = 0;
+- if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+- {
+- const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
+- size_t nextAlloc2ndIndex = 0;
+- while (lastOffset < freeSpace2ndTo1stEnd)
+- {
+- // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+- while (nextAlloc2ndIndex < suballoc2ndCount &&
+- suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
+- {
+- ++nextAlloc2ndIndex;
+- }
+-
+- // Found non-null allocation.
+- if (nextAlloc2ndIndex < suballoc2ndCount)
+- {
+- const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+-
+- // 1. Process free space before this allocation.
+- if (lastOffset < suballoc.offset)
+- {
+- // There is free space from lastOffset to suballoc.offset.
+- ++unusedRangeCount;
+- }
+-
+- // 2. Process this allocation.
+- // There is allocation with suballoc.offset, suballoc.size.
+- ++alloc2ndCount;
+- usedBytes += suballoc.size;
+-
+- // 3. Prepare for next iteration.
+- lastOffset = suballoc.offset + suballoc.size;
+- ++nextAlloc2ndIndex;
+- }
+- // We are at the end.
+- else
+- {
+- if (lastOffset < freeSpace2ndTo1stEnd)
+- {
+- // There is free space from lastOffset to freeSpace2ndTo1stEnd.
+- ++unusedRangeCount;
+- }
+-
+- // End of loop.
+- lastOffset = freeSpace2ndTo1stEnd;
+- }
+- }
+- }
+-
+- size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
+- size_t alloc1stCount = 0;
+- const VkDeviceSize freeSpace1stTo2ndEnd =
+- m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
+- while (lastOffset < freeSpace1stTo2ndEnd)
+- {
+-        // Find next non-null allocation or move nextAlloc1stIndex to the end.
+- while (nextAlloc1stIndex < suballoc1stCount &&
+- suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
+- {
+- ++nextAlloc1stIndex;
+- }
+-
+- // Found non-null allocation.
+- if (nextAlloc1stIndex < suballoc1stCount)
+- {
+- const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
+-
+- // 1. Process free space before this allocation.
+- if (lastOffset < suballoc.offset)
+- {
+- // There is free space from lastOffset to suballoc.offset.
+- ++unusedRangeCount;
+- }
+-
+- // 2. Process this allocation.
+- // There is allocation with suballoc.offset, suballoc.size.
+- ++alloc1stCount;
+- usedBytes += suballoc.size;
+-
+- // 3. Prepare for next iteration.
+- lastOffset = suballoc.offset + suballoc.size;
+- ++nextAlloc1stIndex;
+- }
+- // We are at the end.
+- else
+- {
+- if (lastOffset < size)
+- {
+- // There is free space from lastOffset to freeSpace1stTo2ndEnd.
+- ++unusedRangeCount;
+- }
+-
+- // End of loop.
+- lastOffset = freeSpace1stTo2ndEnd;
+- }
+- }
+-
+- if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+- {
+- size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
+- while (lastOffset < size)
+- {
+- // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+- while (nextAlloc2ndIndex != SIZE_MAX &&
+- suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
+- {
+- --nextAlloc2ndIndex;
+- }
+-
+- // Found non-null allocation.
+- if (nextAlloc2ndIndex != SIZE_MAX)
+- {
+- const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+-
+- // 1. Process free space before this allocation.
+- if (lastOffset < suballoc.offset)
+- {
+- // There is free space from lastOffset to suballoc.offset.
+- ++unusedRangeCount;
+- }
+-
+- // 2. Process this allocation.
+- // There is allocation with suballoc.offset, suballoc.size.
+- ++alloc2ndCount;
+- usedBytes += suballoc.size;
+-
+- // 3. Prepare for next iteration.
+- lastOffset = suballoc.offset + suballoc.size;
+- --nextAlloc2ndIndex;
+- }
+- // We are at the end.
+- else
+- {
+- if (lastOffset < size)
+- {
+- // There is free space from lastOffset to size.
+- ++unusedRangeCount;
+- }
+-
+- // End of loop.
+- lastOffset = size;
+- }
+- }
+- }
+-
+- const VkDeviceSize unusedBytes = size - usedBytes;
+- PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
+-
+- // SECOND PASS
+- lastOffset = 0;
+-
+- if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+- {
+- const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
+- size_t nextAlloc2ndIndex = 0;
+- while (lastOffset < freeSpace2ndTo1stEnd)
+- {
+- // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+- while (nextAlloc2ndIndex < suballoc2ndCount &&
+- suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
+- {
+- ++nextAlloc2ndIndex;
+- }
+-
+- // Found non-null allocation.
+- if (nextAlloc2ndIndex < suballoc2ndCount)
+- {
+- const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+-
+- // 1. Process free space before this allocation.
+- if (lastOffset < suballoc.offset)
+- {
+- // There is free space from lastOffset to suballoc.offset.
+- const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+- PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
+- }
+-
+- // 2. Process this allocation.
+- // There is allocation with suballoc.offset, suballoc.size.
+- PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
+-
+- // 3. Prepare for next iteration.
+- lastOffset = suballoc.offset + suballoc.size;
+- ++nextAlloc2ndIndex;
+- }
+- // We are at the end.
+- else
+- {
+- if (lastOffset < freeSpace2ndTo1stEnd)
+- {
+- // There is free space from lastOffset to freeSpace2ndTo1stEnd.
+- const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
+- PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
+- }
+-
+- // End of loop.
+- lastOffset = freeSpace2ndTo1stEnd;
+- }
+- }
+- }
+-
+- nextAlloc1stIndex = m_1stNullItemsBeginCount;
+- while (lastOffset < freeSpace1stTo2ndEnd)
+- {
+-        // Find next non-null allocation or move nextAlloc1stIndex to the end.
+- while (nextAlloc1stIndex < suballoc1stCount &&
+- suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
+- {
+- ++nextAlloc1stIndex;
+- }
+-
+- // Found non-null allocation.
+- if (nextAlloc1stIndex < suballoc1stCount)
+- {
+- const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
+-
+- // 1. Process free space before this allocation.
+- if (lastOffset < suballoc.offset)
+- {
+- // There is free space from lastOffset to suballoc.offset.
+- const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+- PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
+- }
+-
+- // 2. Process this allocation.
+- // There is allocation with suballoc.offset, suballoc.size.
+- PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
+-
+- // 3. Prepare for next iteration.
+- lastOffset = suballoc.offset + suballoc.size;
+- ++nextAlloc1stIndex;
+- }
+- // We are at the end.
+- else
+- {
+- if (lastOffset < freeSpace1stTo2ndEnd)
+- {
+- // There is free space from lastOffset to freeSpace1stTo2ndEnd.
+- const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
+- PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
+- }
+-
+- // End of loop.
+- lastOffset = freeSpace1stTo2ndEnd;
+- }
+- }
+-
+- if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+- {
+- size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
+- while (lastOffset < size)
+- {
+- // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+- while (nextAlloc2ndIndex != SIZE_MAX &&
+- suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
+- {
+- --nextAlloc2ndIndex;
+- }
+-
+- // Found non-null allocation.
+- if (nextAlloc2ndIndex != SIZE_MAX)
+- {
+- const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+-
+- // 1. Process free space before this allocation.
+- if (lastOffset < suballoc.offset)
+- {
+- // There is free space from lastOffset to suballoc.offset.
+- const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+- PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
+- }
+-
+- // 2. Process this allocation.
+- // There is allocation with suballoc.offset, suballoc.size.
+- PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
+-
+- // 3. Prepare for next iteration.
+- lastOffset = suballoc.offset + suballoc.size;
+- --nextAlloc2ndIndex;
+- }
+- // We are at the end.
+- else
+- {
+- if (lastOffset < size)
+- {
+- // There is free space from lastOffset to size.
+- const VkDeviceSize unusedRangeSize = size - lastOffset;
+- PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
+- }
+-
+- // End of loop.
+- lastOffset = size;
+- }
+- }
+- }
+-
+- PrintDetailedMap_End(json);
+-}
+-#endif // VMA_STATS_STRING_ENABLED
+-
+-bool VmaBlockMetadata_Linear::CreateAllocationRequest(
+- VkDeviceSize allocSize,
+- VkDeviceSize allocAlignment,
+- bool upperAddress,
+- VmaSuballocationType allocType,
+- uint32_t strategy,
+- VmaAllocationRequest* pAllocationRequest)
+-{
+- VMA_ASSERT(allocSize > 0);
+- VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
+- VMA_ASSERT(pAllocationRequest != VMA_NULL);
+- VMA_HEAVY_ASSERT(Validate());
+- pAllocationRequest->size = allocSize;
+- return upperAddress ?
+- CreateAllocationRequest_UpperAddress(
+- allocSize, allocAlignment, allocType, strategy, pAllocationRequest) :
+- CreateAllocationRequest_LowerAddress(
+- allocSize, allocAlignment, allocType, strategy, pAllocationRequest);
+-}
+-
+-VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
+-{
+- VMA_ASSERT(!IsVirtual());
+- SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+- for (size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
+- {
+- const VmaSuballocation& suballoc = suballocations1st[i];
+- if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
+- {
+- if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
+- {
+- VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
+- return VK_ERROR_UNKNOWN_COPY;
+- }
+- }
+- }
+-
+- SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+- for (size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
+- {
+- const VmaSuballocation& suballoc = suballocations2nd[i];
+- if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
+- {
+- if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
+- {
+- VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
+- return VK_ERROR_UNKNOWN_COPY;
+- }
+- }
+- }
+-
+- return VK_SUCCESS;
+-}
+-
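+-// Note on handles in this metadata class: a VmaAllocHandle encodes the byte
+-// offset plus one (see "allocHandle - 1" below and "resultOffset + 1" in
+-// CreateAllocationRequest_*), so a zero handle stays distinct from a valid
+-// allocation at offset 0.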
+-void VmaBlockMetadata_Linear::Alloc(
+- const VmaAllocationRequest& request,
+- VmaSuballocationType type,
+- void* userData)
+-{
+- const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1;
+- const VmaSuballocation newSuballoc = { offset, request.size, userData, type };
+-
+- switch (request.type)
+- {
+- case VmaAllocationRequestType::UpperAddress:
+- {
+- VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
+- "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
+- SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+- suballocations2nd.push_back(newSuballoc);
+- m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
+- }
+- break;
+- case VmaAllocationRequestType::EndOf1st:
+- {
+- SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+-
+- VMA_ASSERT(suballocations1st.empty() ||
+- offset >= suballocations1st.back().offset + suballocations1st.back().size);
+- // Check if it fits before the end of the block.
+- VMA_ASSERT(offset + request.size <= GetSize());
+-
+- suballocations1st.push_back(newSuballoc);
+- }
+- break;
+- case VmaAllocationRequestType::EndOf2nd:
+- {
+- SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+- // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
+- VMA_ASSERT(!suballocations1st.empty() &&
+- offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset);
+- SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+-
+- switch (m_2ndVectorMode)
+- {
+- case SECOND_VECTOR_EMPTY:
+- // First allocation from second part ring buffer.
+- VMA_ASSERT(suballocations2nd.empty());
+- m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
+- break;
+- case SECOND_VECTOR_RING_BUFFER:
+- // 2-part ring buffer is already started.
+- VMA_ASSERT(!suballocations2nd.empty());
+- break;
+- case SECOND_VECTOR_DOUBLE_STACK:
+- VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
+- break;
+- default:
+- VMA_ASSERT(0);
+- }
+-
+- suballocations2nd.push_back(newSuballoc);
+- }
+- break;
+- default:
+- VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
+- }
+-
+- m_SumFreeSize -= newSuballoc.size;
+-}
+-
+-void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle)
+-{
+- SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+- SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+- VkDeviceSize offset = (VkDeviceSize)allocHandle - 1;
+-
+- if (!suballocations1st.empty())
+- {
+- // First allocation: Mark it as next empty at the beginning.
+- VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
+- if (firstSuballoc.offset == offset)
+- {
+- firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+- firstSuballoc.userData = VMA_NULL;
+- m_SumFreeSize += firstSuballoc.size;
+- ++m_1stNullItemsBeginCount;
+- CleanupAfterFree();
+- return;
+- }
+- }
+-
+- // Last allocation in 2-part ring buffer or top of upper stack (same logic).
+- if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
+- m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+- {
+- VmaSuballocation& lastSuballoc = suballocations2nd.back();
+- if (lastSuballoc.offset == offset)
+- {
+- m_SumFreeSize += lastSuballoc.size;
+- suballocations2nd.pop_back();
+- CleanupAfterFree();
+- return;
+- }
+- }
+- // Last allocation in 1st vector.
+- else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY)
+- {
+- VmaSuballocation& lastSuballoc = suballocations1st.back();
+- if (lastSuballoc.offset == offset)
+- {
+- m_SumFreeSize += lastSuballoc.size;
+- suballocations1st.pop_back();
+- CleanupAfterFree();
+- return;
+- }
+- }
+-
+- VmaSuballocation refSuballoc;
+- refSuballoc.offset = offset;
+- // Rest of members stays uninitialized intentionally for better performance.
+-
+- // Item from the middle of 1st vector.
+- {
+- const SuballocationVectorType::iterator it = VmaBinaryFindSorted(
+- suballocations1st.begin() + m_1stNullItemsBeginCount,
+- suballocations1st.end(),
+- refSuballoc,
+- VmaSuballocationOffsetLess());
+- if (it != suballocations1st.end())
+- {
+- it->type = VMA_SUBALLOCATION_TYPE_FREE;
+- it->userData = VMA_NULL;
+- ++m_1stNullItemsMiddleCount;
+- m_SumFreeSize += it->size;
+- CleanupAfterFree();
+- return;
+- }
+- }
+-
+- if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
+- {
+- // Item from the middle of 2nd vector.
+- const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
+- VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
+- VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
+- if (it != suballocations2nd.end())
+- {
+- it->type = VMA_SUBALLOCATION_TYPE_FREE;
+- it->userData = VMA_NULL;
+- ++m_2ndNullItemsCount;
+- m_SumFreeSize += it->size;
+- CleanupAfterFree();
+- return;
+- }
+- }
+-
+- VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
+-}
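+-
+-// Free() tries the O(1) cases first (the oldest allocation in the 1st vector,
+-// or the newest one at whichever end is currently active) and only falls back
+-// to a binary search for items freed from the middle of either vector.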
+-
+-void VmaBlockMetadata_Linear::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
+-{
+- outInfo.offset = (VkDeviceSize)allocHandle - 1;
+- VmaSuballocation& suballoc = FindSuballocation(outInfo.offset);
+- outInfo.size = suballoc.size;
+- outInfo.pUserData = suballoc.userData;
+-}
+-
+-void* VmaBlockMetadata_Linear::GetAllocationUserData(VmaAllocHandle allocHandle) const
+-{
+- return FindSuballocation((VkDeviceSize)allocHandle - 1).userData;
+-}
+-
+-VmaAllocHandle VmaBlockMetadata_Linear::GetAllocationListBegin() const
+-{
+- // Function only used for defragmentation, which is disabled for this algorithm
+- VMA_ASSERT(0);
+- return VK_NULL_HANDLE;
+-}
+-
+-VmaAllocHandle VmaBlockMetadata_Linear::GetNextAllocation(VmaAllocHandle prevAlloc) const
+-{
+- // Function only used for defragmentation, which is disabled for this algorithm
+- VMA_ASSERT(0);
+- return VK_NULL_HANDLE;
+-}
+-
+-VkDeviceSize VmaBlockMetadata_Linear::GetNextFreeRegionSize(VmaAllocHandle alloc) const
+-{
+- // Function only used for defragmentation, which is disabled for this algorithm
+- VMA_ASSERT(0);
+- return 0;
+-}
+-
+-void VmaBlockMetadata_Linear::Clear()
+-{
+- m_SumFreeSize = GetSize();
+- m_Suballocations0.clear();
+- m_Suballocations1.clear();
+- // Leaving m_1stVectorIndex unchanged - it doesn't matter.
+- m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+- m_1stNullItemsBeginCount = 0;
+- m_1stNullItemsMiddleCount = 0;
+- m_2ndNullItemsCount = 0;
+-}
+-
+-void VmaBlockMetadata_Linear::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
+-{
+- VmaSuballocation& suballoc = FindSuballocation((VkDeviceSize)allocHandle - 1);
+- suballoc.userData = userData;
+-}
+-
+-void VmaBlockMetadata_Linear::DebugLogAllAllocations() const
+-{
+- const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+- for (auto it = suballocations1st.begin() + m_1stNullItemsBeginCount; it != suballocations1st.end(); ++it)
+- if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
+- DebugLogAllocation(it->offset, it->size, it->userData);
+-
+- const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+- for (auto it = suballocations2nd.begin(); it != suballocations2nd.end(); ++it)
+- if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
+- DebugLogAllocation(it->offset, it->size, it->userData);
+-}
+-
+-VmaSuballocation& VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset) const
+-{
+- const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+- const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+-
+- VmaSuballocation refSuballoc;
+- refSuballoc.offset = offset;
+- // Rest of members stays uninitialized intentionally for better performance.
+-
+- // Item from the 1st vector.
+- {
+- SuballocationVectorType::const_iterator it = VmaBinaryFindSorted(
+- suballocations1st.begin() + m_1stNullItemsBeginCount,
+- suballocations1st.end(),
+- refSuballoc,
+- VmaSuballocationOffsetLess());
+- if (it != suballocations1st.end())
+- {
+- return const_cast<VmaSuballocation&>(*it);
+- }
+- }
+-
+- if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
+- {
+-        // Item from the 2nd vector.
+- SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
+- VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
+- VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
+- if (it != suballocations2nd.end())
+- {
+- return const_cast<VmaSuballocation&>(*it);
+- }
+- }
+-
+- VMA_ASSERT(0 && "Allocation not found in linear allocator!");
+- return const_cast<VmaSuballocation&>(suballocations1st.back()); // Should never occur.
+-}
+-
+-bool VmaBlockMetadata_Linear::ShouldCompact1st() const
+-{
+- const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
+- const size_t suballocCount = AccessSuballocations1st().size();
+- return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
+-}
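+-// ShouldCompact1st() example: with 100 suballocations of which 60 are null,
+-// 60 * 2 = 120 >= (100 - 60) * 3 = 120, so compaction triggers; i.e. the 1st
+-// vector is compacted once at least 60% of its items are free and it holds
+-// more than 32 items.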
+-
+-void VmaBlockMetadata_Linear::CleanupAfterFree()
+-{
+- SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+- SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+-
+- if (IsEmpty())
+- {
+- suballocations1st.clear();
+- suballocations2nd.clear();
+- m_1stNullItemsBeginCount = 0;
+- m_1stNullItemsMiddleCount = 0;
+- m_2ndNullItemsCount = 0;
+- m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+- }
+- else
+- {
+- const size_t suballoc1stCount = suballocations1st.size();
+- const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
+- VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
+-
+- // Find more null items at the beginning of 1st vector.
+- while (m_1stNullItemsBeginCount < suballoc1stCount &&
+- suballocations1st[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
+- {
+- ++m_1stNullItemsBeginCount;
+- --m_1stNullItemsMiddleCount;
+- }
+-
+- // Find more null items at the end of 1st vector.
+- while (m_1stNullItemsMiddleCount > 0 &&
+- suballocations1st.back().type == VMA_SUBALLOCATION_TYPE_FREE)
+- {
+- --m_1stNullItemsMiddleCount;
+- suballocations1st.pop_back();
+- }
+-
+- // Find more null items at the end of 2nd vector.
+- while (m_2ndNullItemsCount > 0 &&
+- suballocations2nd.back().type == VMA_SUBALLOCATION_TYPE_FREE)
+- {
+- --m_2ndNullItemsCount;
+- suballocations2nd.pop_back();
+- }
+-
+- // Find more null items at the beginning of 2nd vector.
+- while (m_2ndNullItemsCount > 0 &&
+- suballocations2nd[0].type == VMA_SUBALLOCATION_TYPE_FREE)
+- {
+- --m_2ndNullItemsCount;
+- VmaVectorRemove(suballocations2nd, 0);
+- }
+-
+- if (ShouldCompact1st())
+- {
+- const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
+- size_t srcIndex = m_1stNullItemsBeginCount;
+- for (size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
+- {
+- while (suballocations1st[srcIndex].type == VMA_SUBALLOCATION_TYPE_FREE)
+- {
+- ++srcIndex;
+- }
+- if (dstIndex != srcIndex)
+- {
+- suballocations1st[dstIndex] = suballocations1st[srcIndex];
+- }
+- ++srcIndex;
+- }
+- suballocations1st.resize(nonNullItemCount);
+- m_1stNullItemsBeginCount = 0;
+- m_1stNullItemsMiddleCount = 0;
+- }
+-
+- // 2nd vector became empty.
+- if (suballocations2nd.empty())
+- {
+- m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+- }
+-
+- // 1st vector became empty.
+- if (suballocations1st.size() - m_1stNullItemsBeginCount == 0)
+- {
+- suballocations1st.clear();
+- m_1stNullItemsBeginCount = 0;
+-
+- if (!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+- {
+- // Swap 1st with 2nd. Now 2nd is empty.
+- m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+- m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
+- while (m_1stNullItemsBeginCount < suballocations2nd.size() &&
+- suballocations2nd[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
+- {
+- ++m_1stNullItemsBeginCount;
+- --m_1stNullItemsMiddleCount;
+- }
+- m_2ndNullItemsCount = 0;
+- m_1stVectorIndex ^= 1;
+- }
+- }
+- }
+-
+- VMA_HEAVY_ASSERT(Validate());
+-}
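+-
+-// CleanupAfterFree() thus trims free items from both ends of the vectors,
+-// compacts the 1st vector when ShouldCompact1st() allows it, and in
+-// ring-buffer mode promotes the 2nd vector to become the new 1st once the
+-// old 1st vector runs empty.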
+-
+-bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
+- VkDeviceSize allocSize,
+- VkDeviceSize allocAlignment,
+- VmaSuballocationType allocType,
+- uint32_t strategy,
+- VmaAllocationRequest* pAllocationRequest)
+-{
+- const VkDeviceSize blockSize = GetSize();
+- const VkDeviceSize debugMargin = GetDebugMargin();
+- const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
+- SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+- SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+-
+- if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+- {
+- // Try to allocate at the end of 1st vector.
+-
+- VkDeviceSize resultBaseOffset = 0;
+- if (!suballocations1st.empty())
+- {
+- const VmaSuballocation& lastSuballoc = suballocations1st.back();
+- resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin;
+- }
+-
+- // Start from offset equal to beginning of free space.
+- VkDeviceSize resultOffset = resultBaseOffset;
+-
+- // Apply alignment.
+- resultOffset = VmaAlignUp(resultOffset, allocAlignment);
+-
+- // Check previous suballocations for BufferImageGranularity conflicts.
+- // Make bigger alignment if necessary.
+- if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty())
+- {
+- bool bufferImageGranularityConflict = false;
+- for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
+- {
+- const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
+- if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
+- {
+- if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
+- {
+- bufferImageGranularityConflict = true;
+- break;
+- }
+- }
+- else
+- // Already on previous page.
+- break;
+- }
+- if (bufferImageGranularityConflict)
+- {
+- resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
+- }
+- }
+-
+- const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
+- suballocations2nd.back().offset : blockSize;
+-
+- // There is enough free space at the end after alignment.
+- if (resultOffset + allocSize + debugMargin <= freeSpaceEnd)
+- {
+- // Check next suballocations for BufferImageGranularity conflicts.
+- // If conflict exists, allocation cannot be made here.
+- if ((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+- {
+- for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
+- {
+- const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
+- if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
+- {
+- if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
+- {
+- return false;
+- }
+- }
+- else
+- {
+- // Already on previous page.
+- break;
+- }
+- }
+- }
+-
+- // All tests passed: Success.
+- pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
+- // pAllocationRequest->item, customData unused.
+- pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
+- return true;
+- }
+- }
+-
+- // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
+- // beginning of 1st vector as the end of free space.
+- if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+- {
+- VMA_ASSERT(!suballocations1st.empty());
+-
+- VkDeviceSize resultBaseOffset = 0;
+- if (!suballocations2nd.empty())
+- {
+- const VmaSuballocation& lastSuballoc = suballocations2nd.back();
+- resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin;
+- }
+-
+- // Start from offset equal to beginning of free space.
+- VkDeviceSize resultOffset = resultBaseOffset;
+-
+- // Apply alignment.
+- resultOffset = VmaAlignUp(resultOffset, allocAlignment);
+-
+- // Check previous suballocations for BufferImageGranularity conflicts.
+- // Make bigger alignment if necessary.
+- if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
+- {
+- bool bufferImageGranularityConflict = false;
+- for (size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
+- {
+- const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
+- if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
+- {
+- if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
+- {
+- bufferImageGranularityConflict = true;
+- break;
+- }
+- }
+- else
+- // Already on previous page.
+- break;
+- }
+- if (bufferImageGranularityConflict)
+- {
+- resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
+- }
+- }
+-
+- size_t index1st = m_1stNullItemsBeginCount;
+-
+- // There is enough free space at the end after alignment.
+- if ((index1st == suballocations1st.size() && resultOffset + allocSize + debugMargin <= blockSize) ||
+- (index1st < suballocations1st.size() && resultOffset + allocSize + debugMargin <= suballocations1st[index1st].offset))
+- {
+- // Check next suballocations for BufferImageGranularity conflicts.
+- // If conflict exists, allocation cannot be made here.
+- if (allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity)
+- {
+- for (size_t nextSuballocIndex = index1st;
+- nextSuballocIndex < suballocations1st.size();
+- nextSuballocIndex++)
+- {
+- const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
+- if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
+- {
+- if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
+- {
+- return false;
+- }
+- }
+- else
+- {
+- // Already on next page.
+- break;
+- }
+- }
+- }
+-
+- // All tests passed: Success.
+- pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
+- pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
+- // pAllocationRequest->item, customData unused.
+- return true;
+- }
+- }
+-
+- return false;
+-}
+-
+-bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
+- VkDeviceSize allocSize,
+- VkDeviceSize allocAlignment,
+- VmaSuballocationType allocType,
+- uint32_t strategy,
+- VmaAllocationRequest* pAllocationRequest)
+-{
+- const VkDeviceSize blockSize = GetSize();
+- const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
+- SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+- SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+-
+- if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+- {
+- VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
+- return false;
+- }
+-
+- // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
+- if (allocSize > blockSize)
+- {
+- return false;
+- }
+- VkDeviceSize resultBaseOffset = blockSize - allocSize;
+- if (!suballocations2nd.empty())
+- {
+- const VmaSuballocation& lastSuballoc = suballocations2nd.back();
+- resultBaseOffset = lastSuballoc.offset - allocSize;
+- if (allocSize > lastSuballoc.offset)
+- {
+- return false;
+- }
+- }
+-
+- // Start from offset equal to end of free space.
+- VkDeviceSize resultOffset = resultBaseOffset;
+-
+- const VkDeviceSize debugMargin = GetDebugMargin();
+-
+- // Apply debugMargin at the end.
+- if (debugMargin > 0)
+- {
+- if (resultOffset < debugMargin)
+- {
+- return false;
+- }
+- resultOffset -= debugMargin;
+- }
+-
+- // Apply alignment.
+- resultOffset = VmaAlignDown(resultOffset, allocAlignment);
+-
+- // Check next suballocations from 2nd for BufferImageGranularity conflicts.
+- // Make bigger alignment if necessary.
+- if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
+- {
+- bool bufferImageGranularityConflict = false;
+- for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
+- {
+- const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
+- if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
+- {
+- if (VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
+- {
+- bufferImageGranularityConflict = true;
+- break;
+- }
+- }
+- else
+- // Already on previous page.
+- break;
+- }
+- if (bufferImageGranularityConflict)
+- {
+- resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
+- }
+- }
+-
+- // There is enough free space.
+- const VkDeviceSize endOf1st = !suballocations1st.empty() ?
+- suballocations1st.back().offset + suballocations1st.back().size :
+- 0;
+- if (endOf1st + debugMargin <= resultOffset)
+- {
+- // Check previous suballocations for BufferImageGranularity conflicts.
+- // If conflict exists, allocation cannot be made here.
+- if (bufferImageGranularity > 1)
+- {
+- for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
+- {
+- const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
+- if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
+- {
+- if (VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
+- {
+- return false;
+- }
+- }
+- else
+- {
+- // Already on next page.
+- break;
+- }
+- }
+- }
+-
+- // All tests passed: Success.
+- pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
+- // pAllocationRequest->item unused.
+- pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
+- return true;
+- }
+-
+- return false;
+-}
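+-
+-// The upper-address path grows the 2nd vector downward: the new allocation is
+-// placed just below the previous top of the upper stack (or the block end),
+-// aligned down, and accepted only if it still clears the end of the 1st
+-// vector plus the debug margin.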
+-#endif // _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS
+-#endif // _VMA_BLOCK_METADATA_LINEAR
+-
+-#if 0
+-#ifndef _VMA_BLOCK_METADATA_BUDDY
+-/*
+-- GetSize() is the original size of allocated memory block.
+-- m_UsableSize is this size aligned down to a power of two.
+- All allocations and calculations happen relative to m_UsableSize.
+-- GetUnusableSize() is the difference between them.
+- It is reported as separate, unused range, not available for allocations.
+-
+-Node at level 0 has size = m_UsableSize.
+-Each next level contains nodes with size 2 times smaller than current level.
+-m_LevelCount is the maximum number of levels to use in the current object.
+-*/
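+-// Example, derived from the description above: for a 100 MiB block
+-// (104857600 B), m_UsableSize = 64 MiB (67108864 B, the largest power of two
+-// that fits) and GetUnusableSize() = 36 MiB, reported as a single unused
+-// range; level 0 holds one 64 MiB node, level 1 nodes of 32 MiB, and so on.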
+-class VmaBlockMetadata_Buddy : public VmaBlockMetadata
+-{
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_Buddy)
+-public:
+- VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks,
+- VkDeviceSize bufferImageGranularity, bool isVirtual);
+- virtual ~VmaBlockMetadata_Buddy();
+-
+- size_t GetAllocationCount() const override { return m_AllocationCount; }
+- VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize + GetUnusableSize(); }
+- bool IsEmpty() const override { return m_Root->type == Node::TYPE_FREE; }
+- VkResult CheckCorruption(const void* pBlockData) override { return VK_ERROR_FEATURE_NOT_PRESENT; }
+- VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }
+- void DebugLogAllAllocations() const override { DebugLogAllAllocationNode(m_Root, 0); }
+-
+- void Init(VkDeviceSize size) override;
+- bool Validate() const override;
+-
+- void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
+- void AddStatistics(VmaStatistics& inoutStats) const override;
+-
+-#if VMA_STATS_STRING_ENABLED
+- void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override;
+-#endif
+-
+- bool CreateAllocationRequest(
+- VkDeviceSize allocSize,
+- VkDeviceSize allocAlignment,
+- bool upperAddress,
+- VmaSuballocationType allocType,
+- uint32_t strategy,
+- VmaAllocationRequest* pAllocationRequest) override;
+-
+- void Alloc(
+- const VmaAllocationRequest& request,
+- VmaSuballocationType type,
+- void* userData) override;
+-
+- void Free(VmaAllocHandle allocHandle) override;
+- void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
+- void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
+- VmaAllocHandle GetAllocationListBegin() const override;
+- VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
+- void Clear() override;
+- void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
+-
+-private:
+- static const size_t MAX_LEVELS = 48;
+-
+- struct ValidationContext
+- {
+- size_t calculatedAllocationCount = 0;
+- size_t calculatedFreeCount = 0;
+- VkDeviceSize calculatedSumFreeSize = 0;
+- };
+- struct Node
+- {
+- VkDeviceSize offset;
+- enum TYPE
+- {
+- TYPE_FREE,
+- TYPE_ALLOCATION,
+- TYPE_SPLIT,
+- TYPE_COUNT
+- } type;
+- Node* parent;
+- Node* buddy;
+-
+- union
+- {
+- struct
+- {
+- Node* prev;
+- Node* next;
+- } free;
+- struct
+- {
+- void* userData;
+- } allocation;
+- struct
+- {
+- Node* leftChild;
+- } split;
+- };
+- };
+-
+- // Size of the memory block aligned down to a power of two.
+- VkDeviceSize m_UsableSize;
+- uint32_t m_LevelCount;
+- VmaPoolAllocator<Node> m_NodeAllocator;
+- Node* m_Root;
+- struct
+- {
+- Node* front;
+- Node* back;
+- } m_FreeList[MAX_LEVELS];
+-
+- // Number of nodes in the tree with type == TYPE_ALLOCATION.
+- size_t m_AllocationCount;
+- // Number of nodes in the tree with type == TYPE_FREE.
+- size_t m_FreeCount;
+- // Doesn't include space wasted due to internal fragmentation - allocation sizes are just aligned up to node sizes.
+- // Doesn't include unusable size.
+- VkDeviceSize m_SumFreeSize;
+-
+- VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
+- VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
+-
+- VkDeviceSize AlignAllocationSize(VkDeviceSize size) const
+- {
+- if (!IsVirtual())
+- {
+- size = VmaAlignUp(size, (VkDeviceSize)16);
+- }
+- return VmaNextPow2(size);
+- }
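+-    // Example: AlignAllocationSize(5) yields 16 for a real block (aligned up
+-    // to 16, then VmaNextPow2(16) = 16) but 8 for a virtual block, where
+-    // VmaNextPow2(5) = 8 directly.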
+- Node* FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const;
+- void DeleteNodeChildren(Node* node);
+- bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
+- uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
+- void AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const;
+- // Adds node to the front of FreeList at given level.
+- // node->type must be FREE.
+- // node->free.prev, next can be undefined.
+- void AddToFreeListFront(uint32_t level, Node* node);
+- // Removes node from FreeList at given level.
+- // node->type must be FREE.
+- // node->free.prev, next stay untouched.
+- void RemoveFromFreeList(uint32_t level, Node* node);
+- void DebugLogAllAllocationNode(Node* node, uint32_t level) const;
+-
+-#if VMA_STATS_STRING_ENABLED
+- void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
+-#endif
+-};
+-
+-#ifndef _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS
+-VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks,
+- VkDeviceSize bufferImageGranularity, bool isVirtual)
+- : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
+- m_NodeAllocator(pAllocationCallbacks, 32), // firstBlockCapacity
+- m_Root(VMA_NULL),
+- m_AllocationCount(0),
+- m_FreeCount(1),
+- m_SumFreeSize(0)
+-{
+- memset(m_FreeList, 0, sizeof(m_FreeList));
+-}
+-
+-VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
+-{
+- DeleteNodeChildren(m_Root);
+- m_NodeAllocator.Free(m_Root);
+-}
+-
+-void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
+-{
+- VmaBlockMetadata::Init(size);
+-
+- m_UsableSize = VmaPrevPow2(size);
+- m_SumFreeSize = m_UsableSize;
+-
+- // Calculate m_LevelCount.
+- const VkDeviceSize minNodeSize = IsVirtual() ? 1 : 16;
+- m_LevelCount = 1;
+- while (m_LevelCount < MAX_LEVELS &&
+- LevelToNodeSize(m_LevelCount) >= minNodeSize)
+- {
+- ++m_LevelCount;
+- }
+-
+- Node* rootNode = m_NodeAllocator.Alloc();
+- rootNode->offset = 0;
+- rootNode->type = Node::TYPE_FREE;
+- rootNode->parent = VMA_NULL;
+- rootNode->buddy = VMA_NULL;
+-
+- m_Root = rootNode;
+- AddToFreeListFront(0, rootNode);
+-}
+-
+-bool VmaBlockMetadata_Buddy::Validate() const
+-{
+- // Validate tree.
+- ValidationContext ctx;
+- if (!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
+- {
+- VMA_VALIDATE(false && "ValidateNode failed.");
+- }
+- VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
+- VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
+-
+- // Validate free node lists.
+- for (uint32_t level = 0; level < m_LevelCount; ++level)
+- {
+- VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
+- m_FreeList[level].front->free.prev == VMA_NULL);
+-
+- for (Node* node = m_FreeList[level].front;
+- node != VMA_NULL;
+- node = node->free.next)
+- {
+- VMA_VALIDATE(node->type == Node::TYPE_FREE);
+-
+- if (node->free.next == VMA_NULL)
+- {
+- VMA_VALIDATE(m_FreeList[level].back == node);
+- }
+- else
+- {
+- VMA_VALIDATE(node->free.next->free.prev == node);
+- }
+- }
+- }
+-
+-    // Validate that free lists at higher levels are empty.
+- for (uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
+- {
+- VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
+- }
+-
+- return true;
+-}
+-
+-void VmaBlockMetadata_Buddy::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
+-{
+- inoutStats.statistics.blockCount++;
+- inoutStats.statistics.blockBytes += GetSize();
+-
+- AddNodeToDetailedStatistics(inoutStats, m_Root, LevelToNodeSize(0));
+-
+- const VkDeviceSize unusableSize = GetUnusableSize();
+- if (unusableSize > 0)
+- VmaAddDetailedStatisticsUnusedRange(inoutStats, unusableSize);
+-}
+-
+-void VmaBlockMetadata_Buddy::AddStatistics(VmaStatistics& inoutStats) const
+-{
+- inoutStats.blockCount++;
+- inoutStats.allocationCount += (uint32_t)m_AllocationCount;
+- inoutStats.blockBytes += GetSize();
+- inoutStats.allocationBytes += GetSize() - m_SumFreeSize;
+-}
+-
+-#if VMA_STATS_STRING_ENABLED
+-void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const
+-{
+- VmaDetailedStatistics stats;
+- VmaClearDetailedStatistics(stats);
+- AddDetailedStatistics(stats);
+-
+- PrintDetailedMap_Begin(
+- json,
+- stats.statistics.blockBytes - stats.statistics.allocationBytes,
+- stats.statistics.allocationCount,
+- stats.unusedRangeCount,
+- mapRefCount);
+-
+- PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
+-
+- const VkDeviceSize unusableSize = GetUnusableSize();
+- if (unusableSize > 0)
+- {
+- PrintDetailedMap_UnusedRange(json,
+- m_UsableSize, // offset
+- unusableSize); // size
+- }
+-
+- PrintDetailedMap_End(json);
+-}
+-#endif // VMA_STATS_STRING_ENABLED
+-
+-bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
+- VkDeviceSize allocSize,
+- VkDeviceSize allocAlignment,
+- bool upperAddress,
+- VmaSuballocationType allocType,
+- uint32_t strategy,
+- VmaAllocationRequest* pAllocationRequest)
+-{
+- VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
+-
+- allocSize = AlignAllocationSize(allocSize);
+-
+- // Simple way to respect bufferImageGranularity. May be optimized some day.
+-    // Applied whenever the allocation might be an OPTIMAL image.
+- if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
+- allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
+- allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
+- {
+- allocAlignment = VMA_MAX(allocAlignment, GetBufferImageGranularity());
+- allocSize = VmaAlignUp(allocSize, GetBufferImageGranularity());
+- }
+-
+- if (allocSize > m_UsableSize)
+- {
+- return false;
+- }
+-
+- const uint32_t targetLevel = AllocSizeToLevel(allocSize);
+- for (uint32_t level = targetLevel; level--; )
+- {
+- for (Node* freeNode = m_FreeList[level].front;
+- freeNode != VMA_NULL;
+- freeNode = freeNode->free.next)
+- {
+- if (freeNode->offset % allocAlignment == 0)
+- {
+- pAllocationRequest->type = VmaAllocationRequestType::Normal;
+- pAllocationRequest->allocHandle = (VmaAllocHandle)(freeNode->offset + 1);
+- pAllocationRequest->size = allocSize;
+- pAllocationRequest->customData = (void*)(uintptr_t)level;
+- return true;
+- }
+- }
+- }
+-
+- return false;
+-}
+-
+-void VmaBlockMetadata_Buddy::Alloc(
+- const VmaAllocationRequest& request,
+- VmaSuballocationType type,
+- void* userData)
+-{
+- VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
+-
+- const uint32_t targetLevel = AllocSizeToLevel(request.size);
+- uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
+-
+- Node* currNode = m_FreeList[currLevel].front;
+- VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
+- const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1;
+- while (currNode->offset != offset)
+- {
+- currNode = currNode->free.next;
+- VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
+- }
+-
+- // Go down, splitting free nodes.
+- while (currLevel < targetLevel)
+- {
+- // currNode is already first free node at currLevel.
+- // Remove it from list of free nodes at this currLevel.
+- RemoveFromFreeList(currLevel, currNode);
+-
+- const uint32_t childrenLevel = currLevel + 1;
+-
+- // Create two free sub-nodes.
+- Node* leftChild = m_NodeAllocator.Alloc();
+- Node* rightChild = m_NodeAllocator.Alloc();
+-
+- leftChild->offset = currNode->offset;
+- leftChild->type = Node::TYPE_FREE;
+- leftChild->parent = currNode;
+- leftChild->buddy = rightChild;
+-
+- rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
+- rightChild->type = Node::TYPE_FREE;
+- rightChild->parent = currNode;
+- rightChild->buddy = leftChild;
+-
+- // Convert current currNode to split type.
+- currNode->type = Node::TYPE_SPLIT;
+- currNode->split.leftChild = leftChild;
+-
+- // Add child nodes to free list. Order is important!
+- AddToFreeListFront(childrenLevel, rightChild);
+- AddToFreeListFront(childrenLevel, leftChild);
+-
+- ++m_FreeCount;
+- ++currLevel;
+- currNode = m_FreeList[currLevel].front;
+-
+- /*
+- We can be sure that currNode, as left child of node previously split,
+- also fulfills the alignment requirement.
+- */
+- }
+-
+- // Remove from free list.
+- VMA_ASSERT(currLevel == targetLevel &&
+- currNode != VMA_NULL &&
+- currNode->type == Node::TYPE_FREE);
+- RemoveFromFreeList(currLevel, currNode);
+-
+- // Convert to allocation node.
+- currNode->type = Node::TYPE_ALLOCATION;
+- currNode->allocation.userData = userData;
+-
+- ++m_AllocationCount;
+- --m_FreeCount;
+- m_SumFreeSize -= request.size;
+-}
+-
+-void VmaBlockMetadata_Buddy::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
+-{
+- uint32_t level = 0;
+- outInfo.offset = (VkDeviceSize)allocHandle - 1;
+- const Node* const node = FindAllocationNode(outInfo.offset, level);
+- outInfo.size = LevelToNodeSize(level);
+- outInfo.pUserData = node->allocation.userData;
+-}
+-
+-void* VmaBlockMetadata_Buddy::GetAllocationUserData(VmaAllocHandle allocHandle) const
+-{
+- uint32_t level = 0;
+- const Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
+- return node->allocation.userData;
+-}
+-
+-VmaAllocHandle VmaBlockMetadata_Buddy::GetAllocationListBegin() const
+-{
+- // Function only used for defragmentation, which is disabled for this algorithm
+- return VK_NULL_HANDLE;
+-}
+-
+-VmaAllocHandle VmaBlockMetadata_Buddy::GetNextAllocation(VmaAllocHandle prevAlloc) const
+-{
+- // Function only used for defragmentation, which is disabled for this algorithm
+- return VK_NULL_HANDLE;
+-}
+-
+-void VmaBlockMetadata_Buddy::DeleteNodeChildren(Node* node)
+-{
+- if (node->type == Node::TYPE_SPLIT)
+- {
+- DeleteNodeChildren(node->split.leftChild->buddy);
+- DeleteNodeChildren(node->split.leftChild);
+- const VkAllocationCallbacks* allocationCallbacks = GetAllocationCallbacks();
+- m_NodeAllocator.Free(node->split.leftChild->buddy);
+- m_NodeAllocator.Free(node->split.leftChild);
+- }
+-}
+-
+-void VmaBlockMetadata_Buddy::Clear()
+-{
+- DeleteNodeChildren(m_Root);
+- m_Root->type = Node::TYPE_FREE;
+- m_AllocationCount = 0;
+- m_FreeCount = 1;
+- m_SumFreeSize = m_UsableSize;
+-}
+-
+-void VmaBlockMetadata_Buddy::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
+-{
+- uint32_t level = 0;
+- Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
+- node->allocation.userData = userData;
+-}
+-
+-VmaBlockMetadata_Buddy::Node* VmaBlockMetadata_Buddy::FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const
+-{
+- Node* node = m_Root;
+- VkDeviceSize nodeOffset = 0;
+- outLevel = 0;
+- VkDeviceSize levelNodeSize = LevelToNodeSize(0);
+- while (node->type == Node::TYPE_SPLIT)
+- {
+- const VkDeviceSize nextLevelNodeSize = levelNodeSize >> 1;
+- if (offset < nodeOffset + nextLevelNodeSize)
+- {
+- node = node->split.leftChild;
+- }
+- else
+- {
+- node = node->split.leftChild->buddy;
+- nodeOffset += nextLevelNodeSize;
+- }
+- ++outLevel;
+- levelNodeSize = nextLevelNodeSize;
+- }
+-
+- VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
+- return node;
+-}
+-
+-bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
+-{
+- VMA_VALIDATE(level < m_LevelCount);
+- VMA_VALIDATE(curr->parent == parent);
+- VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
+- VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
+- switch (curr->type)
+- {
+- case Node::TYPE_FREE:
+- // curr->free.prev, next are validated separately.
+- ctx.calculatedSumFreeSize += levelNodeSize;
+- ++ctx.calculatedFreeCount;
+- break;
+- case Node::TYPE_ALLOCATION:
+- ++ctx.calculatedAllocationCount;
+- if (!IsVirtual())
+- {
+- VMA_VALIDATE(curr->allocation.userData != VMA_NULL);
+- }
+- break;
+- case Node::TYPE_SPLIT:
+- {
+- const uint32_t childrenLevel = level + 1;
+- const VkDeviceSize childrenLevelNodeSize = levelNodeSize >> 1;
+- const Node* const leftChild = curr->split.leftChild;
+- VMA_VALIDATE(leftChild != VMA_NULL);
+- VMA_VALIDATE(leftChild->offset == curr->offset);
+- if (!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
+- {
+- VMA_VALIDATE(false && "ValidateNode for left child failed.");
+- }
+- const Node* const rightChild = leftChild->buddy;
+- VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
+- if (!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
+- {
+- VMA_VALIDATE(false && "ValidateNode for right child failed.");
+- }
+- }
+- break;
+- default:
+- return false;
+- }
+-
+- return true;
+-}
+-
+-uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
+-{
+-    // This could be optimized, e.g. by using std::bit_width (the C++20 name for what was proposed as std::log2p1).
+- uint32_t level = 0;
+- VkDeviceSize currLevelNodeSize = m_UsableSize;
+- VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
+- while (allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
+- {
+- ++level;
+- currLevelNodeSize >>= 1;
+- nextLevelNodeSize >>= 1;
+- }
+- return level;
+-}
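+-
+-// AllocSizeToLevel() example: with m_UsableSize = 64 MiB, a 5 MiB request
+-// walks node sizes 64 -> 32 -> 16 -> 8 MiB and stops at level 3, because the
+-// next level's node size (4 MiB) would no longer fit the allocation.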
+-
+-void VmaBlockMetadata_Buddy::Free(VmaAllocHandle allocHandle)
+-{
+- uint32_t level = 0;
+- Node* node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
+-
+- ++m_FreeCount;
+- --m_AllocationCount;
+- m_SumFreeSize += LevelToNodeSize(level);
+-
+- node->type = Node::TYPE_FREE;
+-
+- // Join free nodes if possible.
+- while (level > 0 && node->buddy->type == Node::TYPE_FREE)
+- {
+- RemoveFromFreeList(level, node->buddy);
+- Node* const parent = node->parent;
+-
+- m_NodeAllocator.Free(node->buddy);
+- m_NodeAllocator.Free(node);
+- parent->type = Node::TYPE_FREE;
+-
+- node = parent;
+- --level;
+- --m_FreeCount;
+- }
+-
+- AddToFreeListFront(level, node);
+-}
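+-
+-// Free() example: releasing an 8 MiB node whose buddy is also free merges the
+-// pair into their 16 MiB parent; the merge repeats upward while each parent's
+-// buddy is free, so a fully freed block collapses back to the root node.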
+-
+-void VmaBlockMetadata_Buddy::AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const
+-{
+- switch (node->type)
+- {
+- case Node::TYPE_FREE:
+- VmaAddDetailedStatisticsUnusedRange(inoutStats, levelNodeSize);
+- break;
+- case Node::TYPE_ALLOCATION:
+- VmaAddDetailedStatisticsAllocation(inoutStats, levelNodeSize);
+- break;
+- case Node::TYPE_SPLIT:
+- {
+- const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
+- const Node* const leftChild = node->split.leftChild;
+- AddNodeToDetailedStatistics(inoutStats, leftChild, childrenNodeSize);
+- const Node* const rightChild = leftChild->buddy;
+- AddNodeToDetailedStatistics(inoutStats, rightChild, childrenNodeSize);
+- }
+- break;
+- default:
+- VMA_ASSERT(0);
+- }
+-}
+-
+-void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
+-{
+- VMA_ASSERT(node->type == Node::TYPE_FREE);
+-
+- // List is empty.
+- Node* const frontNode = m_FreeList[level].front;
+- if (frontNode == VMA_NULL)
+- {
+- VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
+- node->free.prev = node->free.next = VMA_NULL;
+- m_FreeList[level].front = m_FreeList[level].back = node;
+- }
+- else
+- {
+- VMA_ASSERT(frontNode->free.prev == VMA_NULL);
+- node->free.prev = VMA_NULL;
+- node->free.next = frontNode;
+- frontNode->free.prev = node;
+- m_FreeList[level].front = node;
+- }
+-}
+-
+-void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
+-{
+- VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
+-
+- // It is at the front.
+- if (node->free.prev == VMA_NULL)
+- {
+- VMA_ASSERT(m_FreeList[level].front == node);
+- m_FreeList[level].front = node->free.next;
+- }
+- else
+- {
+- Node* const prevFreeNode = node->free.prev;
+- VMA_ASSERT(prevFreeNode->free.next == node);
+- prevFreeNode->free.next = node->free.next;
+- }
+-
+- // It is at the back.
+- if (node->free.next == VMA_NULL)
+- {
+- VMA_ASSERT(m_FreeList[level].back == node);
+- m_FreeList[level].back = node->free.prev;
+- }
+- else
+- {
+- Node* const nextFreeNode = node->free.next;
+- VMA_ASSERT(nextFreeNode->free.prev == node);
+- nextFreeNode->free.prev = node->free.prev;
+- }
+-}
+-
+-void VmaBlockMetadata_Buddy::DebugLogAllAllocationNode(Node* node, uint32_t level) const
+-{
+- switch (node->type)
+- {
+- case Node::TYPE_FREE:
+- break;
+- case Node::TYPE_ALLOCATION:
+- DebugLogAllocation(node->offset, LevelToNodeSize(level), node->allocation.userData);
+- break;
+- case Node::TYPE_SPLIT:
+- {
+- ++level;
+- DebugLogAllAllocationNode(node->split.leftChild, level);
+- DebugLogAllAllocationNode(node->split.leftChild->buddy, level);
+- }
+- break;
+- default:
+- VMA_ASSERT(0);
+- }
+-}
+-
+-#if VMA_STATS_STRING_ENABLED
+-void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
+-{
+- switch (node->type)
+- {
+- case Node::TYPE_FREE:
+- PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
+- break;
+- case Node::TYPE_ALLOCATION:
+- PrintDetailedMap_Allocation(json, node->offset, levelNodeSize, node->allocation.userData);
+- break;
+- case Node::TYPE_SPLIT:
+- {
+- const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
+- const Node* const leftChild = node->split.leftChild;
+- PrintDetailedMapNode(json, leftChild, childrenNodeSize);
+- const Node* const rightChild = leftChild->buddy;
+- PrintDetailedMapNode(json, rightChild, childrenNodeSize);
+- }
+- break;
+- default:
+- VMA_ASSERT(0);
+- }
+-}
+-#endif // VMA_STATS_STRING_ENABLED
+-#endif // _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS
+-#endif // _VMA_BLOCK_METADATA_BUDDY
+-#endif // #if 0
+-
+-#ifndef _VMA_BLOCK_METADATA_TLSF
+-// To avoid searching the current, larger region when the first allocation attempt
+-// fails, and to skip straight to a smaller range, use
+-// VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT as the strategy in CreateAllocationRequest().
+-// When fragmentation and reuse of previous blocks don't matter, use
+-// VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT for the fastest possible allocation time.
+-// (A caller-side usage sketch follows the class declaration below.)
+-class VmaBlockMetadata_TLSF : public VmaBlockMetadata
+-{
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_TLSF)
+-public:
+- VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
+- VkDeviceSize bufferImageGranularity, bool isVirtual);
+- virtual ~VmaBlockMetadata_TLSF();
+-
+- size_t GetAllocationCount() const override { return m_AllocCount; }
+- size_t GetFreeRegionsCount() const override { return m_BlocksFreeCount + 1; }
+- VkDeviceSize GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; }
+- bool IsEmpty() const override { return m_NullBlock->offset == 0; }
+- VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; }
+-
+- void Init(VkDeviceSize size) override;
+- bool Validate() const override;
+-
+- void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
+- void AddStatistics(VmaStatistics& inoutStats) const override;
+-
+-#if VMA_STATS_STRING_ENABLED
+- void PrintDetailedMap(class VmaJsonWriter& json) const override;
+-#endif
+-
+- bool CreateAllocationRequest(
+- VkDeviceSize allocSize,
+- VkDeviceSize allocAlignment,
+- bool upperAddress,
+- VmaSuballocationType allocType,
+- uint32_t strategy,
+- VmaAllocationRequest* pAllocationRequest) override;
+-
+- VkResult CheckCorruption(const void* pBlockData) override;
+- void Alloc(
+- const VmaAllocationRequest& request,
+- VmaSuballocationType type,
+- void* userData) override;
+-
+- void Free(VmaAllocHandle allocHandle) override;
+- void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
+- void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
+- VmaAllocHandle GetAllocationListBegin() const override;
+- VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
+- VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
+- void Clear() override;
+- void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
+- void DebugLogAllAllocations() const override;
+-
+-private:
+-    // According to the original paper, 4 or 5 should be the preferable value:
+- // M. Masmano, I. Ripoll, A. Crespo, and J. Real "TLSF: a New Dynamic Memory Allocator for Real-Time Systems"
+- // http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf
+- static const uint8_t SECOND_LEVEL_INDEX = 5;
+- static const uint16_t SMALL_BUFFER_SIZE = 256;
+- static const uint32_t INITIAL_BLOCK_ALLOC_COUNT = 16;
+- static const uint8_t MEMORY_CLASS_SHIFT = 7;
+- static const uint8_t MAX_MEMORY_CLASSES = 65 - MEMORY_CLASS_SHIFT;
+-
+- class Block
+- {
+- public:
+- VkDeviceSize offset;
+- VkDeviceSize size;
+- Block* prevPhysical;
+- Block* nextPhysical;
+-
+- void MarkFree() { prevFree = VMA_NULL; }
+- void MarkTaken() { prevFree = this; }
+- bool IsFree() const { return prevFree != this; }
+- void*& UserData() { VMA_HEAVY_ASSERT(!IsFree()); return userData; }
+- Block*& PrevFree() { return prevFree; }
+- Block*& NextFree() { VMA_HEAVY_ASSERT(IsFree()); return nextFree; }
+-
+- private:
+- Block* prevFree; // Address of the same block here indicates that block is taken
+- union
+- {
+- Block* nextFree;
+- void* userData;
+- };
+- };
+-
+- size_t m_AllocCount;
+- // Total number of free blocks besides null block
+- size_t m_BlocksFreeCount;
+- // Total size of free blocks excluding null block
+- VkDeviceSize m_BlocksFreeSize;
+- uint32_t m_IsFreeBitmap;
+- uint8_t m_MemoryClasses;
+- uint32_t m_InnerIsFreeBitmap[MAX_MEMORY_CLASSES];
+- uint32_t m_ListsCount;
+- /*
+- * 0: 0-3 lists for small buffers
+- * 1+: 0-(2^SLI-1) lists for normal buffers
+- */
+- Block** m_FreeList;
+- VmaPoolAllocator<Block> m_BlockAllocator;
+- Block* m_NullBlock;
+- VmaBlockBufferImageGranularity m_GranularityHandler;
+-
+- uint8_t SizeToMemoryClass(VkDeviceSize size) const;
+- uint16_t SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const;
+- uint32_t GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const;
+- uint32_t GetListIndex(VkDeviceSize size) const;
+-
+- void RemoveFreeBlock(Block* block);
+- void InsertFreeBlock(Block* block);
+- void MergeBlock(Block* block, Block* prev);
+-
+- Block* FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const;
+- bool CheckBlock(
+- Block& block,
+- uint32_t listIndex,
+- VkDeviceSize allocSize,
+- VkDeviceSize allocAlignment,
+- VmaSuballocationType allocType,
+- VmaAllocationRequest* pAllocationRequest);
+-};
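
The strategy bits that CreateAllocationRequest() dispatches on below are chosen by callers through the public allocation API. A minimal caller-side sketch, assuming the VMA 3.x public names (this is editorial illustration, not part of the patched file):

    // Hypothetical selection of the TLSF search strategy when allocating:
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;  // fastest search
    // ...or VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT for the tightest fit,
    // ...or VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT for lowest-offset packing.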
+-
+-#ifndef _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
+-VmaBlockMetadata_TLSF::VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
+- VkDeviceSize bufferImageGranularity, bool isVirtual)
+- : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
+- m_AllocCount(0),
+- m_BlocksFreeCount(0),
+- m_BlocksFreeSize(0),
+- m_IsFreeBitmap(0),
+- m_MemoryClasses(0),
+- m_ListsCount(0),
+- m_FreeList(VMA_NULL),
+- m_BlockAllocator(pAllocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT),
+- m_NullBlock(VMA_NULL),
+- m_GranularityHandler(bufferImageGranularity) {}
+-
+-VmaBlockMetadata_TLSF::~VmaBlockMetadata_TLSF()
+-{
+- if (m_FreeList)
+- vma_delete_array(GetAllocationCallbacks(), m_FreeList, m_ListsCount);
+- m_GranularityHandler.Destroy(GetAllocationCallbacks());
+-}
+-
+-void VmaBlockMetadata_TLSF::Init(VkDeviceSize size)
+-{
+- VmaBlockMetadata::Init(size);
+-
+- if (!IsVirtual())
+- m_GranularityHandler.Init(GetAllocationCallbacks(), size);
+-
+- m_NullBlock = m_BlockAllocator.Alloc();
+- m_NullBlock->size = size;
+- m_NullBlock->offset = 0;
+- m_NullBlock->prevPhysical = VMA_NULL;
+- m_NullBlock->nextPhysical = VMA_NULL;
+- m_NullBlock->MarkFree();
+- m_NullBlock->NextFree() = VMA_NULL;
+- m_NullBlock->PrevFree() = VMA_NULL;
+- uint8_t memoryClass = SizeToMemoryClass(size);
+- uint16_t sli = SizeToSecondIndex(size, memoryClass);
+- m_ListsCount = (memoryClass == 0 ? 0 : (memoryClass - 1) * (1UL << SECOND_LEVEL_INDEX) + sli) + 1;
+- if (IsVirtual())
+- m_ListsCount += 1UL << SECOND_LEVEL_INDEX;
+- else
+- m_ListsCount += 4;
+-
+- m_MemoryClasses = memoryClass + uint8_t(2);
+- memset(m_InnerIsFreeBitmap, 0, MAX_MEMORY_CLASSES * sizeof(uint32_t));
+-
+- m_FreeList = vma_new_array(GetAllocationCallbacks(), Block*, m_ListsCount);
+- memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
+-}
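
A worked instance of the bucket-count arithmetic in Init() above, assuming a 1 MiB virtual block (the numbers are illustrative, derived from the constants SECOND_LEVEL_INDEX = 5 and MEMORY_CLASS_SHIFT = 7):

    // size = 1 MiB = 1 << 20
    // memoryClass  = MSB(size) - MEMORY_CLASS_SHIFT = 20 - 7 = 13
    // sli          = ((1 << 20) >> (13 + 7 - 5)) ^ (1 << 5) = 32 ^ 32 = 0
    // m_ListsCount = (13 - 1) * (1 << 5) + 0 + 1 = 385, plus (1 << 5) = 32
    //                small-buffer lists for a virtual block = 417 list heads
    // m_MemoryClasses = 13 + 2 = 15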
+-
+-bool VmaBlockMetadata_TLSF::Validate() const
+-{
+- VMA_VALIDATE(GetSumFreeSize() <= GetSize());
+-
+- VkDeviceSize calculatedSize = m_NullBlock->size;
+- VkDeviceSize calculatedFreeSize = m_NullBlock->size;
+- size_t allocCount = 0;
+- size_t freeCount = 0;
+-
+- // Check integrity of free lists
+- for (uint32_t list = 0; list < m_ListsCount; ++list)
+- {
+- Block* block = m_FreeList[list];
+- if (block != VMA_NULL)
+- {
+- VMA_VALIDATE(block->IsFree());
+- VMA_VALIDATE(block->PrevFree() == VMA_NULL);
+- while (block->NextFree())
+- {
+- VMA_VALIDATE(block->NextFree()->IsFree());
+- VMA_VALIDATE(block->NextFree()->PrevFree() == block);
+- block = block->NextFree();
+- }
+- }
+- }
+-
+- VkDeviceSize nextOffset = m_NullBlock->offset;
+- auto validateCtx = m_GranularityHandler.StartValidation(GetAllocationCallbacks(), IsVirtual());
+-
+- VMA_VALIDATE(m_NullBlock->nextPhysical == VMA_NULL);
+- if (m_NullBlock->prevPhysical)
+- {
+- VMA_VALIDATE(m_NullBlock->prevPhysical->nextPhysical == m_NullBlock);
+- }
+- // Check all blocks
+- for (Block* prev = m_NullBlock->prevPhysical; prev != VMA_NULL; prev = prev->prevPhysical)
+- {
+- VMA_VALIDATE(prev->offset + prev->size == nextOffset);
+- nextOffset = prev->offset;
+- calculatedSize += prev->size;
+-
+- uint32_t listIndex = GetListIndex(prev->size);
+- if (prev->IsFree())
+- {
+- ++freeCount;
+- // Check if free block belongs to free list
+- Block* freeBlock = m_FreeList[listIndex];
+- VMA_VALIDATE(freeBlock != VMA_NULL);
+-
+- bool found = false;
+- do
+- {
+- if (freeBlock == prev)
+- found = true;
+-
+- freeBlock = freeBlock->NextFree();
+- } while (!found && freeBlock != VMA_NULL);
+-
+- VMA_VALIDATE(found);
+- calculatedFreeSize += prev->size;
+- }
+- else
+- {
+- ++allocCount;
+- // Check if taken block is not on a free list
+- Block* freeBlock = m_FreeList[listIndex];
+- while (freeBlock)
+- {
+- VMA_VALIDATE(freeBlock != prev);
+- freeBlock = freeBlock->NextFree();
+- }
+-
+- if (!IsVirtual())
+- {
+- VMA_VALIDATE(m_GranularityHandler.Validate(validateCtx, prev->offset, prev->size));
+- }
+- }
+-
+- if (prev->prevPhysical)
+- {
+- VMA_VALIDATE(prev->prevPhysical->nextPhysical == prev);
+- }
+- }
+-
+- if (!IsVirtual())
+- {
+- VMA_VALIDATE(m_GranularityHandler.FinishValidation(validateCtx));
+- }
+-
+- VMA_VALIDATE(nextOffset == 0);
+- VMA_VALIDATE(calculatedSize == GetSize());
+- VMA_VALIDATE(calculatedFreeSize == GetSumFreeSize());
+- VMA_VALIDATE(allocCount == m_AllocCount);
+- VMA_VALIDATE(freeCount == m_BlocksFreeCount);
+-
+- return true;
+-}
+-
+-void VmaBlockMetadata_TLSF::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
+-{
+- inoutStats.statistics.blockCount++;
+- inoutStats.statistics.blockBytes += GetSize();
+- if (m_NullBlock->size > 0)
+- VmaAddDetailedStatisticsUnusedRange(inoutStats, m_NullBlock->size);
+-
+- for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
+- {
+- if (block->IsFree())
+- VmaAddDetailedStatisticsUnusedRange(inoutStats, block->size);
+- else
+- VmaAddDetailedStatisticsAllocation(inoutStats, block->size);
+- }
+-}
+-
+-void VmaBlockMetadata_TLSF::AddStatistics(VmaStatistics& inoutStats) const
+-{
+- inoutStats.blockCount++;
+- inoutStats.allocationCount += (uint32_t)m_AllocCount;
+- inoutStats.blockBytes += GetSize();
+- inoutStats.allocationBytes += GetSize() - GetSumFreeSize();
+-}
+-
+-#if VMA_STATS_STRING_ENABLED
+-void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const
+-{
+- size_t blockCount = m_AllocCount + m_BlocksFreeCount;
+- VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
+- VmaVector<Block*, VmaStlAllocator<Block*>> blockList(blockCount, allocator);
+-
+- size_t i = blockCount;
+- for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
+- {
+- blockList[--i] = block;
+- }
+- VMA_ASSERT(i == 0);
+-
+- VmaDetailedStatistics stats;
+- VmaClearDetailedStatistics(stats);
+- AddDetailedStatistics(stats);
+-
+- PrintDetailedMap_Begin(json,
+- stats.statistics.blockBytes - stats.statistics.allocationBytes,
+- stats.statistics.allocationCount,
+- stats.unusedRangeCount);
+-
+- for (; i < blockCount; ++i)
+- {
+- Block* block = blockList[i];
+- if (block->IsFree())
+- PrintDetailedMap_UnusedRange(json, block->offset, block->size);
+- else
+- PrintDetailedMap_Allocation(json, block->offset, block->size, block->UserData());
+- }
+- if (m_NullBlock->size > 0)
+- PrintDetailedMap_UnusedRange(json, m_NullBlock->offset, m_NullBlock->size);
+-
+- PrintDetailedMap_End(json);
+-}
+-#endif
+-
+-bool VmaBlockMetadata_TLSF::CreateAllocationRequest(
+- VkDeviceSize allocSize,
+- VkDeviceSize allocAlignment,
+- bool upperAddress,
+- VmaSuballocationType allocType,
+- uint32_t strategy,
+- VmaAllocationRequest* pAllocationRequest)
+-{
+- VMA_ASSERT(allocSize > 0 && "Cannot allocate empty block!");
+- VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
+-
+- // For small granularity round up
+- if (!IsVirtual())
+- m_GranularityHandler.RoundupAllocRequest(allocType, allocSize, allocAlignment);
+-
+- allocSize += GetDebugMargin();
+- // Quick check for too small pool
+- if (allocSize > GetSumFreeSize())
+- return false;
+-
+- // If no free blocks in pool then check only null block
+- if (m_BlocksFreeCount == 0)
+- return CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest);
+-
+- // Round up to the next block
+- VkDeviceSize sizeForNextList = allocSize;
+- VkDeviceSize smallSizeStep = VkDeviceSize(SMALL_BUFFER_SIZE / (IsVirtual() ? 1 << SECOND_LEVEL_INDEX : 4));
+- if (allocSize > SMALL_BUFFER_SIZE)
+- {
+- sizeForNextList += (1ULL << (VMA_BITSCAN_MSB(allocSize) - SECOND_LEVEL_INDEX));
+- }
+- else if (allocSize > SMALL_BUFFER_SIZE - smallSizeStep)
+- sizeForNextList = SMALL_BUFFER_SIZE + 1;
+- else
+- sizeForNextList += smallSizeStep;
+-
+- uint32_t nextListIndex = m_ListsCount;
+- uint32_t prevListIndex = m_ListsCount;
+- Block* nextListBlock = VMA_NULL;
+- Block* prevListBlock = VMA_NULL;
+-
+- // Check blocks according to strategies
+- if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT)
+- {
+- // Quick check for larger block first
+- nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
+- if (nextListBlock != VMA_NULL && CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+- return true;
+-
+- // If not fitted then null block
+- if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
+- return true;
+-
+- // Null block failed, search larger bucket
+- while (nextListBlock)
+- {
+- if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+- return true;
+- nextListBlock = nextListBlock->NextFree();
+- }
+-
+- // Failed again, check best fit bucket
+- prevListBlock = FindFreeBlock(allocSize, prevListIndex);
+- while (prevListBlock)
+- {
+- if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+- return true;
+- prevListBlock = prevListBlock->NextFree();
+- }
+- }
+- else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT)
+- {
+- // Check best fit bucket
+- prevListBlock = FindFreeBlock(allocSize, prevListIndex);
+- while (prevListBlock)
+- {
+- if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+- return true;
+- prevListBlock = prevListBlock->NextFree();
+- }
+-
+- // If failed check null block
+- if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
+- return true;
+-
+- // Check larger bucket
+- nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
+- while (nextListBlock)
+- {
+- if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+- return true;
+- nextListBlock = nextListBlock->NextFree();
+- }
+- }
+-    else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT)
+- {
+- // Perform search from the start
+- VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
+- VmaVector<Block*, VmaStlAllocator<Block*>> blockList(m_BlocksFreeCount, allocator);
+-
+- size_t i = m_BlocksFreeCount;
+- for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
+- {
+- if (block->IsFree() && block->size >= allocSize)
+- blockList[--i] = block;
+- }
+-
+- for (; i < m_BlocksFreeCount; ++i)
+- {
+- Block& block = *blockList[i];
+- if (CheckBlock(block, GetListIndex(block.size), allocSize, allocAlignment, allocType, pAllocationRequest))
+- return true;
+- }
+-
+- // If failed check null block
+- if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
+- return true;
+-
+- // Whole range searched, no more memory
+- return false;
+- }
+- else
+- {
+- // Check larger bucket
+- nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
+- while (nextListBlock)
+- {
+- if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+- return true;
+- nextListBlock = nextListBlock->NextFree();
+- }
+-
+- // If failed check null block
+- if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
+- return true;
+-
+- // Check best fit bucket
+- prevListBlock = FindFreeBlock(allocSize, prevListIndex);
+- while (prevListBlock)
+- {
+- if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+- return true;
+- prevListBlock = prevListBlock->NextFree();
+- }
+- }
+-
+- // Worst case, full search has to be done
+- while (++nextListIndex < m_ListsCount)
+- {
+- nextListBlock = m_FreeList[nextListIndex];
+- while (nextListBlock)
+- {
+- if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+- return true;
+- nextListBlock = nextListBlock->NextFree();
+- }
+- }
+-
+- // No more memory sadly
+- return false;
+-}
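
One concrete pass through the "round up to the next block" step above, assuming a 2500-byte request in a virtual block (illustrative numbers only):

    // allocSize = 2500 > SMALL_BUFFER_SIZE (256), so:
    // sizeForNextList = 2500 + (1 << (MSB(2500) - SECOND_LEVEL_INDEX))
    //                 = 2500 + (1 << (11 - 5)) = 2564
    // 2564 maps one second-level bucket higher than 2500, so every block on
    // that free list is large enough for the raw size; the first candidate
    // found there fits without walking the list (alignment and granularity
    // are still verified by CheckBlock()).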
+-
+-VkResult VmaBlockMetadata_TLSF::CheckCorruption(const void* pBlockData)
+-{
+- for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
+- {
+- if (!block->IsFree())
+- {
+- if (!VmaValidateMagicValue(pBlockData, block->offset + block->size))
+- {
+- VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
+- return VK_ERROR_UNKNOWN_COPY;
+- }
+- }
+- }
+-
+- return VK_SUCCESS;
+-}
+-
+-void VmaBlockMetadata_TLSF::Alloc(
+- const VmaAllocationRequest& request,
+- VmaSuballocationType type,
+- void* userData)
+-{
+- VMA_ASSERT(request.type == VmaAllocationRequestType::TLSF);
+-
+- // Get block and pop it from the free list
+- Block* currentBlock = (Block*)request.allocHandle;
+- VkDeviceSize offset = request.algorithmData;
+- VMA_ASSERT(currentBlock != VMA_NULL);
+- VMA_ASSERT(currentBlock->offset <= offset);
+-
+- if (currentBlock != m_NullBlock)
+- RemoveFreeBlock(currentBlock);
+-
+- VkDeviceSize debugMargin = GetDebugMargin();
+-    VkDeviceSize missingAlignment = offset - currentBlock->offset;
+-
+-    // Append missing alignment to prev block or create new one
+-    if (missingAlignment)
+-    {
+-        Block* prevBlock = currentBlock->prevPhysical;
+-        VMA_ASSERT(prevBlock != VMA_NULL && "There should be no missing alignment at offset 0!");
+-
+-        if (prevBlock->IsFree() && prevBlock->size != debugMargin)
+-        {
+-            uint32_t oldList = GetListIndex(prevBlock->size);
+-            prevBlock->size += missingAlignment;
+-            // Check if new size crosses list bucket
+-            if (oldList != GetListIndex(prevBlock->size))
+-            {
+-                prevBlock->size -= missingAlignment;
+-                RemoveFreeBlock(prevBlock);
+-                prevBlock->size += missingAlignment;
+-                InsertFreeBlock(prevBlock);
+-            }
+-            else
+-                m_BlocksFreeSize += missingAlignment;
+-        }
+-        else
+-        {
+-            Block* newBlock = m_BlockAllocator.Alloc();
+-            currentBlock->prevPhysical = newBlock;
+-            prevBlock->nextPhysical = newBlock;
+-            newBlock->prevPhysical = prevBlock;
+-            newBlock->nextPhysical = currentBlock;
+-            newBlock->size = missingAlignment;
+-            newBlock->offset = currentBlock->offset;
+-            newBlock->MarkTaken();
+-
+-            InsertFreeBlock(newBlock);
+-        }
+-
+-        currentBlock->size -= missingAlignment;
+-        currentBlock->offset += missingAlignment;
+- }
+-
+- VkDeviceSize size = request.size + debugMargin;
+- if (currentBlock->size == size)
+- {
+- if (currentBlock == m_NullBlock)
+- {
+- // Setup new null block
+- m_NullBlock = m_BlockAllocator.Alloc();
+- m_NullBlock->size = 0;
+- m_NullBlock->offset = currentBlock->offset + size;
+- m_NullBlock->prevPhysical = currentBlock;
+- m_NullBlock->nextPhysical = VMA_NULL;
+- m_NullBlock->MarkFree();
+- m_NullBlock->PrevFree() = VMA_NULL;
+- m_NullBlock->NextFree() = VMA_NULL;
+- currentBlock->nextPhysical = m_NullBlock;
+- currentBlock->MarkTaken();
+- }
+- }
+- else
+- {
+- VMA_ASSERT(currentBlock->size > size && "Proper block already found, shouldn't find smaller one!");
+-
+- // Create new free block
+- Block* newBlock = m_BlockAllocator.Alloc();
+- newBlock->size = currentBlock->size - size;
+- newBlock->offset = currentBlock->offset + size;
+- newBlock->prevPhysical = currentBlock;
+- newBlock->nextPhysical = currentBlock->nextPhysical;
+- currentBlock->nextPhysical = newBlock;
+- currentBlock->size = size;
+-
+- if (currentBlock == m_NullBlock)
+- {
+- m_NullBlock = newBlock;
+- m_NullBlock->MarkFree();
+- m_NullBlock->NextFree() = VMA_NULL;
+- m_NullBlock->PrevFree() = VMA_NULL;
+- currentBlock->MarkTaken();
+- }
+- else
+- {
+- newBlock->nextPhysical->prevPhysical = newBlock;
+- newBlock->MarkTaken();
+- InsertFreeBlock(newBlock);
+- }
+- }
+- currentBlock->UserData() = userData;
+-
+- if (debugMargin > 0)
+- {
+- currentBlock->size -= debugMargin;
+- Block* newBlock = m_BlockAllocator.Alloc();
+- newBlock->size = debugMargin;
+- newBlock->offset = currentBlock->offset + currentBlock->size;
+- newBlock->prevPhysical = currentBlock;
+- newBlock->nextPhysical = currentBlock->nextPhysical;
+- newBlock->MarkTaken();
+- currentBlock->nextPhysical->prevPhysical = newBlock;
+- currentBlock->nextPhysical = newBlock;
+- InsertFreeBlock(newBlock);
+- }
+-
+- if (!IsVirtual())
+- m_GranularityHandler.AllocPages((uint8_t)(uintptr_t)request.customData,
+- currentBlock->offset, currentBlock->size);
+- ++m_AllocCount;
+-}
+-
+-void VmaBlockMetadata_TLSF::Free(VmaAllocHandle allocHandle)
+-{
+- Block* block = (Block*)allocHandle;
+- Block* next = block->nextPhysical;
+- VMA_ASSERT(!block->IsFree() && "Block is already free!");
+-
+- if (!IsVirtual())
+- m_GranularityHandler.FreePages(block->offset, block->size);
+- --m_AllocCount;
+-
+- VkDeviceSize debugMargin = GetDebugMargin();
+- if (debugMargin > 0)
+- {
+- RemoveFreeBlock(next);
+- MergeBlock(next, block);
+- block = next;
+- next = next->nextPhysical;
+- }
+-
+- // Try merging
+- Block* prev = block->prevPhysical;
+- if (prev != VMA_NULL && prev->IsFree() && prev->size != debugMargin)
+- {
+- RemoveFreeBlock(prev);
+- MergeBlock(block, prev);
+- }
+-
+- if (!next->IsFree())
+- InsertFreeBlock(block);
+- else if (next == m_NullBlock)
+- MergeBlock(m_NullBlock, block);
+- else
+- {
+- RemoveFreeBlock(next);
+- MergeBlock(next, block);
+- InsertFreeBlock(next);
+- }
+-}
+-
+-void VmaBlockMetadata_TLSF::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
+-{
+- Block* block = (Block*)allocHandle;
+- VMA_ASSERT(!block->IsFree() && "Cannot get allocation info for free block!");
+- outInfo.offset = block->offset;
+- outInfo.size = block->size;
+- outInfo.pUserData = block->UserData();
+-}
+-
+-void* VmaBlockMetadata_TLSF::GetAllocationUserData(VmaAllocHandle allocHandle) const
+-{
+- Block* block = (Block*)allocHandle;
+- VMA_ASSERT(!block->IsFree() && "Cannot get user data for free block!");
+- return block->UserData();
+-}
+-
+-VmaAllocHandle VmaBlockMetadata_TLSF::GetAllocationListBegin() const
+-{
+- if (m_AllocCount == 0)
+- return VK_NULL_HANDLE;
+-
+- for (Block* block = m_NullBlock->prevPhysical; block; block = block->prevPhysical)
+- {
+- if (!block->IsFree())
+- return (VmaAllocHandle)block;
+- }
+-    VMA_ASSERT(false && "If m_AllocCount > 0 then at least one allocation should be found!");
+- return VK_NULL_HANDLE;
+-}
+-
+-VmaAllocHandle VmaBlockMetadata_TLSF::GetNextAllocation(VmaAllocHandle prevAlloc) const
+-{
+- Block* startBlock = (Block*)prevAlloc;
+- VMA_ASSERT(!startBlock->IsFree() && "Incorrect block!");
+-
+- for (Block* block = startBlock->prevPhysical; block; block = block->prevPhysical)
+- {
+- if (!block->IsFree())
+- return (VmaAllocHandle)block;
+- }
+- return VK_NULL_HANDLE;
+-}
+-
+-VkDeviceSize VmaBlockMetadata_TLSF::GetNextFreeRegionSize(VmaAllocHandle alloc) const
+-{
+- Block* block = (Block*)alloc;
+- VMA_ASSERT(!block->IsFree() && "Incorrect block!");
+-
+- if (block->prevPhysical)
+- return block->prevPhysical->IsFree() ? block->prevPhysical->size : 0;
+- return 0;
+-}
+-
+-void VmaBlockMetadata_TLSF::Clear()
+-{
+- m_AllocCount = 0;
+- m_BlocksFreeCount = 0;
+- m_BlocksFreeSize = 0;
+- m_IsFreeBitmap = 0;
+- m_NullBlock->offset = 0;
+- m_NullBlock->size = GetSize();
+- Block* block = m_NullBlock->prevPhysical;
+- m_NullBlock->prevPhysical = VMA_NULL;
+- while (block)
+- {
+- Block* prev = block->prevPhysical;
+- m_BlockAllocator.Free(block);
+- block = prev;
+- }
+- memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
+- memset(m_InnerIsFreeBitmap, 0, m_MemoryClasses * sizeof(uint32_t));
+- m_GranularityHandler.Clear();
+-}
+-
+-void VmaBlockMetadata_TLSF::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
+-{
+- Block* block = (Block*)allocHandle;
+- VMA_ASSERT(!block->IsFree() && "Trying to set user data for not allocated block!");
+- block->UserData() = userData;
+-}
+-
+-void VmaBlockMetadata_TLSF::DebugLogAllAllocations() const
+-{
+- for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
+- if (!block->IsFree())
+- DebugLogAllocation(block->offset, block->size, block->UserData());
+-}
+-
+-uint8_t VmaBlockMetadata_TLSF::SizeToMemoryClass(VkDeviceSize size) const
+-{
+- if (size > SMALL_BUFFER_SIZE)
+- return uint8_t(VMA_BITSCAN_MSB(size) - MEMORY_CLASS_SHIFT);
+- return 0;
+-}
+-
+-uint16_t VmaBlockMetadata_TLSF::SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const
+-{
+- if (memoryClass == 0)
+- {
+- if (IsVirtual())
+- return static_cast<uint16_t>((size - 1) / 8);
+- else
+- return static_cast<uint16_t>((size - 1) / 64);
+- }
+- return static_cast<uint16_t>((size >> (memoryClass + MEMORY_CLASS_SHIFT - SECOND_LEVEL_INDEX)) ^ (1U << SECOND_LEVEL_INDEX));
+-}
+-
+-uint32_t VmaBlockMetadata_TLSF::GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const
+-{
+- if (memoryClass == 0)
+- return secondIndex;
+-
+- const uint32_t index = static_cast<uint32_t>(memoryClass - 1) * (1 << SECOND_LEVEL_INDEX) + secondIndex;
+- if (IsVirtual())
+- return index + (1 << SECOND_LEVEL_INDEX);
+- else
+- return index + 4;
+-}
+-
+-uint32_t VmaBlockMetadata_TLSF::GetListIndex(VkDeviceSize size) const
+-{
+- uint8_t memoryClass = SizeToMemoryClass(size);
+- return GetListIndex(memoryClass, SizeToSecondIndex(size, memoryClass));
+-}
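
A worked example of the two-level mapping implemented by the three functions above, assuming a virtual block and a 2500-byte size (illustrative numbers only):

    // SizeToMemoryClass(2500)    = MSB(2500) - MEMORY_CLASS_SHIFT = 11 - 7 = 4
    // SizeToSecondIndex(2500, 4) = (2500 >> (4 + 7 - 5)) ^ (1 << 5)
    //                            = (2500 >> 6) ^ 32 = 39 ^ 32 = 7
    // GetListIndex(4, 7)         = (4 - 1) * (1 << 5) + 7 + (1 << 5) = 135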
+-
+-void VmaBlockMetadata_TLSF::RemoveFreeBlock(Block* block)
+-{
+- VMA_ASSERT(block != m_NullBlock);
+- VMA_ASSERT(block->IsFree());
+-
+- if (block->NextFree() != VMA_NULL)
+- block->NextFree()->PrevFree() = block->PrevFree();
+- if (block->PrevFree() != VMA_NULL)
+- block->PrevFree()->NextFree() = block->NextFree();
+- else
+- {
+- uint8_t memClass = SizeToMemoryClass(block->size);
+- uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
+- uint32_t index = GetListIndex(memClass, secondIndex);
+- VMA_ASSERT(m_FreeList[index] == block);
+- m_FreeList[index] = block->NextFree();
+- if (block->NextFree() == VMA_NULL)
+- {
+- m_InnerIsFreeBitmap[memClass] &= ~(1U << secondIndex);
+- if (m_InnerIsFreeBitmap[memClass] == 0)
+- m_IsFreeBitmap &= ~(1UL << memClass);
+- }
+- }
+- block->MarkTaken();
+- block->UserData() = VMA_NULL;
+- --m_BlocksFreeCount;
+- m_BlocksFreeSize -= block->size;
+-}
+-
+-void VmaBlockMetadata_TLSF::InsertFreeBlock(Block* block)
+-{
+- VMA_ASSERT(block != m_NullBlock);
+- VMA_ASSERT(!block->IsFree() && "Cannot insert block twice!");
+-
+- uint8_t memClass = SizeToMemoryClass(block->size);
+- uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
+- uint32_t index = GetListIndex(memClass, secondIndex);
+- VMA_ASSERT(index < m_ListsCount);
+- block->PrevFree() = VMA_NULL;
+- block->NextFree() = m_FreeList[index];
+- m_FreeList[index] = block;
+- if (block->NextFree() != VMA_NULL)
+- block->NextFree()->PrevFree() = block;
+- else
+- {
+- m_InnerIsFreeBitmap[memClass] |= 1U << secondIndex;
+- m_IsFreeBitmap |= 1UL << memClass;
+- }
+- ++m_BlocksFreeCount;
+- m_BlocksFreeSize += block->size;
+-}
+-
+-void VmaBlockMetadata_TLSF::MergeBlock(Block* block, Block* prev)
+-{
+- VMA_ASSERT(block->prevPhysical == prev && "Cannot merge separate physical regions!");
+- VMA_ASSERT(!prev->IsFree() && "Cannot merge block that belongs to free list!");
+-
+- block->offset = prev->offset;
+- block->size += prev->size;
+- block->prevPhysical = prev->prevPhysical;
+- if (block->prevPhysical)
+- block->prevPhysical->nextPhysical = block;
+- m_BlockAllocator.Free(prev);
+-}
+-
+-VmaBlockMetadata_TLSF::Block* VmaBlockMetadata_TLSF::FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const
+-{
+- uint8_t memoryClass = SizeToMemoryClass(size);
+- uint32_t innerFreeMap = m_InnerIsFreeBitmap[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass));
+- if (!innerFreeMap)
+- {
+- // Check higher levels for available blocks
+- uint32_t freeMap = m_IsFreeBitmap & (~0UL << (memoryClass + 1));
+- if (!freeMap)
+- return VMA_NULL; // No more memory available
+-
+- // Find lowest free region
+- memoryClass = VMA_BITSCAN_LSB(freeMap);
+- innerFreeMap = m_InnerIsFreeBitmap[memoryClass];
+- VMA_ASSERT(innerFreeMap != 0);
+- }
+- // Find lowest free subregion
+- listIndex = GetListIndex(memoryClass, VMA_BITSCAN_LSB(innerFreeMap));
+- VMA_ASSERT(m_FreeList[listIndex]);
+- return m_FreeList[listIndex];
+-}
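
FindFreeBlock() above is the classic TLSF two-level bitmap scan. A standalone sketch of the same idea, assuming C++20 <bit> and a flat 32-lists-per-class layout (the real GetListIndex() also offsets past the small-buffer lists, omitted here for brevity):

    #include <bit>
    #include <cstdint>

    // firstLevel: one bit per non-empty memory class (cf. m_IsFreeBitmap).
    // innerLevel[c]: one bit per non-empty list in class c (cf. m_InnerIsFreeBitmap).
    int FindListIndex(uint64_t firstLevel, const uint32_t innerLevel[],
                      int memoryClass, int secondIndex)
    {
        // Mask off lists that are too small inside the starting class.
        uint32_t inner = innerLevel[memoryClass] & (~0u << secondIndex);
        if (inner == 0)
        {
            // Nothing large enough here: fall back to any strictly larger class.
            uint64_t upper = firstLevel & (~0ull << (memoryClass + 1));
            if (upper == 0)
                return -1; // no free block large enough anywhere
            memoryClass = std::countr_zero(upper);
            inner = innerLevel[memoryClass]; // non-empty by construction
        }
        // Lowest set bit = smallest list guaranteed to hold a fitting block.
        return memoryClass * 32 + std::countr_zero(inner);
    }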
+-
+-bool VmaBlockMetadata_TLSF::CheckBlock(
+- Block& block,
+- uint32_t listIndex,
+- VkDeviceSize allocSize,
+- VkDeviceSize allocAlignment,
+- VmaSuballocationType allocType,
+- VmaAllocationRequest* pAllocationRequest)
+-{
+- VMA_ASSERT(block.IsFree() && "Block is already taken!");
+-
+- VkDeviceSize alignedOffset = VmaAlignUp(block.offset, allocAlignment);
+- if (block.size < allocSize + alignedOffset - block.offset)
+- return false;
+-
+- // Check for granularity conflicts
+- if (!IsVirtual() &&
+- m_GranularityHandler.CheckConflictAndAlignUp(alignedOffset, allocSize, block.offset, block.size, allocType))
+- return false;
+-
+- // Alloc successful
+- pAllocationRequest->type = VmaAllocationRequestType::TLSF;
+- pAllocationRequest->allocHandle = (VmaAllocHandle)&block;
+- pAllocationRequest->size = allocSize - GetDebugMargin();
+- pAllocationRequest->customData = (void*)allocType;
+- pAllocationRequest->algorithmData = alignedOffset;
+-
+-    // Place the block at the start of the list if it's a normal block
+- if (listIndex != m_ListsCount && block.PrevFree())
+- {
+- block.PrevFree()->NextFree() = block.NextFree();
+- if (block.NextFree())
+- block.NextFree()->PrevFree() = block.PrevFree();
+- block.PrevFree() = VMA_NULL;
+- block.NextFree() = m_FreeList[listIndex];
+- m_FreeList[listIndex] = &block;
+- if (block.NextFree())
+- block.NextFree()->PrevFree() = &block;
+- }
+-
+- return true;
+-}
+-#endif // _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
+-#endif // _VMA_BLOCK_METADATA_TLSF
+-
+-#ifndef _VMA_BLOCK_VECTOR
+-/*
+-Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
+-Vulkan memory type.
+-
+-Synchronized internally with a mutex.
+-*/
+-class VmaBlockVector
+-{
+- friend struct VmaDefragmentationContext_T;
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockVector)
+-public:
+- VmaBlockVector(
+- VmaAllocator hAllocator,
+- VmaPool hParentPool,
+- uint32_t memoryTypeIndex,
+- VkDeviceSize preferredBlockSize,
+- size_t minBlockCount,
+- size_t maxBlockCount,
+- VkDeviceSize bufferImageGranularity,
+- bool explicitBlockSize,
+- uint32_t algorithm,
+- float priority,
+- VkDeviceSize minAllocationAlignment,
+- void* pMemoryAllocateNext);
+- ~VmaBlockVector();
+-
+- VmaAllocator GetAllocator() const { return m_hAllocator; }
+- VmaPool GetParentPool() const { return m_hParentPool; }
+- bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
+- uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
+- VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
+- VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
+- uint32_t GetAlgorithm() const { return m_Algorithm; }
+- bool HasExplicitBlockSize() const { return m_ExplicitBlockSize; }
+- float GetPriority() const { return m_Priority; }
+- const void* GetAllocationNextPtr() const { return m_pMemoryAllocateNext; }
+- // To be used only while the m_Mutex is locked. Used during defragmentation.
+- size_t GetBlockCount() const { return m_Blocks.size(); }
+- // To be used only while the m_Mutex is locked. Used during defragmentation.
+- VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
+- VMA_RW_MUTEX &GetMutex() { return m_Mutex; }
+-
+- VkResult CreateMinBlocks();
+- void AddStatistics(VmaStatistics& inoutStats);
+- void AddDetailedStatistics(VmaDetailedStatistics& inoutStats);
+- bool IsEmpty();
+- bool IsCorruptionDetectionEnabled() const;
+-
+- VkResult Allocate(
+- VkDeviceSize size,
+- VkDeviceSize alignment,
+- const VmaAllocationCreateInfo& createInfo,
+- VmaSuballocationType suballocType,
+- size_t allocationCount,
+- VmaAllocation* pAllocations);
+-
+- void Free(const VmaAllocation hAllocation);
+-
+-#if VMA_STATS_STRING_ENABLED
+- void PrintDetailedMap(class VmaJsonWriter& json);
+-#endif
+-
+- VkResult CheckCorruption();
+-
+-private:
+- const VmaAllocator m_hAllocator;
+- const VmaPool m_hParentPool;
+- const uint32_t m_MemoryTypeIndex;
+- const VkDeviceSize m_PreferredBlockSize;
+- const size_t m_MinBlockCount;
+- const size_t m_MaxBlockCount;
+- const VkDeviceSize m_BufferImageGranularity;
+- const bool m_ExplicitBlockSize;
+- const uint32_t m_Algorithm;
+- const float m_Priority;
+- const VkDeviceSize m_MinAllocationAlignment;
+-
+- void* const m_pMemoryAllocateNext;
+- VMA_RW_MUTEX m_Mutex;
+- // Incrementally sorted by sumFreeSize, ascending.
+- VmaVector<VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*>> m_Blocks;
+- uint32_t m_NextBlockId;
+- bool m_IncrementalSort = true;
+-
+- void SetIncrementalSort(bool val) { m_IncrementalSort = val; }
+-
+- VkDeviceSize CalcMaxBlockSize() const;
+- // Finds and removes given block from vector.
+- void Remove(VmaDeviceMemoryBlock* pBlock);
+- // Performs single step in sorting m_Blocks. They may not be fully sorted
+- // after this call.
+- void IncrementallySortBlocks();
+- void SortByFreeSize();
+-
+- VkResult AllocatePage(
+- VkDeviceSize size,
+- VkDeviceSize alignment,
+- const VmaAllocationCreateInfo& createInfo,
+- VmaSuballocationType suballocType,
+- VmaAllocation* pAllocation);
+-
+- VkResult AllocateFromBlock(
+- VmaDeviceMemoryBlock* pBlock,
+- VkDeviceSize size,
+- VkDeviceSize alignment,
+- VmaAllocationCreateFlags allocFlags,
+- void* pUserData,
+- VmaSuballocationType suballocType,
+- uint32_t strategy,
+- VmaAllocation* pAllocation);
+-
+- VkResult CommitAllocationRequest(
+- VmaAllocationRequest& allocRequest,
+- VmaDeviceMemoryBlock* pBlock,
+- VkDeviceSize alignment,
+- VmaAllocationCreateFlags allocFlags,
+- void* pUserData,
+- VmaSuballocationType suballocType,
+- VmaAllocation* pAllocation);
+-
+- VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
+- bool HasEmptyBlock();
+-};
+-#endif // _VMA_BLOCK_VECTOR
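
IncrementallySortBlocks() above amortizes sorting by performing a single step per call, as its comment notes. A standalone sketch of that idea (editorial, not the VMA code): swap the first adjacent pair that is out of order, at O(n) scan cost but O(1) mutation per call, keeping the vector nearly sorted over time.

    #include <utility>
    #include <vector>

    // One incremental sort step over a nearly-sorted vector.
    template <typename T, typename Less>
    void IncrementalSortStep(std::vector<T>& v, Less less)
    {
        for (size_t i = 1; i < v.size(); ++i)
        {
            if (less(v[i], v[i - 1]))
            {
                std::swap(v[i], v[i - 1]);
                return; // single step only
            }
        }
    }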
+-
+-#ifndef _VMA_DEFRAGMENTATION_CONTEXT
+-struct VmaDefragmentationContext_T
+-{
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaDefragmentationContext_T)
+-public:
+- VmaDefragmentationContext_T(
+- VmaAllocator hAllocator,
+- const VmaDefragmentationInfo& info);
+- ~VmaDefragmentationContext_T();
+-
+- void GetStats(VmaDefragmentationStats& outStats) { outStats = m_GlobalStats; }
+-
+- VkResult DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo);
+- VkResult DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo);
+-
+-private:
+-    // Max number of allocations to ignore due to size constraints before ending a single pass
+- static const uint8_t MAX_ALLOCS_TO_IGNORE = 16;
+- enum class CounterStatus { Pass, Ignore, End };
+-
+- struct FragmentedBlock
+- {
+- uint32_t data;
+- VmaDeviceMemoryBlock* block;
+- };
+- struct StateBalanced
+- {
+- VkDeviceSize avgFreeSize = 0;
+- VkDeviceSize avgAllocSize = UINT64_MAX;
+- };
+- struct StateExtensive
+- {
+- enum class Operation : uint8_t
+- {
+- FindFreeBlockBuffer, FindFreeBlockTexture, FindFreeBlockAll,
+- MoveBuffers, MoveTextures, MoveAll,
+- Cleanup, Done
+- };
+-
+- Operation operation = Operation::FindFreeBlockTexture;
+- size_t firstFreeBlock = SIZE_MAX;
+- };
+- struct MoveAllocationData
+- {
+- VkDeviceSize size;
+- VkDeviceSize alignment;
+- VmaSuballocationType type;
+- VmaAllocationCreateFlags flags;
+- VmaDefragmentationMove move = {};
+- };
+-
+- const VkDeviceSize m_MaxPassBytes;
+- const uint32_t m_MaxPassAllocations;
+- const PFN_vmaCheckDefragmentationBreakFunction m_BreakCallback;
+- void* m_BreakCallbackUserData;
+-
+- VmaStlAllocator<VmaDefragmentationMove> m_MoveAllocator;
+- VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>> m_Moves;
+-
+- uint8_t m_IgnoredAllocs = 0;
+- uint32_t m_Algorithm;
+- uint32_t m_BlockVectorCount;
+- VmaBlockVector* m_PoolBlockVector;
+- VmaBlockVector** m_pBlockVectors;
+- size_t m_ImmovableBlockCount = 0;
+- VmaDefragmentationStats m_GlobalStats = { 0 };
+- VmaDefragmentationStats m_PassStats = { 0 };
+- void* m_AlgorithmState = VMA_NULL;
+-
+- static MoveAllocationData GetMoveData(VmaAllocHandle handle, VmaBlockMetadata* metadata);
+- CounterStatus CheckCounters(VkDeviceSize bytes);
+- bool IncrementCounters(VkDeviceSize bytes);
+- bool ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block);
+- bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector);
+-
+- bool ComputeDefragmentation(VmaBlockVector& vector, size_t index);
+- bool ComputeDefragmentation_Fast(VmaBlockVector& vector);
+- bool ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update);
+- bool ComputeDefragmentation_Full(VmaBlockVector& vector);
+- bool ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index);
+-
+- void UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state);
+- bool MoveDataToFreeBlocks(VmaSuballocationType currentType,
+- VmaBlockVector& vector, size_t firstFreeBlock,
+- bool& texturePresent, bool& bufferPresent, bool& otherPresent);
+-};
+-#endif // _VMA_DEFRAGMENTATION_CONTEXT
+-
+-#ifndef _VMA_POOL_T
+-struct VmaPool_T
+-{
+- friend struct VmaPoolListItemTraits;
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaPool_T)
+-public:
+- VmaBlockVector m_BlockVector;
+- VmaDedicatedAllocationList m_DedicatedAllocations;
+-
+- VmaPool_T(
+- VmaAllocator hAllocator,
+- const VmaPoolCreateInfo& createInfo,
+- VkDeviceSize preferredBlockSize);
+- ~VmaPool_T();
+-
+- uint32_t GetId() const { return m_Id; }
+- void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
+-
+- const char* GetName() const { return m_Name; }
+- void SetName(const char* pName);
+-
+-#if VMA_STATS_STRING_ENABLED
+- //void PrintDetailedMap(class VmaStringBuilder& sb);
+-#endif
+-
+-private:
+- uint32_t m_Id;
+- char* m_Name;
+- VmaPool_T* m_PrevPool = VMA_NULL;
+- VmaPool_T* m_NextPool = VMA_NULL;
+-};
+-
+-struct VmaPoolListItemTraits
+-{
+- typedef VmaPool_T ItemType;
+-
+- static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; }
+- static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; }
+- static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; }
+- static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; }
+-};
+-#endif // _VMA_POOL_T
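
VmaPool_T wraps the VmaBlockVector declared earlier; pools are created through the public API. A minimal sketch, assuming an allocator handle and a memory type index obtained elsewhere (editorial illustration only):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex; // assumed: from vmaFindMemoryTypeIndex()
    poolCreateInfo.blockSize = 64ull << 20;        // optional explicit 64 MiB blocks
    poolCreateInfo.minBlockCount = 1;
    poolCreateInfo.maxBlockCount = 8;

    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... allocate with VmaAllocationCreateInfo::pool = pool ...
    vmaDestroyPool(allocator, pool);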
+-
+-#ifndef _VMA_CURRENT_BUDGET_DATA
+-struct VmaCurrentBudgetData
+-{
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaCurrentBudgetData)
+-public:
+-
+- VMA_ATOMIC_UINT32 m_BlockCount[VK_MAX_MEMORY_HEAPS];
+- VMA_ATOMIC_UINT32 m_AllocationCount[VK_MAX_MEMORY_HEAPS];
+- VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
+- VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
+-
+-#if VMA_MEMORY_BUDGET
+- VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
+- VMA_RW_MUTEX m_BudgetMutex;
+- uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
+- uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
+- uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
+-#endif // VMA_MEMORY_BUDGET
+-
+- VmaCurrentBudgetData();
+-
+- void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize);
+- void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize);
+-};
+-
+-#ifndef _VMA_CURRENT_BUDGET_DATA_FUNCTIONS
+-VmaCurrentBudgetData::VmaCurrentBudgetData()
+-{
+- for (uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
+- {
+- m_BlockCount[heapIndex] = 0;
+- m_AllocationCount[heapIndex] = 0;
+- m_BlockBytes[heapIndex] = 0;
+- m_AllocationBytes[heapIndex] = 0;
+-#if VMA_MEMORY_BUDGET
+- m_VulkanUsage[heapIndex] = 0;
+- m_VulkanBudget[heapIndex] = 0;
+- m_BlockBytesAtBudgetFetch[heapIndex] = 0;
+-#endif
+- }
+-
+-#if VMA_MEMORY_BUDGET
+- m_OperationsSinceBudgetFetch = 0;
+-#endif
+-}
+-
+-void VmaCurrentBudgetData::AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
+-{
+- m_AllocationBytes[heapIndex] += allocationSize;
+- ++m_AllocationCount[heapIndex];
+-#if VMA_MEMORY_BUDGET
+- ++m_OperationsSinceBudgetFetch;
+-#endif
+-}
+-
+-void VmaCurrentBudgetData::RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
+-{
+- VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize);
+- m_AllocationBytes[heapIndex] -= allocationSize;
+- VMA_ASSERT(m_AllocationCount[heapIndex] > 0);
+- --m_AllocationCount[heapIndex];
+-#if VMA_MEMORY_BUDGET
+- ++m_OperationsSinceBudgetFetch;
+-#endif
+-}
+-#endif // _VMA_CURRENT_BUDGET_DATA_FUNCTIONS
+-#endif // _VMA_CURRENT_BUDGET_DATA
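
The per-heap counters above feed the public budget query. A small sketch of reading them back, assuming VMA_MEMORY_BUDGET support, an existing allocator, and heapCount taken from VkPhysicalDeviceMemoryProperties (editorial illustration only):

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
    vmaGetHeapBudgets(allocator, budgets); // fills one entry per memory heap
    for (uint32_t heap = 0; heap < heapCount; ++heap)
        printf("heap %u: usage %llu / budget %llu bytes\n", heap,
               (unsigned long long)budgets[heap].usage,
               (unsigned long long)budgets[heap].budget);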
+-
+-#ifndef _VMA_ALLOCATION_OBJECT_ALLOCATOR
+-/*
+-Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
+-*/
+-class VmaAllocationObjectAllocator
+-{
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocationObjectAllocator)
+-public:
+- VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks)
+- : m_Allocator(pAllocationCallbacks, 1024) {}
+-
+- template<typename... Types> VmaAllocation Allocate(Types&&... args);
+- void Free(VmaAllocation hAlloc);
+-
+-private:
+- VMA_MUTEX m_Mutex;
+- VmaPoolAllocator<VmaAllocation_T> m_Allocator;
+-};
+-
+-template<typename... Types>
+-VmaAllocation VmaAllocationObjectAllocator::Allocate(Types&&... args)
+-{
+- VmaMutexLock mutexLock(m_Mutex);
+- return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
+-}
+-
+-void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
+-{
+- VmaMutexLock mutexLock(m_Mutex);
+- m_Allocator.Free(hAlloc);
+-}
+-#endif // _VMA_ALLOCATION_OBJECT_ALLOCATOR
+-
+-#ifndef _VMA_VIRTUAL_BLOCK_T
+-struct VmaVirtualBlock_T
+-{
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaVirtualBlock_T)
+-public:
+- const bool m_AllocationCallbacksSpecified;
+- const VkAllocationCallbacks m_AllocationCallbacks;
+-
+- VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo);
+- ~VmaVirtualBlock_T();
+-
+- VkResult Init() { return VK_SUCCESS; }
+- bool IsEmpty() const { return m_Metadata->IsEmpty(); }
+- void Free(VmaVirtualAllocation allocation) { m_Metadata->Free((VmaAllocHandle)allocation); }
+- void SetAllocationUserData(VmaVirtualAllocation allocation, void* userData) { m_Metadata->SetAllocationUserData((VmaAllocHandle)allocation, userData); }
+- void Clear() { m_Metadata->Clear(); }
+-
+- const VkAllocationCallbacks* GetAllocationCallbacks() const;
+- void GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo);
+- VkResult Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation,
+- VkDeviceSize* outOffset);
+- void GetStatistics(VmaStatistics& outStats) const;
+- void CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const;
+-#if VMA_STATS_STRING_ENABLED
+- void BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const;
+-#endif
+-
+-private:
+- VmaBlockMetadata* m_Metadata;
+-};
+-
+-#ifndef _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
+-VmaVirtualBlock_T::VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo)
+- : m_AllocationCallbacksSpecified(createInfo.pAllocationCallbacks != VMA_NULL),
+- m_AllocationCallbacks(createInfo.pAllocationCallbacks != VMA_NULL ? *createInfo.pAllocationCallbacks : VmaEmptyAllocationCallbacks)
+-{
+- const uint32_t algorithm = createInfo.flags & VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK;
+- switch (algorithm)
+- {
+- case 0:
+- m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true);
+- break;
+- case VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT:
+- m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Linear)(VK_NULL_HANDLE, 1, true);
+- break;
+- default:
+- VMA_ASSERT(0);
+- m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true);
+- }
+-
+- m_Metadata->Init(createInfo.size);
+-}
+-
+-VmaVirtualBlock_T::~VmaVirtualBlock_T()
+-{
+-    // Define macro VMA_DEBUG_LOG_FORMAT to receive the list of unfreed allocations.
+- if (!m_Metadata->IsEmpty())
+- m_Metadata->DebugLogAllAllocations();
+- // This is the most important assert in the entire library.
+- // Hitting it means you have some memory leak - unreleased virtual allocations.
+- VMA_ASSERT(m_Metadata->IsEmpty() && "Some virtual allocations were not freed before destruction of this virtual block!");
+-
+- vma_delete(GetAllocationCallbacks(), m_Metadata);
+-}
+-
+-const VkAllocationCallbacks* VmaVirtualBlock_T::GetAllocationCallbacks() const
+-{
+- return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL;
+-}
+-
+-void VmaVirtualBlock_T::GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo)
+-{
+- m_Metadata->GetAllocationInfo((VmaAllocHandle)allocation, outInfo);
+-}
+-
+-VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation,
+- VkDeviceSize* outOffset)
+-{
+- VmaAllocationRequest request = {};
+- if (m_Metadata->CreateAllocationRequest(
+- createInfo.size, // allocSize
+- VMA_MAX(createInfo.alignment, (VkDeviceSize)1), // allocAlignment
+- (createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, // upperAddress
+- VMA_SUBALLOCATION_TYPE_UNKNOWN, // allocType - unimportant
+- createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK, // strategy
+- &request))
+- {
+- m_Metadata->Alloc(request,
+- VMA_SUBALLOCATION_TYPE_UNKNOWN, // type - unimportant
+- createInfo.pUserData);
+- outAllocation = (VmaVirtualAllocation)request.allocHandle;
+- if(outOffset)
+- *outOffset = m_Metadata->GetAllocationOffset(request.allocHandle);
+- return VK_SUCCESS;
+- }
+- outAllocation = (VmaVirtualAllocation)VK_NULL_HANDLE;
+- if (outOffset)
+- *outOffset = UINT64_MAX;
+- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+-}
+-
+-void VmaVirtualBlock_T::GetStatistics(VmaStatistics& outStats) const
+-{
+- VmaClearStatistics(outStats);
+- m_Metadata->AddStatistics(outStats);
+-}
+-
+-void VmaVirtualBlock_T::CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const
+-{
+- VmaClearDetailedStatistics(outStats);
+- m_Metadata->AddDetailedStatistics(outStats);
+-}
+-
+-#if VMA_STATS_STRING_ENABLED
+-void VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const
+-{
+- VmaJsonWriter json(GetAllocationCallbacks(), sb);
+- json.BeginObject();
+-
+- VmaDetailedStatistics stats;
+- CalculateDetailedStatistics(stats);
+-
+- json.WriteString("Stats");
+- VmaPrintDetailedStatistics(json, stats);
+-
+- if (detailedMap)
+- {
+- json.WriteString("Details");
+- json.BeginObject();
+- m_Metadata->PrintDetailedMap(json);
+- json.EndObject();
+- }
+-
+- json.EndObject();
+-}
+-#endif // VMA_STATS_STRING_ENABLED
+-#endif // _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
+-#endif // _VMA_VIRTUAL_BLOCK_T
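
VmaVirtualBlock_T is the backing object of the public virtual-allocation API. A minimal end-to-end sketch, assuming the VMA 3.x entry points (editorial illustration only):

    VmaVirtualBlockCreateInfo blockCreateInfo = {};
    blockCreateInfo.size = 1 << 20; // 1 MiB of offset space; no real memory involved

    VmaVirtualBlock block = VK_NULL_HANDLE;
    if (vmaCreateVirtualBlock(&blockCreateInfo, &block) == VK_SUCCESS)
    {
        VmaVirtualAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.size = 4096;
        allocCreateInfo.alignment = 256;

        VmaVirtualAllocation alloc = VK_NULL_HANDLE;
        VkDeviceSize offset = 0;
        if (vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset) == VK_SUCCESS)
        {
            // use [offset, offset + 4096) within caller-owned storage
            vmaVirtualFree(block, alloc);
        }
        vmaDestroyVirtualBlock(block); // asserts if allocations were leaked
    }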
+-
+-
+-// Main allocator object.
+-struct VmaAllocator_T
+-{
+- VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocator_T)
+-public:
+- bool m_UseMutex;
+- uint32_t m_VulkanApiVersion;
+- bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
+- bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
+- bool m_UseExtMemoryBudget;
+- bool m_UseAmdDeviceCoherentMemory;
+- bool m_UseKhrBufferDeviceAddress;
+- bool m_UseExtMemoryPriority;
+- VkDevice m_hDevice;
+- VkInstance m_hInstance;
+- bool m_AllocationCallbacksSpecified;
+- VkAllocationCallbacks m_AllocationCallbacks;
+- VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
+- VmaAllocationObjectAllocator m_AllocationObjectAllocator;
+-
+- // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size.
+- uint32_t m_HeapSizeLimitMask;
+-
+- VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
+- VkPhysicalDeviceMemoryProperties m_MemProps;
+-
+- // Default pools.
+- VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
+- VmaDedicatedAllocationList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES];
+-
+- VmaCurrentBudgetData m_Budget;
+- VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects.
+-
+- VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
+- VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
+- ~VmaAllocator_T();
+-
+- const VkAllocationCallbacks* GetAllocationCallbacks() const
+- {
+- return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL;
+- }
+- const VmaVulkanFunctions& GetVulkanFunctions() const
+- {
+- return m_VulkanFunctions;
+- }
+-
+- VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
+-
+- VkDeviceSize GetBufferImageGranularity() const
+- {
+- return VMA_MAX(
+- static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
+- m_PhysicalDeviceProperties.limits.bufferImageGranularity);
+- }
+-
+- uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
+- uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
+-
+- uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
+- {
+- VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
+- return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
+- }
+- // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
+- bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
+- {
+- return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
+- VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+- }
+- // Minimum alignment for all allocations in specific memory type.
+- VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
+- {
+- return IsMemoryTypeNonCoherent(memTypeIndex) ?
+- VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
+- (VkDeviceSize)VMA_MIN_ALIGNMENT;
+- }
+-
+- bool IsIntegratedGpu() const
+- {
+- return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
+- }
+-
+- uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
+-
+- void GetBufferMemoryRequirements(
+- VkBuffer hBuffer,
+- VkMemoryRequirements& memReq,
+- bool& requiresDedicatedAllocation,
+- bool& prefersDedicatedAllocation) const;
+- void GetImageMemoryRequirements(
+- VkImage hImage,
+- VkMemoryRequirements& memReq,
+- bool& requiresDedicatedAllocation,
+- bool& prefersDedicatedAllocation) const;
+- VkResult FindMemoryTypeIndex(
+- uint32_t memoryTypeBits,
+- const VmaAllocationCreateInfo* pAllocationCreateInfo,
+- VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown.
+- uint32_t* pMemoryTypeIndex) const;
+-
+- // Main allocation function.
+- VkResult AllocateMemory(
+- const VkMemoryRequirements& vkMemReq,
+- bool requiresDedicatedAllocation,
+- bool prefersDedicatedAllocation,
+- VkBuffer dedicatedBuffer,
+- VkImage dedicatedImage,
+- VkFlags dedicatedBufferImageUsage, // UINT32_MAX if unknown.
+- const VmaAllocationCreateInfo& createInfo,
+- VmaSuballocationType suballocType,
+- size_t allocationCount,
+- VmaAllocation* pAllocations);
+-
+- // Main deallocation function.
+- void FreeMemory(
+- size_t allocationCount,
+- const VmaAllocation* pAllocations);
+-
+- void CalculateStatistics(VmaTotalStatistics* pStats);
+-
+- void GetHeapBudgets(
+- VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount);
+-
+-#if VMA_STATS_STRING_ENABLED
+- void PrintDetailedMap(class VmaJsonWriter& json);
+-#endif
+-
+- void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
+-
+- VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
+- void DestroyPool(VmaPool pool);
+- void GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats);
+- void CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats);
+-
+- void SetCurrentFrameIndex(uint32_t frameIndex);
+- uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
+-
+- VkResult CheckPoolCorruption(VmaPool hPool);
+- VkResult CheckCorruption(uint32_t memoryTypeBits);
+-
+- // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
+- VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
+- // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
+- void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
+- // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
+- VkResult BindVulkanBuffer(
+- VkDeviceMemory memory,
+- VkDeviceSize memoryOffset,
+- VkBuffer buffer,
+- const void* pNext);
+- // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
+- VkResult BindVulkanImage(
+- VkDeviceMemory memory,
+- VkDeviceSize memoryOffset,
+- VkImage image,
+- const void* pNext);
+-
+- VkResult Map(VmaAllocation hAllocation, void** ppData);
+- void Unmap(VmaAllocation hAllocation);
+-
+- VkResult BindBufferMemory(
+- VmaAllocation hAllocation,
+- VkDeviceSize allocationLocalOffset,
+- VkBuffer hBuffer,
+- const void* pNext);
+- VkResult BindImageMemory(
+- VmaAllocation hAllocation,
+- VkDeviceSize allocationLocalOffset,
+- VkImage hImage,
+- const void* pNext);
+-
+- VkResult FlushOrInvalidateAllocation(
+- VmaAllocation hAllocation,
+- VkDeviceSize offset, VkDeviceSize size,
+- VMA_CACHE_OPERATION op);
+- VkResult FlushOrInvalidateAllocations(
+- uint32_t allocationCount,
+- const VmaAllocation* allocations,
+- const VkDeviceSize* offsets, const VkDeviceSize* sizes,
+- VMA_CACHE_OPERATION op);
+-
+- void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
+-
+-    /*
+-    Returns a bit mask of the memory types that can support defragmentation on
+-    the GPU, because they allow creation of the buffer required for copy operations.
+-    */
+- uint32_t GetGpuDefragmentationMemoryTypeBits();
+-
+-#if VMA_EXTERNAL_MEMORY
+- VkExternalMemoryHandleTypeFlagsKHR GetExternalMemoryHandleTypeFlags(uint32_t memTypeIndex) const
+- {
+- return m_TypeExternalMemoryHandleTypes[memTypeIndex];
+- }
+-#endif // #if VMA_EXTERNAL_MEMORY
+-
+-private:
+- VkDeviceSize m_PreferredLargeHeapBlockSize;
+-
+- VkPhysicalDevice m_PhysicalDevice;
+- VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
+- VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
+-#if VMA_EXTERNAL_MEMORY
+- VkExternalMemoryHandleTypeFlagsKHR m_TypeExternalMemoryHandleTypes[VK_MAX_MEMORY_TYPES];
+-#endif // #if VMA_EXTERNAL_MEMORY
+-
+- VMA_RW_MUTEX m_PoolsMutex;
+- typedef VmaIntrusiveLinkedList<VmaPoolListItemTraits> PoolList;
+- // Protected by m_PoolsMutex.
+- PoolList m_Pools;
+- uint32_t m_NextPoolId;
+-
+- VmaVulkanFunctions m_VulkanFunctions;
+-
+- // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
+- uint32_t m_GlobalMemoryTypeBits;
+-
+- void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
+-
+-#if VMA_STATIC_VULKAN_FUNCTIONS == 1
+- void ImportVulkanFunctions_Static();
+-#endif
+-
+- void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
+-
+-#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
+- void ImportVulkanFunctions_Dynamic();
+-#endif
+-
+- void ValidateVulkanFunctions();
+-
+- VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
+-
+- VkResult AllocateMemoryOfType(
+- VmaPool pool,
+- VkDeviceSize size,
+- VkDeviceSize alignment,
+- bool dedicatedPreferred,
+- VkBuffer dedicatedBuffer,
+- VkImage dedicatedImage,
+- VkFlags dedicatedBufferImageUsage,
+- const VmaAllocationCreateInfo& createInfo,
+- uint32_t memTypeIndex,
+- VmaSuballocationType suballocType,
+- VmaDedicatedAllocationList& dedicatedAllocations,
+- VmaBlockVector& blockVector,
+- size_t allocationCount,
+- VmaAllocation* pAllocations);
+-
+- // Helper function only to be used inside AllocateDedicatedMemory.
+- VkResult AllocateDedicatedMemoryPage(
+- VmaPool pool,
+- VkDeviceSize size,
+- VmaSuballocationType suballocType,
+- uint32_t memTypeIndex,
+- const VkMemoryAllocateInfo& allocInfo,
+- bool map,
+- bool isUserDataString,
+- bool isMappingAllowed,
+- void* pUserData,
+- VmaAllocation* pAllocation);
+-
+- // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
+- VkResult AllocateDedicatedMemory(
+- VmaPool pool,
+- VkDeviceSize size,
+- VmaSuballocationType suballocType,
+- VmaDedicatedAllocationList& dedicatedAllocations,
+- uint32_t memTypeIndex,
+- bool map,
+- bool isUserDataString,
+- bool isMappingAllowed,
+- bool canAliasMemory,
+- void* pUserData,
+- float priority,
+- VkBuffer dedicatedBuffer,
+- VkImage dedicatedImage,
+- VkFlags dedicatedBufferImageUsage,
+- size_t allocationCount,
+- VmaAllocation* pAllocations,
+- const void* pNextChain = nullptr);
+-
+- void FreeDedicatedMemory(const VmaAllocation allocation);
+-
+- VkResult CalcMemTypeParams(
+- VmaAllocationCreateInfo& outCreateInfo,
+- uint32_t memTypeIndex,
+- VkDeviceSize size,
+- size_t allocationCount);
+- VkResult CalcAllocationParams(
+- VmaAllocationCreateInfo& outCreateInfo,
+- bool dedicatedRequired,
+- bool dedicatedPreferred);
+-
+-    /*
+-    Calculates and returns a bit mask of the memory types that can support
+-    defragmentation on the GPU, as they support creation of the buffer required
+-    for copy operations.
+-    */
+- uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
+- uint32_t CalculateGlobalMemoryTypeBits() const;
+-
+- bool GetFlushOrInvalidateRange(
+- VmaAllocation allocation,
+- VkDeviceSize offset, VkDeviceSize size,
+- VkMappedMemoryRange& outRange) const;
+-
+-#if VMA_MEMORY_BUDGET
+- void UpdateVulkanBudget();
+-#endif // #if VMA_MEMORY_BUDGET
+-};
+-
+-
+-#ifndef _VMA_MEMORY_FUNCTIONS
+-static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
+-{
+- return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
+-}
+-
+-static void VmaFree(VmaAllocator hAllocator, void* ptr)
+-{
+- VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
+-}
+-
+-template<typename T>
+-static T* VmaAllocate(VmaAllocator hAllocator)
+-{
+- return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
+-}
+-
+-template<typename T>
+-static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
+-{
+- return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
+-}
+-
+-template<typename T>
+-static void vma_delete(VmaAllocator hAllocator, T* ptr)
+-{
+- if(ptr != VMA_NULL)
+- {
+- ptr->~T();
+- VmaFree(hAllocator, ptr);
+- }
+-}
+-
+-template<typename T>
+-static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
+-{
+- if(ptr != VMA_NULL)
+- {
+- for(size_t i = count; i--; )
+- ptr[i].~T();
+- VmaFree(hAllocator, ptr);
+- }
+-}
+-#endif // _VMA_MEMORY_FUNCTIONS
+-
+-#ifndef _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS
+-VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator)
+- : m_pMetadata(VMA_NULL),
+- m_MemoryTypeIndex(UINT32_MAX),
+- m_Id(0),
+- m_hMemory(VK_NULL_HANDLE),
+- m_MapCount(0),
+- m_pMappedData(VMA_NULL) {}
+-
+-VmaDeviceMemoryBlock::~VmaDeviceMemoryBlock()
+-{
+- VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
+- VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
+-}
+-
+-void VmaDeviceMemoryBlock::Init(
+- VmaAllocator hAllocator,
+- VmaPool hParentPool,
+- uint32_t newMemoryTypeIndex,
+- VkDeviceMemory newMemory,
+- VkDeviceSize newSize,
+- uint32_t id,
+- uint32_t algorithm,
+- VkDeviceSize bufferImageGranularity)
+-{
+- VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
+-
+- m_hParentPool = hParentPool;
+- m_MemoryTypeIndex = newMemoryTypeIndex;
+- m_Id = id;
+- m_hMemory = newMemory;
+-
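+-    // Pick the metadata (suballocation bookkeeping) implementation for this block:
+-    // TLSF for the default algorithm, Linear for pools created with
+-    // VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.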
+- switch (algorithm)
+- {
+- case 0:
+- m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(),
+- bufferImageGranularity, false); // isVirtual
+- break;
+- case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
+- m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator->GetAllocationCallbacks(),
+- bufferImageGranularity, false); // isVirtual
+- break;
+- default:
+- VMA_ASSERT(0);
+- m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(),
+- bufferImageGranularity, false); // isVirtual
+- }
+- m_pMetadata->Init(newSize);
+-}
+-
+-void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
+-{
+- // Define macro VMA_DEBUG_LOG_FORMAT to receive the list of the unfreed allocations
+- if (!m_pMetadata->IsEmpty())
+- m_pMetadata->DebugLogAllAllocations();
+- // This is the most important assert in the entire library.
+- // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
+- VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
+-
+- VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
+- allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
+- m_hMemory = VK_NULL_HANDLE;
+-
+- vma_delete(allocator, m_pMetadata);
+- m_pMetadata = VMA_NULL;
+-}
+-
+-void VmaDeviceMemoryBlock::PostAlloc(VmaAllocator hAllocator)
+-{
+- VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
+- m_MappingHysteresis.PostAlloc();
+-}
+-
+-void VmaDeviceMemoryBlock::PostFree(VmaAllocator hAllocator)
+-{
+- VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
+- if(m_MappingHysteresis.PostFree())
+- {
+- VMA_ASSERT(m_MappingHysteresis.GetExtraMapping() == 0);
+- if (m_MapCount == 0)
+- {
+- m_pMappedData = VMA_NULL;
+- (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
+- }
+- }
+-}
+-
+-bool VmaDeviceMemoryBlock::Validate() const
+-{
+- VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
+- (m_pMetadata->GetSize() != 0));
+-
+- return m_pMetadata->Validate();
+-}
+-
+-VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
+-{
+- void* pData = nullptr;
+- VkResult res = Map(hAllocator, 1, &pData);
+- if (res != VK_SUCCESS)
+- {
+- return res;
+- }
+-
+- res = m_pMetadata->CheckCorruption(pData);
+-
+- Unmap(hAllocator, 1);
+-
+- return res;
+-}
+-
+-VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
+-{
+- if (count == 0)
+- {
+- return VK_SUCCESS;
+- }
+-
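+-    // Mapping is reference-counted. Only the first mapping actually calls
+-    // vkMapMemory (always mapping the whole block); subsequent calls just bump
+-    // the counter and return the cached pointer. m_MappingHysteresis keeps an
+-    // extra mapping alive across unmap/map cycles to reduce vkMapMemory churn.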
+- VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
+- const uint32_t oldTotalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping();
+- m_MappingHysteresis.PostMap();
+- if (oldTotalMapCount != 0)
+- {
+- m_MapCount += count;
+- VMA_ASSERT(m_pMappedData != VMA_NULL);
+- if (ppData != VMA_NULL)
+- {
+- *ppData = m_pMappedData;
+- }
+- return VK_SUCCESS;
+- }
+- else
+- {
+- VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
+- hAllocator->m_hDevice,
+- m_hMemory,
+- 0, // offset
+- VK_WHOLE_SIZE,
+- 0, // flags
+- &m_pMappedData);
+- if (result == VK_SUCCESS)
+- {
+- if (ppData != VMA_NULL)
+- {
+- *ppData = m_pMappedData;
+- }
+- m_MapCount = count;
+- }
+- return result;
+- }
+-}
+-
+-void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
+-{
+- if (count == 0)
+- {
+- return;
+- }
+-
+- VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
+- if (m_MapCount >= count)
+- {
+- m_MapCount -= count;
+- const uint32_t totalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping();
+- if (totalMapCount == 0)
+- {
+- m_pMappedData = VMA_NULL;
+- (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
+- }
+- m_MappingHysteresis.PostUnmap();
+- }
+- else
+- {
+- VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
+- }
+-}
+-
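+-// Used only when VMA_DEBUG_DETECT_CORRUPTION is enabled: writes a magic value
+-// into the debug margin right past the allocation so that a later
+-// ValidateMagicValueAfterAllocation() call can detect buffer overruns.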
+-VkResult VmaDeviceMemoryBlock::WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
+-{
+- VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
+-
+- void* pData;
+- VkResult res = Map(hAllocator, 1, &pData);
+- if (res != VK_SUCCESS)
+- {
+- return res;
+- }
+-
+- VmaWriteMagicValue(pData, allocOffset + allocSize);
+-
+- Unmap(hAllocator, 1);
+- return VK_SUCCESS;
+-}
+-
+-VkResult VmaDeviceMemoryBlock::ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
+-{
+- VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
+-
+- void* pData;
+- VkResult res = Map(hAllocator, 1, &pData);
+- if (res != VK_SUCCESS)
+- {
+- return res;
+- }
+-
+- if (!VmaValidateMagicValue(pData, allocOffset + allocSize))
+- {
+- VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
+- }
+-
+- Unmap(hAllocator, 1);
+- return VK_SUCCESS;
+-}
+-
+-VkResult VmaDeviceMemoryBlock::BindBufferMemory(
+- const VmaAllocator hAllocator,
+- const VmaAllocation hAllocation,
+- VkDeviceSize allocationLocalOffset,
+- VkBuffer hBuffer,
+- const void* pNext)
+-{
+- VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
+- hAllocation->GetBlock() == this);
+- VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
+- "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
+- const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
+- // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
+- VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
+- return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
+-}
+-
+-VkResult VmaDeviceMemoryBlock::BindImageMemory(
+- const VmaAllocator hAllocator,
+- const VmaAllocation hAllocation,
+- VkDeviceSize allocationLocalOffset,
+- VkImage hImage,
+- const void* pNext)
+-{
+- VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
+- hAllocation->GetBlock() == this);
+- VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
+- "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
+- const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
+- // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
+- VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
+- return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
+-}
+-#endif // _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS
+-
+-#ifndef _VMA_ALLOCATION_T_FUNCTIONS
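+-// A VmaAllocation_T is either a suballocation within a VmaDeviceMemoryBlock
+-// (ALLOCATION_TYPE_BLOCK) or owns its own VkDeviceMemory
+-// (ALLOCATION_TYPE_DEDICATED); most accessors below dispatch on m_Type.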
+-VmaAllocation_T::VmaAllocation_T(bool mappingAllowed)
+- : m_Alignment{ 1 },
+- m_Size{ 0 },
+- m_pUserData{ VMA_NULL },
+- m_pName{ VMA_NULL },
+- m_MemoryTypeIndex{ 0 },
+- m_Type{ (uint8_t)ALLOCATION_TYPE_NONE },
+- m_SuballocationType{ (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN },
+- m_MapCount{ 0 },
+- m_Flags{ 0 }
+-{
+- if(mappingAllowed)
+- m_Flags |= (uint8_t)FLAG_MAPPING_ALLOWED;
+-
+-#if VMA_STATS_STRING_ENABLED
+- m_BufferImageUsage = 0;
+-#endif
+-}
+-
+-VmaAllocation_T::~VmaAllocation_T()
+-{
+- VMA_ASSERT(m_MapCount == 0 && "Allocation was not unmapped before destruction.");
+-
+- // Check if owned string was freed.
+- VMA_ASSERT(m_pName == VMA_NULL);
+-}
+-
+-void VmaAllocation_T::InitBlockAllocation(
+- VmaDeviceMemoryBlock* block,
+- VmaAllocHandle allocHandle,
+- VkDeviceSize alignment,
+- VkDeviceSize size,
+- uint32_t memoryTypeIndex,
+- VmaSuballocationType suballocationType,
+- bool mapped)
+-{
+- VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
+- VMA_ASSERT(block != VMA_NULL);
+- m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
+- m_Alignment = alignment;
+- m_Size = size;
+- m_MemoryTypeIndex = memoryTypeIndex;
+- if(mapped)
+- {
+- VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
+- m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP;
+- }
+- m_SuballocationType = (uint8_t)suballocationType;
+- m_BlockAllocation.m_Block = block;
+- m_BlockAllocation.m_AllocHandle = allocHandle;
+-}
+-
+-void VmaAllocation_T::InitDedicatedAllocation(
+- VmaPool hParentPool,
+- uint32_t memoryTypeIndex,
+- VkDeviceMemory hMemory,
+- VmaSuballocationType suballocationType,
+- void* pMappedData,
+- VkDeviceSize size)
+-{
+- VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
+- VMA_ASSERT(hMemory != VK_NULL_HANDLE);
+- m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
+- m_Alignment = 0;
+- m_Size = size;
+- m_MemoryTypeIndex = memoryTypeIndex;
+- m_SuballocationType = (uint8_t)suballocationType;
+- if(pMappedData != VMA_NULL)
+- {
+- VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
+- m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP;
+- }
+- m_DedicatedAllocation.m_hParentPool = hParentPool;
+- m_DedicatedAllocation.m_hMemory = hMemory;
+- m_DedicatedAllocation.m_pMappedData = pMappedData;
+- m_DedicatedAllocation.m_Prev = VMA_NULL;
+- m_DedicatedAllocation.m_Next = VMA_NULL;
+-}
+-
+-void VmaAllocation_T::SetName(VmaAllocator hAllocator, const char* pName)
+-{
+- VMA_ASSERT(pName == VMA_NULL || pName != m_pName);
+-
+- FreeName(hAllocator);
+-
+- if (pName != VMA_NULL)
+- m_pName = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), pName);
+-}
+-
+-uint8_t VmaAllocation_T::SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation)
+-{
+- VMA_ASSERT(allocation != VMA_NULL);
+- VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
+- VMA_ASSERT(allocation->m_Type == ALLOCATION_TYPE_BLOCK);
+-
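+-    // Defragmentation helper: exchange this allocation's block/handle with the
+-    // destination allocation's, fixing up the per-allocation user data stored in
+-    // the block metadata. Returns the map count so the caller can re-map the
+-    // destination block afterwards (see the bulk-map step in DefragmentPassEnd).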
+- if (m_MapCount != 0)
+- m_BlockAllocation.m_Block->Unmap(hAllocator, m_MapCount);
+-
+- m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, allocation);
+- VMA_SWAP(m_BlockAllocation, allocation->m_BlockAllocation);
+- m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, this);
+-
+-#if VMA_STATS_STRING_ENABLED
+- VMA_SWAP(m_BufferImageUsage, allocation->m_BufferImageUsage);
+-#endif
+- return m_MapCount;
+-}
+-
+-VmaAllocHandle VmaAllocation_T::GetAllocHandle() const
+-{
+- switch (m_Type)
+- {
+- case ALLOCATION_TYPE_BLOCK:
+- return m_BlockAllocation.m_AllocHandle;
+- case ALLOCATION_TYPE_DEDICATED:
+- return VK_NULL_HANDLE;
+- default:
+- VMA_ASSERT(0);
+- return VK_NULL_HANDLE;
+- }
+-}
+-
+-VkDeviceSize VmaAllocation_T::GetOffset() const
+-{
+- switch (m_Type)
+- {
+- case ALLOCATION_TYPE_BLOCK:
+- return m_BlockAllocation.m_Block->m_pMetadata->GetAllocationOffset(m_BlockAllocation.m_AllocHandle);
+- case ALLOCATION_TYPE_DEDICATED:
+- return 0;
+- default:
+- VMA_ASSERT(0);
+- return 0;
+- }
+-}
+-
+-VmaPool VmaAllocation_T::GetParentPool() const
+-{
+- switch (m_Type)
+- {
+- case ALLOCATION_TYPE_BLOCK:
+- return m_BlockAllocation.m_Block->GetParentPool();
+- case ALLOCATION_TYPE_DEDICATED:
+- return m_DedicatedAllocation.m_hParentPool;
+- default:
+- VMA_ASSERT(0);
+- return VK_NULL_HANDLE;
+- }
+-}
+-
+-VkDeviceMemory VmaAllocation_T::GetMemory() const
+-{
+- switch (m_Type)
+- {
+- case ALLOCATION_TYPE_BLOCK:
+- return m_BlockAllocation.m_Block->GetDeviceMemory();
+- case ALLOCATION_TYPE_DEDICATED:
+- return m_DedicatedAllocation.m_hMemory;
+- default:
+- VMA_ASSERT(0);
+- return VK_NULL_HANDLE;
+- }
+-}
+-
+-void* VmaAllocation_T::GetMappedData() const
+-{
+- switch (m_Type)
+- {
+- case ALLOCATION_TYPE_BLOCK:
+- if (m_MapCount != 0 || IsPersistentMap())
+- {
+- void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
+- VMA_ASSERT(pBlockData != VMA_NULL);
+- return (char*)pBlockData + GetOffset();
+- }
+- else
+- {
+- return VMA_NULL;
+- }
+- break;
+- case ALLOCATION_TYPE_DEDICATED:
+- VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0 || IsPersistentMap()));
+- return m_DedicatedAllocation.m_pMappedData;
+- default:
+- VMA_ASSERT(0);
+- return VMA_NULL;
+- }
+-}
+-
+-void VmaAllocation_T::BlockAllocMap()
+-{
+- VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
+- VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
+-
+- if (m_MapCount < 0xFF)
+- {
+- ++m_MapCount;
+- }
+- else
+- {
+- VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
+- }
+-}
+-
+-void VmaAllocation_T::BlockAllocUnmap()
+-{
+- VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
+-
+- if (m_MapCount > 0)
+- {
+- --m_MapCount;
+- }
+- else
+- {
+- VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
+- }
+-}
+-
+-VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
+-{
+- VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
+- VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
+-
+- if (m_MapCount != 0 || IsPersistentMap())
+- {
+- if (m_MapCount < 0xFF)
+- {
+- VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
+- *ppData = m_DedicatedAllocation.m_pMappedData;
+- ++m_MapCount;
+- return VK_SUCCESS;
+- }
+- else
+- {
+- VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
+- return VK_ERROR_MEMORY_MAP_FAILED;
+- }
+- }
+- else
+- {
+- VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
+- hAllocator->m_hDevice,
+- m_DedicatedAllocation.m_hMemory,
+- 0, // offset
+- VK_WHOLE_SIZE,
+- 0, // flags
+- ppData);
+- if (result == VK_SUCCESS)
+- {
+- m_DedicatedAllocation.m_pMappedData = *ppData;
+- m_MapCount = 1;
+- }
+- return result;
+- }
+-}
+-
+-void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
+-{
+- VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
+-
+- if (m_MapCount > 0)
+- {
+- --m_MapCount;
+- if (m_MapCount == 0 && !IsPersistentMap())
+- {
+- m_DedicatedAllocation.m_pMappedData = VMA_NULL;
+- (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
+- hAllocator->m_hDevice,
+- m_DedicatedAllocation.m_hMemory);
+- }
+- }
+- else
+- {
+- VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
+- }
+-}
+-
+-#if VMA_STATS_STRING_ENABLED
+-void VmaAllocation_T::InitBufferImageUsage(uint32_t bufferImageUsage)
+-{
+- VMA_ASSERT(m_BufferImageUsage == 0);
+- m_BufferImageUsage = bufferImageUsage;
+-}
+-
+-void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
+-{
+- json.WriteString("Type");
+- json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
+-
+- json.WriteString("Size");
+- json.WriteNumber(m_Size);
+- json.WriteString("Usage");
+- json.WriteNumber(m_BufferImageUsage);
+-
+- if (m_pUserData != VMA_NULL)
+- {
+- json.WriteString("CustomData");
+- json.BeginString();
+- json.ContinueString_Pointer(m_pUserData);
+- json.EndString();
+- }
+- if (m_pName != VMA_NULL)
+- {
+- json.WriteString("Name");
+- json.WriteString(m_pName);
+- }
+-}
+-#endif // VMA_STATS_STRING_ENABLED
+-
+-void VmaAllocation_T::FreeName(VmaAllocator hAllocator)
+-{
+- if(m_pName)
+- {
+- VmaFreeString(hAllocator->GetAllocationCallbacks(), m_pName);
+- m_pName = VMA_NULL;
+- }
+-}
+-#endif // _VMA_ALLOCATION_T_FUNCTIONS
+-
+-#ifndef _VMA_BLOCK_VECTOR_FUNCTIONS
+-VmaBlockVector::VmaBlockVector(
+- VmaAllocator hAllocator,
+- VmaPool hParentPool,
+- uint32_t memoryTypeIndex,
+- VkDeviceSize preferredBlockSize,
+- size_t minBlockCount,
+- size_t maxBlockCount,
+- VkDeviceSize bufferImageGranularity,
+- bool explicitBlockSize,
+- uint32_t algorithm,
+- float priority,
+- VkDeviceSize minAllocationAlignment,
+- void* pMemoryAllocateNext)
+- : m_hAllocator(hAllocator),
+- m_hParentPool(hParentPool),
+- m_MemoryTypeIndex(memoryTypeIndex),
+- m_PreferredBlockSize(preferredBlockSize),
+- m_MinBlockCount(minBlockCount),
+- m_MaxBlockCount(maxBlockCount),
+- m_BufferImageGranularity(bufferImageGranularity),
+- m_ExplicitBlockSize(explicitBlockSize),
+- m_Algorithm(algorithm),
+- m_Priority(priority),
+- m_MinAllocationAlignment(minAllocationAlignment),
+- m_pMemoryAllocateNext(pMemoryAllocateNext),
+- m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
+- m_NextBlockId(0) {}
+-
+-VmaBlockVector::~VmaBlockVector()
+-{
+- for (size_t i = m_Blocks.size(); i--; )
+- {
+- m_Blocks[i]->Destroy(m_hAllocator);
+- vma_delete(m_hAllocator, m_Blocks[i]);
+- }
+-}
+-
+-VkResult VmaBlockVector::CreateMinBlocks()
+-{
+- for (size_t i = 0; i < m_MinBlockCount; ++i)
+- {
+- VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
+- if (res != VK_SUCCESS)
+- {
+- return res;
+- }
+- }
+- return VK_SUCCESS;
+-}
+-
+-void VmaBlockVector::AddStatistics(VmaStatistics& inoutStats)
+-{
+- VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+-
+- const size_t blockCount = m_Blocks.size();
+- for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
+- {
+- const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
+- VMA_ASSERT(pBlock);
+- VMA_HEAVY_ASSERT(pBlock->Validate());
+- pBlock->m_pMetadata->AddStatistics(inoutStats);
+- }
+-}
+-
+-void VmaBlockVector::AddDetailedStatistics(VmaDetailedStatistics& inoutStats)
+-{
+- VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+-
+- const size_t blockCount = m_Blocks.size();
+- for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
+- {
+- const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
+- VMA_ASSERT(pBlock);
+- VMA_HEAVY_ASSERT(pBlock->Validate());
+- pBlock->m_pMetadata->AddDetailedStatistics(inoutStats);
+- }
+-}
+-
+-bool VmaBlockVector::IsEmpty()
+-{
+- VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+- return m_Blocks.empty();
+-}
+-
+-bool VmaBlockVector::IsCorruptionDetectionEnabled() const
+-{
+- const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+- return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
+- (VMA_DEBUG_MARGIN > 0) &&
+- (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
+- (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
+-}
+-
+-VkResult VmaBlockVector::Allocate(
+- VkDeviceSize size,
+- VkDeviceSize alignment,
+- const VmaAllocationCreateInfo& createInfo,
+- VmaSuballocationType suballocType,
+- size_t allocationCount,
+- VmaAllocation* pAllocations)
+-{
+- size_t allocIndex;
+- VkResult res = VK_SUCCESS;
+-
+- alignment = VMA_MAX(alignment, m_MinAllocationAlignment);
+-
+- if (IsCorruptionDetectionEnabled())
+- {
+- size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
+- alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
+- }
+-
+- {
+- VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+- for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+- {
+- res = AllocatePage(
+- size,
+- alignment,
+- createInfo,
+- suballocType,
+- pAllocations + allocIndex);
+- if (res != VK_SUCCESS)
+- {
+- break;
+- }
+- }
+- }
+-
+- if (res != VK_SUCCESS)
+- {
+- // Free all already created allocations.
+- while (allocIndex--)
+- Free(pAllocations[allocIndex]);
+- memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
+- }
+-
+- return res;
+-}
+-
+-VkResult VmaBlockVector::AllocatePage(
+- VkDeviceSize size,
+- VkDeviceSize alignment,
+- const VmaAllocationCreateInfo& createInfo,
+- VmaSuballocationType suballocType,
+- VmaAllocation* pAllocation)
+-{
+- const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
+-
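+-    // Query the current heap budget to learn how much memory may still be
+-    // allocated from this heap; used below to decide whether creating a new
+-    // block is allowed or a fallback to dedicated allocation is preferable.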
+- VkDeviceSize freeMemory;
+- {
+- const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
+- VmaBudget heapBudget = {};
+- m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1);
+- freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
+- }
+-
+- const bool canFallbackToDedicated = !HasExplicitBlockSize() &&
+- (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0;
+- const bool canCreateNewBlock =
+- ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
+- (m_Blocks.size() < m_MaxBlockCount) &&
+- (freeMemory >= size || !canFallbackToDedicated);
+- uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
+-
+- // Upper address can only be used with linear allocator and within single memory block.
+- if (isUpperAddress &&
+- (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
+- {
+- return VK_ERROR_FEATURE_NOT_PRESENT;
+- }
+-
+-    // Early reject: requested allocation size is larger than maximum block size for this block vector.
+- if (size + VMA_DEBUG_MARGIN > m_PreferredBlockSize)
+- {
+- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+- }
+-
+- // 1. Search existing allocations. Try to allocate.
+- if (m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
+- {
+- // Use only last block.
+- if (!m_Blocks.empty())
+- {
+- VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
+- VMA_ASSERT(pCurrBlock);
+- VkResult res = AllocateFromBlock(
+- pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
+- if (res == VK_SUCCESS)
+- {
+- VMA_DEBUG_LOG_FORMAT(" Returned from last block #%u", pCurrBlock->GetId());
+- IncrementallySortBlocks();
+- return VK_SUCCESS;
+- }
+- }
+- }
+- else
+- {
+- if (strategy != VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT) // MIN_MEMORY or default
+- {
+- const bool isHostVisible =
+- (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
+- if(isHostVisible)
+- {
+- const bool isMappingAllowed = (createInfo.flags &
+- (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0;
+- /*
+- For non-mappable allocations, check blocks that are not mapped first.
+- For mappable allocations, check blocks that are already mapped first.
+- This way, having many blocks, we will separate mappable and non-mappable allocations,
+- hopefully limiting the number of blocks that are mapped, which will help tools like RenderDoc.
+- */
+- for(size_t mappingI = 0; mappingI < 2; ++mappingI)
+- {
+- // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
+- for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
+- {
+- VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
+- VMA_ASSERT(pCurrBlock);
+- const bool isBlockMapped = pCurrBlock->GetMappedData() != VMA_NULL;
+- if((mappingI == 0) == (isMappingAllowed == isBlockMapped))
+- {
+- VkResult res = AllocateFromBlock(
+- pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
+- if (res == VK_SUCCESS)
+- {
+- VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%u", pCurrBlock->GetId());
+- IncrementallySortBlocks();
+- return VK_SUCCESS;
+- }
+- }
+- }
+- }
+- }
+- else
+- {
+- // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
+- for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
+- {
+- VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
+- VMA_ASSERT(pCurrBlock);
+- VkResult res = AllocateFromBlock(
+- pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
+- if (res == VK_SUCCESS)
+- {
+- VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%u", pCurrBlock->GetId());
+- IncrementallySortBlocks();
+- return VK_SUCCESS;
+- }
+- }
+- }
+- }
+- else // VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
+- {
+- // Backward order in m_Blocks - prefer blocks with largest amount of free space.
+- for (size_t blockIndex = m_Blocks.size(); blockIndex--; )
+- {
+- VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
+- VMA_ASSERT(pCurrBlock);
+- VkResult res = AllocateFromBlock(pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
+- if (res == VK_SUCCESS)
+- {
+- VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%u", pCurrBlock->GetId());
+- IncrementallySortBlocks();
+- return VK_SUCCESS;
+- }
+- }
+- }
+- }
+-
+- // 2. Try to create new block.
+- if (canCreateNewBlock)
+- {
+- // Calculate optimal size for new block.
+- VkDeviceSize newBlockSize = m_PreferredBlockSize;
+- uint32_t newBlockSizeShift = 0;
+- const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
+-
+- if (!m_ExplicitBlockSize)
+- {
+- // Allocate 1/8, 1/4, 1/2 as first blocks.
+- const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
+- for (uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
+- {
+- const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
+- if (smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
+- {
+- newBlockSize = smallerNewBlockSize;
+- ++newBlockSizeShift;
+- }
+- else
+- {
+- break;
+- }
+- }
+- }
+-
+- size_t newBlockIndex = 0;
+- VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
+- CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
+- // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
+- if (!m_ExplicitBlockSize)
+- {
+- while (res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
+- {
+- const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
+- if (smallerNewBlockSize >= size)
+- {
+- newBlockSize = smallerNewBlockSize;
+- ++newBlockSizeShift;
+- res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
+- CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
+- }
+- else
+- {
+- break;
+- }
+- }
+- }
+-
+- if (res == VK_SUCCESS)
+- {
+- VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
+- VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
+-
+- res = AllocateFromBlock(
+- pBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
+- if (res == VK_SUCCESS)
+- {
+- VMA_DEBUG_LOG_FORMAT(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
+- IncrementallySortBlocks();
+- return VK_SUCCESS;
+- }
+- else
+- {
+- // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
+- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+- }
+- }
+- }
+-
+- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+-}
+-
+-void VmaBlockVector::Free(const VmaAllocation hAllocation)
+-{
+- VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
+-
+- bool budgetExceeded = false;
+- {
+- const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
+- VmaBudget heapBudget = {};
+- m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1);
+- budgetExceeded = heapBudget.usage >= heapBudget.budget;
+- }
+-
+- // Scope for lock.
+- {
+- VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+-
+- VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
+-
+- if (IsCorruptionDetectionEnabled())
+- {
+- VkResult res = pBlock->ValidateMagicValueAfterAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
+- VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
+- }
+-
+- if (hAllocation->IsPersistentMap())
+- {
+- pBlock->Unmap(m_hAllocator, 1);
+- }
+-
+- const bool hadEmptyBlockBeforeFree = HasEmptyBlock();
+- pBlock->m_pMetadata->Free(hAllocation->GetAllocHandle());
+- pBlock->PostFree(m_hAllocator);
+- VMA_HEAVY_ASSERT(pBlock->Validate());
+-
+- VMA_DEBUG_LOG_FORMAT(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
+-
+- const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
+- // pBlock became empty after this deallocation.
+- if (pBlock->m_pMetadata->IsEmpty())
+- {
+- // Already had empty block. We don't want to have two, so delete this one.
+- if ((hadEmptyBlockBeforeFree || budgetExceeded) && canDeleteBlock)
+- {
+- pBlockToDelete = pBlock;
+- Remove(pBlock);
+- }
+-            // else: We now have one empty block - leave it. This hysteresis avoids allocating a whole block back and forth.
+- }
+-        // pBlock didn't become empty, but we have another empty block - find and free that one.
+-        // (This is optional, a heuristic.)
+- else if (hadEmptyBlockBeforeFree && canDeleteBlock)
+- {
+- VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
+- if (pLastBlock->m_pMetadata->IsEmpty())
+- {
+- pBlockToDelete = pLastBlock;
+- m_Blocks.pop_back();
+- }
+- }
+-
+- IncrementallySortBlocks();
+- }
+-
+-    // Destruction of a free block. Deferred until this point, outside of the
+-    // mutex lock, for performance reasons.
+- if (pBlockToDelete != VMA_NULL)
+- {
+- VMA_DEBUG_LOG_FORMAT(" Deleted empty block #%u", pBlockToDelete->GetId());
+- pBlockToDelete->Destroy(m_hAllocator);
+- vma_delete(m_hAllocator, pBlockToDelete);
+- }
+-
+- m_hAllocator->m_Budget.RemoveAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), hAllocation->GetSize());
+- m_hAllocator->m_AllocationObjectAllocator.Free(hAllocation);
+-}
+-
+-VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
+-{
+- VkDeviceSize result = 0;
+- for (size_t i = m_Blocks.size(); i--; )
+- {
+- result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
+- if (result >= m_PreferredBlockSize)
+- {
+- break;
+- }
+- }
+- return result;
+-}
+-
+-void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
+-{
+- for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
+- {
+- if (m_Blocks[blockIndex] == pBlock)
+- {
+- VmaVectorRemove(m_Blocks, blockIndex);
+- return;
+- }
+- }
+- VMA_ASSERT(0);
+-}
+-
+-void VmaBlockVector::IncrementallySortBlocks()
+-{
+- if (!m_IncrementalSort)
+- return;
+- if (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
+- {
+- // Bubble sort only until first swap.
+- for (size_t i = 1; i < m_Blocks.size(); ++i)
+- {
+- if (m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
+- {
+- VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
+- return;
+- }
+- }
+- }
+-}
+-
+-void VmaBlockVector::SortByFreeSize()
+-{
+- VMA_SORT(m_Blocks.begin(), m_Blocks.end(),
+- [](VmaDeviceMemoryBlock* b1, VmaDeviceMemoryBlock* b2) -> bool
+- {
+- return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize();
+- });
+-}
+-
+-VkResult VmaBlockVector::AllocateFromBlock(
+- VmaDeviceMemoryBlock* pBlock,
+- VkDeviceSize size,
+- VkDeviceSize alignment,
+- VmaAllocationCreateFlags allocFlags,
+- void* pUserData,
+- VmaSuballocationType suballocType,
+- uint32_t strategy,
+- VmaAllocation* pAllocation)
+-{
+- const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
+-
+- VmaAllocationRequest currRequest = {};
+- if (pBlock->m_pMetadata->CreateAllocationRequest(
+- size,
+- alignment,
+- isUpperAddress,
+- suballocType,
+- strategy,
+- &currRequest))
+- {
+- return CommitAllocationRequest(currRequest, pBlock, alignment, allocFlags, pUserData, suballocType, pAllocation);
+- }
+- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+-}
+-
+-VkResult VmaBlockVector::CommitAllocationRequest(
+- VmaAllocationRequest& allocRequest,
+- VmaDeviceMemoryBlock* pBlock,
+- VkDeviceSize alignment,
+- VmaAllocationCreateFlags allocFlags,
+- void* pUserData,
+- VmaSuballocationType suballocType,
+- VmaAllocation* pAllocation)
+-{
+- const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
+- const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
+- const bool isMappingAllowed = (allocFlags &
+- (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0;
+-
+- pBlock->PostAlloc(m_hAllocator);
+-    // Allocate from pBlock.
+- if (mapped)
+- {
+- VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
+- if (res != VK_SUCCESS)
+- {
+- return res;
+- }
+- }
+-
+- *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(isMappingAllowed);
+- pBlock->m_pMetadata->Alloc(allocRequest, suballocType, *pAllocation);
+- (*pAllocation)->InitBlockAllocation(
+- pBlock,
+- allocRequest.allocHandle,
+- alignment,
+- allocRequest.size, // Not size, as actual allocation size may be larger than requested!
+- m_MemoryTypeIndex,
+- suballocType,
+- mapped);
+- VMA_HEAVY_ASSERT(pBlock->Validate());
+- if (isUserDataString)
+- (*pAllocation)->SetName(m_hAllocator, (const char*)pUserData);
+- else
+- (*pAllocation)->SetUserData(m_hAllocator, pUserData);
+- m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), allocRequest.size);
+- if (VMA_DEBUG_INITIALIZE_ALLOCATIONS)
+- {
+- m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
+- }
+- if (IsCorruptionDetectionEnabled())
+- {
+- VkResult res = pBlock->WriteMagicValueAfterAllocation(m_hAllocator, (*pAllocation)->GetOffset(), allocRequest.size);
+- VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
+- }
+- return VK_SUCCESS;
+-}
+-
+-VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
+-{
+- VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+- allocInfo.pNext = m_pMemoryAllocateNext;
+- allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
+- allocInfo.allocationSize = blockSize;
+-
+-#if VMA_BUFFER_DEVICE_ADDRESS
+- // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
+- VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
+- if (m_hAllocator->m_UseKhrBufferDeviceAddress)
+- {
+- allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
+- VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
+- }
+-#endif // VMA_BUFFER_DEVICE_ADDRESS
+-
+-#if VMA_MEMORY_PRIORITY
+- VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
+- if (m_hAllocator->m_UseExtMemoryPriority)
+- {
+- VMA_ASSERT(m_Priority >= 0.f && m_Priority <= 1.f);
+- priorityInfo.priority = m_Priority;
+- VmaPnextChainPushFront(&allocInfo, &priorityInfo);
+- }
+-#endif // VMA_MEMORY_PRIORITY
+-
+-#if VMA_EXTERNAL_MEMORY
+- // Attach VkExportMemoryAllocateInfoKHR if necessary.
+- VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
+- exportMemoryAllocInfo.handleTypes = m_hAllocator->GetExternalMemoryHandleTypeFlags(m_MemoryTypeIndex);
+- if (exportMemoryAllocInfo.handleTypes != 0)
+- {
+- VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
+- }
+-#endif // VMA_EXTERNAL_MEMORY
+-
+- VkDeviceMemory mem = VK_NULL_HANDLE;
+- VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
+- if (res < 0)
+- {
+- return res;
+- }
+-
+- // New VkDeviceMemory successfully created.
+-
+-    // Create a new block object for it.
+- VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
+- pBlock->Init(
+- m_hAllocator,
+- m_hParentPool,
+- m_MemoryTypeIndex,
+- mem,
+- allocInfo.allocationSize,
+- m_NextBlockId++,
+- m_Algorithm,
+- m_BufferImageGranularity);
+-
+- m_Blocks.push_back(pBlock);
+- if (pNewBlockIndex != VMA_NULL)
+- {
+- *pNewBlockIndex = m_Blocks.size() - 1;
+- }
+-
+- return VK_SUCCESS;
+-}
+-
+-bool VmaBlockVector::HasEmptyBlock()
+-{
+- for (size_t index = 0, count = m_Blocks.size(); index < count; ++index)
+- {
+- VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
+- if (pBlock->m_pMetadata->IsEmpty())
+- {
+- return true;
+- }
+- }
+- return false;
+-}
+-
+-#if VMA_STATS_STRING_ENABLED
+-void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
+-{
+- VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+-
+- json.BeginObject();
+- for (size_t i = 0; i < m_Blocks.size(); ++i)
+- {
+- json.BeginString();
+- json.ContinueString(m_Blocks[i]->GetId());
+- json.EndString();
+-
+- json.BeginObject();
+- json.WriteString("MapRefCount");
+- json.WriteNumber(m_Blocks[i]->GetMapRefCount());
+-
+- m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
+- json.EndObject();
+- }
+- json.EndObject();
+-}
+-#endif // VMA_STATS_STRING_ENABLED
+-
+-VkResult VmaBlockVector::CheckCorruption()
+-{
+- if (!IsCorruptionDetectionEnabled())
+- {
+- return VK_ERROR_FEATURE_NOT_PRESENT;
+- }
+-
+- VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+- for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
+- {
+- VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
+- VMA_ASSERT(pBlock);
+- VkResult res = pBlock->CheckCorruption(m_hAllocator);
+- if (res != VK_SUCCESS)
+- {
+- return res;
+- }
+- }
+- return VK_SUCCESS;
+-}
+-
+-#endif // _VMA_BLOCK_VECTOR_FUNCTIONS
+-
+-#ifndef _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
+-VmaDefragmentationContext_T::VmaDefragmentationContext_T(
+- VmaAllocator hAllocator,
+- const VmaDefragmentationInfo& info)
+- : m_MaxPassBytes(info.maxBytesPerPass == 0 ? VK_WHOLE_SIZE : info.maxBytesPerPass),
+- m_MaxPassAllocations(info.maxAllocationsPerPass == 0 ? UINT32_MAX : info.maxAllocationsPerPass),
+- m_BreakCallback(info.pfnBreakCallback),
+- m_BreakCallbackUserData(info.pBreakCallbackUserData),
+- m_MoveAllocator(hAllocator->GetAllocationCallbacks()),
+- m_Moves(m_MoveAllocator)
+-{
+- m_Algorithm = info.flags & VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK;
+-
+- if (info.pool != VMA_NULL)
+- {
+- m_BlockVectorCount = 1;
+- m_PoolBlockVector = &info.pool->m_BlockVector;
+- m_pBlockVectors = &m_PoolBlockVector;
+- m_PoolBlockVector->SetIncrementalSort(false);
+- m_PoolBlockVector->SortByFreeSize();
+- }
+- else
+- {
+- m_BlockVectorCount = hAllocator->GetMemoryTypeCount();
+- m_PoolBlockVector = VMA_NULL;
+- m_pBlockVectors = hAllocator->m_pBlockVectors;
+- for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
+- {
+- VmaBlockVector* vector = m_pBlockVectors[i];
+- if (vector != VMA_NULL)
+- {
+- vector->SetIncrementalSort(false);
+- vector->SortByFreeSize();
+- }
+- }
+- }
+-
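+-    // Select per-block-vector algorithm state. The default algorithm is
+-    // Balanced; Extensive needs extra state only when buffer/image granularity
+-    // is greater than 1.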
+- switch (m_Algorithm)
+- {
+- case 0: // Default algorithm
+- m_Algorithm = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT;
+- m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount);
+- break;
+- case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
+- m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount);
+- break;
+- case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
+- if (hAllocator->GetBufferImageGranularity() > 1)
+- {
+- m_AlgorithmState = vma_new_array(hAllocator, StateExtensive, m_BlockVectorCount);
+- }
+- break;
+- }
+-}
+-
+-VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
+-{
+- if (m_PoolBlockVector != VMA_NULL)
+- {
+- m_PoolBlockVector->SetIncrementalSort(true);
+- }
+- else
+- {
+- for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
+- {
+- VmaBlockVector* vector = m_pBlockVectors[i];
+- if (vector != VMA_NULL)
+- vector->SetIncrementalSort(true);
+- }
+- }
+-
+- if (m_AlgorithmState)
+- {
+- switch (m_Algorithm)
+- {
+- case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
+- vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateBalanced*>(m_AlgorithmState), m_BlockVectorCount);
+- break;
+- case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
+- vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateExtensive*>(m_AlgorithmState), m_BlockVectorCount);
+- break;
+- default:
+- VMA_ASSERT(0);
+- }
+- }
+-}
+-
+-VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo)
+-{
+- if (m_PoolBlockVector != VMA_NULL)
+- {
+- VmaMutexLockWrite lock(m_PoolBlockVector->GetMutex(), m_PoolBlockVector->GetAllocator()->m_UseMutex);
+-
+- if (m_PoolBlockVector->GetBlockCount() > 1)
+- ComputeDefragmentation(*m_PoolBlockVector, 0);
+- else if (m_PoolBlockVector->GetBlockCount() == 1)
+- ReallocWithinBlock(*m_PoolBlockVector, m_PoolBlockVector->GetBlock(0));
+- }
+- else
+- {
+- for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
+- {
+- if (m_pBlockVectors[i] != VMA_NULL)
+- {
+- VmaMutexLockWrite lock(m_pBlockVectors[i]->GetMutex(), m_pBlockVectors[i]->GetAllocator()->m_UseMutex);
+-
+- if (m_pBlockVectors[i]->GetBlockCount() > 1)
+- {
+- if (ComputeDefragmentation(*m_pBlockVectors[i], i))
+- break;
+- }
+- else if (m_pBlockVectors[i]->GetBlockCount() == 1)
+- {
+- if (ReallocWithinBlock(*m_pBlockVectors[i], m_pBlockVectors[i]->GetBlock(0)))
+- break;
+- }
+- }
+- }
+- }
+-
+- moveInfo.moveCount = static_cast<uint32_t>(m_Moves.size());
+- if (moveInfo.moveCount > 0)
+- {
+- moveInfo.pMoves = m_Moves.data();
+- return VK_INCOMPLETE;
+- }
+-
+- moveInfo.pMoves = VMA_NULL;
+- return VK_SUCCESS;
+-}
+-
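+-// Second half of one defragmentation pass: DefragmentPassBegin() filled
+-// moveInfo with proposed moves; after the user has performed the copies and
+-// set each move's `operation`, this applies the results, frees source or
+-// destination allocations accordingly, and accumulates pass statistics.
+-// Returns VK_SUCCESS when defragmentation is complete, VK_INCOMPLETE otherwise.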
+-VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo)
+-{
+- VMA_ASSERT(moveInfo.moveCount > 0 ? moveInfo.pMoves != VMA_NULL : true);
+-
+- VkResult result = VK_SUCCESS;
+- VmaStlAllocator<FragmentedBlock> blockAllocator(m_MoveAllocator.m_pCallbacks);
+- VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> immovableBlocks(blockAllocator);
+- VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> mappedBlocks(blockAllocator);
+-
+- VmaAllocator allocator = VMA_NULL;
+- for (uint32_t i = 0; i < moveInfo.moveCount; ++i)
+- {
+- VmaDefragmentationMove& move = moveInfo.pMoves[i];
+- size_t prevCount = 0, currentCount = 0;
+- VkDeviceSize freedBlockSize = 0;
+-
+- uint32_t vectorIndex;
+- VmaBlockVector* vector;
+- if (m_PoolBlockVector != VMA_NULL)
+- {
+- vectorIndex = 0;
+- vector = m_PoolBlockVector;
+- }
+- else
+- {
+- vectorIndex = move.srcAllocation->GetMemoryTypeIndex();
+- vector = m_pBlockVectors[vectorIndex];
+- VMA_ASSERT(vector != VMA_NULL);
+- }
+-
+- switch (move.operation)
+- {
+- case VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY:
+- {
+- uint8_t mapCount = move.srcAllocation->SwapBlockAllocation(vector->m_hAllocator, move.dstTmpAllocation);
+- if (mapCount > 0)
+- {
+- allocator = vector->m_hAllocator;
+- VmaDeviceMemoryBlock* newMapBlock = move.srcAllocation->GetBlock();
+- bool notPresent = true;
+- for (FragmentedBlock& block : mappedBlocks)
+- {
+- if (block.block == newMapBlock)
+- {
+- notPresent = false;
+- block.data += mapCount;
+- break;
+- }
+- }
+- if (notPresent)
+- mappedBlocks.push_back({ mapCount, newMapBlock });
+- }
+-
+-            // Scope for locks; Free has its own lock
+- {
+- VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+- prevCount = vector->GetBlockCount();
+- freedBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
+- }
+- vector->Free(move.dstTmpAllocation);
+- {
+- VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+- currentCount = vector->GetBlockCount();
+- }
+-
+- result = VK_INCOMPLETE;
+- break;
+- }
+- case VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE:
+- {
+- m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
+- --m_PassStats.allocationsMoved;
+- vector->Free(move.dstTmpAllocation);
+-
+- VmaDeviceMemoryBlock* newBlock = move.srcAllocation->GetBlock();
+- bool notPresent = true;
+- for (const FragmentedBlock& block : immovableBlocks)
+- {
+- if (block.block == newBlock)
+- {
+- notPresent = false;
+- break;
+- }
+- }
+- if (notPresent)
+- immovableBlocks.push_back({ vectorIndex, newBlock });
+- break;
+- }
+- case VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY:
+- {
+- m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
+- --m_PassStats.allocationsMoved;
+-            // Scope for locks; Free has its own lock
+- {
+- VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+- prevCount = vector->GetBlockCount();
+- freedBlockSize = move.srcAllocation->GetBlock()->m_pMetadata->GetSize();
+- }
+- vector->Free(move.srcAllocation);
+- {
+- VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+- currentCount = vector->GetBlockCount();
+- }
+- freedBlockSize *= prevCount - currentCount;
+-
+- VkDeviceSize dstBlockSize;
+- {
+- VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+- dstBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
+- }
+- vector->Free(move.dstTmpAllocation);
+- {
+- VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+- freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount());
+- currentCount = vector->GetBlockCount();
+- }
+-
+- result = VK_INCOMPLETE;
+- break;
+- }
+- default:
+- VMA_ASSERT(0);
+- }
+-
+- if (prevCount > currentCount)
+- {
+- size_t freedBlocks = prevCount - currentCount;
+- m_PassStats.deviceMemoryBlocksFreed += static_cast<uint32_t>(freedBlocks);
+- m_PassStats.bytesFreed += freedBlockSize;
+- }
+-
+- if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT &&
+- m_AlgorithmState != VMA_NULL)
+- {
+-            // Avoid unnecessary attempts to allocate when a new free block is available
+- StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[vectorIndex];
+- if (state.firstFreeBlock != SIZE_MAX)
+- {
+- const size_t diff = prevCount - currentCount;
+- if (state.firstFreeBlock >= diff)
+- {
+- state.firstFreeBlock -= diff;
+- if (state.firstFreeBlock != 0)
+- state.firstFreeBlock -= vector->GetBlock(state.firstFreeBlock - 1)->m_pMetadata->IsEmpty();
+- }
+- else
+- state.firstFreeBlock = 0;
+- }
+- }
+- }
+- moveInfo.moveCount = 0;
+- moveInfo.pMoves = VMA_NULL;
+- m_Moves.clear();
+-
+- // Update stats
+- m_GlobalStats.allocationsMoved += m_PassStats.allocationsMoved;
+- m_GlobalStats.bytesFreed += m_PassStats.bytesFreed;
+- m_GlobalStats.bytesMoved += m_PassStats.bytesMoved;
+- m_GlobalStats.deviceMemoryBlocksFreed += m_PassStats.deviceMemoryBlocksFreed;
+- m_PassStats = { 0 };
+-
+-    // Move blocks with immovable allocations according to the algorithm
+- if (immovableBlocks.size() > 0)
+- {
+- do
+- {
+- if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT)
+- {
+- if (m_AlgorithmState != VMA_NULL)
+- {
+- bool swapped = false;
+-                    // Move them to the start of the free-block range
+- for (const FragmentedBlock& block : immovableBlocks)
+- {
+- StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[block.data];
+- if (state.operation != StateExtensive::Operation::Cleanup)
+- {
+- VmaBlockVector* vector = m_pBlockVectors[block.data];
+- VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+-
+- for (size_t i = 0, count = vector->GetBlockCount() - m_ImmovableBlockCount; i < count; ++i)
+- {
+- if (vector->GetBlock(i) == block.block)
+- {
+- VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[vector->GetBlockCount() - ++m_ImmovableBlockCount]);
+- if (state.firstFreeBlock != SIZE_MAX)
+- {
+- if (i + 1 < state.firstFreeBlock)
+- {
+- if (state.firstFreeBlock > 1)
+- VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[--state.firstFreeBlock]);
+- else
+- --state.firstFreeBlock;
+- }
+- }
+- swapped = true;
+- break;
+- }
+- }
+- }
+- }
+- if (swapped)
+- result = VK_INCOMPLETE;
+- break;
+- }
+- }
+-
+- // Move to the beginning
+- for (const FragmentedBlock& block : immovableBlocks)
+- {
+- VmaBlockVector* vector = m_pBlockVectors[block.data];
+- VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+-
+- for (size_t i = m_ImmovableBlockCount; i < vector->GetBlockCount(); ++i)
+- {
+- if (vector->GetBlock(i) == block.block)
+- {
+- VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[m_ImmovableBlockCount++]);
+- break;
+- }
+- }
+- }
+- } while (false);
+- }
+-
+- // Bulk-map destination blocks
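+-    // (re-applying the map counts captured by SwapBlockAllocation above, so the
+-    // moved allocations keep their mappings)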
+- for (const FragmentedBlock& block : mappedBlocks)
+- {
+- VkResult res = block.block->Map(allocator, block.data, VMA_NULL);
+- VMA_ASSERT(res == VK_SUCCESS);
+- }
+- return result;
+-}
+-
+-bool VmaDefragmentationContext_T::ComputeDefragmentation(VmaBlockVector& vector, size_t index)
+-{
+- switch (m_Algorithm)
+- {
+- case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT:
+- return ComputeDefragmentation_Fast(vector);
+- case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
+- return ComputeDefragmentation_Balanced(vector, index, true);
+- case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT:
+- return ComputeDefragmentation_Full(vector);
+- case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
+- return ComputeDefragmentation_Extensive(vector, index);
+- default:
+- VMA_ASSERT(0);
+- return ComputeDefragmentation_Balanced(vector, index, true);
+- }
+-}
+-
+-VmaDefragmentationContext_T::MoveAllocationData VmaDefragmentationContext_T::GetMoveData(
+- VmaAllocHandle handle, VmaBlockMetadata* metadata)
+-{
+- MoveAllocationData moveData;
+- moveData.move.srcAllocation = (VmaAllocation)metadata->GetAllocationUserData(handle);
+- moveData.size = moveData.move.srcAllocation->GetSize();
+- moveData.alignment = moveData.move.srcAllocation->GetAlignment();
+- moveData.type = moveData.move.srcAllocation->GetSuballocationType();
+- moveData.flags = 0;
+-
+- if (moveData.move.srcAllocation->IsPersistentMap())
+- moveData.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
+- if (moveData.move.srcAllocation->IsMappingAllowed())
+- moveData.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
+-
+- return moveData;
+-}
+-
+-VmaDefragmentationContext_T::CounterStatus VmaDefragmentationContext_T::CheckCounters(VkDeviceSize bytes)
+-{
+-    // Check custom break criteria, if provided
+- if (m_BreakCallback && m_BreakCallback(m_BreakCallbackUserData))
+- return CounterStatus::End;
+-
+-    // Ignore the allocation if it would exceed the maximum bytes to move per pass
+- if (m_PassStats.bytesMoved + bytes > m_MaxPassBytes)
+- {
+- if (++m_IgnoredAllocs < MAX_ALLOCS_TO_IGNORE)
+- return CounterStatus::Ignore;
+- else
+- return CounterStatus::End;
+- }
+- else
+- m_IgnoredAllocs = 0;
+- return CounterStatus::Pass;
+-}
+-
+-bool VmaDefragmentationContext_T::IncrementCounters(VkDeviceSize bytes)
+-{
+- m_PassStats.bytesMoved += bytes;
+-    // Early return when the maximum is reached
+- if (++m_PassStats.allocationsMoved >= m_MaxPassAllocations || m_PassStats.bytesMoved >= m_MaxPassBytes)
+- {
+- VMA_ASSERT((m_PassStats.allocationsMoved == m_MaxPassAllocations ||
+- m_PassStats.bytesMoved == m_MaxPassBytes) && "Exceeded maximal pass threshold!");
+- return true;
+- }
+- return false;
+-}
+-
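+-// Compacts a single block in place: walks its allocations and, using the
+-// MIN_OFFSET strategy, tries to re-place each one at a lower offset within the
+-// same block. Returns true when a pass limit was hit (see CheckCounters).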
+-bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block)
+-{
+- VmaBlockMetadata* metadata = block->m_pMetadata;
+-
+- for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
+- handle != VK_NULL_HANDLE;
+- handle = metadata->GetNextAllocation(handle))
+- {
+- MoveAllocationData moveData = GetMoveData(handle, metadata);
+-        // Ignore allocations newly created by the defragmentation algorithm
+- if (moveData.move.srcAllocation->GetUserData() == this)
+- continue;
+- switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
+- {
+- case CounterStatus::Ignore:
+- continue;
+- case CounterStatus::End:
+- return true;
+- case CounterStatus::Pass:
+- break;
+- default:
+- VMA_ASSERT(0);
+- }
+-
+- VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
+- if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
+- {
+- VmaAllocationRequest request = {};
+- if (metadata->CreateAllocationRequest(
+- moveData.size,
+- moveData.alignment,
+- false,
+- moveData.type,
+- VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
+- &request))
+- {
+- if (metadata->GetAllocationOffset(request.allocHandle) < offset)
+- {
+- if (vector.CommitAllocationRequest(
+- request,
+- block,
+- moveData.alignment,
+- moveData.flags,
+- this,
+- moveData.type,
+- &moveData.move.dstTmpAllocation) == VK_SUCCESS)
+- {
+- m_Moves.push_back(moveData.move);
+- if (IncrementCounters(moveData.size))
+- return true;
+- }
+- }
+- }
+- }
+- }
+- return false;
+-}
+-
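+-// Tries to move the given allocation into one of the blocks [start, end) that
+-// reports enough total free space; on success records the move, returning
+-// true only when a pass limit was hit.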
+-bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector)
+-{
+- for (; start < end; ++start)
+- {
+- VmaDeviceMemoryBlock* dstBlock = vector.GetBlock(start);
+- if (dstBlock->m_pMetadata->GetSumFreeSize() >= data.size)
+- {
+- if (vector.AllocateFromBlock(dstBlock,
+- data.size,
+- data.alignment,
+- data.flags,
+- this,
+- data.type,
+- 0,
+- &data.move.dstTmpAllocation) == VK_SUCCESS)
+- {
+- m_Moves.push_back(data.move);
+- if (IncrementCounters(data.size))
+- return true;
+- break;
+- }
+- }
+- }
+- return false;
+-}
+-
+-bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector& vector)
+-{
+- // Move only between blocks
+-
+-    // Go through allocations in the last blocks and try to fit them inside the first ones
+- for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
+- {
+- VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;
+-
+- for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
+- handle != VK_NULL_HANDLE;
+- handle = metadata->GetNextAllocation(handle))
+- {
+- MoveAllocationData moveData = GetMoveData(handle, metadata);
+-            // Ignore allocations newly created by the defragmentation algorithm
+- if (moveData.move.srcAllocation->GetUserData() == this)
+- continue;
+- switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
+- {
+- case CounterStatus::Ignore:
+- continue;
+- case CounterStatus::End:
+- return true;
+- case CounterStatus::Pass:
+- break;
+- default:
+- VMA_ASSERT(0);
+- }
+-
+- // Check all previous blocks for free space
+- if (AllocInOtherBlock(0, i, moveData, vector))
+- return true;
+- }
+- }
+- return false;
+-}
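+-
+- // This variant only shuffles whole allocations between existing blocks. In
+- // the public VMA 3.x API it is requested with (sketch, reusing the
+- // illustrative `defragInfo` above):
+- //
+- //   defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT;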
+-
+-bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update)
+-{
+- // Go over every allocation and try to fit it in previous blocks at lowest offsets;
+- // if that is not possible, realloc within a single block to minimize the offset
+- // (excluding offset == 0), but only if there are noticeable gaps between allocations
+- // (heuristic, e.g. based on the average allocation size in the block)
+- VMA_ASSERT(m_AlgorithmState != VMA_NULL);
+-
+- StateBalanced& vectorState = reinterpret_cast<StateBalanced*>(m_AlgorithmState)[index];
+- if (update && vectorState.avgAllocSize == UINT64_MAX)
+- UpdateVectorStatistics(vector, vectorState);
+-
+- const size_t startMoveCount = m_Moves.size();
+- VkDeviceSize minimalFreeRegion = vectorState.avgFreeSize / 2;
+- for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
+- {
+- VmaDeviceMemoryBlock* block = vector.GetBlock(i);
+- VmaBlockMetadata* metadata = block->m_pMetadata;
+- VkDeviceSize prevFreeRegionSize = 0;
+-
+- for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
+- handle != VK_NULL_HANDLE;
+- handle = metadata->GetNextAllocation(handle))
+- {
+- MoveAllocationData moveData = GetMoveData(handle, metadata);
+- // Ignore allocations newly created by the defragmentation algorithm
+- if (moveData.move.srcAllocation->GetUserData() == this)
+- continue;
+- switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
+- {
+- case CounterStatus::Ignore:
+- continue;
+- case CounterStatus::End:
+- return true;
+- case CounterStatus::Pass:
+- break;
+- default:
+- VMA_ASSERT(0);
+- }
+-
+- // Check all previous blocks for free space
+- const size_t prevMoveCount = m_Moves.size();
+- if (AllocInOtherBlock(0, i, moveData, vector))
+- return true;
+-
+- VkDeviceSize nextFreeRegionSize = metadata->GetNextFreeRegionSize(handle);
+- // If no room found then realloc within block for lower offset
+- VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
+- if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
+- {
+- // Check if realloc will make sense
+- if (prevFreeRegionSize >= minimalFreeRegion ||
+- nextFreeRegionSize >= minimalFreeRegion ||
+- moveData.size <= vectorState.avgFreeSize ||
+- moveData.size <= vectorState.avgAllocSize)
+- {
+- VmaAllocationRequest request = {};
+- if (metadata->CreateAllocationRequest(
+- moveData.size,
+- moveData.alignment,
+- false,
+- moveData.type,
+- VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
+- &request))
+- {
+- if (metadata->GetAllocationOffset(request.allocHandle) < offset)
+- {
+- if (vector.CommitAllocationRequest(
+- request,
+- block,
+- moveData.alignment,
+- moveData.flags,
+- this,
+- moveData.type,
+- &moveData.move.dstTmpAllocation) == VK_SUCCESS)
+- {
+- m_Moves.push_back(moveData.move);
+- if (IncrementCounters(moveData.size))
+- return true;
+- }
+- }
+- }
+- }
+- }
+- prevFreeRegionSize = nextFreeRegionSize;
+- }
+- }
+-
+- // No moves performed, update statistics to current vector state
+- if (startMoveCount == m_Moves.size() && !update)
+- {
+- vectorState.avgAllocSize = UINT64_MAX;
+- return ComputeDefragmentation_Balanced(vector, index, false);
+- }
+- return false;
+-}
+-
+-bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& vector)
+-{
+- // Go over every allocation and try to fit it in previous blocks at lowest offsets;
+- // if that is not possible, realloc within a single block to minimize the offset
+- // (excluding offset == 0)
+-
+- for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
+- {
+- VmaDeviceMemoryBlock* block = vector.GetBlock(i);
+- VmaBlockMetadata* metadata = block->m_pMetadata;
+-
+- for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
+- handle != VK_NULL_HANDLE;
+- handle = metadata->GetNextAllocation(handle))
+- {
+- MoveAllocationData moveData = GetMoveData(handle, metadata);
+- // Ignore allocations newly created by the defragmentation algorithm
+- if (moveData.move.srcAllocation->GetUserData() == this)
+- continue;
+- switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
+- {
+- case CounterStatus::Ignore:
+- continue;
+- case CounterStatus::End:
+- return true;
+- case CounterStatus::Pass:
+- break;
+- default:
+- VMA_ASSERT(0);
+- }
+-
+- // Check all previous blocks for free space
+- const size_t prevMoveCount = m_Moves.size();
+- if (AllocInOtherBlock(0, i, moveData, vector))
+- return true;
+-
+- // If no room found then realloc within block for lower offset
+- VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
+- if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
+- {
+- VmaAllocationRequest request = {};
+- if (metadata->CreateAllocationRequest(
+- moveData.size,
+- moveData.alignment,
+- false,
+- moveData.type,
+- VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
+- &request))
+- {
+- if (metadata->GetAllocationOffset(request.allocHandle) < offset)
+- {
+- if (vector.CommitAllocationRequest(
+- request,
+- block,
+- moveData.alignment,
+- moveData.flags,
+- this,
+- moveData.type,
+- &moveData.move.dstTmpAllocation) == VK_SUCCESS)
+- {
+- m_Moves.push_back(moveData.move);
+- if (IncrementCounters(moveData.size))
+- return true;
+- }
+- }
+- }
+- }
+- }
+- }
+- return false;
+-}
+-
+-bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index)
+-{
+- // First free single block, then populate it to the brim, then free another block, and so on
+-
+- // Fall back to the previous algorithm, since without granularity conflicts it can achieve maximal packing
+- if (vector.m_BufferImageGranularity == 1)
+- return ComputeDefragmentation_Full(vector);
+-
+- VMA_ASSERT(m_AlgorithmState != VMA_NULL);
+-
+- StateExtensive& vectorState = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[index];
+-
+- bool texturePresent = false, bufferPresent = false, otherPresent = false;
+- switch (vectorState.operation)
+- {
+- case StateExtensive::Operation::Done: // Vector defragmented
+- return false;
+- case StateExtensive::Operation::FindFreeBlockBuffer:
+- case StateExtensive::Operation::FindFreeBlockTexture:
+- case StateExtensive::Operation::FindFreeBlockAll:
+- {
+- // No more blocks to free, just perform fast realloc and move to cleanup
+- if (vectorState.firstFreeBlock == 0)
+- {
+- vectorState.operation = StateExtensive::Operation::Cleanup;
+- return ComputeDefragmentation_Fast(vector);
+- }
+-
+- // No free blocks, have to clear last one
+- size_t last = (vectorState.firstFreeBlock == SIZE_MAX ? vector.GetBlockCount() : vectorState.firstFreeBlock) - 1;
+- VmaBlockMetadata* freeMetadata = vector.GetBlock(last)->m_pMetadata;
+-
+- const size_t prevMoveCount = m_Moves.size();
+- for (VmaAllocHandle handle = freeMetadata->GetAllocationListBegin();
+- handle != VK_NULL_HANDLE;
+- handle = freeMetadata->GetNextAllocation(handle))
+- {
+- MoveAllocationData moveData = GetMoveData(handle, freeMetadata);
+- switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
+- {
+- case CounterStatus::Ignore:
+- continue;
+- case CounterStatus::End:
+- return true;
+- case CounterStatus::Pass:
+- break;
+- default:
+- VMA_ASSERT(0);
+- }
+-
+- // Check all previous blocks for free space
+- if (AllocInOtherBlock(0, last, moveData, vector))
+- {
+- // Full clear performed already
+- if (prevMoveCount != m_Moves.size() && freeMetadata->GetNextAllocation(handle) == VK_NULL_HANDLE)
+- vectorState.firstFreeBlock = last;
+- return true;
+- }
+- }
+-
+- if (prevMoveCount == m_Moves.size())
+- {
+- // Cannot perform full clear, have to move data in other blocks around
+- if (last != 0)
+- {
+- for (size_t i = last - 1; i; --i)
+- {
+- if (ReallocWithinBlock(vector, vector.GetBlock(i)))
+- return true;
+- }
+- }
+-
+- if (prevMoveCount == m_Moves.size())
+- {
+- // No possible reallocs within blocks, try to move them around fast
+- return ComputeDefragmentation_Fast(vector);
+- }
+- }
+- else
+- {
+- switch (vectorState.operation)
+- {
+- case StateExtensive::Operation::FindFreeBlockBuffer:
+- vectorState.operation = StateExtensive::Operation::MoveBuffers;
+- break;
+- case StateExtensive::Operation::FindFreeBlockTexture:
+- vectorState.operation = StateExtensive::Operation::MoveTextures;
+- break;
+- case StateExtensive::Operation::FindFreeBlockAll:
+- vectorState.operation = StateExtensive::Operation::MoveAll;
+- break;
+- default:
+- VMA_ASSERT(0);
+- vectorState.operation = StateExtensive::Operation::MoveTextures;
+- }
+- vectorState.firstFreeBlock = last;
+- // Nothing done, block found without reallocations, so more reallocs can be performed in the same pass
+- return ComputeDefragmentation_Extensive(vector, index);
+- }
+- break;
+- }
+- case StateExtensive::Operation::MoveTextures:
+- {
+- if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL, vector,
+- vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
+- {
+- if (texturePresent)
+- {
+- vectorState.operation = StateExtensive::Operation::FindFreeBlockTexture;
+- return ComputeDefragmentation_Extensive(vector, index);
+- }
+-
+- if (!bufferPresent && !otherPresent)
+- {
+- vectorState.operation = StateExtensive::Operation::Cleanup;
+- break;
+- }
+-
+- // No more textures to move, check buffers
+- vectorState.operation = StateExtensive::Operation::MoveBuffers;
+- bufferPresent = false;
+- otherPresent = false;
+- }
+- else
+- break;
+- VMA_FALLTHROUGH; // Fallthrough
+- }
+- case StateExtensive::Operation::MoveBuffers:
+- {
+- if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_BUFFER, vector,
+- vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
+- {
+- if (bufferPresent)
+- {
+- vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer;
+- return ComputeDefragmentation_Extensive(vector, index);
+- }
+-
+- if (!otherPresent)
+- {
+- vectorState.operation = StateExtensive::Operation::Cleanup;
+- break;
+- }
+-
+- // No more buffers to move, check all others
+- vectorState.operation = StateExtensive::Operation::MoveAll;
+- otherPresent = false;
+- }
+- else
+- break;
+- VMA_FALLTHROUGH; // Fallthrough
+- }
+- case StateExtensive::Operation::MoveAll:
+- {
+- if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_FREE, vector,
+- vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
+- {
+- if (otherPresent)
+- {
+- vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer;
+- return ComputeDefragmentation_Extensive(vector, index);
+- }
+- // Everything moved
+- vectorState.operation = StateExtensive::Operation::Cleanup;
+- }
+- break;
+- }
+- case StateExtensive::Operation::Cleanup:
+- // Cleanup is handled below so that other operations may reuse the cleanup code. This case is here to prevent the unhandled enum value warning (C4062).
+- break;
+- }
+-
+- if (vectorState.operation == StateExtensive::Operation::Cleanup)
+- {
+- // All other work done, pack data in blocks even tighter if possible
+- const size_t prevMoveCount = m_Moves.size();
+- for (size_t i = 0; i < vector.GetBlockCount(); ++i)
+- {
+- if (ReallocWithinBlock(vector, vector.GetBlock(i)))
+- return true;
+- }
+-
+- if (prevMoveCount == m_Moves.size())
+- vectorState.operation = StateExtensive::Operation::Done;
+- }
+- return false;
+-}
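+-
+- // All ComputeDefragmentation_* variants fill m_Moves for a single pass. A
+- // hedged sketch of the public driving loop (VMA 3.x API; the user's own
+- // copy/recreate work per move is elided as "..."):
+- //
+- //   VmaDefragmentationPassMoveInfo pass = {};
+- //   while (vmaBeginDefragmentationPass(allocator, defragCtx, &pass) == VK_INCOMPLETE)
+- //   {
+- //       ... // copy data for pass.pMoves[0..pass.moveCount)
+- //       if (vmaEndDefragmentationPass(allocator, defragCtx, &pass) == VK_SUCCESS)
+- //           break;
+- //   }
+- //   vmaEndDefragmentation(allocator, defragCtx, VMA_NULL);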
+-
+-void VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state)
+-{
+- size_t allocCount = 0;
+- size_t freeCount = 0;
+- state.avgFreeSize = 0;
+- state.avgAllocSize = 0;
+-
+- for (size_t i = 0; i < vector.GetBlockCount(); ++i)
+- {
+- VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;
+-
+- allocCount += metadata->GetAllocationCount();
+- freeCount += metadata->GetFreeRegionsCount();
+- state.avgFreeSize += metadata->GetSumFreeSize();
+- state.avgAllocSize += metadata->GetSize();
+- }
+-
+- state.avgAllocSize = (state.avgAllocSize - state.avgFreeSize) / allocCount;
+- state.avgFreeSize /= freeCount;
+-}
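+-
+- // Worked example for the averages above (numbers purely illustrative): a
+- // vector of two 64 MiB blocks holding 32 allocations, with 8 free regions
+- // totalling 32 MiB, gives
+- //   avgAllocSize = (128 MiB - 32 MiB) / 32 = 3 MiB
+- //   avgFreeSize  =  32 MiB / 8             = 4 MiB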
+-
+-bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType currentType,
+- VmaBlockVector& vector, size_t firstFreeBlock,
+- bool& texturePresent, bool& bufferPresent, bool& otherPresent)
+-{
+- const size_t prevMoveCount = m_Moves.size();
+- for (size_t i = firstFreeBlock; i;)
+- {
+- VmaDeviceMemoryBlock* block = vector.GetBlock(--i);
+- VmaBlockMetadata* metadata = block->m_pMetadata;
+-
+- for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
+- handle != VK_NULL_HANDLE;
+- handle = metadata->GetNextAllocation(handle))
+- {
+- MoveAllocationData moveData = GetMoveData(handle, metadata);
+- // Ignore allocations newly created by the defragmentation algorithm
+- if (moveData.move.srcAllocation->GetUserData() == this)
+- continue;
+- switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
+- {
+- case CounterStatus::Ignore:
+- continue;
+- case CounterStatus::End:
+- return true;
+- case CounterStatus::Pass:
+- break;
+- default:
+- VMA_ASSERT(0);
+- }
+-
+- // Move only a single type of resource at a time
+- if (!VmaIsBufferImageGranularityConflict(moveData.type, currentType))
+- {
+- // Try to fit allocation into free blocks
+- if (AllocInOtherBlock(firstFreeBlock, vector.GetBlockCount(), moveData, vector))
+- return false;
+- }
+-
+- if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL))
+- texturePresent = true;
+- else if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_BUFFER))
+- bufferPresent = true;
+- else
+- otherPresent = true;
+- }
+- }
+- return prevMoveCount == m_Moves.size();
+-}
+-#endif // _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
+-
+-#ifndef _VMA_POOL_T_FUNCTIONS
+-VmaPool_T::VmaPool_T(
+- VmaAllocator hAllocator,
+- const VmaPoolCreateInfo& createInfo,
+- VkDeviceSize preferredBlockSize)
+- : m_BlockVector(
+- hAllocator,
+- this, // hParentPool
+- createInfo.memoryTypeIndex,
+- createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
+- createInfo.minBlockCount,
+- createInfo.maxBlockCount,
+- (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
+- createInfo.blockSize != 0, // explicitBlockSize
+- createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm
+- createInfo.priority,
+- VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment),
+- createInfo.pMemoryAllocateNext),
+- m_Id(0),
+- m_Name(VMA_NULL) {}
+-
+-VmaPool_T::~VmaPool_T()
+-{
+- VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL);
+-}
+-
+-void VmaPool_T::SetName(const char* pName)
+-{
+- const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
+- VmaFreeString(allocs, m_Name);
+-
+- if (pName != VMA_NULL)
+- {
+- m_Name = VmaCreateStringCopy(allocs, pName);
+- }
+- else
+- {
+- m_Name = VMA_NULL;
+- }
+-}
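+-
+- // SetName is reached through the public setter. Minimal sketch (assumes a
+- // valid allocator and pool; the name string is illustrative):
+- //
+- //   vmaSetPoolName(allocator, pool, "streaming-textures");
+- //   vmaSetPoolName(allocator, pool, VMA_NULL); // frees the stored copy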
+-#endif // _VMA_POOL_T_FUNCTIONS
+-
+-#ifndef _VMA_ALLOCATOR_T_FUNCTIONS
+-VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
+- m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
+- m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
+- m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
+- m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
+- m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
+- m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
+- m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
+- m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0),
+- m_hDevice(pCreateInfo->device),
+- m_hInstance(pCreateInfo->instance),
+- m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
+- m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
+- *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
+- m_AllocationObjectAllocator(&m_AllocationCallbacks),
+- m_HeapSizeLimitMask(0),
+- m_DeviceMemoryCount(0),
+- m_PreferredLargeHeapBlockSize(0),
+- m_PhysicalDevice(pCreateInfo->physicalDevice),
+- m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
+- m_NextPoolId(0),
+- m_GlobalMemoryTypeBits(UINT32_MAX)
+-{
+- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+- {
+- m_UseKhrDedicatedAllocation = false;
+- m_UseKhrBindMemory2 = false;
+- }
+-
+- if(VMA_DEBUG_DETECT_CORRUPTION)
+- {
+- // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
+- VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
+- }
+-
+- VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);
+-
+- if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
+- {
+-#if !(VMA_DEDICATED_ALLOCATION)
+- if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
+- {
+- VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
+- }
+-#endif
+-#if !(VMA_BIND_MEMORY2)
+- if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
+- {
+- VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
+- }
+-#endif
+- }
+-#if !(VMA_MEMORY_BUDGET)
+- if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
+- {
+- VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
+- }
+-#endif
+-#if !(VMA_BUFFER_DEVICE_ADDRESS)
+- if(m_UseKhrBufferDeviceAddress)
+- {
+- VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
+- }
+-#endif
+-#if VMA_VULKAN_VERSION < 1003000
+- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
+- {
+- VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_3 but required Vulkan version is disabled by preprocessor macros.");
+- }
+-#endif
+-#if VMA_VULKAN_VERSION < 1002000
+- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
+- {
+- VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
+- }
+-#endif
+-#if VMA_VULKAN_VERSION < 1001000
+- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+- {
+- VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
+- }
+-#endif
+-#if !(VMA_MEMORY_PRIORITY)
+- if(m_UseExtMemoryPriority)
+- {
+- VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
+- }
+-#endif
+-
+- memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
+- memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
+- memset(&m_MemProps, 0, sizeof(m_MemProps));
+-
+- memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
+- memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
+-
+-#if VMA_EXTERNAL_MEMORY
+- memset(&m_TypeExternalMemoryHandleTypes, 0, sizeof(m_TypeExternalMemoryHandleTypes));
+-#endif // #if VMA_EXTERNAL_MEMORY
+-
+- if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
+- {
+- m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
+- m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
+- m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
+- }
+-
+- ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
+-
+- (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
+- (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
+-
+- VMA_ASSERT(VmaIsPow2(VMA_MIN_ALIGNMENT));
+- VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
+- VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
+- VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
+-
+- m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
+- pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
+-
+- m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
+-
+-#if VMA_EXTERNAL_MEMORY
+- if(pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL)
+- {
+- memcpy(m_TypeExternalMemoryHandleTypes, pCreateInfo->pTypeExternalMemoryHandleTypes,
+- sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount());
+- }
+-#endif // #if VMA_EXTERNAL_MEMORY
+-
+- if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
+- {
+- for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
+- {
+- const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
+- if(limit != VK_WHOLE_SIZE)
+- {
+- m_HeapSizeLimitMask |= 1u << heapIndex;
+- if(limit < m_MemProps.memoryHeaps[heapIndex].size)
+- {
+- m_MemProps.memoryHeaps[heapIndex].size = limit;
+- }
+- }
+- }
+- }
+-
+- for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+- {
+- // Create only supported types
+- if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0)
+- {
+- const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
+- m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
+- this,
+- VK_NULL_HANDLE, // hParentPool
+- memTypeIndex,
+- preferredBlockSize,
+- 0,
+- SIZE_MAX,
+- GetBufferImageGranularity(),
+- false, // explicitBlockSize
+- 0, // algorithm
+- 0.5f, // priority (0.5 is the default per Vulkan spec)
+- GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment
+- VMA_NULL); // pMemoryAllocateNext
+- // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
+- // because minBlockCount is 0.
+- }
+- }
+-}
+-
+-VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
+-{
+- VkResult res = VK_SUCCESS;
+-
+-#if VMA_MEMORY_BUDGET
+- if(m_UseExtMemoryBudget)
+- {
+- UpdateVulkanBudget();
+- }
+-#endif // #if VMA_MEMORY_BUDGET
+-
+- return res;
+-}
+-
+-VmaAllocator_T::~VmaAllocator_T()
+-{
+- VMA_ASSERT(m_Pools.IsEmpty());
+-
+- for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
+- {
+- vma_delete(this, m_pBlockVectors[memTypeIndex]);
+- }
+-}
+-
+-void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
+-{
+-#if VMA_STATIC_VULKAN_FUNCTIONS == 1
+- ImportVulkanFunctions_Static();
+-#endif
+-
+- if(pVulkanFunctions != VMA_NULL)
+- {
+- ImportVulkanFunctions_Custom(pVulkanFunctions);
+- }
+-
+-#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
+- ImportVulkanFunctions_Dynamic();
+-#endif
+-
+- ValidateVulkanFunctions();
+-}
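+-
+- // Import order above: statically linked entry points first (if enabled),
+- // then user-provided pointers, then vkGet*ProcAddr lookups fill the gaps.
+- // Typical setup sketch for VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 (assumes
+- // existing `instance`, `physDev`, and `device` handles):
+- //
+- //   VmaVulkanFunctions fn = {};
+- //   fn.vkGetInstanceProcAddr = vkGetInstanceProcAddr;
+- //   fn.vkGetDeviceProcAddr = vkGetDeviceProcAddr;
+- //   VmaAllocatorCreateInfo ci = {};
+- //   ci.instance = instance; ci.physicalDevice = physDev; ci.device = device;
+- //   ci.pVulkanFunctions = &fn;
+- //   VmaAllocator allocator;
+- //   vmaCreateAllocator(&ci, &allocator);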
+-
+-#if VMA_STATIC_VULKAN_FUNCTIONS == 1
+-
+-void VmaAllocator_T::ImportVulkanFunctions_Static()
+-{
+- // Vulkan 1.0
+- m_VulkanFunctions.vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)vkGetInstanceProcAddr;
+- m_VulkanFunctions.vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)vkGetDeviceProcAddr;
+- m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
+- m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
+- m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
+- m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
+- m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
+- m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
+- m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
+- m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
+- m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
+- m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
+- m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
+- m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
+- m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
+- m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
+- m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
+- m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
+- m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
+-
+- // Vulkan 1.1
+-#if VMA_VULKAN_VERSION >= 1001000
+- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+- {
+- m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
+- m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
+- m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
+- m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
+- }
+-#endif
+-
+-#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
+- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+- {
+- m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
+- }
+-#endif
+-
+-#if VMA_VULKAN_VERSION >= 1003000
+- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
+- {
+- m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)vkGetDeviceBufferMemoryRequirements;
+- m_VulkanFunctions.vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)vkGetDeviceImageMemoryRequirements;
+- }
+-#endif
+-}
+-
+-#endif // VMA_STATIC_VULKAN_FUNCTIONS == 1
+-
+-void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
+-{
+- VMA_ASSERT(pVulkanFunctions != VMA_NULL);
+-
+-#define VMA_COPY_IF_NOT_NULL(funcName) \
+- if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
+-
+- VMA_COPY_IF_NOT_NULL(vkGetInstanceProcAddr);
+- VMA_COPY_IF_NOT_NULL(vkGetDeviceProcAddr);
+- VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
+- VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
+- VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
+- VMA_COPY_IF_NOT_NULL(vkFreeMemory);
+- VMA_COPY_IF_NOT_NULL(vkMapMemory);
+- VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
+- VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
+- VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
+- VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
+- VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
+- VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
+- VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
+- VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
+- VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
+- VMA_COPY_IF_NOT_NULL(vkCreateImage);
+- VMA_COPY_IF_NOT_NULL(vkDestroyImage);
+- VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
+-
+-#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+- VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
+- VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
+-#endif
+-
+-#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
+- VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
+- VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
+-#endif
+-
+-#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
+- VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
+-#endif
+-
+-#if VMA_VULKAN_VERSION >= 1003000
+- VMA_COPY_IF_NOT_NULL(vkGetDeviceBufferMemoryRequirements);
+- VMA_COPY_IF_NOT_NULL(vkGetDeviceImageMemoryRequirements);
+-#endif
+-
+-#undef VMA_COPY_IF_NOT_NULL
+-}
+-
+-#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
+-
+-void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
+-{
+- VMA_ASSERT(m_VulkanFunctions.vkGetInstanceProcAddr && m_VulkanFunctions.vkGetDeviceProcAddr &&
+- "To use VMA_DYNAMIC_VULKAN_FUNCTIONS in new versions of VMA you now have to pass "
+- "VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as VmaAllocatorCreateInfo::pVulkanFunctions. "
+- "Other members can be null.");
+-
+-#define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
+- if(m_VulkanFunctions.memberName == VMA_NULL) \
+- m_VulkanFunctions.memberName = \
+- (functionPointerType)m_VulkanFunctions.vkGetInstanceProcAddr(m_hInstance, functionNameString);
+-#define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
+- if(m_VulkanFunctions.memberName == VMA_NULL) \
+- m_VulkanFunctions.memberName = \
+- (functionPointerType)m_VulkanFunctions.vkGetDeviceProcAddr(m_hDevice, functionNameString);
+-
+- VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
+- VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
+- VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
+- VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
+- VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
+- VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
+- VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
+- VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
+- VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
+- VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
+- VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
+- VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
+- VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
+- VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
+- VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
+- VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
+- VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");
+-
+-#if VMA_VULKAN_VERSION >= 1001000
+- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+- {
+- VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2");
+- VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2");
+- VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2");
+- VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2");
+- }
+-#endif
+-
+-#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
+- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+- {
+- VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2");
+- }
+- else if(m_UseExtMemoryBudget)
+- {
+- VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2KHR");
+- }
+-#endif
+-
+-#if VMA_DEDICATED_ALLOCATION
+- if(m_UseKhrDedicatedAllocation)
+- {
+- VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
+- VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
+- }
+-#endif
+-
+-#if VMA_BIND_MEMORY2
+- if(m_UseKhrBindMemory2)
+- {
+- VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
+- VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
+- }
+-#endif // #if VMA_BIND_MEMORY2
+-
+-#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
+- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+- {
+- VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2");
+- }
+- else if(m_UseExtMemoryBudget)
+- {
+- VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
+- }
+-#endif // #if VMA_MEMORY_BUDGET
+-
+-#if VMA_VULKAN_VERSION >= 1003000
+- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
+- {
+- VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirements, "vkGetDeviceBufferMemoryRequirements");
+- VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirements, "vkGetDeviceImageMemoryRequirements");
+- }
+-#endif
+-
+-#undef VMA_FETCH_DEVICE_FUNC
+-#undef VMA_FETCH_INSTANCE_FUNC
+-}
+-
+-#endif // VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
+-
+-void VmaAllocator_T::ValidateVulkanFunctions()
+-{
+- VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
+- VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
+- VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
+- VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
+- VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
+- VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
+- VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
+- VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
+- VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
+- VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
+- VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
+- VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
+- VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
+- VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
+- VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
+- VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
+- VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
+-
+-#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
+- {
+- VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
+- VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
+- }
+-#endif
+-
+-#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
+- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
+- {
+- VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
+- VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
+- }
+-#endif
+-
+-#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
+- if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+- {
+- VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
+- }
+-#endif
+-
+-#if VMA_VULKAN_VERSION >= 1003000
+- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
+- {
+- VMA_ASSERT(m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements != VMA_NULL);
+- VMA_ASSERT(m_VulkanFunctions.vkGetDeviceImageMemoryRequirements != VMA_NULL);
+- }
+-#endif
+-}
+-
+-VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
+-{
+- const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
+- const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
+- const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
+- return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
+-}
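+-
+- // Example of the rule above with the default thresholds
+- // (VMA_SMALL_HEAP_MAX_SIZE = 1 GiB, VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE =
+- // 256 MiB): a 512 MiB heap counts as small, so blocks are 512 MiB / 8 =
+- // 64 MiB; an 8 GiB heap gets 256 MiB blocks unless
+- // preferredLargeHeapBlockSize overrides it.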
+-
+-VkResult VmaAllocator_T::AllocateMemoryOfType(
+- VmaPool pool,
+- VkDeviceSize size,
+- VkDeviceSize alignment,
+- bool dedicatedPreferred,
+- VkBuffer dedicatedBuffer,
+- VkImage dedicatedImage,
+- VkFlags dedicatedBufferImageUsage,
+- const VmaAllocationCreateInfo& createInfo,
+- uint32_t memTypeIndex,
+- VmaSuballocationType suballocType,
+- VmaDedicatedAllocationList& dedicatedAllocations,
+- VmaBlockVector& blockVector,
+- size_t allocationCount,
+- VmaAllocation* pAllocations)
+-{
+- VMA_ASSERT(pAllocations != VMA_NULL);
+- VMA_DEBUG_LOG_FORMAT(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
+-
+- VmaAllocationCreateInfo finalCreateInfo = createInfo;
+- VkResult res = CalcMemTypeParams(
+- finalCreateInfo,
+- memTypeIndex,
+- size,
+- allocationCount);
+- if(res != VK_SUCCESS)
+- return res;
+-
+- if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
+- {
+- return AllocateDedicatedMemory(
+- pool,
+- size,
+- suballocType,
+- dedicatedAllocations,
+- memTypeIndex,
+- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
+- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
+- (finalCreateInfo.flags &
+- (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
+- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
+- finalCreateInfo.pUserData,
+- finalCreateInfo.priority,
+- dedicatedBuffer,
+- dedicatedImage,
+- dedicatedBufferImageUsage,
+- allocationCount,
+- pAllocations,
+- blockVector.GetAllocationNextPtr());
+- }
+- else
+- {
+- const bool canAllocateDedicated =
+- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
+- (pool == VK_NULL_HANDLE || !blockVector.HasExplicitBlockSize());
+-
+- if(canAllocateDedicated)
+- {
+- // Heuristic: allocate dedicated memory if the requested size is greater than half of the preferred block size.
+- if(size > blockVector.GetPreferredBlockSize() / 2)
+- {
+- dedicatedPreferred = true;
+- }
+- // Protection against creating each allocation as dedicated when we reach or exceed heap size/budget,
+- // which can quickly deplete maxMemoryAllocationCount: Don't prefer dedicated allocations when above
+- // 3/4 of the maximum allocation count.
+- if(m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount < UINT32_MAX / 4 &&
+- m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4)
+- {
+- dedicatedPreferred = false;
+- }
+-
+- if(dedicatedPreferred)
+- {
+- res = AllocateDedicatedMemory(
+- pool,
+- size,
+- suballocType,
+- dedicatedAllocations,
+- memTypeIndex,
+- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
+- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
+- (finalCreateInfo.flags &
+- (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
+- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
+- finalCreateInfo.pUserData,
+- finalCreateInfo.priority,
+- dedicatedBuffer,
+- dedicatedImage,
+- dedicatedBufferImageUsage,
+- allocationCount,
+- pAllocations,
+- blockVector.GetAllocationNextPtr());
+- if(res == VK_SUCCESS)
+- {
+- // Succeeded: AllocateDedicatedMemory already filled pAllocations, nothing more to do here.
+- VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
+- return VK_SUCCESS;
+- }
+- }
+- }
+-
+- res = blockVector.Allocate(
+- size,
+- alignment,
+- finalCreateInfo,
+- suballocType,
+- allocationCount,
+- pAllocations);
+- if(res == VK_SUCCESS)
+- return VK_SUCCESS;
+-
+- // Try dedicated memory.
+- if(canAllocateDedicated && !dedicatedPreferred)
+- {
+- res = AllocateDedicatedMemory(
+- pool,
+- size,
+- suballocType,
+- dedicatedAllocations,
+- memTypeIndex,
+- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
+- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
+- (finalCreateInfo.flags &
+- (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
+- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
+- finalCreateInfo.pUserData,
+- finalCreateInfo.priority,
+- dedicatedBuffer,
+- dedicatedImage,
+- dedicatedBufferImageUsage,
+- allocationCount,
+- pAllocations,
+- blockVector.GetAllocationNextPtr());
+- if(res == VK_SUCCESS)
+- {
+- // Succeeded: AllocateDedicatedMemory already filled pAllocations, nothing more to do here.
+- VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
+- return VK_SUCCESS;
+- }
+- }
+- // Everything failed: Return error code.
+- VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
+- return res;
+- }
+-}
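+-
+- // Net effect of the heuristics above: allocations larger than half the
+- // preferred block size lean toward their own VkDeviceMemory, but once about
+- // 3/4 of maxMemoryAllocationCount is in use the allocator prefers
+- // suballocating from blocks again, keeping dedicated memory as a fallback
+- // when block allocation fails.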
+-
+-VkResult VmaAllocator_T::AllocateDedicatedMemory(
+- VmaPool pool,
+- VkDeviceSize size,
+- VmaSuballocationType suballocType,
+- VmaDedicatedAllocationList& dedicatedAllocations,
+- uint32_t memTypeIndex,
+- bool map,
+- bool isUserDataString,
+- bool isMappingAllowed,
+- bool canAliasMemory,
+- void* pUserData,
+- float priority,
+- VkBuffer dedicatedBuffer,
+- VkImage dedicatedImage,
+- VkFlags dedicatedBufferImageUsage,
+- size_t allocationCount,
+- VmaAllocation* pAllocations,
+- const void* pNextChain)
+-{
+- VMA_ASSERT(allocationCount > 0 && pAllocations);
+-
+- VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+- allocInfo.memoryTypeIndex = memTypeIndex;
+- allocInfo.allocationSize = size;
+- allocInfo.pNext = pNextChain;
+-
+-#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+- VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
+- if(!canAliasMemory)
+- {
+- if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+- {
+- if(dedicatedBuffer != VK_NULL_HANDLE)
+- {
+- VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
+- dedicatedAllocInfo.buffer = dedicatedBuffer;
+- VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
+- }
+- else if(dedicatedImage != VK_NULL_HANDLE)
+- {
+- dedicatedAllocInfo.image = dedicatedImage;
+- VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
+- }
+- }
+- }
+-#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+-
+-#if VMA_BUFFER_DEVICE_ADDRESS
+- VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
+- if(m_UseKhrBufferDeviceAddress)
+- {
+- bool canContainBufferWithDeviceAddress = true;
+- if(dedicatedBuffer != VK_NULL_HANDLE)
+- {
+- canContainBufferWithDeviceAddress = dedicatedBufferImageUsage == UINT32_MAX || // Usage flags unknown
+- (dedicatedBufferImageUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
+- }
+- else if(dedicatedImage != VK_NULL_HANDLE)
+- {
+- canContainBufferWithDeviceAddress = false;
+- }
+- if(canContainBufferWithDeviceAddress)
+- {
+- allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
+- VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
+- }
+- }
+-#endif // #if VMA_BUFFER_DEVICE_ADDRESS
+-
+-#if VMA_MEMORY_PRIORITY
+- VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
+- if(m_UseExtMemoryPriority)
+- {
+- VMA_ASSERT(priority >= 0.f && priority <= 1.f);
+- priorityInfo.priority = priority;
+- VmaPnextChainPushFront(&allocInfo, &priorityInfo);
+- }
+-#endif // #if VMA_MEMORY_PRIORITY
+-
+-#if VMA_EXTERNAL_MEMORY
+- // Attach VkExportMemoryAllocateInfoKHR if necessary.
+- VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
+- exportMemoryAllocInfo.handleTypes = GetExternalMemoryHandleTypeFlags(memTypeIndex);
+- if(exportMemoryAllocInfo.handleTypes != 0)
+- {
+- VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
+- }
+-#endif // #if VMA_EXTERNAL_MEMORY
+-
+- size_t allocIndex;
+- VkResult res = VK_SUCCESS;
+- for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+- {
+- res = AllocateDedicatedMemoryPage(
+- pool,
+- size,
+- suballocType,
+- memTypeIndex,
+- allocInfo,
+- map,
+- isUserDataString,
+- isMappingAllowed,
+- pUserData,
+- pAllocations + allocIndex);
+- if(res != VK_SUCCESS)
+- {
+- break;
+- }
+- }
+-
+- if(res == VK_SUCCESS)
+- {
+- for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+- {
+- dedicatedAllocations.Register(pAllocations[allocIndex]);
+- }
+- VMA_DEBUG_LOG_FORMAT(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
+- }
+- else
+- {
+- // Free all already created allocations.
+- while(allocIndex--)
+- {
+- VmaAllocation currAlloc = pAllocations[allocIndex];
+- VkDeviceMemory hMemory = currAlloc->GetMemory();
+-
+- /*
+- There is no need to call this, because the Vulkan spec allows skipping
+- vkUnmapMemory before vkFreeMemory.
+-
+- if(currAlloc->GetMappedData() != VMA_NULL)
+- {
+- (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
+- }
+- */
+-
+- FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
+- m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
+- m_AllocationObjectAllocator.Free(currAlloc);
+- }
+-
+- memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
+- }
+-
+- return res;
+-}
+-
+-VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
+- VmaPool pool,
+- VkDeviceSize size,
+- VmaSuballocationType suballocType,
+- uint32_t memTypeIndex,
+- const VkMemoryAllocateInfo& allocInfo,
+- bool map,
+- bool isUserDataString,
+- bool isMappingAllowed,
+- void* pUserData,
+- VmaAllocation* pAllocation)
+-{
+- VkDeviceMemory hMemory = VK_NULL_HANDLE;
+- VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
+- if(res < 0)
+- {
+- VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
+- return res;
+- }
+-
+- void* pMappedData = VMA_NULL;
+- if(map)
+- {
+- res = (*m_VulkanFunctions.vkMapMemory)(
+- m_hDevice,
+- hMemory,
+- 0,
+- VK_WHOLE_SIZE,
+- 0,
+- &pMappedData);
+- if(res < 0)
+- {
+- VMA_DEBUG_LOG(" vkMapMemory FAILED");
+- FreeVulkanMemory(memTypeIndex, size, hMemory);
+- return res;
+- }
+- }
+-
+- *pAllocation = m_AllocationObjectAllocator.Allocate(isMappingAllowed);
+- (*pAllocation)->InitDedicatedAllocation(pool, memTypeIndex, hMemory, suballocType, pMappedData, size);
+- if (isUserDataString)
+- (*pAllocation)->SetName(this, (const char*)pUserData);
+- else
+- (*pAllocation)->SetUserData(this, pUserData);
+- m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
+- if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
+- {
+- FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
+- }
+-
+- return VK_SUCCESS;
+-}
+-
+-void VmaAllocator_T::GetBufferMemoryRequirements(
+- VkBuffer hBuffer,
+- VkMemoryRequirements& memReq,
+- bool& requiresDedicatedAllocation,
+- bool& prefersDedicatedAllocation) const
+-{
+-#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+- if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+- {
+- VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
+- memReqInfo.buffer = hBuffer;
+-
+- VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
+-
+- VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
+- VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
+-
+- (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
+-
+- memReq = memReq2.memoryRequirements;
+- requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
+- prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
+- }
+- else
+-#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+- {
+- (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
+- requiresDedicatedAllocation = false;
+- prefersDedicatedAllocation = false;
+- }
+-}
+-
+-void VmaAllocator_T::GetImageMemoryRequirements(
+- VkImage hImage,
+- VkMemoryRequirements& memReq,
+- bool& requiresDedicatedAllocation,
+- bool& prefersDedicatedAllocation) const
+-{
+-#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+- if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+- {
+- VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
+- memReqInfo.image = hImage;
+-
+- VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
+-
+- VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
+- VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
+-
+- (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
+-
+- memReq = memReq2.memoryRequirements;
+- requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
+- prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
+- }
+- else
+-#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+- {
+- (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
+- requiresDedicatedAllocation = false;
+- prefersDedicatedAllocation = false;
+- }
+-}
+-
+-VkResult VmaAllocator_T::FindMemoryTypeIndex(
+- uint32_t memoryTypeBits,
+- const VmaAllocationCreateInfo* pAllocationCreateInfo,
+- VkFlags bufImgUsage,
+- uint32_t* pMemoryTypeIndex) const
+-{
+- memoryTypeBits &= GetGlobalMemoryTypeBits();
+-
+- if(pAllocationCreateInfo->memoryTypeBits != 0)
+- {
+- memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
+- }
+-
+- VkMemoryPropertyFlags requiredFlags = 0, preferredFlags = 0, notPreferredFlags = 0;
+- if(!FindMemoryPreferences(
+- IsIntegratedGpu(),
+- *pAllocationCreateInfo,
+- bufImgUsage,
+- requiredFlags, preferredFlags, notPreferredFlags))
+- {
+- return VK_ERROR_FEATURE_NOT_PRESENT;
+- }
+-
+- *pMemoryTypeIndex = UINT32_MAX;
+- uint32_t minCost = UINT32_MAX;
+- for(uint32_t memTypeIndex = 0, memTypeBit = 1;
+- memTypeIndex < GetMemoryTypeCount();
+- ++memTypeIndex, memTypeBit <<= 1)
+- {
+- // This memory type is acceptable according to memoryTypeBits bitmask.
+- if((memTypeBit & memoryTypeBits) != 0)
+- {
+- const VkMemoryPropertyFlags currFlags =
+- m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
+- // This memory type contains requiredFlags.
+- if((requiredFlags & ~currFlags) == 0)
+- {
+- // Calculate cost as the number of preferredFlags bits missing from this memory type, plus the number of notPreferredFlags bits present in it.
+- uint32_t currCost = VMA_COUNT_BITS_SET(preferredFlags & ~currFlags) +
+- VMA_COUNT_BITS_SET(currFlags & notPreferredFlags);
+- // Remember memory type with lowest cost.
+- if(currCost < minCost)
+- {
+- *pMemoryTypeIndex = memTypeIndex;
+- if(currCost == 0)
+- {
+- return VK_SUCCESS;
+- }
+- minCost = currCost;
+- }
+- }
+- }
+- }
+- return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
+-}
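+-
+- // Cost-model example (flag names from the Vulkan spec; values illustrative):
+- // with preferredFlags = HOST_COHERENT and notPreferredFlags = HOST_CACHED, a
+- // HOST_VISIBLE | HOST_COHERENT type costs 0 and is returned immediately,
+- // while a HOST_VISIBLE | HOST_CACHED type costs 2 (one preferred bit
+- // missing, one not-preferred bit present). The public entry point is:
+- //
+- //   uint32_t memTypeIndex;
+- //   vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &allocCreateInfo, &memTypeIndex);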
+-
+-VkResult VmaAllocator_T::CalcMemTypeParams(
+- VmaAllocationCreateInfo& inoutCreateInfo,
+- uint32_t memTypeIndex,
+- VkDeviceSize size,
+- size_t allocationCount)
+-{
+- // If memory type is not HOST_VISIBLE, disable MAPPED.
+- if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
+- (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+- {
+- inoutCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
+- }
+-
+- if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
+- (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0)
+- {
+- const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
+- VmaBudget heapBudget = {};
+- GetHeapBudgets(&heapBudget, heapIndex, 1);
+- if(heapBudget.usage + size * allocationCount > heapBudget.budget)
+- {
+- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+- }
+- }
+- return VK_SUCCESS;
+-}
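+-
+- // Budget-check example: with DEDICATED_MEMORY and WITHIN_BUDGET both set,
+- // a request for 4 x 64 MiB fails up front with OUT_OF_DEVICE_MEMORY if
+- // current heap usage plus 256 MiB would exceed the budget reported by
+- // GetHeapBudgets(), rather than letting vkAllocateMemory overcommit.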
+-
+-VkResult VmaAllocator_T::CalcAllocationParams(
+- VmaAllocationCreateInfo& inoutCreateInfo,
+- bool dedicatedRequired,
+- bool dedicatedPreferred)
+-{
+- VMA_ASSERT((inoutCreateInfo.flags &
+- (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) !=
+- (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) &&
+- "Specifying both flags VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT and VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT is incorrect.");
+- VMA_ASSERT((((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) == 0 ||
+- (inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0)) &&
+- "Specifying VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT requires also VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.");
+- if(inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST)
+- {
+- if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0)
+- {
+- VMA_ASSERT((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0 &&
+- "When using VMA_ALLOCATION_CREATE_MAPPED_BIT and usage = VMA_MEMORY_USAGE_AUTO*, you must also specify VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.");
+- }
+- }
+-
+- // If memory is lazily allocated, it should always be dedicated.
+- if(dedicatedRequired ||
+- inoutCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
+- {
+- inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+- }
+-
+- if(inoutCreateInfo.pool != VK_NULL_HANDLE)
+- {
+- if(inoutCreateInfo.pool->m_BlockVector.HasExplicitBlockSize() &&
+- (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
+- {
+- VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations.");
+- return VK_ERROR_FEATURE_NOT_PRESENT;
+- }
+- inoutCreateInfo.priority = inoutCreateInfo.pool->m_BlockVector.GetPriority();
+- }
+-
+- if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
+- (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
+- {
+- VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
+- return VK_ERROR_FEATURE_NOT_PRESENT;
+- }
+-
+- if(VMA_DEBUG_ALWAYS_DEDICATED_MEMORY &&
+- (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
+- {
+- inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+- }
+-
+- // Non-auto USAGE values imply HOST_ACCESS flags.
+- // And so does VMA_MEMORY_USAGE_UNKNOWN because it is used with custom pools.
+- // Which specific flag is used doesn't matter. They change things only when used with VMA_MEMORY_USAGE_AUTO*.
+- // Otherwise they just protect from assert on mapping.
+- if(inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO &&
+- inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE &&
+- inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_HOST)
+- {
+- if((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) == 0)
+- {
+- inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
+- }
+- }
+-
+- return VK_SUCCESS;
+-}
+-
+-VkResult VmaAllocator_T::AllocateMemory(
+- const VkMemoryRequirements& vkMemReq,
+- bool requiresDedicatedAllocation,
+- bool prefersDedicatedAllocation,
+- VkBuffer dedicatedBuffer,
+- VkImage dedicatedImage,
+- VkFlags dedicatedBufferImageUsage,
+- const VmaAllocationCreateInfo& createInfo,
+- VmaSuballocationType suballocType,
+- size_t allocationCount,
+- VmaAllocation* pAllocations)
+-{
+- memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
+-
+- VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
+-
+- if(vkMemReq.size == 0)
+- {
+- return VK_ERROR_INITIALIZATION_FAILED;
+- }
+-
+- VmaAllocationCreateInfo createInfoFinal = createInfo;
+- VkResult res = CalcAllocationParams(createInfoFinal, requiresDedicatedAllocation, prefersDedicatedAllocation);
+- if(res != VK_SUCCESS)
+- return res;
+-
+- if(createInfoFinal.pool != VK_NULL_HANDLE)
+- {
+- VmaBlockVector& blockVector = createInfoFinal.pool->m_BlockVector;
+- return AllocateMemoryOfType(
+- createInfoFinal.pool,
+- vkMemReq.size,
+- vkMemReq.alignment,
+- prefersDedicatedAllocation,
+- dedicatedBuffer,
+- dedicatedImage,
+- dedicatedBufferImageUsage,
+- createInfoFinal,
+- blockVector.GetMemoryTypeIndex(),
+- suballocType,
+- createInfoFinal.pool->m_DedicatedAllocations,
+- blockVector,
+- allocationCount,
+- pAllocations);
+- }
+- else
+- {
+-        // Bit mask of Vulkan memory types acceptable for this allocation.
+- uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
+- uint32_t memTypeIndex = UINT32_MAX;
+- res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex);
+-        // If no single memory type matches the requirements, res is VK_ERROR_FEATURE_NOT_PRESENT.
+- if(res != VK_SUCCESS)
+- return res;
+- do
+- {
+- VmaBlockVector* blockVector = m_pBlockVectors[memTypeIndex];
+- VMA_ASSERT(blockVector && "Trying to use unsupported memory type!");
+- res = AllocateMemoryOfType(
+- VK_NULL_HANDLE,
+- vkMemReq.size,
+- vkMemReq.alignment,
+- requiresDedicatedAllocation || prefersDedicatedAllocation,
+- dedicatedBuffer,
+- dedicatedImage,
+- dedicatedBufferImageUsage,
+- createInfoFinal,
+- memTypeIndex,
+- suballocType,
+- m_DedicatedAllocations[memTypeIndex],
+- *blockVector,
+- allocationCount,
+- pAllocations);
+- // Allocation succeeded
+- if(res == VK_SUCCESS)
+- return VK_SUCCESS;
+-
+- // Remove old memTypeIndex from list of possibilities.
+- memoryTypeBits &= ~(1u << memTypeIndex);
+- // Find alternative memTypeIndex.
+- res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex);
+- } while(res == VK_SUCCESS);
+-
+- // No other matching memory type index could be found.
+- // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
+- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+- }
+-}
+-
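// The fallback loop above, restated in isolation: mask out each memory type that
// failed and ask for the next candidate. FindType() and TryAllocate() are
// hypothetical stand-ins for FindMemoryTypeIndex() and AllocateMemoryOfType().
VkResult AllocateWithFallback(uint32_t memoryTypeBits)
{
    uint32_t candidates = memoryTypeBits; // from VkMemoryRequirements::memoryTypeBits
    uint32_t typeIndex = UINT32_MAX;
    while(FindType(candidates, &typeIndex) == VK_SUCCESS)
    {
        if(TryAllocate(typeIndex) == VK_SUCCESS)
            return VK_SUCCESS;
        candidates &= ~(1u << typeIndex); // never retry a type that already failed
    }
    // Not the last FindType() error: at least one allocation was actually attempted.
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}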
+-void VmaAllocator_T::FreeMemory(
+- size_t allocationCount,
+- const VmaAllocation* pAllocations)
+-{
+- VMA_ASSERT(pAllocations);
+-
+- for(size_t allocIndex = allocationCount; allocIndex--; )
+- {
+- VmaAllocation allocation = pAllocations[allocIndex];
+-
+- if(allocation != VK_NULL_HANDLE)
+- {
+- if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
+- {
+- FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
+- }
+-
+- allocation->FreeName(this);
+-
+- switch(allocation->GetType())
+- {
+- case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+- {
+- VmaBlockVector* pBlockVector = VMA_NULL;
+- VmaPool hPool = allocation->GetParentPool();
+- if(hPool != VK_NULL_HANDLE)
+- {
+- pBlockVector = &hPool->m_BlockVector;
+- }
+- else
+- {
+- const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
+- pBlockVector = m_pBlockVectors[memTypeIndex];
+- VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!");
+- }
+- pBlockVector->Free(allocation);
+- }
+- break;
+- case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+- FreeDedicatedMemory(allocation);
+- break;
+- default:
+- VMA_ASSERT(0);
+- }
+- }
+- }
+-}
+-
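// FreeMemory() above walks the array backwards, so a whole batch obtained from
// vmaAllocateMemoryPages() can be handed back in one call. A hypothetical sketch;
// `pages` is assumed to have been filled by a successful allocation:
VmaAllocation pages[8];
// ... vmaAllocateMemoryPages(allocator, &memReq, &createInfo, 8, pages, VMA_NULL) ...
vmaFreeMemoryPages(allocator, 8, pages); // forwards to FreeMemory(8, pages)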
+-void VmaAllocator_T::CalculateStatistics(VmaTotalStatistics* pStats)
+-{
+- // Initialize.
+- VmaClearDetailedStatistics(pStats->total);
+- for(uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
+- VmaClearDetailedStatistics(pStats->memoryType[i]);
+- for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
+- VmaClearDetailedStatistics(pStats->memoryHeap[i]);
+-
+- // Process default pools.
+- for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+- {
+- VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
+- if (pBlockVector != VMA_NULL)
+- pBlockVector->AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
+- }
+-
+- // Process custom pools.
+- {
+- VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
+- for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
+- {
+- VmaBlockVector& blockVector = pool->m_BlockVector;
+- const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex();
+- blockVector.AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
+- pool->m_DedicatedAllocations.AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
+- }
+- }
+-
+- // Process dedicated allocations.
+- for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+- {
+- m_DedicatedAllocations[memTypeIndex].AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
+- }
+-
+- // Sum from memory types to memory heaps.
+- for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+- {
+- const uint32_t memHeapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex;
+- VmaAddDetailedStatistics(pStats->memoryHeap[memHeapIndex], pStats->memoryType[memTypeIndex]);
+- }
+-
+- // Sum from memory heaps to total.
+- for(uint32_t memHeapIndex = 0; memHeapIndex < GetMemoryHeapCount(); ++memHeapIndex)
+- VmaAddDetailedStatistics(pStats->total, pStats->memoryHeap[memHeapIndex]);
+-
+- VMA_ASSERT(pStats->total.statistics.allocationCount == 0 ||
+- pStats->total.allocationSizeMax >= pStats->total.allocationSizeMin);
+- VMA_ASSERT(pStats->total.unusedRangeCount == 0 ||
+- pStats->total.unusedRangeSizeMax >= pStats->total.unusedRangeSizeMin);
+-}
+-
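// Caller-side counterpart of the aggregation above, through the public API defined
// later in this file. Heap totals are sums of their memory types and the grand
// total is the sum of the heaps, so the min/max invariants asserted at the end of
// CalculateStatistics() also hold for stats.total here:
VmaTotalStatistics stats;
vmaCalculateStatistics(allocator, &stats);
const VkDeviceSize usedBytes   = stats.total.statistics.allocationBytes;        // bytes in live allocations
const VkDeviceSize unusedBytes = stats.total.statistics.blockBytes - usedBytes; // free space inside blocks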
+-void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount)
+-{
+-#if VMA_MEMORY_BUDGET
+- if(m_UseExtMemoryBudget)
+- {
+- if(m_Budget.m_OperationsSinceBudgetFetch < 30)
+- {
+- VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
+- for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets)
+- {
+- const uint32_t heapIndex = firstHeap + i;
+-
+- outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex];
+- outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex];
+- outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex];
+- outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
+-
+- if(m_Budget.m_VulkanUsage[heapIndex] + outBudgets->statistics.blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
+- {
+- outBudgets->usage = m_Budget.m_VulkanUsage[heapIndex] +
+- outBudgets->statistics.blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
+- }
+- else
+- {
+- outBudgets->usage = 0;
+- }
+-
+- // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
+- outBudgets->budget = VMA_MIN(
+- m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
+- }
+- }
+- else
+- {
+- UpdateVulkanBudget(); // Outside of mutex lock
+- GetHeapBudgets(outBudgets, firstHeap, heapCount); // Recursion
+- }
+- }
+- else
+-#endif
+- {
+- for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets)
+- {
+- const uint32_t heapIndex = firstHeap + i;
+-
+- outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex];
+- outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex];
+- outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex];
+- outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
+-
+- outBudgets->usage = outBudgets->statistics.blockBytes;
+-            outBudgets->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
+- }
+- }
+-}
+-
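// A typical use of the budgets computed above: check the remaining budget of a heap
// before creating a large resource. heapIndex and resourceSize are illustrative.
VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
vmaGetHeapBudgets(allocator, budgets); // fills one VmaBudget per memory heap
const uint32_t heapIndex = 0;
const VkDeviceSize resourceSize = 256ull << 20; // 256 MiB
if(budgets[heapIndex].usage + resourceSize > budgets[heapIndex].budget)
{
    // Over budget: consider a smaller resource or a host-visible fallback heap.
}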
+-void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
+-{
+- pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
+- pAllocationInfo->deviceMemory = hAllocation->GetMemory();
+- pAllocationInfo->offset = hAllocation->GetOffset();
+- pAllocationInfo->size = hAllocation->GetSize();
+- pAllocationInfo->pMappedData = hAllocation->GetMappedData();
+- pAllocationInfo->pUserData = hAllocation->GetUserData();
+- pAllocationInfo->pName = hAllocation->GetName();
+-}
+-
+-VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
+-{
+- VMA_DEBUG_LOG_FORMAT(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
+-
+- VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
+-
+-    // Protection against an uninitialized new structure member. If garbage data were left there, this pointer dereference would crash.
+- if(pCreateInfo->pMemoryAllocateNext)
+- {
+- VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0);
+- }
+-
+- if(newCreateInfo.maxBlockCount == 0)
+- {
+- newCreateInfo.maxBlockCount = SIZE_MAX;
+- }
+- if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
+- {
+- return VK_ERROR_INITIALIZATION_FAILED;
+- }
+- // Memory type index out of range or forbidden.
+- if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
+- ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
+- {
+- return VK_ERROR_FEATURE_NOT_PRESENT;
+- }
+- if(newCreateInfo.minAllocationAlignment > 0)
+- {
+- VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment));
+- }
+-
+- const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
+-
+- *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
+-
+- VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
+- if(res != VK_SUCCESS)
+- {
+- vma_delete(this, *pPool);
+- *pPool = VMA_NULL;
+- return res;
+- }
+-
+- // Add to m_Pools.
+- {
+- VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
+- (*pPool)->SetId(m_NextPoolId++);
+- m_Pools.PushBack(*pPool);
+- }
+-
+- return VK_SUCCESS;
+-}
+-
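// A minimal sketch of driving CreatePool() above through the public API. The memory
// type index is found first, because CreatePool() rejects out-of-range or globally
// forbidden types. The sample buffer parameters are illustrative only.
VkBufferCreateInfo sampleBufCI = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
sampleBufCI.size = 1024;
sampleBufCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

VmaAllocationCreateInfo sampleAllocCI = {};
sampleAllocCI.usage = VMA_MEMORY_USAGE_AUTO;

uint32_t poolMemTypeIndex = 0;
VkResult poolRes = vmaFindMemoryTypeIndexForBufferInfo(
    allocator, &sampleBufCI, &sampleAllocCI, &poolMemTypeIndex);

VmaPoolCreateInfo poolCI = {};
poolCI.memoryTypeIndex = poolMemTypeIndex;
poolCI.blockSize = 0;     // 0 = use the CalcPreferredBlockSize() default
poolCI.maxBlockCount = 0; // 0 = unlimited (becomes SIZE_MAX above)

VmaPool pool = VK_NULL_HANDLE;
if(poolRes == VK_SUCCESS)
    poolRes = vmaCreatePool(allocator, &poolCI, &pool);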
+-void VmaAllocator_T::DestroyPool(VmaPool pool)
+-{
+- // Remove from m_Pools.
+- {
+- VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
+- m_Pools.Remove(pool);
+- }
+-
+- vma_delete(this, pool);
+-}
+-
+-void VmaAllocator_T::GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats)
+-{
+- VmaClearStatistics(*pPoolStats);
+- pool->m_BlockVector.AddStatistics(*pPoolStats);
+- pool->m_DedicatedAllocations.AddStatistics(*pPoolStats);
+-}
+-
+-void VmaAllocator_T::CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats)
+-{
+- VmaClearDetailedStatistics(*pPoolStats);
+- pool->m_BlockVector.AddDetailedStatistics(*pPoolStats);
+- pool->m_DedicatedAllocations.AddDetailedStatistics(*pPoolStats);
+-}
+-
+-void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
+-{
+- m_CurrentFrameIndex.store(frameIndex);
+-
+-#if VMA_MEMORY_BUDGET
+- if(m_UseExtMemoryBudget)
+- {
+- UpdateVulkanBudget();
+- }
+-#endif // #if VMA_MEMORY_BUDGET
+-}
+-
+-VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
+-{
+- return hPool->m_BlockVector.CheckCorruption();
+-}
+-
+-VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
+-{
+- VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
+-
+- // Process default pools.
+- for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+- {
+- VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
+- if(pBlockVector != VMA_NULL)
+- {
+- VkResult localRes = pBlockVector->CheckCorruption();
+- switch(localRes)
+- {
+- case VK_ERROR_FEATURE_NOT_PRESENT:
+- break;
+- case VK_SUCCESS:
+- finalRes = VK_SUCCESS;
+- break;
+- default:
+- return localRes;
+- }
+- }
+- }
+-
+- // Process custom pools.
+- {
+- VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
+- for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
+- {
+- if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
+- {
+- VkResult localRes = pool->m_BlockVector.CheckCorruption();
+- switch(localRes)
+- {
+- case VK_ERROR_FEATURE_NOT_PRESENT:
+- break;
+- case VK_SUCCESS:
+- finalRes = VK_SUCCESS;
+- break;
+- default:
+- return localRes;
+- }
+- }
+- }
+- }
+-
+- return finalRes;
+-}
+-
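// Corruption checking only succeeds for memory types where VMA wrote its margin
// pattern, which requires building with VMA_DEBUG_DETECT_CORRUPTION and a nonzero
// VMA_DEBUG_MARGIN (an assumption about the build configuration, not visible in
// this hunk). A sketch of checking every memory type:
VkResult corruptionRes = vmaCheckCorruption(allocator, UINT32_MAX);
// VK_SUCCESS: at least one type was validated and its margins are intact.
// VK_ERROR_FEATURE_NOT_PRESENT: no requested type supports corruption detection.
// Any other value indicates detected corruption.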
+-VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
+-{
+- AtomicTransactionalIncrement<VMA_ATOMIC_UINT32> deviceMemoryCountIncrement;
+- const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount);
+-#if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
+- if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount)
+- {
+- return VK_ERROR_TOO_MANY_OBJECTS;
+- }
+-#endif
+-
+- const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
+-
+- // HeapSizeLimit is in effect for this heap.
+- if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
+- {
+- const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
+- VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
+- for(;;)
+- {
+- const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
+- if(blockBytesAfterAllocation > heapSize)
+- {
+- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+- }
+- if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
+- {
+- break;
+- }
+- }
+- }
+- else
+- {
+- m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
+- }
+- ++m_Budget.m_BlockCount[heapIndex];
+-
+- // VULKAN CALL vkAllocateMemory.
+- VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
+-
+- if(res == VK_SUCCESS)
+- {
+-#if VMA_MEMORY_BUDGET
+- ++m_Budget.m_OperationsSinceBudgetFetch;
+-#endif
+-
+- // Informative callback.
+- if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
+- {
+- (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
+- }
+-
+- deviceMemoryCountIncrement.Commit();
+- }
+- else
+- {
+- --m_Budget.m_BlockCount[heapIndex];
+- m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
+- }
+-
+- return res;
+-}
+-
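// The heap-limit guard above is a classic compare-and-swap reservation loop: the
// proposed new total must be re-validated on every iteration, because another
// thread may change the counter between the check and the exchange. The same
// pattern in isolation, with a std::atomic stand-in for m_Budget.m_BlockBytes:
#include <atomic>
#include <cstdint>

bool TryReserve(std::atomic<uint64_t>& counter, uint64_t amount, uint64_t limit)
{
    uint64_t cur = counter.load();
    for(;;)
    {
        const uint64_t next = cur + amount;
        if(next > limit)
            return false; // the reservation would exceed the cap
        // On failure, compare_exchange_strong reloads `cur`, and the cap is re-checked.
        if(counter.compare_exchange_strong(cur, next))
            return true;
    }
}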
+-void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
+-{
+- // Informative callback.
+- if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
+- {
+- (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
+- }
+-
+- // VULKAN CALL vkFreeMemory.
+- (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
+-
+- const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
+- --m_Budget.m_BlockCount[heapIndex];
+- m_Budget.m_BlockBytes[heapIndex] -= size;
+-
+- --m_DeviceMemoryCount;
+-}
+-
+-VkResult VmaAllocator_T::BindVulkanBuffer(
+- VkDeviceMemory memory,
+- VkDeviceSize memoryOffset,
+- VkBuffer buffer,
+- const void* pNext)
+-{
+- if(pNext != VMA_NULL)
+- {
+-#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
+- if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
+- m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
+- {
+- VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
+- bindBufferMemoryInfo.pNext = pNext;
+- bindBufferMemoryInfo.buffer = buffer;
+- bindBufferMemoryInfo.memory = memory;
+- bindBufferMemoryInfo.memoryOffset = memoryOffset;
+- return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
+- }
+- else
+-#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
+- {
+- return VK_ERROR_EXTENSION_NOT_PRESENT;
+- }
+- }
+- else
+- {
+- return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
+- }
+-}
+-
+-VkResult VmaAllocator_T::BindVulkanImage(
+- VkDeviceMemory memory,
+- VkDeviceSize memoryOffset,
+- VkImage image,
+- const void* pNext)
+-{
+- if(pNext != VMA_NULL)
+- {
+-#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
+- if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
+- m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
+- {
+-            VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
+-            bindImageMemoryInfo.pNext = pNext;
+-            bindImageMemoryInfo.image = image;
+-            bindImageMemoryInfo.memory = memory;
+-            bindImageMemoryInfo.memoryOffset = memoryOffset;
+-            return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);
+- }
+- else
+-#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
+- {
+- return VK_ERROR_EXTENSION_NOT_PRESENT;
+- }
+- }
+- else
+- {
+- return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
+- }
+-}
+-
+-VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
+-{
+- switch(hAllocation->GetType())
+- {
+- case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+- {
+- VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
+- char *pBytes = VMA_NULL;
+- VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
+- if(res == VK_SUCCESS)
+- {
+- *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
+- hAllocation->BlockAllocMap();
+- }
+- return res;
+- }
+- case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+- return hAllocation->DedicatedAllocMap(this, ppData);
+- default:
+- VMA_ASSERT(0);
+- return VK_ERROR_MEMORY_MAP_FAILED;
+- }
+-}
+-
+-void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
+-{
+- switch(hAllocation->GetType())
+- {
+- case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+- {
+- VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
+- hAllocation->BlockAllocUnmap();
+- pBlock->Unmap(this, 1);
+- }
+- break;
+- case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+- hAllocation->DedicatedAllocUnmap(this);
+- break;
+- default:
+- VMA_ASSERT(0);
+- }
+-}
+-
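// Caller-side counterpart of Map()/Unmap() through the public API. Block mappings
// are reference-counted (pBlock->Map(this, 1, ...) above), so nested map/unmap
// pairs on allocations sharing one VkDeviceMemory block are safe. `alloc`,
// `srcBytes`, and `srcSize` are assumed to exist.
void* mapped = VMA_NULL;
if(vmaMapMemory(allocator, alloc, &mapped) == VK_SUCCESS)
{
    memcpy(mapped, srcBytes, (size_t)srcSize); // write through the returned pointer
    vmaUnmapMemory(allocator, alloc);
}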
+-VkResult VmaAllocator_T::BindBufferMemory(
+- VmaAllocation hAllocation,
+- VkDeviceSize allocationLocalOffset,
+- VkBuffer hBuffer,
+- const void* pNext)
+-{
+- VkResult res = VK_ERROR_UNKNOWN;
+- switch(hAllocation->GetType())
+- {
+- case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+- res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
+- break;
+- case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+- {
+- VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
+- VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block.");
+- res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
+- break;
+- }
+- default:
+- VMA_ASSERT(0);
+- }
+- return res;
+-}
+-
+-VkResult VmaAllocator_T::BindImageMemory(
+- VmaAllocation hAllocation,
+- VkDeviceSize allocationLocalOffset,
+- VkImage hImage,
+- const void* pNext)
+-{
+- VkResult res = VK_ERROR_UNKNOWN;
+- switch(hAllocation->GetType())
+- {
+- case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+- res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
+- break;
+- case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+- {
+- VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
+- VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block.");
+- res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
+- break;
+- }
+- default:
+- VMA_ASSERT(0);
+- }
+- return res;
+-}
+-
+-VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
+- VmaAllocation hAllocation,
+- VkDeviceSize offset, VkDeviceSize size,
+- VMA_CACHE_OPERATION op)
+-{
+- VkResult res = VK_SUCCESS;
+-
+- VkMappedMemoryRange memRange = {};
+- if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
+- {
+- switch(op)
+- {
+- case VMA_CACHE_FLUSH:
+- res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
+- break;
+- case VMA_CACHE_INVALIDATE:
+- res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
+- break;
+- default:
+- VMA_ASSERT(0);
+- }
+- }
+- // else: Just ignore this call.
+- return res;
+-}
+-
+-VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
+- uint32_t allocationCount,
+- const VmaAllocation* allocations,
+- const VkDeviceSize* offsets, const VkDeviceSize* sizes,
+- VMA_CACHE_OPERATION op)
+-{
+- typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
+- typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
+- RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
+-
+- for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+- {
+- const VmaAllocation alloc = allocations[allocIndex];
+- const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
+- const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
+- VkMappedMemoryRange newRange;
+- if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
+- {
+- ranges.push_back(newRange);
+- }
+- }
+-
+- VkResult res = VK_SUCCESS;
+- if(!ranges.empty())
+- {
+- switch(op)
+- {
+- case VMA_CACHE_FLUSH:
+- res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
+- break;
+- case VMA_CACHE_INVALIDATE:
+- res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
+- break;
+- default:
+- VMA_ASSERT(0);
+- }
+- }
+- // else: Just ignore this call.
+- return res;
+-}
+-
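// A hypothetical batched flush after writing several mapped allocations. Passing
// VMA_NULL for offsets and sizes flushes each allocation in full (offset 0,
// VK_WHOLE_SIZE, per the defaults above). Ranges on HOST_COHERENT memory are
// dropped by GetFlushOrInvalidateRange(), so for coherent types this degrades to
// a no-op.
VmaAllocation touched[3] = { a0, a1, a2 }; // illustrative allocation handles
VkResult flushRes = vmaFlushAllocations(allocator, 3, touched, VMA_NULL, VMA_NULL);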
+-void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
+-{
+- VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
+-
+- const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
+- VmaPool parentPool = allocation->GetParentPool();
+- if(parentPool == VK_NULL_HANDLE)
+- {
+- // Default pool
+- m_DedicatedAllocations[memTypeIndex].Unregister(allocation);
+- }
+- else
+- {
+- // Custom pool
+- parentPool->m_DedicatedAllocations.Unregister(allocation);
+- }
+-
+- VkDeviceMemory hMemory = allocation->GetMemory();
+-
+- /*
+-    There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
+- before vkFreeMemory.
+-
+- if(allocation->GetMappedData() != VMA_NULL)
+- {
+- (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
+- }
+- */
+-
+- FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
+-
+- m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
+- m_AllocationObjectAllocator.Free(allocation);
+-
+- VMA_DEBUG_LOG_FORMAT(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
+-}
+-
+-uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
+-{
+- VkBufferCreateInfo dummyBufCreateInfo;
+- VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
+-
+- uint32_t memoryTypeBits = 0;
+-
+- // Create buffer.
+- VkBuffer buf = VK_NULL_HANDLE;
+- VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
+- m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
+- if(res == VK_SUCCESS)
+- {
+- // Query for supported memory types.
+- VkMemoryRequirements memReq;
+- (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
+- memoryTypeBits = memReq.memoryTypeBits;
+-
+- // Destroy buffer.
+- (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
+- }
+-
+- return memoryTypeBits;
+-}
+-
+-uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
+-{
+- // Make sure memory information is already fetched.
+- VMA_ASSERT(GetMemoryTypeCount() > 0);
+-
+- uint32_t memoryTypeBits = UINT32_MAX;
+-
+- if(!m_UseAmdDeviceCoherentMemory)
+- {
+- // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
+- for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+- {
+- if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
+- {
+- memoryTypeBits &= ~(1u << memTypeIndex);
+- }
+- }
+- }
+-
+- return memoryTypeBits;
+-}
+-
+-bool VmaAllocator_T::GetFlushOrInvalidateRange(
+- VmaAllocation allocation,
+- VkDeviceSize offset, VkDeviceSize size,
+- VkMappedMemoryRange& outRange) const
+-{
+- const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
+- if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
+- {
+- const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
+- const VkDeviceSize allocationSize = allocation->GetSize();
+- VMA_ASSERT(offset <= allocationSize);
+-
+- outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
+- outRange.pNext = VMA_NULL;
+- outRange.memory = allocation->GetMemory();
+-
+- switch(allocation->GetType())
+- {
+- case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+- outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
+- if(size == VK_WHOLE_SIZE)
+- {
+- outRange.size = allocationSize - outRange.offset;
+- }
+- else
+- {
+- VMA_ASSERT(offset + size <= allocationSize);
+- outRange.size = VMA_MIN(
+- VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
+- allocationSize - outRange.offset);
+- }
+- break;
+- case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+- {
+-            // 1. Compute the range relative to this allocation.
+- outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
+- if(size == VK_WHOLE_SIZE)
+- {
+- size = allocationSize - offset;
+- }
+- else
+- {
+- VMA_ASSERT(offset + size <= allocationSize);
+- }
+- outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);
+-
+- // 2. Adjust to whole block.
+- const VkDeviceSize allocationOffset = allocation->GetOffset();
+- VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
+- const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
+- outRange.offset += allocationOffset;
+- outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);
+-
+- break;
+- }
+- default:
+- VMA_ASSERT(0);
+- }
+- return true;
+- }
+- return false;
+-}
+-
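// The alignment arithmetic above, worked through a concrete example. Assuming
// nonCoherentAtomSize = 64 and a request to flush offset = 100, size = 8:
//   outRange.offset = VmaAlignDown(100, 64)          = 64
//   outRange.size   = VmaAlignUp(8 + (100 - 64), 64) = VmaAlignUp(44, 64) = 64
// The resulting window [64, 128) fully covers the requested [100, 108), and both
// bounds satisfy the VkMappedMemoryRange atom-size alignment rules.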
+-#if VMA_MEMORY_BUDGET
+-void VmaAllocator_T::UpdateVulkanBudget()
+-{
+- VMA_ASSERT(m_UseExtMemoryBudget);
+-
+- VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
+-
+- VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
+- VmaPnextChainPushFront(&memProps, &budgetProps);
+-
+- GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
+-
+- {
+- VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
+-
+- for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
+- {
+- m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
+- m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
+- m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
+-
+-            // Some buggy drivers return the budget incorrectly, e.g. 0 or much bigger than the heap size.
+- if(m_Budget.m_VulkanBudget[heapIndex] == 0)
+- {
+-                m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
+- }
+- else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
+- {
+- m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
+- }
+- if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
+- {
+- m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
+- }
+- }
+- m_Budget.m_OperationsSinceBudgetFetch = 0;
+- }
+-}
+-#endif // VMA_MEMORY_BUDGET
+-
+-void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
+-{
+- if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
+- hAllocation->IsMappingAllowed() &&
+- (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
+- {
+- void* pData = VMA_NULL;
+- VkResult res = Map(hAllocation, &pData);
+- if(res == VK_SUCCESS)
+- {
+- memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
+- FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
+- Unmap(hAllocation);
+- }
+- else
+- {
+- VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
+- }
+- }
+-}
+-
+-uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
+-{
+- uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
+- if(memoryTypeBits == UINT32_MAX)
+- {
+- memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
+- m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
+- }
+- return memoryTypeBits;
+-}
+-
+-#if VMA_STATS_STRING_ENABLED
+-void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
+-{
+- json.WriteString("DefaultPools");
+- json.BeginObject();
+- {
+- for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+- {
+- VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex];
+- VmaDedicatedAllocationList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex];
+- if (pBlockVector != VMA_NULL)
+- {
+- json.BeginString("Type ");
+- json.ContinueString(memTypeIndex);
+- json.EndString();
+- json.BeginObject();
+- {
+- json.WriteString("PreferredBlockSize");
+- json.WriteNumber(pBlockVector->GetPreferredBlockSize());
+-
+- json.WriteString("Blocks");
+- pBlockVector->PrintDetailedMap(json);
+-
+- json.WriteString("DedicatedAllocations");
+- dedicatedAllocList.BuildStatsString(json);
+- }
+- json.EndObject();
+- }
+- }
+- }
+- json.EndObject();
+-
+- json.WriteString("CustomPools");
+- json.BeginObject();
+- {
+- VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
+- if (!m_Pools.IsEmpty())
+- {
+- for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+- {
+- bool displayType = true;
+- size_t index = 0;
+- for (VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
+- {
+- VmaBlockVector& blockVector = pool->m_BlockVector;
+- if (blockVector.GetMemoryTypeIndex() == memTypeIndex)
+- {
+- if (displayType)
+- {
+- json.BeginString("Type ");
+- json.ContinueString(memTypeIndex);
+- json.EndString();
+- json.BeginArray();
+- displayType = false;
+- }
+-
+- json.BeginObject();
+- {
+- json.WriteString("Name");
+- json.BeginString();
+- json.ContinueString((uint64_t)index++);
+- if (pool->GetName())
+- {
+- json.ContinueString(" - ");
+- json.ContinueString(pool->GetName());
+- }
+- json.EndString();
+-
+- json.WriteString("PreferredBlockSize");
+- json.WriteNumber(blockVector.GetPreferredBlockSize());
+-
+- json.WriteString("Blocks");
+- blockVector.PrintDetailedMap(json);
+-
+- json.WriteString("DedicatedAllocations");
+- pool->m_DedicatedAllocations.BuildStatsString(json);
+- }
+- json.EndObject();
+- }
+- }
+-
+- if (!displayType)
+- json.EndArray();
+- }
+- }
+- }
+- json.EndObject();
+-}
+-#endif // VMA_STATS_STRING_ENABLED
+-#endif // _VMA_ALLOCATOR_T_FUNCTIONS
+-
+-
+-#ifndef _VMA_PUBLIC_INTERFACE
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
+- const VmaAllocatorCreateInfo* pCreateInfo,
+- VmaAllocator* pAllocator)
+-{
+- VMA_ASSERT(pCreateInfo && pAllocator);
+- VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
+- (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 3));
+- VMA_DEBUG_LOG("vmaCreateAllocator");
+- *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
+- VkResult result = (*pAllocator)->Init(pCreateInfo);
+- if(result < 0)
+- {
+- vma_delete(pCreateInfo->pAllocationCallbacks, *pAllocator);
+- *pAllocator = VK_NULL_HANDLE;
+- }
+- return result;
+-}
+-
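// Minimal setup through this entry point, assuming `instance`, `physicalDevice`,
// and `device` already exist. The API version must satisfy the assertion above
// (Vulkan 1.0 through 1.3 for this version of VMA).
VmaAllocatorCreateInfo allocatorCI = {};
allocatorCI.vulkanApiVersion = VK_API_VERSION_1_1;
allocatorCI.instance = instance;
allocatorCI.physicalDevice = physicalDevice;
allocatorCI.device = device;

VmaAllocator exampleAllocator = VK_NULL_HANDLE;
VkResult allocatorRes = vmaCreateAllocator(&allocatorCI, &exampleAllocator);
// ... use the allocator ...
// Pair with vmaDestroyAllocator() (defined just below); it tolerates VK_NULL_HANDLE.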
+-VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
+- VmaAllocator allocator)
+-{
+- if(allocator != VK_NULL_HANDLE)
+- {
+- VMA_DEBUG_LOG("vmaDestroyAllocator");
+- VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks; // Have to copy the callbacks when destroying.
+- vma_delete(&allocationCallbacks, allocator);
+- }
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
+-{
+- VMA_ASSERT(allocator && pAllocatorInfo);
+- pAllocatorInfo->instance = allocator->m_hInstance;
+- pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
+- pAllocatorInfo->device = allocator->m_hDevice;
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
+- VmaAllocator allocator,
+- const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
+-{
+- VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
+- *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
+- VmaAllocator allocator,
+- const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
+-{
+- VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
+- *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
+- VmaAllocator allocator,
+- uint32_t memoryTypeIndex,
+- VkMemoryPropertyFlags* pFlags)
+-{
+- VMA_ASSERT(allocator && pFlags);
+- VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
+- *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
+- VmaAllocator allocator,
+- uint32_t frameIndex)
+-{
+- VMA_ASSERT(allocator);
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- allocator->SetCurrentFrameIndex(frameIndex);
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics(
+- VmaAllocator allocator,
+- VmaTotalStatistics* pStats)
+-{
+- VMA_ASSERT(allocator && pStats);
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+- allocator->CalculateStatistics(pStats);
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets(
+- VmaAllocator allocator,
+- VmaBudget* pBudgets)
+-{
+- VMA_ASSERT(allocator && pBudgets);
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+- allocator->GetHeapBudgets(pBudgets, 0, allocator->GetMemoryHeapCount());
+-}
+-
+-#if VMA_STATS_STRING_ENABLED
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
+- VmaAllocator allocator,
+- char** ppStatsString,
+- VkBool32 detailedMap)
+-{
+- VMA_ASSERT(allocator && ppStatsString);
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- VmaStringBuilder sb(allocator->GetAllocationCallbacks());
+- {
+- VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
+- allocator->GetHeapBudgets(budgets, 0, allocator->GetMemoryHeapCount());
+-
+- VmaTotalStatistics stats;
+- allocator->CalculateStatistics(&stats);
+-
+- VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
+- json.BeginObject();
+- {
+- json.WriteString("General");
+- json.BeginObject();
+- {
+- const VkPhysicalDeviceProperties& deviceProperties = allocator->m_PhysicalDeviceProperties;
+- const VkPhysicalDeviceMemoryProperties& memoryProperties = allocator->m_MemProps;
+-
+- json.WriteString("API");
+- json.WriteString("Vulkan");
+-
+- json.WriteString("apiVersion");
+- json.BeginString();
+- json.ContinueString(VK_VERSION_MAJOR(deviceProperties.apiVersion));
+- json.ContinueString(".");
+- json.ContinueString(VK_VERSION_MINOR(deviceProperties.apiVersion));
+- json.ContinueString(".");
+- json.ContinueString(VK_VERSION_PATCH(deviceProperties.apiVersion));
+- json.EndString();
+-
+- json.WriteString("GPU");
+- json.WriteString(deviceProperties.deviceName);
+- json.WriteString("deviceType");
+- json.WriteNumber(static_cast<uint32_t>(deviceProperties.deviceType));
+-
+- json.WriteString("maxMemoryAllocationCount");
+- json.WriteNumber(deviceProperties.limits.maxMemoryAllocationCount);
+- json.WriteString("bufferImageGranularity");
+- json.WriteNumber(deviceProperties.limits.bufferImageGranularity);
+- json.WriteString("nonCoherentAtomSize");
+- json.WriteNumber(deviceProperties.limits.nonCoherentAtomSize);
+-
+- json.WriteString("memoryHeapCount");
+- json.WriteNumber(memoryProperties.memoryHeapCount);
+- json.WriteString("memoryTypeCount");
+- json.WriteNumber(memoryProperties.memoryTypeCount);
+- }
+- json.EndObject();
+- }
+- {
+- json.WriteString("Total");
+- VmaPrintDetailedStatistics(json, stats.total);
+- }
+- {
+- json.WriteString("MemoryInfo");
+- json.BeginObject();
+- {
+- for (uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
+- {
+- json.BeginString("Heap ");
+- json.ContinueString(heapIndex);
+- json.EndString();
+- json.BeginObject();
+- {
+- const VkMemoryHeap& heapInfo = allocator->m_MemProps.memoryHeaps[heapIndex];
+- json.WriteString("Flags");
+- json.BeginArray(true);
+- {
+- if (heapInfo.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)
+- json.WriteString("DEVICE_LOCAL");
+- #if VMA_VULKAN_VERSION >= 1001000
+- if (heapInfo.flags & VK_MEMORY_HEAP_MULTI_INSTANCE_BIT)
+- json.WriteString("MULTI_INSTANCE");
+- #endif
+-
+- VkMemoryHeapFlags flags = heapInfo.flags &
+- ~(VK_MEMORY_HEAP_DEVICE_LOCAL_BIT
+- #if VMA_VULKAN_VERSION >= 1001000
+- | VK_MEMORY_HEAP_MULTI_INSTANCE_BIT
+- #endif
+- );
+- if (flags != 0)
+- json.WriteNumber(flags);
+- }
+- json.EndArray();
+-
+- json.WriteString("Size");
+- json.WriteNumber(heapInfo.size);
+-
+- json.WriteString("Budget");
+- json.BeginObject();
+- {
+- json.WriteString("BudgetBytes");
+- json.WriteNumber(budgets[heapIndex].budget);
+- json.WriteString("UsageBytes");
+- json.WriteNumber(budgets[heapIndex].usage);
+- }
+- json.EndObject();
+-
+- json.WriteString("Stats");
+- VmaPrintDetailedStatistics(json, stats.memoryHeap[heapIndex]);
+-
+- json.WriteString("MemoryPools");
+- json.BeginObject();
+- {
+- for (uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
+- {
+- if (allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
+- {
+- json.BeginString("Type ");
+- json.ContinueString(typeIndex);
+- json.EndString();
+- json.BeginObject();
+- {
+- json.WriteString("Flags");
+- json.BeginArray(true);
+- {
+- VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
+- if (flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
+- json.WriteString("DEVICE_LOCAL");
+- if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
+- json.WriteString("HOST_VISIBLE");
+- if (flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
+- json.WriteString("HOST_COHERENT");
+- if (flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT)
+- json.WriteString("HOST_CACHED");
+- if (flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT)
+- json.WriteString("LAZILY_ALLOCATED");
+- #if VMA_VULKAN_VERSION >= 1001000
+- if (flags & VK_MEMORY_PROPERTY_PROTECTED_BIT)
+- json.WriteString("PROTECTED");
+- #endif
+- #if VK_AMD_device_coherent_memory
+- if (flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY)
+- json.WriteString("DEVICE_COHERENT_AMD");
+- if (flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)
+- json.WriteString("DEVICE_UNCACHED_AMD");
+- #endif
+-
+-                                        flags &= ~(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
+-                                            | VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT
+-                                        #if VMA_VULKAN_VERSION >= 1001000
+-                                            | VK_MEMORY_PROPERTY_PROTECTED_BIT
+-                                        #endif
+- #if VK_AMD_device_coherent_memory
+- | VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY
+- | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY
+- #endif
+- | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
+- | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
+- | VK_MEMORY_PROPERTY_HOST_CACHED_BIT);
+- if (flags != 0)
+- json.WriteNumber(flags);
+- }
+- json.EndArray();
+-
+- json.WriteString("Stats");
+- VmaPrintDetailedStatistics(json, stats.memoryType[typeIndex]);
+- }
+- json.EndObject();
+- }
+- }
+-
+- }
+- json.EndObject();
+- }
+- json.EndObject();
+- }
+- }
+- json.EndObject();
+- }
+-
+- if (detailedMap == VK_TRUE)
+- allocator->PrintDetailedMap(json);
+-
+- json.EndObject();
+- }
+-
+- *ppStatsString = VmaCreateStringCopy(allocator->GetAllocationCallbacks(), sb.GetData(), sb.GetLength());
+-}
+-
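// Typical pairing of vmaBuildStatsString() with vmaFreeStatsString() (defined just
// below): dump the JSON built above to a file for offline inspection, e.g. with
// the VmaDumpVis tool shipped with VMA. The file name is illustrative.
#include <cstdio>

char* statsJson = VMA_NULL;
vmaBuildStatsString(allocator, &statsJson, VK_TRUE); // VK_TRUE also emits the detailed map
if(statsJson != VMA_NULL)
{
    if(FILE* f = fopen("vma_stats.json", "w"))
    {
        fputs(statsJson, f);
        fclose(f);
    }
    vmaFreeStatsString(allocator, statsJson);
}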
+-VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
+- VmaAllocator allocator,
+- char* pStatsString)
+-{
+- if(pStatsString != VMA_NULL)
+- {
+- VMA_ASSERT(allocator);
+- VmaFreeString(allocator->GetAllocationCallbacks(), pStatsString);
+- }
+-}
+-
+-#endif // VMA_STATS_STRING_ENABLED
+-
+-/*
+-This function is not protected by any mutex because it just reads immutable data.
+-*/
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
+- VmaAllocator allocator,
+- uint32_t memoryTypeBits,
+- const VmaAllocationCreateInfo* pAllocationCreateInfo,
+- uint32_t* pMemoryTypeIndex)
+-{
+- VMA_ASSERT(allocator != VK_NULL_HANDLE);
+- VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
+- VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
+-
+- return allocator->FindMemoryTypeIndex(memoryTypeBits, pAllocationCreateInfo, UINT32_MAX, pMemoryTypeIndex);
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
+- VmaAllocator allocator,
+- const VkBufferCreateInfo* pBufferCreateInfo,
+- const VmaAllocationCreateInfo* pAllocationCreateInfo,
+- uint32_t* pMemoryTypeIndex)
+-{
+- VMA_ASSERT(allocator != VK_NULL_HANDLE);
+- VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
+- VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
+- VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
+-
+- const VkDevice hDev = allocator->m_hDevice;
+- const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions();
+- VkResult res;
+-
+-#if VMA_VULKAN_VERSION >= 1003000
+- if(funcs->vkGetDeviceBufferMemoryRequirements)
+- {
+- // Can query straight from VkBufferCreateInfo :)
+- VkDeviceBufferMemoryRequirements devBufMemReq = {VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS};
+- devBufMemReq.pCreateInfo = pBufferCreateInfo;
+-
+- VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2};
+- (*funcs->vkGetDeviceBufferMemoryRequirements)(hDev, &devBufMemReq, &memReq);
+-
+- res = allocator->FindMemoryTypeIndex(
+- memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex);
+- }
+- else
+-#endif // #if VMA_VULKAN_VERSION >= 1003000
+- {
+- // Must create a dummy buffer to query :(
+- VkBuffer hBuffer = VK_NULL_HANDLE;
+- res = funcs->vkCreateBuffer(
+- hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
+- if(res == VK_SUCCESS)
+- {
+- VkMemoryRequirements memReq = {};
+- funcs->vkGetBufferMemoryRequirements(hDev, hBuffer, &memReq);
+-
+- res = allocator->FindMemoryTypeIndex(
+- memReq.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex);
+-
+- funcs->vkDestroyBuffer(
+- hDev, hBuffer, allocator->GetAllocationCallbacks());
+- }
+- }
+- return res;
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
+- VmaAllocator allocator,
+- const VkImageCreateInfo* pImageCreateInfo,
+- const VmaAllocationCreateInfo* pAllocationCreateInfo,
+- uint32_t* pMemoryTypeIndex)
+-{
+- VMA_ASSERT(allocator != VK_NULL_HANDLE);
+- VMA_ASSERT(pImageCreateInfo != VMA_NULL);
+- VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
+- VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
+-
+- const VkDevice hDev = allocator->m_hDevice;
+- const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions();
+- VkResult res;
+-
+-#if VMA_VULKAN_VERSION >= 1003000
+- if(funcs->vkGetDeviceImageMemoryRequirements)
+- {
+- // Can query straight from VkImageCreateInfo :)
+- VkDeviceImageMemoryRequirements devImgMemReq = {VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS};
+- devImgMemReq.pCreateInfo = pImageCreateInfo;
+- VMA_ASSERT(pImageCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY && (pImageCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT_COPY) == 0 &&
+- "Cannot use this VkImageCreateInfo with vmaFindMemoryTypeIndexForImageInfo as I don't know what to pass as VkDeviceImageMemoryRequirements::planeAspect.");
+-
+- VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2};
+- (*funcs->vkGetDeviceImageMemoryRequirements)(hDev, &devImgMemReq, &memReq);
+-
+- res = allocator->FindMemoryTypeIndex(
+- memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex);
+- }
+- else
+-#endif // #if VMA_VULKAN_VERSION >= 1003000
+- {
+- // Must create a dummy image to query :(
+- VkImage hImage = VK_NULL_HANDLE;
+- res = funcs->vkCreateImage(
+- hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
+- if(res == VK_SUCCESS)
+- {
+- VkMemoryRequirements memReq = {};
+- funcs->vkGetImageMemoryRequirements(hDev, hImage, &memReq);
+-
+- res = allocator->FindMemoryTypeIndex(
+- memReq.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex);
+-
+- funcs->vkDestroyImage(
+- hDev, hImage, allocator->GetAllocationCallbacks());
+- }
+- }
+- return res;
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
+- VmaAllocator allocator,
+- const VmaPoolCreateInfo* pCreateInfo,
+- VmaPool* pPool)
+-{
+- VMA_ASSERT(allocator && pCreateInfo && pPool);
+-
+- VMA_DEBUG_LOG("vmaCreatePool");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- return allocator->CreatePool(pCreateInfo, pPool);
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
+- VmaAllocator allocator,
+- VmaPool pool)
+-{
+- VMA_ASSERT(allocator);
+-
+- if(pool == VK_NULL_HANDLE)
+- {
+- return;
+- }
+-
+- VMA_DEBUG_LOG("vmaDestroyPool");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- allocator->DestroyPool(pool);
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics(
+- VmaAllocator allocator,
+- VmaPool pool,
+- VmaStatistics* pPoolStats)
+-{
+- VMA_ASSERT(allocator && pool && pPoolStats);
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- allocator->GetPoolStatistics(pool, pPoolStats);
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics(
+- VmaAllocator allocator,
+- VmaPool pool,
+- VmaDetailedStatistics* pPoolStats)
+-{
+- VMA_ASSERT(allocator && pool && pPoolStats);
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- allocator->CalculatePoolStatistics(pool, pPoolStats);
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
+-{
+- VMA_ASSERT(allocator && pool);
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- VMA_DEBUG_LOG("vmaCheckPoolCorruption");
+-
+- return allocator->CheckPoolCorruption(pool);
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
+- VmaAllocator allocator,
+- VmaPool pool,
+- const char** ppName)
+-{
+- VMA_ASSERT(allocator && pool && ppName);
+-
+- VMA_DEBUG_LOG("vmaGetPoolName");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- *ppName = pool->GetName();
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
+- VmaAllocator allocator,
+- VmaPool pool,
+- const char* pName)
+-{
+- VMA_ASSERT(allocator && pool);
+-
+- VMA_DEBUG_LOG("vmaSetPoolName");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- pool->SetName(pName);
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
+- VmaAllocator allocator,
+- const VkMemoryRequirements* pVkMemoryRequirements,
+- const VmaAllocationCreateInfo* pCreateInfo,
+- VmaAllocation* pAllocation,
+- VmaAllocationInfo* pAllocationInfo)
+-{
+- VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
+-
+- VMA_DEBUG_LOG("vmaAllocateMemory");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- VkResult result = allocator->AllocateMemory(
+- *pVkMemoryRequirements,
+- false, // requiresDedicatedAllocation
+- false, // prefersDedicatedAllocation
+- VK_NULL_HANDLE, // dedicatedBuffer
+- VK_NULL_HANDLE, // dedicatedImage
+- UINT32_MAX, // dedicatedBufferImageUsage
+- *pCreateInfo,
+- VMA_SUBALLOCATION_TYPE_UNKNOWN,
+- 1, // allocationCount
+- pAllocation);
+-
+- if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
+- {
+- allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+- }
+-
+- return result;
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
+- VmaAllocator allocator,
+- const VkMemoryRequirements* pVkMemoryRequirements,
+- const VmaAllocationCreateInfo* pCreateInfo,
+- size_t allocationCount,
+- VmaAllocation* pAllocations,
+- VmaAllocationInfo* pAllocationInfo)
+-{
+- if(allocationCount == 0)
+- {
+- return VK_SUCCESS;
+- }
+-
+- VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
+-
+- VMA_DEBUG_LOG("vmaAllocateMemoryPages");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- VkResult result = allocator->AllocateMemory(
+- *pVkMemoryRequirements,
+- false, // requiresDedicatedAllocation
+- false, // prefersDedicatedAllocation
+- VK_NULL_HANDLE, // dedicatedBuffer
+- VK_NULL_HANDLE, // dedicatedImage
+- UINT32_MAX, // dedicatedBufferImageUsage
+- *pCreateInfo,
+- VMA_SUBALLOCATION_TYPE_UNKNOWN,
+- allocationCount,
+- pAllocations);
+-
+- if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
+- {
+- for(size_t i = 0; i < allocationCount; ++i)
+- {
+- allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
+- }
+- }
+-
+- return result;
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
+- VmaAllocator allocator,
+- VkBuffer buffer,
+- const VmaAllocationCreateInfo* pCreateInfo,
+- VmaAllocation* pAllocation,
+- VmaAllocationInfo* pAllocationInfo)
+-{
+- VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
+-
+- VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- VkMemoryRequirements vkMemReq = {};
+- bool requiresDedicatedAllocation = false;
+- bool prefersDedicatedAllocation = false;
+- allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
+- requiresDedicatedAllocation,
+- prefersDedicatedAllocation);
+-
+- VkResult result = allocator->AllocateMemory(
+- vkMemReq,
+- requiresDedicatedAllocation,
+- prefersDedicatedAllocation,
+- buffer, // dedicatedBuffer
+- VK_NULL_HANDLE, // dedicatedImage
+- UINT32_MAX, // dedicatedBufferImageUsage
+- *pCreateInfo,
+- VMA_SUBALLOCATION_TYPE_BUFFER,
+- 1, // allocationCount
+- pAllocation);
+-
+- if(pAllocationInfo && result == VK_SUCCESS)
+- {
+- allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+- }
+-
+- return result;
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
+- VmaAllocator allocator,
+- VkImage image,
+- const VmaAllocationCreateInfo* pCreateInfo,
+- VmaAllocation* pAllocation,
+- VmaAllocationInfo* pAllocationInfo)
+-{
+- VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
+-
+- VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- VkMemoryRequirements vkMemReq = {};
+- bool requiresDedicatedAllocation = false;
+- bool prefersDedicatedAllocation = false;
+- allocator->GetImageMemoryRequirements(image, vkMemReq,
+- requiresDedicatedAllocation, prefersDedicatedAllocation);
+-
+- VkResult result = allocator->AllocateMemory(
+- vkMemReq,
+- requiresDedicatedAllocation,
+- prefersDedicatedAllocation,
+- VK_NULL_HANDLE, // dedicatedBuffer
+- image, // dedicatedImage
+- UINT32_MAX, // dedicatedBufferImageUsage
+- *pCreateInfo,
+- VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
+- 1, // allocationCount
+- pAllocation);
+-
+- if(pAllocationInfo && result == VK_SUCCESS)
+- {
+- allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+- }
+-
+- return result;
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
+- VmaAllocator allocator,
+- VmaAllocation allocation)
+-{
+- VMA_ASSERT(allocator);
+-
+- if(allocation == VK_NULL_HANDLE)
+- {
+- return;
+- }
+-
+- VMA_DEBUG_LOG("vmaFreeMemory");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- allocator->FreeMemory(
+- 1, // allocationCount
+- &allocation);
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
+- VmaAllocator allocator,
+- size_t allocationCount,
+- const VmaAllocation* pAllocations)
+-{
+- if(allocationCount == 0)
+- {
+- return;
+- }
+-
+- VMA_ASSERT(allocator);
+-
+- VMA_DEBUG_LOG("vmaFreeMemoryPages");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- allocator->FreeMemory(allocationCount, pAllocations);
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
+- VmaAllocator allocator,
+- VmaAllocation allocation,
+- VmaAllocationInfo* pAllocationInfo)
+-{
+- VMA_ASSERT(allocator && allocation && pAllocationInfo);
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- allocator->GetAllocationInfo(allocation, pAllocationInfo);
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
+- VmaAllocator allocator,
+- VmaAllocation allocation,
+- void* pUserData)
+-{
+- VMA_ASSERT(allocator && allocation);
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- allocation->SetUserData(allocator, pUserData);
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocation VMA_NOT_NULL allocation,
+- const char* VMA_NULLABLE pName)
+-{
+- allocation->SetName(allocator, pName);
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocation VMA_NOT_NULL allocation,
+- VkMemoryPropertyFlags* VMA_NOT_NULL pFlags)
+-{
+- VMA_ASSERT(allocator && allocation && pFlags);
+- const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
+- *pFlags = allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
+- VmaAllocator allocator,
+- VmaAllocation allocation,
+- void** ppData)
+-{
+- VMA_ASSERT(allocator && allocation && ppData);
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- return allocator->Map(allocation, ppData);
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
+- VmaAllocator allocator,
+- VmaAllocation allocation)
+-{
+- VMA_ASSERT(allocator && allocation);
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- allocator->Unmap(allocation);
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
+- VmaAllocator allocator,
+- VmaAllocation allocation,
+- VkDeviceSize offset,
+- VkDeviceSize size)
+-{
+- VMA_ASSERT(allocator && allocation);
+-
+- VMA_DEBUG_LOG("vmaFlushAllocation");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
+-
+- return res;
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
+- VmaAllocator allocator,
+- VmaAllocation allocation,
+- VkDeviceSize offset,
+- VkDeviceSize size)
+-{
+- VMA_ASSERT(allocator && allocation);
+-
+- VMA_DEBUG_LOG("vmaInvalidateAllocation");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
+-
+- return res;
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
+- VmaAllocator allocator,
+- uint32_t allocationCount,
+- const VmaAllocation* allocations,
+- const VkDeviceSize* offsets,
+- const VkDeviceSize* sizes)
+-{
+- VMA_ASSERT(allocator);
+-
+- if(allocationCount == 0)
+- {
+- return VK_SUCCESS;
+- }
+-
+- VMA_ASSERT(allocations);
+-
+- VMA_DEBUG_LOG("vmaFlushAllocations");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);
+-
+- return res;
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
+- VmaAllocator allocator,
+- uint32_t allocationCount,
+- const VmaAllocation* allocations,
+- const VkDeviceSize* offsets,
+- const VkDeviceSize* sizes)
+-{
+- VMA_ASSERT(allocator);
+-
+- if(allocationCount == 0)
+- {
+- return VK_SUCCESS;
+- }
+-
+- VMA_ASSERT(allocations);
+-
+- VMA_DEBUG_LOG("vmaInvalidateAllocations");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);
+-
+- return res;
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(
+- VmaAllocator allocator,
+- uint32_t memoryTypeBits)
+-{
+- VMA_ASSERT(allocator);
+-
+- VMA_DEBUG_LOG("vmaCheckCorruption");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- return allocator->CheckCorruption(memoryTypeBits);
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation(
+- VmaAllocator allocator,
+- const VmaDefragmentationInfo* pInfo,
+- VmaDefragmentationContext* pContext)
+-{
+- VMA_ASSERT(allocator && pInfo && pContext);
+-
+- VMA_DEBUG_LOG("vmaBeginDefragmentation");
+-
+- if (pInfo->pool != VMA_NULL)
+- {
+-        // Defragmentation is not supported on pools created with the linear algorithm.
+- if (pInfo->pool->m_BlockVector.GetAlgorithm() & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
+- return VK_ERROR_FEATURE_NOT_PRESENT;
+- }
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- *pContext = vma_new(allocator, VmaDefragmentationContext_T)(allocator, *pInfo);
+- return VK_SUCCESS;
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation(
+- VmaAllocator allocator,
+- VmaDefragmentationContext context,
+- VmaDefragmentationStats* pStats)
+-{
+- VMA_ASSERT(allocator && context);
+-
+- VMA_DEBUG_LOG("vmaEndDefragmentation");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- if (pStats)
+- context->GetStats(*pStats);
+- vma_delete(allocator, context);
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaDefragmentationContext VMA_NOT_NULL context,
+- VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo)
+-{
+- VMA_ASSERT(context && pPassInfo);
+-
+- VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- return context->DefragmentPassBegin(*pPassInfo);
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaDefragmentationContext VMA_NOT_NULL context,
+- VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo)
+-{
+- VMA_ASSERT(context && pPassInfo);
+-
+- VMA_DEBUG_LOG("vmaEndDefragmentationPass");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- return context->DefragmentPassEnd(*pPassInfo);
+-}
+-
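// A sketch of the intended driving loop for the four defragmentation entry points
// above, following the pass pattern from the VMA documentation. Recreating the
// moved buffers/images and recording the copies is application-specific and elided.
VmaDefragmentationInfo defragInfo = {};
defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT;

VmaDefragmentationContext defragCtx = VMA_NULL;
if(vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx) == VK_SUCCESS)
{
    VmaDefragmentationPassMoveInfo pass = {};
    // VK_INCOMPLETE from Begin means this pass has moves to process.
    while(vmaBeginDefragmentationPass(allocator, defragCtx, &pass) == VK_INCOMPLETE)
    {
        // ... copy pass.pMoves[0 .. pass.moveCount) to their new locations ...
        if(vmaEndDefragmentationPass(allocator, defragCtx, &pass) == VK_SUCCESS)
            break; // nothing left to move
    }
    VmaDefragmentationStats defragStats = {};
    vmaEndDefragmentation(allocator, defragCtx, &defragStats);
}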
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
+- VmaAllocator allocator,
+- VmaAllocation allocation,
+- VkBuffer buffer)
+-{
+- VMA_ASSERT(allocator && allocation && buffer);
+-
+- VMA_DEBUG_LOG("vmaBindBufferMemory");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
+- VmaAllocator allocator,
+- VmaAllocation allocation,
+- VkDeviceSize allocationLocalOffset,
+- VkBuffer buffer,
+- const void* pNext)
+-{
+- VMA_ASSERT(allocator && allocation && buffer);
+-
+- VMA_DEBUG_LOG("vmaBindBufferMemory2");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
+- VmaAllocator allocator,
+- VmaAllocation allocation,
+- VkImage image)
+-{
+- VMA_ASSERT(allocator && allocation && image);
+-
+- VMA_DEBUG_LOG("vmaBindImageMemory");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
+- VmaAllocator allocator,
+- VmaAllocation allocation,
+- VkDeviceSize allocationLocalOffset,
+- VkImage image,
+- const void* pNext)
+-{
+- VMA_ASSERT(allocator && allocation && image);
+-
+- VMA_DEBUG_LOG("vmaBindImageMemory2");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
+- VmaAllocator allocator,
+- const VkBufferCreateInfo* pBufferCreateInfo,
+- const VmaAllocationCreateInfo* pAllocationCreateInfo,
+- VkBuffer* pBuffer,
+- VmaAllocation* pAllocation,
+- VmaAllocationInfo* pAllocationInfo)
+-{
+- VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
+-
+- if(pBufferCreateInfo->size == 0)
+- {
+- return VK_ERROR_INITIALIZATION_FAILED;
+- }
+- if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
+- !allocator->m_UseKhrBufferDeviceAddress)
+- {
+- VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
+- return VK_ERROR_INITIALIZATION_FAILED;
+- }
+-
+- VMA_DEBUG_LOG("vmaCreateBuffer");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- *pBuffer = VK_NULL_HANDLE;
+- *pAllocation = VK_NULL_HANDLE;
+-
+- // 1. Create VkBuffer.
+- VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
+- allocator->m_hDevice,
+- pBufferCreateInfo,
+- allocator->GetAllocationCallbacks(),
+- pBuffer);
+- if(res >= 0)
+- {
+- // 2. vkGetBufferMemoryRequirements.
+- VkMemoryRequirements vkMemReq = {};
+- bool requiresDedicatedAllocation = false;
+- bool prefersDedicatedAllocation = false;
+- allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
+- requiresDedicatedAllocation, prefersDedicatedAllocation);
+-
+- // 3. Allocate memory using allocator.
+- res = allocator->AllocateMemory(
+- vkMemReq,
+- requiresDedicatedAllocation,
+- prefersDedicatedAllocation,
+- *pBuffer, // dedicatedBuffer
+- VK_NULL_HANDLE, // dedicatedImage
+- pBufferCreateInfo->usage, // dedicatedBufferImageUsage
+- *pAllocationCreateInfo,
+- VMA_SUBALLOCATION_TYPE_BUFFER,
+- 1, // allocationCount
+- pAllocation);
+-
+- if(res >= 0)
+- {
+-            // 4. Bind buffer with memory.
+- if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
+- {
+- res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
+- }
+- if(res >= 0)
+- {
+- // All steps succeeded.
+- #if VMA_STATS_STRING_ENABLED
+- (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
+- #endif
+- if(pAllocationInfo != VMA_NULL)
+- {
+- allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+- }
+-
+- return VK_SUCCESS;
+- }
+- allocator->FreeMemory(
+- 1, // allocationCount
+- pAllocation);
+- *pAllocation = VK_NULL_HANDLE;
+- (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
+- *pBuffer = VK_NULL_HANDLE;
+- return res;
+- }
+- (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
+- *pBuffer = VK_NULL_HANDLE;
+- return res;
+- }
+- return res;
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
+- VmaAllocator allocator,
+- const VkBufferCreateInfo* pBufferCreateInfo,
+- const VmaAllocationCreateInfo* pAllocationCreateInfo,
+- VkDeviceSize minAlignment,
+- VkBuffer* pBuffer,
+- VmaAllocation* pAllocation,
+- VmaAllocationInfo* pAllocationInfo)
+-{
+- VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && VmaIsPow2(minAlignment) && pBuffer && pAllocation);
+-
+- if(pBufferCreateInfo->size == 0)
+- {
+- return VK_ERROR_INITIALIZATION_FAILED;
+- }
+- if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
+- !allocator->m_UseKhrBufferDeviceAddress)
+- {
+- VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
+- return VK_ERROR_INITIALIZATION_FAILED;
+- }
+-
+- VMA_DEBUG_LOG("vmaCreateBufferWithAlignment");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- *pBuffer = VK_NULL_HANDLE;
+- *pAllocation = VK_NULL_HANDLE;
+-
+- // 1. Create VkBuffer.
+- VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
+- allocator->m_hDevice,
+- pBufferCreateInfo,
+- allocator->GetAllocationCallbacks(),
+- pBuffer);
+- if(res >= 0)
+- {
+- // 2. vkGetBufferMemoryRequirements.
+- VkMemoryRequirements vkMemReq = {};
+- bool requiresDedicatedAllocation = false;
+- bool prefersDedicatedAllocation = false;
+- allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
+- requiresDedicatedAllocation, prefersDedicatedAllocation);
+-
+- // 2a. Include minAlignment
+- vkMemReq.alignment = VMA_MAX(vkMemReq.alignment, minAlignment);
+-
+- // 3. Allocate memory using allocator.
+- res = allocator->AllocateMemory(
+- vkMemReq,
+- requiresDedicatedAllocation,
+- prefersDedicatedAllocation,
+- *pBuffer, // dedicatedBuffer
+- VK_NULL_HANDLE, // dedicatedImage
+- pBufferCreateInfo->usage, // dedicatedBufferImageUsage
+- *pAllocationCreateInfo,
+- VMA_SUBALLOCATION_TYPE_BUFFER,
+- 1, // allocationCount
+- pAllocation);
+-
+- if(res >= 0)
+- {
+-            // 4. Bind buffer with memory.
+- if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
+- {
+- res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
+- }
+- if(res >= 0)
+- {
+- // All steps succeeded.
+- #if VMA_STATS_STRING_ENABLED
+- (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
+- #endif
+- if(pAllocationInfo != VMA_NULL)
+- {
+- allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+- }
+-
+- return VK_SUCCESS;
+- }
+- allocator->FreeMemory(
+- 1, // allocationCount
+- pAllocation);
+- *pAllocation = VK_NULL_HANDLE;
+- (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
+- *pBuffer = VK_NULL_HANDLE;
+- return res;
+- }
+- (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
+- *pBuffer = VK_NULL_HANDLE;
+- return res;
+- }
+- return res;
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocation VMA_NOT_NULL allocation,
+- const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+- VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer)
+-{
+- return vmaCreateAliasingBuffer2(allocator, allocation, 0, pBufferCreateInfo, pBuffer);
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocation VMA_NOT_NULL allocation,
+- VkDeviceSize allocationLocalOffset,
+- const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+- VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer)
+-{
+- VMA_ASSERT(allocator && pBufferCreateInfo && pBuffer && allocation);
+- VMA_ASSERT(allocationLocalOffset + pBufferCreateInfo->size <= allocation->GetSize());
+-
+- VMA_DEBUG_LOG("vmaCreateAliasingBuffer2");
+-
+- *pBuffer = VK_NULL_HANDLE;
+-
+- if (pBufferCreateInfo->size == 0)
+- {
+- return VK_ERROR_INITIALIZATION_FAILED;
+- }
+- if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
+- !allocator->m_UseKhrBufferDeviceAddress)
+- {
+- VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
+- return VK_ERROR_INITIALIZATION_FAILED;
+- }
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- // 1. Create VkBuffer.
+- VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
+- allocator->m_hDevice,
+- pBufferCreateInfo,
+- allocator->GetAllocationCallbacks(),
+- pBuffer);
+- if (res >= 0)
+- {
+- // 2. Bind buffer with memory.
+- res = allocator->BindBufferMemory(allocation, allocationLocalOffset, *pBuffer, VMA_NULL);
+- if (res >= 0)
+- {
+- return VK_SUCCESS;
+- }
+- (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
+- }
+- return res;
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
+- VmaAllocator allocator,
+- VkBuffer buffer,
+- VmaAllocation allocation)
+-{
+- VMA_ASSERT(allocator);
+-
+- if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
+- {
+- return;
+- }
+-
+- VMA_DEBUG_LOG("vmaDestroyBuffer");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- if(buffer != VK_NULL_HANDLE)
+- {
+- (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
+- }
+-
+- if(allocation != VK_NULL_HANDLE)
+- {
+- allocator->FreeMemory(
+- 1, // allocationCount
+- &allocation);
+- }
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
+- VmaAllocator allocator,
+- const VkImageCreateInfo* pImageCreateInfo,
+- const VmaAllocationCreateInfo* pAllocationCreateInfo,
+- VkImage* pImage,
+- VmaAllocation* pAllocation,
+- VmaAllocationInfo* pAllocationInfo)
+-{
+- VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
+-
+- if(pImageCreateInfo->extent.width == 0 ||
+- pImageCreateInfo->extent.height == 0 ||
+- pImageCreateInfo->extent.depth == 0 ||
+- pImageCreateInfo->mipLevels == 0 ||
+- pImageCreateInfo->arrayLayers == 0)
+- {
+- return VK_ERROR_INITIALIZATION_FAILED;
+- }
+-
+- VMA_DEBUG_LOG("vmaCreateImage");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- *pImage = VK_NULL_HANDLE;
+- *pAllocation = VK_NULL_HANDLE;
+-
+- // 1. Create VkImage.
+- VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
+- allocator->m_hDevice,
+- pImageCreateInfo,
+- allocator->GetAllocationCallbacks(),
+- pImage);
+- if(res >= 0)
+- {
+- VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
+- VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
+- VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
+-
+- // 2. Allocate memory using allocator.
+- VkMemoryRequirements vkMemReq = {};
+- bool requiresDedicatedAllocation = false;
+- bool prefersDedicatedAllocation = false;
+- allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
+- requiresDedicatedAllocation, prefersDedicatedAllocation);
+-
+- res = allocator->AllocateMemory(
+- vkMemReq,
+- requiresDedicatedAllocation,
+- prefersDedicatedAllocation,
+- VK_NULL_HANDLE, // dedicatedBuffer
+- *pImage, // dedicatedImage
+- pImageCreateInfo->usage, // dedicatedBufferImageUsage
+- *pAllocationCreateInfo,
+- suballocType,
+- 1, // allocationCount
+- pAllocation);
+-
+- if(res >= 0)
+- {
+- // 3. Bind image with memory.
+- if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
+- {
+- res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
+- }
+- if(res >= 0)
+- {
+- // All steps succeeded.
+- #if VMA_STATS_STRING_ENABLED
+- (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
+- #endif
+- if(pAllocationInfo != VMA_NULL)
+- {
+- allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+- }
+-
+- return VK_SUCCESS;
+- }
+- allocator->FreeMemory(
+- 1, // allocationCount
+- pAllocation);
+- *pAllocation = VK_NULL_HANDLE;
+- (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
+- *pImage = VK_NULL_HANDLE;
+- return res;
+- }
+- (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
+- *pImage = VK_NULL_HANDLE;
+- return res;
+- }
+- return res;
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocation VMA_NOT_NULL allocation,
+- const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
+- VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage)
+-{
+- return vmaCreateAliasingImage2(allocator, allocation, 0, pImageCreateInfo, pImage);
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VmaAllocation VMA_NOT_NULL allocation,
+- VkDeviceSize allocationLocalOffset,
+- const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
+- VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage)
+-{
+- VMA_ASSERT(allocator && pImageCreateInfo && pImage && allocation);
+-
+- *pImage = VK_NULL_HANDLE;
+-
+-    VMA_DEBUG_LOG("vmaCreateAliasingImage2");
+-
+- if (pImageCreateInfo->extent.width == 0 ||
+- pImageCreateInfo->extent.height == 0 ||
+- pImageCreateInfo->extent.depth == 0 ||
+- pImageCreateInfo->mipLevels == 0 ||
+- pImageCreateInfo->arrayLayers == 0)
+- {
+- return VK_ERROR_INITIALIZATION_FAILED;
+- }
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- // 1. Create VkImage.
+- VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
+- allocator->m_hDevice,
+- pImageCreateInfo,
+- allocator->GetAllocationCallbacks(),
+- pImage);
+- if (res >= 0)
+- {
+- // 2. Bind image with memory.
+- res = allocator->BindImageMemory(allocation, allocationLocalOffset, *pImage, VMA_NULL);
+- if (res >= 0)
+- {
+- return VK_SUCCESS;
+- }
+- (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
+- }
+- return res;
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
+- VmaAllocator VMA_NOT_NULL allocator,
+- VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
+- VmaAllocation VMA_NULLABLE allocation)
+-{
+- VMA_ASSERT(allocator);
+-
+- if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
+- {
+- return;
+- }
+-
+- VMA_DEBUG_LOG("vmaDestroyImage");
+-
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK
+-
+- if(image != VK_NULL_HANDLE)
+- {
+- (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
+- }
+- if(allocation != VK_NULL_HANDLE)
+- {
+- allocator->FreeMemory(
+- 1, // allocationCount
+- &allocation);
+- }
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(
+- const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
+- VmaVirtualBlock VMA_NULLABLE * VMA_NOT_NULL pVirtualBlock)
+-{
+- VMA_ASSERT(pCreateInfo && pVirtualBlock);
+- VMA_ASSERT(pCreateInfo->size > 0);
+- VMA_DEBUG_LOG("vmaCreateVirtualBlock");
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
+- *pVirtualBlock = vma_new(pCreateInfo->pAllocationCallbacks, VmaVirtualBlock_T)(*pCreateInfo);
+- VkResult res = (*pVirtualBlock)->Init();
+- if(res < 0)
+- {
+- vma_delete(pCreateInfo->pAllocationCallbacks, *pVirtualBlock);
+- *pVirtualBlock = VK_NULL_HANDLE;
+- }
+- return res;
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(VmaVirtualBlock VMA_NULLABLE virtualBlock)
+-{
+- if(virtualBlock != VK_NULL_HANDLE)
+- {
+- VMA_DEBUG_LOG("vmaDestroyVirtualBlock");
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
+- VkAllocationCallbacks allocationCallbacks = virtualBlock->m_AllocationCallbacks; // Have to copy the callbacks when destroying.
+- vma_delete(&allocationCallbacks, virtualBlock);
+- }
+-}
+-
+-VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(VmaVirtualBlock VMA_NOT_NULL virtualBlock)
+-{
+- VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
+- VMA_DEBUG_LOG("vmaIsVirtualBlockEmpty");
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
+- return virtualBlock->IsEmpty() ? VK_TRUE : VK_FALSE;
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+- VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo)
+-{
+- VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pVirtualAllocInfo != VMA_NULL);
+- VMA_DEBUG_LOG("vmaGetVirtualAllocationInfo");
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
+- virtualBlock->GetAllocationInfo(allocation, *pVirtualAllocInfo);
+-}
+-
+-VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+- const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,
+- VkDeviceSize* VMA_NULLABLE pOffset)
+-{
+- VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pCreateInfo != VMA_NULL && pAllocation != VMA_NULL);
+- VMA_DEBUG_LOG("vmaVirtualAllocate");
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
+- return virtualBlock->Allocate(*pCreateInfo, *pAllocation, pOffset);
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation)
+-{
+- if(allocation != VK_NULL_HANDLE)
+- {
+- VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
+- VMA_DEBUG_LOG("vmaVirtualFree");
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
+- virtualBlock->Free(allocation);
+- }
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(VmaVirtualBlock VMA_NOT_NULL virtualBlock)
+-{
+- VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
+- VMA_DEBUG_LOG("vmaClearVirtualBlock");
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
+- virtualBlock->Clear();
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+- VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, void* VMA_NULLABLE pUserData)
+-{
+- VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
+- VMA_DEBUG_LOG("vmaSetVirtualAllocationUserData");
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
+- virtualBlock->SetAllocationUserData(allocation, pUserData);
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+- VmaStatistics* VMA_NOT_NULL pStats)
+-{
+- VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL);
+- VMA_DEBUG_LOG("vmaGetVirtualBlockStatistics");
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
+- virtualBlock->GetStatistics(*pStats);
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+- VmaDetailedStatistics* VMA_NOT_NULL pStats)
+-{
+- VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL);
+- VMA_DEBUG_LOG("vmaCalculateVirtualBlockStatistics");
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
+- virtualBlock->CalculateDetailedStatistics(*pStats);
+-}
+-
+-#if VMA_STATS_STRING_ENABLED
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+- char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString, VkBool32 detailedMap)
+-{
+- VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && ppStatsString != VMA_NULL);
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
+- const VkAllocationCallbacks* allocationCallbacks = virtualBlock->GetAllocationCallbacks();
+- VmaStringBuilder sb(allocationCallbacks);
+- virtualBlock->BuildStatsString(detailedMap != VK_FALSE, sb);
+- *ppStatsString = VmaCreateStringCopy(allocationCallbacks, sb.GetData(), sb.GetLength());
+-}
+-
+-VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+- char* VMA_NULLABLE pStatsString)
+-{
+- if(pStatsString != VMA_NULL)
+- {
+- VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
+- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
+- VmaFreeString(virtualBlock->GetAllocationCallbacks(), pStatsString);
+- }
+-}
+-#endif // VMA_STATS_STRING_ENABLED
+-#endif // _VMA_PUBLIC_INTERFACE
+-#endif // VMA_IMPLEMENTATION
+-
+-/**
+-\page quick_start Quick start
+-
+-\section quick_start_project_setup Project setup
+-
+-Vulkan Memory Allocator comes in the form of a "stb-style" single header file.
+-You don't need to build it as a separate library project.
+-You can add this file directly to your project and submit it to your code repository next to your other source files.
+-
+-"Single header" doesn't mean that everything is contained in C/C++ declarations,
+-as tends to be the case for inline functions or C++ templates.
+-It means that the implementation is bundled with the interface in a single file and needs to be extracted using a preprocessor macro.
+-If you don't do it properly, you will get linker errors.
+-
+-To do it properly:
+-
+--# Include the "vk_mem_alloc.h" file in each CPP file where you want to use the library.
+-   This includes declarations of all members of the library.
+--# In exactly one CPP file, define the following macro before this include.
+-   It also enables internal definitions.
+-
+-\code
+-#define VMA_IMPLEMENTATION
+-#include "vk_mem_alloc.h"
+-\endcode
+-
+-It may be a good idea to create a dedicated CPP file just for this purpose.
+-
+-This library includes the header `<vulkan/vulkan.h>`, which in turn
+-includes `<windows.h>` on Windows. If you need some specific macros defined
+-before including these headers (like `WIN32_LEAN_AND_MEAN` or
+-`WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define
+-them before every `#include` of this library.
+-
+-This library is written in C++, but it has a C-compatible interface.
+-Thus you can include and use vk_mem_alloc.h in C or C++ code, but the full
+-implementation with the `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C.
+-Some features of C++14 are used. STL containers, RTTI, and C++ exceptions are not used.
+-
+-
+-\section quick_start_initialization Initialization
+-
+-At program startup:
+-
+--# Initialize Vulkan to have `VkPhysicalDevice`, `VkDevice` and `VkInstance` objects.
+--# Fill the VmaAllocatorCreateInfo structure and create a #VmaAllocator object by
+-   calling vmaCreateAllocator().
+-
+-Only the members `physicalDevice`, `device`, and `instance` are required.
+-However, you should inform the library which Vulkan version you use by setting
+-VmaAllocatorCreateInfo::vulkanApiVersion and which extensions you enabled
+-by setting VmaAllocatorCreateInfo::flags (like #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT for VK_KHR_buffer_device_address).
+-Otherwise, VMA would use only the features of Vulkan 1.0 core with no extensions.
+-
+-\subsection quick_start_initialization_selecting_vulkan_version Selecting Vulkan version
+-
+-VMA supports Vulkan versions down to 1.0, for backward compatibility.
+-If you want to use a higher version, you need to inform the library about it.
+-This is a two-step process.
+-
+-<b>Step 1: Compile time.</b> By default, VMA compiles with code supporting the highest
+-Vulkan version found in the included `<vulkan/vulkan.h>` that is also supported by the library.
+-If this is OK, you don't need to do anything.
+-However, if you want to compile VMA as if only some lower Vulkan version was available,
+-define the macro `VMA_VULKAN_VERSION` before every `#include "vk_mem_alloc.h"`.
+-It should have a decimal numeric value in the form ABBBCCC, where A = major, BBB = minor, CCC = patch Vulkan version.
+-For example, to compile against Vulkan 1.2:
+-
+-\code
+-#define VMA_VULKAN_VERSION 1002000 // Vulkan 1.2
+-#include "vk_mem_alloc.h"
+-\endcode
+-
+-<b>Step 2: Runtime.</b> Even when compiled with a higher Vulkan version available,
+-VMA can use only the features of a lower version, which is configurable during creation of the #VmaAllocator object.
+-By default, only Vulkan 1.0 is used.
+-To initialize the allocator with support for a higher Vulkan version, you need to set the member
+-VmaAllocatorCreateInfo::vulkanApiVersion to an appropriate value, e.g. using constants like `VK_API_VERSION_1_2`.
+-See the code sample below.
+-
+-\subsection quick_start_initialization_importing_vulkan_functions Importing Vulkan functions
+-
+-You may need to configure importing Vulkan functions. There are 3 ways to do this:
+-
+--# **If you link with Vulkan static library** (e.g. "vulkan-1.lib" on Windows):
+- - You don't need to do anything.
+- - VMA will use these, as macro `VMA_STATIC_VULKAN_FUNCTIONS` is defined to 1 by default.
+--# **If you want VMA to fetch pointers to Vulkan functions dynamically** using `vkGetInstanceProcAddr`,
+- `vkGetDeviceProcAddr` (this is the option presented in the example below):
+- - Define `VMA_STATIC_VULKAN_FUNCTIONS` to 0, `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 1.
+- - Provide pointers to these two functions via VmaVulkanFunctions::vkGetInstanceProcAddr,
+- VmaVulkanFunctions::vkGetDeviceProcAddr.
+- - The library will fetch pointers to all other functions it needs internally.
+--# **If you fetch pointers to all Vulkan functions in a custom way**, e.g. using some loader like
+- [Volk](https://github.com/zeux/volk):
+- - Define `VMA_STATIC_VULKAN_FUNCTIONS` and `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 0.
+- - Pass these pointers via structure #VmaVulkanFunctions.
+-
+-Example for case 2:
+-
+-\code
+-#define VMA_STATIC_VULKAN_FUNCTIONS 0
+-#define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
+-#include "vk_mem_alloc.h"
+-
+-...
+-
+-VmaVulkanFunctions vulkanFunctions = {};
+-vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr;
+-vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr;
+-
+-VmaAllocatorCreateInfo allocatorCreateInfo = {};
+-allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;
+-allocatorCreateInfo.physicalDevice = physicalDevice;
+-allocatorCreateInfo.device = device;
+-allocatorCreateInfo.instance = instance;
+-allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
+-
+-VmaAllocator allocator;
+-vmaCreateAllocator(&allocatorCreateInfo, &allocator);
+-\endcode
+-
+-
+-\section quick_start_resource_allocation Resource allocation
+-
+-When you want to create a buffer or image:
+-
+--# Fill the `VkBufferCreateInfo` / `VkImageCreateInfo` structure.
+--# Fill the VmaAllocationCreateInfo structure.
+--# Call vmaCreateBuffer() / vmaCreateImage() to get a `VkBuffer`/`VkImage` with memory
+-   already allocated and bound to it, plus a #VmaAllocation object that represents its underlying memory.
+-
+-\code
+-VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+-bufferInfo.size = 65536;
+-bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+-
+-VmaAllocationCreateInfo allocInfo = {};
+-allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
+-
+-VkBuffer buffer;
+-VmaAllocation allocation;
+-vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+-\endcode
+-
+-Don't forget to destroy your objects when no longer needed:
+-
+-\code
+-vmaDestroyBuffer(allocator, buffer, allocation);
+-vmaDestroyAllocator(allocator);
+-\endcode
+-
+-
+-\page choosing_memory_type Choosing memory type
+-
+-Physical devices in Vulkan support various combinations of memory heaps and
+-types. Helping you choose the correct and optimal memory type for your specific
+-resource is one of the key features of this library. You can use it by filling
+-appropriate members of the VmaAllocationCreateInfo structure, as described below.
+-You can also combine multiple methods.
+-
+--# If you just want to find the memory type index that meets your requirements, you
+-   can use one of the functions vmaFindMemoryTypeIndexForBufferInfo(),
+-   vmaFindMemoryTypeIndexForImageInfo(), vmaFindMemoryTypeIndex().
+--# If you want to allocate a region of device memory without association with any
+-   specific image or buffer, you can use the function vmaAllocateMemory(). Usage of
+-   this function is not recommended and usually not needed.
+-   The vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once,
+-   which may be useful for sparse binding.
+--# If you already have a buffer or an image created, want to allocate memory
+-   for it, and will then bind it yourself, you can use the functions
+-   vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage().
+-   For binding, you should use the functions vmaBindBufferMemory(), vmaBindImageMemory()
+-   or their extended versions: vmaBindBufferMemory2(), vmaBindImageMemory2().
+--# **This is the easiest and recommended way to use this library:**
+-   If you want to create a buffer or an image, allocate memory for it, and bind
+-   them together, all in one call, you can use the functions vmaCreateBuffer(),
+-   vmaCreateImage().
+-
+-When using option 3 or 4, the library internally queries Vulkan for the memory types
+-supported by that buffer or image (function `vkGetBufferMemoryRequirements()`)
+-and uses only one of these types.
+-
+-If no memory type can be found that meets all the requirements, these functions
+-return `VK_ERROR_FEATURE_NOT_PRESENT`.
+-
+-You can leave the VmaAllocationCreateInfo structure completely filled with zeros.
+-It means no requirements are specified for the memory type.
+-It is valid, although not very useful.
+-
+-\section choosing_memory_type_usage Usage
+-
+-The easiest way to specify memory requirements is to fill the member
+-VmaAllocationCreateInfo::usage using one of the values of enum #VmaMemoryUsage.
+-It defines high-level, common usage types.
+-Since version 3 of the library, it is recommended to use #VMA_MEMORY_USAGE_AUTO to let it select the best memory type for your resource automatically.
+-
+-For example, if you want to create a uniform buffer that will be filled using
+-transfer only once or infrequently and then used for rendering every frame, you can
+-do it using the following code. The buffer will most likely end up in a memory type with
+-`VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT` so it is fast to access by the GPU device.
+-
+-\code
+-VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+-bufferInfo.size = 65536;
+-bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+-
+-VmaAllocationCreateInfo allocInfo = {};
+-allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
+-
+-VkBuffer buffer;
+-VmaAllocation allocation;
+-vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+-\endcode
+-
+-If you have a preference for putting the resource in GPU (device) memory or CPU (host) memory
+-on systems with a discrete graphics card where these memories are separate, you can use
+-#VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST.
+-
+-When using `VMA_MEMORY_USAGE_AUTO*` while you want to map the allocated memory,
+-you also need to specify one of the host access flags:
+-#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
+-This will help the library decide on a preferred memory type and ensure it has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
+-so you can map it.
+-
+-For example, a staging buffer that will be filled via a mapped pointer and then
+-used as a source of transfer to the buffer described previously can be created like this.
+-It will likely end up in a memory type that is `HOST_VISIBLE` and `HOST_COHERENT`
+-but not `HOST_CACHED` (meaning uncached, write-combined) and not `DEVICE_LOCAL` (meaning system RAM).
+-
+-\code
+-VkBufferCreateInfo stagingBufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+-stagingBufferInfo.size = 65536;
+-stagingBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+-
+-VmaAllocationCreateInfo stagingAllocInfo = {};
+-stagingAllocInfo.usage = VMA_MEMORY_USAGE_AUTO;
+-stagingAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
+-
+-VkBuffer stagingBuffer;
+-VmaAllocation stagingAllocation;
+-vmaCreateBuffer(allocator, &stagingBufferInfo, &stagingAllocInfo, &stagingBuffer, &stagingAllocation, nullptr);
+-\endcode
+-
+-For more examples of creating different kinds of resources, see chapter \ref usage_patterns.
+-
+-Usage values `VMA_MEMORY_USAGE_AUTO*` are legal to use only when the library knows
+-about the resource being created, i.e. when a `VkBufferCreateInfo` / `VkImageCreateInfo` is passed,
+-so they work with functions like vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo() etc.
+-If you allocate raw memory using the function vmaAllocateMemory(), you have to use other means of selecting the
+-memory type, as described below.
+-
+-\note
+-Old usage values (`VMA_MEMORY_USAGE_GPU_ONLY`, `VMA_MEMORY_USAGE_CPU_ONLY`,
+-`VMA_MEMORY_USAGE_CPU_TO_GPU`, `VMA_MEMORY_USAGE_GPU_TO_CPU`, `VMA_MEMORY_USAGE_CPU_COPY`)
+-are still available and work the same way as in previous versions of the library
+-for backward compatibility, but they are not recommended.
+-
+-\section choosing_memory_type_required_preferred_flags Required and preferred flags
+-
+-You can specify more detailed requirements by filling the members
+-VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags
+-with a combination of bits from enum `VkMemoryPropertyFlags`. For example,
+-if you want to create a buffer that will be persistently mapped on the host (so it
+-must be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`,
+-use the following code:
+-
+-\code
+-VmaAllocationCreateInfo allocInfo = {};
+-allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+-allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+-allocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;
+-
+-VkBuffer buffer;
+-VmaAllocation allocation;
+-vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+-\endcode
+-
+-A memory type is chosen that has all the required flags and as many preferred
+-flags set as possible.
+-
+-The value passed in VmaAllocationCreateInfo::usage is internally converted to a set of required and preferred flags,
+-plus some extra "magic" (heuristics).
+-
+-\section choosing_memory_type_explicit_memory_types Explicit memory types
+-
+-If you inspected the memory types available on the physical device and you have
+-a preference for the memory types that you want to use, you can fill the member
+-VmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each set bit
+-means that a memory type with that index is allowed to be used for the
+-allocation. The special value 0, just like `UINT32_MAX`, means there are no
+-restrictions on the memory type index.
+-
+-Please note that this member is NOT just a memory type index.
+-Still, you can use it to choose just one specific memory type.
+-For example, if you already determined that your buffer should be created in
+-memory type 2, use the following code:
+-
+-\code
+-uint32_t memoryTypeIndex = 2;
+-
+-VmaAllocationCreateInfo allocInfo = {};
+-allocInfo.memoryTypeBits = 1u << memoryTypeIndex;
+-
+-VkBuffer buffer;
+-VmaAllocation allocation;
+-vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+-\endcode
+-
+-
+-\section choosing_memory_type_custom_memory_pools Custom memory pools
+-
+-If you allocate from a custom memory pool, none of the ways of specifying memory
+-requirements described above are applicable, and the aforementioned members
+-of the VmaAllocationCreateInfo structure are ignored. The memory type is selected
+-explicitly when creating the pool and is then used to make all the allocations from
+-that pool. For further details, see \ref custom_memory_pools.
+-
+-\section choosing_memory_type_dedicated_allocations Dedicated allocations
+-
+-Memory for allocations is reserved out of a larger block of `VkDeviceMemory`
+-allocated from Vulkan internally. That is the main feature of this whole library.
+-You can still request a separate memory block to be created for an allocation,
+-just like you would do in a trivial solution without using any allocator.
+-In that case, a buffer or image is always bound to that memory at offset 0.
+-This is called a "dedicated allocation".
+-You can explicitly request it by using the flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT, as shown in the sketch after the list below.
+-The library can also internally decide to use a dedicated allocation in some cases, e.g.:
+-
+-- When the size of the allocation is large.
+-- When [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension is enabled
+- and it reports that dedicated allocation is required or recommended for the resource.
+-- When allocation of the next big memory block fails due to insufficient device memory,
+-  but an allocation with the exact requested size succeeds.
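+-
+-A minimal sketch of requesting a dedicated allocation explicitly (assuming `allocator` and a filled `bufferInfo` already exist):
+-
+-\code
+-VmaAllocationCreateInfo allocCreateInfo = {};
+-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+-// Request a separate VkDeviceMemory block just for this buffer.
+-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+-
+-VkBuffer buf;
+-VmaAllocation alloc;
+-vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, &buf, &alloc, nullptr);
+-\endcode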
+-
+-
+-\page memory_mapping Memory mapping
+-
+-To "map memory" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`,
+-to be able to read from it or write to it in CPU code.
+-Mapping is possible only for memory allocated from a memory type that has
+-the `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag.
+-The functions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose.
+-You can use them directly with memory allocated by this library,
+-but it is not recommended because of the following issue:
+-mapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed.
+-This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan.
+-Because of this, Vulkan Memory Allocator provides the following facilities:
+-
+-\note If you want to be able to map an allocation, you need to specify one of the flags
+-#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
+-in VmaAllocationCreateInfo::flags. These flags are required for an allocation to be mappable
+-when using #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` enum values.
+-For other usage values they are ignored, and every such allocation made in a `HOST_VISIBLE` memory type is mappable,
+-but the flags can still be used for consistency.
+-
+-\section memory_mapping_mapping_functions Mapping functions
+-
+-The library provides the following functions for mapping of a specific #VmaAllocation: vmaMapMemory(), vmaUnmapMemory().
+-They are safer and more convenient to use than standard Vulkan functions.
+-You can map an allocation multiple times simultaneously - mapping is reference-counted internally.
+-You can also map different allocations simultaneously regardless of whether they use the same `VkDeviceMemory` block.
+-The way it is implemented is that the library always maps the entire memory block, not just the region of the allocation.
+-For further details, see the description of the vmaMapMemory() function.
+-Example:
+-
+-\code
+-// Having these objects initialized:
+-struct ConstantBuffer
+-{
+- ...
+-};
+-ConstantBuffer constantBufferData = ...
+-
+-VmaAllocator allocator = ...
+-VkBuffer constantBuffer = ...
+-VmaAllocation constantBufferAllocation = ...
+-
+-// You can map and fill your buffer using following code:
+-
+-void* mappedData;
+-vmaMapMemory(allocator, constantBufferAllocation, &mappedData);
+-memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
+-vmaUnmapMemory(allocator, constantBufferAllocation);
+-\endcode
+-
+-When mapping, you may see a warning from Vulkan validation layer similar to this one:
+-
+-<i>Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.</i>
+-
+-It happens because the library maps the entire `VkDeviceMemory` block, where different
+-types of images and buffers may end up together, especially on GPUs with unified memory like Intel.
+-You can safely ignore it if you are sure you access only the memory of the intended
+-object that you wanted to map.
+-
+-
+-\section memory_mapping_persistently_mapped_memory Persistently mapped memory
+-
+-Keeping your memory persistently mapped is generally OK in Vulkan.
+-You don't need to unmap it before using its data on the GPU.
+-The library provides a special feature designed for that:
+-allocations made with the #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in
+-VmaAllocationCreateInfo::flags stay mapped all the time,
+-so you can just access the CPU pointer to the memory any time
+-without needing to call any "map" or "unmap" function.
+-Example:
+-
+-\code
+-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+-bufCreateInfo.size = sizeof(ConstantBuffer);
+-bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+-
+-VmaAllocationCreateInfo allocCreateInfo = {};
+-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+- VMA_ALLOCATION_CREATE_MAPPED_BIT;
+-
+-VkBuffer buf;
+-VmaAllocation alloc;
+-VmaAllocationInfo allocInfo;
+-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+-
+-// Buffer is already mapped. You can access its memory.
+-memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
+-\endcode
+-
+-\note #VMA_ALLOCATION_CREATE_MAPPED_BIT by itself doesn't guarantee that the allocation will end up
+-in a mappable memory type.
+-For this, you need to also specify #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or
+-#VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
+-#VMA_ALLOCATION_CREATE_MAPPED_BIT only guarantees that if the memory is `HOST_VISIBLE`, the allocation will be mapped on creation.
+-For an example of how to make use of this fact, see section \ref usage_patterns_advanced_data_uploading.
+-
+-\section memory_mapping_cache_control Cache flush and invalidate
+-
+-Memory in Vulkan doesn't need to be unmapped before using it on the GPU,
+-but unless a memory type has the `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set,
+-you need to manually **invalidate** the cache before reading from a mapped pointer
+-and **flush** the cache after writing to a mapped pointer.
+-Map/unmap operations don't do that automatically.
+-Vulkan provides the functions `vkFlushMappedMemoryRanges()` and
+-`vkInvalidateMappedMemoryRanges()` for this purpose, but this library provides more convenient
+-functions that refer to a given allocation object: vmaFlushAllocation(),
+-vmaInvalidateAllocation(),
+-or multiple objects at once: vmaFlushAllocations(), vmaInvalidateAllocations().
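+-
+-A minimal sketch of the flush pattern (assuming `allocator`, a host-visible #VmaAllocation `alloc`, and some CPU data `srcData` of `srcDataSize` bytes already exist):
+-
+-\code
+-void* mappedData;
+-vmaMapMemory(allocator, alloc, &mappedData);
+-memcpy(mappedData, srcData, srcDataSize);
+-// Flush the written range so the GPU sees it even on non-HOST_COHERENT memory.
+-vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
+-vmaUnmapMemory(allocator, alloc);
+-\endcode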
+-
+-Regions of memory specified for flush/invalidate must be aligned to
+-`VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library.
+-In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations
+-within blocks are aligned to this value, so their offsets are always multiples of
+-`nonCoherentAtomSize` and two different allocations never share the same "line" of this size.
+-
+-Also, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA)
+-currently provide the `HOST_COHERENT` flag on all memory types that are
+-`HOST_VISIBLE`, so on PC you may not need to bother.
+-
+-
+-\page staying_within_budget Staying within budget
+-
+-When developing a graphics-intensive game or program, it is important to avoid allocating
+-more GPU memory than is physically available. When memory is over-committed,
+-various bad things can happen, depending on the specific GPU, graphics driver, and
+-operating system:
+-
+-- It may just work without any problems.
+-- The application may slow down because some memory blocks are moved to system RAM
+-  and the GPU has to access them through the PCI Express bus.
+-- A new allocation may take a very long time to complete, even a few seconds, and possibly
+-  freeze the entire system.
+-- The new allocation may fail with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+-- It may even result in a GPU crash (TDR), observed as `VK_ERROR_DEVICE_LOST`
+-  returned somewhere later.
+-
+-\section staying_within_budget_querying_for_budget Querying for budget
+-
+-To query the current memory usage and available budget, use the function vmaGetHeapBudgets().
+-The returned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap.
+-
+-Please note that this function returns different information and works faster than
+-vmaCalculateStatistics(). vmaGetHeapBudgets() can be called every frame or even before every
+-allocation, while vmaCalculateStatistics() is intended to be used rarely,
+-only to obtain statistical information, e.g. for debugging purposes.
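+-
+-A minimal sketch of querying the budget (assuming an existing `allocator`):
+-
+-\code
+-VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
+-vmaGetHeapBudgets(allocator, budgets);
+-
+-const VkPhysicalDeviceMemoryProperties* memProps = nullptr;
+-vmaGetMemoryProperties(allocator, &memProps);
+-for(uint32_t heapIndex = 0; heapIndex < memProps->memoryHeapCount; ++heapIndex)
+-{
+-    // budgets[heapIndex].usage and .budget are expressed in bytes.
+-    printf("Heap %u: %llu / %llu B\n", heapIndex,
+-        (unsigned long long)budgets[heapIndex].usage,
+-        (unsigned long long)budgets[heapIndex].budget);
+-}
+-\endcode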
+-
+-It is recommended to use the <b>VK_EXT_memory_budget</b> device extension to obtain information
+-about the budget from the Vulkan device. VMA is able to use this extension automatically.
+-When not enabled, the allocator behaves the same way, but then it estimates current usage
+-and available budget based on its internal information and Vulkan memory heap sizes,
+-which may be less precise. In order to use this extension:
+-
+-1. Make sure the extension VK_EXT_memory_budget and the extension VK_KHR_get_physical_device_properties2
+-   required by it are available, and enable them. Please note that the first is a device
+-   extension and the second is an instance extension!
+-2. Use the flag #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT when creating the #VmaAllocator object.
+-3. Make sure to call vmaSetCurrentFrameIndex() every frame, as in the sketch below. The budget is queried from
+-   Vulkan inside of it to avoid the overhead of querying it with every allocation.
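+-
+-A minimal sketch of step 3 (assuming `allocator` and a frame counter `frameIndex` maintained by your application):
+-
+-\code
+-// Call once per frame, e.g. right after acquiring a new swapchain image.
+-vmaSetCurrentFrameIndex(allocator, frameIndex);
+-\endcode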
+-
+-\section staying_within_budget_controlling_memory_usage Controlling memory usage
+-
+-There are many ways in which you can try to stay within the budget.
+-
+-First, when making a new allocation requires allocating a new memory block, the library
+-tries not to exceed the budget automatically. If a block with the default recommended size
+-(e.g. 256 MB) would go over budget, a smaller block is allocated, possibly even
+-dedicated memory for just this resource.
+-
+-If the size of the requested resource plus current memory usage is more than the
+-budget, by default the library still tries to create it, leaving it to the Vulkan
+-implementation whether the allocation succeeds or fails. You can change this behavior
+-by using the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag. With it, the allocation is
+-not made if it would exceed the budget or if the budget is already exceeded.
+-VMA then tries to make the allocation from the next eligible Vulkan memory type.
+-If all of them fail, the call fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+-An example usage pattern may be to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag
+-when creating resources that are not essential for the application (e.g. the texture
+-of a specific object) and not to pass it when creating critically important resources
+-(e.g. render targets).
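+-
+-A minimal sketch of passing the flag for a non-essential resource (assuming a filled `texImageCreateInfo`; the name is illustrative):
+-
+-\code
+-VmaAllocationCreateInfo allocCreateInfo = {};
+-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+-// Fail early instead of going over budget for this optional texture.
+-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT;
+-
+-VkImage img;
+-VmaAllocation alloc;
+-VkResult res = vmaCreateImage(allocator, &texImageCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
+-// res == VK_ERROR_OUT_OF_DEVICE_MEMORY means the image didn't fit in the budget.
+-\endcode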
+-
+-On AMD graphics cards there is a custom vendor extension available: <b>VK_AMD_memory_overallocation_behavior</b>
+-that allows you to control the behavior of the Vulkan implementation in out-of-memory cases -
+-whether it should fail with an error code or still allow the allocation.
+-Usage of this extension involves only passing an extra structure on Vulkan device creation,
+-so it is out of scope of this library.
+-
+-Finally, you can also use the #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure
+-a new allocation is created only when it fits inside one of the existing memory blocks.
+-If it would require allocating a new block, it fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+-This also ensures that the function call is very fast because it never goes to Vulkan
+-to obtain a new block.
+-
+-\note Creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount
+-set to more than 0 will currently try to allocate memory blocks without checking whether they
+-fit within the budget.
+-
+-
+-\page resource_aliasing Resource aliasing (overlap)
+-
+-New explicit graphics APIs (Vulkan and Direct3D 12), thanks to manual memory
+-management, give an opportunity to alias (overlap) multiple resources in the
+-same region of memory - a feature not available in the old APIs (Direct3D 11, OpenGL).
+-It can be useful to save video memory, but it must be used with caution.
+-
+-For example, if you know the flow of your whole render frame in advance, you
+-are going to use some intermediate textures or buffers only during a small range of render passes,
+-and you know these ranges don't overlap in time, you can bind these resources to
+-the same place in memory, even if they have completely different parameters (width, height, format etc.).
+-
+-![Resource aliasing (overlap)](../gfx/Aliasing.png)
+-
+-Such a scenario is possible using VMA, but you need to create your images manually.
+-Then you need to calculate the parameters of the allocation to be made using this formula:
+-
+-- allocation size = max(size of each image)
+-- allocation alignment = max(alignment of each image)
+-- allocation memoryTypeBits = bitwise AND(memoryTypeBits of each image)
+-
+-The following example shows two different images bound to the same place in memory,
+-allocated to fit the largest of them.
+-
+-\code
+-// A 512x512 texture to be sampled.
+-VkImageCreateInfo img1CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+-img1CreateInfo.imageType = VK_IMAGE_TYPE_2D;
+-img1CreateInfo.extent.width = 512;
+-img1CreateInfo.extent.height = 512;
+-img1CreateInfo.extent.depth = 1;
+-img1CreateInfo.mipLevels = 10;
+-img1CreateInfo.arrayLayers = 1;
+-img1CreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
+-img1CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+-img1CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+-img1CreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+-img1CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+-
+-// A full screen texture to be used as color attachment.
+-VkImageCreateInfo img2CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+-img2CreateInfo.imageType = VK_IMAGE_TYPE_2D;
+-img2CreateInfo.extent.width = 1920;
+-img2CreateInfo.extent.height = 1080;
+-img2CreateInfo.extent.depth = 1;
+-img2CreateInfo.mipLevels = 1;
+-img2CreateInfo.arrayLayers = 1;
+-img2CreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
+-img2CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+-img2CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+-img2CreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+-img2CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+-
+-VkImage img1;
+-res = vkCreateImage(device, &img1CreateInfo, nullptr, &img1);
+-VkImage img2;
+-res = vkCreateImage(device, &img2CreateInfo, nullptr, &img2);
+-
+-VkMemoryRequirements img1MemReq;
+-vkGetImageMemoryRequirements(device, img1, &img1MemReq);
+-VkMemoryRequirements img2MemReq;
+-vkGetImageMemoryRequirements(device, img2, &img2MemReq);
+-
+-VkMemoryRequirements finalMemReq = {};
+-finalMemReq.size = std::max(img1MemReq.size, img2MemReq.size);
+-finalMemReq.alignment = std::max(img1MemReq.alignment, img2MemReq.alignment);
+-finalMemReq.memoryTypeBits = img1MemReq.memoryTypeBits & img2MemReq.memoryTypeBits;
+-// Validate if(finalMemReq.memoryTypeBits != 0)
+-
+-VmaAllocationCreateInfo allocCreateInfo = {};
+-allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+-
+-VmaAllocation alloc;
+-res = vmaAllocateMemory(allocator, &finalMemReq, &allocCreateInfo, &alloc, nullptr);
+-
+-res = vmaBindImageMemory(allocator, alloc, img1);
+-res = vmaBindImageMemory(allocator, alloc, img2);
+-
+-// You can use img1, img2 here, but not at the same time!
+-
+-vmaFreeMemory(allocator, alloc);
+-vkDestroyImage(device, img2, nullptr);
+-vkDestroyImage(device, img1, nullptr);
+-\endcode
+-
+-VMA also provides convenience functions that create a buffer or image and bind it to memory
+-represented by an existing #VmaAllocation:
+-vmaCreateAliasingBuffer(), vmaCreateAliasingBuffer2(),
+-vmaCreateAliasingImage(), vmaCreateAliasingImage2().
+-Versions with "2" offer an additional `allocationLocalOffset` parameter.
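+-
+-For example, an extra image can be created and bound to the existing allocation in one step (a sketch, reusing `img2CreateInfo` and `alloc` from the sample above):
+-
+-\code
+-VkImage aliasedImg;
+-res = vmaCreateAliasingImage(allocator, alloc, &img2CreateInfo, &aliasedImg);
+-// The image is created and bound to alloc's memory at offset 0 in one call.
+-\endcode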
+-
+-Remember that using resources that alias in memory requires proper synchronization.
+-You need to issue a memory barrier to make sure commands that use `img1` and `img2`
+-don't overlap on GPU timeline.
+-You also need to treat a resource after aliasing as uninitialized - containing garbage data.
+-For example, if you use `img1` and then want to use `img2`, you need to issue
+-an image memory barrier for `img2` with `oldLayout` = `VK_IMAGE_LAYOUT_UNDEFINED`.
+-
+-Additional considerations:
+-
+-- Vulkan also allows interpreting the contents of memory between aliasing resources consistently in some cases.
+-See chapter 11.8. "Memory Aliasing" of the Vulkan specification or the `VK_IMAGE_CREATE_ALIAS_BIT` flag.
+-- You can create a more complex layout where different images and buffers are bound
+-at different offsets inside one large allocation. For example, one can imagine
+-a big texture used in some render passes, aliasing with a set of many small buffers
+-used in some further passes. To bind a resource at a non-zero offset in an allocation,
+-use vmaBindBufferMemory2() / vmaBindImageMemory2().
+-- Before allocating memory for the resources you want to alias, check `memoryTypeBits`
+-returned in memory requirements of each resource to make sure the bits overlap.
+-Some GPUs may expose multiple memory types suitable e.g. only for buffers or
+-images with `COLOR_ATTACHMENT` usage, so the sets of memory types supported by your
+-resources may be disjoint. Aliasing them is not possible in that case.
+-
+-
+-\page custom_memory_pools Custom memory pools
+-
+-A memory pool contains a number of `VkDeviceMemory` blocks.
+-The library automatically creates and manages a default pool for each memory type available on the device.
+-The default memory pool automatically grows in size.
+-The size of allocated blocks is also variable and managed automatically.
+-
+-You can create a custom pool and allocate memory out of it.
+-This can be useful if you want to:
+-
+-- Keep a certain kind of allocation separate from others.
+-- Enforce a particular, fixed size of Vulkan memory blocks.
+-- Limit the maximum amount of Vulkan memory allocated for that pool.
+-- Reserve a minimum or fixed amount of Vulkan memory always preallocated for that pool.
+-- Use extra parameters for a set of your allocations that are available in #VmaPoolCreateInfo but not in
+-  #VmaAllocationCreateInfo - e.g., custom minimum alignment, custom `pNext` chain.
+-- Perform defragmentation on a specific subset of your allocations.
+-
+-To use custom memory pools:
+-
+--# Fill the VmaPoolCreateInfo structure.
+--# Call vmaCreatePool() to obtain a #VmaPool handle.
+--# When making an allocation, set VmaAllocationCreateInfo::pool to this handle.
+- You don't need to specify any other parameters of this structure, like `usage`.
+-
+-Example:
+-
+-\code
+-// Find memoryTypeIndex for the pool.
+-VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+-sampleBufCreateInfo.size = 0x10000; // Doesn't matter.
+-sampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+-
+-VmaAllocationCreateInfo sampleAllocCreateInfo = {};
+-sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+-
+-uint32_t memTypeIndex;
+-VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator,
+- &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex);
+-// Check res...
+-
+-// Create a pool that can have at most 2 blocks, 128 MiB each.
+-VmaPoolCreateInfo poolCreateInfo = {};
+-poolCreateInfo.memoryTypeIndex = memTypeIndex;
+-poolCreateInfo.blockSize = 128ull * 1024 * 1024;
+-poolCreateInfo.maxBlockCount = 2;
+-
+-VmaPool pool;
+-res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
+-// Check res...
+-
+-// Allocate a buffer out of it.
+-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+-bufCreateInfo.size = 1024;
+-bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+-
+-VmaAllocationCreateInfo allocCreateInfo = {};
+-allocCreateInfo.pool = pool;
+-
+-VkBuffer buf;
+-VmaAllocation alloc;
+-res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
+-// Check res...
+-\endcode
+-
+-You have to free all allocations made from this pool before destroying it.
+-
+-\code
+-vmaDestroyBuffer(allocator, buf, alloc);
+-vmaDestroyPool(allocator, pool);
+-\endcode
+-
+-New versions of this library support creating dedicated allocations in custom pools.
+-This is supported only when VmaPoolCreateInfo::blockSize = 0.
+-To use this feature, set VmaAllocationCreateInfo::pool to the pointer to your custom pool and
+-VmaAllocationCreateInfo::flags to #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
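+-
+-A minimal sketch (assuming a `pool` created with VmaPoolCreateInfo::blockSize = 0 and a filled `bufCreateInfo` as in the example above):
+-
+-\code
+-VmaAllocationCreateInfo allocCreateInfo = {};
+-allocCreateInfo.pool = pool;
+-// Each allocation from this pool gets its own VkDeviceMemory block.
+-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+-
+-VkBuffer buf;
+-VmaAllocation alloc;
+-VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
+-\endcode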
+-
+-\note Excessive use of custom pools is a common mistake when using this library.
+-Custom pools may be useful for special purposes - when you want to
+-keep a certain type of resource separate, e.g. to reserve a minimum amount of memory
+-for it or limit the maximum amount of memory it can occupy. For most
+-resources this is not needed, and so it is not recommended to create #VmaPool
+-objects and allocations out of them. Allocating from the default pool is sufficient.
+-
+-
+-\section custom_memory_pools_MemTypeIndex Choosing memory type index
+-
+-When creating a pool, you must explicitly specify a memory type index.
+-To find the one suitable for your buffers or images, you can use the helper functions
+-vmaFindMemoryTypeIndexForBufferInfo() and vmaFindMemoryTypeIndexForImageInfo().
+-You need to provide structures with example parameters of buffers or images
+-that you are going to create in that pool.
+-
+-\code
+-VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+-exampleBufCreateInfo.size = 1024; // Doesn't matter
+-exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+-
+-VmaAllocationCreateInfo allocCreateInfo = {};
+-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+-
+-uint32_t memTypeIndex;
+-vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex);
+-
+-VmaPoolCreateInfo poolCreateInfo = {};
+-poolCreateInfo.memoryTypeIndex = memTypeIndex;
+-// ...
+-\endcode
+-
+-When creating buffers/images allocated in that pool, provide the following parameters:
+-
+-- `VkBufferCreateInfo`: Prefer to pass the same parameters as above.
+- Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior.
+- Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers
+- or the other way around.
+-- VmaAllocationCreateInfo: You don't need to pass the same parameters. Fill only the `pool` member.
+- Other members are ignored anyway.
+-
+-\section linear_algorithm Linear allocation algorithm
+-
+-Each Vulkan memory block managed by this library has accompanying metadata that
+-keeps track of used and unused regions. By default, the metadata structure and
+-algorithm try to find the best place for new allocations among free regions to
+-optimize memory usage. This way, you can allocate and free objects in any order.
+-
+-![Default allocation algorithm](../gfx/Linear_allocator_1_algo_default.png)
+-
+-Sometimes there is a need for a simpler, linear allocation algorithm. You can
+-create a custom pool that uses such an algorithm by adding the flag
+-#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating the
+-#VmaPool object. An alternative metadata management is then used. It always
+-creates new allocations after the last one and doesn't reuse free regions left after
+-allocations freed in the middle. This results in better allocation performance and
+-less memory consumed by metadata.
+-
+-![Linear allocation algorithm](../gfx/Linear_allocator_2_algo_linear.png)
+-
+-With this one flag, you can create a custom pool that can be used in many ways:
+-free-at-once, stack, double stack, and ring buffer. See below for details.
+-You don't need to specify explicitly which of these options you are going to use - it is detected automatically.
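+-
+-A minimal sketch of creating such a pool, reusing `memTypeIndex` found as described in
+-\ref custom_memory_pools_MemTypeIndex:
+-
+-\code
+-VmaPoolCreateInfo poolCreateInfo = {};
+-poolCreateInfo.memoryTypeIndex = memTypeIndex;
+-poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
+-
+-VmaPool pool;
+-VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
+-// Check res...
+-\endcode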
+-
+-\subsection linear_algorithm_free_at_once Free-at-once
+-
+-In a pool that uses the linear algorithm, you still need to free all the allocations
+-individually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free
+-them in any order. New allocations are always made after the last one - free space
+-in the middle is not reused. However, when you release all the allocations and
+-the pool becomes empty, allocation starts from the beginning again. This way you
+-can use the linear algorithm to speed up creation of allocations that you are going
+-to release all at once.
+-
+-![Free-at-once](../gfx/Linear_allocator_3_free_at_once.png)
+-
+-This mode is also available for pools created with a VmaPoolCreateInfo::maxBlockCount
+-value that allows multiple memory blocks.
+-
+-\subsection linear_algorithm_stack Stack
+-
+-When you free an allocation that was created last, its space can be reused.
+-Thanks to this, if you always release allocations in the order opposite to their
+-creation (LIFO - Last In First Out), you can achieve the behavior of a stack.
+-
+-![Stack](../gfx/Linear_allocator_4_stack.png)
+-
+-This mode is also available for pools created with a VmaPoolCreateInfo::maxBlockCount
+-value that allows multiple memory blocks.
+-
+-\subsection linear_algorithm_double_stack Double stack
+-
+-The space reserved by a custom pool with linear algorithm may be used by two
+-stacks:
+-
+-- The first, default one, growing up from offset 0.
+-- The second, "upper" one, growing down from the end towards lower offsets.
+-
+-To make an allocation from the upper stack, add the flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
+-to VmaAllocationCreateInfo::flags.
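+-
+-A minimal sketch, assuming `pool` was created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
+-and maxBlockCount = 1, and `bufCreateInfo` is filled as usual:
+-
+-\code
+-VmaAllocationCreateInfo allocCreateInfo = {};
+-allocCreateInfo.pool = pool;
+-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
+-
+-VkBuffer buf;
+-VmaAllocation alloc;
+-VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
+-\endcode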
+-
+-![Double stack](../gfx/Linear_allocator_7_double_stack.png)
+-
+-The double stack is available only in pools with one memory block -
+-VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise, behavior is undefined.
+-
+-When the two stacks' ends meet and there is not enough space between them for a
+-new allocation, such an allocation fails with the usual
+-`VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
+-
+-\subsection linear_algorithm_ring_buffer Ring buffer
+-
+-When you free some allocations from the beginning and there is not enough free space
+-for a new one at the end of a pool, the allocator's "cursor" wraps around to the
+-beginning and starts allocating there. Thanks to this, if you always release
+-allocations in the same order as you created them (FIFO - First In First Out),
+-you can achieve the behavior of a ring buffer / queue.
+-
+-![Ring buffer](../gfx/Linear_allocator_5_ring_buffer.png)
+-
+-A ring buffer is available only in pools with one memory block -
+-VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise, behavior is undefined.
+-
+-\note \ref defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
+-
+-
+-\page defragmentation Defragmentation
+-
+-Interleaved allocations and deallocations of many objects of varying size can
+-cause fragmentation over time, which can lead to a situation where the library is unable
+-to find a continuous range of free memory for a new allocation even though there is
+-enough free space, just scattered across many small free ranges between existing
+-allocations.
+-
+-To mitigate this problem, you can use the defragmentation feature.
+-It doesn't happen automatically though and needs your cooperation,
+-because VMA is a low-level library that only allocates memory.
+-It cannot recreate buffers and images in a new place as it doesn't remember the contents of `VkBufferCreateInfo` / `VkImageCreateInfo` structures.
+-It cannot copy their contents as it doesn't record any commands to a command buffer.
+-
+-Example:
+-
+-\code
+-VmaDefragmentationInfo defragInfo = {};
+-defragInfo.pool = myPool;
+-defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT;
+-
+-VmaDefragmentationContext defragCtx;
+-VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
+-// Check res...
+-
+-for(;;)
+-{
+- VmaDefragmentationPassMoveInfo pass;
+- res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
+- if(res == VK_SUCCESS)
+- break;
+- else if(res != VK_INCOMPLETE)
+- {
+- // Handle error...
+- }
+-
+- for(uint32_t i = 0; i < pass.moveCount; ++i)
+- {
+- // Inspect pass.pMoves[i].srcAllocation, identify what buffer/image it represents.
+- VmaAllocationInfo allocInfo;
+- vmaGetAllocationInfo(allocator, pass.pMoves[i].srcAllocation, &allocInfo);
+- MyEngineResourceData* resData = (MyEngineResourceData*)allocInfo.pUserData;
+-
+- // Recreate and bind this buffer/image at: pass.pMoves[i].dstMemory, pass.pMoves[i].dstOffset.
+- VkImageCreateInfo imgCreateInfo = ...
+- VkImage newImg;
+- res = vkCreateImage(device, &imgCreateInfo, nullptr, &newImg);
+- // Check res...
+- res = vmaBindImageMemory(allocator, pass.pMoves[i].dstTmpAllocation, newImg);
+- // Check res...
+-
+- // Issue a vkCmdCopyBuffer/vkCmdCopyImage to copy its content to the new place.
+- vkCmdCopyImage(cmdBuf, resData->img, ..., newImg, ...);
+- }
+-
+- // Make sure the copy commands finished executing.
+- vkWaitForFences(...);
+-
+- // Destroy old buffers/images bound with pass.pMoves[i].srcAllocation.
+- for(uint32_t i = 0; i < pass.moveCount; ++i)
+- {
+- // ...
+- vkDestroyImage(device, resData->img, nullptr);
+- }
+-
+- // Update appropriate descriptors to point to the new places...
+-
+- res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
+- if(res == VK_SUCCESS)
+- break;
+- else if(res != VK_INCOMPLETE)
+- {
+- // Handle error...
+- }
+-}
+-
+-vmaEndDefragmentation(allocator, defragCtx, nullptr);
+-\endcode
+-
+-Although functions like vmaCreateBuffer(), vmaCreateImage(), vmaDestroyBuffer(), vmaDestroyImage()
+-create/destroy an allocation and a buffer/image at once, these are just a shortcut for
+-creating the resource, allocating memory, and binding them together.
+-Defragmentation works on memory allocations only. You must handle the rest manually.
+-Defragmentation is an iterative process that should repeat "passes" as long as related functions
+-return `VK_INCOMPLETE` rather than `VK_SUCCESS`.
+-In each pass:
+-
+-1. vmaBeginDefragmentationPass() function call:
+- - Calculates and returns the list of allocations to be moved in this pass.
+- Note this can be a time-consuming process.
+- - Reserves destination memory for them by creating temporary destination allocations
+- that you can query for their `VkDeviceMemory` + offset using vmaGetAllocationInfo().
+-2. Inside the pass, **you should**:
+- - Inspect the returned list of allocations to be moved.
+- - Create new buffers/images and bind them at the returned destination temporary allocations.
+- - Copy data from source to destination resources if necessary.
+- - Destroy the source buffers/images, but NOT their allocations.
+-3. vmaEndDefragmentationPass() function call:
+- - Frees the source memory reserved for the allocations that are moved.
+- - Modifies source #VmaAllocation objects that are moved to point to the destination reserved memory.
+- - Frees `VkDeviceMemory` blocks that became empty.
+-
+-Unlike in previous iterations of the defragmentation API, there is no list of "movable" allocations passed as a parameter.
+-The defragmentation algorithm tries to move all suitable allocations.
+-You can, however, refuse to move some of them inside a defragmentation pass, by setting
+-`pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
+-This is not recommended and may result in suboptimal packing of the allocations after defragmentation.
+-If you cannot ensure any allocation can be moved, it is better to keep movable allocations separate in a custom pool.
+-
+-Inside a pass, for each allocation that should be moved:
+-
+-- You should copy its data from the source to the destination place by calling e.g. `vkCmdCopyBuffer()`, `vkCmdCopyImage()`.
+- - You need to make sure these commands finished executing before destroying the source buffers/images and before calling vmaEndDefragmentationPass().
+-- If a resource doesn't contain any meaningful data, e.g. it is a transient color attachment image to be cleared,
+- filled, and used temporarily in each rendering frame, you can just recreate this image
+- without copying its data.
+-- If the resource is in `HOST_VISIBLE` and `HOST_CACHED` memory, you can copy its data on the CPU
+- using `memcpy()`.
+-- If you cannot move the allocation, you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE,
+- as shown in the snippet after this list. This will cancel the move.
+- - vmaEndDefragmentationPass() will then free the destination memory,
+- not the source memory of the allocation, leaving it unchanged.
+-- If you decide the allocation is unimportant and can be destroyed instead of moved (e.g. it wasn't used for long time),
+- you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
+- - vmaEndDefragmentationPass() will then free both source and destination memory, and will destroy the source #VmaAllocation object.
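+-
+-For example, inside a defragmentation pass:
+-
+-\code
+-// A sketch: leave allocation i at its original place instead of moving it.
+-pass.pMoves[i].operation = VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE;
+-\endcode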
+-
+-You can defragment a specific custom pool by setting VmaDefragmentationInfo::pool
+-(like in the example above) or all the default pools by setting this member to null.
+-
+-Defragmentation is always performed in each pool separately.
+-Allocations are never moved between different Vulkan memory types.
+-The size of the destination memory reserved for a moved allocation is the same as the original one.
+-Alignment of an allocation as it was determined using `vkGetBufferMemoryRequirements()` etc. is also respected after defragmentation.
+-Buffers/images should be recreated with the same `VkBufferCreateInfo` / `VkImageCreateInfo` parameters as the original ones.
+-
+-You can perform the defragmentation incrementally to limit the number of allocations and bytes to be moved
+-in each pass, e.g. to call it in sync with render frames and avoid big hitches.
+-See members: VmaDefragmentationInfo::maxBytesPerPass, VmaDefragmentationInfo::maxAllocationsPerPass.
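+-
+-A sketch of such a setup; the limit values here are arbitrary examples:
+-
+-\code
+-VmaDefragmentationInfo defragInfo = {};
+-defragInfo.pool = myPool;
+-defragInfo.maxBytesPerPass = 16ull * 1024 * 1024; // Move at most 16 MiB per pass.
+-defragInfo.maxAllocationsPerPass = 64; // Move at most 64 allocations per pass.
+-\endcode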
+-
+-It is also safe to perform the defragmentation asynchronously to render frames and other Vulkan and VMA
+-usage, possibly from multiple threads, with the exception that allocations
+-returned in VmaDefragmentationPassMoveInfo::pMoves shouldn't be destroyed until the defragmentation pass is ended.
+-
+-<b>Mapping</b> is preserved on allocations that are moved during defragmentation.
+-Whether through #VMA_ALLOCATION_CREATE_MAPPED_BIT or vmaMapMemory(), the allocations
+-are mapped at their new place. Of course, the pointer to the mapped data changes, so it needs to be queried
+-using VmaAllocationInfo::pMappedData.
+-
+-\note Defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
+-
+-
+-\page statistics Statistics
+-
+-This library contains several functions that return information about its internal state,
+-especially the amount of memory allocated from Vulkan.
+-
+-\section statistics_numeric_statistics Numeric statistics
+-
+-If you need to obtain basic statistics about memory usage per heap, together with current budget,
+-you can call the function vmaGetHeapBudgets() and inspect the structure #VmaBudget.
+-This is useful to keep track of memory usage and stay within budget
+-(see also \ref staying_within_budget).
+-Example:
+-
+-\code
+-uint32_t heapIndex = ...
+-
+-VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
+-vmaGetHeapBudgets(allocator, budgets);
+-
+-printf("My heap currently has %u allocations taking %llu B,\n",
+- budgets[heapIndex].statistics.allocationCount,
+- budgets[heapIndex].statistics.allocationBytes);
+-printf("allocated out of %u Vulkan device memory blocks taking %llu B,\n",
+- budgets[heapIndex].statistics.blockCount,
+- budgets[heapIndex].statistics.blockBytes);
+-printf("Vulkan reports total usage %llu B with budget %llu B.\n",
+- budgets[heapIndex].usage,
+- budgets[heapIndex].budget);
+-\endcode
+-
+-You can query for more detailed statistics per memory heap, type, and totals,
+-including minimum and maximum allocation size and unused range size,
+-by calling the function vmaCalculateStatistics() and inspecting the structure #VmaTotalStatistics.
+-This function is slower though, as it has to traverse all the internal data structures,
+-so it should be used only for debugging purposes.
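+-
+-A minimal sketch:
+-
+-\code
+-VmaTotalStatistics stats;
+-vmaCalculateStatistics(allocator, &stats);
+-printf("Total: %u allocations taking %llu B.\n",
+- stats.total.statistics.allocationCount,
+- stats.total.statistics.allocationBytes);
+-\endcode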
+-
+-You can query for statistics of a custom pool using the functions vmaGetPoolStatistics()
+-or vmaCalculatePoolStatistics().
+-
+-You can query for information about a specific allocation using the function vmaGetAllocationInfo().
+-It fills the structure #VmaAllocationInfo.
+-
+-\section statistics_json_dump JSON dump
+-
+-You can dump the internal state of the allocator to a string in JSON format using the function vmaBuildStatsString().
+-The result is guaranteed to be correct JSON.
+-It uses ANSI encoding.
+-Any strings provided by the user (see [Allocation names](@ref allocation_names))
+-are copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2 or any other encoding,
+-this JSON string can be treated as using this encoding.
+-It must be freed using the function vmaFreeStatsString().
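+-
+-A minimal sketch:
+-
+-\code
+-char* statsString = nullptr;
+-vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map.
+-// Write statsString to a file or log...
+-vmaFreeStatsString(allocator, statsString);
+-\endcode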
+-
+-The format of this JSON string is not part of the official documentation of the library,
+-but it will not change in a backward-incompatible way without an increase of the library's major version number
+-and an appropriate mention in the changelog.
+-
+-The JSON string contains all the data that can be obtained using vmaCalculateStatistics().
+-It can also contain a detailed map of allocated memory blocks and their regions -
+-free and occupied by allocations.
+-This allows e.g. visualizing the memory or assessing fragmentation.
+-
+-
+-\page allocation_annotation Allocation names and user data
+-
+-\section allocation_user_data Allocation user data
+-
+-You can annotate allocations with your own information, e.g. for debugging purposes.
+-To do that, fill the VmaAllocationCreateInfo::pUserData field when creating
+-an allocation. It is an opaque `void*` pointer. You can use it e.g. as a pointer,
+-some handle, index, key, ordinal number, or any other value that would associate
+-the allocation with your custom metadata.
+-It is useful for identifying appropriate data structures in your engine given a #VmaAllocation,
+-e.g. when doing \ref defragmentation.
+-
+-\code
+-VkBufferCreateInfo bufCreateInfo = ...
+-
+-MyBufferMetadata* pMetadata = CreateBufferMetadata();
+-
+-VmaAllocationCreateInfo allocCreateInfo = {};
+-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+-allocCreateInfo.pUserData = pMetadata;
+-
+-VkBuffer buffer;
+-VmaAllocation allocation;
+-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
+-\endcode
+-
+-The pointer may be later retrieved as VmaAllocationInfo::pUserData:
+-
+-\code
+-VmaAllocationInfo allocInfo;
+-vmaGetAllocationInfo(allocator, allocation, &allocInfo);
+-MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData;
+-\endcode
+-
+-It can also be changed using function vmaSetAllocationUserData().
+-
+-Values of (non-zero) allocations' `pUserData` are printed in the JSON report created by
+-vmaBuildStatsString() in hexadecimal form.
+-
+-\section allocation_names Allocation names
+-
+-An allocation can also carry a null-terminated string, giving a name to the allocation.
+-To set it, call vmaSetAllocationName().
+-The library creates an internal copy of the string, so the pointer you pass doesn't need
+-to be valid for the whole lifetime of the allocation. You can free it after the call.
+-
+-\code
+-std::string imageName = "Texture: ";
+-imageName += fileName;
+-vmaSetAllocationName(allocator, allocation, imageName.c_str());
+-\endcode
+-
+-The string can be later retrieved by inspecting VmaAllocationInfo::pName.
+-It is also printed in JSON report created by vmaBuildStatsString().
+-
+-\note Setting a string name on a VMA allocation doesn't automatically set it on the Vulkan buffer or image created with it.
+-You must do that manually using an extension like VK_EXT_debug_utils, which is independent of this library.
+-
+-
+-\page virtual_allocator Virtual allocator
+-
+-As an extra feature, the core allocation algorithm of the library is exposed through a simple and convenient API of "virtual allocator".
+-It doesn't allocate any real GPU memory. It just keeps track of used and free regions of a "virtual block".
+-You can use it to allocate your own memory or other objects, even completely unrelated to Vulkan.
+-A common use case is sub-allocation of pieces of one large GPU buffer.
+-
+-\section virtual_allocator_creating_virtual_block Creating virtual block
+-
+-To use this functionality, you don't need a main "allocator" object -
+-no #VmaAllocator object has to be created.
+-All you need to do is create a separate #VmaVirtualBlock object for each block of memory you want to be managed by the allocator:
+-
+--# Fill in #VmaVirtualBlockCreateInfo structure.
+--# Call vmaCreateVirtualBlock(). Get new #VmaVirtualBlock object.
+-
+-Example:
+-
+-\code
+-VmaVirtualBlockCreateInfo blockCreateInfo = {};
+-blockCreateInfo.size = 1048576; // 1 MB
+-
+-VmaVirtualBlock block;
+-VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
+-\endcode
+-
+-\section virtual_allocator_making_virtual_allocations Making virtual allocations
+-
+-A #VmaVirtualBlock object contains an internal data structure that keeps track of free and occupied regions
+-using the same code as the main Vulkan memory allocator.
+-Similarly to #VmaAllocation for standard GPU allocations, there is a #VmaVirtualAllocation type
+-that represents an opaque handle to an allocation within the virtual block.
+-
+-In order to make such an allocation:
+-
+--# Fill in #VmaVirtualAllocationCreateInfo structure.
+--# Call vmaVirtualAllocate(). Get new #VmaVirtualAllocation object that represents the allocation.
+- You can also receive `VkDeviceSize offset` that was assigned to the allocation.
+-
+-Example:
+-
+-\code
+-VmaVirtualAllocationCreateInfo allocCreateInfo = {};
+-allocCreateInfo.size = 4096; // 4 KB
+-
+-VmaVirtualAllocation alloc;
+-VkDeviceSize offset;
+-res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
+-if(res == VK_SUCCESS)
+-{
+- // Use the 4 KB of your memory starting at offset.
+-}
+-else
+-{
+- // Allocation failed - no space for it could be found. Handle this error!
+-}
+-\endcode
+-
+-\section virtual_allocator_deallocation Deallocation
+-
+-When no longer needed, an allocation can be freed by calling vmaVirtualFree().
+-You can only pass to this function an allocation that was previously returned by vmaVirtualAllocate()
+-called for the same #VmaVirtualBlock.
+-
+-When the whole block is no longer needed, the block object can be released by calling vmaDestroyVirtualBlock().
+-All allocations must be freed before the block is destroyed, which is checked internally by an assert.
+-However, if you don't want to call vmaVirtualFree() for each allocation, you can use vmaClearVirtualBlock() to free them all at once -
+-a feature not available in the normal Vulkan memory allocator. Example:
+-
+-\code
+-vmaVirtualFree(block, alloc);
+-vmaDestroyVirtualBlock(block);
+-\endcode
+-
+-\section virtual_allocator_allocation_parameters Allocation parameters
+-
+-You can attach a custom pointer to each allocation by using vmaSetVirtualAllocationUserData().
+-Its default value is null.
+-It can be used to store any data that needs to be associated with that allocation - e.g. an index, a handle, or a pointer to some
+-larger data structure containing more information. Example:
+-
+-\code
+-struct CustomAllocData
+-{
+- std::string m_AllocName;
+-};
+-CustomAllocData* allocData = new CustomAllocData();
+-allocData->m_AllocName = "My allocation 1";
+-vmaSetVirtualAllocationUserData(block, alloc, allocData);
+-\endcode
+-
+-The pointer can later be fetched, along with allocation offset and size, by passing the allocation handle to function
+-vmaGetVirtualAllocationInfo() and inspecting returned structure #VmaVirtualAllocationInfo.
+-If you allocated a new object to be used as the custom pointer, don't forget to delete that object before freeing the allocation!
+-Example:
+-
+-\code
+-VmaVirtualAllocationInfo allocInfo;
+-vmaGetVirtualAllocationInfo(block, alloc, &allocInfo);
+-delete (CustomAllocData*)allocInfo.pUserData;
+-
+-vmaVirtualFree(block, alloc);
+-\endcode
+-
+-\section virtual_allocator_alignment_and_units Alignment and units
+-
+-It feels natural to express sizes and offsets in bytes.
+-If an offset of an allocation needs to be aligned to a multiple of some number (e.g. 4 bytes), you can fill the optional member
+-VmaVirtualAllocationCreateInfo::alignment to request it. Example:
+-
+-\code
+-VmaVirtualAllocationCreateInfo allocCreateInfo = {};
+-allocCreateInfo.size = 4096; // 4 KB
+-allocCreateInfo.alignment = 4; // Returned offset must be a multiple of 4 B
+-
+-VmaVirtualAllocation alloc;
+-res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, nullptr);
+-\endcode
+-
+-Alignments of different allocations made from one block may vary.
+-However, if all alignments and sizes are always multiples of some size, e.g. 4 B or `sizeof(MyDataStruct)`,
+-you can express all sizes, alignments, and offsets in multiples of that size instead of individual bytes.
+-It might be more convenient, but you need to make sure to use this new unit consistently in all the places:
+-
+-- VmaVirtualBlockCreateInfo::size
+-- VmaVirtualAllocationCreateInfo::size and VmaVirtualAllocationCreateInfo::alignment
+-- Using offset returned by vmaVirtualAllocate() or in VmaVirtualAllocationInfo::offset
+-
+-\section virtual_allocator_statistics Statistics
+-
+-You can obtain statistics of a virtual block using vmaGetVirtualBlockStatistics()
+-(to get brief statistics that are fast to calculate)
+-or vmaCalculateVirtualBlockStatistics() (to get more detailed statistics, slower to calculate).
+-The functions fill the structures #VmaStatistics and #VmaDetailedStatistics respectively - the same as used by the normal Vulkan memory allocator.
+-Example:
+-
+-\code
+-VmaStatistics stats;
+-vmaGetVirtualBlockStatistics(block, &stats);
+-printf("My virtual block has %llu bytes used by %u virtual allocations\n",
+- stats.allocationBytes, stats.allocationCount);
+-\endcode
+-
+-You can also request a full list of allocations and free regions as a string in JSON format by calling
+-vmaBuildVirtualBlockStatsString().
+-The returned string must later be freed using vmaFreeVirtualBlockStatsString().
+-The format of this string differs from the one returned by the main Vulkan allocator, but it is similar.
+-
+-\section virtual_allocator_additional_considerations Additional considerations
+-
+-The "virtual allocator" functionality is implemented on a level of individual memory blocks.
+-Keeping track of a whole collection of blocks, allocating new ones when out of free space,
+-deleting empty ones, and deciding which one to try first for a new allocation must be implemented by the user.
+-
+-Alternative allocation algorithms are supported, just like in custom pools of the real GPU memory.
+-See enum #VmaVirtualBlockCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT).
+-You can find their description in chapter \ref custom_memory_pools.
+-Allocation strategies are also supported.
+-See enum #VmaVirtualAllocationCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT).
+-
+-The following features are supported only by the allocator of the real GPU memory and not by virtual allocations:
+-buffer-image granularity, `VMA_DEBUG_MARGIN`, `VMA_MIN_ALIGNMENT`.
+-
+-
+-\page debugging_memory_usage Debugging incorrect memory usage
+-
+-If you suspect a bug with memory usage, like usage of uninitialized memory or
+-memory being overwritten out of bounds of an allocation,
+-you can use the debug features of this library to verify this.
+-
+-\section debugging_memory_usage_initialization Memory initialization
+-
+-If you experience a bug with incorrect and nondeterministic data in your program and you suspect that uninitialized memory is being used,
+-you can enable automatic memory initialization to verify this.
+-To do so, define the macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1.
+-
+-\code
+-#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
+-#include "vk_mem_alloc.h"
+-\endcode
+-
+-This initializes the memory of new allocations to the bit pattern `0xDCDCDCDC`.
+-Before an allocation is destroyed, its memory is filled with the bit pattern `0xEFEFEFEF`.
+-Memory is automatically mapped and unmapped if necessary.
+-
+-If you find these values while debugging your program, chances are good that you incorrectly
+-read Vulkan memory that is allocated but not initialized, or that is already freed, respectively.
+-
+-Memory initialization works only with memory types that are `HOST_VISIBLE` and with allocations that can be mapped.
+-It also works with dedicated allocations.
+-
+-\section debugging_memory_usage_margins Margins
+-
+-By default, allocations are laid out in memory blocks next to each other if possible
+-(considering required alignment, `bufferImageGranularity`, and `nonCoherentAtomSize`).
+-
+-![Allocations without margin](../gfx/Margins_1.png)
+-
+-Define the macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce the specified
+-number of bytes as a margin after every allocation.
+-
+-\code
+-#define VMA_DEBUG_MARGIN 16
+-#include "vk_mem_alloc.h"
+-\endcode
+-
+-![Allocations with margin](../gfx/Margins_2.png)
+-
+-If your bug goes away after enabling margins, it may be caused by memory
+-being overwritten outside of allocation boundaries. It is not 100% certain though.
+-A change in application behavior may also be caused by a different order and distribution
+-of allocations across memory blocks after margins are applied.
+-
+-Margins work with all types of memory.
+-
+-A margin is applied only to allocations made out of memory blocks and not to dedicated
+-allocations, which have their own memory block of a specific size.
+-It is thus not applied to allocations made using the #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag
+-or to those automatically decided to be put into dedicated allocations, e.g. due to their
+-large size or because the VK_KHR_dedicated_allocation extension recommended it.
+-
+-Margins appear in [JSON dump](@ref statistics_json_dump) as part of free space.
+-
+-Note that enabling margins increases memory usage and fragmentation.
+-
+-Margins do not apply to \ref virtual_allocator.
+-
+-\section debugging_memory_usage_corruption_detection Corruption detection
+-
+-You can additionally define macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation
+-of contents of the margins.
+-
+-\code
+-#define VMA_DEBUG_MARGIN 16
+-#define VMA_DEBUG_DETECT_CORRUPTION 1
+-#include "vk_mem_alloc.h"
+-\endcode
+-
+-When this feature is enabled, the number of bytes specified as `VMA_DEBUG_MARGIN`
+-(it must be a multiple of 4) after every allocation is filled with a magic number.
+-This idea is also known as a "canary".
+-Memory is automatically mapped and unmapped if necessary.
+-
+-This number is validated automatically when the allocation is destroyed.
+-If it is not equal to the expected value, `VMA_ASSERT()` is executed.
+-It clearly means that either the CPU or the GPU overwrote the memory outside of the boundaries of the allocation,
+-which indicates a serious bug.
+-
+-You can also explicitly request checking the margins of all allocations in all memory blocks
+-that belong to specified memory types by using the function vmaCheckCorruption(),
+-or in memory blocks that belong to a specified custom pool, by using the function
+-vmaCheckPoolCorruption().
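+-
+-A minimal sketch (`UINT32_MAX` here means checking all memory types):
+-
+-\code
+-VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
+-// res == VK_SUCCESS means no corruption was detected.
+-\endcode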
+-
+-Margin validation (corruption detection) works only for memory types that are
+-`HOST_VISIBLE` and `HOST_COHERENT`.
+-
+-
+-\page opengl_interop OpenGL Interop
+-
+-VMA provides some features that help with interoperability with OpenGL.
+-
+-\section opengl_interop_exporting_memory Exporting memory
+-
+-If you want to attach a `VkExportMemoryAllocateInfoKHR` structure to the `pNext` chain of memory allocations made by the library:
+-
+-It is recommended to create \ref custom_memory_pools for such allocations.
+-Define and fill in your `VkExportMemoryAllocateInfoKHR` structure and attach it to VmaPoolCreateInfo::pMemoryAllocateNext
+-while creating the custom pool.
+-Please note that the structure must remain alive and unchanged for the whole lifetime of the #VmaPool,
+-not only while creating it: no copy of the structure is made;
+-its original pointer is used for each allocation instead.
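+-
+-A sketch of such a setup; the handle type is an example - choose the one appropriate for your platform:
+-
+-\code
+-// Must remain alive and unchanged for the whole lifetime of the pool!
+-VkExportMemoryAllocateInfoKHR exportMemAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
+-exportMemAllocInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+-
+-VmaPoolCreateInfo poolCreateInfo = {};
+-poolCreateInfo.memoryTypeIndex = memTypeIndex; // Found e.g. using vmaFindMemoryTypeIndexForBufferInfo().
+-poolCreateInfo.pMemoryAllocateNext = &exportMemAllocInfo;
+-
+-VmaPool pool;
+-VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
+-\endcode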
+-
+-If you want to export all memory allocated by the library from certain memory types,
+-including dedicated allocations and other allocations made from default pools,
+-an alternative solution is to fill in VmaAllocatorCreateInfo::pTypeExternalMemoryHandleTypes.
+-It should point to an array of `VkExternalMemoryHandleTypeFlagsKHR` values to be automatically passed by the library
+-through `VkExportMemoryAllocateInfoKHR` on each allocation made from a specific memory type.
+-Please note that new versions of the library also support dedicated allocations created in custom pools.
+-
+-You should not mix these two methods in a way that would apply both to the same memory type.
+-Otherwise, the `VkExportMemoryAllocateInfoKHR` structure would be attached twice to the `pNext` chain of `VkMemoryAllocateInfo`.
+-
+-
+-\section opengl_interop_custom_alignment Custom alignment
+-
+-Buffers or images exported to a different API like OpenGL may require a different alignment,
+-higher than the one used by the library automatically, queried from functions like `vkGetBufferMemoryRequirements`.
+-To impose such alignment:
+-
+-It is recommended to create \ref custom_memory_pools for such allocations.
+-Set VmaPoolCreateInfo::minAllocationAlignment member to the minimum alignment required for each allocation
+-to be made out of this pool.
+-The alignment actually used will be the maximum of this member and the alignment returned for the specific buffer or image
+-from a function like `vkGetBufferMemoryRequirements`, which is called by VMA automatically.
+-
+-If you want to create a buffer with a specific minimum alignment out of default pools,
+-use special function vmaCreateBufferWithAlignment(), which takes additional parameter `minAlignment`.
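+-
+-A sketch of both approaches, reusing variables from the earlier examples; the 4096 B
+-alignment is an arbitrary example value:
+-
+-\code
+-// Option 1: a custom pool with a minimum alignment for all its allocations.
+-VmaPoolCreateInfo poolCreateInfo = {};
+-poolCreateInfo.memoryTypeIndex = memTypeIndex;
+-poolCreateInfo.minAllocationAlignment = 4096;
+-
+-// Option 2: a single buffer with a custom minimum alignment, from default pools.
+-res = vmaCreateBufferWithAlignment(allocator, &bufCreateInfo, &allocCreateInfo,
+- 4096, &buf, &alloc, nullptr);
+-\endcode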
+-
+-Note that the problem of alignment affects only resources placed inside bigger `VkDeviceMemory` blocks and not dedicated
+-allocations, as these, by definition, always have alignment = 0 because the resource is bound to the beginning of its dedicated block.
+-Contrary to Direct3D 12, Vulkan doesn't have a concept of alignment of the entire memory block passed on its allocation.
+-
+-
+-\page usage_patterns Recommended usage patterns
+-
+-Vulkan gives great flexibility in memory allocation.
+-This chapter shows the most common patterns.
+-
+-See also slides from talk:
+-[Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New)
+-
+-
+-\section usage_patterns_gpu_only GPU-only resource
+-
+-<b>When:</b>
+-Any resources that you frequently write and read on GPU,
+-e.g. images used as color attachments (aka "render targets"), depth-stencil attachments,
+-images/buffers used as storage image/buffer (aka "Unordered Access View (UAV)").
+-
+-<b>What to do:</b>
+-Let the library select the optimal memory type, which will likely have `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
+-
+-\code
+-VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+-imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
+-imgCreateInfo.extent.width = 3840;
+-imgCreateInfo.extent.height = 2160;
+-imgCreateInfo.extent.depth = 1;
+-imgCreateInfo.mipLevels = 1;
+-imgCreateInfo.arrayLayers = 1;
+-imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
+-imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+-imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+-imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+-imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+-
+-VmaAllocationCreateInfo allocCreateInfo = {};
+-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+-allocCreateInfo.priority = 1.0f;
+-
+-VkImage img;
+-VmaAllocation alloc;
+-vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
+-\endcode
+-
+-<b>Also consider:</b>
+-Consider creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
+-especially if they are large or if you plan to destroy and recreate them with different sizes
+-e.g. when display resolution changes.
+-Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later.
+-When the VK_EXT_memory_priority extension is enabled, it is also worth setting a high priority on such allocations
+-to decrease the chance of their being evicted to system memory by the operating system.
+-
+-\section usage_patterns_staging_copy_upload Staging copy for upload
+-
+-<b>When:</b>
+-A "staging" buffer than you want to map and fill from CPU code, then use as a source of transfer
+-to some GPU resource.
+-
+-<b>What to do:</b>
+-Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT.
+-Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`.
+-
+-\code
+-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+-bufCreateInfo.size = 65536;
+-bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+-
+-VmaAllocationCreateInfo allocCreateInfo = {};
+-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+- VMA_ALLOCATION_CREATE_MAPPED_BIT;
+-
+-VkBuffer buf;
+-VmaAllocation alloc;
+-VmaAllocationInfo allocInfo;
+-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+-
+-...
+-
+-memcpy(allocInfo.pMappedData, myData, myDataSize);
+-\endcode
+-
+-<b>Also consider:</b>
+-You can map the allocation using vmaMapMemory() or you can create it as persistently mapped
+-using #VMA_ALLOCATION_CREATE_MAPPED_BIT, as in the example above.
+-
+-
+-\section usage_patterns_readback Readback
+-
+-<b>When:</b>
+-Buffers for data written by or transferred from the GPU that you want to read back on the CPU,
+-e.g. results of some computations.
+-
+-<b>What to do:</b>
+-Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
+-Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
+-and `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
+-
+-\code
+-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+-bufCreateInfo.size = 65536;
+-bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+-
+-VmaAllocationCreateInfo allocCreateInfo = {};
+-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
+- VMA_ALLOCATION_CREATE_MAPPED_BIT;
+-
+-VkBuffer buf;
+-VmaAllocation alloc;
+-VmaAllocationInfo allocInfo;
+-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+-
+-...
+-
+-const float* downloadedData = (const float*)allocInfo.pMappedData;
+-\endcode
+-
+-
+-\section usage_patterns_advanced_data_uploading Advanced data uploading
+-
+-For resources that you frequently write on CPU via mapped pointer and
+-frequently read on GPU e.g. as a uniform buffer (also called "dynamic"), multiple options are possible:
+-
+--# The easiest solution is to have one copy of the resource in `HOST_VISIBLE` memory,
+- even if it means system RAM (not `DEVICE_LOCAL`) on systems with a discrete graphics card,
+- and make the device reach out to that resource directly.
+- - Reads performed by the device will then go through PCI Express bus.
+- The performance of this access may be limited, but it may be fine depending on the size
+- of this resource (whether it is small enough to quickly end up in GPU cache) and the sparsity
+- of access.
+--# On systems with unified memory (e.g. AMD APU or Intel integrated graphics, mobile chips),
+- a memory type may be available that is both `HOST_VISIBLE` (available for mapping) and `DEVICE_LOCAL`
+- (fast to access from the GPU). Then, it is likely the best choice for such type of resource.
+--# Systems with a discrete graphics card and separate video memory may or may not expose
+- a memory type that is both `HOST_VISIBLE` and `DEVICE_LOCAL`, also known as Base Address Register (BAR).
+- If they do, it represents a piece of VRAM (or entire VRAM, if ReBAR is enabled in the motherboard BIOS)
+- that is available to CPU for mapping.
+- - Writes performed by the host to that memory go through PCI Express bus.
+- The performance of these writes may be limited, but it may be fine, especially on PCIe 4.0,
+- as long as rules of using uncached and write-combined memory are followed - only sequential writes and no reads.
+--# Finally, you may need or prefer to create a separate copy of the resource in `DEVICE_LOCAL` memory,
+- a separate "staging" copy in `HOST_VISIBLE` memory and perform an explicit transfer command between them.
+-
+-Thankfully, VMA offers an aid to create and use such resources in the way optimal
+-for the current Vulkan device. To help the library make the best choice,
+-use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT together with
+-#VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT.
+-It will then prefer a memory type that is both `DEVICE_LOCAL` and `HOST_VISIBLE` (integrated memory or BAR),
+-but if no such memory type is available or allocation from it fails
+-(PC graphics cards have only 256 MB of BAR by default, unless ReBAR is supported and enabled in BIOS),
+-it will fall back to `DEVICE_LOCAL` memory for fast GPU access.
+-It is then up to you to detect that the allocation ended up in a memory type that is not `HOST_VISIBLE`,
+-in which case you need to create another "staging" allocation and perform explicit transfers.
+-
+-\code
+-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+-bufCreateInfo.size = 65536;
+-bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+-
+-VmaAllocationCreateInfo allocCreateInfo = {};
+-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+- VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |
+- VMA_ALLOCATION_CREATE_MAPPED_BIT;
+-
+-VkBuffer buf;
+-VmaAllocation alloc;
+-VmaAllocationInfo allocInfo;
+-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+-
+-VkMemoryPropertyFlags memPropFlags;
+-vmaGetAllocationMemoryProperties(allocator, alloc, &memPropFlags);
+-
+-if(memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
+-{
+- // Allocation ended up in a mappable memory and is already mapped - write to it directly.
+-
+- // [Executed in runtime]:
+- memcpy(allocInfo.pMappedData, myData, myDataSize);
+-}
+-else
+-{
+- // Allocation ended up in a non-mappable memory - need to transfer.
+- VkBufferCreateInfo stagingBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+- stagingBufCreateInfo.size = 65536;
+- stagingBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+-
+- VmaAllocationCreateInfo stagingAllocCreateInfo = {};
+- stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+- stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+- VMA_ALLOCATION_CREATE_MAPPED_BIT;
+-
+- VkBuffer stagingBuf;
+- VmaAllocation stagingAlloc;
+- VmaAllocationInfo stagingAllocInfo;
+- vmaCreateBuffer(allocator, &stagingBufCreateInfo, &stagingAllocCreateInfo,
+- &stagingBuf, &stagingAlloc, stagingAllocInfo);
+-
+- // [Executed in runtime]:
+- memcpy(stagingAllocInfo.pMappedData, myData, myDataSize);
+- vmaFlushAllocation(allocator, stagingAlloc, 0, VK_WHOLE_SIZE);
+- //vkCmdPipelineBarrier: VK_ACCESS_HOST_WRITE_BIT --> VK_ACCESS_TRANSFER_READ_BIT
+- VkBufferCopy bufCopy = {
+- 0, // srcOffset
+- 0, // dstOffset
+- myDataSize }; // size
+- vkCmdCopyBuffer(cmdBuf, stagingBuf, buf, 1, &bufCopy);
+-}
+-\endcode
+-
+-\section usage_patterns_other_use_cases Other use cases
+-
+-Here are some other, less obvious use cases and their recommended settings:
+-
+-- An image that is used only as transfer source and destination, but it should stay on the device,
+- as it is used to temporarily store a copy of some texture, e.g. from the current to the next frame,
+- for temporal antialiasing or other temporal effects.
+- - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
+- - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO
+-- An image that is used only as transfer source and destination, but it should be placed
+- in the system RAM even though it doesn't need to be mapped, because it serves as a "swap" copy to evict
+- least recently used textures from VRAM.
+- - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
+- - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_HOST,
+- as VMA needs a hint here to differentiate from the previous case.
+-- A buffer that you want to map and write from the CPU, directly read from the GPU
+- (e.g. as a uniform or vertex buffer), but you have a clear preference to place it in device or
+- host memory due to its large size.
+- - Use `VkBufferCreateInfo::usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT`
+- - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST
+- - Use VmaAllocationCreateInfo::flags = #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT
+-
+-
+-\page configuration Configuration
+-
+-Please check "CONFIGURATION SECTION" in the code to find macros that you can define
+-before each include of this file or change directly in this file to provide
+-your own implementation of basic facilities like assert, `min()` and `max()` functions,
+-mutexes, atomics, etc.
+-The library uses its own implementation of containers by default, but you can switch to using
+-STL containers instead.
+-
+-For example, define `VMA_ASSERT(expr)` before including the library to provide
+-a custom implementation of the assertion, compatible with your project.
+-By default it is defined to the standard C `assert(expr)` in the `_DEBUG` configuration
+-and empty otherwise.
+-
+-\section config_Vulkan_functions Pointers to Vulkan functions
+-
+-There are multiple ways to import pointers to Vulkan functions in the library.
+-In the simplest case you don't need to do anything.
+-If the compilation or linking of your program or the initialization of the #VmaAllocator
+-doesn't work for you, you can try to reconfigure it.
+-
+-First, the allocator tries to fetch pointers to Vulkan functions linked statically,
+-like this:
+-
+-\code
+-m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
+-\endcode
+-
+-If you want to disable this feature, set configuration macro: `#define VMA_STATIC_VULKAN_FUNCTIONS 0`.
+-
+-Second, you can provide the pointers yourself by setting member VmaAllocatorCreateInfo::pVulkanFunctions.
+-You can fetch them e.g. using functions `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` or
+-by using a helper library like [volk](https://github.com/zeux/volk).
+-
+-Third, VMA tries to fetch the remaining pointers that are still null by calling
+-`vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` on its own.
+-You only need to fill in VmaVulkanFunctions::vkGetInstanceProcAddr and VmaVulkanFunctions::vkGetDeviceProcAddr.
+-Other pointers will be fetched automatically.
+-If you want to disable this feature, set configuration macro: `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0`.
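+-
+-A minimal sketch of this configuration, e.g. when using a loader like volk:
+-
+-\code
+-VmaVulkanFunctions vulkanFunctions = {};
+-vulkanFunctions.vkGetInstanceProcAddr = vkGetInstanceProcAddr;
+-vulkanFunctions.vkGetDeviceProcAddr = vkGetDeviceProcAddr;
+-
+-VmaAllocatorCreateInfo allocatorCreateInfo = {};
+-allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
+-// Fill other members: instance, physicalDevice, device...
+-\endcode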
+-
+-Finally, all the function pointers required by the library (considering the selected
+-Vulkan version and enabled extensions) are checked with `VMA_ASSERT` to ensure they are not null.
+-
+-
+-\section custom_memory_allocator Custom host memory allocator
+-
+-If you use a custom allocator for CPU memory rather than the default C++ `operator new`
+-and `operator delete`, you can make this library use your allocator as well
+-by filling the optional member VmaAllocatorCreateInfo::pAllocationCallbacks. These
+-functions will be passed to Vulkan, as well as used by the library itself to
+-make any CPU-side allocations.
+-
+-\section allocation_callbacks Device memory allocation callbacks
+-
+-The library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally.
+-You can set up callbacks to be informed about these calls, e.g. for the purpose
+-of gathering some statistics. To do it, fill the optional member
+-VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
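+-
+-A sketch of such callbacks that simply log every call; the function names here are
+-hypothetical:
+-
+-\code
+-static void VKAPI_PTR MyAllocateCallback(VmaAllocator allocator, uint32_t memoryType,
+- VkDeviceMemory memory, VkDeviceSize size, void* pUserData)
+-{
+- printf("Allocated %llu B from memory type %u.\n", size, memoryType);
+-}
+-static void VKAPI_PTR MyFreeCallback(VmaAllocator allocator, uint32_t memoryType,
+- VkDeviceMemory memory, VkDeviceSize size, void* pUserData)
+-{
+- printf("Freed %llu B from memory type %u.\n", size, memoryType);
+-}
+-
+-VmaDeviceMemoryCallbacks deviceMemoryCallbacks = {};
+-deviceMemoryCallbacks.pfnAllocate = MyAllocateCallback;
+-deviceMemoryCallbacks.pfnFree = MyFreeCallback;
+-
+-VmaAllocatorCreateInfo allocatorCreateInfo = {};
+-allocatorCreateInfo.pDeviceMemoryCallbacks = &deviceMemoryCallbacks;
+-\endcode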
+-
+-\section heap_memory_limit Device heap memory limit
+-
+-When device memory of a certain heap runs out of free space, new allocations may
+-fail (returning an error code) or they may succeed, silently pushing some existing
+-memory blocks from GPU VRAM to system RAM (which degrades performance). This
+-behavior is implementation-dependent - it depends on the GPU vendor and graphics
+-driver.
+-
+-On AMD cards it can be controlled while creating the Vulkan device object by using the
+-VK_AMD_memory_overallocation_behavior extension, if available.
+-
+-Alternatively, if you want to test how your program behaves with a limited amount of Vulkan device
+-memory available without switching your graphics card to one that really has
+-smaller VRAM, you can use a feature of this library intended for this purpose.
+-To do it, fill the optional member VmaAllocatorCreateInfo::pHeapSizeLimit.
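+-
+-A sketch limiting heap 0 to 1 GiB; use `VK_WHOLE_SIZE` for heaps you don't want to limit:
+-
+-\code
+-VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
+-for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
+- heapSizeLimit[i] = VK_WHOLE_SIZE; // No limit.
+-heapSizeLimit[0] = 1ull * 1024 * 1024 * 1024; // Limit heap 0 to 1 GiB.
+-
+-VmaAllocatorCreateInfo allocatorCreateInfo = {};
+-allocatorCreateInfo.pHeapSizeLimit = heapSizeLimit;
+-\endcode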
+-
+-
+-
+-\page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation
+-
+-VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve
+-performance on some GPUs. It augments the Vulkan API with the possibility to query the
+-driver whether it prefers a particular buffer or image to have its own, dedicated
+-allocation (separate `VkDeviceMemory` block) for better efficiency - to be able
+-to do some internal optimizations. The extension is supported by this library.
+-It will be used automatically when enabled.
+-
+-It has been promoted to core Vulkan 1.1, so if you use an eligible Vulkan version
+-and inform VMA about it by setting VmaAllocatorCreateInfo::vulkanApiVersion,
+-you are all set.
+-
+-Otherwise, if you want to use it as an extension:
+-
+-1. When creating a Vulkan device, check if the following 2 device extensions are
+-supported (call `vkEnumerateDeviceExtensionProperties()`).
+-If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).
+-
+-- VK_KHR_get_memory_requirements2
+-- VK_KHR_dedicated_allocation
+-
+-If you enabled these extensions:
+-
+-2. Use the #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating
+-your #VmaAllocator to inform the library that you enabled the required extensions
+-and you want the library to use them.
+-
+-\code
+-allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
+-
+-vmaCreateAllocator(&allocatorInfo, &allocator);
+-\endcode
+-
+-That is all. The extension will be automatically used whenever you create a
+-buffer using vmaCreateBuffer() or an image using vmaCreateImage().
+-
+-When using the extension together with the Vulkan Validation Layer, you will receive
+-warnings like this:
+-
+-_vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer._
+-
+-It is OK - you should just ignore it. It happens because you use the function
+-`vkGetBufferMemoryRequirements2KHR()` instead of the standard
+-`vkGetBufferMemoryRequirements()`, while the validation layer seems to be
+-unaware of it.
+-
+-To learn more about this extension, see:
+-
+-- [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap50.html#VK_KHR_dedicated_allocation)
+-- [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)
+-
+-
+-
+-\page vk_ext_memory_priority VK_EXT_memory_priority
+-
+-VK_EXT_memory_priority is a device extension that allows passing an additional "priority"
+-value to Vulkan memory allocations, which the implementation may use to prefer certain
+-buffers and images that are critical for performance to stay in device-local memory
+-in cases when the memory is over-subscribed, while some others may be moved to system memory.
+-
+-VMA offers convenient usage of this extension.
+-If you enable it, you can pass a "priority" parameter when creating allocations or custom pools,
+-and the library automatically passes the value to Vulkan using this extension.
+-
+-If you want to use this extension in connection with VMA, follow these steps:
+-
+-\section vk_ext_memory_priority_initialization Initialization
+-
+-1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
+-Check if the extension is supported - if returned array of `VkExtensionProperties` contains "VK_EXT_memory_priority".
+-
+-2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
+-Attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
+-Check if the device feature is really supported - check if `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority` is true.
+-
+-3) While creating device with `vkCreateDevice`, enable this extension - add "VK_EXT_memory_priority"
+-to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
+-
+-4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
+-Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
+-Enable this device feature - attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to
+-`VkPhysicalDeviceFeatures2::pNext` chain and set its member `memoryPriority` to `VK_TRUE`.
+-
+-5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
+-have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
+-to VmaAllocatorCreateInfo::flags.
+-
+-\section vk_ext_memory_priority_usage Usage
+-
+-When using this extension, you should initialize the following members:
+-
+-- VmaAllocationCreateInfo::priority when creating a dedicated allocation with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+-- VmaPoolCreateInfo::priority when creating a custom pool.
+-
+-It should be a floating-point value between `0.0f` and `1.0f`, where the recommended default is `0.5f`.
+-Memory allocated with a higher value can be treated by the Vulkan implementation as higher priority,
+-so it has a lower chance of being pushed out to system memory, which would degrade performance.
+-
+-It might be a good idea to create performance-critical resources like color-attachment or depth-stencil images
+-as dedicated and set a high priority on them. For example:
+-
+-\code
+-VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+-imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
+-imgCreateInfo.extent.width = 3840;
+-imgCreateInfo.extent.height = 2160;
+-imgCreateInfo.extent.depth = 1;
+-imgCreateInfo.mipLevels = 1;
+-imgCreateInfo.arrayLayers = 1;
+-imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
+-imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+-imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+-imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+-imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+-
+-VmaAllocationCreateInfo allocCreateInfo = {};
+-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+-allocCreateInfo.priority = 1.0f;
+-
+-VkImage img;
+-VmaAllocation alloc;
+-vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
+-\endcode
+-
+-The `priority` member is ignored in the following situations:
+-
+-- Allocations created in custom pools: They inherit the priority, along with all other allocation parameters,
+- from the parameters passed in #VmaPoolCreateInfo when the pool was created.
+-- Allocations created in default pools: They inherit the priority from the parameters
+- VMA used when creating default pools, which means `priority == 0.5f`.
+-
+-
+-\page vk_amd_device_coherent_memory VK_AMD_device_coherent_memory
+-
+-VK_AMD_device_coherent_memory is a device extension that enables access to
+-additional memory types with `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and
+-`VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. It is useful mostly for
+-allocation of buffers intended for writing "breadcrumb markers" in between passes
+-or draw calls, which in turn are useful for debugging GPU crash/hang/TDR cases.
+-
+-When the extension is available but has not been enabled, the Vulkan physical device
+-still exposes those memory types, but their usage is forbidden. VMA automatically
+-takes care of that - it returns `VK_ERROR_FEATURE_NOT_PRESENT` when an attempt
+-to allocate memory of such a type is made.
+-
+-If you want to use this extension in connection with VMA, follow these steps:
+-
+-\section vk_amd_device_coherent_memory_initialization Initialization
+-
+-1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
+-Check if the extension is supported - if returned array of `VkExtensionProperties` contains "VK_AMD_device_coherent_memory".
+-
+-2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
+-Attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
+-Check if the device feature is really supported - check if `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true.
+-
+-3) While creating device with `vkCreateDevice`, enable this extension - add "VK_AMD_device_coherent_memory"
+-to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
+-
+-4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
+-Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
+-Enable this device feature - attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to
+-`VkPhysicalDeviceFeatures2::pNext` and set its member `deviceCoherentMemory` to `VK_TRUE`.
+-
+-5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
+-have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
+-to VmaAllocatorCreateInfo::flags, as sketched below.
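+-
+-For example, a condensed, illustrative sketch of steps 2), 4), and 5) - `physicalDevice`
+-and `allocatorCreateInfo` here are assumed to come from your own initialization code:
+-
+-\code
+-VkPhysicalDeviceCoherentMemoryFeaturesAMD coherentMemoryFeatures = {
+-    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD };
+-VkPhysicalDeviceFeatures2 features2 = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 };
+-features2.pNext = &coherentMemoryFeatures;
+-vkGetPhysicalDeviceFeatures2(physicalDevice, &features2);
+-
+-if(coherentMemoryFeatures.deviceCoherentMemory == VK_TRUE)
+-{
+-    // Enable the extension in VkDeviceCreateInfo::ppEnabledExtensionNames and
+-    // pass features2 as VkDeviceCreateInfo::pNext to vkCreateDevice, then:
+-    allocatorCreateInfo.flags |= VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT;
+-}
+-\endcode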
+-
+-\section vk_amd_device_coherent_memory_usage Usage
+-
+-After following the steps described above, you can create VMA allocations and custom pools
+-out of the special `DEVICE_COHERENT` and `DEVICE_UNCACHED` memory types on eligible
+-devices. There are multiple ways to do it, for example:
+-
+-- You can request or prefer to allocate out of such memory types by adding
+- `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags
+- or VmaAllocationCreateInfo::preferredFlags. Those flags can be freely mixed with
+- other ways of \ref choosing_memory_type, like setting VmaAllocationCreateInfo::usage.
+-- If you manually found a memory type index to use for this purpose, force allocation
+- from this specific index by setting VmaAllocationCreateInfo::memoryTypeBits `= 1u << index`.
+-
+-\section vk_amd_device_coherent_memory_more_information More information
+-
+-To learn more about this extension, see [VK_AMD_device_coherent_memory in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VK_AMD_device_coherent_memory.html)
+-
+-Example use of this extension can be found in the code of the sample and test suite
+-accompanying this library.
+-
+-
+-\page enabling_buffer_device_address Enabling buffer device address
+-
+-The device extension VK_KHR_buffer_device_address
+-allows fetching a raw GPU pointer to a buffer and passing it for use in shader code.
+-It has been promoted to core Vulkan 1.2.
+-
+-If you want to use this feature in connection with VMA, follow these steps:
+-
+-\section enabling_buffer_device_address_initialization Initialization
+-
+-1) (For Vulkan version < 1.2) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
+-Check if the extension is supported - whether the returned array of `VkExtensionProperties` contains
+-"VK_KHR_buffer_device_address".
+-
+-2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
+-Attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
+-Check if the device feature is really supported - check if `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress` is true.
+-
+-3) (For Vulkan version < 1.2) While creating device with `vkCreateDevice`, enable this extension - add
+-"VK_KHR_buffer_device_address" to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
+-
+-4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
+-Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
+-Enable this device feature - attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to
+-`VkPhysicalDeviceFeatures2::pNext` and set its member `bufferDeviceAddress` to `VK_TRUE`.
+-
+-5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
+-have enabled this feature - add #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
+-to VmaAllocatorCreateInfo::flags, as sketched below.
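+-
+-For example, a condensed, illustrative sketch of steps 2), 4), and 5) - `physicalDevice`
+-and `allocatorCreateInfo` here are assumed to come from your own initialization code:
+-
+-\code
+-VkPhysicalDeviceBufferDeviceAddressFeatures bdaFeatures = {
+-    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES };
+-VkPhysicalDeviceFeatures2 features2 = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 };
+-features2.pNext = &bdaFeatures;
+-vkGetPhysicalDeviceFeatures2(physicalDevice, &features2);
+-
+-if(bdaFeatures.bufferDeviceAddress == VK_TRUE)
+-{
+-    // Pass features2 as VkDeviceCreateInfo::pNext to vkCreateDevice, then:
+-    allocatorCreateInfo.flags |= VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT;
+-}
+-\endcode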
+-
+-\section enabling_buffer_device_address_usage Usage
+-
+-After following the steps described above, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*` using VMA.
+-The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT*` to
+-allocated memory blocks wherever it might be needed.
+-
+-Please note that the library supports only `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*`.
+-The second part of this functionality related to "capture and replay" is not supported,
+-as it is intended for usage in debugging tools like RenderDoc, not in everyday Vulkan usage.
+-
+-\section enabling_buffer_device_address_more_information More information
+-
+-To learn more about this extension, see [VK_KHR_buffer_device_address in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap46.html#VK_KHR_buffer_device_address)
+-
+-Example use of this extension can be found in the code of the sample and test suite
+-accompanying this library.
+-
+-\page general_considerations General considerations
+-
+-\section general_considerations_thread_safety Thread safety
+-
+-- The library has no global state, so separate #VmaAllocator objects can be used
+- independently.
+- There should be no need to create multiple such objects though - one per `VkDevice` is enough.
+-- By default, all calls to functions that take #VmaAllocator as first parameter
+- are safe to call from multiple threads simultaneously because they are
+- synchronized internally when needed.
+- This includes allocation and deallocation from the default memory pool, as well as custom #VmaPool.
+-- When the allocator is created with #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
+- flag, calls to functions that take such #VmaAllocator object must be
+- synchronized externally.
+-- Access to a #VmaAllocation object must be externally synchronized. For example,
+- you must not call vmaGetAllocationInfo() and vmaMapMemory() from different
+- threads at the same time if you pass the same #VmaAllocation object to these
+- functions.
+-- #VmaVirtualBlock is not safe to be used from multiple threads simultaneously.
+-
+-\section general_considerations_versioning_and_compatibility Versioning and compatibility
+-
+-The library uses [**Semantic Versioning**](https://semver.org/),
+-which means version numbers follow the convention Major.Minor.Patch (e.g. 2.3.0), where:
+-
+-- Incremented Patch version means a release is backward- and forward-compatible,
+- introducing only some internal improvements, bug fixes, optimizations etc.
+- or changes that are out of scope of the official API described in this documentation.
+-- Incremented Minor version means a release is backward-compatible,
+- so existing code that uses the library should continue to work, while some new
+- symbols could have been added: new structures, functions, new values in existing
+- enums and bit flags, new structure members, but not new function parameters.
+-- Incremented Major version means a release could break some backward compatibility.
+-
+-All changes between official releases are documented in file "CHANGELOG.md".
+-
+-\warning Backward compatibility is considered on the level of C++ source code, not binary linkage.
+-Adding new members to existing structures is treated as backward compatible if initializing
+-the new members to binary zero results in the old behavior.
+-You should always fully initialize all library structures to zeros and not rely on their
+-exact binary size.
+-
+-\section general_considerations_validation_layer_warnings Validation layer warnings
+-
+-When using this library, you may encounter the following types of warnings issued by
+-the Vulkan validation layer. They don't necessarily indicate a bug, so you may need
+-to just ignore them.
+-
+-- *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.*
+- - It happens when VK_KHR_dedicated_allocation extension is enabled.
+- `vkGetBufferMemoryRequirements2KHR` function is used instead, while validation layer seems to be unaware of it.
+-- *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.*
+- - It happens when you map a buffer or image, because the library maps the entire
+- `VkDeviceMemory` block, where different types of images and buffers may end
+- up together, especially on GPUs with unified memory like Intel.
+-- *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.*
+- - It may happen when you use [defragmentation](@ref defragmentation).
+-
+-\section general_considerations_allocation_algorithm Allocation algorithm
+-
+-The library uses the following algorithm for allocation, in order:
+-
+--# Try to find free range of memory in existing blocks.
+--# If failed, try to create a new block of `VkDeviceMemory`, with preferred block size.
+--# If failed, try to create such a block with size / 2, size / 4, size / 8.
+--# If failed, try to allocate separate `VkDeviceMemory` for this allocation,
+- just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+--# If failed, choose other memory type that meets the requirements specified in
+- VmaAllocationCreateInfo and go to point 1.
+--# If failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+-
+-\section general_considerations_features_not_supported Features not supported
+-
+-Features deliberately excluded from the scope of this library:
+-
+--# **Data transfer.** Uploading (streaming) and downloading data of buffers and images
+- between CPU and GPU memory and related synchronization is the responsibility of the user.
+- Defining some "texture" object that would automatically stream its data from a
+- staging copy in CPU memory to GPU memory would rather be a feature of another,
+- higher-level library implemented on top of VMA.
+- VMA doesn't record any commands to a `VkCommandBuffer`. It just allocates memory.
+--# **Recreation of buffers and images.** Although the library has functions for
+- buffer and image creation: vmaCreateBuffer(), vmaCreateImage(), you need to
+- recreate these objects yourself after defragmentation. That is because the big
+- structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in
+- #VmaAllocation object.
+--# **Handling CPU memory allocation failures.** When dynamically creating small C++
+- objects in CPU memory (not Vulkan memory), allocation failures are not checked
+- and handled gracefully, because that would complicate code significantly and
+- is usually not needed in desktop PC applications anyway.
+- Success of an allocation is just checked with an assert.
+--# **Code free of any compiler warnings.** Maintaining the library to compile and
+- work correctly on so many different platforms is hard enough. Being free of
+- any warnings, on any version of any compiler, is simply not feasible.
+- There are many preprocessor macros that make some variables unused, function parameters unreferenced,
+- or conditional expressions constant in some configurations.
+- The code of this library should not be bigger or more complicated just to silence these warnings.
+- It is recommended to disable such warnings instead.
+--# This is a C++ library with C interface. **Bindings or ports to any other programming languages** are welcome as external projects but
+- are not going to be included into this repository.
+-*/
++//
++// Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved.
++//
++// Permission is hereby granted, free of charge, to any person obtaining a copy
++// of this software and associated documentation files (the "Software"), to deal
++// in the Software without restriction, including without limitation the rights
++// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++// copies of the Software, and to permit persons to whom the Software is
++// furnished to do so, subject to the following conditions:
++//
++// The above copyright notice and this permission notice shall be included in
++// all copies or substantial portions of the Software.
++//
++// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
++// THE SOFTWARE.
++//
++
++#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
++#define AMD_VULKAN_MEMORY_ALLOCATOR_H
++
++/** \mainpage Vulkan Memory Allocator
++
++<b>Version 3.1.0-development</b>
++
++Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved. \n
++License: MIT
++
++<b>API documentation divided into groups:</b> [Modules](modules.html)
++
++\section main_table_of_contents Table of contents
++
++- <b>User guide</b>
++ - \subpage quick_start
++ - [Project setup](@ref quick_start_project_setup)
++ - [Initialization](@ref quick_start_initialization)
++ - [Resource allocation](@ref quick_start_resource_allocation)
++ - \subpage choosing_memory_type
++ - [Usage](@ref choosing_memory_type_usage)
++ - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags)
++ - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types)
++ - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools)
++ - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations)
++ - \subpage memory_mapping
++ - [Mapping functions](@ref memory_mapping_mapping_functions)
++ - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory)
++ - [Cache flush and invalidate](@ref memory_mapping_cache_control)
++ - \subpage staying_within_budget
++ - [Querying for budget](@ref staying_within_budget_querying_for_budget)
++ - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage)
++ - \subpage resource_aliasing
++ - \subpage custom_memory_pools
++ - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex)
++ - [Linear allocation algorithm](@ref linear_algorithm)
++ - [Free-at-once](@ref linear_algorithm_free_at_once)
++ - [Stack](@ref linear_algorithm_stack)
++ - [Double stack](@ref linear_algorithm_double_stack)
++ - [Ring buffer](@ref linear_algorithm_ring_buffer)
++ - \subpage defragmentation
++ - \subpage statistics
++ - [Numeric statistics](@ref statistics_numeric_statistics)
++ - [JSON dump](@ref statistics_json_dump)
++ - \subpage allocation_annotation
++ - [Allocation user data](@ref allocation_user_data)
++ - [Allocation names](@ref allocation_names)
++ - \subpage virtual_allocator
++ - \subpage debugging_memory_usage
++ - [Memory initialization](@ref debugging_memory_usage_initialization)
++ - [Margins](@ref debugging_memory_usage_margins)
++ - [Corruption detection](@ref debugging_memory_usage_corruption_detection)
++ - \subpage opengl_interop
++- \subpage usage_patterns
++ - [GPU-only resource](@ref usage_patterns_gpu_only)
++ - [Staging copy for upload](@ref usage_patterns_staging_copy_upload)
++ - [Readback](@ref usage_patterns_readback)
++ - [Advanced data uploading](@ref usage_patterns_advanced_data_uploading)
++ - [Other use cases](@ref usage_patterns_other_use_cases)
++- \subpage configuration
++ - [Pointers to Vulkan functions](@ref config_Vulkan_functions)
++ - [Custom host memory allocator](@ref custom_memory_allocator)
++ - [Device memory allocation callbacks](@ref allocation_callbacks)
++ - [Device heap memory limit](@ref heap_memory_limit)
++- <b>Extension support</b>
++ - \subpage vk_khr_dedicated_allocation
++ - \subpage enabling_buffer_device_address
++ - \subpage vk_ext_memory_priority
++ - \subpage vk_amd_device_coherent_memory
++- \subpage general_considerations
++ - [Thread safety](@ref general_considerations_thread_safety)
++ - [Versioning and compatibility](@ref general_considerations_versioning_and_compatibility)
++ - [Validation layer warnings](@ref general_considerations_validation_layer_warnings)
++ - [Allocation algorithm](@ref general_considerations_allocation_algorithm)
++ - [Features not supported](@ref general_considerations_features_not_supported)
++
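++For the full set of steps needed to set up and use the library, see \ref quick_start.
++As a quick taste of the API, minimal initialization might look like this (an
++illustrative sketch; `physicalDevice`, `device`, and `instance` are assumed to be
++valid handles created by your own Vulkan setup code):
++
++\code
++VmaAllocatorCreateInfo allocatorCreateInfo = {};
++allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;
++allocatorCreateInfo.physicalDevice = physicalDevice;
++allocatorCreateInfo.device = device;
++allocatorCreateInfo.instance = instance;
++
++VmaAllocator allocator;
++vmaCreateAllocator(&allocatorCreateInfo, &allocator);
++\endcode
++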
++\section main_see_also See also
++
++- [**Product page on GPUOpen**](https://gpuopen.com/gaming-product/vulkan-memory-allocator/)
++- [**Source repository on GitHub**](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator)
++
++\defgroup group_init Library initialization
++
++\brief API elements related to the initialization and management of the entire library, especially #VmaAllocator object.
++
++\defgroup group_alloc Memory allocation
++
++\brief API elements related to the allocation, deallocation, and management of Vulkan memory, buffers, images.
++Most basic ones being: vmaCreateBuffer(), vmaCreateImage().
++
++\defgroup group_virtual Virtual allocator
++
++\brief API elements related to the mechanism of \ref virtual_allocator - using the core allocation algorithm
++for user-defined purpose without allocating any real GPU memory.
++
++\defgroup group_stats Statistics
++
++\brief API elements that query current status of the allocator, from memory usage, budget, to full dump of the internal state in JSON format.
++See documentation chapter: \ref statistics.
++*/
++
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#include <vulkan/vulkan.h>
++
++#if !defined(VMA_VULKAN_VERSION)
++ #if defined(VK_VERSION_1_3)
++ #define VMA_VULKAN_VERSION 1003000
++ #elif defined(VK_VERSION_1_2)
++ #define VMA_VULKAN_VERSION 1002000
++ #elif defined(VK_VERSION_1_1)
++ #define VMA_VULKAN_VERSION 1001000
++ #else
++ #define VMA_VULKAN_VERSION 1000000
++ #endif
++#endif
++
++#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
++ extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
++ extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
++ extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
++ extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
++ extern PFN_vkAllocateMemory vkAllocateMemory;
++ extern PFN_vkFreeMemory vkFreeMemory;
++ extern PFN_vkMapMemory vkMapMemory;
++ extern PFN_vkUnmapMemory vkUnmapMemory;
++ extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
++ extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
++ extern PFN_vkBindBufferMemory vkBindBufferMemory;
++ extern PFN_vkBindImageMemory vkBindImageMemory;
++ extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
++ extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
++ extern PFN_vkCreateBuffer vkCreateBuffer;
++ extern PFN_vkDestroyBuffer vkDestroyBuffer;
++ extern PFN_vkCreateImage vkCreateImage;
++ extern PFN_vkDestroyImage vkDestroyImage;
++ extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
++ #if VMA_VULKAN_VERSION >= 1001000
++ extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
++ extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
++ extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
++ extern PFN_vkBindImageMemory2 vkBindImageMemory2;
++ extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
++ #endif // #if VMA_VULKAN_VERSION >= 1001000
++#endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES
++
++#if !defined(VMA_DEDICATED_ALLOCATION)
++ #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
++ #define VMA_DEDICATED_ALLOCATION 1
++ #else
++ #define VMA_DEDICATED_ALLOCATION 0
++ #endif
++#endif
++
++#if !defined(VMA_BIND_MEMORY2)
++ #if VK_KHR_bind_memory2
++ #define VMA_BIND_MEMORY2 1
++ #else
++ #define VMA_BIND_MEMORY2 0
++ #endif
++#endif
++
++#if !defined(VMA_MEMORY_BUDGET)
++ #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
++ #define VMA_MEMORY_BUDGET 1
++ #else
++ #define VMA_MEMORY_BUDGET 0
++ #endif
++#endif
++
++// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers.
++#if !defined(VMA_BUFFER_DEVICE_ADDRESS)
++ #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
++ #define VMA_BUFFER_DEVICE_ADDRESS 1
++ #else
++ #define VMA_BUFFER_DEVICE_ADDRESS 0
++ #endif
++#endif
++
++// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers.
++#if !defined(VMA_MEMORY_PRIORITY)
++ #if VK_EXT_memory_priority
++ #define VMA_MEMORY_PRIORITY 1
++ #else
++ #define VMA_MEMORY_PRIORITY 0
++ #endif
++#endif
++
++// Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers.
++#if !defined(VMA_EXTERNAL_MEMORY)
++ #if VK_KHR_external_memory
++ #define VMA_EXTERNAL_MEMORY 1
++ #else
++ #define VMA_EXTERNAL_MEMORY 0
++ #endif
++#endif
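++
++// Any of the feature macros above can also be pre-defined by the user before
++// including this header to override the auto-detection based on the Vulkan
++// headers - for example (an illustrative sketch, not required for normal use):
++// #define VMA_MEMORY_BUDGET 0 // opt out of VK_EXT_memory_budget integration
++// #include "vk_mem_alloc.h"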
++
++// Define these macros to decorate all public functions with additional code,
++// before and after returned type, appropriately. This may be useful for
++// exporting the functions when compiling VMA as a separate library. Example:
++// #define VMA_CALL_PRE __declspec(dllexport)
++// #define VMA_CALL_POST __cdecl
++#ifndef VMA_CALL_PRE
++ #define VMA_CALL_PRE
++#endif
++#ifndef VMA_CALL_POST
++ #define VMA_CALL_POST
++#endif
++
++// Define this macro to decorate pNext pointers with an attribute specifying the Vulkan
++// structure that will be extended via the pNext chain.
++#ifndef VMA_EXTENDS_VK_STRUCT
++ #define VMA_EXTENDS_VK_STRUCT(vkStruct)
++#endif
++
++// Define this macro to decorate pointers with an attribute specifying the
++// length of the array they point to if they are not null.
++//
++// The length may be one of
++// - The name of another parameter in the argument list where the pointer is declared
++// - The name of another member in the struct where the pointer is declared
++// - The name of a member of a struct type, meaning the value of that member in
++// the context of the call. For example
++// VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount"),
++// this means the number of memory heaps available in the device associated
++// with the VmaAllocator being dealt with.
++#ifndef VMA_LEN_IF_NOT_NULL
++ #define VMA_LEN_IF_NOT_NULL(len)
++#endif
++
++// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
++// see: https://clang.llvm.org/docs/AttributeReference.html#nullable
++#ifndef VMA_NULLABLE
++ #ifdef __clang__
++ #define VMA_NULLABLE _Nullable
++ #else
++ #define VMA_NULLABLE
++ #endif
++#endif
++
++// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
++// see: https://clang.llvm.org/docs/AttributeReference.html#nonnull
++#ifndef VMA_NOT_NULL
++ #ifdef __clang__
++ #define VMA_NOT_NULL _Nonnull
++ #else
++ #define VMA_NOT_NULL
++ #endif
++#endif
++
++// If non-dispatchable handles are represented as pointers then we can give
++// them nullability annotations.
++#ifndef VMA_NOT_NULL_NON_DISPATCHABLE
++ #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
++ #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
++ #else
++ #define VMA_NOT_NULL_NON_DISPATCHABLE
++ #endif
++#endif
++
++#ifndef VMA_NULLABLE_NON_DISPATCHABLE
++ #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
++ #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
++ #else
++ #define VMA_NULLABLE_NON_DISPATCHABLE
++ #endif
++#endif
++
++#ifndef VMA_STATS_STRING_ENABLED
++ #define VMA_STATS_STRING_ENABLED 1
++#endif
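++
++// When VMA_STATS_STRING_ENABLED is 1, the JSON dump API is available - for
++// example (an illustrative sketch; `allocator` is assumed to be a valid
++// #VmaAllocator):
++// char* statsString = NULL;
++// vmaBuildStatsString(allocator, &statsString, VK_TRUE);
++// // ... inspect or save the JSON ...
++// vmaFreeStatsString(allocator, statsString);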
++
++////////////////////////////////////////////////////////////////////////////////
++////////////////////////////////////////////////////////////////////////////////
++//
++// INTERFACE
++//
++////////////////////////////////////////////////////////////////////////////////
++////////////////////////////////////////////////////////////////////////////////
++
++// Sections for managing code placement in file, only for development purposes e.g. for convenient folding inside an IDE.
++#ifndef _VMA_ENUM_DECLARATIONS
++
++/**
++\addtogroup group_init
++@{
++*/
++
++/// Flags for created #VmaAllocator.
++typedef enum VmaAllocatorCreateFlagBits
++{
++ /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you.
++
++ Using this flag may increase performance because internal mutexes are not used.
++ */
++ VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
++ /** \brief Enables usage of VK_KHR_dedicated_allocation extension.
++
++ The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
++ When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
++
++ Using this extension will automatically allocate dedicated blocks of memory for
++ some buffers and images instead of suballocating place for them out of bigger
++ memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
++ flag) when it is recommended by the driver. It may improve performance on some
++ GPUs.
++
++ You may set this flag only if you found out that the following device extensions are
++ supported, you enabled them while creating Vulkan device passed as
++ VmaAllocatorCreateInfo::device, and you want them to be used internally by this
++ library:
++
++ - VK_KHR_get_memory_requirements2 (device extension)
++ - VK_KHR_dedicated_allocation (device extension)
++
++ When this flag is set, you can experience following warnings reported by Vulkan
++ validation layer. You can ignore them.
++
++ > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer.
++ */
++ VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
++ /**
++ Enables usage of VK_KHR_bind_memory2 extension.
++
++ The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
++ When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
++
++ You may set this flag only if you found out that this device extension is supported,
++ you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
++ and you want it to be used internally by this library.
++
++ The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`,
++ which allow passing a chain of `pNext` structures while binding.
++ This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2().
++ */
++ VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004,
++ /**
++ Enables usage of VK_EXT_memory_budget extension.
++
++ You may set this flag only if you found out that this device extension is supported,
++ you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
++ and you want it to be used internally by this library, along with another instance extension
++ VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted).
++
++ The extension provides a query for current memory usage and budget, which will probably
++ be more accurate than an estimation used by the library otherwise.
++ */
++ VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008,
++ /**
++ Enables usage of VK_AMD_device_coherent_memory extension.
++
++ You may set this flag only if you:
++
++ - found out that this device extension is supported and enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
++ - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device,
++ - want it to be used internally by this library.
++
++ The extension and accompanying device feature provide access to memory types with
++ `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags.
++ They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR.
++
++ When the extension is not enabled, such memory types are still enumerated, but their usage is illegal.
++ To protect against this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such a memory type,
++ returning `VK_ERROR_FEATURE_NOT_PRESENT`.
++ */
++ VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010,
++ /**
++ Enables usage of "buffer device address" feature, which allows you to use function
++ `vkGetBufferDeviceAddress*` to get raw GPU pointer to a buffer and pass it for usage inside a shader.
++
++ You may set this flag only if you:
++
++ 1. (For Vulkan version < 1.2) Found as available and enabled device extension
++ VK_KHR_buffer_device_address.
++ This extension is promoted to core Vulkan 1.2.
++ 2. Found as available and enabled device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress`.
++
++ When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA.
++ The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to
++ allocated memory blocks wherever it might be needed.
++
++ For more information, see documentation chapter \ref enabling_buffer_device_address.
++ */
++ VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020,
++ /**
++ Enables usage of VK_EXT_memory_priority extension in the library.
++
++ You may set this flag only if you found available and enabled this device extension,
++ along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`,
++ while creating Vulkan device passed as VmaAllocatorCreateInfo::device.
++
++ When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority
++ are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored.
++
++ A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
++ Larger values are higher priority. The granularity of the priorities is implementation-dependent.
++ It is automatically passed to every call to `vkAllocateMemory` done by the library using structure `VkMemoryPriorityAllocateInfoEXT`.
++ The value to be used for default priority is 0.5.
++ For more details, see the documentation of the VK_EXT_memory_priority extension.
++ */
++ VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040,
++
++ VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
++} VmaAllocatorCreateFlagBits;
++/// See #VmaAllocatorCreateFlagBits.
++typedef VkFlags VmaAllocatorCreateFlags;
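++
++// For example (an illustrative sketch; it assumes the corresponding device
++// extensions and features were found and enabled beforehand):
++// VmaAllocatorCreateInfo allocatorCreateInfo = {};
++// allocatorCreateInfo.flags = VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT |
++//     VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT;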
++
++/** @} */
++
++/**
++\addtogroup group_alloc
++@{
++*/
++
++/// \brief Intended usage of the allocated memory.
++typedef enum VmaMemoryUsage
++{
++ /** No intended memory usage specified.
++ Use other members of VmaAllocationCreateInfo to specify your requirements.
++ */
++ VMA_MEMORY_USAGE_UNKNOWN = 0,
++ /**
++ \deprecated Obsolete, preserved for backward compatibility.
++ Prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
++ */
++ VMA_MEMORY_USAGE_GPU_ONLY = 1,
++ /**
++ \deprecated Obsolete, preserved for backward compatibility.
++ Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`.
++ */
++ VMA_MEMORY_USAGE_CPU_ONLY = 2,
++ /**
++ \deprecated Obsolete, preserved for backward compatibility.
++ Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
++ */
++ VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
++ /**
++ \deprecated Obsolete, preserved for backward compatibility.
++ Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
++ */
++ VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
++ /**
++ \deprecated Obsolete, preserved for backward compatibility.
++ Prefers not `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
++ */
++ VMA_MEMORY_USAGE_CPU_COPY = 5,
++ /**
++ Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`.
++ Exists mostly on mobile platforms. Using it on a desktop PC or other GPUs with no such memory type present will make the allocation fail.
++
++ Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`.
++
++ Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
++ */
++ VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,
++ /**
++ Selects best memory type automatically.
++ This flag is recommended for most common use cases.
++
++ When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
++ you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
++ in VmaAllocationCreateInfo::flags.
++
++ It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
++ vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
++ and not with generic memory allocation functions.
++ */
++ VMA_MEMORY_USAGE_AUTO = 7,
++ /**
++ Selects best memory type automatically with preference for GPU (device) memory.
++
++ When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
++ you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
++ in VmaAllocationCreateInfo::flags.
++
++ It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
++ vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
++ and not with generic memory allocation functions.
++ */
++ VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE = 8,
++ /**
++ Selects best memory type automatically with preference for CPU (host) memory.
++
++ When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
++ you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
++ in VmaAllocationCreateInfo::flags.
++
++ It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
++ vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
++ and not with generic memory allocation functions.
++ */
++ VMA_MEMORY_USAGE_AUTO_PREFER_HOST = 9,
++
++ VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
++} VmaMemoryUsage;
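++
++// For example, creating a mappable staging buffer with #VMA_MEMORY_USAGE_AUTO
++// (an illustrative sketch; `allocator` is assumed to be a valid #VmaAllocator):
++// VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
++// bufCreateInfo.size = 65536;
++// bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
++//
++// VmaAllocationCreateInfo allocCreateInfo = {};
++// allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
++// allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
++//     VMA_ALLOCATION_CREATE_MAPPED_BIT;
++//
++// VkBuffer buf;
++// VmaAllocation alloc;
++// vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, NULL);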
++
++/// Flags to be passed as VmaAllocationCreateInfo::flags.
++typedef enum VmaAllocationCreateFlagBits
++{
++ /** \brief Set this flag if the allocation should have its own memory block.
++
++ Use it for special, big resources, like fullscreen images used as attachments.
++ */
++ VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
++
++ /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block.
++
++ If a new allocation cannot be placed in any of the existing blocks, the allocation
++ fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
++
++ You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and
++ #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense.
++ */
++ VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
++ /** \brief Set this flag to use memory that will be persistently mapped, and retrieve a pointer to it.
++
++ Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData.
++
++ It is valid to use this flag for an allocation made from a memory type that is not
++ `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is
++ useful if you need an allocation that is efficient to use on GPU
++ (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that
++ support it (e.g. Intel GPU).
++ */
++ VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
++ /** \deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead.
++
++ Set this flag to treat VmaAllocationCreateInfo::pUserData as a pointer to a
++ null-terminated string. Instead of copying the pointer value, a local copy of the
++ string is made and stored in allocation's `pName`. The string is automatically
++ freed together with the allocation. It is also used in vmaBuildStatsString().
++ */
++ VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
++ /** Allocation will be created from upper stack in a double stack pool.
++
++ This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag.
++ */
++ VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
++ /** Create both buffer/image and allocation, but don't bind them together.
++ It is useful when you want to do the binding yourself in order to perform some more advanced binding, e.g. using some extensions.
++ The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage().
++ Otherwise it is ignored.
++
++ If you want to make sure the new buffer/image is not tied to the new memory allocation
++ through `VkMemoryDedicatedAllocateInfoKHR` structure in case the allocation ends up in its own memory block,
++ use also flag #VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT.
++ */
++ VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,
++ /** Create the allocation only if the additional device memory required for it, if any, won't exceed
++ the memory budget. Otherwise, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned.
++ */
++ VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100,
++ /** \brief Set this flag if the allocated memory will have aliasing resources.
++
++ Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified.
++ Otherwise created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan Validation Layer errors.
++ */
++ VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT = 0x00000200,
++ /**
++ Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).
++
++ - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value,
++ you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
++ - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
++ This includes allocations created in \ref custom_memory_pools.
++
++ Declares that mapped memory will only be written sequentially, e.g. using `memcpy()` or a loop writing number-by-number,
++ never read or accessed randomly, so a memory type can be selected that is uncached and write-combined.
++
++ \warning Violating this declaration may work correctly, but will likely be very slow.
++ Watch out for implicit reads introduced by doing e.g. `pMappedData[i] += x;`
++ Better prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once.
++ */
++ VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT = 0x00000400,
++ /**
++ Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).
++
++ - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value,
++ you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
++ - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
++ This includes allocations created in \ref custom_memory_pools.
++
++ Declares that mapped memory can be read, written, and accessed in random order,
++ so a `HOST_CACHED` memory type is required.
++ */
++ VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT = 0x00000800,
++ /**
++ Together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT,
++ it says that despite request for host access, a not-`HOST_VISIBLE` memory type can be selected
++ if it may improve performance.
++
++ By using this flag, you declare that you will check if the allocation ended up in a `HOST_VISIBLE` memory type
++ (e.g. using vmaGetAllocationMemoryProperties()) and if not, you will create some "staging" buffer and
++ issue an explicit transfer to write/read your data.
++ To prepare for this possibility, don't forget to add appropriate flags like
++ `VK_BUFFER_USAGE_TRANSFER_DST_BIT`, `VK_BUFFER_USAGE_TRANSFER_SRC_BIT` to the parameters of created buffer or image.
++ */
++ VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT = 0x00001000,
++ /** Allocation strategy that chooses smallest possible free range for the allocation
++ to minimize memory usage and fragmentation, possibly at the expense of allocation time.
++ */
++ VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = 0x00010000,
++ /** Allocation strategy that chooses first suitable free range for the allocation -
++ not necessarily in terms of the smallest offset but the one that is easiest and fastest to find
++ to minimize allocation time, possibly at the expense of allocation quality.
++ */
++ VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = 0x00020000,
++ /** Allocation strategy that always chooses the lowest offset in available space.
++ This is not the most efficient strategy but achieves highly packed data.
++ Used internally by defragmentation, not recommended in typical usage.
++ */
++ VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = 0x00040000,
++ /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT.
++ */
++ VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,
++ /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT.
++ */
++ VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT,
++ /** A bit mask to extract only `STRATEGY` bits from entire set of flags.
++ */
++ VMA_ALLOCATION_CREATE_STRATEGY_MASK =
++ VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT |
++ VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT |
++ VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
++
++ VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
++} VmaAllocationCreateFlagBits;
++/// See #VmaAllocationCreateFlagBits.
++typedef VkFlags VmaAllocationCreateFlags;
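++
++// For example, checking where an allocation created with
++// #VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT ended up (an
++// illustrative sketch; `allocator` and `alloc` are assumed to exist):
++// VkMemoryPropertyFlags memPropFlags;
++// vmaGetAllocationMemoryProperties(allocator, alloc, &memPropFlags);
++// if((memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
++//     ; // write via the mapped pointer
++// else
++//     ; // write via a staging buffer and vkCmdCopyBuffer()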
++
++/// Flags to be passed as VmaPoolCreateInfo::flags.
++typedef enum VmaPoolCreateFlagBits
++{
++ /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored.
++
++ This is an optional optimization flag.
++
++ If you always allocate using vmaCreateBuffer(), vmaCreateImage(),
++ vmaAllocateMemoryForBuffer(), then you don't need to use it because the allocator
++ knows the exact type of your allocations, so it can handle Buffer-Image Granularity
++ in the optimal way.
++
++ If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(),
++ the exact type of such allocations is not known, so the allocator must be conservative
++ in handling Buffer-Image Granularity, which can lead to suboptimal allocation
++ (wasted memory). In that case, if you can make sure you always allocate only
++ buffers and linear images or only optimal images out of this pool, use this flag
++ to make the allocator disregard Buffer-Image Granularity, making allocations
++ faster and more optimal.
++ */
++ VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
++
++ /** \brief Enables alternative, linear allocation algorithm in this pool.
++
++ Specify this flag to enable linear allocation algorithm, which always creates
++ new allocations after the last one and doesn't reuse space from allocations freed in
++ between. It trades memory consumption for simplified algorithm and data
++ structure, which has better performance and uses less memory for metadata.
++
++ By using this flag, you can achieve behavior of free-at-once, stack,
++ ring buffer, and double stack.
++ For details, see documentation chapter \ref linear_algorithm.
++ */
++ VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
++
++ /** Bit mask to extract only `ALGORITHM` bits from entire set of flags.
++ */
++ VMA_POOL_CREATE_ALGORITHM_MASK =
++ VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT,
++
++ VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
++} VmaPoolCreateFlagBits;
++/// Flags to be passed as VmaPoolCreateInfo::flags. See #VmaPoolCreateFlagBits.
++typedef VkFlags VmaPoolCreateFlags;
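++
++// For example, creating a small linear pool (an illustrative sketch;
++// `memTypeIndex` is assumed to come from e.g. vmaFindMemoryTypeIndexForBufferInfo()):
++// VmaPoolCreateInfo poolCreateInfo = {};
++// poolCreateInfo.memoryTypeIndex = memTypeIndex;
++// poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
++// poolCreateInfo.blockSize = 16ull * 1024 * 1024;
++// poolCreateInfo.maxBlockCount = 1;
++//
++// VmaPool pool;
++// vmaCreatePool(allocator, &poolCreateInfo, &pool);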
++
++/// Flags to be passed as VmaDefragmentationInfo::flags.
++typedef enum VmaDefragmentationFlagBits
++{
++ /** \brief Use simple but fast algorithm for defragmentation.
++ May not achieve best results but will require least time to compute and least allocations to copy.
++ */
++ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT = 0x1,
++ /** \brief Default defragmentation algorithm, applied also when no `ALGORITHM` flag is specified.
++ Offers a balance between defragmentation quality and the amount of allocations and bytes that need to be moved.
++ */
++ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT = 0x2,
++ /** \brief Perform full defragmentation of memory.
++ Can result in notably more time to compute and allocations to copy, but will achieve best memory packing.
++ */
++ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT = 0x4,
++ /** \brief Use the most robust algorithm at the cost of time to compute and number of copies to make.
++ Only available when bufferImageGranularity is greater than 1, since it aims to reduce
++ alignment issues between different types of resources.
++ Otherwise falls back to same behavior as #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT.
++ */
++ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT = 0x8,
++
++ /// A bit mask to extract only `ALGORITHM` bits from entire set of flags.
++ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK =
++ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT |
++ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT |
++ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT |
++ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT,
++
++ VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
++} VmaDefragmentationFlagBits;
++/// See #VmaDefragmentationFlagBits.
++typedef VkFlags VmaDefragmentationFlags;
++
++/// Operation performed on single defragmentation move. See structure #VmaDefragmentationMove.
++typedef enum VmaDefragmentationMoveOperation
++{
++ /// Buffer/image has been recreated at `dstTmpAllocation`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass().
++ VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY = 0,
++ /// Set this value if you cannot move the allocation. New place reserved at `dstTmpAllocation` will be freed. `srcAllocation` will remain unchanged.
++ VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE = 1,
++ /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved at `dstTmpAllocation` will be freed, along with `srcAllocation`, which will be destroyed.
++ VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY = 2,
++} VmaDefragmentationMoveOperation;
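++
++// For example, the overall shape of a defragmentation run (an illustrative
++// sketch; the per-pass handling of the returned moves is omitted):
++// VmaDefragmentationInfo defragInfo = {};
++// defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT;
++//
++// VmaDefragmentationContext defragCtx;
++// vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
++// // ... loop: vmaBeginDefragmentationPass() / recreate + copy / vmaEndDefragmentationPass() ...
++// vmaEndDefragmentation(allocator, defragCtx, NULL);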
++
++/** @} */
++
++/**
++\addtogroup group_virtual
++@{
++*/
++
++/// Flags to be passed as VmaVirtualBlockCreateInfo::flags.
++typedef enum VmaVirtualBlockCreateFlagBits
++{
++ /** \brief Enables alternative, linear allocation algorithm in this virtual block.
++
++ Specify this flag to enable linear allocation algorithm, which always creates
++ new allocations after the last one and doesn't reuse space from allocations freed in
++ between. It trades memory consumption for simplified algorithm and data
++ structure, which has better performance and uses less memory for metadata.
++
++ By using this flag, you can achieve behavior of free-at-once, stack,
++ ring buffer, and double stack.
++ For details, see documentation chapter \ref linear_algorithm.
++ */
++ VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT = 0x00000001,
++
++ /** \brief Bit mask to extract only `ALGORITHM` bits from entire set of flags.
++ */
++ VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK =
++ VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT,
++
++ VMA_VIRTUAL_BLOCK_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
++} VmaVirtualBlockCreateFlagBits;
++/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. See #VmaVirtualBlockCreateFlagBits.
++typedef VkFlags VmaVirtualBlockCreateFlags;
++
++/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags.
++typedef enum VmaVirtualAllocationCreateFlagBits
++{
++ /** \brief Allocation will be created from upper stack in a double stack pool.
++
++ This flag is only allowed for virtual blocks created with #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT flag.
++ */
++ VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT,
++ /** \brief Allocation strategy that tries to minimize memory usage.
++ */
++ VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,
++ /** \brief Allocation strategy that tries to minimize allocation time.
++ */
++ VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT,
++ /** Allocation strategy that always chooses the lowest offset in available space.
++ This is not the most efficient strategy but achieves highly packed data.
++ */
++ VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
++ /** \brief A bit mask to extract only `STRATEGY` bits from entire set of flags.
++
++ These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits.
++ */
++ VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_MASK,
++
++ VMA_VIRTUAL_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
++} VmaVirtualAllocationCreateFlagBits;
++/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See #VmaVirtualAllocationCreateFlagBits.
++typedef VkFlags VmaVirtualAllocationCreateFlags;
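++
++// For example, a virtual block used purely for offset bookkeeping (an
++// illustrative sketch; no real GPU memory is allocated):
++// VmaVirtualBlockCreateInfo blockCreateInfo = {};
++// blockCreateInfo.size = 1048576; // 1 MiB of virtual space
++//
++// VmaVirtualBlock block;
++// vmaCreateVirtualBlock(&blockCreateInfo, &block);
++//
++// VmaVirtualAllocationCreateInfo vAllocCreateInfo = {};
++// vAllocCreateInfo.size = 4096;
++//
++// VmaVirtualAllocation vAlloc;
++// VkDeviceSize offset;
++// vmaVirtualAllocate(block, &vAllocCreateInfo, &vAlloc, &offset);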
++
++/** @} */
++
++#endif // _VMA_ENUM_DECLARATIONS
++
++#ifndef _VMA_DATA_TYPES_DECLARATIONS
++
++/**
++\addtogroup group_init
++@{ */
++
++/** \struct VmaAllocator
++\brief Represents the main object of this library, once initialized.
++
++Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it.
++Call function vmaDestroyAllocator() to destroy it.
++
++It is recommended to create just one object of this type per `VkDevice` object,
++right after Vulkan is initialized, and keep it alive until the Vulkan device is destroyed.
++*/
++VK_DEFINE_HANDLE(VmaAllocator)
++
++/** @} */
++
++/**
++\addtogroup group_alloc
++@{
++*/
++
++/** \struct VmaPool
++\brief Represents a custom memory pool.
++
++Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it.
++Call function vmaDestroyPool() to destroy it.
++
++For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools).
++*/
++VK_DEFINE_HANDLE(VmaPool)
++
++/** \struct VmaAllocation
++\brief Represents a single memory allocation.
++
++It may be either a dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type
++plus a unique offset.
++
++There are multiple ways to create such an object.
++You need to fill structure VmaAllocationCreateInfo.
++For more information see [Choosing memory type](@ref choosing_memory_type).
++
++Although the library provides convenience functions that create a Vulkan buffer or image,
++allocate memory for it, and bind them together,
++binding of the allocation to a buffer or an image is out of scope of the allocation itself.
++An allocation object can exist without a buffer/image bound to it;
++binding can be done manually by the user, and the buffer/image can be destroyed
++independently of the allocation.
++
++The object also remembers its size and some other information.
++To retrieve this information, use function vmaGetAllocationInfo() and inspect
++returned structure VmaAllocationInfo.
++*/
++VK_DEFINE_HANDLE(VmaAllocation)
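++
++// For example, querying an allocation (an illustrative sketch; `allocator`
++// and `alloc` are assumed to exist):
++// VmaAllocationInfo allocInfo;
++// vmaGetAllocationInfo(allocator, alloc, &allocInfo);
++// // allocInfo.deviceMemory, allocInfo.offset, allocInfo.size, ...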
++
++/** \struct VmaDefragmentationContext
++\brief An opaque object that represents started defragmentation process.
++
++Fill structure #VmaDefragmentationInfo and call function vmaBeginDefragmentation() to create it.
++Call function vmaEndDefragmentation() to destroy it.
++*/
++VK_DEFINE_HANDLE(VmaDefragmentationContext)
++
++/** @} */
++
++/**
++\addtogroup group_virtual
++@{
++*/
++
++/** \struct VmaVirtualAllocation
++\brief Represents a single memory allocation done inside VmaVirtualBlock.
++
++Use it as a unique identifier of a virtual allocation within a single block.
++
++Use value `VK_NULL_HANDLE` to represent a null/invalid allocation.
++*/
++VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaVirtualAllocation)
++
++/** @} */
++
++/**
++\addtogroup group_virtual
++@{
++*/
++
++/** \struct VmaVirtualBlock
++\brief Handle to a virtual block object that allows using the core allocation algorithm without allocating any real GPU memory.
++
++Fill in #VmaVirtualBlockCreateInfo structure and use vmaCreateVirtualBlock() to create it. Use vmaDestroyVirtualBlock() to destroy it.
++For more information, see documentation chapter \ref virtual_allocator.
++
++This object is not thread-safe - it must not be used from multiple threads simultaneously and must be synchronized externally.
++*/
++VK_DEFINE_HANDLE(VmaVirtualBlock)
++
++/** @} */
++
++/**
++\addtogroup group_init
++@{
++*/
++
++/// Callback function called after successful vkAllocateMemory.
++typedef void (VKAPI_PTR* PFN_vmaAllocateDeviceMemoryFunction)(
++ VmaAllocator VMA_NOT_NULL allocator,
++ uint32_t memoryType,
++ VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
++ VkDeviceSize size,
++ void* VMA_NULLABLE pUserData);
++
++/// Callback function called before vkFreeMemory.
++typedef void (VKAPI_PTR* PFN_vmaFreeDeviceMemoryFunction)(
++ VmaAllocator VMA_NOT_NULL allocator,
++ uint32_t memoryType,
++ VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
++ VkDeviceSize size,
++ void* VMA_NULLABLE pUserData);
++
++/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`.
++
++Provided for informative purpose, e.g. to gather statistics about number of
++allocations or total amount of memory allocated in Vulkan.
++
++Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
++*/
++typedef struct VmaDeviceMemoryCallbacks
++{
++ /// Optional, can be null.
++ PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate;
++ /// Optional, can be null.
++ PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree;
++ /// Optional, can be null.
++ void* VMA_NULLABLE pUserData;
++} VmaDeviceMemoryCallbacks;
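++
++// For example, an informative counter hooked into the callbacks (an
++// illustrative sketch; `MyAllocCallback` is a hypothetical user function
++// matching PFN_vmaAllocateDeviceMemoryFunction and `myCounter` a user-owned
++// uint64_t):
++// static void VKAPI_PTR MyAllocCallback(VmaAllocator allocator, uint32_t memoryType,
++//     VkDeviceMemory memory, VkDeviceSize size, void* pUserData)
++// {
++//     *(uint64_t*)pUserData += 1; // e.g. count vkAllocateMemory calls
++// }
++// VmaDeviceMemoryCallbacks callbacks = { MyAllocCallback, NULL, &myCounter };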
++
++/** \brief Pointers to some Vulkan functions - a subset used by the library.
++
++Used in VmaAllocatorCreateInfo::pVulkanFunctions.
++*/
++typedef struct VmaVulkanFunctions
++{
++ /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.
++ PFN_vkGetInstanceProcAddr VMA_NULLABLE vkGetInstanceProcAddr;
++ /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.
++ PFN_vkGetDeviceProcAddr VMA_NULLABLE vkGetDeviceProcAddr;
++ PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties;
++ PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties;
++ PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory;
++ PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory;
++ PFN_vkMapMemory VMA_NULLABLE vkMapMemory;
++ PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory;
++ PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges;
++ PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges;
++ PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory;
++ PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory;
++ PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements;
++ PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements;
++ PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer;
++ PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer;
++ PFN_vkCreateImage VMA_NULLABLE vkCreateImage;
++ PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage;
++ PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer;
++#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
++ /// Fetch "vkGetBufferMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetBufferMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension.
++ PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR;
++ /// Fetch "vkGetImageMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetImageMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension.
++ PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR;
++#endif
++#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
++ /// Fetch "vkBindBufferMemory2" on Vulkan >= 1.1, fetch "vkBindBufferMemory2KHR" when using VK_KHR_bind_memory2 extension.
++ PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR;
++ /// Fetch "vkBindImageMemory2" on Vulkan >= 1.1, fetch "vkBindImageMemory2KHR" when using VK_KHR_bind_memory2 extension.
++ PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
++#endif
++#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
++ PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
++#endif
++#if VMA_VULKAN_VERSION >= 1003000
++ /// Fetch from "vkGetDeviceBufferMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceBufferMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
++ PFN_vkGetDeviceBufferMemoryRequirements VMA_NULLABLE vkGetDeviceBufferMemoryRequirements;
++ /// Fetch from "vkGetDeviceImageMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceImageMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
++ PFN_vkGetDeviceImageMemoryRequirements VMA_NULLABLE vkGetDeviceImageMemoryRequirements;
++#endif
++} VmaVulkanFunctions;
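++
++/*
++A minimal sketch of filling this structure when the library is configured with
++`VMA_DYNAMIC_VULKAN_FUNCTIONS` (an assumption about the build configuration):
++only the two loader entry points need to be provided and the rest can stay
++null, because the library fetches the remaining pointers itself.
++
++\code
++VmaVulkanFunctions vulkanFunctions = {};
++vulkanFunctions.vkGetInstanceProcAddr = vkGetInstanceProcAddr; // From the statically linked loader.
++vulkanFunctions.vkGetDeviceProcAddr = vkGetDeviceProcAddr;
++// Pass &vulkanFunctions as VmaAllocatorCreateInfo::pVulkanFunctions.
++\endcode
++*/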
++
++/// Description of an Allocator to be created.
++typedef struct VmaAllocatorCreateInfo
++{
++ /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
++ VmaAllocatorCreateFlags flags;
++ /// Vulkan physical device.
++ /** It must be valid throughout the whole lifetime of the created allocator. */
++ VkPhysicalDevice VMA_NOT_NULL physicalDevice;
++ /// Vulkan device.
++ /** It must be valid throughout the whole lifetime of the created allocator. */
++ VkDevice VMA_NOT_NULL device;
++ /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.
++ /** Set to 0 to use default, which is currently 256 MiB. */
++ VkDeviceSize preferredLargeHeapBlockSize;
++ /// Custom CPU memory allocation callbacks. Optional.
++ /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */
++ const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
++ /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.
++ /** Optional, can be null. */
++ const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;
++ /** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
++
++ If not NULL, it must be a pointer to an array of
++ `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining the limit on the
++ maximum number of bytes that can be allocated out of a particular Vulkan memory
++ heap.
++
++ Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that
++ heap. This is also the default in case of `pHeapSizeLimit` = NULL.
++
++ If there is a limit defined for a heap:
++
++ - If the user tries to allocate more memory from that heap using this allocator,
++ the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
++ - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the
++ value of this limit will be reported instead when using vmaGetMemoryProperties().
++
++ Warning! Using this feature may not be equivalent to installing a GPU with a
++ smaller amount of memory, because the graphics driver doesn't necessarily fail new
++ allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` when the memory capacity is
++ exceeded. It may return success and just silently migrate some device memory
++ blocks to system RAM. This driver behavior can also be controlled using the
++ VK_AMD_memory_overallocation_behavior extension.
++ */
++ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;
++
++ /** \brief Pointers to Vulkan functions. Can be null.
++
++ For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions).
++ */
++ const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions;
++ /** \brief Handle to Vulkan instance object.
++
++ Starting from version 3.0.0, this member is no longer optional; it must be set!
++ */
++ VkInstance VMA_NOT_NULL instance;
++ /** \brief Optional. The highest version of Vulkan that the application is designed to use.
++
++ It must be a value in the format created by the macro `VK_MAKE_VERSION`, or a constant like `VK_API_VERSION_1_1` or `VK_API_VERSION_1_0`.
++ The patch version number specified is ignored. Only the major and minor versions are considered.
++ It must be less than or equal (preferably equal) to the value passed to `vkCreateInstance` as `VkApplicationInfo::apiVersion`.
++ Only versions 1.0, 1.1, 1.2, and 1.3 are supported by the current implementation.
++ Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`.
++ */
++ uint32_t vulkanApiVersion;
++#if VMA_EXTERNAL_MEMORY
++ /** \brief Either null or a pointer to an array of external memory handle types for each Vulkan memory type.
++
++ If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryTypeCount`
++ elements, defining the external memory handle types of particular Vulkan memory types,
++ to be passed using `VkExportMemoryAllocateInfoKHR`.
++
++ Any of the elements may be equal to 0, which means not to use `VkExportMemoryAllocateInfoKHR` on this memory type.
++ This is also the default in case of `pTypeExternalMemoryHandleTypes` = NULL.
++ */
++ const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes;
++#endif // #if VMA_EXTERNAL_MEMORY
++} VmaAllocatorCreateInfo;
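++
++/*
++A minimal sketch of filling this structure and creating the allocator,
++assuming `instance`, `physicalDevice`, and `device` were created earlier by
++the application (the variable names are illustrative):
++
++\code
++VmaAllocatorCreateInfo allocatorCreateInfo = {};
++allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;
++allocatorCreateInfo.instance = instance;
++allocatorCreateInfo.physicalDevice = physicalDevice;
++allocatorCreateInfo.device = device;
++
++VmaAllocator allocator;
++VkResult res = vmaCreateAllocator(&allocatorCreateInfo, &allocator);
++// ... use the allocator ...
++vmaDestroyAllocator(allocator);
++\endcode
++*/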
++
++/// Information about existing #VmaAllocator object.
++typedef struct VmaAllocatorInfo
++{
++ /** \brief Handle to Vulkan instance object.
++
++ This is the same value as has been passed through VmaAllocatorCreateInfo::instance.
++ */
++ VkInstance VMA_NOT_NULL instance;
++ /** \brief Handle to Vulkan physical device object.
++
++ This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice.
++ */
++ VkPhysicalDevice VMA_NOT_NULL physicalDevice;
++ /** \brief Handle to Vulkan device object.
++
++ This is the same value as has been passed through VmaAllocatorCreateInfo::device.
++ */
++ VkDevice VMA_NOT_NULL device;
++} VmaAllocatorInfo;
++
++/** @} */
++
++/**
++\addtogroup group_stats
++@{
++*/
++
++/** \brief Calculated statistics of memory usage, e.g. in a specific memory type, heap, custom pool, or in total.
++
++These are fast to calculate.
++See functions: vmaGetHeapBudgets(), vmaGetPoolStatistics().
++*/
++typedef struct VmaStatistics
++{
++ /** \brief Number of `VkDeviceMemory` objects - Vulkan memory blocks allocated.
++ */
++ uint32_t blockCount;
++ /** \brief Number of #VmaAllocation objects allocated.
++
++ Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`.
++ */
++ uint32_t allocationCount;
++ /** \brief Number of bytes allocated in `VkDeviceMemory` blocks.
++
++ \note To avoid confusion, please be aware that what Vulkan calls an "allocation" - a whole `VkDeviceMemory` object
++ (e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a "block" in VMA, while VMA calls
++ "allocation" a #VmaAllocation object that represents a memory region sub-allocated from such block, usually for a single buffer or image.
++ */
++ VkDeviceSize blockBytes;
++ /** \brief Total number of bytes occupied by all #VmaAllocation objects.
++
++ Always less than or equal to `blockBytes`.
++ The difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan
++ but unused by any #VmaAllocation.
++ */
++ VkDeviceSize allocationBytes;
++} VmaStatistics;
++
++/** \brief More detailed statistics than #VmaStatistics.
++
++These are slower to calculate. Use for debugging purposes.
++See functions: vmaCalculateStatistics(), vmaCalculatePoolStatistics().
++
++Previous versions of the statistics API provided averages, but they have been removed
++because they can easily be calculated as:
++
++\code
++VkDeviceSize allocationSizeAvg = detailedStats.statistics.allocationBytes / detailedStats.statistics.allocationCount;
++VkDeviceSize unusedBytes = detailedStats.statistics.blockBytes - detailedStats.statistics.allocationBytes;
++VkDeviceSize unusedRangeSizeAvg = unusedBytes / detailedStats.unusedRangeCount;
++\endcode
++*/
++typedef struct VmaDetailedStatistics
++{
++ /// Basic statistics.
++ VmaStatistics statistics;
++ /// Number of free ranges of memory between allocations.
++ uint32_t unusedRangeCount;
++ /// Smallest allocation size. `VK_WHOLE_SIZE` if there are 0 allocations.
++ VkDeviceSize allocationSizeMin;
++ /// Largest allocation size. 0 if there are 0 allocations.
++ VkDeviceSize allocationSizeMax;
++ /// Smallest empty range size. `VK_WHOLE_SIZE` if there are 0 empty ranges.
++ VkDeviceSize unusedRangeSizeMin;
++ /// Largest empty range size. 0 if there are 0 empty ranges.
++ VkDeviceSize unusedRangeSizeMax;
++} VmaDetailedStatistics;
++
++/** \brief General statistics from the current state of the Allocator -
++total memory usage across all memory heaps and types.
++
++These are slower to calculate. Use for debugging purposes.
++See function vmaCalculateStatistics().
++*/
++typedef struct VmaTotalStatistics
++{
++ VmaDetailedStatistics memoryType[VK_MAX_MEMORY_TYPES];
++ VmaDetailedStatistics memoryHeap[VK_MAX_MEMORY_HEAPS];
++ VmaDetailedStatistics total;
++} VmaTotalStatistics;
++
++/** \brief Statistics of current memory usage and available budget for a specific memory heap.
++
++These are fast to calculate.
++See function vmaGetHeapBudgets().
++*/
++typedef struct VmaBudget
++{
++ /** \brief Statistics fetched from the library.
++ */
++ VmaStatistics statistics;
++ /** \brief Estimated current memory usage of the program, in bytes.
++
++ Fetched from system using VK_EXT_memory_budget extension if enabled.
++
++ It might be different from `statistics.blockBytes` (usually higher) due to additional implicit objects
++ also occupying the memory, like swapchains, pipelines, descriptor heaps, command buffers, or
++ `VkDeviceMemory` blocks allocated outside of this library, if any.
++ */
++ VkDeviceSize usage;
++ /** \brief Estimated amount of memory available to the program, in bytes.
++
++ Fetched from system using VK_EXT_memory_budget extension if enabled.
++
++ It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors
++ external to the program, decided by the operating system.
++ The difference `budget - usage` is the amount of additional memory that can probably
++ be allocated without problems. Exceeding the budget may result in various problems.
++ */
++ VkDeviceSize budget;
++} VmaBudget;
++
++/** @} */
++
++/**
++\addtogroup group_alloc
++@{
++*/
++
++/** \brief Parameters of new #VmaAllocation.
++
++To be used with functions like vmaCreateBuffer(), vmaCreateImage(), and many others.
++*/
++typedef struct VmaAllocationCreateInfo
++{
++ /// Use #VmaAllocationCreateFlagBits enum.
++ VmaAllocationCreateFlags flags;
++ /** \brief Intended usage of memory.
++
++ You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in another way. \n
++ If `pool` is not null, this member is ignored.
++ */
++ VmaMemoryUsage usage;
++ /** \brief Flags that must be set in a Memory Type chosen for an allocation.
++
++ Leave 0 if you specify memory requirements in another way. \n
++ If `pool` is not null, this member is ignored.*/
++ VkMemoryPropertyFlags requiredFlags;
++ /** \brief Flags that preferably should be set in a memory type chosen for an allocation.
++
++ Set to 0 if no additional flags are preferred. \n
++ If `pool` is not null, this member is ignored. */
++ VkMemoryPropertyFlags preferredFlags;
++ /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation.
++
++ Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if
++ it meets other requirements specified by this structure, with no further
++ restrictions on memory type index. \n
++ If `pool` is not null, this member is ignored.
++ */
++ uint32_t memoryTypeBits;
++ /** \brief Pool that this allocation should be created in.
++
++ Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members:
++ `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored.
++ */
++ VmaPool VMA_NULLABLE pool;
++ /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().
++
++ If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either
++ null or a pointer to a null-terminated string. The string will then be copied to an
++ internal buffer, so it doesn't need to remain valid after the allocation call.
++ */
++ void* VMA_NULLABLE pUserData;
++ /** \brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
++
++ It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object
++ and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
++ Otherwise, it has the priority of a memory block where it is placed and this variable is ignored.
++ */
++ float priority;
++} VmaAllocationCreateInfo;
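++
++/*
++A minimal sketch of using this structure to create a buffer together with its
++memory via vmaCreateBuffer(), assuming an existing `allocator` and that the
++build provides #VMA_MEMORY_USAGE_AUTO:
++
++\code
++VkBufferCreateInfo bufferCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
++bufferCreateInfo.size = 65536;
++bufferCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
++
++VmaAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
++
++VkBuffer buffer;
++VmaAllocation allocation;
++VkResult res = vmaCreateBuffer(allocator, &bufferCreateInfo, &allocCreateInfo,
++    &buffer, &allocation, nullptr);
++// Later: vmaDestroyBuffer(allocator, buffer, allocation);
++\endcode
++*/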
++
++/// Describes parameters of a created #VmaPool.
++typedef struct VmaPoolCreateInfo
++{
++ /** \brief Vulkan memory type index to allocate this pool from.
++ */
++ uint32_t memoryTypeIndex;
++ /** \brief Use combination of #VmaPoolCreateFlagBits.
++ */
++ VmaPoolCreateFlags flags;
++ /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional.
++
++ Specify a nonzero value to set an explicit, constant size of the memory blocks used by this
++ pool.
++
++ Leave 0 to use the default and let the library manage block sizes automatically.
++ Sizes of particular blocks may vary.
++ In this case, the pool will also support dedicated allocations.
++ */
++ VkDeviceSize blockSize;
++ /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty.
++
++ Set to 0 to have no preallocated blocks and allow the pool to be completely empty.
++ */
++ size_t minBlockCount;
++ /** \brief Maximum number of blocks that can be allocated in this pool. Optional.
++
++ Set to 0 to use the default, which is `SIZE_MAX`, meaning no limit.
++
++ Set to the same value as VmaPoolCreateInfo::minBlockCount to have a fixed amount of memory allocated
++ throughout the whole lifetime of this pool.
++ */
++ size_t maxBlockCount;
++ /** \brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations.
++
++ It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object.
++ Otherwise, this variable is ignored.
++ */
++ float priority;
++ /** \brief Additional minimum alignment to be used for all allocations created from this pool. Can be 0.
++
++ Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two.
++ It can be useful in cases where the alignment returned by Vulkan functions like `vkGetBufferMemoryRequirements` is not enough,
++ e.g. when doing interop with OpenGL.
++ */
++ VkDeviceSize minAllocationAlignment;
++ /** \brief Additional `pNext` chain to be attached to `VkMemoryAllocateInfo` used for every allocation made by this pool. Optional.
++
++ Optional, can be null. If not null, it must point to a `pNext` chain of structures that can be attached to `VkMemoryAllocateInfo`.
++ It can be useful for special needs such as adding `VkExportMemoryAllocateInfoKHR`.
++ Structures pointed by this member must remain alive and unchanged for the whole lifetime of the custom pool.
++
++ Please note that some structures, e.g. `VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`,
++ can be attached automatically by this library when you use other, more convenient features of it.
++ */
++ void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkMemoryAllocateInfo) pMemoryAllocateNext;
++} VmaPoolCreateInfo;
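++
++/*
++A minimal sketch of creating a custom pool, assuming an existing `allocator`.
++A suitable memory type index is found first from a representative buffer
++description (all names are illustrative):
++
++\code
++VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
++sampleBufCreateInfo.size = 1024; // Representative size; the exact value doesn't matter here.
++sampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
++
++VmaAllocationCreateInfo sampleAllocCreateInfo = {};
++sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
++
++uint32_t memTypeIndex;
++VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator,
++    &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex);
++
++VmaPoolCreateInfo poolCreateInfo = {};
++poolCreateInfo.memoryTypeIndex = memTypeIndex;
++poolCreateInfo.blockSize = 2ull * 1024 * 1024; // Fixed 2 MiB blocks.
++poolCreateInfo.minBlockCount = 1;              // Keep one block preallocated.
++
++VmaPool pool;
++res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
++// ... allocate via VmaAllocationCreateInfo::pool = pool ...
++vmaDestroyPool(allocator, pool);
++\endcode
++*/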
++
++/** @} */
++
++/**
++\addtogroup group_alloc
++@{
++*/
++
++/// Parameters of #VmaAllocation objects that can be retrieved using the function vmaGetAllocationInfo().
++typedef struct VmaAllocationInfo
++{
++ /** \brief Memory type index that this allocation was allocated from.
++
++ It never changes.
++ */
++ uint32_t memoryType;
++ /** \brief Handle to Vulkan memory object.
++
++ Same memory object can be shared by multiple allocations.
++
++ It can change after the allocation is moved during \ref defragmentation.
++ */
++ VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory;
++ /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation.
++
++ You usually don't need to use this offset. If you create a buffer or an image together with the allocation using e.g.
++ vmaCreateBuffer() or vmaCreateImage(), functions that operate on these resources refer to the beginning of the buffer or image,
++ not to the entire device memory block. Functions like vmaMapMemory() and vmaBindBufferMemory() also refer to the beginning of the allocation
++ and apply this offset automatically.
++
++ It can change after the allocation is moved during \ref defragmentation.
++ */
++ VkDeviceSize offset;
++ /** \brief Size of this allocation, in bytes.
++
++ It never changes.
++
++ \note The allocation size returned in this variable may be greater than the size
++ requested for the resource, e.g. as `VkBufferCreateInfo::size`. The whole size of the
++ allocation is accessible for operations on memory, e.g. using a pointer after
++ mapping with vmaMapMemory(), but operations on the resource, e.g. using
++ `vkCmdCopyBuffer`, must be limited to the size of the resource.
++ */
++ VkDeviceSize size;
++ /** \brief Pointer to the beginning of this allocation as mapped data.
++
++ If the allocation hasn't been mapped using vmaMapMemory() and hasn't been
++ created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null.
++
++ It can change after call to vmaMapMemory(), vmaUnmapMemory().
++ It can also change after the allocation is moved during \ref defragmentation.
++ */
++ void* VMA_NULLABLE pMappedData;
++ /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
++
++ It can change after call to vmaSetAllocationUserData() for this allocation.
++ */
++ void* VMA_NULLABLE pUserData;
++ /** \brief Custom allocation name that was set with vmaSetAllocationName().
++
++ It can change after call to vmaSetAllocationName() for this allocation.
++
++ Another way to set a custom name is to pass it in VmaAllocationCreateInfo::pUserData with
++ the additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED].
++ */
++ const char* VMA_NULLABLE pName;
++} VmaAllocationInfo;
++
++/** Callback function called during vmaBeginDefragmentation() to check a custom criterion for ending the current defragmentation pass.
++
++It should return true if the defragmentation needs to stop the current pass.
++*/
++typedef VkBool32 (VKAPI_PTR* PFN_vmaCheckDefragmentationBreakFunction)(void* VMA_NULLABLE pUserData);
++
++/** \brief Parameters for defragmentation.
++
++To be used with function vmaBeginDefragmentation().
++*/
++typedef struct VmaDefragmentationInfo
++{
++ /// \brief Use combination of #VmaDefragmentationFlagBits.
++ VmaDefragmentationFlags flags;
++ /** \brief Custom pool to be defragmented.
++
++ If null, then default pools will undergo the defragmentation process.
++ */
++ VmaPool VMA_NULLABLE pool;
++ /** \brief Maximum number of bytes that can be copied during a single pass, while moving allocations to different places.
++
++ `0` means no limit.
++ */
++ VkDeviceSize maxBytesPerPass;
++ /** \brief Maximum number of allocations that can be moved during a single pass to a different place.
++
++ `0` means no limit.
++ */
++ uint32_t maxAllocationsPerPass;
++ /** \brief Optional custom callback for stopping vmaBeginDefragmentation().
++
++ It has to return true to break the current defragmentation pass.
++ */
++ PFN_vmaCheckDefragmentationBreakFunction VMA_NULLABLE pfnBreakCallback;
++ /// \brief Optional data to pass to the custom callback that stops a defragmentation pass.
++ void* VMA_NULLABLE pBreakCallbackUserData;
++} VmaDefragmentationInfo;
++
++/// Single move of an allocation to be done for defragmentation.
++typedef struct VmaDefragmentationMove
++{
++ /// Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY. You can modify it.
++ VmaDefragmentationMoveOperation operation;
++ /// Allocation that should be moved.
++ VmaAllocation VMA_NOT_NULL srcAllocation;
++ /** \brief Temporary allocation pointing to destination memory that will replace `srcAllocation`.
++
++ \warning Do not store this allocation in your data structures! It exists only temporarily, for the duration of the defragmentation pass,
++ to be used for binding new buffer/image to the destination memory using e.g. vmaBindBufferMemory().
++ vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory.
++ */
++ VmaAllocation VMA_NOT_NULL dstTmpAllocation;
++} VmaDefragmentationMove;
++
++/** \brief Parameters for incremental defragmentation steps.
++
++To be used with function vmaBeginDefragmentationPass().
++*/
++typedef struct VmaDefragmentationPassMoveInfo
++{
++ /// Number of elements in the `pMoves` array.
++ uint32_t moveCount;
++ /** \brief Array of moves to be performed by the user in the current defragmentation pass.
++
++ Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass().
++
++ For each element, you should:
++
++ 1. Create a new buffer/image in the destination memory, e.g. by binding it to VmaDefragmentationMove::dstTmpAllocation using vmaBindBufferMemory() or vmaBindImageMemory().
++ 2. Copy data from VmaDefragmentationMove::srcAllocation, e.g. using `vkCmdCopyBuffer` or `vkCmdCopyImage`.
++ 3. Make sure these commands have finished executing on the GPU.
++ 4. Destroy the old buffer/image.
++
++ Only then can you finish the defragmentation pass by calling vmaEndDefragmentationPass().
++ After this call, the allocation will point to the new place in memory.
++
++ Alternatively, if you cannot move a specific allocation, you can set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
++
++ Alternatively, if you decide you want to completely remove the allocation:
++
++ 1. Destroy its buffer/image.
++ 2. Set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
++
++ Then, after vmaEndDefragmentationPass() the allocation will be freed.
++ */
++ VmaDefragmentationMove* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
++} VmaDefragmentationPassMoveInfo;
++
++/// Statistics returned for defragmentation process in function vmaEndDefragmentation().
++typedef struct VmaDefragmentationStats
++{
++ /// Total number of bytes that have been copied while moving allocations to different places.
++ VkDeviceSize bytesMoved;
++ /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects.
++ VkDeviceSize bytesFreed;
++ /// Number of allocations that have been moved to different places.
++ uint32_t allocationsMoved;
++ /// Number of empty `VkDeviceMemory` objects that have been released to the system.
++ uint32_t deviceMemoryBlocksFreed;
++} VmaDefragmentationStats;
++
++/** @} */
++
++/**
++\addtogroup group_virtual
++@{
++*/
++
++/// Parameters of created #VmaVirtualBlock object to be passed to vmaCreateVirtualBlock().
++typedef struct VmaVirtualBlockCreateInfo
++{
++ /** \brief Total size of the virtual block.
++
++ Sizes can be expressed in bytes or in any units you want, as long as you are consistent in using them.
++ For example, if you allocate from some array of structures, 1 can mean a single instance of an entire structure.
++ */
++ VkDeviceSize size;
++
++ /** \brief Use combination of #VmaVirtualBlockCreateFlagBits.
++ */
++ VmaVirtualBlockCreateFlags flags;
++
++ /** \brief Custom CPU memory allocation callbacks. Optional.
++
++ Optional, can be null. When specified, they will be used for all CPU-side memory allocations.
++ */
++ const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
++} VmaVirtualBlockCreateInfo;
++
++/// Parameters of created virtual allocation to be passed to vmaVirtualAllocate().
++typedef struct VmaVirtualAllocationCreateInfo
++{
++ /** \brief Size of the allocation.
++
++ Cannot be zero.
++ */
++ VkDeviceSize size;
++ /** \brief Required alignment of the allocation. Optional.
++
++ Must be a power of two. The special value 0 has the same meaning as 1 - it means no special alignment is required, so the allocation can start at any offset.
++ */
++ VkDeviceSize alignment;
++ /** \brief Use combination of #VmaVirtualAllocationCreateFlagBits.
++ */
++ VmaVirtualAllocationCreateFlags flags;
++ /** \brief Custom pointer to be associated with the allocation. Optional.
++
++ It can be any value and can be used for user-defined purposes. It can be fetched or changed later.
++ */
++ void* VMA_NULLABLE pUserData;
++} VmaVirtualAllocationCreateInfo;
++
++/// Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo().
++typedef struct VmaVirtualAllocationInfo
++{
++ /** \brief Offset of the allocation.
++
++ Offset at which the allocation was made.
++ */
++ VkDeviceSize offset;
++ /** \brief Size of the allocation.
++
++ Same value as passed in VmaVirtualAllocationCreateInfo::size.
++ */
++ VkDeviceSize size;
++ /** \brief Custom pointer associated with the allocation.
++
++ Same value as passed in VmaVirtualAllocationCreateInfo::pUserData or to vmaSetVirtualAllocationUserData().
++ */
++ void* VMA_NULLABLE pUserData;
++} VmaVirtualAllocationInfo;
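++
++/*
++A minimal sketch tying these structures together: sub-allocating ranges out of
++a 1 MiB space that exists only as numbers, without any real Vulkan memory
++behind it:
++
++\code
++VmaVirtualBlockCreateInfo blockCreateInfo = {};
++blockCreateInfo.size = 1048576; // 1 MiB, in whatever units the application chooses.
++
++VmaVirtualBlock block;
++VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
++
++VmaVirtualAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.size = 4096;
++
++VmaVirtualAllocation alloc;
++VkDeviceSize offset;
++res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
++// `offset` is where the 4096-unit range begins within the block.
++
++vmaVirtualFree(block, alloc);
++vmaDestroyVirtualBlock(block);
++\endcode
++*/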
++
++/** @} */
++
++#endif // _VMA_DATA_TYPES_DECLARATIONS
++
++#ifndef _VMA_FUNCTION_HEADERS
++
++/**
++\addtogroup group_init
++@{
++*/
++
++/// Creates #VmaAllocator object.
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
++ const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
++ VmaAllocator VMA_NULLABLE* VMA_NOT_NULL pAllocator);
++
++/// Destroys allocator object.
++VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
++ VmaAllocator VMA_NULLABLE allocator);
++
++/** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc.
++
++It might be useful if you want to keep just the #VmaAllocator handle and fetch the other required handles,
++like `VkPhysicalDevice` and `VkDevice`, using this function every time they are needed.
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);
++
++/**
++PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
++You can access them here, without fetching them again on your own.
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
++ VmaAllocator VMA_NOT_NULL allocator,
++ const VkPhysicalDeviceProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceProperties);
++
++/**
++PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
++You can access them here, without fetching them again on your own.
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
++ VmaAllocator VMA_NOT_NULL allocator,
++ const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);
++
++/**
++\brief Given Memory Type Index, returns Property Flags of this memory type.
++
++This is just a convenience function. Same information can be obtained using
++vmaGetMemoryProperties().
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
++ VmaAllocator VMA_NOT_NULL allocator,
++ uint32_t memoryTypeIndex,
++ VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
++
++/** \brief Sets index of the current frame.
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
++ VmaAllocator VMA_NOT_NULL allocator,
++ uint32_t frameIndex);
++
++/** @} */
++
++/**
++\addtogroup group_stats
++@{
++*/
++
++/** \brief Retrieves statistics from the current state of the Allocator.
++
++This function is called "calculate" not "get" because it has to traverse all
++internal data structures, so it may be quite slow. Use it for debugging purposes.
++For faster but more brief statistics suitable to be called every frame or every allocation,
++use vmaGetHeapBudgets().
++
++Note that when using the allocator from multiple threads, the returned information may immediately
++become outdated.
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaTotalStatistics* VMA_NOT_NULL pStats);
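++
++/*
++A minimal sketch of dumping the totals for debugging, assuming an existing
++`allocator`:
++
++\code
++VmaTotalStatistics stats;
++vmaCalculateStatistics(allocator, &stats);
++printf("Total: %u blocks, %u allocations, %llu bytes in blocks\n",
++    stats.total.statistics.blockCount,
++    stats.total.statistics.allocationCount,
++    (unsigned long long)stats.total.statistics.blockBytes);
++\endcode
++*/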
++
++/** \brief Retrieves information about current memory usage and budget for all memory heaps.
++
++\param allocator
++\param[out] pBudgets Must point to an array with at least as many elements as there are memory heaps in the physical device used.
++
++This function is called "get" not "calculate" because it is very fast, suitable to be called
++every frame or every allocation. For more detailed statistics use vmaCalculateStatistics().
++
++Note that when using the allocator from multiple threads, the returned information may immediately
++become outdated.
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaBudget* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pBudgets);
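++
++/*
++A minimal sketch of querying the budgets, e.g. once per frame, assuming an
++existing `allocator`:
++
++\code
++const VkPhysicalDeviceMemoryProperties* memProps = nullptr;
++vmaGetMemoryProperties(allocator, &memProps);
++
++VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
++vmaGetHeapBudgets(allocator, budgets);
++
++for(uint32_t heapIndex = 0; heapIndex < memProps->memoryHeapCount; ++heapIndex)
++{
++    printf("Heap %u: usage %llu / budget %llu bytes\n", heapIndex,
++        (unsigned long long)budgets[heapIndex].usage,
++        (unsigned long long)budgets[heapIndex].budget);
++}
++\endcode
++*/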
++
++/** @} */
++
++/**
++\addtogroup group_alloc
++@{
++*/
++
++/**
++\brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
++
++This algorithm tries to find a memory type that:
++
++- Is allowed by memoryTypeBits.
++- Contains all the flags from pAllocationCreateInfo->requiredFlags.
++- Matches intended usage.
++- Has as many flags from pAllocationCreateInfo->preferredFlags as possible.
++
++\return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such a result
++from this function or any other allocating function probably means that your
++device doesn't support any memory type with the requested features for the specific
++type of resource you want to use it for. Please check the parameters of your
++resource, like image layout (OPTIMAL versus LINEAR) or mip level count.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
++ VmaAllocator VMA_NOT_NULL allocator,
++ uint32_t memoryTypeBits,
++ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
++ uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
++
++/**
++\brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
++
++It can be useful e.g. to determine the value to be used as VmaPoolCreateInfo::memoryTypeIndex.
++It internally creates a temporary, dummy buffer that never has memory bound.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
++ VmaAllocator VMA_NOT_NULL allocator,
++ const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
++ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
++ uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
++
++/**
++\brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
++
++It can be useful e.g. to determine the value to be used as VmaPoolCreateInfo::memoryTypeIndex.
++It internally creates a temporary, dummy image that never has memory bound.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
++ VmaAllocator VMA_NOT_NULL allocator,
++ const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
++ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
++ uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
++
++/** \brief Allocates Vulkan device memory and creates #VmaPool object.
++
++\param allocator Allocator object.
++\param pCreateInfo Parameters of pool to create.
++\param[out] pPool Handle to created pool.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
++ VmaAllocator VMA_NOT_NULL allocator,
++ const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,
++ VmaPool VMA_NULLABLE* VMA_NOT_NULL pPool);
++
++/** \brief Destroys #VmaPool object and frees Vulkan device memory.
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaPool VMA_NULLABLE pool);
++
++/** @} */
++
++/**
++\addtogroup group_stats
++@{
++*/
++
++/** \brief Retrieves statistics of existing #VmaPool object.
++
++\param allocator Allocator object.
++\param pool Pool object.
++\param[out] pPoolStats Statistics of specified pool.
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaPool VMA_NOT_NULL pool,
++ VmaStatistics* VMA_NOT_NULL pPoolStats);
++
++/** \brief Retrieves detailed statistics of existing #VmaPool object.
++
++\param allocator Allocator object.
++\param pool Pool object.
++\param[out] pPoolStats Statistics of specified pool.
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaPool VMA_NOT_NULL pool,
++ VmaDetailedStatistics* VMA_NOT_NULL pPoolStats);
++
++/** @} */
++
++/**
++\addtogroup group_alloc
++@{
++*/
++
++/** \brief Checks the magic number in margins around all allocations in a given memory pool, in search of corruption.
++
++Corruption detection is enabled only when the `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
++`VMA_DEBUG_MARGIN` is defined to nonzero, and the pool is created in a memory type that is
++`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
++
++Possible return values:
++
++- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool.
++- `VK_SUCCESS` - corruption detection has been performed and succeeded.
++- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
++ `VMA_ASSERT` is also fired in that case.
++- Other value: Error returned by Vulkan, e.g. memory mapping failure.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaPool VMA_NOT_NULL pool);
++
++/** \brief Retrieves name of a custom pool.
++
++After the call, `ppName` is either null or points to an internally-owned, null-terminated string
++containing the name of the pool that was previously set. The pointer becomes invalid when the pool is
++destroyed or its name is changed using vmaSetPoolName().
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaPool VMA_NOT_NULL pool,
++ const char* VMA_NULLABLE* VMA_NOT_NULL ppName);
++
++/** \brief Sets name of a custom pool.
++
++`pName` can be either null or a pointer to a null-terminated string with the new name for the pool.
++The function makes an internal copy of the string, so it can be changed or freed immediately after this call.
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaPool VMA_NOT_NULL pool,
++ const char* VMA_NULLABLE pName);
++
++/** \brief General purpose memory allocation.
++
++\param allocator
++\param pVkMemoryRequirements
++\param pCreateInfo
++\param[out] pAllocation Handle to allocated memory.
++\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
++
++You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
++
++It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(),
++vmaCreateBuffer(), vmaCreateImage() instead whenever possible.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
++ VmaAllocator VMA_NOT_NULL allocator,
++ const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
++ const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
++ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
++ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
++
++/** \brief General purpose memory allocation for multiple allocation objects at once.
++
++\param allocator Allocator object.
++\param pVkMemoryRequirements Memory requirements for each allocation.
++\param pCreateInfo Creation parameters for each allocation.
++\param allocationCount Number of allocations to make.
++\param[out] pAllocations Pointer to array that will be filled with handles to created allocations.
++\param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations.
++
++You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
++
++Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding.
++It is just a general purpose allocation function able to make multiple allocations at once.
++It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times.
++
++All allocations are made using the same parameters. All of them are created out of the same memory pool and type.
++If any allocation fails, all allocations already made within this function call are also freed, so that when the
++returned result is not `VK_SUCCESS`, the `pAllocations` array is always entirely filled with `VK_NULL_HANDLE`.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
++ VmaAllocator VMA_NOT_NULL allocator,
++ const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
++ const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
++ size_t allocationCount,
++ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
++ VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);
++
++/** \brief Allocates memory suitable for given `VkBuffer`.
++
++\param allocator
++\param buffer
++\param pCreateInfo
++\param[out] pAllocation Handle to allocated memory.
++\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
++
++It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindBufferMemory().
++
++This is a special-purpose function. In most cases you should use vmaCreateBuffer().
++
++You must free the allocation using vmaFreeMemory() when no longer needed.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
++ const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
++ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
++ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
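++
++/*
++A minimal sketch of the separate create/allocate/bind path that this function
++enables, assuming existing `device` and `allocator` handles:
++
++\code
++VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
++bufCreateInfo.size = 65536;
++bufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
++
++VkBuffer buf;
++VkResult res = vkCreateBuffer(device, &bufCreateInfo, nullptr, &buf);
++
++VmaAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
++
++VmaAllocation alloc;
++res = vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, nullptr);
++res = vmaBindBufferMemory(allocator, alloc, buf); // Declared further below.
++
++// Later:
++vkDestroyBuffer(device, buf, nullptr);
++vmaFreeMemory(allocator, alloc);
++\endcode
++*/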
++
++/** \brief Allocates memory suitable for given `VkImage`.
++
++\param allocator
++\param image
++\param pCreateInfo
++\param[out] pAllocation Handle to allocated memory.
++\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
++
++It only creates #VmaAllocation. To bind the memory to the image, use vmaBindImageMemory().
++
++This is a special-purpose function. In most cases you should use vmaCreateImage().
++
++You must free the allocation using vmaFreeMemory() when no longer needed.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
++ const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
++ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
++ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
++
++/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
++
++Passing `VK_NULL_HANDLE` as `allocation` is valid. Such a function call is just skipped.
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
++ VmaAllocator VMA_NOT_NULL allocator,
++ const VmaAllocation VMA_NULLABLE allocation);
++
++/** \brief Frees memory and destroys multiple allocations.
++
++Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding.
++It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(),
++vmaAllocateMemoryPages() and other functions.
++It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times.
++
++Allocations in `pAllocations` array can come from any memory pools and types.
++Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped.
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
++ VmaAllocator VMA_NOT_NULL allocator,
++ size_t allocationCount,
++ const VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);
++
++/** \brief Returns current information about specified allocation.
++
++The current parameters of the given allocation are returned in `pAllocationInfo`.
++
++This function doesn't lock any mutex, so it should be quite efficient; still,
++you should avoid calling it too often.
++You can retrieve the same VmaAllocationInfo structure while creating your resource, from functions
++vmaCreateBuffer() and vmaCreateImage(). You can remember it if you are sure the parameters don't change
++(e.g. due to defragmentation).
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocation VMA_NOT_NULL allocation,
++ VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);
++
++/** \brief Sets pUserData in given allocation to new value.
++
++The value of pointer `pUserData` is copied to the allocation's `pUserData`.
++It is opaque, so you can use it however you want - e.g.
++as a pointer, an ordinal number, or some handle to your own data.
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocation VMA_NOT_NULL allocation,
++ void* VMA_NULLABLE pUserData);
++
++/** \brief Sets pName in given allocation to new value.
++
++`pName` must be either null or a pointer to a null-terminated string. The function
++makes a local copy of the string and sets it as the allocation's `pName`. The string
++passed as pName doesn't need to be valid for the whole lifetime of the allocation -
++you can free it after this call. The string previously pointed to by the allocation's
++`pName` is freed from memory.
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocation VMA_NOT_NULL allocation,
++ const char* VMA_NULLABLE pName);
++
++/**
++\brief Given an allocation, returns Property Flags of its memory type.
++
++This is just a convenience function. Same information can be obtained using
++vmaGetAllocationInfo() + vmaGetMemoryProperties().
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocation VMA_NOT_NULL allocation,
++ VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
++
++/** \brief Maps memory represented by the given allocation and returns a pointer to it.
++
++Maps memory represented by the given allocation to make it accessible to CPU code.
++When it succeeds, `*ppData` contains a pointer to the first byte of this memory.
++
++\warning
++If the allocation is part of a bigger `VkDeviceMemory` block, the returned pointer is
++correctly offset to the beginning of the region assigned to this particular allocation.
++Unlike the result of `vkMapMemory`, it points to the allocation, not to the beginning of the whole block.
++You should not add VmaAllocationInfo::offset to it!
++
++Mapping is internally reference-counted and synchronized, so even though the raw Vulkan
++function `vkMapMemory()` cannot be used to map the same block of `VkDeviceMemory`
++multiple times simultaneously, it is safe to call this function on allocations
++assigned to the same memory block. The actual Vulkan memory will be mapped on the first
++mapping and unmapped on the last unmapping.
++
++If the function succeeded, you must call vmaUnmapMemory() to unmap the
++allocation when mapping is no longer needed or before freeing the allocation, at
++the latest.
++
++It is also safe to call this function multiple times on the same allocation. You
++must call vmaUnmapMemory() the same number of times as you called vmaMapMemory().
++
++It is also safe to call this function on an allocation created with the
++#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.
++You must still call vmaUnmapMemory() the same number of times as you called
++vmaMapMemory(). You must not call vmaUnmapMemory() an additional time to free the
++"0-th" mapping made automatically due to the #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
++
++This function fails when used on an allocation made in a memory type that is not
++`HOST_VISIBLE`.
++
++This function doesn't automatically flush or invalidate caches.
++If the allocation is made from a memory type that is not `HOST_COHERENT`,
++you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by the Vulkan specification.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocation VMA_NOT_NULL allocation,
++ void* VMA_NULLABLE* VMA_NOT_NULL ppData);
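++
++/*
++A minimal sketch of uploading data through a mapping, assuming `allocation`
++was made in a `HOST_VISIBLE` memory type and `srcData`/`srcDataSize` are the
++application's own:
++
++\code
++void* mappedData;
++VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
++if(res == VK_SUCCESS)
++{
++    memcpy(mappedData, srcData, srcDataSize);
++    // Needed only for memory types that are not HOST_COHERENT:
++    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
++    vmaUnmapMemory(allocator, allocation);
++}
++\endcode
++*/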
++
++/** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
++
++For details, see description of vmaMapMemory().
++
++This function doesn't automatically flush or invalidate caches.
++If the allocation is made from a memory type that is not `HOST_COHERENT`,
++you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by the Vulkan specification.
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocation VMA_NOT_NULL allocation);
++
++/** \brief Flushes memory of given allocation.
++
++Calls `vkFlushMappedMemoryRanges()` for the memory associated with the given range of the given allocation.
++It needs to be called after writing to mapped memory for memory types that are not `HOST_COHERENT`.
++The unmap operation doesn't do that automatically.
++
++- `offset` must be relative to the beginning of the allocation.
++- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
++- `offset` and `size` don't have to be aligned.
++ They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
++- If `size` is 0, this call is ignored.
++- If the memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
++ this call is ignored.
++
++Warning! `offset` and `size` are relative to the contents of the given `allocation`.
++If you mean the whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
++Do not pass the allocation's offset as `offset`!
++
++This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
++called, otherwise `VK_SUCCESS`.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocation VMA_NOT_NULL allocation,
++ VkDeviceSize offset,
++ VkDeviceSize size);
++
++/** \brief Invalidates memory of given allocation.
++
++Calls `vkInvalidateMappedMemoryRanges()` for the memory associated with the given range of the given allocation.
++It needs to be called before reading from mapped memory for memory types that are not `HOST_COHERENT`.
++The map operation doesn't do that automatically.
++
++- `offset` must be relative to the beginning of the allocation.
++- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
++- `offset` and `size` don't have to be aligned.
++ They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
++- If `size` is 0, this call is ignored.
++- If the memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
++ this call is ignored.
++
++Warning! `offset` and `size` are relative to the contents of the given `allocation`.
++If you mean the whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
++Do not pass the allocation's offset as `offset`!
++
++This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if
++it is called, otherwise `VK_SUCCESS`.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocation VMA_NOT_NULL allocation,
++ VkDeviceSize offset,
++ VkDeviceSize size);
++
++/** \brief Flushes memory of given set of allocations.
++
++Calls `vkFlushMappedMemoryRanges()` for memory associated with given ranges of given allocations.
++For more information, see documentation of vmaFlushAllocation().
++
++\param allocator
++\param allocationCount
++\param allocations
++\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all offsets are zero.
++\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
++
++This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
++called, otherwise `VK_SUCCESS`.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
++ VmaAllocator VMA_NOT_NULL allocator,
++ uint32_t allocationCount,
++ const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
++ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
++ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
++
++/** \brief Invalidates memory of given set of allocations.
++
++Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations.
++For more information, see documentation of vmaInvalidateAllocation().
++
++\param allocator
++\param allocationCount
++\param allocations
++\param offsets If not null, it must point to an array of offsets of regions to invalidate, relative to the beginning of respective allocations. Null means all offsets are zero.
++\param sizes If not null, it must point to an array of sizes of regions to invalidate in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
++
++This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is
++called, otherwise `VK_SUCCESS`.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
++ VmaAllocator VMA_NOT_NULL allocator,
++ uint32_t allocationCount,
++ const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
++ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
++ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
++
++/** \brief Checks the magic number in margins around all allocations in given memory types (in both default and custom pools), in search of corruption.
++
++\param allocator
++\param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked.
++
++Corruption detection is enabled only when the `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
++`VMA_DEBUG_MARGIN` is defined to nonzero, and only for memory types that are
++`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
++
++Possible return values:
++
++- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types.
++- `VK_SUCCESS` - corruption detection has been performed and succeeded.
++- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
++ `VMA_ASSERT` is also fired in that case.
++- Other value: Error returned by Vulkan, e.g. memory mapping failure.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(
++ VmaAllocator VMA_NOT_NULL allocator,
++ uint32_t memoryTypeBits);
++
++/** \brief Begins defragmentation process.
++
++\param allocator Allocator object.
++\param pInfo Structure filled with parameters of defragmentation.
++\param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation.
++\returns
++- `VK_SUCCESS` if defragmentation can begin.
++- `VK_ERROR_FEATURE_NOT_PRESENT` if defragmentation is not supported.
++
++For more information about defragmentation, see documentation chapter:
++[Defragmentation](@ref defragmentation).
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation(
++ VmaAllocator VMA_NOT_NULL allocator,
++ const VmaDefragmentationInfo* VMA_NOT_NULL pInfo,
++ VmaDefragmentationContext VMA_NULLABLE* VMA_NOT_NULL pContext);
++
++/** \brief Ends defragmentation process.
++
++\param allocator Allocator object.
++\param context Context object that has been created by vmaBeginDefragmentation().
++\param[out] pStats Optional stats for the defragmentation. Can be null.
++
++Use this function to finish defragmentation started by vmaBeginDefragmentation().
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaDefragmentationContext VMA_NOT_NULL context,
++ VmaDefragmentationStats* VMA_NULLABLE pStats);
++
++/** \brief Starts single defragmentation pass.
++
++\param allocator Allocator object.
++\param context Context object that has been created by vmaBeginDefragmentation().
++\param[out] pPassInfo Computed information for current pass.
++\returns
++- `VK_SUCCESS` if no more moves are possible. Then you can omit the call to vmaEndDefragmentationPass() and simply end the whole defragmentation.
++- `VK_INCOMPLETE` if there are pending moves returned in `pPassInfo`. You need to perform them, call vmaEndDefragmentationPass(),
++ and then preferably try another pass with vmaBeginDefragmentationPass().
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaDefragmentationContext VMA_NOT_NULL context,
++ VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);
++
++/** \brief Ends single defragmentation pass.
++
++\param allocator Allocator object.
++\param context Context object that has been created by vmaBeginDefragmentation().
++\param pPassInfo Computed information for the current pass, filled by vmaBeginDefragmentationPass() and possibly modified by you.
++
++Returns `VK_SUCCESS` if no more moves are possible or `VK_INCOMPLETE` if more defragmentations are possible.
++
++Ends the incremental defragmentation pass and commits all defragmentation moves from `pPassInfo`.
++After this call:
++
++- Allocations at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY
++ (which is the default) will be pointing to the new destination place.
++- Allocations at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY
++ will be freed.
++
++If no more moves are possible, you can end the whole defragmentation.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaDefragmentationContext VMA_NOT_NULL context,
++ VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);
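++
++/*
++A minimal sketch of a complete defragmentation run using the functions above.
++Recreating resources and recording the copy commands for each move is left
++out as application-specific:
++
++\code
++VmaDefragmentationInfo defragInfo = {}; // Defaults: all default pools, no limits.
++
++VmaDefragmentationContext defragCtx;
++VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
++
++for(;;)
++{
++    VmaDefragmentationPassMoveInfo passInfo = {};
++    res = vmaBeginDefragmentationPass(allocator, defragCtx, &passInfo);
++    if(res == VK_SUCCESS)
++        break; // No more moves are possible.
++    // Process passInfo.pMoves[0 .. passInfo.moveCount): recreate resources,
++    // submit copy commands, wait for the GPU to finish...
++    res = vmaEndDefragmentationPass(allocator, defragCtx, &passInfo);
++    if(res == VK_SUCCESS)
++        break;
++}
++
++VmaDefragmentationStats stats;
++vmaEndDefragmentation(allocator, defragCtx, &stats);
++\endcode
++*/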
++
++/** \brief Binds buffer to allocation.
++
++Binds the specified buffer to the region of memory represented by the specified allocation.
++It gets the `VkDeviceMemory` handle and offset from the allocation.
++If you want to create a buffer, allocate memory for it, and bind them together separately,
++you should use this function for binding instead of the standard `vkBindBufferMemory()`,
++because it ensures proper synchronization, so that when a `VkDeviceMemory` object is used by multiple
++allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
++(which is illegal in Vulkan).
++
++It is recommended to use the function vmaCreateBuffer() instead of this one.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocation VMA_NOT_NULL allocation,
++ VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);
++
++/** \brief Binds buffer to allocation with additional parameters.
++
++\param allocator
++\param allocation
++\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
++\param buffer
++\param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null.
++
++This function is similar to vmaBindBufferMemory(), but it provides additional parameters.
++
++If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
++or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocation VMA_NOT_NULL allocation,
++ VkDeviceSize allocationLocalOffset,
++ VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
++ const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindBufferMemoryInfoKHR) pNext);
++
++/** \brief Binds image to allocation.
++
++Binds the specified image to the region of memory represented by the specified allocation.
++Gets the `VkDeviceMemory` handle and offset from the allocation.
++If you want to create an image, allocate memory for it, and bind them together separately,
++you should use this function for binding instead of the standard `vkBindImageMemory()`,
++because it ensures proper synchronization: when a `VkDeviceMemory` object is used by multiple
++allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
++(which is illegal in Vulkan).
++
++It is recommended to use the function vmaCreateImage() instead of this one.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocation VMA_NOT_NULL allocation,
++ VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);
++
++/** \brief Binds image to allocation with additional parameters.
++
++\param allocator
++\param allocation
++\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
++\param image
++\param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null.
++
++This function is similar to vmaBindImageMemory(), but it provides additional parameters.
++
++If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
++or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocation VMA_NOT_NULL allocation,
++ VkDeviceSize allocationLocalOffset,
++ VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
++ const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindImageMemoryInfoKHR) pNext);
++
++/** \brief Creates a new `VkBuffer`, allocates and binds memory for it.
++
++\param allocator
++\param pBufferCreateInfo
++\param pAllocationCreateInfo
++\param[out] pBuffer Buffer that was created.
++\param[out] pAllocation Allocation that was created.
++\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
++
++This function automatically:
++
++-# Creates buffer.
++-# Allocates appropriate memory for it.
++-# Binds the buffer with the memory.
++
++If any of these operations fail, the buffer and allocation are not created,
++the returned value is a negative error code, and `*pBuffer` and `*pAllocation` are null.
++
++If the function succeeded, you must destroy both buffer and allocation when you
++no longer need them using either convenience function vmaDestroyBuffer() or
++separately, using `vkDestroyBuffer()` and vmaFreeMemory().
++
++If the #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used,
++the VK_KHR_dedicated_allocation extension is used internally to query the driver whether
++it requires or prefers the new buffer to have a dedicated allocation. If so,
++and if a dedicated allocation is possible
++(#VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates a dedicated
++allocation for this buffer, just like when using
++#VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
++
++\note This function creates a new `VkBuffer`. Sub-allocation of parts of one large buffer,
++although recommended as a good practice, is out of scope of this library and could be implemented
++by the user as a higher-level logic on top of VMA.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
++ VmaAllocator VMA_NOT_NULL allocator,
++ const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
++ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
++ VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
++ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
++ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
++
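++/*
++A minimal sketch of typical usage - editorial, not part of the original VMA
++documentation; `allocator` is assumed to exist:
++
++VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
++bufCreateInfo.size = 65536;
++bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
++
++VmaAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
++
++VkBuffer buf;
++VmaAllocation alloc;
++if(vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL) == VK_SUCCESS)
++{
++    // ... use buf ...
++    vmaDestroyBuffer(allocator, buf, alloc);
++}
++*/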
++/** \brief Creates a buffer with additional minimum alignment.
++
++Similar to vmaCreateBuffer(), but provides the additional parameter `minAlignment`, which allows you to
++specify a custom minimum alignment to be used when placing the buffer inside a larger memory block.
++This may be needed e.g. for interop with OpenGL.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
++ VmaAllocator VMA_NOT_NULL allocator,
++ const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
++ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
++ VkDeviceSize minAlignment,
++ VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
++ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
++ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
++
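++/*
++A short sketch, reusing `bufCreateInfo` and `allocCreateInfo` from the sketch
++above; the 256-byte alignment is a hypothetical external requirement:
++
++VkBuffer buf;
++VmaAllocation alloc;
++vmaCreateBufferWithAlignment(allocator, &bufCreateInfo, &allocCreateInfo,
++    256, &buf, &alloc, VMA_NULL);
++*/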
++/** \brief Creates a new `VkBuffer`, binds already created memory for it.
++
++\param allocator
++\param allocation Allocation that provides memory to be used for binding new buffer to it.
++\param pBufferCreateInfo
++\param[out] pBuffer Buffer that was created.
++
++This function automatically:
++
++-# Creates buffer.
++-# Binds the buffer with the supplied memory.
++
++If any of these operations fail, the buffer is not created,
++the returned value is a negative error code, and `*pBuffer` is null.
++
++If the function succeeded, you must destroy the buffer when you
++no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding
++allocation you can use convenience function vmaDestroyBuffer().
++
++\note There is a new version of this function augmented with parameter `allocationLocalOffset` - see vmaCreateAliasingBuffer2().
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocation VMA_NOT_NULL allocation,
++ const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
++ VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer);
++
++/** \brief Creates a new `VkBuffer`, binds already created memory for it.
++
++\param allocator
++\param allocation Allocation that provides memory to be used for binding new buffer to it.
++\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the allocation. Normally it should be 0.
++\param pBufferCreateInfo
++\param[out] pBuffer Buffer that was created.
++
++This function automatically:
++
++-# Creates buffer.
++-# Binds the buffer with the supplied memory.
++
++If any of these operations fail, the buffer is not created,
++the returned value is a negative error code, and `*pBuffer` is null.
++
++If the function succeeded, you must destroy the buffer when you
++no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding
++allocation you can use convenience function vmaDestroyBuffer().
++
++\note This is a new version of the function augmented with parameter `allocationLocalOffset`.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocation VMA_NOT_NULL allocation,
++ VkDeviceSize allocationLocalOffset,
++ const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
++ VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer);
++
++/** \brief Destroys Vulkan buffer and frees allocated memory.
++
++This is just a convenience function equivalent to:
++
++\code
++vkDestroyBuffer(device, buffer, allocationCallbacks);
++vmaFreeMemory(allocator, allocation);
++\endcode
++
++It is safe to pass null as buffer and/or allocation.
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
++ VmaAllocation VMA_NULLABLE allocation);
++
++/// Function similar to vmaCreateBuffer().
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
++ VmaAllocator VMA_NOT_NULL allocator,
++ const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
++ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
++ VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage,
++ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
++ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
++
++/// Function similar to vmaCreateAliasingBuffer() but for images.
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocation VMA_NOT_NULL allocation,
++ const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
++ VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage);
++
++/// Function similar to vmaCreateAliasingBuffer2() but for images.
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocation VMA_NOT_NULL allocation,
++ VkDeviceSize allocationLocalOffset,
++ const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
++ VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage);
++
++/** \brief Destroys Vulkan image and frees allocated memory.
++
++This is just a convenience function equivalent to:
++
++\code
++vkDestroyImage(device, image, allocationCallbacks);
++vmaFreeMemory(allocator, allocation);
++\endcode
++
++It is safe to pass null as image and/or allocation.
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
++ VmaAllocation VMA_NULLABLE allocation);
++
++/** @} */
++
++/**
++\addtogroup group_virtual
++@{
++*/
++
++/** \brief Creates new #VmaVirtualBlock object.
++
++\param pCreateInfo Parameters for creation.
++\param[out] pVirtualBlock Returned virtual block object or `VMA_NULL` if creation failed.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(
++ const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
++ VmaVirtualBlock VMA_NULLABLE* VMA_NOT_NULL pVirtualBlock);
++
++/** \brief Destroys #VmaVirtualBlock object.
++
++Please note that you should consciously handle virtual allocations that could remain unfreed in the block.
++You should either free them individually using vmaVirtualFree() or call vmaClearVirtualBlock()
++if you are sure this is what you want. If you do neither, an assert is triggered.
++
++If you keep pointers to some additional metadata associated with your virtual allocations in their `pUserData`,
++don't forget to free them.
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(
++ VmaVirtualBlock VMA_NULLABLE virtualBlock);
++
++/** \brief Returns true if the #VmaVirtualBlock is empty - contains 0 virtual allocations and has all its space available for new allocations.
++*/
++VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(
++ VmaVirtualBlock VMA_NOT_NULL virtualBlock);
++
++/** \brief Returns information about a specific virtual allocation within a virtual block, like its size and `pUserData` pointer.
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(
++ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
++ VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo);
++
++/** \brief Allocates new virtual allocation inside given #VmaVirtualBlock.
++
++If the allocation fails due to insufficient free space, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned
++(even though the function never allocates actual GPU memory).
++`pAllocation` is then set to `VK_NULL_HANDLE` and `pOffset`, if not null, is set to `UINT64_MAX`.
++
++\param virtualBlock Virtual block
++\param pCreateInfo Parameters for the allocation
++\param[out] pAllocation Returned handle of the new allocation
++\param[out] pOffset Returned offset of the new allocation. Optional, can be null.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(
++ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
++ const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
++ VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,
++ VkDeviceSize* VMA_NULLABLE pOffset);
++
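++/*
++A minimal sketch of the virtual allocation workflow - editorial, with
++hypothetical sizes:
++
++VmaVirtualBlockCreateInfo blockCreateInfo = {};
++blockCreateInfo.size = 1048576; // 1 MB of "virtual" space
++VmaVirtualBlock block;
++vmaCreateVirtualBlock(&blockCreateInfo, &block);
++
++VmaVirtualAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.size = 4096;
++VmaVirtualAllocation alloc;
++VkDeviceSize offset;
++if(vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset) == VK_SUCCESS)
++{
++    // Use `offset` inside your own buffer or memory...
++    vmaVirtualFree(block, alloc);
++}
++vmaDestroyVirtualBlock(block);
++*/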
++/** \brief Frees virtual allocation inside given #VmaVirtualBlock.
++
++It is correct to call this function with `allocation == VK_NULL_HANDLE` - it does nothing.
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(
++ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
++ VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation);
++
++/** \brief Frees all virtual allocations inside given #VmaVirtualBlock.
++
++You must either call this function or free each virtual allocation individually with vmaVirtualFree()
++before destroying a virtual block. Otherwise, an assert is triggered.
++
++If you keep a pointer to additional metadata associated with your virtual allocation in its `pUserData`,
++don't forget to free it as well.
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(
++ VmaVirtualBlock VMA_NOT_NULL virtualBlock);
++
++/** \brief Changes custom pointer associated with given virtual allocation.
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(
++ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
++ VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation,
++ void* VMA_NULLABLE pUserData);
++
++/** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
++
++This function is fast to call. For more detailed statistics, see vmaCalculateVirtualBlockStatistics().
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(
++ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
++ VmaStatistics* VMA_NOT_NULL pStats);
++
++/** \brief Calculates and returns detailed statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
++
++This function is slow to call. Use for debugging purposes.
++For less detailed statistics, see vmaGetVirtualBlockStatistics().
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(
++ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
++ VmaDetailedStatistics* VMA_NOT_NULL pStats);
++
++/** @} */
++
++#if VMA_STATS_STRING_ENABLED
++/**
++\addtogroup group_stats
++@{
++*/
++
++/** \brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock.
++\param virtualBlock Virtual block.
++\param[out] ppStatsString Returned string.
++\param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStatistics(). Pass `VK_TRUE` to also obtain a full list of allocations and free spaces.
++
++Returned string must be freed using vmaFreeVirtualBlockStatsString().
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(
++ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
++ char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
++ VkBool32 detailedMap);
++
++/// Frees a string returned by vmaBuildVirtualBlockStatsString().
++VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(
++ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
++ char* VMA_NULLABLE pStatsString);
++
++/** \brief Builds and returns statistics as a null-terminated string in JSON format.
++\param allocator
++\param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
++\param detailedMap
++*/
++VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
++ VmaAllocator VMA_NOT_NULL allocator,
++ char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
++ VkBool32 detailedMap);
++
++VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
++ VmaAllocator VMA_NOT_NULL allocator,
++ char* VMA_NULLABLE pStatsString);
++
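++/*
++A minimal sketch of dumping allocator statistics - editorial; `allocator` is
++assumed to exist:
++
++char* statsString = VMA_NULL;
++vmaBuildStatsString(allocator, &statsString, VK_TRUE);
++// ... write statsString to a log or file ...
++vmaFreeStatsString(allocator, statsString);
++*/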
++/** @} */
++
++#endif // VMA_STATS_STRING_ENABLED
++
++#endif // _VMA_FUNCTION_HEADERS
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
++
++////////////////////////////////////////////////////////////////////////////////
++////////////////////////////////////////////////////////////////////////////////
++//
++// IMPLEMENTATION
++//
++////////////////////////////////////////////////////////////////////////////////
++////////////////////////////////////////////////////////////////////////////////
++
++// For Visual Studio IntelliSense.
++#if defined(__cplusplus) && defined(__INTELLISENSE__)
++#define VMA_IMPLEMENTATION
++#endif
++
++#ifdef VMA_IMPLEMENTATION
++#undef VMA_IMPLEMENTATION
++
++#include <cstdint>
++#include <cstdlib>
++#include <cstring>
++#include <utility>
++#include <type_traits>
++
++#ifdef _MSC_VER
++ #include <intrin.h> // For functions like __popcnt, _BitScanForward etc.
++#endif
++#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
++ #include <bit> // For std::popcount
++#endif
++
++#if VMA_STATS_STRING_ENABLED
++ #include <cstdio> // For snprintf
++#endif
++
++/*******************************************************************************
++CONFIGURATION SECTION
++
++Define some of these macros before each #include of this header, or change them
++here, if you need behavior other than the default for your environment.
++*/
++#ifndef _VMA_CONFIGURATION
++
++/*
++Define this macro to 1 to make the library fetch pointers to Vulkan functions
++internally, like:
++
++ vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
++*/
++#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
++ #define VMA_STATIC_VULKAN_FUNCTIONS 1
++#endif
++
++/*
++Define this macro to 1 to make the library fetch pointers to Vulkan functions
++internally, like:
++
++ vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
++
++To use this feature in new versions of VMA you now have to pass
++VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as
++VmaAllocatorCreateInfo::pVulkanFunctions. Other members can be null.
++*/
++#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
++ #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
++#endif
++
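++/*
++A sketch of the setup described above - editorial; it assumes linking against
++the Vulkan loader and uses vmaCreateAllocator() declared earlier in this
++header, with `instance`, `physicalDevice` and `device` created by the caller:
++
++VmaVulkanFunctions vulkanFunctions = {};
++vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr;
++vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr;
++
++VmaAllocatorCreateInfo allocatorCreateInfo = {};
++allocatorCreateInfo.instance = instance;
++allocatorCreateInfo.physicalDevice = physicalDevice;
++allocatorCreateInfo.device = device;
++allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
++
++VmaAllocator allocator;
++vmaCreateAllocator(&allocatorCreateInfo, &allocator);
++*/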
++#ifndef VMA_USE_STL_SHARED_MUTEX
++ #if __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
++ #define VMA_USE_STL_SHARED_MUTEX 1
++    // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
++    // Otherwise it is always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
++ #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
++ #define VMA_USE_STL_SHARED_MUTEX 1
++ #else
++ #define VMA_USE_STL_SHARED_MUTEX 0
++ #endif
++#endif
++
++/*
++Define this macro to include custom header files without having to edit this file directly, e.g.:
++
++ // Inside of "my_vma_configuration_user_includes.h":
++
++ #include "my_custom_assert.h" // for MY_CUSTOM_ASSERT
++ #include "my_custom_min.h" // for my_custom_min
++ #include <algorithm>
++ #include <mutex>
++
++ // Inside a different file, which includes "vk_mem_alloc.h":
++
++ #define VMA_CONFIGURATION_USER_INCLUDES_H "my_vma_configuration_user_includes.h"
++ #define VMA_ASSERT(expr) MY_CUSTOM_ASSERT(expr)
++ #define VMA_MIN(v1, v2) (my_custom_min(v1, v2))
++ #include "vk_mem_alloc.h"
++ ...
++
++The following headers are used in this CONFIGURATION section only, so feel free to
++remove them if not needed.
++*/
++#if !defined(VMA_CONFIGURATION_USER_INCLUDES_H)
++ #include <cassert> // for assert
++ #include <algorithm> // for min, max
++ #include <mutex>
++#else
++ #include VMA_CONFIGURATION_USER_INCLUDES_H
++#endif
++
++#ifndef VMA_NULL
++ // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
++ #define VMA_NULL nullptr
++#endif
++
++// Used to silence warnings for implicit fallthrough.
++#ifndef VMA_FALLTHROUGH
++ #if __has_cpp_attribute(clang::fallthrough)
++ #define VMA_FALLTHROUGH [[clang::fallthrough]];
++ #elif __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
++ #define VMA_FALLTHROUGH [[fallthrough]]
++ #else
++ #define VMA_FALLTHROUGH
++ #endif
++#endif
++
++// Normal assert to check for programmer's errors, especially in Debug configuration.
++#ifndef VMA_ASSERT
++ #ifdef NDEBUG
++ #define VMA_ASSERT(expr)
++ #else
++ #define VMA_ASSERT(expr) assert(expr)
++ #endif
++#endif
++
++// Assert that will be called very often, e.g. inside data structures such as operator[].
++// Making it non-empty can make the program slow.
++#ifndef VMA_HEAVY_ASSERT
++ #ifdef NDEBUG
++ #define VMA_HEAVY_ASSERT(expr)
++ #else
++ #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
++ #endif
++#endif
++
++// If your compiler is not compatible with C++17 and the definition of the
++// aligned_alloc() function is missing, uncommenting the following line may help:
++
++//#include <malloc.h>
++
++#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
++#include <cstdlib>
++void* vma_aligned_alloc(size_t alignment, size_t size)
++{
++ // alignment must be >= sizeof(void*)
++ if(alignment < sizeof(void*))
++ {
++ alignment = sizeof(void*);
++ }
++
++ return memalign(alignment, size);
++}
+#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC)) || defined(__OpenBSD__) || defined(__FreeBSD__)
- #include <cstdlib>
-
- #if defined(__APPLE__)
++#include <cstdlib>
++
++#if defined(__APPLE__)
++#include <AvailabilityMacros.h>
++#endif
++
++void *vma_aligned_alloc(size_t alignment, size_t size)
++{
++    // Unfortunately, aligned_alloc causes VMA to crash because it returns null pointers (at least under macOS 11.4).
++ // Therefore, for now disable this specific exception until a proper solution is found.
++ //#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
++ //#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
++ // // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc()) only
++ // // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
++ // // MAC_OS_X_VERSION_10_16), even though the function is marked
++ // // available for 10.15. That is why the preprocessor checks for 10.16 but
++ // // the __builtin_available checks for 10.15.
++ // // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
++ // if (__builtin_available(macOS 10.15, iOS 13, *))
++ // return aligned_alloc(alignment, size);
++ //#endif
++ //#endif
++
++ // alignment must be >= sizeof(void*)
++ if(alignment < sizeof(void*))
++ {
++ alignment = sizeof(void*);
++ }
++
++ void *pointer;
++ if(posix_memalign(&pointer, alignment, size) == 0)
++ return pointer;
++ return VMA_NULL;
++}
++#elif defined(_WIN32)
++void* vma_aligned_alloc(size_t alignment, size_t size)
++{
++ return _aligned_malloc(size, alignment);
++}
++#elif __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
++void* vma_aligned_alloc(size_t alignment, size_t size)
++{
++ return aligned_alloc(alignment, size);
++}
++#else
++void* vma_aligned_alloc(size_t alignment, size_t size)
++{
++ VMA_ASSERT(0 && "Could not implement aligned_alloc automatically. Please enable C++17 or later in your compiler or provide custom implementation of macro VMA_SYSTEM_ALIGNED_MALLOC (and VMA_SYSTEM_ALIGNED_FREE if needed) using the API of your system.");
++ return VMA_NULL;
++}
++#endif
++
++#if defined(_WIN32)
++static void vma_aligned_free(void* ptr)
++{
++ _aligned_free(ptr);
++}
++#else
++static void vma_aligned_free(void* VMA_NULLABLE ptr)
++{
++ free(ptr);
++}
++#endif
++
++#ifndef VMA_ALIGN_OF
++ #define VMA_ALIGN_OF(type) (alignof(type))
++#endif
++
++#ifndef VMA_SYSTEM_ALIGNED_MALLOC
++ #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
++#endif
++
++#ifndef VMA_SYSTEM_ALIGNED_FREE
++ // VMA_SYSTEM_FREE is the old name, but might have been defined by the user
++ #if defined(VMA_SYSTEM_FREE)
++ #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr)
++ #else
++ #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr)
++ #endif
++#endif
++
++#ifndef VMA_COUNT_BITS_SET
++ // Returns number of bits set to 1 in (v)
++ #define VMA_COUNT_BITS_SET(v) VmaCountBitsSet(v)
++#endif
++
++#ifndef VMA_BITSCAN_LSB
++    // Scans the integer for the index of the first set bit, starting from the Least Significant Bit (LSB). If mask is 0, returns UINT8_MAX
++ #define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask)
++#endif
++
++#ifndef VMA_BITSCAN_MSB
++    // Scans the integer for the index of the first set bit, starting from the Most Significant Bit (MSB). If mask is 0, returns UINT8_MAX
++ #define VMA_BITSCAN_MSB(mask) VmaBitScanMSB(mask)
++#endif
++
++#ifndef VMA_MIN
++ #define VMA_MIN(v1, v2) ((std::min)((v1), (v2)))
++#endif
++
++#ifndef VMA_MAX
++ #define VMA_MAX(v1, v2) ((std::max)((v1), (v2)))
++#endif
++
++#ifndef VMA_SWAP
++ #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
++#endif
++
++#ifndef VMA_SORT
++ #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
++#endif
++
++#ifndef VMA_DEBUG_LOG_FORMAT
++ #define VMA_DEBUG_LOG_FORMAT(format, ...)
++ /*
++ #define VMA_DEBUG_LOG_FORMAT(format, ...) do { \
++ printf((format), __VA_ARGS__); \
++ printf("\n"); \
++ } while(false)
++ */
++#endif
++
++#ifndef VMA_DEBUG_LOG
++ #define VMA_DEBUG_LOG(str) VMA_DEBUG_LOG_FORMAT("%s", (str))
++#endif
++
++#ifndef VMA_CLASS_NO_COPY
++ #define VMA_CLASS_NO_COPY(className) \
++ private: \
++ className(const className&) = delete; \
++ className& operator=(const className&) = delete;
++#endif
++#ifndef VMA_CLASS_NO_COPY_NO_MOVE
++ #define VMA_CLASS_NO_COPY_NO_MOVE(className) \
++ private: \
++ className(const className&) = delete; \
++ className(className&&) = delete; \
++ className& operator=(const className&) = delete; \
++ className& operator=(className&&) = delete;
++#endif
++
++// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
++#if VMA_STATS_STRING_ENABLED
++ static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
++ {
++ snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
++ }
++ static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
++ {
++ snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
++ }
++ static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
++ {
++ snprintf(outStr, strLen, "%p", ptr);
++ }
++#endif
++
++#ifndef VMA_MUTEX
++ class VmaMutex
++ {
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaMutex)
++ public:
++ VmaMutex() { }
++ void Lock() { m_Mutex.lock(); }
++ void Unlock() { m_Mutex.unlock(); }
++ bool TryLock() { return m_Mutex.try_lock(); }
++ private:
++ std::mutex m_Mutex;
++ };
++ #define VMA_MUTEX VmaMutex
++#endif
++
++// Read-write mutex, where "read" is shared access, "write" is exclusive access.
++#ifndef VMA_RW_MUTEX
++ #if VMA_USE_STL_SHARED_MUTEX
++ // Use std::shared_mutex from C++17.
++ #include <shared_mutex>
++ class VmaRWMutex
++ {
++ public:
++ void LockRead() { m_Mutex.lock_shared(); }
++ void UnlockRead() { m_Mutex.unlock_shared(); }
++ bool TryLockRead() { return m_Mutex.try_lock_shared(); }
++ void LockWrite() { m_Mutex.lock(); }
++ void UnlockWrite() { m_Mutex.unlock(); }
++ bool TryLockWrite() { return m_Mutex.try_lock(); }
++ private:
++ std::shared_mutex m_Mutex;
++ };
++ #define VMA_RW_MUTEX VmaRWMutex
++ #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
++ // Use SRWLOCK from WinAPI.
++ // Minimum supported client = Windows Vista, server = Windows Server 2008.
++ class VmaRWMutex
++ {
++ public:
++ VmaRWMutex() { InitializeSRWLock(&m_Lock); }
++ void LockRead() { AcquireSRWLockShared(&m_Lock); }
++ void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
++ bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
++ void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
++ void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
++ bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
++ private:
++ SRWLOCK m_Lock;
++ };
++ #define VMA_RW_MUTEX VmaRWMutex
++ #else
++ // Less efficient fallback: Use normal mutex.
++ class VmaRWMutex
++ {
++ public:
++ void LockRead() { m_Mutex.Lock(); }
++ void UnlockRead() { m_Mutex.Unlock(); }
++ bool TryLockRead() { return m_Mutex.TryLock(); }
++ void LockWrite() { m_Mutex.Lock(); }
++ void UnlockWrite() { m_Mutex.Unlock(); }
++ bool TryLockWrite() { return m_Mutex.TryLock(); }
++ private:
++ VMA_MUTEX m_Mutex;
++ };
++ #define VMA_RW_MUTEX VmaRWMutex
++ #endif // #if VMA_USE_STL_SHARED_MUTEX
++#endif // #ifndef VMA_RW_MUTEX
++
++/*
++If providing your own implementation, you need to implement a subset of std::atomic.
++*/
++#ifndef VMA_ATOMIC_UINT32
++ #include <atomic>
++ #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
++#endif
++
++#ifndef VMA_ATOMIC_UINT64
++ #include <atomic>
++ #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
++#endif
++
++#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
++ /**
++ Every allocation will have its own memory block.
++ Define to 1 for debugging purposes only.
++ */
++ #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
++#endif
++
++#ifndef VMA_MIN_ALIGNMENT
++ /**
++ Minimum alignment of all allocations, in bytes.
++ Set to more than 1 for debugging purposes. Must be power of two.
++ */
++ #ifdef VMA_DEBUG_ALIGNMENT // Old name
++ #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT
++ #else
++ #define VMA_MIN_ALIGNMENT (1)
++ #endif
++#endif
++
++#ifndef VMA_DEBUG_MARGIN
++ /**
++ Minimum margin after every allocation, in bytes.
++ Set nonzero for debugging purposes only.
++ */
++ #define VMA_DEBUG_MARGIN (0)
++#endif
++
++#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
++ /**
++ Define this macro to 1 to automatically fill new allocations and destroyed
++ allocations with some bit pattern.
++ */
++ #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
++#endif
++
++#ifndef VMA_DEBUG_DETECT_CORRUPTION
++ /**
++    Define this macro to 1, together with a non-zero value of VMA_DEBUG_MARGIN, to
++    enable writing a magic value to the margin after every allocation and
++    validating it, so that memory corruptions (out-of-bounds writes) are detected.
++ */
++ #define VMA_DEBUG_DETECT_CORRUPTION (0)
++#endif
++
++#ifndef VMA_DEBUG_GLOBAL_MUTEX
++ /**
++    Set this to 1, for debugging purposes only, to enable a single mutex protecting all
++    entry calls to the library. Can be useful for debugging multithreading issues.
++ */
++ #define VMA_DEBUG_GLOBAL_MUTEX (0)
++#endif
++
++#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
++ /**
++ Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
++ Set to more than 1 for debugging purposes only. Must be power of two.
++ */
++ #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
++#endif
++
++#ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
++ /*
++    Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount
++    and return an error instead of leaving it up to the Vulkan implementation what to do in such cases.
++ */
++ #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0)
++#endif
++
++#ifndef VMA_SMALL_HEAP_MAX_SIZE
++ /// Maximum size of a memory heap in Vulkan to consider it "small".
++ #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
++#endif
++
++#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
++ /// Default size of a block allocated as single VkDeviceMemory from a "large" heap.
++ #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
++#endif
++
++/*
++Mapping hysteresis is logic that kicks in when vmaMapMemory/vmaUnmapMemory is called
++or a persistently mapped allocation is created and destroyed several times in a row.
++It keeps an additional +1 mapping of a device memory block to avoid calling the actual
++vkMapMemory/vkUnmapMemory too many times, which may improve performance and help
++tools like RenderDoc.
++*/
++#ifndef VMA_MAPPING_HYSTERESIS_ENABLED
++ #define VMA_MAPPING_HYSTERESIS_ENABLED 1
++#endif
++
++#define VMA_VALIDATE(cond) do { if(!(cond)) { \
++ VMA_ASSERT(0 && "Validation failed: " #cond); \
++ return false; \
++ } } while(false)
++
++/*******************************************************************************
++END OF CONFIGURATION
++*/
++#endif // _VMA_CONFIGURATION
++
++
++static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
++static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
++// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
++static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
++
++// Copy of some Vulkan definitions so we don't need to check their existence just to handle a few constants.
++static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
++static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
++static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
++static const uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200;
++static const int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000;
++static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
++static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
++static const uint32_t VMA_VENDOR_ID_AMD = 4098;
++
++// This one is tricky. The Vulkan specification defines this code as available since
++// Vulkan 1.0, but Vulkan SDKs earlier than 1.2.131 don't actually define it.
++// See pull request #207.
++#define VK_ERROR_UNKNOWN_COPY ((VkResult)-13)
++
++
++#if VMA_STATS_STRING_ENABLED
++// Correspond to values of enum VmaSuballocationType.
++static const char* VMA_SUBALLOCATION_TYPE_NAMES[] =
++{
++ "FREE",
++ "UNKNOWN",
++ "BUFFER",
++ "IMAGE_UNKNOWN",
++ "IMAGE_LINEAR",
++ "IMAGE_OPTIMAL",
++};
++#endif
++
++static VkAllocationCallbacks VmaEmptyAllocationCallbacks =
++ { VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
++
++
++#ifndef _VMA_ENUM_DECLARATIONS
++
++enum VmaSuballocationType
++{
++ VMA_SUBALLOCATION_TYPE_FREE = 0,
++ VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
++ VMA_SUBALLOCATION_TYPE_BUFFER = 2,
++ VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
++ VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
++ VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
++ VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
++};
++
++enum VMA_CACHE_OPERATION
++{
++ VMA_CACHE_FLUSH,
++ VMA_CACHE_INVALIDATE
++};
++
++enum class VmaAllocationRequestType
++{
++ Normal,
++ TLSF,
++ // Used by "Linear" algorithm.
++ UpperAddress,
++ EndOf1st,
++ EndOf2nd,
++};
++
++#endif // _VMA_ENUM_DECLARATIONS
++
++#ifndef _VMA_FORWARD_DECLARATIONS
++// Opaque handle used by allocation algorithms to identify a single allocation in any conforming way.
++VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaAllocHandle)
++
++struct VmaMutexLock;
++struct VmaMutexLockRead;
++struct VmaMutexLockWrite;
++
++template<typename T>
++struct AtomicTransactionalIncrement;
++
++template<typename T>
++struct VmaStlAllocator;
++
++template<typename T, typename AllocatorT>
++class VmaVector;
++
++template<typename T, typename AllocatorT, size_t N>
++class VmaSmallVector;
++
++template<typename T>
++class VmaPoolAllocator;
++
++template<typename T>
++struct VmaListItem;
++
++template<typename T>
++class VmaRawList;
++
++template<typename T, typename AllocatorT>
++class VmaList;
++
++template<typename ItemTypeTraits>
++class VmaIntrusiveLinkedList;
++
++// Unused in this version
++#if 0
++template<typename T1, typename T2>
++struct VmaPair;
++template<typename FirstT, typename SecondT>
++struct VmaPairFirstLess;
++
++template<typename KeyT, typename ValueT>
++class VmaMap;
++#endif
++
++#if VMA_STATS_STRING_ENABLED
++class VmaStringBuilder;
++class VmaJsonWriter;
++#endif
++
++class VmaDeviceMemoryBlock;
++
++struct VmaDedicatedAllocationListItemTraits;
++class VmaDedicatedAllocationList;
++
++struct VmaSuballocation;
++struct VmaSuballocationOffsetLess;
++struct VmaSuballocationOffsetGreater;
++struct VmaSuballocationItemSizeLess;
++
++typedef VmaList<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> VmaSuballocationList;
++
++struct VmaAllocationRequest;
++
++class VmaBlockMetadata;
++class VmaBlockMetadata_Linear;
++class VmaBlockMetadata_TLSF;
++
++class VmaBlockVector;
++
++struct VmaPoolListItemTraits;
++
++struct VmaCurrentBudgetData;
++
++class VmaAllocationObjectAllocator;
++
++#endif // _VMA_FORWARD_DECLARATIONS
++
++
++#ifndef _VMA_FUNCTIONS
++
++/*
++Returns number of bits set to 1 in (v).
++
++On specific platforms and compilers you can use intrinsics like:
++
++Visual Studio:
++ return __popcnt(v);
++GCC, Clang:
++ return static_cast<uint32_t>(__builtin_popcount(v));
++
++Define the macro VMA_COUNT_BITS_SET to provide your own optimized implementation.
++But you then need to check at runtime whether the user's CPU supports these intrinsics, as some old processors don't.
++*/
++static inline uint32_t VmaCountBitsSet(uint32_t v)
++{
++#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
++ return std::popcount(v);
++#else
++ uint32_t c = v - ((v >> 1) & 0x55555555);
++ c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
++ c = ((c >> 4) + c) & 0x0F0F0F0F;
++ c = ((c >> 8) + c) & 0x00FF00FF;
++ c = ((c >> 16) + c) & 0x0000FFFF;
++ return c;
++#endif
++}
++
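++// For example, on GCC/Clang the override mentioned above could be provided as
++// (a sketch - define it before including this header):
++//
++//   #define VMA_COUNT_BITS_SET(v) (static_cast<uint32_t>(__builtin_popcount(v)))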
++static inline uint8_t VmaBitScanLSB(uint64_t mask)
++{
++#if defined(_MSC_VER) && defined(_WIN64)
++ unsigned long pos;
++ if (_BitScanForward64(&pos, mask))
++ return static_cast<uint8_t>(pos);
++ return UINT8_MAX;
++#elif defined __GNUC__ || defined __clang__
++ return static_cast<uint8_t>(__builtin_ffsll(mask)) - 1U;
++#else
++ uint8_t pos = 0;
++ uint64_t bit = 1;
++ do
++ {
++ if (mask & bit)
++ return pos;
++ bit <<= 1;
++ } while (pos++ < 63);
++ return UINT8_MAX;
++#endif
++}
++
++static inline uint8_t VmaBitScanLSB(uint32_t mask)
++{
++#ifdef _MSC_VER
++ unsigned long pos;
++ if (_BitScanForward(&pos, mask))
++ return static_cast<uint8_t>(pos);
++ return UINT8_MAX;
++#elif defined __GNUC__ || defined __clang__
++ return static_cast<uint8_t>(__builtin_ffs(mask)) - 1U;
++#else
++ uint8_t pos = 0;
++ uint32_t bit = 1;
++ do
++ {
++ if (mask & bit)
++ return pos;
++ bit <<= 1;
++ } while (pos++ < 31);
++ return UINT8_MAX;
++#endif
++}
++
++static inline uint8_t VmaBitScanMSB(uint64_t mask)
++{
++#if defined(_MSC_VER) && defined(_WIN64)
++ unsigned long pos;
++ if (_BitScanReverse64(&pos, mask))
++ return static_cast<uint8_t>(pos);
++#elif defined __GNUC__ || defined __clang__
++ if (mask)
++ return 63 - static_cast<uint8_t>(__builtin_clzll(mask));
++#else
++ uint8_t pos = 63;
++ uint64_t bit = 1ULL << 63;
++ do
++ {
++ if (mask & bit)
++ return pos;
++ bit >>= 1;
++ } while (pos-- > 0);
++#endif
++ return UINT8_MAX;
++}
++
++static inline uint8_t VmaBitScanMSB(uint32_t mask)
++{
++#ifdef _MSC_VER
++ unsigned long pos;
++ if (_BitScanReverse(&pos, mask))
++ return static_cast<uint8_t>(pos);
++#elif defined __GNUC__ || defined __clang__
++ if (mask)
++ return 31 - static_cast<uint8_t>(__builtin_clz(mask));
++#else
++ uint8_t pos = 31;
++ uint32_t bit = 1UL << 31;
++ do
++ {
++ if (mask & bit)
++ return pos;
++ bit >>= 1;
++ } while (pos-- > 0);
++#endif
++ return UINT8_MAX;
++}
++
++/*
++Returns true if the given number is a power of two.
++T must be an unsigned integer type, or a signed integer type holding a nonnegative value.
++Returns true for 0.
++*/
++template <typename T>
++inline bool VmaIsPow2(T x)
++{
++ return (x & (x - 1)) == 0;
++}
++
++// Aligns the given value up to the nearest multiple of the alignment. For example: VmaAlignUp(11, 8) = 16.
++// Use types like uint32_t, uint64_t as T.
++template <typename T>
++static inline T VmaAlignUp(T val, T alignment)
++{
++ VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
++ return (val + alignment - 1) & ~(alignment - 1);
++}
++
++// Aligns the given value down to the nearest multiple of the alignment. For example: VmaAlignDown(11, 8) = 8.
++// Use types like uint32_t, uint64_t as T.
++template <typename T>
++static inline T VmaAlignDown(T val, T alignment)
++{
++ VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
++ return val & ~(alignment - 1);
++}
++
++// Division with mathematical rounding to the nearest integer.
++template <typename T>
++static inline T VmaRoundDiv(T x, T y)
++{
++ return (x + (y / (T)2)) / y;
++}
++
++// Divide by 'y' and round up to nearest integer.
++template <typename T>
++static inline T VmaDivideRoundingUp(T x, T y)
++{
++ return (x + y - (T)1) / y;
++}
++
++// Returns the smallest power of 2 greater than or equal to v.
++static inline uint32_t VmaNextPow2(uint32_t v)
++{
++ v--;
++ v |= v >> 1;
++ v |= v >> 2;
++ v |= v >> 4;
++ v |= v >> 8;
++ v |= v >> 16;
++ v++;
++ return v;
++}
++
++static inline uint64_t VmaNextPow2(uint64_t v)
++{
++ v--;
++ v |= v >> 1;
++ v |= v >> 2;
++ v |= v >> 4;
++ v |= v >> 8;
++ v |= v >> 16;
++ v |= v >> 32;
++ v++;
++ return v;
++}
++
++// Returns the largest power of 2 less than or equal to v.
++static inline uint32_t VmaPrevPow2(uint32_t v)
++{
++ v |= v >> 1;
++ v |= v >> 2;
++ v |= v >> 4;
++ v |= v >> 8;
++ v |= v >> 16;
++ v = v ^ (v >> 1);
++ return v;
++}
++
++static inline uint64_t VmaPrevPow2(uint64_t v)
++{
++ v |= v >> 1;
++ v |= v >> 2;
++ v |= v >> 4;
++ v |= v >> 8;
++ v |= v >> 16;
++ v |= v >> 32;
++ v = v ^ (v >> 1);
++ return v;
++}
++
++static inline bool VmaStrIsEmpty(const char* pStr)
++{
++ return pStr == VMA_NULL || *pStr == '\0';
++}
++
++/*
++Returns true if two memory blocks occupy overlapping pages.
++ResourceA must be at a lower memory offset than ResourceB.
++
++The algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)",
++chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
++*/
++static inline bool VmaBlocksOnSamePage(
++ VkDeviceSize resourceAOffset,
++ VkDeviceSize resourceASize,
++ VkDeviceSize resourceBOffset,
++ VkDeviceSize pageSize)
++{
++ VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
++ VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
++ VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
++ VkDeviceSize resourceBStart = resourceBOffset;
++ VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
++ return resourceAEndPage == resourceBStartPage;
++}
++
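++// Worked example (editorial): with pageSize = 4096, a resource occupying bytes
++// [0, 3999] and a second resource starting at offset 4000 both touch page 0,
++// so the function returns true; if the second resource instead starts at
++// offset 4096, it begins on the next page and the function returns false.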
++/*
++Returns true if the given suballocation types could conflict and must respect
++VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one of them is a buffer
++or linear image and the other is an optimal image. If a type is unknown, behaves
++conservatively.
++*/
++static inline bool VmaIsBufferImageGranularityConflict(
++ VmaSuballocationType suballocType1,
++ VmaSuballocationType suballocType2)
++{
++ if (suballocType1 > suballocType2)
++ {
++ VMA_SWAP(suballocType1, suballocType2);
++ }
++
++ switch (suballocType1)
++ {
++ case VMA_SUBALLOCATION_TYPE_FREE:
++ return false;
++ case VMA_SUBALLOCATION_TYPE_UNKNOWN:
++ return true;
++ case VMA_SUBALLOCATION_TYPE_BUFFER:
++ return
++ suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
++ suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
++ case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
++ return
++ suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
++ suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
++ suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
++ case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
++ return
++ suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
++ case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
++ return false;
++ default:
++ VMA_ASSERT(0);
++ return true;
++ }
++}
++
++static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
++{
++#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
++ uint32_t* pDst = (uint32_t*)((char*)pData + offset);
++ const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
++ for (size_t i = 0; i < numberCount; ++i, ++pDst)
++ {
++ *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
++ }
++#else
++ // no-op
++#endif
++}
++
++static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
++{
++#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
++ const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
++ const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
++ for (size_t i = 0; i < numberCount; ++i, ++pSrc)
++ {
++ if (*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
++ {
++ return false;
++ }
++ }
++#endif
++ return true;
++}
++
++/*
++Fills the structure with parameters of an example buffer to be used for transfers
++during GPU memory defragmentation.
++*/
++static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
++{
++ memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
++ outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
++ outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
++ outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
++}
++
++
++/*
++Performs a binary search and returns an iterator to the first element that is greater than or
++equal to (key), according to the comparison (cmp).
++
++Cmp should return true if its first argument is less than its second argument.
++
++The returned iterator points to the found element, if present in the collection, or to the place
++where a new element with value (key) should be inserted.
++*/
++template <typename CmpLess, typename IterT, typename KeyT>
++static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp)
++{
++ size_t down = 0, up = size_t(end - beg);
++ while (down < up)
++ {
++ const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation
++ if (cmp(*(beg + mid), key))
++ {
++ down = mid + 1;
++ }
++ else
++ {
++ up = mid;
++ }
++ }
++ return beg + down;
++}
++
++template<typename CmpLess, typename IterT, typename KeyT>
++IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
++{
++ IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
++ beg, end, value, cmp);
++ if (it == end ||
++ (!cmp(*it, value) && !cmp(value, *it)))
++ {
++ return it;
++ }
++ return end;
++}
++
++/*
++Returns true if all pointers in the array are non-null and unique.
++Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
++T must be pointer type, e.g. VmaAllocation, VmaPool.
++*/
++template<typename T>
++static bool VmaValidatePointerArray(uint32_t count, const T* arr)
++{
++ for (uint32_t i = 0; i < count; ++i)
++ {
++ const T iPtr = arr[i];
++ if (iPtr == VMA_NULL)
++ {
++ return false;
++ }
++ for (uint32_t j = i + 1; j < count; ++j)
++ {
++ if (iPtr == arr[j])
++ {
++ return false;
++ }
++ }
++ }
++ return true;
++}
++
++template<typename MainT, typename NewT>
++static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
++{
++ newStruct->pNext = mainStruct->pNext;
++ mainStruct->pNext = newStruct;
++}
++
++// This is the main algorithm that guides the selection of the memory type best suited for an allocation -
++// it converts usage to required/preferred/not-preferred flags.
++static bool FindMemoryPreferences(
++ bool isIntegratedGPU,
++ const VmaAllocationCreateInfo& allocCreateInfo,
++ VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown.
++ VkMemoryPropertyFlags& outRequiredFlags,
++ VkMemoryPropertyFlags& outPreferredFlags,
++ VkMemoryPropertyFlags& outNotPreferredFlags)
++{
++ outRequiredFlags = allocCreateInfo.requiredFlags;
++ outPreferredFlags = allocCreateInfo.preferredFlags;
++ outNotPreferredFlags = 0;
++
++ switch(allocCreateInfo.usage)
++ {
++ case VMA_MEMORY_USAGE_UNKNOWN:
++ break;
++ case VMA_MEMORY_USAGE_GPU_ONLY:
++ if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
++ {
++ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
++ }
++ break;
++ case VMA_MEMORY_USAGE_CPU_ONLY:
++ outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
++ break;
++ case VMA_MEMORY_USAGE_CPU_TO_GPU:
++ outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
++ if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
++ {
++ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
++ }
++ break;
++ case VMA_MEMORY_USAGE_GPU_TO_CPU:
++ outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
++ outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
++ break;
++ case VMA_MEMORY_USAGE_CPU_COPY:
++ outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
++ break;
++ case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
++ outRequiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
++ break;
++ case VMA_MEMORY_USAGE_AUTO:
++ case VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE:
++ case VMA_MEMORY_USAGE_AUTO_PREFER_HOST:
++ {
++ if(bufImgUsage == UINT32_MAX)
++ {
++ VMA_ASSERT(0 && "VMA_MEMORY_USAGE_AUTO* values can only be used with functions like vmaCreateBuffer, vmaCreateImage so that the details of the created resource are known.");
++ return false;
++ }
++        // This relies on the values of VK_IMAGE_USAGE_TRANSFER* being the same as VK_BUFFER_USAGE_TRANSFER*.
++ const bool deviceAccess = (bufImgUsage & ~(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0;
++ const bool hostAccessSequentialWrite = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT) != 0;
++ const bool hostAccessRandom = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) != 0;
++ const bool hostAccessAllowTransferInstead = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) != 0;
++ const bool preferDevice = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
++ const bool preferHost = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST;
++
++ // CPU random access - e.g. a buffer written to or transferred from GPU to read back on CPU.
++ if(hostAccessRandom)
++ {
++ if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
++ {
++ // Nice if it will end up in HOST_VISIBLE, but more importantly prefer DEVICE_LOCAL.
++ // Omitting HOST_VISIBLE here is intentional.
++ // In case there is DEVICE_LOCAL | HOST_VISIBLE | HOST_CACHED, it will pick that one.
++                // Otherwise, this will give the same weight to DEVICE_LOCAL as to HOST_VISIBLE | HOST_CACHED and select the former if it occurs first on the list.
++ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
++ }
++ else
++ {
++ // Always CPU memory, cached.
++ outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
++ }
++ }
++ // CPU sequential write - may be CPU or host-visible GPU memory, uncached and write-combined.
++ else if(hostAccessSequentialWrite)
++ {
++ // Want uncached and write-combined.
++ outNotPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
++
++ if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
++ {
++ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
++ }
++ else
++ {
++ outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
++ // Direct GPU access, CPU sequential write (e.g. a dynamic uniform buffer updated every frame)
++ if(deviceAccess)
++ {
++ // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose GPU memory.
++ if(preferHost)
++ outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
++ else
++ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
++ }
++ // GPU no direct access, CPU sequential write (e.g. an upload buffer to be transferred to the GPU)
++ else
++ {
++ // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose CPU memory.
++ if(preferDevice)
++ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
++ else
++ outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
++ }
++ }
++ }
++ // No CPU access
++ else
++ {
++ // if(deviceAccess)
++ //
++ // GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory,
++ // unless there is a clear preference from the user not to do so.
++ //
++ // else:
++ //
++ // No direct GPU access, no CPU access, just transfers.
++ // It may be staging copy intended for e.g. preserving image for next frame (then better GPU memory) or
++ // a "swap file" copy to free some GPU memory (then better CPU memory).
++            // Up to the user to decide. If no preference, assume the former and choose GPU memory.
++
++ if(preferHost)
++ outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
++ else
++ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
++ }
++ break;
++ }
++ default:
++ VMA_ASSERT(0);
++ }
++
++ // Avoid DEVICE_COHERENT unless explicitly requested.
++ if(((allocCreateInfo.requiredFlags | allocCreateInfo.preferredFlags) &
++ (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
++ {
++ outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY;
++ }
++
++ return true;
++}
++
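++// Worked example (editorial): for a CPU-side upload buffer created with
++// VMA_MEMORY_USAGE_AUTO, VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
++// and bufImgUsage == VK_BUFFER_USAGE_TRANSFER_SRC_BIT (so deviceAccess == false),
++// the function above adds HOST_VISIBLE to the required flags and marks
++// HOST_CACHED and DEVICE_LOCAL as not preferred, steering the allocation
++// towards plain, write-combined CPU memory.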
++////////////////////////////////////////////////////////////////////////////////
++// Memory allocation
++
++static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
++{
++ void* result = VMA_NULL;
++ if ((pAllocationCallbacks != VMA_NULL) &&
++ (pAllocationCallbacks->pfnAllocation != VMA_NULL))
++ {
++ result = (*pAllocationCallbacks->pfnAllocation)(
++ pAllocationCallbacks->pUserData,
++ size,
++ alignment,
++ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
++ }
++ else
++ {
++ result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
++ }
++ VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
++ return result;
++}
++
++static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
++{
++ if ((pAllocationCallbacks != VMA_NULL) &&
++ (pAllocationCallbacks->pfnFree != VMA_NULL))
++ {
++ (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
++ }
++ else
++ {
++ VMA_SYSTEM_ALIGNED_FREE(ptr);
++ }
++}
++
++template<typename T>
++static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
++{
++ return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
++}
++
++template<typename T>
++static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
++{
++ return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
++}
++
++#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
++
++#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
++
++template<typename T>
++static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
++{
++ ptr->~T();
++ VmaFree(pAllocationCallbacks, ptr);
++}
++
++template<typename T>
++static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
++{
++ if (ptr != VMA_NULL)
++ {
++ for (size_t i = count; i--; )
++ {
++ ptr[i].~T();
++ }
++ VmaFree(pAllocationCallbacks, ptr);
++ }
++}
++
++static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
++{
++ if (srcStr != VMA_NULL)
++ {
++ const size_t len = strlen(srcStr);
++ char* const result = vma_new_array(allocs, char, len + 1);
++ memcpy(result, srcStr, len + 1);
++ return result;
++ }
++ return VMA_NULL;
++}
++
++#if VMA_STATS_STRING_ENABLED
++static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen)
++{
++ if (srcStr != VMA_NULL)
++ {
++ char* const result = vma_new_array(allocs, char, strLen + 1);
++ memcpy(result, srcStr, strLen);
++ result[strLen] = '\0';
++ return result;
++ }
++ return VMA_NULL;
++}
++#endif // VMA_STATS_STRING_ENABLED
++
++static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
++{
++ if (str != VMA_NULL)
++ {
++ const size_t len = strlen(str);
++ vma_delete_array(allocs, str, len + 1);
++ }
++}
++
++template<typename CmpLess, typename VectorT>
++size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
++{
++ const size_t indexToInsert = VmaBinaryFindFirstNotLess(
++ vector.data(),
++ vector.data() + vector.size(),
++ value,
++ CmpLess()) - vector.data();
++ VmaVectorInsert(vector, indexToInsert, value);
++ return indexToInsert;
++}
++
++template<typename CmpLess, typename VectorT>
++bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
++{
++ CmpLess comparator;
++ typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
++ vector.begin(),
++ vector.end(),
++ value,
++ comparator);
++ if ((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
++ {
++ size_t indexToRemove = it - vector.begin();
++ VmaVectorRemove(vector, indexToRemove);
++ return true;
++ }
++ return false;
++}
++#endif // _VMA_FUNCTIONS
++
++#ifndef _VMA_STATISTICS_FUNCTIONS
++
++static void VmaClearStatistics(VmaStatistics& outStats)
++{
++ outStats.blockCount = 0;
++ outStats.allocationCount = 0;
++ outStats.blockBytes = 0;
++ outStats.allocationBytes = 0;
++}
++
++static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src)
++{
++ inoutStats.blockCount += src.blockCount;
++ inoutStats.allocationCount += src.allocationCount;
++ inoutStats.blockBytes += src.blockBytes;
++ inoutStats.allocationBytes += src.allocationBytes;
++}
++
++static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats)
++{
++ VmaClearStatistics(outStats.statistics);
++ outStats.unusedRangeCount = 0;
++ outStats.allocationSizeMin = VK_WHOLE_SIZE;
++ outStats.allocationSizeMax = 0;
++ outStats.unusedRangeSizeMin = VK_WHOLE_SIZE;
++ outStats.unusedRangeSizeMax = 0;
++}
++
++static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
++{
++ inoutStats.statistics.allocationCount++;
++ inoutStats.statistics.allocationBytes += size;
++ inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, size);
++ inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size);
++}
++
++static void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
++{
++ inoutStats.unusedRangeCount++;
++ inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size);
++ inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size);
++}
++
++static void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src)
++{
++ VmaAddStatistics(inoutStats.statistics, src.statistics);
++ inoutStats.unusedRangeCount += src.unusedRangeCount;
++ inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, src.allocationSizeMin);
++ inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, src.allocationSizeMax);
++ inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, src.unusedRangeSizeMin);
++ inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, src.unusedRangeSizeMax);
++}
++
++#endif // _VMA_STATISTICS_FUNCTIONS
++
++#ifndef _VMA_MUTEX_LOCK
++// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
++struct VmaMutexLock
++{
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLock)
++public:
++ VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
++ m_pMutex(useMutex ? &mutex : VMA_NULL)
++ {
++ if (m_pMutex) { m_pMutex->Lock(); }
++ }
++ ~VmaMutexLock() { if (m_pMutex) { m_pMutex->Unlock(); } }
++
++private:
++ VMA_MUTEX* m_pMutex;
++};
++
++// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
++struct VmaMutexLockRead
++{
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockRead)
++public:
++ VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
++ m_pMutex(useMutex ? &mutex : VMA_NULL)
++ {
++ if (m_pMutex) { m_pMutex->LockRead(); }
++ }
++ ~VmaMutexLockRead() { if (m_pMutex) { m_pMutex->UnlockRead(); } }
++
++private:
++ VMA_RW_MUTEX* m_pMutex;
++};
++
++// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
++struct VmaMutexLockWrite
++{
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockWrite)
++public:
++ VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex)
++ : m_pMutex(useMutex ? &mutex : VMA_NULL)
++ {
++ if (m_pMutex) { m_pMutex->LockWrite(); }
++ }
++ ~VmaMutexLockWrite() { if (m_pMutex) { m_pMutex->UnlockWrite(); } }
++
++private:
++ VMA_RW_MUTEX* m_pMutex;
++};
++
++#if VMA_DEBUG_GLOBAL_MUTEX
++ static VMA_MUTEX gDebugGlobalMutex;
++ #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
++#else
++ #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
++#endif
++#endif // _VMA_MUTEX_LOCK
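++
++// Usage sketch (illustrative only; m_Mutex and m_UseMutex are hypothetical
++// members of an enclosing class): the lock is taken in the constructor and
++// released when `lock` goes out of scope, even on early return.
++#if 0
++void Example()
++{
++    VmaMutexLock lock(m_Mutex, m_UseMutex);
++    // ... critical section ...
++} // Unlocked here.
++#endif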
++
++#ifndef _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
++// An object that increments given atomic but decrements it back in the destructor unless Commit() is called.
++template<typename AtomicT>
++struct AtomicTransactionalIncrement
++{
++public:
++ using T = decltype(AtomicT().load());
++
++ ~AtomicTransactionalIncrement()
++ {
++ if(m_Atomic)
++ --(*m_Atomic);
++ }
++
++ void Commit() { m_Atomic = nullptr; }
++ T Increment(AtomicT* atomic)
++ {
++ m_Atomic = atomic;
++ return m_Atomic->fetch_add(1);
++ }
++
++private:
++ AtomicT* m_Atomic = nullptr;
++};
++#endif // _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
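++
++// Usage sketch (illustrative only; gCount and DoWork are hypothetical): the
++// increment is rolled back by the destructor unless Commit() is reached.
++#if 0
++static std::atomic<uint32_t> gCount;
++
++static bool Example()
++{
++    AtomicTransactionalIncrement<std::atomic<uint32_t>> guard;
++    guard.Increment(&gCount);
++    if (!DoWork())
++        return false; // Destructor decrements gCount back.
++    guard.Commit();   // Keep the increment; destructor becomes a no-op.
++    return true;
++}
++#endif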
++
++#ifndef _VMA_STL_ALLOCATOR
++// STL-compatible allocator.
++template<typename T>
++struct VmaStlAllocator
++{
++ const VkAllocationCallbacks* const m_pCallbacks;
++ typedef T value_type;
++
++ VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) {}
++ template<typename U>
++ VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) {}
++ VmaStlAllocator(const VmaStlAllocator&) = default;
++ VmaStlAllocator& operator=(const VmaStlAllocator&) = delete;
++
++ T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
++ void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
++
++ template<typename U>
++ bool operator==(const VmaStlAllocator<U>& rhs) const
++ {
++ return m_pCallbacks == rhs.m_pCallbacks;
++ }
++ template<typename U>
++ bool operator!=(const VmaStlAllocator<U>& rhs) const
++ {
++ return m_pCallbacks != rhs.m_pCallbacks;
++ }
++};
++#endif // _VMA_STL_ALLOCATOR
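++
++// Usage sketch (illustrative only, assuming <vector> is available): any
++// allocator-aware container can route its heap traffic through the
++// user-provided VkAllocationCallbacks via VmaStlAllocator.
++#if 0
++static void Example(const VkAllocationCallbacks* pCallbacks)
++{
++    const VmaStlAllocator<uint32_t> alloc(pCallbacks);
++    std::vector<uint32_t, VmaStlAllocator<uint32_t>> v(alloc);
++    v.push_back(42u);
++}
++#endif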
++
++#ifndef _VMA_VECTOR
++/* Class with an interface compatible with a subset of std::vector.
++T must be POD, because constructors and destructors are not called and memcpy
++is used for these objects. */
++template<typename T, typename AllocatorT>
++class VmaVector
++{
++public:
++ typedef T value_type;
++ typedef T* iterator;
++ typedef const T* const_iterator;
++
++ VmaVector(const AllocatorT& allocator);
++ VmaVector(size_t count, const AllocatorT& allocator);
++ // This version of the constructor is here for compatibility with pre-C++14 std::vector.
++ // value is unused.
++ VmaVector(size_t count, const T& value, const AllocatorT& allocator) : VmaVector(count, allocator) {}
++ VmaVector(const VmaVector<T, AllocatorT>& src);
++ VmaVector& operator=(const VmaVector& rhs);
++ ~VmaVector() { VmaFree(m_Allocator.m_pCallbacks, m_pArray); }
++
++ bool empty() const { return m_Count == 0; }
++ size_t size() const { return m_Count; }
++ T* data() { return m_pArray; }
++ T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }
++ T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }
++ const T* data() const { return m_pArray; }
++ const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }
++ const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }
++
++ iterator begin() { return m_pArray; }
++ iterator end() { return m_pArray + m_Count; }
++ const_iterator cbegin() const { return m_pArray; }
++ const_iterator cend() const { return m_pArray + m_Count; }
++ const_iterator begin() const { return cbegin(); }
++ const_iterator end() const { return cend(); }
++
++ void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }
++ void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }
++ void push_front(const T& src) { insert(0, src); }
++
++ void push_back(const T& src);
++ void reserve(size_t newCapacity, bool freeMemory = false);
++ void resize(size_t newCount);
++ void clear() { resize(0); }
++ void shrink_to_fit();
++ void insert(size_t index, const T& src);
++ void remove(size_t index);
++
++ T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }
++ const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }
++
++private:
++ AllocatorT m_Allocator;
++ T* m_pArray;
++ size_t m_Count;
++ size_t m_Capacity;
++};
++
++#ifndef _VMA_VECTOR_FUNCTIONS
++template<typename T, typename AllocatorT>
++VmaVector<T, AllocatorT>::VmaVector(const AllocatorT& allocator)
++ : m_Allocator(allocator),
++ m_pArray(VMA_NULL),
++ m_Count(0),
++ m_Capacity(0) {}
++
++template<typename T, typename AllocatorT>
++VmaVector<T, AllocatorT>::VmaVector(size_t count, const AllocatorT& allocator)
++ : m_Allocator(allocator),
++ m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
++ m_Count(count),
++ m_Capacity(count) {}
++
++template<typename T, typename AllocatorT>
++VmaVector<T, AllocatorT>::VmaVector(const VmaVector& src)
++ : m_Allocator(src.m_Allocator),
++ m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
++ m_Count(src.m_Count),
++ m_Capacity(src.m_Count)
++{
++ if (m_Count != 0)
++ {
++ memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
++ }
++}
++
++template<typename T, typename AllocatorT>
++VmaVector<T, AllocatorT>& VmaVector<T, AllocatorT>::operator=(const VmaVector& rhs)
++{
++ if (&rhs != this)
++ {
++ resize(rhs.m_Count);
++ if (m_Count != 0)
++ {
++ memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
++ }
++ }
++ return *this;
++}
++
++template<typename T, typename AllocatorT>
++void VmaVector<T, AllocatorT>::push_back(const T& src)
++{
++ const size_t newIndex = size();
++ resize(newIndex + 1);
++ m_pArray[newIndex] = src;
++}
++
++template<typename T, typename AllocatorT>
++void VmaVector<T, AllocatorT>::reserve(size_t newCapacity, bool freeMemory)
++{
++ newCapacity = VMA_MAX(newCapacity, m_Count);
++
++ if ((newCapacity < m_Capacity) && !freeMemory)
++ {
++ newCapacity = m_Capacity;
++ }
++
++ if (newCapacity != m_Capacity)
++ {
++        T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
++ if (m_Count != 0)
++ {
++ memcpy(newArray, m_pArray, m_Count * sizeof(T));
++ }
++ VmaFree(m_Allocator.m_pCallbacks, m_pArray);
++ m_Capacity = newCapacity;
++ m_pArray = newArray;
++ }
++}
++
++template<typename T, typename AllocatorT>
++void VmaVector<T, AllocatorT>::resize(size_t newCount)
++{
++ size_t newCapacity = m_Capacity;
++ if (newCount > m_Capacity)
++ {
++ newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
++ }
++
++ if (newCapacity != m_Capacity)
++ {
++ T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
++ const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
++ if (elementsToCopy != 0)
++ {
++ memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
++ }
++ VmaFree(m_Allocator.m_pCallbacks, m_pArray);
++ m_Capacity = newCapacity;
++ m_pArray = newArray;
++ }
++
++ m_Count = newCount;
++}
++
++template<typename T, typename AllocatorT>
++void VmaVector<T, AllocatorT>::shrink_to_fit()
++{
++ if (m_Capacity > m_Count)
++ {
++ T* newArray = VMA_NULL;
++ if (m_Count > 0)
++ {
++ newArray = VmaAllocateArray<T>(m_Allocator.m_pCallbacks, m_Count);
++ memcpy(newArray, m_pArray, m_Count * sizeof(T));
++ }
++ VmaFree(m_Allocator.m_pCallbacks, m_pArray);
++ m_Capacity = m_Count;
++ m_pArray = newArray;
++ }
++}
++
++template<typename T, typename AllocatorT>
++void VmaVector<T, AllocatorT>::insert(size_t index, const T& src)
++{
++ VMA_HEAVY_ASSERT(index <= m_Count);
++ const size_t oldCount = size();
++ resize(oldCount + 1);
++ if (index < oldCount)
++ {
++ memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
++ }
++ m_pArray[index] = src;
++}
++
++template<typename T, typename AllocatorT>
++void VmaVector<T, AllocatorT>::remove(size_t index)
++{
++ VMA_HEAVY_ASSERT(index < m_Count);
++ const size_t oldCount = size();
++ if (index < oldCount - 1)
++ {
++ memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
++ }
++ resize(oldCount - 1);
++}
++#endif // _VMA_VECTOR_FUNCTIONS
++
++template<typename T, typename allocatorT>
++static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
++{
++ vec.insert(index, item);
++}
++
++template<typename T, typename allocatorT>
++static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
++{
++ vec.remove(index);
++}
++#endif // _VMA_VECTOR
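++
++// Usage sketch (illustrative only): VmaVector mirrors the std::vector subset
++// used in this file; because growth is done with memcpy, it must only hold
++// POD-like element types.
++#if 0
++static void Example(const VkAllocationCallbacks* pCallbacks)
++{
++    const VmaStlAllocator<uint32_t> alloc(pCallbacks);
++    VmaVector<uint32_t, VmaStlAllocator<uint32_t>> v(alloc);
++    v.push_back(3u);   // v = {3}
++    v.insert(0, 1u);   // v = {1, 3}
++    v.remove(1);       // v = {1}
++}
++#endif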
++
++#ifndef _VMA_SMALL_VECTOR
++/*
++This is a vector (a variable-sized array), optimized for the case when the array is small.
++
++It contains some number of elements in-place, which allows it to avoid heap allocation
++when the actual number of elements is below that threshold. This allows normal "small"
++cases to be fast without losing generality for large inputs.
++*/
++template<typename T, typename AllocatorT, size_t N>
++class VmaSmallVector
++{
++public:
++ typedef T value_type;
++ typedef T* iterator;
++
++ VmaSmallVector(const AllocatorT& allocator);
++ VmaSmallVector(size_t count, const AllocatorT& allocator);
++ template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
++ VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;
++ template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
++ VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;
++ ~VmaSmallVector() = default;
++
++ bool empty() const { return m_Count == 0; }
++ size_t size() const { return m_Count; }
++ T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
++ T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }
++ T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }
++ const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
++ const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }
++ const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }
++
++ iterator begin() { return data(); }
++ iterator end() { return data() + m_Count; }
++
++ void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }
++ void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }
++ void push_front(const T& src) { insert(0, src); }
++
++ void push_back(const T& src);
++ void resize(size_t newCount, bool freeMemory = false);
++ void clear(bool freeMemory = false);
++ void insert(size_t index, const T& src);
++ void remove(size_t index);
++
++ T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; }
++ const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; }
++
++private:
++ size_t m_Count;
++    T m_StaticArray[N]; // Used when m_Count <= N
++    VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Count > N
++};
++
++#ifndef _VMA_SMALL_VECTOR_FUNCTIONS
++template<typename T, typename AllocatorT, size_t N>
++VmaSmallVector<T, AllocatorT, N>::VmaSmallVector(const AllocatorT& allocator)
++ : m_Count(0),
++ m_DynamicArray(allocator) {}
++
++template<typename T, typename AllocatorT, size_t N>
++VmaSmallVector<T, AllocatorT, N>::VmaSmallVector(size_t count, const AllocatorT& allocator)
++ : m_Count(count),
++ m_DynamicArray(count > N ? count : 0, allocator) {}
++
++template<typename T, typename AllocatorT, size_t N>
++void VmaSmallVector<T, AllocatorT, N>::push_back(const T& src)
++{
++ const size_t newIndex = size();
++ resize(newIndex + 1);
++ data()[newIndex] = src;
++}
++
++template<typename T, typename AllocatorT, size_t N>
++void VmaSmallVector<T, AllocatorT, N>::resize(size_t newCount, bool freeMemory)
++{
++ if (newCount > N && m_Count > N)
++ {
++ // Any direction, staying in m_DynamicArray
++ m_DynamicArray.resize(newCount);
++ if (freeMemory)
++ {
++ m_DynamicArray.shrink_to_fit();
++ }
++ }
++ else if (newCount > N && m_Count <= N)
++ {
++ // Growing, moving from m_StaticArray to m_DynamicArray
++ m_DynamicArray.resize(newCount);
++ if (m_Count > 0)
++ {
++ memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
++ }
++ }
++ else if (newCount <= N && m_Count > N)
++ {
++ // Shrinking, moving from m_DynamicArray to m_StaticArray
++ if (newCount > 0)
++ {
++ memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
++ }
++ m_DynamicArray.resize(0);
++ if (freeMemory)
++ {
++ m_DynamicArray.shrink_to_fit();
++ }
++ }
++ else
++ {
++ // Any direction, staying in m_StaticArray - nothing to do here
++ }
++ m_Count = newCount;
++}
++
++template<typename T, typename AllocatorT, size_t N>
++void VmaSmallVector<T, AllocatorT, N>::clear(bool freeMemory)
++{
++ m_DynamicArray.clear();
++ if (freeMemory)
++ {
++ m_DynamicArray.shrink_to_fit();
++ }
++ m_Count = 0;
++}
++
++template<typename T, typename AllocatorT, size_t N>
++void VmaSmallVector<T, AllocatorT, N>::insert(size_t index, const T& src)
++{
++ VMA_HEAVY_ASSERT(index <= m_Count);
++ const size_t oldCount = size();
++ resize(oldCount + 1);
++ T* const dataPtr = data();
++ if (index < oldCount)
++ {
++        // This could be optimized further: when the resize above spilled from m_StaticArray to m_DynamicArray, the memmove could be a direct memcpy between the two arrays.
++ memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
++ }
++ dataPtr[index] = src;
++}
++
++template<typename T, typename AllocatorT, size_t N>
++void VmaSmallVector<T, AllocatorT, N>::remove(size_t index)
++{
++ VMA_HEAVY_ASSERT(index < m_Count);
++ const size_t oldCount = size();
++ if (index < oldCount - 1)
++ {
++        // This could be optimized further: when the resize below moves the contents from m_DynamicArray back to m_StaticArray, the memmove could be a direct memcpy between the two arrays.
++ T* const dataPtr = data();
++ memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
++ }
++ resize(oldCount - 1);
++}
++#endif // _VMA_SMALL_VECTOR_FUNCTIONS
++#endif // _VMA_SMALL_VECTOR
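++
++// Usage sketch (illustrative only): with N == 4 the first four elements live
++// in m_StaticArray with no heap traffic; the fifth push_back spills the
++// contents into m_DynamicArray with a single memcpy.
++#if 0
++static void Example(const VkAllocationCallbacks* pCallbacks)
++{
++    const VmaStlAllocator<uint32_t> alloc(pCallbacks);
++    VmaSmallVector<uint32_t, VmaStlAllocator<uint32_t>, 4> v(alloc);
++    for (uint32_t i = 0; i < 5; ++i)
++        v.push_back(i); // Heap allocation happens only when i == 4.
++}
++#endif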
++
++#ifndef _VMA_POOL_ALLOCATOR
++/*
++Allocator for objects of type T, using a list of arrays (pools) to speed up
++allocation. The number of elements that can be allocated is not bounded,
++because the allocator can create multiple blocks.
++*/
++template<typename T>
++class VmaPoolAllocator
++{
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaPoolAllocator)
++public:
++ VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
++ ~VmaPoolAllocator();
++ template<typename... Types> T* Alloc(Types&&... args);
++ void Free(T* ptr);
++
++private:
++ union Item
++ {
++ uint32_t NextFreeIndex;
++ alignas(T) char Value[sizeof(T)];
++ };
++ struct ItemBlock
++ {
++ Item* pItems;
++ uint32_t Capacity;
++ uint32_t FirstFreeIndex;
++ };
++
++ const VkAllocationCallbacks* m_pAllocationCallbacks;
++ const uint32_t m_FirstBlockCapacity;
++ VmaVector<ItemBlock, VmaStlAllocator<ItemBlock>> m_ItemBlocks;
++
++ ItemBlock& CreateNewBlock();
++};
++
++#ifndef _VMA_POOL_ALLOCATOR_FUNCTIONS
++template<typename T>
++VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity)
++ : m_pAllocationCallbacks(pAllocationCallbacks),
++ m_FirstBlockCapacity(firstBlockCapacity),
++ m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
++{
++ VMA_ASSERT(m_FirstBlockCapacity > 1);
++}
++
++template<typename T>
++VmaPoolAllocator<T>::~VmaPoolAllocator()
++{
++ for (size_t i = m_ItemBlocks.size(); i--;)
++ vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
++ m_ItemBlocks.clear();
++}
++
++template<typename T>
++template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types&&... args)
++{
++ for (size_t i = m_ItemBlocks.size(); i--; )
++ {
++ ItemBlock& block = m_ItemBlocks[i];
++        // This block has some free items: use the first one.
++ if (block.FirstFreeIndex != UINT32_MAX)
++ {
++ Item* const pItem = &block.pItems[block.FirstFreeIndex];
++ block.FirstFreeIndex = pItem->NextFreeIndex;
++ T* result = (T*)&pItem->Value;
++ new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
++ return result;
++ }
++ }
++
++    // No block has a free item: create a new one and use it.
++ ItemBlock& newBlock = CreateNewBlock();
++ Item* const pItem = &newBlock.pItems[0];
++ newBlock.FirstFreeIndex = pItem->NextFreeIndex;
++ T* result = (T*)&pItem->Value;
++ new(result) T(std::forward<Types>(args)...); // Explicit constructor call.
++ return result;
++}
++
++template<typename T>
++void VmaPoolAllocator<T>::Free(T* ptr)
++{
++ // Search all memory blocks to find ptr.
++ for (size_t i = m_ItemBlocks.size(); i--; )
++ {
++ ItemBlock& block = m_ItemBlocks[i];
++
++ // Casting to union.
++ Item* pItemPtr;
++ memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
++
++ // Check if pItemPtr is in address range of this block.
++ if ((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
++ {
++ ptr->~T(); // Explicit destructor call.
++ const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
++ pItemPtr->NextFreeIndex = block.FirstFreeIndex;
++ block.FirstFreeIndex = index;
++ return;
++ }
++ }
++ VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
++}
++
++template<typename T>
++typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
++{
++ const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
++ m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
++
++ const ItemBlock newBlock =
++ {
++ vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
++ newBlockCapacity,
++ 0
++ };
++
++ m_ItemBlocks.push_back(newBlock);
++
++ // Setup singly-linked list of all free items in this block.
++ for (uint32_t i = 0; i < newBlockCapacity - 1; ++i)
++ newBlock.pItems[i].NextFreeIndex = i + 1;
++ newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
++ return m_ItemBlocks.back();
++}
++#endif // _VMA_POOL_ALLOCATOR_FUNCTIONS
++#endif // _VMA_POOL_ALLOCATOR
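++
++// Usage sketch (illustrative only; MyItem is hypothetical): items are carved
++// out of blocks that grow by a factor of 1.5 (16, 24, 36, ... items here)
++// and are recycled through each block's free list.
++#if 0
++struct MyItem
++{
++    uint32_t value;
++    MyItem(uint32_t v) : value(v) {}
++};
++
++static void Example(const VkAllocationCallbacks* pCallbacks)
++{
++    VmaPoolAllocator<MyItem> pool(pCallbacks, 16); // First block: 16 items.
++    MyItem* const item = pool.Alloc(7u);           // Placement-new MyItem(7).
++    pool.Free(item); // Runs ~MyItem() and pushes the slot onto the free list.
++}
++#endif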
++
++#ifndef _VMA_RAW_LIST
++template<typename T>
++struct VmaListItem
++{
++ VmaListItem* pPrev;
++ VmaListItem* pNext;
++ T Value;
++};
++
++// Doubly linked list.
++template<typename T>
++class VmaRawList
++{
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaRawList)
++public:
++ typedef VmaListItem<T> ItemType;
++
++ VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
++    // Intentionally not calling Clear, because that would require unnecessary
++    // computation to return all items to m_ItemAllocator as free.
++ ~VmaRawList() = default;
++
++ size_t GetCount() const { return m_Count; }
++ bool IsEmpty() const { return m_Count == 0; }
++
++ ItemType* Front() { return m_pFront; }
++ ItemType* Back() { return m_pBack; }
++ const ItemType* Front() const { return m_pFront; }
++ const ItemType* Back() const { return m_pBack; }
++
++ ItemType* PushFront();
++ ItemType* PushBack();
++ ItemType* PushFront(const T& value);
++ ItemType* PushBack(const T& value);
++ void PopFront();
++ void PopBack();
++
++    // pItem can be null - it means PushBack.
++    ItemType* InsertBefore(ItemType* pItem);
++    // pItem can be null - it means PushFront.
++    ItemType* InsertAfter(ItemType* pItem);
++ ItemType* InsertBefore(ItemType* pItem, const T& value);
++ ItemType* InsertAfter(ItemType* pItem, const T& value);
++
++ void Clear();
++ void Remove(ItemType* pItem);
++
++private:
++ const VkAllocationCallbacks* const m_pAllocationCallbacks;
++ VmaPoolAllocator<ItemType> m_ItemAllocator;
++ ItemType* m_pFront;
++ ItemType* m_pBack;
++ size_t m_Count;
++};
++
++#ifndef _VMA_RAW_LIST_FUNCTIONS
++template<typename T>
++VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks)
++ : m_pAllocationCallbacks(pAllocationCallbacks),
++ m_ItemAllocator(pAllocationCallbacks, 128),
++ m_pFront(VMA_NULL),
++ m_pBack(VMA_NULL),
++ m_Count(0) {}
++
++template<typename T>
++VmaListItem<T>* VmaRawList<T>::PushFront()
++{
++ ItemType* const pNewItem = m_ItemAllocator.Alloc();
++ pNewItem->pPrev = VMA_NULL;
++ if (IsEmpty())
++ {
++ pNewItem->pNext = VMA_NULL;
++ m_pFront = pNewItem;
++ m_pBack = pNewItem;
++ m_Count = 1;
++ }
++ else
++ {
++ pNewItem->pNext = m_pFront;
++ m_pFront->pPrev = pNewItem;
++ m_pFront = pNewItem;
++ ++m_Count;
++ }
++ return pNewItem;
++}
++
++template<typename T>
++VmaListItem<T>* VmaRawList<T>::PushBack()
++{
++ ItemType* const pNewItem = m_ItemAllocator.Alloc();
++ pNewItem->pNext = VMA_NULL;
++ if(IsEmpty())
++ {
++ pNewItem->pPrev = VMA_NULL;
++ m_pFront = pNewItem;
++ m_pBack = pNewItem;
++ m_Count = 1;
++ }
++ else
++ {
++ pNewItem->pPrev = m_pBack;
++ m_pBack->pNext = pNewItem;
++ m_pBack = pNewItem;
++ ++m_Count;
++ }
++ return pNewItem;
++}
++
++template<typename T>
++VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
++{
++ ItemType* const pNewItem = PushFront();
++ pNewItem->Value = value;
++ return pNewItem;
++}
++
++template<typename T>
++VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
++{
++ ItemType* const pNewItem = PushBack();
++ pNewItem->Value = value;
++ return pNewItem;
++}
++
++template<typename T>
++void VmaRawList<T>::PopFront()
++{
++ VMA_HEAVY_ASSERT(m_Count > 0);
++ ItemType* const pFrontItem = m_pFront;
++ ItemType* const pNextItem = pFrontItem->pNext;
++ if (pNextItem != VMA_NULL)
++ {
++ pNextItem->pPrev = VMA_NULL;
++ }
++ m_pFront = pNextItem;
++ m_ItemAllocator.Free(pFrontItem);
++ --m_Count;
++}
++
++template<typename T>
++void VmaRawList<T>::PopBack()
++{
++ VMA_HEAVY_ASSERT(m_Count > 0);
++ ItemType* const pBackItem = m_pBack;
++ ItemType* const pPrevItem = pBackItem->pPrev;
++ if(pPrevItem != VMA_NULL)
++ {
++ pPrevItem->pNext = VMA_NULL;
++ }
++ m_pBack = pPrevItem;
++ m_ItemAllocator.Free(pBackItem);
++ --m_Count;
++}
++
++template<typename T>
++void VmaRawList<T>::Clear()
++{
++ if (IsEmpty() == false)
++ {
++ ItemType* pItem = m_pBack;
++ while (pItem != VMA_NULL)
++ {
++ ItemType* const pPrevItem = pItem->pPrev;
++ m_ItemAllocator.Free(pItem);
++ pItem = pPrevItem;
++ }
++ m_pFront = VMA_NULL;
++ m_pBack = VMA_NULL;
++ m_Count = 0;
++ }
++}
++
++template<typename T>
++void VmaRawList<T>::Remove(ItemType* pItem)
++{
++ VMA_HEAVY_ASSERT(pItem != VMA_NULL);
++ VMA_HEAVY_ASSERT(m_Count > 0);
++
++ if(pItem->pPrev != VMA_NULL)
++ {
++ pItem->pPrev->pNext = pItem->pNext;
++ }
++ else
++ {
++ VMA_HEAVY_ASSERT(m_pFront == pItem);
++ m_pFront = pItem->pNext;
++ }
++
++ if(pItem->pNext != VMA_NULL)
++ {
++ pItem->pNext->pPrev = pItem->pPrev;
++ }
++ else
++ {
++ VMA_HEAVY_ASSERT(m_pBack == pItem);
++ m_pBack = pItem->pPrev;
++ }
++
++ m_ItemAllocator.Free(pItem);
++ --m_Count;
++}
++
++template<typename T>
++VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
++{
++ if(pItem != VMA_NULL)
++ {
++ ItemType* const prevItem = pItem->pPrev;
++ ItemType* const newItem = m_ItemAllocator.Alloc();
++ newItem->pPrev = prevItem;
++ newItem->pNext = pItem;
++ pItem->pPrev = newItem;
++ if(prevItem != VMA_NULL)
++ {
++ prevItem->pNext = newItem;
++ }
++ else
++ {
++ VMA_HEAVY_ASSERT(m_pFront == pItem);
++ m_pFront = newItem;
++ }
++ ++m_Count;
++ return newItem;
++ }
++ else
++ return PushBack();
++}
++
++template<typename T>
++VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
++{
++ if(pItem != VMA_NULL)
++ {
++ ItemType* const nextItem = pItem->pNext;
++ ItemType* const newItem = m_ItemAllocator.Alloc();
++ newItem->pNext = nextItem;
++ newItem->pPrev = pItem;
++ pItem->pNext = newItem;
++ if(nextItem != VMA_NULL)
++ {
++ nextItem->pPrev = newItem;
++ }
++ else
++ {
++ VMA_HEAVY_ASSERT(m_pBack == pItem);
++ m_pBack = newItem;
++ }
++ ++m_Count;
++ return newItem;
++ }
++ else
++ return PushFront();
++}
++
++template<typename T>
++VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
++{
++ ItemType* const newItem = InsertBefore(pItem);
++ newItem->Value = value;
++ return newItem;
++}
++
++template<typename T>
++VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
++{
++ ItemType* const newItem = InsertAfter(pItem);
++ newItem->Value = value;
++ return newItem;
++}
++#endif // _VMA_RAW_LIST_FUNCTIONS
++#endif // _VMA_RAW_LIST
++
++#ifndef _VMA_LIST
++template<typename T, typename AllocatorT>
++class VmaList
++{
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaList)
++public:
++ class reverse_iterator;
++ class const_iterator;
++ class const_reverse_iterator;
++
++ class iterator
++ {
++ friend class const_iterator;
++ friend class VmaList<T, AllocatorT>;
++ public:
++ iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
++ iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
++
++ T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
++ T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
++
++ bool operator==(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
++ bool operator!=(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
++
++ iterator operator++(int) { iterator result = *this; ++*this; return result; }
++ iterator operator--(int) { iterator result = *this; --*this; return result; }
++
++ iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }
++ iterator& operator--();
++
++ private:
++ VmaRawList<T>* m_pList;
++ VmaListItem<T>* m_pItem;
++
++ iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
++ };
++ class reverse_iterator
++ {
++ friend class const_reverse_iterator;
++ friend class VmaList<T, AllocatorT>;
++ public:
++ reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
++ reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
++
++ T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
++ T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
++
++ bool operator==(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
++ bool operator!=(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
++
++        reverse_iterator operator++(int) { reverse_iterator result = *this; ++*this; return result; }
++        reverse_iterator operator--(int) { reverse_iterator result = *this; --*this; return result; }
++
++ reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }
++ reverse_iterator& operator--();
++
++ private:
++ VmaRawList<T>* m_pList;
++ VmaListItem<T>* m_pItem;
++
++ reverse_iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
++ };
++ class const_iterator
++ {
++ friend class VmaList<T, AllocatorT>;
++ public:
++ const_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
++ const_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
++ const_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
++
++ iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }
++
++ const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
++ const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
++
++ bool operator==(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
++ bool operator!=(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
++
++        const_iterator operator++(int) { const_iterator result = *this; ++*this; return result; }
++        const_iterator operator--(int) { const_iterator result = *this; --*this; return result; }
++
++ const_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }
++ const_iterator& operator--();
++
++ private:
++ const VmaRawList<T>* m_pList;
++ const VmaListItem<T>* m_pItem;
++
++ const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
++ };
++ class const_reverse_iterator
++ {
++ friend class VmaList<T, AllocatorT>;
++ public:
++ const_reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
++ const_reverse_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
++ const_reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
++
++ reverse_iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }
++
++ const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
++ const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
++
++ bool operator==(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
++ bool operator!=(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
++
++        const_reverse_iterator operator++(int) { const_reverse_iterator result = *this; ++*this; return result; }
++        const_reverse_iterator operator--(int) { const_reverse_iterator result = *this; --*this; return result; }
++
++ const_reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }
++ const_reverse_iterator& operator--();
++
++ private:
++ const VmaRawList<T>* m_pList;
++ const VmaListItem<T>* m_pItem;
++
++ const_reverse_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
++ };
++
++ VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) {}
++
++ bool empty() const { return m_RawList.IsEmpty(); }
++ size_t size() const { return m_RawList.GetCount(); }
++
++ iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
++ iterator end() { return iterator(&m_RawList, VMA_NULL); }
++
++ const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
++ const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
++
++ const_iterator begin() const { return cbegin(); }
++ const_iterator end() const { return cend(); }
++
++ reverse_iterator rbegin() { return reverse_iterator(&m_RawList, m_RawList.Back()); }
++ reverse_iterator rend() { return reverse_iterator(&m_RawList, VMA_NULL); }
++
++ const_reverse_iterator crbegin() const { return const_reverse_iterator(&m_RawList, m_RawList.Back()); }
++ const_reverse_iterator crend() const { return const_reverse_iterator(&m_RawList, VMA_NULL); }
++
++ const_reverse_iterator rbegin() const { return crbegin(); }
++ const_reverse_iterator rend() const { return crend(); }
++
++ void push_back(const T& value) { m_RawList.PushBack(value); }
++ iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
++
++ void clear() { m_RawList.Clear(); }
++ void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
++
++private:
++ VmaRawList<T> m_RawList;
++};
++
++#ifndef _VMA_LIST_FUNCTIONS
++template<typename T, typename AllocatorT>
++typename VmaList<T, AllocatorT>::iterator& VmaList<T, AllocatorT>::iterator::operator--()
++{
++ if (m_pItem != VMA_NULL)
++ {
++ m_pItem = m_pItem->pPrev;
++ }
++ else
++ {
++ VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
++ m_pItem = m_pList->Back();
++ }
++ return *this;
++}
++
++template<typename T, typename AllocatorT>
++typename VmaList<T, AllocatorT>::reverse_iterator& VmaList<T, AllocatorT>::reverse_iterator::operator--()
++{
++ if (m_pItem != VMA_NULL)
++ {
++ m_pItem = m_pItem->pNext;
++ }
++ else
++ {
++ VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
++ m_pItem = m_pList->Front();
++ }
++ return *this;
++}
++
++template<typename T, typename AllocatorT>
++typename VmaList<T, AllocatorT>::const_iterator& VmaList<T, AllocatorT>::const_iterator::operator--()
++{
++ if (m_pItem != VMA_NULL)
++ {
++ m_pItem = m_pItem->pPrev;
++ }
++ else
++ {
++ VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
++ m_pItem = m_pList->Back();
++ }
++ return *this;
++}
++
++template<typename T, typename AllocatorT>
++typename VmaList<T, AllocatorT>::const_reverse_iterator& VmaList<T, AllocatorT>::const_reverse_iterator::operator--()
++{
++ if (m_pItem != VMA_NULL)
++ {
++ m_pItem = m_pItem->pNext;
++ }
++ else
++ {
++ VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
++        m_pItem = m_pList->Front(); // Mirrors reverse_iterator::operator--: --rend() is the front element.
++ }
++ return *this;
++}
++#endif // _VMA_LIST_FUNCTIONS
++#endif // _VMA_LIST
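++
++// Usage sketch (illustrative only): VmaList layers std::list-style iterators
++// over VmaRawList, whose nodes come from a VmaPoolAllocator rather than from
++// individual heap allocations.
++#if 0
++static void Example(const VkAllocationCallbacks* pCallbacks)
++{
++    const VmaStlAllocator<uint32_t> alloc(pCallbacks);
++    VmaList<uint32_t, VmaStlAllocator<uint32_t>> list(alloc);
++    list.push_back(1u);
++    list.push_back(2u);
++    for (VmaList<uint32_t, VmaStlAllocator<uint32_t>>::iterator it = list.begin();
++        it != list.end(); ++it)
++    {
++        // *it visits 1, then 2.
++    }
++}
++#endif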
++
++#ifndef _VMA_INTRUSIVE_LINKED_LIST
++/*
++Expected interface of ItemTypeTraits:
++struct MyItemTypeTraits
++{
++ typedef MyItem ItemType;
++ static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; }
++ static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; }
++ static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; }
++ static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }
++};
++*/
++template<typename ItemTypeTraits>
++class VmaIntrusiveLinkedList
++{
++public:
++ typedef typename ItemTypeTraits::ItemType ItemType;
++ static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); }
++ static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); }
++
++ // Movable, not copyable.
++ VmaIntrusiveLinkedList() = default;
++ VmaIntrusiveLinkedList(VmaIntrusiveLinkedList && src);
++ VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList&) = delete;
++ VmaIntrusiveLinkedList& operator=(VmaIntrusiveLinkedList&& src);
++ VmaIntrusiveLinkedList& operator=(const VmaIntrusiveLinkedList&) = delete;
++ ~VmaIntrusiveLinkedList() { VMA_HEAVY_ASSERT(IsEmpty()); }
++
++ size_t GetCount() const { return m_Count; }
++ bool IsEmpty() const { return m_Count == 0; }
++ ItemType* Front() { return m_Front; }
++ ItemType* Back() { return m_Back; }
++ const ItemType* Front() const { return m_Front; }
++ const ItemType* Back() const { return m_Back; }
++
++ void PushBack(ItemType* item);
++ void PushFront(ItemType* item);
++ ItemType* PopBack();
++ ItemType* PopFront();
++
++    // existingItem can be null - it means PushBack.
++    void InsertBefore(ItemType* existingItem, ItemType* newItem);
++    // existingItem can be null - it means PushFront.
++    void InsertAfter(ItemType* existingItem, ItemType* newItem);
++ void Remove(ItemType* item);
++ void RemoveAll();
++
++private:
++ ItemType* m_Front = VMA_NULL;
++ ItemType* m_Back = VMA_NULL;
++ size_t m_Count = 0;
++};
++
++#ifndef _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
++template<typename ItemTypeTraits>
++VmaIntrusiveLinkedList<ItemTypeTraits>::VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src)
++ : m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count)
++{
++ src.m_Front = src.m_Back = VMA_NULL;
++ src.m_Count = 0;
++}
++
++template<typename ItemTypeTraits>
++VmaIntrusiveLinkedList<ItemTypeTraits>& VmaIntrusiveLinkedList<ItemTypeTraits>::operator=(VmaIntrusiveLinkedList&& src)
++{
++ if (&src != this)
++ {
++ VMA_HEAVY_ASSERT(IsEmpty());
++ m_Front = src.m_Front;
++ m_Back = src.m_Back;
++ m_Count = src.m_Count;
++ src.m_Front = src.m_Back = VMA_NULL;
++ src.m_Count = 0;
++ }
++ return *this;
++}
++
++template<typename ItemTypeTraits>
++void VmaIntrusiveLinkedList<ItemTypeTraits>::PushBack(ItemType* item)
++{
++ VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
++ if (IsEmpty())
++ {
++ m_Front = item;
++ m_Back = item;
++ m_Count = 1;
++ }
++ else
++ {
++ ItemTypeTraits::AccessPrev(item) = m_Back;
++ ItemTypeTraits::AccessNext(m_Back) = item;
++ m_Back = item;
++ ++m_Count;
++ }
++}
++
++template<typename ItemTypeTraits>
++void VmaIntrusiveLinkedList<ItemTypeTraits>::PushFront(ItemType* item)
++{
++ VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
++ if (IsEmpty())
++ {
++ m_Front = item;
++ m_Back = item;
++ m_Count = 1;
++ }
++ else
++ {
++ ItemTypeTraits::AccessNext(item) = m_Front;
++ ItemTypeTraits::AccessPrev(m_Front) = item;
++ m_Front = item;
++ ++m_Count;
++ }
++}
++
++template<typename ItemTypeTraits>
++typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopBack()
++{
++ VMA_HEAVY_ASSERT(m_Count > 0);
++ ItemType* const backItem = m_Back;
++ ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem);
++ if (prevItem != VMA_NULL)
++ {
++ ItemTypeTraits::AccessNext(prevItem) = VMA_NULL;
++ }
++ m_Back = prevItem;
++ --m_Count;
++ ItemTypeTraits::AccessPrev(backItem) = VMA_NULL;
++ ItemTypeTraits::AccessNext(backItem) = VMA_NULL;
++ return backItem;
++}
++
++template<typename ItemTypeTraits>
++typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopFront()
++{
++ VMA_HEAVY_ASSERT(m_Count > 0);
++ ItemType* const frontItem = m_Front;
++ ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem);
++ if (nextItem != VMA_NULL)
++ {
++ ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL;
++ }
++ m_Front = nextItem;
++ --m_Count;
++ ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL;
++ ItemTypeTraits::AccessNext(frontItem) = VMA_NULL;
++ return frontItem;
++}
++
++template<typename ItemTypeTraits>
++void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertBefore(ItemType* existingItem, ItemType* newItem)
++{
++ VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
++ if (existingItem != VMA_NULL)
++ {
++ ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem);
++ ItemTypeTraits::AccessPrev(newItem) = prevItem;
++ ItemTypeTraits::AccessNext(newItem) = existingItem;
++ ItemTypeTraits::AccessPrev(existingItem) = newItem;
++ if (prevItem != VMA_NULL)
++ {
++ ItemTypeTraits::AccessNext(prevItem) = newItem;
++ }
++ else
++ {
++ VMA_HEAVY_ASSERT(m_Front == existingItem);
++ m_Front = newItem;
++ }
++ ++m_Count;
++ }
++ else
++ PushBack(newItem);
++}
++
++template<typename ItemTypeTraits>
++void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertAfter(ItemType* existingItem, ItemType* newItem)
++{
++ VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
++ if (existingItem != VMA_NULL)
++ {
++ ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem);
++ ItemTypeTraits::AccessNext(newItem) = nextItem;
++ ItemTypeTraits::AccessPrev(newItem) = existingItem;
++ ItemTypeTraits::AccessNext(existingItem) = newItem;
++ if (nextItem != VMA_NULL)
++ {
++ ItemTypeTraits::AccessPrev(nextItem) = newItem;
++ }
++ else
++ {
++ VMA_HEAVY_ASSERT(m_Back == existingItem);
++ m_Back = newItem;
++ }
++ ++m_Count;
++ }
++ else
++        PushFront(newItem);
++}
++
++template<typename ItemTypeTraits>
++void VmaIntrusiveLinkedList<ItemTypeTraits>::Remove(ItemType* item)
++{
++ VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0);
++ if (ItemTypeTraits::GetPrev(item) != VMA_NULL)
++ {
++ ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item);
++ }
++ else
++ {
++ VMA_HEAVY_ASSERT(m_Front == item);
++ m_Front = ItemTypeTraits::GetNext(item);
++ }
++
++ if (ItemTypeTraits::GetNext(item) != VMA_NULL)
++ {
++ ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item);
++ }
++ else
++ {
++ VMA_HEAVY_ASSERT(m_Back == item);
++ m_Back = ItemTypeTraits::GetPrev(item);
++ }
++ ItemTypeTraits::AccessPrev(item) = VMA_NULL;
++ ItemTypeTraits::AccessNext(item) = VMA_NULL;
++ --m_Count;
++}
++
++template<typename ItemTypeTraits>
++void VmaIntrusiveLinkedList<ItemTypeTraits>::RemoveAll()
++{
++ if (!IsEmpty())
++ {
++ ItemType* item = m_Back;
++ while (item != VMA_NULL)
++ {
++ ItemType* const prevItem = ItemTypeTraits::AccessPrev(item);
++ ItemTypeTraits::AccessPrev(item) = VMA_NULL;
++ ItemTypeTraits::AccessNext(item) = VMA_NULL;
++ item = prevItem;
++ }
++ m_Front = VMA_NULL;
++ m_Back = VMA_NULL;
++ m_Count = 0;
++ }
++}
++#endif // _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
++#endif // _VMA_INTRUSIVE_LINKED_LIST
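++
++// Usage sketch (illustrative only; MyItem is hypothetical, with
++// MyItemTypeTraits defined exactly as in the comment above
++// _VMA_INTRUSIVE_LINKED_LIST): the prev/next pointers live inside the items
++// themselves, so the list performs no allocations of its own.
++#if 0
++static void Example()
++{
++    MyItem a = {}; // myPrevPtr/myNextPtr zero-initialized, as the asserts expect.
++    MyItem b = {};
++    VmaIntrusiveLinkedList<MyItemTypeTraits> list;
++    list.PushBack(&a);
++    list.InsertBefore(&a, &b); // b now precedes a.
++    list.RemoveAll();          // The list must be empty before destruction.
++}
++#endif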
++
++// Unused in this version.
++#if 0
++
++#ifndef _VMA_PAIR
++template<typename T1, typename T2>
++struct VmaPair
++{
++ T1 first;
++ T2 second;
++
++ VmaPair() : first(), second() {}
++ VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) {}
++};
++
++template<typename FirstT, typename SecondT>
++struct VmaPairFirstLess
++{
++ bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
++ {
++ return lhs.first < rhs.first;
++ }
++ bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
++ {
++ return lhs.first < rhsFirst;
++ }
++};
++#endif // _VMA_PAIR
++
++#ifndef _VMA_MAP
++/* Class compatible with a subset of the std::unordered_map interface.
++KeyT, ValueT must be POD because they will be stored in a VmaVector.
++*/
++template<typename KeyT, typename ValueT>
++class VmaMap
++{
++public:
++ typedef VmaPair<KeyT, ValueT> PairType;
++ typedef PairType* iterator;
++
++ VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) {}
++
++ iterator begin() { return m_Vector.begin(); }
++ iterator end() { return m_Vector.end(); }
++ size_t size() { return m_Vector.size(); }
++
++ void insert(const PairType& pair);
++ iterator find(const KeyT& key);
++ void erase(iterator it);
++
++private:
++ VmaVector< PairType, VmaStlAllocator<PairType>> m_Vector;
++};
++
++#ifndef _VMA_MAP_FUNCTIONS
++template<typename KeyT, typename ValueT>
++void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
++{
++ const size_t indexToInsert = VmaBinaryFindFirstNotLess(
++ m_Vector.data(),
++ m_Vector.data() + m_Vector.size(),
++ pair,
++ VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
++ VmaVectorInsert(m_Vector, indexToInsert, pair);
++}
++
++template<typename KeyT, typename ValueT>
++VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
++{
++ PairType* it = VmaBinaryFindFirstNotLess(
++ m_Vector.data(),
++ m_Vector.data() + m_Vector.size(),
++ key,
++ VmaPairFirstLess<KeyT, ValueT>());
++ if ((it != m_Vector.end()) && (it->first == key))
++ {
++ return it;
++ }
++ else
++ {
++ return m_Vector.end();
++ }
++}
++
++template<typename KeyT, typename ValueT>
++void VmaMap<KeyT, ValueT>::erase(iterator it)
++{
++ VmaVectorRemove(m_Vector, it - m_Vector.begin());
++}
++#endif // _VMA_MAP_FUNCTIONS
++#endif // _VMA_MAP
++
++#endif // #if 0
++
++#if !defined(_VMA_STRING_BUILDER) && VMA_STATS_STRING_ENABLED
++class VmaStringBuilder
++{
++public:
++ VmaStringBuilder(const VkAllocationCallbacks* allocationCallbacks) : m_Data(VmaStlAllocator<char>(allocationCallbacks)) {}
++ ~VmaStringBuilder() = default;
++
++ size_t GetLength() const { return m_Data.size(); }
++ const char* GetData() const { return m_Data.data(); }
++ void AddNewLine() { Add('\n'); }
++ void Add(char ch) { m_Data.push_back(ch); }
++
++ void Add(const char* pStr);
++ void AddNumber(uint32_t num);
++ void AddNumber(uint64_t num);
++ void AddPointer(const void* ptr);
++
++private:
++ VmaVector<char, VmaStlAllocator<char>> m_Data;
++};
++
++#ifndef _VMA_STRING_BUILDER_FUNCTIONS
++void VmaStringBuilder::Add(const char* pStr)
++{
++ const size_t strLen = strlen(pStr);
++ if (strLen > 0)
++ {
++ const size_t oldCount = m_Data.size();
++ m_Data.resize(oldCount + strLen);
++ memcpy(m_Data.data() + oldCount, pStr, strLen);
++ }
++}
++
++void VmaStringBuilder::AddNumber(uint32_t num)
++{
++ char buf[11];
++ buf[10] = '\0';
++ char* p = &buf[10];
++ do
++ {
++ *--p = '0' + (char)(num % 10);
++ num /= 10;
++ } while (num);
++ Add(p);
++}
++
++void VmaStringBuilder::AddNumber(uint64_t num)
++{
++ char buf[21];
++ buf[20] = '\0';
++ char* p = &buf[20];
++ do
++ {
++ *--p = '0' + (char)(num % 10);
++ num /= 10;
++ } while (num);
++ Add(p);
++}
++
++void VmaStringBuilder::AddPointer(const void* ptr)
++{
++ char buf[21];
++ VmaPtrToStr(buf, sizeof(buf), ptr);
++ Add(buf);
++}
++#endif //_VMA_STRING_BUILDER_FUNCTIONS
++#endif // _VMA_STRING_BUILDER
++
++#if !defined(_VMA_JSON_WRITER) && VMA_STATS_STRING_ENABLED
++/*
++Conveniently builds a correct JSON document, which is written to the
++VmaStringBuilder passed to the constructor.
++*/
++class VmaJsonWriter
++{
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaJsonWriter)
++public:
++ // sb - string builder to write the document to. Must remain alive for the whole lifetime of this object.
++ VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
++ ~VmaJsonWriter();
++
++ // Begins object by writing "{".
++ // Inside an object, you must call pairs of WriteString and a value, e.g.:
++ // j.BeginObject(true); j.WriteString("A"); j.WriteNumber(1); j.WriteString("B"); j.WriteNumber(2); j.EndObject();
++ // Will write: { "A": 1, "B": 2 }
++ void BeginObject(bool singleLine = false);
++ // Ends object by writing "}".
++ void EndObject();
++
++ // Begins array by writing "[".
++ // Inside an array, you can write a sequence of any values.
++ void BeginArray(bool singleLine = false);
++ // Ends array by writing "[".
++ void EndArray();
++
++ // Writes a string value inside "".
++    // pStr can contain any ANSI characters, including '"' and newline; they will be properly escaped.
++ void WriteString(const char* pStr);
++
++ // Begins writing a string value.
++ // Call BeginString, ContinueString, ContinueString, ..., EndString instead of
++ // WriteString to conveniently build the string content incrementally, made of
++ // parts including numbers.
++ void BeginString(const char* pStr = VMA_NULL);
++ // Posts next part of an open string.
++ void ContinueString(const char* pStr);
++ // Posts next part of an open string. The number is converted to decimal characters.
++ void ContinueString(uint32_t n);
++ void ContinueString(uint64_t n);
++ // Posts next part of an open string. Pointer value is converted to characters
++ // using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00
++ void ContinueString_Pointer(const void* ptr);
++ // Ends writing a string value by writing '"'.
++ void EndString(const char* pStr = VMA_NULL);
++
++ // Writes a number value.
++ void WriteNumber(uint32_t n);
++ void WriteNumber(uint64_t n);
++ // Writes a boolean value - false or true.
++ void WriteBool(bool b);
++ // Writes a null value.
++ void WriteNull();
++
++private:
++ enum COLLECTION_TYPE
++ {
++ COLLECTION_TYPE_OBJECT,
++ COLLECTION_TYPE_ARRAY,
++ };
++ struct StackItem
++ {
++ COLLECTION_TYPE type;
++ uint32_t valueCount;
++ bool singleLineMode;
++ };
++
++ static const char* const INDENT;
++
++ VmaStringBuilder& m_SB;
++ VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
++ bool m_InsideString;
++
++ void BeginValue(bool isString);
++ void WriteIndent(bool oneLess = false);
++};
++const char* const VmaJsonWriter::INDENT = " ";
++
++#ifndef _VMA_JSON_WRITER_FUNCTIONS
++VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb)
++ : m_SB(sb),
++ m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
++ m_InsideString(false) {}
++
++VmaJsonWriter::~VmaJsonWriter()
++{
++ VMA_ASSERT(!m_InsideString);
++ VMA_ASSERT(m_Stack.empty());
++}
++
++void VmaJsonWriter::BeginObject(bool singleLine)
++{
++ VMA_ASSERT(!m_InsideString);
++
++ BeginValue(false);
++ m_SB.Add('{');
++
++ StackItem item;
++ item.type = COLLECTION_TYPE_OBJECT;
++ item.valueCount = 0;
++ item.singleLineMode = singleLine;
++ m_Stack.push_back(item);
++}
++
++void VmaJsonWriter::EndObject()
++{
++ VMA_ASSERT(!m_InsideString);
++
++ WriteIndent(true);
++ m_SB.Add('}');
++
++ VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
++ m_Stack.pop_back();
++}
++
++void VmaJsonWriter::BeginArray(bool singleLine)
++{
++ VMA_ASSERT(!m_InsideString);
++
++ BeginValue(false);
++ m_SB.Add('[');
++
++ StackItem item;
++ item.type = COLLECTION_TYPE_ARRAY;
++ item.valueCount = 0;
++ item.singleLineMode = singleLine;
++ m_Stack.push_back(item);
++}
++
++void VmaJsonWriter::EndArray()
++{
++ VMA_ASSERT(!m_InsideString);
++
++ WriteIndent(true);
++ m_SB.Add(']');
++
++ VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
++ m_Stack.pop_back();
++}
++
++void VmaJsonWriter::WriteString(const char* pStr)
++{
++ BeginString(pStr);
++ EndString();
++}
++
++void VmaJsonWriter::BeginString(const char* pStr)
++{
++ VMA_ASSERT(!m_InsideString);
++
++ BeginValue(true);
++ m_SB.Add('"');
++ m_InsideString = true;
++ if (pStr != VMA_NULL && pStr[0] != '\0')
++ {
++ ContinueString(pStr);
++ }
++}
++
++void VmaJsonWriter::ContinueString(const char* pStr)
++{
++ VMA_ASSERT(m_InsideString);
++
++ const size_t strLen = strlen(pStr);
++ for (size_t i = 0; i < strLen; ++i)
++ {
++ char ch = pStr[i];
++ if (ch == '\\')
++ {
++ m_SB.Add("\\\\");
++ }
++ else if (ch == '"')
++ {
++ m_SB.Add("\\\"");
++ }
++ else if (ch >= 32)
++ {
++ m_SB.Add(ch);
++ }
++ else switch (ch)
++ {
++ case '\b':
++ m_SB.Add("\\b");
++ break;
++ case '\f':
++ m_SB.Add("\\f");
++ break;
++ case '\n':
++ m_SB.Add("\\n");
++ break;
++ case '\r':
++ m_SB.Add("\\r");
++ break;
++ case '\t':
++ m_SB.Add("\\t");
++ break;
++ default:
++ VMA_ASSERT(0 && "Character not currently supported.");
++ }
++ }
++}
++
++void VmaJsonWriter::ContinueString(uint32_t n)
++{
++ VMA_ASSERT(m_InsideString);
++ m_SB.AddNumber(n);
++}
++
++void VmaJsonWriter::ContinueString(uint64_t n)
++{
++ VMA_ASSERT(m_InsideString);
++ m_SB.AddNumber(n);
++}
++
++void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
++{
++ VMA_ASSERT(m_InsideString);
++ m_SB.AddPointer(ptr);
++}
++
++void VmaJsonWriter::EndString(const char* pStr)
++{
++ VMA_ASSERT(m_InsideString);
++ if (pStr != VMA_NULL && pStr[0] != '\0')
++ {
++ ContinueString(pStr);
++ }
++ m_SB.Add('"');
++ m_InsideString = false;
++}
++
++void VmaJsonWriter::WriteNumber(uint32_t n)
++{
++ VMA_ASSERT(!m_InsideString);
++ BeginValue(false);
++ m_SB.AddNumber(n);
++}
++
++void VmaJsonWriter::WriteNumber(uint64_t n)
++{
++ VMA_ASSERT(!m_InsideString);
++ BeginValue(false);
++ m_SB.AddNumber(n);
++}
++
++void VmaJsonWriter::WriteBool(bool b)
++{
++ VMA_ASSERT(!m_InsideString);
++ BeginValue(false);
++ m_SB.Add(b ? "true" : "false");
++}
++
++void VmaJsonWriter::WriteNull()
++{
++ VMA_ASSERT(!m_InsideString);
++ BeginValue(false);
++ m_SB.Add("null");
++}
++
++void VmaJsonWriter::BeginValue(bool isString)
++{
++ if (!m_Stack.empty())
++ {
++ StackItem& currItem = m_Stack.back();
++ if (currItem.type == COLLECTION_TYPE_OBJECT &&
++ currItem.valueCount % 2 == 0)
++ {
++ VMA_ASSERT(isString);
++ }
++
++ if (currItem.type == COLLECTION_TYPE_OBJECT &&
++ currItem.valueCount % 2 != 0)
++ {
++ m_SB.Add(": ");
++ }
++ else if (currItem.valueCount > 0)
++ {
++ m_SB.Add(", ");
++ WriteIndent();
++ }
++ else
++ {
++ WriteIndent();
++ }
++ ++currItem.valueCount;
++ }
++}
++
++void VmaJsonWriter::WriteIndent(bool oneLess)
++{
++ if (!m_Stack.empty() && !m_Stack.back().singleLineMode)
++ {
++ m_SB.AddNewLine();
++
++ size_t count = m_Stack.size();
++ if (count > 0 && oneLess)
++ {
++ --count;
++ }
++ for (size_t i = 0; i < count; ++i)
++ {
++ m_SB.Add(INDENT);
++ }
++ }
++}
++#endif // _VMA_JSON_WRITER_FUNCTIONS
++
++static void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat)
++{
++ json.BeginObject();
++
++ json.WriteString("BlockCount");
++ json.WriteNumber(stat.statistics.blockCount);
++ json.WriteString("BlockBytes");
++ json.WriteNumber(stat.statistics.blockBytes);
++ json.WriteString("AllocationCount");
++ json.WriteNumber(stat.statistics.allocationCount);
++ json.WriteString("AllocationBytes");
++ json.WriteNumber(stat.statistics.allocationBytes);
++ json.WriteString("UnusedRangeCount");
++ json.WriteNumber(stat.unusedRangeCount);
++
++ if (stat.statistics.allocationCount > 1)
++ {
++ json.WriteString("AllocationSizeMin");
++ json.WriteNumber(stat.allocationSizeMin);
++ json.WriteString("AllocationSizeMax");
++ json.WriteNumber(stat.allocationSizeMax);
++ }
++ if (stat.unusedRangeCount > 1)
++ {
++ json.WriteString("UnusedRangeSizeMin");
++ json.WriteNumber(stat.unusedRangeSizeMin);
++ json.WriteString("UnusedRangeSizeMax");
++ json.WriteNumber(stat.unusedRangeSizeMax);
++ }
++ json.EndObject();
++}
++#endif // _VMA_JSON_WRITER
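++
++// Usage sketch (illustrative only): VmaPrintDetailedStatistics above shows
++// the typical pattern; a minimal standalone document looks like this.
++#if 0
++static void Example(const VkAllocationCallbacks* pCallbacks)
++{
++    VmaStringBuilder sb(pCallbacks);
++    {
++        VmaJsonWriter json(pCallbacks, sb);
++        json.BeginObject(true);
++        json.WriteString("A"); json.WriteNumber(1u);
++        json.WriteString("B"); json.WriteNumber(2u);
++        json.EndObject();
++    } // ~VmaJsonWriter asserts that the document is complete.
++    // sb.GetData()/GetLength() now hold a document like { "A": 1, "B": 2 }.
++}
++#endif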
++
++#ifndef _VMA_MAPPING_HYSTERESIS
++
++class VmaMappingHysteresis
++{
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaMappingHysteresis)
++public:
++ VmaMappingHysteresis() = default;
++
++ uint32_t GetExtraMapping() const { return m_ExtraMapping; }
++
++ // Call when Map was called.
++ // Returns true if switched to extra +1 mapping reference count.
++ bool PostMap()
++ {
++#if VMA_MAPPING_HYSTERESIS_ENABLED
++ if(m_ExtraMapping == 0)
++ {
++ ++m_MajorCounter;
++ if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING)
++ {
++ m_ExtraMapping = 1;
++ m_MajorCounter = 0;
++ m_MinorCounter = 0;
++ return true;
++ }
++ }
++ else // m_ExtraMapping == 1
++ PostMinorCounter();
++#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
++ return false;
++ }
++
++ // Call when Unmap was called.
++ void PostUnmap()
++ {
++#if VMA_MAPPING_HYSTERESIS_ENABLED
++ if(m_ExtraMapping == 0)
++ ++m_MajorCounter;
++ else // m_ExtraMapping == 1
++ PostMinorCounter();
++#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
++ }
++
++ // Call when allocation was made from the memory block.
++ void PostAlloc()
++ {
++#if VMA_MAPPING_HYSTERESIS_ENABLED
++ if(m_ExtraMapping == 1)
++ ++m_MajorCounter;
++ else // m_ExtraMapping == 0
++ PostMinorCounter();
++#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
++ }
++
++ // Call when allocation was freed from the memory block.
++ // Returns true if switched to extra -1 mapping reference count.
++ bool PostFree()
++ {
++#if VMA_MAPPING_HYSTERESIS_ENABLED
++ if(m_ExtraMapping == 1)
++ {
++ ++m_MajorCounter;
++ if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING &&
++ m_MajorCounter > m_MinorCounter + 1)
++ {
++ m_ExtraMapping = 0;
++ m_MajorCounter = 0;
++ m_MinorCounter = 0;
++ return true;
++ }
++ }
++ else // m_ExtraMapping == 0
++ PostMinorCounter();
++#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
++ return false;
++ }
++
++private:
++ static const int32_t COUNTER_MIN_EXTRA_MAPPING = 7;
++
++ uint32_t m_MinorCounter = 0;
++ uint32_t m_MajorCounter = 0;
++ uint32_t m_ExtraMapping = 0; // 0 or 1.
++
++ void PostMinorCounter()
++ {
++ if(m_MinorCounter < m_MajorCounter)
++ {
++ ++m_MinorCounter;
++ }
++ else if(m_MajorCounter > 0)
++ {
++ --m_MajorCounter;
++ --m_MinorCounter;
++ }
++ }
++};
++
++#endif // _VMA_MAPPING_HYSTERESIS
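++
++// Editorial note: a hedged sketch (not part of the upstream patch) of how the
++// hysteresis above is driven. After COUNTER_MIN_EXTRA_MAPPING consecutive
++// PostMap() calls it reports that an extra +1 mapping reference should be
++// kept; only a sustained run of PostFree() calls flips it back.
++#if 0 // illustrative only, compiled out
++static void VmaMappingHysteresisExample()
++{
++ VmaMappingHysteresis hysteresis;
++ bool extra = false;
++ for (uint32_t i = 0; i < 7 && !extra; ++i) // COUNTER_MIN_EXTRA_MAPPING == 7
++ extra = hysteresis.PostMap(); // true -> keep the block persistently mapped
++ for (uint32_t i = 0; i < 16 && extra; ++i)
++ if (hysteresis.PostFree()) // true -> drop the extra mapping reference
++ extra = false;
++}
++#endif // illustrative only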
++
++#ifndef _VMA_DEVICE_MEMORY_BLOCK
++/*
++Represents a single block of device memory (`VkDeviceMemory`) with all the
++data about its regions (aka suballocations, #VmaAllocation), both assigned and free.
++
++Thread-safety:
++- Access to m_pMetadata must be externally synchronized.
++- Map, Unmap, Bind* are synchronized internally.
++*/
++class VmaDeviceMemoryBlock
++{
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaDeviceMemoryBlock)
++public:
++ VmaBlockMetadata* m_pMetadata;
++
++ VmaDeviceMemoryBlock(VmaAllocator hAllocator);
++ ~VmaDeviceMemoryBlock();
++
++ // Always call after construction.
++ void Init(
++ VmaAllocator hAllocator,
++ VmaPool hParentPool,
++ uint32_t newMemoryTypeIndex,
++ VkDeviceMemory newMemory,
++ VkDeviceSize newSize,
++ uint32_t id,
++ uint32_t algorithm,
++ VkDeviceSize bufferImageGranularity);
++ // Always call before destruction.
++ void Destroy(VmaAllocator allocator);
++
++ VmaPool GetParentPool() const { return m_hParentPool; }
++ VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
++ uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
++ uint32_t GetId() const { return m_Id; }
++ void* GetMappedData() const { return m_pMappedData; }
++ uint32_t GetMapRefCount() const { return m_MapCount; }
++
++ // Call when allocation/free was made from m_pMetadata.
++ // Used for m_MappingHysteresis.
++ void PostAlloc(VmaAllocator hAllocator);
++ void PostFree(VmaAllocator hAllocator);
++
++ // Validates all data structures inside this object. If not valid, returns false.
++ bool Validate() const;
++ VkResult CheckCorruption(VmaAllocator hAllocator);
++
++ // ppData can be null.
++ VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
++ void Unmap(VmaAllocator hAllocator, uint32_t count);
++
++ VkResult WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
++ VkResult ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
++
++ VkResult BindBufferMemory(
++ const VmaAllocator hAllocator,
++ const VmaAllocation hAllocation,
++ VkDeviceSize allocationLocalOffset,
++ VkBuffer hBuffer,
++ const void* pNext);
++ VkResult BindImageMemory(
++ const VmaAllocator hAllocator,
++ const VmaAllocation hAllocation,
++ VkDeviceSize allocationLocalOffset,
++ VkImage hImage,
++ const void* pNext);
++
++private:
++ VmaPool m_hParentPool; // VK_NULL_HANDLE if it doesn't belong to a custom pool.
++ uint32_t m_MemoryTypeIndex;
++ uint32_t m_Id;
++ VkDeviceMemory m_hMemory;
++
++ /*
++ Protects access to m_hMemory so it is not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
++ Also protects m_MapCount, m_pMappedData.
++ Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
++ */
++ VMA_MUTEX m_MapAndBindMutex;
++ VmaMappingHysteresis m_MappingHysteresis;
++ uint32_t m_MapCount;
++ void* m_pMappedData;
++};
++#endif // _VMA_DEVICE_MEMORY_BLOCK
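++
++// Editorial note: a hedged sketch (not part of the upstream patch) of the
++// reference-counted mapping contract documented above: Map() and Unmap() are
++// internally synchronized, and every successful Map(count) must be balanced
++// by an Unmap(count).
++#if 0 // illustrative only, compiled out
++static void VmaBlockMapExample(VmaAllocator allocator, VmaDeviceMemoryBlock& block)
++{
++ void* pData = VMA_NULL;
++ if (block.Map(allocator, 1, &pData) == VK_SUCCESS)
++ {
++ // ... read or write through pData ...
++ block.Unmap(allocator, 1);
++ }
++}
++#endif // illustrative only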
++
++#ifndef _VMA_ALLOCATION_T
++struct VmaAllocation_T
++{
++ friend struct VmaDedicatedAllocationListItemTraits;
++
++ enum FLAGS
++ {
++ FLAG_PERSISTENT_MAP = 0x01,
++ FLAG_MAPPING_ALLOWED = 0x02,
++ };
++
++public:
++ enum ALLOCATION_TYPE
++ {
++ ALLOCATION_TYPE_NONE,
++ ALLOCATION_TYPE_BLOCK,
++ ALLOCATION_TYPE_DEDICATED,
++ };
++
++ // This struct is allocated using VmaPoolAllocator.
++ VmaAllocation_T(bool mappingAllowed);
++ ~VmaAllocation_T();
++
++ void InitBlockAllocation(
++ VmaDeviceMemoryBlock* block,
++ VmaAllocHandle allocHandle,
++ VkDeviceSize alignment,
++ VkDeviceSize size,
++ uint32_t memoryTypeIndex,
++ VmaSuballocationType suballocationType,
++ bool mapped);
++ // pMappedData not null means allocation is created with MAPPED flag.
++ void InitDedicatedAllocation(
++ VmaPool hParentPool,
++ uint32_t memoryTypeIndex,
++ VkDeviceMemory hMemory,
++ VmaSuballocationType suballocationType,
++ void* pMappedData,
++ VkDeviceSize size);
++
++ ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
++ VkDeviceSize GetAlignment() const { return m_Alignment; }
++ VkDeviceSize GetSize() const { return m_Size; }
++ void* GetUserData() const { return m_pUserData; }
++ const char* GetName() const { return m_pName; }
++ VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
++
++ VmaDeviceMemoryBlock* GetBlock() const { VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); return m_BlockAllocation.m_Block; }
++ uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
++ bool IsPersistentMap() const { return (m_Flags & FLAG_PERSISTENT_MAP) != 0; }
++ bool IsMappingAllowed() const { return (m_Flags & FLAG_MAPPING_ALLOWED) != 0; }
++
++ void SetUserData(VmaAllocator hAllocator, void* pUserData) { m_pUserData = pUserData; }
++ void SetName(VmaAllocator hAllocator, const char* pName);
++ void FreeName(VmaAllocator hAllocator);
++ uint8_t SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation);
++ VmaAllocHandle GetAllocHandle() const;
++ VkDeviceSize GetOffset() const;
++ VmaPool GetParentPool() const;
++ VkDeviceMemory GetMemory() const;
++ void* GetMappedData() const;
++
++ void BlockAllocMap();
++ void BlockAllocUnmap();
++ VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
++ void DedicatedAllocUnmap(VmaAllocator hAllocator);
++
++#if VMA_STATS_STRING_ENABLED
++ uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
++
++ void InitBufferImageUsage(uint32_t bufferImageUsage);
++ void PrintParameters(class VmaJsonWriter& json) const;
++#endif
++
++private:
++ // Allocation out of VmaDeviceMemoryBlock.
++ struct BlockAllocation
++ {
++ VmaDeviceMemoryBlock* m_Block;
++ VmaAllocHandle m_AllocHandle;
++ };
++ // Allocation for an object that has its own private VkDeviceMemory.
++ struct DedicatedAllocation
++ {
++ VmaPool m_hParentPool; // VK_NULL_HANDLE if it doesn't belong to a custom pool.
++ VkDeviceMemory m_hMemory;
++ void* m_pMappedData; // Not null means memory is mapped.
++ VmaAllocation_T* m_Prev;
++ VmaAllocation_T* m_Next;
++ };
++ union
++ {
++ // Allocation out of VmaDeviceMemoryBlock.
++ BlockAllocation m_BlockAllocation;
++ // Allocation for an object that has its own private VkDeviceMemory.
++ DedicatedAllocation m_DedicatedAllocation;
++ };
++
++ VkDeviceSize m_Alignment;
++ VkDeviceSize m_Size;
++ void* m_pUserData;
++ char* m_pName;
++ uint32_t m_MemoryTypeIndex;
++ uint8_t m_Type; // ALLOCATION_TYPE
++ uint8_t m_SuballocationType; // VmaSuballocationType
++ // Reference counter for vmaMapMemory()/vmaUnmapMemory().
++ uint8_t m_MapCount;
++ uint8_t m_Flags; // enum FLAGS
++#if VMA_STATS_STRING_ENABLED
++ uint32_t m_BufferImageUsage; // 0 if unknown.
++#endif
++};
++#endif // _VMA_ALLOCATION_T
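++
++// Editorial note: a hedged sketch (not part of the upstream patch) of how the
++// m_Type discriminant guards the union above: block-based and dedicated
++// allocations share the same storage, so accessors must branch on GetType().
++#if 0 // illustrative only, compiled out
++static VkDeviceMemory VmaAllocationMemoryExample(const VmaAllocation_T& alloc)
++{
++ switch (alloc.GetType())
++ {
++ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
++ return alloc.GetBlock()->GetDeviceMemory(); // via m_BlockAllocation
++ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
++ return alloc.GetMemory(); // via m_DedicatedAllocation
++ default:
++ return VK_NULL_HANDLE;
++ }
++}
++#endif // illustrative only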
++
++#ifndef _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS
++struct VmaDedicatedAllocationListItemTraits
++{
++ typedef VmaAllocation_T ItemType;
++
++ static ItemType* GetPrev(const ItemType* item)
++ {
++ VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
++ return item->m_DedicatedAllocation.m_Prev;
++ }
++ static ItemType* GetNext(const ItemType* item)
++ {
++ VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
++ return item->m_DedicatedAllocation.m_Next;
++ }
++ static ItemType*& AccessPrev(ItemType* item)
++ {
++ VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
++ return item->m_DedicatedAllocation.m_Prev;
++ }
++ static ItemType*& AccessNext(ItemType* item)
++ {
++ VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
++ return item->m_DedicatedAllocation.m_Next;
++ }
++};
++#endif // _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS
++
++#ifndef _VMA_DEDICATED_ALLOCATION_LIST
++/*
++Stores linked list of VmaAllocation_T objects.
++Thread-safe, synchronized internally.
++*/
++class VmaDedicatedAllocationList
++{
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaDedicatedAllocationList)
++public:
++ VmaDedicatedAllocationList() {}
++ ~VmaDedicatedAllocationList();
++
++ void Init(bool useMutex) { m_UseMutex = useMutex; }
++ bool Validate();
++
++ void AddDetailedStatistics(VmaDetailedStatistics& inoutStats);
++ void AddStatistics(VmaStatistics& inoutStats);
++#if VMA_STATS_STRING_ENABLED
++ // Writes JSON array with the list of allocations.
++ void BuildStatsString(VmaJsonWriter& json);
++#endif
++
++ bool IsEmpty();
++ void Register(VmaAllocation alloc);
++ void Unregister(VmaAllocation alloc);
++
++private:
++ typedef VmaIntrusiveLinkedList<VmaDedicatedAllocationListItemTraits> DedicatedAllocationLinkedList;
++
++ bool m_UseMutex = true;
++ VMA_RW_MUTEX m_Mutex;
++ DedicatedAllocationLinkedList m_AllocationList;
++};
++
++#ifndef _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS
++
++VmaDedicatedAllocationList::~VmaDedicatedAllocationList()
++{
++ VMA_HEAVY_ASSERT(Validate());
++
++ if (!m_AllocationList.IsEmpty())
++ {
++ VMA_ASSERT(false && "Unfreed dedicated allocations found!");
++ }
++}
++
++bool VmaDedicatedAllocationList::Validate()
++{
++ const size_t declaredCount = m_AllocationList.GetCount();
++ size_t actualCount = 0;
++ VmaMutexLockRead lock(m_Mutex, m_UseMutex);
++ for (VmaAllocation alloc = m_AllocationList.Front();
++ alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc))
++ {
++ ++actualCount;
++ }
++ VMA_VALIDATE(actualCount == declaredCount);
++
++ return true;
++}
++
++void VmaDedicatedAllocationList::AddDetailedStatistics(VmaDetailedStatistics& inoutStats)
++{
++ for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item))
++ {
++ const VkDeviceSize size = item->GetSize();
++ inoutStats.statistics.blockCount++;
++ inoutStats.statistics.blockBytes += size;
++ VmaAddDetailedStatisticsAllocation(inoutStats, size);
++ }
++}
++
++void VmaDedicatedAllocationList::AddStatistics(VmaStatistics& inoutStats)
++{
++ VmaMutexLockRead lock(m_Mutex, m_UseMutex);
++
++ const uint32_t allocCount = (uint32_t)m_AllocationList.GetCount();
++ inoutStats.blockCount += allocCount;
++ inoutStats.allocationCount += allocCount;
++
++ for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item))
++ {
++ const VkDeviceSize size = item->GetSize();
++ inoutStats.blockBytes += size;
++ inoutStats.allocationBytes += size;
++ }
++}
++
++#if VMA_STATS_STRING_ENABLED
++void VmaDedicatedAllocationList::BuildStatsString(VmaJsonWriter& json)
++{
++ VmaMutexLockRead lock(m_Mutex, m_UseMutex);
++ json.BeginArray();
++ for (VmaAllocation alloc = m_AllocationList.Front();
++ alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc))
++ {
++ json.BeginObject(true);
++ alloc->PrintParameters(json);
++ json.EndObject();
++ }
++ json.EndArray();
++}
++#endif // VMA_STATS_STRING_ENABLED
++
++bool VmaDedicatedAllocationList::IsEmpty()
++{
++ VmaMutexLockRead lock(m_Mutex, m_UseMutex);
++ return m_AllocationList.IsEmpty();
++}
++
++void VmaDedicatedAllocationList::Register(VmaAllocation alloc)
++{
++ VmaMutexLockWrite lock(m_Mutex, m_UseMutex);
++ m_AllocationList.PushBack(alloc);
++}
++
++void VmaDedicatedAllocationList::Unregister(VmaAllocation alloc)
++{
++ VmaMutexLockWrite lock(m_Mutex, m_UseMutex);
++ m_AllocationList.Remove(alloc);
++}
++#endif // _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS
++#endif // _VMA_DEDICATED_ALLOCATION_LIST
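++
++// Editorial note: a hedged usage sketch (not part of the upstream patch) of
++// the list above: dedicated allocations register themselves on creation,
++// unregister on destruction, and statistics can be gathered in between from
++// any thread under the shared lock.
++#if 0 // illustrative only, compiled out
++static void VmaDedicatedListExample(VmaDedicatedAllocationList& list, VmaAllocation alloc)
++{
++ list.Register(alloc); // after InitDedicatedAllocation()
++ VmaStatistics stats = {};
++ list.AddStatistics(stats); // thread-safe aggregate query
++ list.Unregister(alloc); // before the allocation is destroyed
++}
++#endif // illustrative only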
++
++#ifndef _VMA_SUBALLOCATION
++/*
++Represents a region of VmaDeviceMemoryBlock that is either assigned to an
++allocation and returned as an allocated memory block, or free.
++*/
++struct VmaSuballocation
++{
++ VkDeviceSize offset;
++ VkDeviceSize size;
++ void* userData;
++ VmaSuballocationType type;
++};
++
++// Comparator for offsets.
++struct VmaSuballocationOffsetLess
++{
++ bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
++ {
++ return lhs.offset < rhs.offset;
++ }
++};
++
++struct VmaSuballocationOffsetGreater
++{
++ bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
++ {
++ return lhs.offset > rhs.offset;
++ }
++};
++
++struct VmaSuballocationItemSizeLess
++{
++ bool operator()(const VmaSuballocationList::iterator lhs,
++ const VmaSuballocationList::iterator rhs) const
++ {
++ return lhs->size < rhs->size;
++ }
++
++ bool operator()(const VmaSuballocationList::iterator lhs,
++ VkDeviceSize rhsSize) const
++ {
++ return lhs->size < rhsSize;
++ }
++};
++#endif // _VMA_SUBALLOCATION
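++
++// Editorial note: a short sketch (not part of the upstream patch) of why
++// VmaSuballocationItemSizeLess has the extra iterator-vs-VkDeviceSize
++// overload: it lets the free list, kept sorted by size ascending, be
++// binary-searched with a plain size key, as CreateAllocationRequest() does
++// further below.
++#if 0 // illustrative only, compiled out
++static VmaSuballocationList::iterator* VmaFindFirstFreeAtLeast(
++ VmaSuballocationList::iterator* begin,
++ VmaSuballocationList::iterator* end,
++ VkDeviceSize minSize)
++{
++ // Returns the first registered free suballocation whose size >= minSize.
++ return VmaBinaryFindFirstNotLess(begin, end, minSize, VmaSuballocationItemSizeLess());
++}
++#endif // illustrative only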
++
++#ifndef _VMA_ALLOCATION_REQUEST
++/*
++Parameters of planned allocation inside a VmaDeviceMemoryBlock.
++item points to a FREE suballocation.
++*/
++struct VmaAllocationRequest
++{
++ VmaAllocHandle allocHandle;
++ VkDeviceSize size;
++ VmaSuballocationList::iterator item;
++ void* customData;
++ uint64_t algorithmData;
++ VmaAllocationRequestType type;
++};
++#endif // _VMA_ALLOCATION_REQUEST
++
++#ifndef _VMA_BLOCK_METADATA
++/*
++Data structure used for bookkeeping of allocations and unused ranges of memory
++in a single VkDeviceMemory block.
++*/
++class VmaBlockMetadata
++{
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata)
++public:
++ // pAllocationCallbacks, if not null, must be owned externally - alive and unchanged for the whole lifetime of this object.
++ VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks,
++ VkDeviceSize bufferImageGranularity, bool isVirtual);
++ virtual ~VmaBlockMetadata() = default;
++
++ virtual void Init(VkDeviceSize size) { m_Size = size; }
++ bool IsVirtual() const { return m_IsVirtual; }
++ VkDeviceSize GetSize() const { return m_Size; }
++
++ // Validates all data structures inside this object. If not valid, returns false.
++ virtual bool Validate() const = 0;
++ virtual size_t GetAllocationCount() const = 0;
++ virtual size_t GetFreeRegionsCount() const = 0;
++ virtual VkDeviceSize GetSumFreeSize() const = 0;
++ // Returns true if this block is empty - contains only a single free suballocation.
++ virtual bool IsEmpty() const = 0;
++ virtual void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) = 0;
++ virtual VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const = 0;
++ virtual void* GetAllocationUserData(VmaAllocHandle allocHandle) const = 0;
++
++ virtual VmaAllocHandle GetAllocationListBegin() const = 0;
++ virtual VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const = 0;
++ virtual VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const = 0;
++
++ // Shouldn't modify blockCount.
++ virtual void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const = 0;
++ virtual void AddStatistics(VmaStatistics& inoutStats) const = 0;
++
++#if VMA_STATS_STRING_ENABLED
++ virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
++#endif
++
++ // Tries to find a place for suballocation with given parameters inside this block.
++ // If succeeded, fills pAllocationRequest and returns true.
++ // If failed, returns false.
++ virtual bool CreateAllocationRequest(
++ VkDeviceSize allocSize,
++ VkDeviceSize allocAlignment,
++ bool upperAddress,
++ VmaSuballocationType allocType,
++ // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
++ uint32_t strategy,
++ VmaAllocationRequest* pAllocationRequest) = 0;
++
++ virtual VkResult CheckCorruption(const void* pBlockData) = 0;
++
++ // Makes actual allocation based on request. Request must already be checked and valid.
++ virtual void Alloc(
++ const VmaAllocationRequest& request,
++ VmaSuballocationType type,
++ void* userData) = 0;
++
++ // Frees suballocation assigned to given memory region.
++ virtual void Free(VmaAllocHandle allocHandle) = 0;
++
++ // Frees all allocations.
++ // Careful! Don't call it if there are VmaAllocation objects owned by userData of cleared allocations!
++ virtual void Clear() = 0;
++
++ virtual void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) = 0;
++ virtual void DebugLogAllAllocations() const = 0;
++
++protected:
++ const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
++ VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
++ VkDeviceSize GetDebugMargin() const { return VkDeviceSize(IsVirtual() ? 0 : VMA_DEBUG_MARGIN); }
++
++ void DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const;
++#if VMA_STATS_STRING_ENABLED
++ void PrintDetailedMap_Begin(class VmaJsonWriter& json,
++ VkDeviceSize unusedBytes,
++ size_t allocationCount,
++ size_t unusedRangeCount) const;
++ void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
++ VkDeviceSize offset, VkDeviceSize size, void* userData) const;
++ void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
++ VkDeviceSize offset,
++ VkDeviceSize size) const;
++ void PrintDetailedMap_End(class VmaJsonWriter& json) const;
++#endif
++
++private:
++ VkDeviceSize m_Size;
++ const VkAllocationCallbacks* m_pAllocationCallbacks;
++ const VkDeviceSize m_BufferImageGranularity;
++ const bool m_IsVirtual;
++};
++
++#ifndef _VMA_BLOCK_METADATA_FUNCTIONS
++VmaBlockMetadata::VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks,
++ VkDeviceSize bufferImageGranularity, bool isVirtual)
++ : m_Size(0),
++ m_pAllocationCallbacks(pAllocationCallbacks),
++ m_BufferImageGranularity(bufferImageGranularity),
++ m_IsVirtual(isVirtual) {}
++
++void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const
++{
++ if (IsVirtual())
++ {
++ VMA_DEBUG_LOG_FORMAT("UNFREED VIRTUAL ALLOCATION; Offset: %llu; Size: %llu; UserData: %p", offset, size, userData);
++ }
++ else
++ {
++ VMA_ASSERT(userData != VMA_NULL);
++ VmaAllocation allocation = reinterpret_cast<VmaAllocation>(userData);
++
++ userData = allocation->GetUserData();
++ const char* name = allocation->GetName();
++
++#if VMA_STATS_STRING_ENABLED
++ VMA_DEBUG_LOG_FORMAT("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %s; Usage: %u",
++ offset, size, userData, name ? name : "vma_empty",
++ VMA_SUBALLOCATION_TYPE_NAMES[allocation->GetSuballocationType()],
++ allocation->GetBufferImageUsage());
++#else
++ VMA_DEBUG_LOG_FORMAT("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %u",
++ offset, size, userData, name ? name : "vma_empty",
++ (uint32_t)allocation->GetSuballocationType());
++#endif // VMA_STATS_STRING_ENABLED
++ }
++}
++
++#if VMA_STATS_STRING_ENABLED
++void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
++ VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount) const
++{
++ json.WriteString("TotalBytes");
++ json.WriteNumber(GetSize());
++
++ json.WriteString("UnusedBytes");
++ json.WriteNumber(unusedBytes);
++
++ json.WriteString("Allocations");
++ json.WriteNumber((uint64_t)allocationCount);
++
++ json.WriteString("UnusedRanges");
++ json.WriteNumber((uint64_t)unusedRangeCount);
++
++ json.WriteString("Suballocations");
++ json.BeginArray();
++}
++
++void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
++ VkDeviceSize offset, VkDeviceSize size, void* userData) const
++{
++ json.BeginObject(true);
++
++ json.WriteString("Offset");
++ json.WriteNumber(offset);
++
++ if (IsVirtual())
++ {
++ json.WriteString("Size");
++ json.WriteNumber(size);
++ if (userData)
++ {
++ json.WriteString("CustomData");
++ json.BeginString();
++ json.ContinueString_Pointer(userData);
++ json.EndString();
++ }
++ }
++ else
++ {
++ ((VmaAllocation)userData)->PrintParameters(json);
++ }
++
++ json.EndObject();
++}
++
++void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
++ VkDeviceSize offset, VkDeviceSize size) const
++{
++ json.BeginObject(true);
++
++ json.WriteString("Offset");
++ json.WriteNumber(offset);
++
++ json.WriteString("Type");
++ json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
++
++ json.WriteString("Size");
++ json.WriteNumber(size);
++
++ json.EndObject();
++}
++
++void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
++{
++ json.EndArray();
++}
++#endif // VMA_STATS_STRING_ENABLED
++#endif // _VMA_BLOCK_METADATA_FUNCTIONS
++#endif // _VMA_BLOCK_METADATA
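++
++// Editorial note: a hedged sketch (not part of the upstream patch) of the
++// two-phase allocation protocol the interface above documents: first
++// CreateAllocationRequest() searches for a place, then Alloc() commits the
++// already-validated request.
++#if 0 // illustrative only, compiled out
++static bool VmaTryAllocExample(VmaBlockMetadata& metadata,
++ VkDeviceSize size, VkDeviceSize alignment, void* userData)
++{
++ VmaAllocationRequest request = {};
++ if (!metadata.CreateAllocationRequest(size, alignment,
++ false /* upperAddress */, VMA_SUBALLOCATION_TYPE_UNKNOWN,
++ 0 /* default strategy */, &request))
++ return false; // no suitable free region in this block
++ metadata.Alloc(request, VMA_SUBALLOCATION_TYPE_UNKNOWN, userData);
++ return true;
++}
++#endif // illustrative only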
++
++#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY
++// Before deleting an object of this class remember to call Destroy().
++class VmaBlockBufferImageGranularity final
++{
++public:
++ struct ValidationContext
++ {
++ const VkAllocationCallbacks* allocCallbacks;
++ uint16_t* pageAllocs;
++ };
++
++ VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity);
++ ~VmaBlockBufferImageGranularity();
++
++ bool IsEnabled() const { return m_BufferImageGranularity > MAX_LOW_BUFFER_IMAGE_GRANULARITY; }
++
++ void Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size);
++ // Before destroying this object you must call Destroy() to free its memory.
++ void Destroy(const VkAllocationCallbacks* pAllocationCallbacks);
++
++ void RoundupAllocRequest(VmaSuballocationType allocType,
++ VkDeviceSize& inOutAllocSize,
++ VkDeviceSize& inOutAllocAlignment) const;
++
++ bool CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset,
++ VkDeviceSize allocSize,
++ VkDeviceSize blockOffset,
++ VkDeviceSize blockSize,
++ VmaSuballocationType allocType) const;
++
++ void AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size);
++ void FreePages(VkDeviceSize offset, VkDeviceSize size);
++ void Clear();
++
++ ValidationContext StartValidation(const VkAllocationCallbacks* pAllocationCallbacks,
++ bool isVirtual) const;
++ bool Validate(ValidationContext& ctx, VkDeviceSize offset, VkDeviceSize size) const;
++ bool FinishValidation(ValidationContext& ctx) const;
++
++private:
++ static const uint16_t MAX_LOW_BUFFER_IMAGE_GRANULARITY = 256;
++
++ struct RegionInfo
++ {
++ uint8_t allocType;
++ uint16_t allocCount;
++ };
++
++ VkDeviceSize m_BufferImageGranularity;
++ uint32_t m_RegionCount;
++ RegionInfo* m_RegionInfo;
++
++ uint32_t GetStartPage(VkDeviceSize offset) const { return OffsetToPageIndex(offset & ~(m_BufferImageGranularity - 1)); }
++ uint32_t GetEndPage(VkDeviceSize offset, VkDeviceSize size) const { return OffsetToPageIndex((offset + size - 1) & ~(m_BufferImageGranularity - 1)); }
++
++ uint32_t OffsetToPageIndex(VkDeviceSize offset) const;
++ void AllocPage(RegionInfo& page, uint8_t allocType);
++};
++
++#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS
++VmaBlockBufferImageGranularity::VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity)
++ : m_BufferImageGranularity(bufferImageGranularity),
++ m_RegionCount(0),
++ m_RegionInfo(VMA_NULL) {}
++
++VmaBlockBufferImageGranularity::~VmaBlockBufferImageGranularity()
++{
++ VMA_ASSERT(m_RegionInfo == VMA_NULL && "Destroy() not called before destroying object!");
++}
++
++void VmaBlockBufferImageGranularity::Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size)
++{
++ if (IsEnabled())
++ {
++ m_RegionCount = static_cast<uint32_t>(VmaDivideRoundingUp(size, m_BufferImageGranularity));
++ m_RegionInfo = vma_new_array(pAllocationCallbacks, RegionInfo, m_RegionCount);
++ memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo));
++ }
++}
++
++void VmaBlockBufferImageGranularity::Destroy(const VkAllocationCallbacks* pAllocationCallbacks)
++{
++ if (m_RegionInfo)
++ {
++ vma_delete_array(pAllocationCallbacks, m_RegionInfo, m_RegionCount);
++ m_RegionInfo = VMA_NULL;
++ }
++}
++
++void VmaBlockBufferImageGranularity::RoundupAllocRequest(VmaSuballocationType allocType,
++ VkDeviceSize& inOutAllocSize,
++ VkDeviceSize& inOutAllocAlignment) const
++{
++ if (m_BufferImageGranularity > 1 &&
++ m_BufferImageGranularity <= MAX_LOW_BUFFER_IMAGE_GRANULARITY)
++ {
++ if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
++ allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
++ allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
++ {
++ inOutAllocAlignment = VMA_MAX(inOutAllocAlignment, m_BufferImageGranularity);
++ inOutAllocSize = VmaAlignUp(inOutAllocSize, m_BufferImageGranularity);
++ }
++ }
++}
++
++bool VmaBlockBufferImageGranularity::CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset,
++ VkDeviceSize allocSize,
++ VkDeviceSize blockOffset,
++ VkDeviceSize blockSize,
++ VmaSuballocationType allocType) const
++{
++ if (IsEnabled())
++ {
++ uint32_t startPage = GetStartPage(inOutAllocOffset);
++ if (m_RegionInfo[startPage].allocCount > 0 &&
++ VmaIsBufferImageGranularityConflict(static_cast<VmaSuballocationType>(m_RegionInfo[startPage].allocType), allocType))
++ {
++ inOutAllocOffset = VmaAlignUp(inOutAllocOffset, m_BufferImageGranularity);
++ if (blockSize < allocSize + inOutAllocOffset - blockOffset)
++ return true;
++ ++startPage;
++ }
++ uint32_t endPage = GetEndPage(inOutAllocOffset, allocSize);
++ if (endPage != startPage &&
++ m_RegionInfo[endPage].allocCount > 0 &&
++ VmaIsBufferImageGranularityConflict(static_cast<VmaSuballocationType>(m_RegionInfo[endPage].allocType), allocType))
++ {
++ return true;
++ }
++ }
++ return false;
++}
++
++void VmaBlockBufferImageGranularity::AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size)
++{
++ if (IsEnabled())
++ {
++ uint32_t startPage = GetStartPage(offset);
++ AllocPage(m_RegionInfo[startPage], allocType);
++
++ uint32_t endPage = GetEndPage(offset, size);
++ if (startPage != endPage)
++ AllocPage(m_RegionInfo[endPage], allocType);
++ }
++}
++
++void VmaBlockBufferImageGranularity::FreePages(VkDeviceSize offset, VkDeviceSize size)
++{
++ if (IsEnabled())
++ {
++ uint32_t startPage = GetStartPage(offset);
++ --m_RegionInfo[startPage].allocCount;
++ if (m_RegionInfo[startPage].allocCount == 0)
++ m_RegionInfo[startPage].allocType = VMA_SUBALLOCATION_TYPE_FREE;
++ uint32_t endPage = GetEndPage(offset, size);
++ if (startPage != endPage)
++ {
++ --m_RegionInfo[endPage].allocCount;
++ if (m_RegionInfo[endPage].allocCount == 0)
++ m_RegionInfo[endPage].allocType = VMA_SUBALLOCATION_TYPE_FREE;
++ }
++ }
++}
++
++void VmaBlockBufferImageGranularity::Clear()
++{
++ if (m_RegionInfo)
++ memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo));
++}
++
++VmaBlockBufferImageGranularity::ValidationContext VmaBlockBufferImageGranularity::StartValidation(
++ const VkAllocationCallbacks* pAllocationCallbacks, bool isVirtual) const
++{
++ ValidationContext ctx{ pAllocationCallbacks, VMA_NULL };
++ if (!isVirtual && IsEnabled())
++ {
++ ctx.pageAllocs = vma_new_array(pAllocationCallbacks, uint16_t, m_RegionCount);
++ memset(ctx.pageAllocs, 0, m_RegionCount * sizeof(uint16_t));
++ }
++ return ctx;
++}
++
++bool VmaBlockBufferImageGranularity::Validate(ValidationContext& ctx,
++ VkDeviceSize offset, VkDeviceSize size) const
++{
++ if (IsEnabled())
++ {
++ uint32_t start = GetStartPage(offset);
++ ++ctx.pageAllocs[start];
++ VMA_VALIDATE(m_RegionInfo[start].allocCount > 0);
++
++ uint32_t end = GetEndPage(offset, size);
++ if (start != end)
++ {
++ ++ctx.pageAllocs[end];
++ VMA_VALIDATE(m_RegionInfo[end].allocCount > 0);
++ }
++ }
++ return true;
++}
++
++bool VmaBlockBufferImageGranularity::FinishValidation(ValidationContext& ctx) const
++{
++ // Check proper page structure
++ if (IsEnabled())
++ {
++ VMA_ASSERT(ctx.pageAllocs != VMA_NULL && "Validation context not initialized!");
++
++ for (uint32_t page = 0; page < m_RegionCount; ++page)
++ {
++ VMA_VALIDATE(ctx.pageAllocs[page] == m_RegionInfo[page].allocCount);
++ }
++ vma_delete_array(ctx.allocCallbacks, ctx.pageAllocs, m_RegionCount);
++ ctx.pageAllocs = VMA_NULL;
++ }
++ return true;
++}
++
++uint32_t VmaBlockBufferImageGranularity::OffsetToPageIndex(VkDeviceSize offset) const
++{
++ return static_cast<uint32_t>(offset >> VMA_BITSCAN_MSB(m_BufferImageGranularity));
++}
++
++void VmaBlockBufferImageGranularity::AllocPage(RegionInfo& page, uint8_t allocType)
++{
++ // When the current alloc type is free, it can be overridden by the new type.
++ if (page.allocCount == 0 || (page.allocCount > 0 && page.allocType == VMA_SUBALLOCATION_TYPE_FREE))
++ page.allocType = allocType;
++
++ ++page.allocCount;
++}
++#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS
++#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY
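++
++// Editorial note: a small worked example (not part of the upstream patch) of
++// the page math above, which relies on bufferImageGranularity being a power
++// of two: with a granularity of 1024, an allocation at offset 1500 of size
++// 600 spans pages 1 and 2, so both end up tracked in m_RegionInfo.
++#if 0 // illustrative only, compiled out
++static void VmaGranularityPageExample()
++{
++ const VkDeviceSize granularity = 1024;
++ const VkDeviceSize offset = 1500, size = 600;
++ const VkDeviceSize startPage = (offset & ~(granularity - 1)) / granularity; // 1
++ const VkDeviceSize endPage = ((offset + size - 1) & ~(granularity - 1)) / granularity; // 2
++ VMA_ASSERT(startPage == 1 && endPage == 2);
++}
++#endif // illustrative only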
++
++#if 0
++#ifndef _VMA_BLOCK_METADATA_GENERIC
++class VmaBlockMetadata_Generic : public VmaBlockMetadata
++{
++ friend class VmaDefragmentationAlgorithm_Generic;
++ friend class VmaDefragmentationAlgorithm_Fast;
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_Generic)
++public:
++ VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks,
++ VkDeviceSize bufferImageGranularity, bool isVirtual);
++ virtual ~VmaBlockMetadata_Generic() = default;
++
++ size_t GetAllocationCount() const override { return m_Suballocations.size() - m_FreeCount; }
++ VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; }
++ bool IsEmpty() const override { return (m_Suballocations.size() == 1) && (m_FreeCount == 1); }
++ void Free(VmaAllocHandle allocHandle) override { FreeSuballocation(FindAtOffset((VkDeviceSize)allocHandle - 1)); }
++ VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }
++
++ void Init(VkDeviceSize size) override;
++ bool Validate() const override;
++
++ void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
++ void AddStatistics(VmaStatistics& inoutStats) const override;
++
++#if VMA_STATS_STRING_ENABLED
++ void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override;
++#endif
++
++ bool CreateAllocationRequest(
++ VkDeviceSize allocSize,
++ VkDeviceSize allocAlignment,
++ bool upperAddress,
++ VmaSuballocationType allocType,
++ uint32_t strategy,
++ VmaAllocationRequest* pAllocationRequest) override;
++
++ VkResult CheckCorruption(const void* pBlockData) override;
++
++ void Alloc(
++ const VmaAllocationRequest& request,
++ VmaSuballocationType type,
++ void* userData) override;
++
++ void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
++ void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
++ VmaAllocHandle GetAllocationListBegin() const override;
++ VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
++ void Clear() override;
++ void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
++ void DebugLogAllAllocations() const override;
++
++private:
++ uint32_t m_FreeCount;
++ VkDeviceSize m_SumFreeSize;
++ VmaSuballocationList m_Suballocations;
++ // Suballocations that are free. Sorted by size, ascending.
++ VmaVector<VmaSuballocationList::iterator, VmaStlAllocator<VmaSuballocationList::iterator>> m_FreeSuballocationsBySize;
++
++ VkDeviceSize AlignAllocationSize(VkDeviceSize size) const { return IsVirtual() ? size : VmaAlignUp(size, (VkDeviceSize)16); }
++
++ VmaSuballocationList::iterator FindAtOffset(VkDeviceSize offset) const;
++ bool ValidateFreeSuballocationList() const;
++
++ // Checks if a requested suballocation with the given parameters can be placed in the given suballocItem.
++ // If yes, fills pAllocHandle and returns true. If no, returns false.
++ bool CheckAllocation(
++ VkDeviceSize allocSize,
++ VkDeviceSize allocAlignment,
++ VmaSuballocationType allocType,
++ VmaSuballocationList::const_iterator suballocItem,
++ VmaAllocHandle* pAllocHandle) const;
++
++ // Given a free suballocation, merges it with the following one, which must also be free.
++ void MergeFreeWithNext(VmaSuballocationList::iterator item);
++ // Releases the given suballocation, making it free.
++ // Merges it with adjacent free suballocations if applicable.
++ // Returns an iterator to the new free suballocation at this place.
++ VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
++ // Given a free suballocation, inserts it into the sorted list of
++ // m_FreeSuballocationsBySize if it is suitable.
++ void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
++ // Given a free suballocation, removes it from the sorted list of
++ // m_FreeSuballocationsBySize if it is suitable.
++ void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
++};
++
++#ifndef _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS
++VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks,
++ VkDeviceSize bufferImageGranularity, bool isVirtual)
++ : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
++ m_FreeCount(0),
++ m_SumFreeSize(0),
++ m_Suballocations(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
++ m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(pAllocationCallbacks)) {}
++
++void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
++{
++ VmaBlockMetadata::Init(size);
++
++ m_FreeCount = 1;
++ m_SumFreeSize = size;
++
++ VmaSuballocation suballoc = {};
++ suballoc.offset = 0;
++ suballoc.size = size;
++ suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
++
++ m_Suballocations.push_back(suballoc);
++ m_FreeSuballocationsBySize.push_back(m_Suballocations.begin());
++}
++
++bool VmaBlockMetadata_Generic::Validate() const
++{
++ VMA_VALIDATE(!m_Suballocations.empty());
++
++ // Expected offset of new suballocation as calculated from previous ones.
++ VkDeviceSize calculatedOffset = 0;
++ // Expected number of free suballocations as calculated from traversing their list.
++ uint32_t calculatedFreeCount = 0;
++ // Expected sum size of free suballocations as calculated from traversing their list.
++ VkDeviceSize calculatedSumFreeSize = 0;
++ // Expected number of free suballocations that should be registered in
++ // m_FreeSuballocationsBySize calculated from traversing their list.
++ size_t freeSuballocationsToRegister = 0;
++ // True if previous visited suballocation was free.
++ bool prevFree = false;
++
++ const VkDeviceSize debugMargin = GetDebugMargin();
++
++ for (const auto& subAlloc : m_Suballocations)
++ {
++ // Actual offset of this suballocation doesn't match expected one.
++ VMA_VALIDATE(subAlloc.offset == calculatedOffset);
++
++ const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
++ // Two adjacent free suballocations are invalid. They should be merged.
++ VMA_VALIDATE(!prevFree || !currFree);
++
++ VmaAllocation alloc = (VmaAllocation)subAlloc.userData;
++ if (!IsVirtual())
++ {
++ VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
++ }
++
++ if (currFree)
++ {
++ calculatedSumFreeSize += subAlloc.size;
++ ++calculatedFreeCount;
++ ++freeSuballocationsToRegister;
++
++ // Margin required between allocations - every free space must be at least that large.
++ VMA_VALIDATE(subAlloc.size >= debugMargin);
++ }
++ else
++ {
++ if (!IsVirtual())
++ {
++ VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == subAlloc.offset + 1);
++ VMA_VALIDATE(alloc->GetSize() == subAlloc.size);
++ }
++
++ // Margin required between allocations - previous allocation must be free.
++ VMA_VALIDATE(debugMargin == 0 || prevFree);
++ }
++
++ calculatedOffset += subAlloc.size;
++ prevFree = currFree;
++ }
++
++ // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
++ // match expected one.
++ VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
++
++ VkDeviceSize lastSize = 0;
++ for (size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
++ {
++ VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
++
++ // Only free suballocations can be registered in m_FreeSuballocationsBySize.
++ VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
++ // They must be sorted by size ascending.
++ VMA_VALIDATE(suballocItem->size >= lastSize);
++
++ lastSize = suballocItem->size;
++ }
++
++ // Check if totals match calculated values.
++ VMA_VALIDATE(ValidateFreeSuballocationList());
++ VMA_VALIDATE(calculatedOffset == GetSize());
++ VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
++ VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
++
++ return true;
++}
++
++void VmaBlockMetadata_Generic::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
++{
++ const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
++ inoutStats.statistics.blockCount++;
++ inoutStats.statistics.blockBytes += GetSize();
++
++ for (const auto& suballoc : m_Suballocations)
++ {
++ if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
++ VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
++ else
++ VmaAddDetailedStatisticsUnusedRange(inoutStats, suballoc.size);
++ }
++}
++
++void VmaBlockMetadata_Generic::AddStatistics(VmaStatistics& inoutStats) const
++{
++ inoutStats.blockCount++;
++ inoutStats.allocationCount += (uint32_t)m_Suballocations.size() - m_FreeCount;
++ inoutStats.blockBytes += GetSize();
++ inoutStats.allocationBytes += GetSize() - m_SumFreeSize;
++}
++
++#if VMA_STATS_STRING_ENABLED
++void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const
++{
++ PrintDetailedMap_Begin(json,
++ m_SumFreeSize, // unusedBytes
++ m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
++ m_FreeCount, // unusedRangeCount
++ mapRefCount);
++
++ for (const auto& suballoc : m_Suballocations)
++ {
++ if (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
++ {
++ PrintDetailedMap_UnusedRange(json, suballoc.offset, suballoc.size);
++ }
++ else
++ {
++ PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
++ }
++ }
++
++ PrintDetailedMap_End(json);
++}
++#endif // VMA_STATS_STRING_ENABLED
++
++bool VmaBlockMetadata_Generic::CreateAllocationRequest(
++ VkDeviceSize allocSize,
++ VkDeviceSize allocAlignment,
++ bool upperAddress,
++ VmaSuballocationType allocType,
++ uint32_t strategy,
++ VmaAllocationRequest* pAllocationRequest)
++{
++ VMA_ASSERT(allocSize > 0);
++ VMA_ASSERT(!upperAddress);
++ VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
++ VMA_ASSERT(pAllocationRequest != VMA_NULL);
++ VMA_HEAVY_ASSERT(Validate());
++
++ allocSize = AlignAllocationSize(allocSize);
++
++ pAllocationRequest->type = VmaAllocationRequestType::Normal;
++ pAllocationRequest->size = allocSize;
++
++ const VkDeviceSize debugMargin = GetDebugMargin();
++
++ // There is not enough total free space in this block to fulfill the request: Early return.
++ if (m_SumFreeSize < allocSize + debugMargin)
++ {
++ return false;
++ }
++
++ // New algorithm, efficiently searching freeSuballocationsBySize.
++ const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
++ if (freeSuballocCount > 0)
++ {
++ if (strategy == 0 ||
++ strategy == VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT)
++ {
++ // Find first free suballocation with size not less than allocSize + debugMargin.
++ VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
++ m_FreeSuballocationsBySize.data(),
++ m_FreeSuballocationsBySize.data() + freeSuballocCount,
++ allocSize + debugMargin,
++ VmaSuballocationItemSizeLess());
++ size_t index = it - m_FreeSuballocationsBySize.data();
++ for (; index < freeSuballocCount; ++index)
++ {
++ if (CheckAllocation(
++ allocSize,
++ allocAlignment,
++ allocType,
++ m_FreeSuballocationsBySize[index],
++ &pAllocationRequest->allocHandle))
++ {
++ pAllocationRequest->item = m_FreeSuballocationsBySize[index];
++ return true;
++ }
++ }
++ }
++ else if (strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
++ {
++ for (VmaSuballocationList::iterator it = m_Suballocations.begin();
++ it != m_Suballocations.end();
++ ++it)
++ {
++ if (it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
++ allocSize,
++ allocAlignment,
++ allocType,
++ it,
++ &pAllocationRequest->allocHandle))
++ {
++ pAllocationRequest->item = it;
++ return true;
++ }
++ }
++ }
++ else
++ {
++ VMA_ASSERT(strategy & (VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT | VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT));
++ // Search starting from the biggest suballocations.
++ for (size_t index = freeSuballocCount; index--; )
++ {
++ if (CheckAllocation(
++ allocSize,
++ allocAlignment,
++ allocType,
++ m_FreeSuballocationsBySize[index],
++ &pAllocationRequest->allocHandle))
++ {
++ pAllocationRequest->item = m_FreeSuballocationsBySize[index];
++ return true;
++ }
++ }
++ }
++ }
++
++ return false;
++}
++
++VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
++{
++ for (auto& suballoc : m_Suballocations)
++ {
++ if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
++ {
++ if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
++ {
++ VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
++ return VK_ERROR_UNKNOWN_COPY;
++ }
++ }
++ }
++
++ return VK_SUCCESS;
++}
++
++void VmaBlockMetadata_Generic::Alloc(
++ const VmaAllocationRequest& request,
++ VmaSuballocationType type,
++ void* userData)
++{
++ VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
++ VMA_ASSERT(request.item != m_Suballocations.end());
++ VmaSuballocation& suballoc = *request.item;
++ // Given suballocation is a free block.
++ VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
++
++ // Given offset is inside this suballocation.
++ VMA_ASSERT((VkDeviceSize)request.allocHandle - 1 >= suballoc.offset);
++ const VkDeviceSize paddingBegin = (VkDeviceSize)request.allocHandle - suballoc.offset - 1;
++ VMA_ASSERT(suballoc.size >= paddingBegin + request.size);
++ const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - request.size;
++
++ // Unregister this free suballocation from m_FreeSuballocationsBySize and update
++ // it to become used.
++ UnregisterFreeSuballocation(request.item);
++
++ suballoc.offset = (VkDeviceSize)request.allocHandle - 1;
++ suballoc.size = request.size;
++ suballoc.type = type;
++ suballoc.userData = userData;
++
++ // If there are any free bytes remaining at the end, insert new free suballocation after current one.
++ if (paddingEnd)
++ {
++ VmaSuballocation paddingSuballoc = {};
++ paddingSuballoc.offset = suballoc.offset + suballoc.size;
++ paddingSuballoc.size = paddingEnd;
++ paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
++ VmaSuballocationList::iterator next = request.item;
++ ++next;
++ const VmaSuballocationList::iterator paddingEndItem =
++ m_Suballocations.insert(next, paddingSuballoc);
++ RegisterFreeSuballocation(paddingEndItem);
++ }
++
++ // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
++ if (paddingBegin)
++ {
++ VmaSuballocation paddingSuballoc = {};
++ paddingSuballoc.offset = suballoc.offset - paddingBegin;
++ paddingSuballoc.size = paddingBegin;
++ paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
++ const VmaSuballocationList::iterator paddingBeginItem =
++ m_Suballocations.insert(request.item, paddingSuballoc);
++ RegisterFreeSuballocation(paddingBeginItem);
++ }
++
++ // Update totals.
++ m_FreeCount = m_FreeCount - 1;
++ if (paddingBegin > 0)
++ {
++ ++m_FreeCount;
++ }
++ if (paddingEnd > 0)
++ {
++ ++m_FreeCount;
++ }
++ m_SumFreeSize -= request.size;
++}
++
++void VmaBlockMetadata_Generic::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
++{
++ outInfo.offset = (VkDeviceSize)allocHandle - 1;
++ const VmaSuballocation& suballoc = *FindAtOffset(outInfo.offset);
++ outInfo.size = suballoc.size;
++ outInfo.pUserData = suballoc.userData;
++}
++
++void* VmaBlockMetadata_Generic::GetAllocationUserData(VmaAllocHandle allocHandle) const
++{
++ return FindAtOffset((VkDeviceSize)allocHandle - 1)->userData;
++}
++
++VmaAllocHandle VmaBlockMetadata_Generic::GetAllocationListBegin() const
++{
++ if (IsEmpty())
++ return VK_NULL_HANDLE;
++
++ for (const auto& suballoc : m_Suballocations)
++ {
++ if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
++ return (VmaAllocHandle)(suballoc.offset + 1);
++ }
++ VMA_ASSERT(false && "Should contain at least 1 allocation!");
++ return VK_NULL_HANDLE;
++}
++
++VmaAllocHandle VmaBlockMetadata_Generic::GetNextAllocation(VmaAllocHandle prevAlloc) const
++{
++ VmaSuballocationList::const_iterator prev = FindAtOffset((VkDeviceSize)prevAlloc - 1);
++
++ for (VmaSuballocationList::const_iterator it = ++prev; it != m_Suballocations.end(); ++it)
++ {
++ if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
++ return (VmaAllocHandle)(it->offset + 1);
++ }
++ return VK_NULL_HANDLE;
++}
++
++void VmaBlockMetadata_Generic::Clear()
++{
++ const VkDeviceSize size = GetSize();
++
++ VMA_ASSERT(IsVirtual());
++ m_FreeCount = 1;
++ m_SumFreeSize = size;
++ m_Suballocations.clear();
++ m_FreeSuballocationsBySize.clear();
++
++ VmaSuballocation suballoc = {};
++ suballoc.offset = 0;
++ suballoc.size = size;
++ suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
++ m_Suballocations.push_back(suballoc);
++
++ m_FreeSuballocationsBySize.push_back(m_Suballocations.begin());
++}
++
++void VmaBlockMetadata_Generic::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
++{
++ VmaSuballocation& suballoc = *FindAtOffset((VkDeviceSize)allocHandle - 1);
++ suballoc.userData = userData;
++}
++
++void VmaBlockMetadata_Generic::DebugLogAllAllocations() const
++{
++ for (const auto& suballoc : m_Suballocations)
++ {
++ if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
++ DebugLogAllocation(suballoc.offset, suballoc.size, suballoc.userData);
++ }
++}
++
++VmaSuballocationList::iterator VmaBlockMetadata_Generic::FindAtOffset(VkDeviceSize offset) const
++{
++ VMA_HEAVY_ASSERT(!m_Suballocations.empty());
++ const VkDeviceSize last = m_Suballocations.rbegin()->offset;
++ if (last == offset)
++ return m_Suballocations.rbegin().drop_const();
++ const VkDeviceSize first = m_Suballocations.begin()->offset;
++ if (first == offset)
++ return m_Suballocations.begin().drop_const();
++
++ const size_t suballocCount = m_Suballocations.size();
++ const VkDeviceSize step = (last - first + m_Suballocations.begin()->size) / suballocCount;
++ auto findSuballocation = [&](auto begin, auto end) -> VmaSuballocationList::iterator
++ {
++ for (auto suballocItem = begin;
++ suballocItem != end;
++ ++suballocItem)
++ {
++ if (suballocItem->offset == offset)
++ return suballocItem.drop_const();
++ }
++ VMA_ASSERT(false && "Not found!");
++ return m_Suballocations.end().drop_const();
++ };
++ // If requested offset is closer to the end of range, search from the end
++ if (offset - first > suballocCount * step / 2)
++ {
++ return findSuballocation(m_Suballocations.rbegin(), m_Suballocations.rend());
++ }
++ return findSuballocation(m_Suballocations.begin(), m_Suballocations.end());
++}
++
++bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
++{
++ VkDeviceSize lastSize = 0;
++ for (size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
++ {
++ const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
++
++ VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
++ VMA_VALIDATE(it->size >= lastSize);
++ lastSize = it->size;
++ }
++ return true;
++}
++
++bool VmaBlockMetadata_Generic::CheckAllocation(
++ VkDeviceSize allocSize,
++ VkDeviceSize allocAlignment,
++ VmaSuballocationType allocType,
++ VmaSuballocationList::const_iterator suballocItem,
++ VmaAllocHandle* pAllocHandle) const
++{
++ VMA_ASSERT(allocSize > 0);
++ VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
++ VMA_ASSERT(suballocItem != m_Suballocations.cend());
++ VMA_ASSERT(pAllocHandle != VMA_NULL);
++
++ const VkDeviceSize debugMargin = GetDebugMargin();
++ const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
++
++ const VmaSuballocation& suballoc = *suballocItem;
++ VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
++
++ // Size of this suballocation is too small for this request: Early return.
++ if (suballoc.size < allocSize)
++ {
++ return false;
++ }
++
++ // Start from offset equal to beginning of this suballocation.
++ VkDeviceSize offset = suballoc.offset + (suballocItem == m_Suballocations.cbegin() ? 0 : GetDebugMargin());
++
++ // Apply debugMargin from the end of previous alloc.
++ if (debugMargin > 0)
++ {
++ offset += debugMargin;
++ }
++
++ // Apply alignment.
++ offset = VmaAlignUp(offset, allocAlignment);
++
++ // Check previous suballocations for BufferImageGranularity conflicts.
++ // Make bigger alignment if necessary.
++ if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment)
++ {
++ bool bufferImageGranularityConflict = false;
++ VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
++ while (prevSuballocItem != m_Suballocations.cbegin())
++ {
++ --prevSuballocItem;
++ const VmaSuballocation& prevSuballoc = *prevSuballocItem;
++ if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, offset, bufferImageGranularity))
++ {
++ if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
++ {
++ bufferImageGranularityConflict = true;
++ break;
++ }
++ }
++ else
++ // Already on previous page.
++ break;
++ }
++ if (bufferImageGranularityConflict)
++ {
++ offset = VmaAlignUp(offset, bufferImageGranularity);
++ }
++ }
++
++ // Calculate padding at the beginning based on current offset.
++ const VkDeviceSize paddingBegin = offset - suballoc.offset;
++
++ // Fail if requested size plus margin after is bigger than size of this suballocation.
++ if (paddingBegin + allocSize + debugMargin > suballoc.size)
++ {
++ return false;
++ }
++
++ // Check next suballocations for BufferImageGranularity conflicts.
++ // If conflict exists, allocation cannot be made here.
++ if (allocSize % bufferImageGranularity || offset % bufferImageGranularity)
++ {
++ VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
++ ++nextSuballocItem;
++ while (nextSuballocItem != m_Suballocations.cend())
++ {
++ const VmaSuballocation& nextSuballoc = *nextSuballocItem;
++ if (VmaBlocksOnSamePage(offset, allocSize, nextSuballoc.offset, bufferImageGranularity))
++ {
++ if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
++ {
++ return false;
++ }
++ }
++ else
++ {
++ // Already on next page.
++ break;
++ }
++ ++nextSuballocItem;
++ }
++ }
++
++ *pAllocHandle = (VmaAllocHandle)(offset + 1);
++ // All tests passed: Success. pAllocHandle is already filled.
++ return true;
++}
++
++void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
++{
++ VMA_ASSERT(item != m_Suballocations.end());
++ VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
++
++ VmaSuballocationList::iterator nextItem = item;
++ ++nextItem;
++ VMA_ASSERT(nextItem != m_Suballocations.end());
++ VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
++
++ item->size += nextItem->size;
++ --m_FreeCount;
++ m_Suballocations.erase(nextItem);
++}
++
++VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
++{
++ // Change this suballocation to be marked as free.
++ VmaSuballocation& suballoc = *suballocItem;
++ suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
++ suballoc.userData = VMA_NULL;
++
++ // Update totals.
++ ++m_FreeCount;
++ m_SumFreeSize += suballoc.size;
++
++ // Merge with previous and/or next suballocation if it's also free.
++ bool mergeWithNext = false;
++ bool mergeWithPrev = false;
++
++ VmaSuballocationList::iterator nextItem = suballocItem;
++ ++nextItem;
++ if ((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
++ {
++ mergeWithNext = true;
++ }
++
++ VmaSuballocationList::iterator prevItem = suballocItem;
++ if (suballocItem != m_Suballocations.begin())
++ {
++ --prevItem;
++ if (prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
++ {
++ mergeWithPrev = true;
++ }
++ }
++
++ if (mergeWithNext)
++ {
++ UnregisterFreeSuballocation(nextItem);
++ MergeFreeWithNext(suballocItem);
++ }
++
++ if (mergeWithPrev)
++ {
++ UnregisterFreeSuballocation(prevItem);
++ MergeFreeWithNext(prevItem);
++ RegisterFreeSuballocation(prevItem);
++ return prevItem;
++ }
++ else
++ {
++ RegisterFreeSuballocation(suballocItem);
++ return suballocItem;
++ }
++}
++
++void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
++{
++ VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
++ VMA_ASSERT(item->size > 0);
++
++ // You may want to enable this validation at the beginning or at the end of
++ // this function, depending on what you want to check.
++ VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
++
++ if (m_FreeSuballocationsBySize.empty())
++ {
++ m_FreeSuballocationsBySize.push_back(item);
++ }
++ else
++ {
++ VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
++ }
++
++ //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
++}
++
++void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
++{
++ VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
++ VMA_ASSERT(item->size > 0);
++
++ // You may want to enable this validation at the beginning or at the end of
++ // this function, depending on what you want to check.
++ VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
++
++ VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
++ m_FreeSuballocationsBySize.data(),
++ m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
++ item,
++ VmaSuballocationItemSizeLess());
++ for (size_t index = it - m_FreeSuballocationsBySize.data();
++ index < m_FreeSuballocationsBySize.size();
++ ++index)
++ {
++ if (m_FreeSuballocationsBySize[index] == item)
++ {
++ VmaVectorRemove(m_FreeSuballocationsBySize, index);
++ return;
++ }
++ VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
++ }
++ VMA_ASSERT(0 && "Not found.");
++
++ //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
++}
++#endif // _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS
++#endif // _VMA_BLOCK_METADATA_GENERIC
++#endif // #if 0
++
++#ifndef _VMA_BLOCK_METADATA_LINEAR
++/*
++Allocations and their references in internal data structure look like this:
++
++if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
++
++ 0 +-------+
++ | |
++ | |
++ | |
++ +-------+
++ | Alloc | 1st[m_1stNullItemsBeginCount]
++ +-------+
++ | Alloc | 1st[m_1stNullItemsBeginCount + 1]
++ +-------+
++ | ... |
++ +-------+
++ | Alloc | 1st[1st.size() - 1]
++ +-------+
++ | |
++ | |
++ | |
++GetSize() +-------+
++
++if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
++
++ 0 +-------+
++ | Alloc | 2nd[0]
++ +-------+
++ | Alloc | 2nd[1]
++ +-------+
++ | ... |
++ +-------+
++ | Alloc | 2nd[2nd.size() - 1]
++ +-------+
++ | |
++ | |
++ | |
++ +-------+
++ | Alloc | 1st[m_1stNullItemsBeginCount]
++ +-------+
++ | Alloc | 1st[m_1stNullItemsBeginCount + 1]
++ +-------+
++ | ... |
++ +-------+
++ | Alloc | 1st[1st.size() - 1]
++ +-------+
++ | |
++GetSize() +-------+
++
++if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
++
++ 0 +-------+
++ | |
++ | |
++ | |
++ +-------+
++ | Alloc | 1st[m_1stNullItemsBeginCount]
++ +-------+
++ | Alloc | 1st[m_1stNullItemsBeginCount + 1]
++ +-------+
++ | ... |
++ +-------+
++ | Alloc | 1st[1st.size() - 1]
++ +-------+
++ | |
++ | |
++ | |
++ +-------+
++ | Alloc | 2nd[2nd.size() - 1]
++ +-------+
++ | ... |
++ +-------+
++ | Alloc | 2nd[1]
++ +-------+
++ | Alloc | 2nd[0]
++GetSize() +-------+
++
++*/
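++
++// Editorial note: a hedged sketch (not part of the upstream patch) of the
++// double-stack mode pictured above, expressed through the common
++// VmaBlockMetadata interface: a request with upperAddress == false lands in
++// the 1st vector growing up from offset 0, while upperAddress == true lands
++// in the 2nd vector growing down from GetSize().
++#if 0 // illustrative only, compiled out
++static void VmaLinearDoubleStackExample(VmaBlockMetadata& linearMetadata, void* userData)
++{
++ VmaAllocationRequest request = {};
++ if (linearMetadata.CreateAllocationRequest(256, 16, false /* lower stack */,
++ VMA_SUBALLOCATION_TYPE_BUFFER, 0, &request))
++ linearMetadata.Alloc(request, VMA_SUBALLOCATION_TYPE_BUFFER, userData);
++ if (linearMetadata.CreateAllocationRequest(256, 16, true /* upper stack */,
++ VMA_SUBALLOCATION_TYPE_BUFFER, 0, &request))
++ linearMetadata.Alloc(request, VMA_SUBALLOCATION_TYPE_BUFFER, userData);
++}
++#endif // illustrative only
++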
++class VmaBlockMetadata_Linear : public VmaBlockMetadata
++{
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_Linear)
++public:
++ VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,
++ VkDeviceSize bufferImageGranularity, bool isVirtual);
++ virtual ~VmaBlockMetadata_Linear() = default;
++
++ VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; }
++ bool IsEmpty() const override { return GetAllocationCount() == 0; }
++ VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }
++
++ void Init(VkDeviceSize size) override;
++ bool Validate() const override;
++ size_t GetAllocationCount() const override;
++ size_t GetFreeRegionsCount() const override;
++
++ void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
++ void AddStatistics(VmaStatistics& inoutStats) const override;
++
++#if VMA_STATS_STRING_ENABLED
++ void PrintDetailedMap(class VmaJsonWriter& json) const override;
++#endif
++
++ bool CreateAllocationRequest(
++ VkDeviceSize allocSize,
++ VkDeviceSize allocAlignment,
++ bool upperAddress,
++ VmaSuballocationType allocType,
++ uint32_t strategy,
++ VmaAllocationRequest* pAllocationRequest) override;
++
++ VkResult CheckCorruption(const void* pBlockData) override;
++
++ void Alloc(
++ const VmaAllocationRequest& request,
++ VmaSuballocationType type,
++ void* userData) override;
++
++ void Free(VmaAllocHandle allocHandle) override;
++ void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
++ void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
++ VmaAllocHandle GetAllocationListBegin() const override;
++ VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
++ VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
++ void Clear() override;
++ void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
++ void DebugLogAllAllocations() const override;
++
++private:
++ /*
++ There are two suballocation vectors, used in ping-pong fashion.
++ The one with index m_1stVectorIndex is called 1st.
++ The one with index (m_1stVectorIndex ^ 1) is called 2nd.
++ 2nd can be non-empty only when 1st is not empty.
++ When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
++ */
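++ // Example: with m_1stVectorIndex == 0, 1st is m_Suballocations0 and 2nd is
++ // m_Suballocations1; CleanupAfterFree() can flip the roles by XOR-ing the index.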
++ typedef VmaVector<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> SuballocationVectorType;
++
++ enum SECOND_VECTOR_MODE
++ {
++ SECOND_VECTOR_EMPTY,
++ /*
++ Suballocations in 2nd vector are created later than the ones in 1st, but they
++ all have smaller offsets.
++ */
++ SECOND_VECTOR_RING_BUFFER,
++ /*
++ Suballocations in 2nd vector are upper side of double stack.
++ They all have offsets higher than those in 1st vector.
++ Top of this stack means smaller offsets, but higher indices in this vector.
++ */
++ SECOND_VECTOR_DOUBLE_STACK,
++ };
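++ // A block commits to one non-empty mode on first use: an EndOf2nd request
++ // switches EMPTY to RING_BUFFER, an upper-address request switches to
++ // DOUBLE_STACK, and Alloc() asserts the two are never mixed in one block.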
++
++ VkDeviceSize m_SumFreeSize;
++ SuballocationVectorType m_Suballocations0, m_Suballocations1;
++ uint32_t m_1stVectorIndex;
++ SECOND_VECTOR_MODE m_2ndVectorMode;
++ // Number of items in 1st vector with userData == null (free) at the beginning.
++ size_t m_1stNullItemsBeginCount;
++ // Number of other items in 1st vector with userData == null (free) somewhere in the middle.
++ size_t m_1stNullItemsMiddleCount;
++ // Number of items in 2nd vector with userData == null (free).
++ size_t m_2ndNullItemsCount;
++
++ SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
++ SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
++ const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
++ const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
++
++ VmaSuballocation& FindSuballocation(VkDeviceSize offset) const;
++ bool ShouldCompact1st() const;
++ void CleanupAfterFree();
++
++ bool CreateAllocationRequest_LowerAddress(
++ VkDeviceSize allocSize,
++ VkDeviceSize allocAlignment,
++ VmaSuballocationType allocType,
++ uint32_t strategy,
++ VmaAllocationRequest* pAllocationRequest);
++ bool CreateAllocationRequest_UpperAddress(
++ VkDeviceSize allocSize,
++ VkDeviceSize allocAlignment,
++ VmaSuballocationType allocType,
++ uint32_t strategy,
++ VmaAllocationRequest* pAllocationRequest);
++};
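++
++// A minimal usage sketch (not part of the original patch) of how the linear
++// metadata above is typically driven through the public VMA API: a custom
++// pool created with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT routes its block to
++// VmaBlockMetadata_Linear, and VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT takes
++// the double-stack path. `allocator`, `memTypeIndex` and `bufCreateInfo` are
++// assumed to exist; error handling is omitted.
++#if 0
++VmaPoolCreateInfo poolCreateInfo = {};
++poolCreateInfo.memoryTypeIndex = memTypeIndex;
++poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
++poolCreateInfo.blockSize = 64ull * 1024 * 1024;
++poolCreateInfo.maxBlockCount = 1; // a single block, so the diagrams above apply directly
++
++VmaPool pool = VK_NULL_HANDLE;
++vmaCreatePool(allocator, &poolCreateInfo, &pool);
++
++VmaAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.pool = pool;
++// Allocate from the high end of the block; the first such request flips the
++// metadata into SECOND_VECTOR_DOUBLE_STACK mode.
++allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
++
++VkBuffer buf = VK_NULL_HANDLE;
++VmaAllocation alloc = VK_NULL_HANDLE;
++vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
++#endif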
++
++#ifndef _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS
++VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,
++ VkDeviceSize bufferImageGranularity, bool isVirtual)
++ : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
++ m_SumFreeSize(0),
++ m_Suballocations0(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
++ m_Suballocations1(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
++ m_1stVectorIndex(0),
++ m_2ndVectorMode(SECOND_VECTOR_EMPTY),
++ m_1stNullItemsBeginCount(0),
++ m_1stNullItemsMiddleCount(0),
++ m_2ndNullItemsCount(0) {}
++
++void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
++{
++ VmaBlockMetadata::Init(size);
++ m_SumFreeSize = size;
++}
++
++bool VmaBlockMetadata_Linear::Validate() const
++{
++ const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
++ const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
++
++ VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
++ VMA_VALIDATE(!suballocations1st.empty() ||
++ suballocations2nd.empty() ||
++ m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
++
++ if (!suballocations1st.empty())
++ {
++ // Any null item at the beginning should already be counted in m_1stNullItemsBeginCount.
++ VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].type != VMA_SUBALLOCATION_TYPE_FREE);
++ // A null item at the end should have been removed by pop_back().
++ VMA_VALIDATE(suballocations1st.back().type != VMA_SUBALLOCATION_TYPE_FREE);
++ }
++ if (!suballocations2nd.empty())
++ {
++ // A null item at the end should have been removed by pop_back().
++ VMA_VALIDATE(suballocations2nd.back().type != VMA_SUBALLOCATION_TYPE_FREE);
++ }
++
++ VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
++ VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
++
++ VkDeviceSize sumUsedSize = 0;
++ const size_t suballoc1stCount = suballocations1st.size();
++ const VkDeviceSize debugMargin = GetDebugMargin();
++ VkDeviceSize offset = 0;
++
++ if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
++ {
++ const size_t suballoc2ndCount = suballocations2nd.size();
++ size_t nullItem2ndCount = 0;
++ for (size_t i = 0; i < suballoc2ndCount; ++i)
++ {
++ const VmaSuballocation& suballoc = suballocations2nd[i];
++ const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
++
++ VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
++ if (!IsVirtual())
++ {
++ VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
++ }
++ VMA_VALIDATE(suballoc.offset >= offset);
++
++ if (!currFree)
++ {
++ if (!IsVirtual())
++ {
++ VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
++ VMA_VALIDATE(alloc->GetSize() == suballoc.size);
++ }
++ sumUsedSize += suballoc.size;
++ }
++ else
++ {
++ ++nullItem2ndCount;
++ }
++
++ offset = suballoc.offset + suballoc.size + debugMargin;
++ }
++
++ VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
++ }
++
++ for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
++ {
++ const VmaSuballocation& suballoc = suballocations1st[i];
++ VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
++ suballoc.userData == VMA_NULL);
++ }
++
++ size_t nullItem1stCount = m_1stNullItemsBeginCount;
++
++ for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
++ {
++ const VmaSuballocation& suballoc = suballocations1st[i];
++ const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
++
++ VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
++ if (!IsVirtual())
++ {
++ VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
++ }
++ VMA_VALIDATE(suballoc.offset >= offset);
++ VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
++
++ if (!currFree)
++ {
++ if (!IsVirtual())
++ {
++ VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
++ VMA_VALIDATE(alloc->GetSize() == suballoc.size);
++ }
++ sumUsedSize += suballoc.size;
++ }
++ else
++ {
++ ++nullItem1stCount;
++ }
++
++ offset = suballoc.offset + suballoc.size + debugMargin;
++ }
++ VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
++
++ if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
++ {
++ const size_t suballoc2ndCount = suballocations2nd.size();
++ size_t nullItem2ndCount = 0;
++ for (size_t i = suballoc2ndCount; i--; )
++ {
++ const VmaSuballocation& suballoc = suballocations2nd[i];
++ const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
++
++ VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
++ if (!IsVirtual())
++ {
++ VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
++ }
++ VMA_VALIDATE(suballoc.offset >= offset);
++
++ if (!currFree)
++ {
++ if (!IsVirtual())
++ {
++ VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
++ VMA_VALIDATE(alloc->GetSize() == suballoc.size);
++ }
++ sumUsedSize += suballoc.size;
++ }
++ else
++ {
++ ++nullItem2ndCount;
++ }
++
++ offset = suballoc.offset + suballoc.size + debugMargin;
++ }
++
++ VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
++ }
++
++ VMA_VALIDATE(offset <= GetSize());
++ VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
++
++ return true;
++}
++
++size_t VmaBlockMetadata_Linear::GetAllocationCount() const
++{
++ return AccessSuballocations1st().size() - m_1stNullItemsBeginCount - m_1stNullItemsMiddleCount +
++ AccessSuballocations2nd().size() - m_2ndNullItemsCount;
++}
++
++size_t VmaBlockMetadata_Linear::GetFreeRegionsCount() const
++{
++ // Function only used for defragmentation, which is disabled for this algorithm
++ VMA_ASSERT(0);
++ return SIZE_MAX;
++}
++
++void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
++{
++ const VkDeviceSize size = GetSize();
++ const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
++ const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
++ const size_t suballoc1stCount = suballocations1st.size();
++ const size_t suballoc2ndCount = suballocations2nd.size();
++
++ inoutStats.statistics.blockCount++;
++ inoutStats.statistics.blockBytes += size;
++
++ VkDeviceSize lastOffset = 0;
++
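++ // Walk the block in address order (cf. the layout diagrams above): the
++ // ring-buffer part of 2nd first (lowest offsets), then 1st, then, for a
++ // double stack, 2nd from back to front (highest offsets).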
++ if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
++ {
++ const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
++ size_t nextAlloc2ndIndex = 0;
++ while (lastOffset < freeSpace2ndTo1stEnd)
++ {
++ // Find next non-null allocation or move nextAlloc2ndIndex to the end.
++ while (nextAlloc2ndIndex < suballoc2ndCount &&
++ suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
++ {
++ ++nextAlloc2ndIndex;
++ }
++
++ // Found non-null allocation.
++ if (nextAlloc2ndIndex < suballoc2ndCount)
++ {
++ const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
++
++ // 1. Process free space before this allocation.
++ if (lastOffset < suballoc.offset)
++ {
++ // There is free space from lastOffset to suballoc.offset.
++ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
++ VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
++ }
++
++ // 2. Process this allocation.
++ // There is allocation with suballoc.offset, suballoc.size.
++ VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
++
++ // 3. Prepare for next iteration.
++ lastOffset = suballoc.offset + suballoc.size;
++ ++nextAlloc2ndIndex;
++ }
++ // We are at the end.
++ else
++ {
++ // There is free space from lastOffset to freeSpace2ndTo1stEnd.
++ if (lastOffset < freeSpace2ndTo1stEnd)
++ {
++ const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
++ VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
++ }
++
++ // End of loop.
++ lastOffset = freeSpace2ndTo1stEnd;
++ }
++ }
++ }
++
++ size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
++ const VkDeviceSize freeSpace1stTo2ndEnd =
++ m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
++ while (lastOffset < freeSpace1stTo2ndEnd)
++ {
++ // Find next non-null allocation or move nextAlloc1stIndex to the end.
++ while (nextAlloc1stIndex < suballoc1stCount &&
++ suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
++ {
++ ++nextAlloc1stIndex;
++ }
++
++ // Found non-null allocation.
++ if (nextAlloc1stIndex < suballoc1stCount)
++ {
++ const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
++
++ // 1. Process free space before this allocation.
++ if (lastOffset < suballoc.offset)
++ {
++ // There is free space from lastOffset to suballoc.offset.
++ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
++ VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
++ }
++
++ // 2. Process this allocation.
++ // There is allocation with suballoc.offset, suballoc.size.
++ VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
++
++ // 3. Prepare for next iteration.
++ lastOffset = suballoc.offset + suballoc.size;
++ ++nextAlloc1stIndex;
++ }
++ // We are at the end.
++ else
++ {
++ // There is free space from lastOffset to freeSpace1stTo2ndEnd.
++ if (lastOffset < freeSpace1stTo2ndEnd)
++ {
++ const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
++ VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
++ }
++
++ // End of loop.
++ lastOffset = freeSpace1stTo2ndEnd;
++ }
++ }
++
++ if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
++ {
++ size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
++ while (lastOffset < size)
++ {
++ // Find next non-null allocation or move nextAlloc2ndIndex to the end.
++ while (nextAlloc2ndIndex != SIZE_MAX &&
++ suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
++ {
++ --nextAlloc2ndIndex;
++ }
++
++ // Found non-null allocation.
++ if (nextAlloc2ndIndex != SIZE_MAX)
++ {
++ const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
++
++ // 1. Process free space before this allocation.
++ if (lastOffset < suballoc.offset)
++ {
++ // There is free space from lastOffset to suballoc.offset.
++ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
++ VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
++ }
++
++ // 2. Process this allocation.
++ // There is allocation with suballoc.offset, suballoc.size.
++ VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
++
++ // 3. Prepare for next iteration.
++ lastOffset = suballoc.offset + suballoc.size;
++ --nextAlloc2ndIndex;
++ }
++ // We are at the end.
++ else
++ {
++ // There is free space from lastOffset to size.
++ if (lastOffset < size)
++ {
++ const VkDeviceSize unusedRangeSize = size - lastOffset;
++ VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
++ }
++
++ // End of loop.
++ lastOffset = size;
++ }
++ }
++ }
++}
++
++void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const
++{
++ const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
++ const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
++ const VkDeviceSize size = GetSize();
++ const size_t suballoc1stCount = suballocations1st.size();
++ const size_t suballoc2ndCount = suballocations2nd.size();
++
++ inoutStats.blockCount++;
++ inoutStats.blockBytes += size;
++ inoutStats.allocationBytes += size - m_SumFreeSize;
++
++ VkDeviceSize lastOffset = 0;
++
++ if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
++ {
++ const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
++ size_t nextAlloc2ndIndex = 0; // scan 2nd from its beginning, as in AddDetailedStatistics
++ while (lastOffset < freeSpace2ndTo1stEnd)
++ {
++ // Find next non-null allocation or move nextAlloc2ndIndex to the end.
++ while (nextAlloc2ndIndex < suballoc2ndCount &&
++ suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
++ {
++ ++nextAlloc2ndIndex;
++ }
++
++ // Found non-null allocation.
++ if (nextAlloc2ndIndex < suballoc2ndCount)
++ {
++ const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
++
++ // Process this allocation.
++ // There is allocation with suballoc.offset, suballoc.size.
++ ++inoutStats.allocationCount;
++
++ // Prepare for next iteration.
++ lastOffset = suballoc.offset + suballoc.size;
++ ++nextAlloc2ndIndex;
++ }
++ // We are at the end.
++ else
++ {
++ // End of loop.
++ lastOffset = freeSpace2ndTo1stEnd;
++ }
++ }
++ }
++
++ size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
++ const VkDeviceSize freeSpace1stTo2ndEnd =
++ m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
++ while (lastOffset < freeSpace1stTo2ndEnd)
++ {
++ // Find next non-null allocation or move nextAlloc1stIndex to the end.
++ while (nextAlloc1stIndex < suballoc1stCount &&
++ suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
++ {
++ ++nextAlloc1stIndex;
++ }
++
++ // Found non-null allocation.
++ if (nextAlloc1stIndex < suballoc1stCount)
++ {
++ const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
++
++ // Process this allocation.
++ // There is allocation with suballoc.offset, suballoc.size.
++ ++inoutStats.allocationCount;
++
++ // Prepare for next iteration.
++ lastOffset = suballoc.offset + suballoc.size;
++ ++nextAlloc1stIndex;
++ }
++ // We are at the end.
++ else
++ {
++ // End of loop.
++ lastOffset = freeSpace1stTo2ndEnd;
++ }
++ }
++
++ if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
++ {
++ size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
++ while (lastOffset < size)
++ {
++ // Find next non-null allocation or move nextAlloc2ndIndex to the end.
++ while (nextAlloc2ndIndex != SIZE_MAX &&
++ suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
++ {
++ --nextAlloc2ndIndex;
++ }
++
++ // Found non-null allocation.
++ if (nextAlloc2ndIndex != SIZE_MAX)
++ {
++ const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
++
++ // Process this allocation.
++ // There is allocation with suballoc.offset, suballoc.size.
++ ++inoutStats.allocationCount;
++
++ // Prepare for next iteration.
++ lastOffset = suballoc.offset + suballoc.size;
++ --nextAlloc2ndIndex;
++ }
++ // We are at the end.
++ else
++ {
++ // End of loop.
++ lastOffset = size;
++ }
++ }
++ }
++}
++
++#if VMA_STATS_STRING_ENABLED
++void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
++{
++ const VkDeviceSize size = GetSize();
++ const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
++ const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
++ const size_t suballoc1stCount = suballocations1st.size();
++ const size_t suballoc2ndCount = suballocations2nd.size();
++
++ // FIRST PASS
++
++ size_t unusedRangeCount = 0;
++ VkDeviceSize usedBytes = 0;
++
++ VkDeviceSize lastOffset = 0;
++
++ size_t alloc2ndCount = 0;
++ if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
++ {
++ const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
++ size_t nextAlloc2ndIndex = 0;
++ while (lastOffset < freeSpace2ndTo1stEnd)
++ {
++ // Find next non-null allocation or move nextAlloc2ndIndex to the end.
++ while (nextAlloc2ndIndex < suballoc2ndCount &&
++ suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
++ {
++ ++nextAlloc2ndIndex;
++ }
++
++ // Found non-null allocation.
++ if (nextAlloc2ndIndex < suballoc2ndCount)
++ {
++ const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
++
++ // 1. Process free space before this allocation.
++ if (lastOffset < suballoc.offset)
++ {
++ // There is free space from lastOffset to suballoc.offset.
++ ++unusedRangeCount;
++ }
++
++ // 2. Process this allocation.
++ // There is allocation with suballoc.offset, suballoc.size.
++ ++alloc2ndCount;
++ usedBytes += suballoc.size;
++
++ // 3. Prepare for next iteration.
++ lastOffset = suballoc.offset + suballoc.size;
++ ++nextAlloc2ndIndex;
++ }
++ // We are at the end.
++ else
++ {
++ if (lastOffset < freeSpace2ndTo1stEnd)
++ {
++ // There is free space from lastOffset to freeSpace2ndTo1stEnd.
++ ++unusedRangeCount;
++ }
++
++ // End of loop.
++ lastOffset = freeSpace2ndTo1stEnd;
++ }
++ }
++ }
++
++ size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
++ size_t alloc1stCount = 0;
++ const VkDeviceSize freeSpace1stTo2ndEnd =
++ m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
++ while (lastOffset < freeSpace1stTo2ndEnd)
++ {
++ // Find next non-null allocation or move nextAlloc1stIndex to the end.
++ while (nextAlloc1stIndex < suballoc1stCount &&
++ suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
++ {
++ ++nextAlloc1stIndex;
++ }
++
++ // Found non-null allocation.
++ if (nextAlloc1stIndex < suballoc1stCount)
++ {
++ const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
++
++ // 1. Process free space before this allocation.
++ if (lastOffset < suballoc.offset)
++ {
++ // There is free space from lastOffset to suballoc.offset.
++ ++unusedRangeCount;
++ }
++
++ // 2. Process this allocation.
++ // There is allocation with suballoc.offset, suballoc.size.
++ ++alloc1stCount;
++ usedBytes += suballoc.size;
++
++ // 3. Prepare for next iteration.
++ lastOffset = suballoc.offset + suballoc.size;
++ ++nextAlloc1stIndex;
++ }
++ // We are at the end.
++ else
++ {
++ if (lastOffset < freeSpace1stTo2ndEnd)
++ {
++ // There is free space from lastOffset to freeSpace1stTo2ndEnd.
++ ++unusedRangeCount;
++ }
++
++ // End of loop.
++ lastOffset = freeSpace1stTo2ndEnd;
++ }
++ }
++
++ if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
++ {
++ size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
++ while (lastOffset < size)
++ {
++ // Find next non-null allocation or move nextAlloc2ndIndex to the end.
++ while (nextAlloc2ndIndex != SIZE_MAX &&
++ suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
++ {
++ --nextAlloc2ndIndex;
++ }
++
++ // Found non-null allocation.
++ if (nextAlloc2ndIndex != SIZE_MAX)
++ {
++ const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
++
++ // 1. Process free space before this allocation.
++ if (lastOffset < suballoc.offset)
++ {
++ // There is free space from lastOffset to suballoc.offset.
++ ++unusedRangeCount;
++ }
++
++ // 2. Process this allocation.
++ // There is allocation with suballoc.offset, suballoc.size.
++ ++alloc2ndCount;
++ usedBytes += suballoc.size;
++
++ // 3. Prepare for next iteration.
++ lastOffset = suballoc.offset + suballoc.size;
++ --nextAlloc2ndIndex;
++ }
++ // We are at the end.
++ else
++ {
++ if (lastOffset < size)
++ {
++ // There is free space from lastOffset to size.
++ ++unusedRangeCount;
++ }
++
++ // End of loop.
++ lastOffset = size;
++ }
++ }
++ }
++
++ const VkDeviceSize unusedBytes = size - usedBytes;
++ PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
++
++ // SECOND PASS
++ lastOffset = 0;
++
++ if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
++ {
++ const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
++ size_t nextAlloc2ndIndex = 0;
++ while (lastOffset < freeSpace2ndTo1stEnd)
++ {
++ // Find next non-null allocation or move nextAlloc2ndIndex to the end.
++ while (nextAlloc2ndIndex < suballoc2ndCount &&
++ suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
++ {
++ ++nextAlloc2ndIndex;
++ }
++
++ // Found non-null allocation.
++ if (nextAlloc2ndIndex < suballoc2ndCount)
++ {
++ const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
++
++ // 1. Process free space before this allocation.
++ if (lastOffset < suballoc.offset)
++ {
++ // There is free space from lastOffset to suballoc.offset.
++ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
++ PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
++ }
++
++ // 2. Process this allocation.
++ // There is allocation with suballoc.offset, suballoc.size.
++ PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
++
++ // 3. Prepare for next iteration.
++ lastOffset = suballoc.offset + suballoc.size;
++ ++nextAlloc2ndIndex;
++ }
++ // We are at the end.
++ else
++ {
++ if (lastOffset < freeSpace2ndTo1stEnd)
++ {
++ // There is free space from lastOffset to freeSpace2ndTo1stEnd.
++ const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
++ PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
++ }
++
++ // End of loop.
++ lastOffset = freeSpace2ndTo1stEnd;
++ }
++ }
++ }
++
++ nextAlloc1stIndex = m_1stNullItemsBeginCount;
++ while (lastOffset < freeSpace1stTo2ndEnd)
++ {
++ // Find next non-null allocation or move nextAlloc1stIndex to the end.
++ while (nextAlloc1stIndex < suballoc1stCount &&
++ suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
++ {
++ ++nextAlloc1stIndex;
++ }
++
++ // Found non-null allocation.
++ if (nextAlloc1stIndex < suballoc1stCount)
++ {
++ const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
++
++ // 1. Process free space before this allocation.
++ if (lastOffset < suballoc.offset)
++ {
++ // There is free space from lastOffset to suballoc.offset.
++ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
++ PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
++ }
++
++ // 2. Process this allocation.
++ // There is allocation with suballoc.offset, suballoc.size.
++ PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
++
++ // 3. Prepare for next iteration.
++ lastOffset = suballoc.offset + suballoc.size;
++ ++nextAlloc1stIndex;
++ }
++ // We are at the end.
++ else
++ {
++ if (lastOffset < freeSpace1stTo2ndEnd)
++ {
++ // There is free space from lastOffset to freeSpace1stTo2ndEnd.
++ const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
++ PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
++ }
++
++ // End of loop.
++ lastOffset = freeSpace1stTo2ndEnd;
++ }
++ }
++
++ if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
++ {
++ size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
++ while (lastOffset < size)
++ {
++ // Find next non-null allocation or move nextAlloc2ndIndex to the end.
++ while (nextAlloc2ndIndex != SIZE_MAX &&
++ suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
++ {
++ --nextAlloc2ndIndex;
++ }
++
++ // Found non-null allocation.
++ if (nextAlloc2ndIndex != SIZE_MAX)
++ {
++ const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
++
++ // 1. Process free space before this allocation.
++ if (lastOffset < suballoc.offset)
++ {
++ // There is free space from lastOffset to suballoc.offset.
++ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
++ PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
++ }
++
++ // 2. Process this allocation.
++ // There is allocation with suballoc.offset, suballoc.size.
++ PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
++
++ // 3. Prepare for next iteration.
++ lastOffset = suballoc.offset + suballoc.size;
++ --nextAlloc2ndIndex;
++ }
++ // We are at the end.
++ else
++ {
++ if (lastOffset < size)
++ {
++ // There is free space from lastOffset to size.
++ const VkDeviceSize unusedRangeSize = size - lastOffset;
++ PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
++ }
++
++ // End of loop.
++ lastOffset = size;
++ }
++ }
++ }
++
++ PrintDetailedMap_End(json);
++}
++#endif // VMA_STATS_STRING_ENABLED
++
++bool VmaBlockMetadata_Linear::CreateAllocationRequest(
++ VkDeviceSize allocSize,
++ VkDeviceSize allocAlignment,
++ bool upperAddress,
++ VmaSuballocationType allocType,
++ uint32_t strategy,
++ VmaAllocationRequest* pAllocationRequest)
++{
++ VMA_ASSERT(allocSize > 0);
++ VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
++ VMA_ASSERT(pAllocationRequest != VMA_NULL);
++ VMA_HEAVY_ASSERT(Validate());
++ pAllocationRequest->size = allocSize;
++ return upperAddress ?
++ CreateAllocationRequest_UpperAddress(
++ allocSize, allocAlignment, allocType, strategy, pAllocationRequest) :
++ CreateAllocationRequest_LowerAddress(
++ allocSize, allocAlignment, allocType, strategy, pAllocationRequest);
++}
++
++VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
++{
++ VMA_ASSERT(!IsVirtual());
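++ // With a nonzero debug margin, a magic value sits right past each live
++ // allocation (at suballoc.offset + suballoc.size); verify it in both vectors.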
++ SuballocationVectorType& suballocations1st = AccessSuballocations1st();
++ for (size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
++ {
++ const VmaSuballocation& suballoc = suballocations1st[i];
++ if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
++ {
++ if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
++ {
++ VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
++ return VK_ERROR_UNKNOWN_COPY;
++ }
++ }
++ }
++
++ SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
++ for (size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
++ {
++ const VmaSuballocation& suballoc = suballocations2nd[i];
++ if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
++ {
++ if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
++ {
++ VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
++ return VK_ERROR_UNKNOWN_COPY;
++ }
++ }
++ }
++
++ return VK_SUCCESS;
++}
++
++void VmaBlockMetadata_Linear::Alloc(
++ const VmaAllocationRequest& request,
++ VmaSuballocationType type,
++ void* userData)
++{
++ const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1;
++ const VmaSuballocation newSuballoc = { offset, request.size, userData, type };
++
++ switch (request.type)
++ {
++ case VmaAllocationRequestType::UpperAddress:
++ {
++ VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
++ "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
++ SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
++ suballocations2nd.push_back(newSuballoc);
++ m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
++ }
++ break;
++ case VmaAllocationRequestType::EndOf1st:
++ {
++ SuballocationVectorType& suballocations1st = AccessSuballocations1st();
++
++ VMA_ASSERT(suballocations1st.empty() ||
++ offset >= suballocations1st.back().offset + suballocations1st.back().size);
++ // Check if it fits before the end of the block.
++ VMA_ASSERT(offset + request.size <= GetSize());
++
++ suballocations1st.push_back(newSuballoc);
++ }
++ break;
++ case VmaAllocationRequestType::EndOf2nd:
++ {
++ SuballocationVectorType& suballocations1st = AccessSuballocations1st();
++ // New allocation goes at the end of the 2-part ring buffer, i.e. before the first allocation of the 1st vector.
++ VMA_ASSERT(!suballocations1st.empty() &&
++ offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset);
++ SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
++
++ switch (m_2ndVectorMode)
++ {
++ case SECOND_VECTOR_EMPTY:
++ // First allocation in the 2nd part of the ring buffer.
++ VMA_ASSERT(suballocations2nd.empty());
++ m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
++ break;
++ case SECOND_VECTOR_RING_BUFFER:
++ // 2-part ring buffer is already started.
++ VMA_ASSERT(!suballocations2nd.empty());
++ break;
++ case SECOND_VECTOR_DOUBLE_STACK:
++ VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
++ break;
++ default:
++ VMA_ASSERT(0);
++ }
++
++ suballocations2nd.push_back(newSuballoc);
++ }
++ break;
++ default:
++ VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
++ }
++
++ m_SumFreeSize -= newSuballoc.size;
++}
++
++void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle)
++{
++ SuballocationVectorType& suballocations1st = AccessSuballocations1st();
++ SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
++ VkDeviceSize offset = (VkDeviceSize)allocHandle - 1;
++
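++ // Fast paths first: the oldest allocation in 1st, then the newest in 2nd
++ // (ring buffer or upper stack), then the newest in 1st; only afterwards
++ // binary-search the middle of either vector.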
++ if (!suballocations1st.empty())
++ {
++ // If this is the oldest live allocation in 1st: mark it free and extend the null-item prefix.
++ VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
++ if (firstSuballoc.offset == offset)
++ {
++ firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
++ firstSuballoc.userData = VMA_NULL;
++ m_SumFreeSize += firstSuballoc.size;
++ ++m_1stNullItemsBeginCount;
++ CleanupAfterFree();
++ return;
++ }
++ }
++
++ // Last allocation in 2-part ring buffer or top of upper stack (same logic).
++ if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
++ m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
++ {
++ VmaSuballocation& lastSuballoc = suballocations2nd.back();
++ if (lastSuballoc.offset == offset)
++ {
++ m_SumFreeSize += lastSuballoc.size;
++ suballocations2nd.pop_back();
++ CleanupAfterFree();
++ return;
++ }
++ }
++ // Last allocation in 1st vector.
++ else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY)
++ {
++ VmaSuballocation& lastSuballoc = suballocations1st.back();
++ if (lastSuballoc.offset == offset)
++ {
++ m_SumFreeSize += lastSuballoc.size;
++ suballocations1st.pop_back();
++ CleanupAfterFree();
++ return;
++ }
++ }
++
++ VmaSuballocation refSuballoc;
++ refSuballoc.offset = offset;
++ // Rest of the members stay uninitialized intentionally for better performance.
++
++ // Item from the middle of 1st vector.
++ {
++ const SuballocationVectorType::iterator it = VmaBinaryFindSorted(
++ suballocations1st.begin() + m_1stNullItemsBeginCount,
++ suballocations1st.end(),
++ refSuballoc,
++ VmaSuballocationOffsetLess());
++ if (it != suballocations1st.end())
++ {
++ it->type = VMA_SUBALLOCATION_TYPE_FREE;
++ it->userData = VMA_NULL;
++ ++m_1stNullItemsMiddleCount;
++ m_SumFreeSize += it->size;
++ CleanupAfterFree();
++ return;
++ }
++ }
++
++ if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
++ {
++ // Item from the middle of 2nd vector.
++ const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
++ VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
++ VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
++ if (it != suballocations2nd.end())
++ {
++ it->type = VMA_SUBALLOCATION_TYPE_FREE;
++ it->userData = VMA_NULL;
++ ++m_2ndNullItemsCount;
++ m_SumFreeSize += it->size;
++ CleanupAfterFree();
++ return;
++ }
++ }
++
++ VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
++}
++
++void VmaBlockMetadata_Linear::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
++{
++ outInfo.offset = (VkDeviceSize)allocHandle - 1;
++ VmaSuballocation& suballoc = FindSuballocation(outInfo.offset);
++ outInfo.size = suballoc.size;
++ outInfo.pUserData = suballoc.userData;
++}
++
++void* VmaBlockMetadata_Linear::GetAllocationUserData(VmaAllocHandle allocHandle) const
++{
++ return FindSuballocation((VkDeviceSize)allocHandle - 1).userData;
++}
++
++VmaAllocHandle VmaBlockMetadata_Linear::GetAllocationListBegin() const
++{
++ // Function only used for defragmentation, which is disabled for this algorithm
++ VMA_ASSERT(0);
++ return VK_NULL_HANDLE;
++}
++
++VmaAllocHandle VmaBlockMetadata_Linear::GetNextAllocation(VmaAllocHandle prevAlloc) const
++{
++ // Function only used for defragmentation, which is disabled for this algorithm
++ VMA_ASSERT(0);
++ return VK_NULL_HANDLE;
++}
++
++VkDeviceSize VmaBlockMetadata_Linear::GetNextFreeRegionSize(VmaAllocHandle alloc) const
++{
++ // Function only used for defragmentation, which is disabled for this algorithm
++ VMA_ASSERT(0);
++ return 0;
++}
++
++void VmaBlockMetadata_Linear::Clear()
++{
++ m_SumFreeSize = GetSize();
++ m_Suballocations0.clear();
++ m_Suballocations1.clear();
++ // Leaving m_1stVectorIndex unchanged - it doesn't matter.
++ m_2ndVectorMode = SECOND_VECTOR_EMPTY;
++ m_1stNullItemsBeginCount = 0;
++ m_1stNullItemsMiddleCount = 0;
++ m_2ndNullItemsCount = 0;
++}
++
++void VmaBlockMetadata_Linear::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
++{
++ VmaSuballocation& suballoc = FindSuballocation((VkDeviceSize)allocHandle - 1);
++ suballoc.userData = userData;
++}
++
++void VmaBlockMetadata_Linear::DebugLogAllAllocations() const
++{
++ const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
++ for (auto it = suballocations1st.begin() + m_1stNullItemsBeginCount; it != suballocations1st.end(); ++it)
++ if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
++ DebugLogAllocation(it->offset, it->size, it->userData);
++
++ const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
++ for (auto it = suballocations2nd.begin(); it != suballocations2nd.end(); ++it)
++ if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
++ DebugLogAllocation(it->offset, it->size, it->userData);
++}
++
++VmaSuballocation& VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset) const
++{
++ const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
++ const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
++
++ VmaSuballocation refSuballoc;
++ refSuballoc.offset = offset;
++ // Rest of the members stay uninitialized intentionally for better performance.
++
++ // Item from the 1st vector.
++ {
++ SuballocationVectorType::const_iterator it = VmaBinaryFindSorted(
++ suballocations1st.begin() + m_1stNullItemsBeginCount,
++ suballocations1st.end(),
++ refSuballoc,
++ VmaSuballocationOffsetLess());
++ if (it != suballocations1st.end())
++ {
++ return const_cast<VmaSuballocation&>(*it);
++ }
++ }
++
++ if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
++ {
++ // Item from the 2nd vector.
++ SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
++ VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
++ VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
++ if (it != suballocations2nd.end())
++ {
++ return const_cast<VmaSuballocation&>(*it);
++ }
++ }
++
++ VMA_ASSERT(0 && "Allocation not found in linear allocator!");
++ return const_cast<VmaSuballocation&>(suballocations1st.back()); // Should never occur.
++}
++
++bool VmaBlockMetadata_Linear::ShouldCompact1st() const
++{
++ const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
++ const size_t suballocCount = AccessSuballocations1st().size();
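++ // Compact once the vector holds more than 32 items and null items outnumber
++ // live ones by at least 3:2.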
++ return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
++}
++
++void VmaBlockMetadata_Linear::CleanupAfterFree()
++{
++ SuballocationVectorType& suballocations1st = AccessSuballocations1st();
++ SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
++
++ if (IsEmpty())
++ {
++ suballocations1st.clear();
++ suballocations2nd.clear();
++ m_1stNullItemsBeginCount = 0;
++ m_1stNullItemsMiddleCount = 0;
++ m_2ndNullItemsCount = 0;
++ m_2ndVectorMode = SECOND_VECTOR_EMPTY;
++ }
++ else
++ {
++ const size_t suballoc1stCount = suballocations1st.size();
++ const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
++ VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
++
++ // Find more null items at the beginning of 1st vector.
++ while (m_1stNullItemsBeginCount < suballoc1stCount &&
++ suballocations1st[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
++ {
++ ++m_1stNullItemsBeginCount;
++ --m_1stNullItemsMiddleCount;
++ }
++
++ // Find more null items at the end of 1st vector.
++ while (m_1stNullItemsMiddleCount > 0 &&
++ suballocations1st.back().type == VMA_SUBALLOCATION_TYPE_FREE)
++ {
++ --m_1stNullItemsMiddleCount;
++ suballocations1st.pop_back();
++ }
++
++ // Find more null items at the end of 2nd vector.
++ while (m_2ndNullItemsCount > 0 &&
++ suballocations2nd.back().type == VMA_SUBALLOCATION_TYPE_FREE)
++ {
++ --m_2ndNullItemsCount;
++ suballocations2nd.pop_back();
++ }
++
++ // Find more null items at the beginning of 2nd vector.
++ while (m_2ndNullItemsCount > 0 &&
++ suballocations2nd[0].type == VMA_SUBALLOCATION_TYPE_FREE)
++ {
++ --m_2ndNullItemsCount;
++ VmaVectorRemove(suballocations2nd, 0);
++ }
++
++ if (ShouldCompact1st())
++ {
++ const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
++ size_t srcIndex = m_1stNullItemsBeginCount;
++ for (size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
++ {
++ while (suballocations1st[srcIndex].type == VMA_SUBALLOCATION_TYPE_FREE)
++ {
++ ++srcIndex;
++ }
++ if (dstIndex != srcIndex)
++ {
++ suballocations1st[dstIndex] = suballocations1st[srcIndex];
++ }
++ ++srcIndex;
++ }
++ suballocations1st.resize(nonNullItemCount);
++ m_1stNullItemsBeginCount = 0;
++ m_1stNullItemsMiddleCount = 0;
++ }
++
++ // 2nd vector became empty.
++ if (suballocations2nd.empty())
++ {
++ m_2ndVectorMode = SECOND_VECTOR_EMPTY;
++ }
++
++ // 1st vector became empty.
++ if (suballocations1st.size() - m_1stNullItemsBeginCount == 0)
++ {
++ suballocations1st.clear();
++ m_1stNullItemsBeginCount = 0;
++
++ if (!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
++ {
++ // Swap 1st with 2nd. Now 2nd is empty.
++ m_2ndVectorMode = SECOND_VECTOR_EMPTY;
++ m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
++ while (m_1stNullItemsBeginCount < suballocations2nd.size() &&
++ suballocations2nd[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
++ {
++ ++m_1stNullItemsBeginCount;
++ --m_1stNullItemsMiddleCount;
++ }
++ m_2ndNullItemsCount = 0;
++ m_1stVectorIndex ^= 1;
++ }
++ }
++ }
++
++ VMA_HEAVY_ASSERT(Validate());
++}
++
++bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
++ VkDeviceSize allocSize,
++ VkDeviceSize allocAlignment,
++ VmaSuballocationType allocType,
++ uint32_t strategy,
++ VmaAllocationRequest* pAllocationRequest)
++{
++ const VkDeviceSize blockSize = GetSize();
++ const VkDeviceSize debugMargin = GetDebugMargin();
++ const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
++ SuballocationVectorType& suballocations1st = AccessSuballocations1st();
++ SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
++
++ if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
++ {
++ // Try to allocate at the end of 1st vector.
++
++ VkDeviceSize resultBaseOffset = 0;
++ if (!suballocations1st.empty())
++ {
++ const VmaSuballocation& lastSuballoc = suballocations1st.back();
++ resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin;
++ }
++
++ // Start from offset equal to beginning of free space.
++ VkDeviceSize resultOffset = resultBaseOffset;
++
++ // Apply alignment.
++ resultOffset = VmaAlignUp(resultOffset, allocAlignment);
++
++ // Check previous suballocations for BufferImageGranularity conflicts.
++ // Make bigger alignment if necessary.
++ if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty())
++ {
++ bool bufferImageGranularityConflict = false;
++ for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
++ {
++ const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
++ if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
++ {
++ if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
++ {
++ bufferImageGranularityConflict = true;
++ break;
++ }
++ }
++ else
++ // Already on previous page.
++ break;
++ }
++ if (bufferImageGranularityConflict)
++ {
++ resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
++ }
++ }
++
++ const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
++ suballocations2nd.back().offset : blockSize;
++
++ // There is enough free space at the end after alignment.
++ if (resultOffset + allocSize + debugMargin <= freeSpaceEnd)
++ {
++ // Check next suballocations for BufferImageGranularity conflicts.
++ // If conflict exists, allocation cannot be made here.
++ if ((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
++ {
++ for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
++ {
++ const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
++ if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
++ {
++ if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
++ {
++ return false;
++ }
++ }
++ else
++ {
++ // Already on previous page.
++ break;
++ }
++ }
++ }
++
++ // All tests passed: Success.
++ pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
++ // pAllocationRequest->item, customData unused.
++ pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
++ return true;
++ }
++ }
++
++ // Wrap around to the end of the 2nd vector. Try to allocate there, treating
++ // the beginning of the 1st vector as the end of the free space.
++ if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
++ {
++ VMA_ASSERT(!suballocations1st.empty());
++
++ VkDeviceSize resultBaseOffset = 0;
++ if (!suballocations2nd.empty())
++ {
++ const VmaSuballocation& lastSuballoc = suballocations2nd.back();
++ resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin;
++ }
++
++ // Start from offset equal to beginning of free space.
++ VkDeviceSize resultOffset = resultBaseOffset;
++
++ // Apply alignment.
++ resultOffset = VmaAlignUp(resultOffset, allocAlignment);
++
++ // Check previous suballocations for BufferImageGranularity conflicts.
++ // Make bigger alignment if necessary.
++ if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
++ {
++ bool bufferImageGranularityConflict = false;
++ for (size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
++ {
++ const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
++ if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
++ {
++ if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
++ {
++ bufferImageGranularityConflict = true;
++ break;
++ }
++ }
++ else
++ // Already on previous page.
++ break;
++ }
++ if (bufferImageGranularityConflict)
++ {
++ resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
++ }
++ }
++
++ size_t index1st = m_1stNullItemsBeginCount;
++
++ // There is enough free space at the end after alignment.
++ if ((index1st == suballocations1st.size() && resultOffset + allocSize + debugMargin <= blockSize) ||
++ (index1st < suballocations1st.size() && resultOffset + allocSize + debugMargin <= suballocations1st[index1st].offset))
++ {
++ // Check next suballocations for BufferImageGranularity conflicts.
++ // If conflict exists, allocation cannot be made here.
++ if (allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity)
++ {
++ for (size_t nextSuballocIndex = index1st;
++ nextSuballocIndex < suballocations1st.size();
++ nextSuballocIndex++)
++ {
++ const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
++ if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
++ {
++ if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
++ {
++ return false;
++ }
++ }
++ else
++ {
++ // Already on next page.
++ break;
++ }
++ }
++ }
++
++ // All tests passed: Success.
++ pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
++ pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
++ // pAllocationRequest->item, customData unused.
++ return true;
++ }
++ }
++
++ return false;
++}
++
++bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
++ VkDeviceSize allocSize,
++ VkDeviceSize allocAlignment,
++ VmaSuballocationType allocType,
++ uint32_t strategy,
++ VmaAllocationRequest* pAllocationRequest)
++{
++ const VkDeviceSize blockSize = GetSize();
++ const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
++ SuballocationVectorType& suballocations1st = AccessSuballocations1st();
++ SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
++
++ if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
++ {
++ VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
++ return false;
++ }
++
++ // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
++ if (allocSize > blockSize)
++ {
++ return false;
++ }
++ VkDeviceSize resultBaseOffset = blockSize - allocSize;
++ if (!suballocations2nd.empty())
++ {
++ const VmaSuballocation& lastSuballoc = suballocations2nd.back();
++ resultBaseOffset = lastSuballoc.offset - allocSize;
++ if (allocSize > lastSuballoc.offset)
++ {
++ return false;
++ }
++ }
++
++ // Start from offset equal to end of free space.
++ VkDeviceSize resultOffset = resultBaseOffset;
++
++ const VkDeviceSize debugMargin = GetDebugMargin();
++
++ // Apply debugMargin at the end.
++ if (debugMargin > 0)
++ {
++ if (resultOffset < debugMargin)
++ {
++ return false;
++ }
++ resultOffset -= debugMargin;
++ }
++
++ // Apply alignment.
++ resultOffset = VmaAlignDown(resultOffset, allocAlignment);
++
++ // Check next suballocations from 2nd for BufferImageGranularity conflicts.
++ // Make bigger alignment if necessary.
++ if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
++ {
++ bool bufferImageGranularityConflict = false;
++ for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
++ {
++ const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
++ if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
++ {
++ if (VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
++ {
++ bufferImageGranularityConflict = true;
++ break;
++ }
++ }
++ else
++ // Already on previous page.
++ break;
++ }
++ if (bufferImageGranularityConflict)
++ {
++ resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
++ }
++ }
++
++ // There is enough free space.
++ const VkDeviceSize endOf1st = !suballocations1st.empty() ?
++ suballocations1st.back().offset + suballocations1st.back().size :
++ 0;
++ if (endOf1st + debugMargin <= resultOffset)
++ {
++ // Check previous suballocations for BufferImageGranularity conflicts.
++ // If conflict exists, allocation cannot be made here.
++ if (bufferImageGranularity > 1)
++ {
++ for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
++ {
++ const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
++ if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
++ {
++ if (VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
++ {
++ return false;
++ }
++ }
++ else
++ {
++ // Already on next page.
++ break;
++ }
++ }
++ }
++
++ // All tests passed: Success.
++ pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
++ // pAllocationRequest->item unused.
++ pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
++ return true;
++ }
++
++ return false;
++}
++#endif // _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS
++#endif // _VMA_BLOCK_METADATA_LINEAR
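++
++// Sketch (not from the original patch): the same linear metadata also backs
++// VMA's virtual allocator when a virtual block is created with
++// VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT. Sizes are arbitrary and
++// error handling is omitted.
++#if 0
++VmaVirtualBlockCreateInfo blockCreateInfo = {};
++blockCreateInfo.size = 1024 * 1024; // 1 MiB of address space, no real memory
++blockCreateInfo.flags = VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT;
++
++VmaVirtualBlock block = VK_NULL_HANDLE;
++vmaCreateVirtualBlock(&blockCreateInfo, &block);
++
++VmaVirtualAllocationCreateInfo vAllocCreateInfo = {};
++vAllocCreateInfo.size = 4096;
++
++VmaVirtualAllocation vAlloc = VK_NULL_HANDLE;
++VkDeviceSize offset = 0; // returned as the alloc handle minus 1 (see GetAllocationOffset)
++vmaVirtualAllocate(block, &vAllocCreateInfo, &vAlloc, &offset);
++
++vmaVirtualFree(block, vAlloc);
++vmaDestroyVirtualBlock(block);
++#endif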
++
++#if 0
++#ifndef _VMA_BLOCK_METADATA_BUDDY
++/*
++- GetSize() is the original size of allocated memory block.
++- m_UsableSize is this size aligned down to a power of two.
++ All allocations and calculations happen relative to m_UsableSize.
++- GetUnusableSize() is the difference between them.
++ It is reported as separate, unused range, not available for allocations.
++
++Node at level 0 has size = m_UsableSize.
++Each next level contains nodes with size 2 times smaller than current level.
++m_LevelCount is the maximum number of levels to use in the current object.
++*/
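++// Worked example of the numbers above (illustrative): for a 1000-byte block,
++// m_UsableSize = VmaPrevPow2(1000) = 512 and GetUnusableSize() = 488; with
++// minNodeSize = 16 the node sizes per level are 512, 256, 128, 64, 32, 16,
++// giving m_LevelCount = 6.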
++class VmaBlockMetadata_Buddy : public VmaBlockMetadata
++{
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_Buddy)
++public:
++ VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks,
++ VkDeviceSize bufferImageGranularity, bool isVirtual);
++ virtual ~VmaBlockMetadata_Buddy();
++
++ size_t GetAllocationCount() const override { return m_AllocationCount; }
++ VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize + GetUnusableSize(); }
++ bool IsEmpty() const override { return m_Root->type == Node::TYPE_FREE; }
++ VkResult CheckCorruption(const void* pBlockData) override { return VK_ERROR_FEATURE_NOT_PRESENT; }
++ VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }
++ void DebugLogAllAllocations() const override { DebugLogAllAllocationNode(m_Root, 0); }
++
++ void Init(VkDeviceSize size) override;
++ bool Validate() const override;
++
++ void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
++ void AddStatistics(VmaStatistics& inoutStats) const override;
++
++#if VMA_STATS_STRING_ENABLED
++ void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override;
++#endif
++
++ bool CreateAllocationRequest(
++ VkDeviceSize allocSize,
++ VkDeviceSize allocAlignment,
++ bool upperAddress,
++ VmaSuballocationType allocType,
++ uint32_t strategy,
++ VmaAllocationRequest* pAllocationRequest) override;
++
++ void Alloc(
++ const VmaAllocationRequest& request,
++ VmaSuballocationType type,
++ void* userData) override;
++
++ void Free(VmaAllocHandle allocHandle) override;
++ void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
++ void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
++ VmaAllocHandle GetAllocationListBegin() const override;
++ VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
++ void Clear() override;
++ void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
++
++private:
++ static const size_t MAX_LEVELS = 48;
++
++ struct ValidationContext
++ {
++ size_t calculatedAllocationCount = 0;
++ size_t calculatedFreeCount = 0;
++ VkDeviceSize calculatedSumFreeSize = 0;
++ };
++ struct Node
++ {
++ VkDeviceSize offset;
++ enum TYPE
++ {
++ TYPE_FREE,
++ TYPE_ALLOCATION,
++ TYPE_SPLIT,
++ TYPE_COUNT
++ } type;
++ Node* parent;
++ Node* buddy;
++
++ union
++ {
++ struct
++ {
++ Node* prev;
++ Node* next;
++ } free;
++ struct
++ {
++ void* userData;
++ } allocation;
++ struct
++ {
++ Node* leftChild;
++ } split;
++ };
++ };
++
++ // Size of the memory block aligned down to a power of two.
++ VkDeviceSize m_UsableSize;
++ uint32_t m_LevelCount;
++ VmaPoolAllocator<Node> m_NodeAllocator;
++ Node* m_Root;
++ struct
++ {
++ Node* front;
++ Node* back;
++ } m_FreeList[MAX_LEVELS];
++
++ // Number of nodes in the tree with type == TYPE_ALLOCATION.
++ size_t m_AllocationCount;
++ // Number of nodes in the tree with type == TYPE_FREE.
++ size_t m_FreeCount;
++ // Doesn't include space wasted due to internal fragmentation - allocation sizes are just aligned up to node sizes.
++ // Doesn't include unusable size.
++ VkDeviceSize m_SumFreeSize;
++
++ VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
++ VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
++
++ VkDeviceSize AlignAllocationSize(VkDeviceSize size) const
++ {
++ if (!IsVirtual())
++ {
++ size = VmaAlignUp(size, (VkDeviceSize)16);
++ }
++ return VmaNextPow2(size);
++ }
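++ // E.g. AlignAllocationSize(100) for a non-virtual block: aligned up to 112
++ // (a multiple of 16), then rounded up to the next power of two, 128.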
++ Node* FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const;
++ void DeleteNodeChildren(Node* node);
++ bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
++ uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
++ void AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const;
++ // Adds node to the front of FreeList at given level.
++ // node->type must be FREE.
++ // node->free.prev, next can be undefined.
++ void AddToFreeListFront(uint32_t level, Node* node);
++ // Removes node from FreeList at given level.
++ // node->type must be FREE.
++ // node->free.prev, next stay untouched.
++ void RemoveFromFreeList(uint32_t level, Node* node);
++ void DebugLogAllAllocationNode(Node* node, uint32_t level) const;
++
++#if VMA_STATS_STRING_ENABLED
++ void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
++#endif
++};
++
++#ifndef _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS
++VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks,
++ VkDeviceSize bufferImageGranularity, bool isVirtual)
++ : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
++ m_NodeAllocator(pAllocationCallbacks, 32), // firstBlockCapacity
++ m_Root(VMA_NULL),
++ m_AllocationCount(0),
++ m_FreeCount(1),
++ m_SumFreeSize(0)
++{
++ memset(m_FreeList, 0, sizeof(m_FreeList));
++}
++
++VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
++{
++ DeleteNodeChildren(m_Root);
++ m_NodeAllocator.Free(m_Root);
++}
++
++void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
++{
++ VmaBlockMetadata::Init(size);
++
++ m_UsableSize = VmaPrevPow2(size);
++ m_SumFreeSize = m_UsableSize;
++
++ // Calculate m_LevelCount.
++ const VkDeviceSize minNodeSize = IsVirtual() ? 1 : 16;
++ m_LevelCount = 1;
++ while (m_LevelCount < MAX_LEVELS &&
++ LevelToNodeSize(m_LevelCount) >= minNodeSize)
++ {
++ ++m_LevelCount;
++ }
++
++ Node* rootNode = m_NodeAllocator.Alloc();
++ rootNode->offset = 0;
++ rootNode->type = Node::TYPE_FREE;
++ rootNode->parent = VMA_NULL;
++ rootNode->buddy = VMA_NULL;
++
++ m_Root = rootNode;
++ AddToFreeListFront(0, rootNode);
++}
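++
++// Worked example (editor's note, not part of upstream VMA): Init(1000) on a
++// non-virtual block yields m_UsableSize = VmaPrevPow2(1000) = 512, so levels
++// 0..5 get node sizes 512, 256, 128, 64, 32, 16 (m_LevelCount == 6; the next
++// halving would drop below minNodeSize == 16). The remaining 1000 - 512 = 488
++// bytes form the unusable tail reported by GetUnusableSize().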
++
++bool VmaBlockMetadata_Buddy::Validate() const
++{
++ // Validate tree.
++ ValidationContext ctx;
++ if (!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
++ {
++ VMA_VALIDATE(false && "ValidateNode failed.");
++ }
++ VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
++ VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
++
++ // Validate free node lists.
++ for (uint32_t level = 0; level < m_LevelCount; ++level)
++ {
++ VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
++ m_FreeList[level].front->free.prev == VMA_NULL);
++
++ for (Node* node = m_FreeList[level].front;
++ node != VMA_NULL;
++ node = node->free.next)
++ {
++ VMA_VALIDATE(node->type == Node::TYPE_FREE);
++
++ if (node->free.next == VMA_NULL)
++ {
++ VMA_VALIDATE(m_FreeList[level].back == node);
++ }
++ else
++ {
++ VMA_VALIDATE(node->free.next->free.prev == node);
++ }
++ }
++ }
++
++    // Validate that free lists at higher levels are empty.
++ for (uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
++ {
++ VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
++ }
++
++ return true;
++}
++
++void VmaBlockMetadata_Buddy::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
++{
++ inoutStats.statistics.blockCount++;
++ inoutStats.statistics.blockBytes += GetSize();
++
++ AddNodeToDetailedStatistics(inoutStats, m_Root, LevelToNodeSize(0));
++
++ const VkDeviceSize unusableSize = GetUnusableSize();
++ if (unusableSize > 0)
++ VmaAddDetailedStatisticsUnusedRange(inoutStats, unusableSize);
++}
++
++void VmaBlockMetadata_Buddy::AddStatistics(VmaStatistics& inoutStats) const
++{
++ inoutStats.blockCount++;
++ inoutStats.allocationCount += (uint32_t)m_AllocationCount;
++ inoutStats.blockBytes += GetSize();
++ inoutStats.allocationBytes += GetSize() - m_SumFreeSize;
++}
++
++#if VMA_STATS_STRING_ENABLED
++void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const
++{
++ VmaDetailedStatistics stats;
++ VmaClearDetailedStatistics(stats);
++ AddDetailedStatistics(stats);
++
++ PrintDetailedMap_Begin(
++ json,
++ stats.statistics.blockBytes - stats.statistics.allocationBytes,
++ stats.statistics.allocationCount,
++ stats.unusedRangeCount,
++ mapRefCount);
++
++ PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
++
++ const VkDeviceSize unusableSize = GetUnusableSize();
++ if (unusableSize > 0)
++ {
++ PrintDetailedMap_UnusedRange(json,
++ m_UsableSize, // offset
++ unusableSize); // size
++ }
++
++ PrintDetailedMap_End(json);
++}
++#endif // VMA_STATS_STRING_ENABLED
++
++bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
++ VkDeviceSize allocSize,
++ VkDeviceSize allocAlignment,
++ bool upperAddress,
++ VmaSuballocationType allocType,
++ uint32_t strategy,
++ VmaAllocationRequest* pAllocationRequest)
++{
++ VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
++
++ allocSize = AlignAllocationSize(allocSize);
++
++ // Simple way to respect bufferImageGranularity. May be optimized some day.
++ // Whenever it might be an OPTIMAL image...
++ if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
++ allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
++ allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
++ {
++ allocAlignment = VMA_MAX(allocAlignment, GetBufferImageGranularity());
++ allocSize = VmaAlignUp(allocSize, GetBufferImageGranularity());
++ }
++
++ if (allocSize > m_UsableSize)
++ {
++ return false;
++ }
++
++ const uint32_t targetLevel = AllocSizeToLevel(allocSize);
++    for (uint32_t level = targetLevel + 1; level--; ) // +1 so the loop also visits targetLevel itself
++ {
++ for (Node* freeNode = m_FreeList[level].front;
++ freeNode != VMA_NULL;
++ freeNode = freeNode->free.next)
++ {
++ if (freeNode->offset % allocAlignment == 0)
++ {
++ pAllocationRequest->type = VmaAllocationRequestType::Normal;
++ pAllocationRequest->allocHandle = (VmaAllocHandle)(freeNode->offset + 1);
++ pAllocationRequest->size = allocSize;
++ pAllocationRequest->customData = (void*)(uintptr_t)level;
++ return true;
++ }
++ }
++ }
++
++ return false;
++}
++
++void VmaBlockMetadata_Buddy::Alloc(
++ const VmaAllocationRequest& request,
++ VmaSuballocationType type,
++ void* userData)
++{
++ VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
++
++ const uint32_t targetLevel = AllocSizeToLevel(request.size);
++ uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
++
++ Node* currNode = m_FreeList[currLevel].front;
++ VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
++ const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1;
++ while (currNode->offset != offset)
++ {
++ currNode = currNode->free.next;
++ VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
++ }
++
++ // Go down, splitting free nodes.
++ while (currLevel < targetLevel)
++ {
++        // currNode is already the first free node at currLevel.
++        // Remove it from the list of free nodes at this level.
++ RemoveFromFreeList(currLevel, currNode);
++
++ const uint32_t childrenLevel = currLevel + 1;
++
++ // Create two free sub-nodes.
++ Node* leftChild = m_NodeAllocator.Alloc();
++ Node* rightChild = m_NodeAllocator.Alloc();
++
++ leftChild->offset = currNode->offset;
++ leftChild->type = Node::TYPE_FREE;
++ leftChild->parent = currNode;
++ leftChild->buddy = rightChild;
++
++ rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
++ rightChild->type = Node::TYPE_FREE;
++ rightChild->parent = currNode;
++ rightChild->buddy = leftChild;
++
++        // Convert currNode to split type.
++ currNode->type = Node::TYPE_SPLIT;
++ currNode->split.leftChild = leftChild;
++
++ // Add child nodes to free list. Order is important!
++ AddToFreeListFront(childrenLevel, rightChild);
++ AddToFreeListFront(childrenLevel, leftChild);
++
++ ++m_FreeCount;
++ ++currLevel;
++ currNode = m_FreeList[currLevel].front;
++
++ /*
++ We can be sure that currNode, as left child of node previously split,
++ also fulfills the alignment requirement.
++ */
++ }
++
++ // Remove from free list.
++ VMA_ASSERT(currLevel == targetLevel &&
++ currNode != VMA_NULL &&
++ currNode->type == Node::TYPE_FREE);
++ RemoveFromFreeList(currLevel, currNode);
++
++ // Convert to allocation node.
++ currNode->type = Node::TYPE_ALLOCATION;
++ currNode->allocation.userData = userData;
++
++ ++m_AllocationCount;
++ --m_FreeCount;
++ m_SumFreeSize -= request.size;
++}
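++
++// Worked example (editor's note, not part of upstream VMA): with node sizes
++// 512/256/128/64 on levels 0-3, a 64-byte request whose best free node sits at
++// level 1 (256 bytes) splits twice: the 256 node becomes two free 128s, then
++// the left 128 becomes two free 64s, and the left 64 is converted to
++// TYPE_ALLOCATION. Each split raises m_FreeCount by one (one free node is
++// replaced by two); the final conversion lowers it by one again.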
++
++void VmaBlockMetadata_Buddy::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
++{
++ uint32_t level = 0;
++ outInfo.offset = (VkDeviceSize)allocHandle - 1;
++ const Node* const node = FindAllocationNode(outInfo.offset, level);
++ outInfo.size = LevelToNodeSize(level);
++ outInfo.pUserData = node->allocation.userData;
++}
++
++void* VmaBlockMetadata_Buddy::GetAllocationUserData(VmaAllocHandle allocHandle) const
++{
++ uint32_t level = 0;
++ const Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
++ return node->allocation.userData;
++}
++
++VmaAllocHandle VmaBlockMetadata_Buddy::GetAllocationListBegin() const
++{
++ // Function only used for defragmentation, which is disabled for this algorithm
++ return VK_NULL_HANDLE;
++}
++
++VmaAllocHandle VmaBlockMetadata_Buddy::GetNextAllocation(VmaAllocHandle prevAlloc) const
++{
++ // Function only used for defragmentation, which is disabled for this algorithm
++ return VK_NULL_HANDLE;
++}
++
++void VmaBlockMetadata_Buddy::DeleteNodeChildren(Node* node)
++{
++ if (node->type == Node::TYPE_SPLIT)
++ {
++ DeleteNodeChildren(node->split.leftChild->buddy);
++ DeleteNodeChildren(node->split.leftChild);
++ m_NodeAllocator.Free(node->split.leftChild->buddy);
++ m_NodeAllocator.Free(node->split.leftChild);
++ }
++}
++
++void VmaBlockMetadata_Buddy::Clear()
++{
++ DeleteNodeChildren(m_Root);
++ m_Root->type = Node::TYPE_FREE;
++ m_AllocationCount = 0;
++ m_FreeCount = 1;
++ m_SumFreeSize = m_UsableSize;
++}
++
++void VmaBlockMetadata_Buddy::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
++{
++ uint32_t level = 0;
++ Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
++ node->allocation.userData = userData;
++}
++
++VmaBlockMetadata_Buddy::Node* VmaBlockMetadata_Buddy::FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const
++{
++ Node* node = m_Root;
++ VkDeviceSize nodeOffset = 0;
++ outLevel = 0;
++ VkDeviceSize levelNodeSize = LevelToNodeSize(0);
++ while (node->type == Node::TYPE_SPLIT)
++ {
++ const VkDeviceSize nextLevelNodeSize = levelNodeSize >> 1;
++ if (offset < nodeOffset + nextLevelNodeSize)
++ {
++ node = node->split.leftChild;
++ }
++ else
++ {
++ node = node->split.leftChild->buddy;
++ nodeOffset += nextLevelNodeSize;
++ }
++ ++outLevel;
++ levelNodeSize = nextLevelNodeSize;
++ }
++
++ VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
++ return node;
++}
++
++bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
++{
++ VMA_VALIDATE(level < m_LevelCount);
++ VMA_VALIDATE(curr->parent == parent);
++ VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
++ VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
++ switch (curr->type)
++ {
++ case Node::TYPE_FREE:
++ // curr->free.prev, next are validated separately.
++ ctx.calculatedSumFreeSize += levelNodeSize;
++ ++ctx.calculatedFreeCount;
++ break;
++ case Node::TYPE_ALLOCATION:
++ ++ctx.calculatedAllocationCount;
++ if (!IsVirtual())
++ {
++ VMA_VALIDATE(curr->allocation.userData != VMA_NULL);
++ }
++ break;
++ case Node::TYPE_SPLIT:
++ {
++ const uint32_t childrenLevel = level + 1;
++ const VkDeviceSize childrenLevelNodeSize = levelNodeSize >> 1;
++ const Node* const leftChild = curr->split.leftChild;
++ VMA_VALIDATE(leftChild != VMA_NULL);
++ VMA_VALIDATE(leftChild->offset == curr->offset);
++ if (!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
++ {
++ VMA_VALIDATE(false && "ValidateNode for left child failed.");
++ }
++ const Node* const rightChild = leftChild->buddy;
++ VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
++ if (!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
++ {
++ VMA_VALIDATE(false && "ValidateNode for right child failed.");
++ }
++ }
++ break;
++ default:
++ return false;
++ }
++
++ return true;
++}
++
++uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
++{
++    // I know this could be optimized somehow e.g. by using std::bit_width() from C++20 (formerly proposed as std::log2p1).
++ uint32_t level = 0;
++ VkDeviceSize currLevelNodeSize = m_UsableSize;
++ VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
++ while (allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
++ {
++ ++level;
++ currLevelNodeSize >>= 1;
++ nextLevelNodeSize >>= 1;
++ }
++ return level;
++}
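++
++// Worked example (editor's note, not part of upstream VMA): with
++// m_UsableSize = 1024, AllocSizeToLevel(100) walks 512 -> 256 -> 128 and stops
++// before 64, returning level 3, i.e. the smallest node size (128) that still
++// fits the request.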
++
++void VmaBlockMetadata_Buddy::Free(VmaAllocHandle allocHandle)
++{
++ uint32_t level = 0;
++ Node* node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
++
++ ++m_FreeCount;
++ --m_AllocationCount;
++ m_SumFreeSize += LevelToNodeSize(level);
++
++ node->type = Node::TYPE_FREE;
++
++ // Join free nodes if possible.
++ while (level > 0 && node->buddy->type == Node::TYPE_FREE)
++ {
++ RemoveFromFreeList(level, node->buddy);
++ Node* const parent = node->parent;
++
++ m_NodeAllocator.Free(node->buddy);
++ m_NodeAllocator.Free(node);
++ parent->type = Node::TYPE_FREE;
++
++ node = parent;
++ --level;
++ --m_FreeCount;
++ }
++
++ AddToFreeListFront(level, node);
++}
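++
++// Worked example (editor's note, not part of upstream VMA): freeing a 64-byte
++// node whose buddy is also free collapses both into their 128-byte parent; if
++// the parent's buddy is free as well, the merge repeats toward the root. Each
++// merge removes the buddy from its free list and returns both child nodes to
++// the pool, so m_FreeCount drops by one per merged pair.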
++
++void VmaBlockMetadata_Buddy::AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const
++{
++ switch (node->type)
++ {
++ case Node::TYPE_FREE:
++ VmaAddDetailedStatisticsUnusedRange(inoutStats, levelNodeSize);
++ break;
++ case Node::TYPE_ALLOCATION:
++ VmaAddDetailedStatisticsAllocation(inoutStats, levelNodeSize);
++ break;
++ case Node::TYPE_SPLIT:
++ {
++ const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
++ const Node* const leftChild = node->split.leftChild;
++ AddNodeToDetailedStatistics(inoutStats, leftChild, childrenNodeSize);
++ const Node* const rightChild = leftChild->buddy;
++ AddNodeToDetailedStatistics(inoutStats, rightChild, childrenNodeSize);
++ }
++ break;
++ default:
++ VMA_ASSERT(0);
++ }
++}
++
++void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
++{
++ VMA_ASSERT(node->type == Node::TYPE_FREE);
++
++ // List is empty.
++ Node* const frontNode = m_FreeList[level].front;
++ if (frontNode == VMA_NULL)
++ {
++ VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
++ node->free.prev = node->free.next = VMA_NULL;
++ m_FreeList[level].front = m_FreeList[level].back = node;
++ }
++ else
++ {
++ VMA_ASSERT(frontNode->free.prev == VMA_NULL);
++ node->free.prev = VMA_NULL;
++ node->free.next = frontNode;
++ frontNode->free.prev = node;
++ m_FreeList[level].front = node;
++ }
++}
++
++void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
++{
++ VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
++
++ // It is at the front.
++ if (node->free.prev == VMA_NULL)
++ {
++ VMA_ASSERT(m_FreeList[level].front == node);
++ m_FreeList[level].front = node->free.next;
++ }
++ else
++ {
++ Node* const prevFreeNode = node->free.prev;
++ VMA_ASSERT(prevFreeNode->free.next == node);
++ prevFreeNode->free.next = node->free.next;
++ }
++
++ // It is at the back.
++ if (node->free.next == VMA_NULL)
++ {
++ VMA_ASSERT(m_FreeList[level].back == node);
++ m_FreeList[level].back = node->free.prev;
++ }
++ else
++ {
++ Node* const nextFreeNode = node->free.next;
++ VMA_ASSERT(nextFreeNode->free.prev == node);
++ nextFreeNode->free.prev = node->free.prev;
++ }
++}
++
++void VmaBlockMetadata_Buddy::DebugLogAllAllocationNode(Node* node, uint32_t level) const
++{
++ switch (node->type)
++ {
++ case Node::TYPE_FREE:
++ break;
++ case Node::TYPE_ALLOCATION:
++ DebugLogAllocation(node->offset, LevelToNodeSize(level), node->allocation.userData);
++ break;
++ case Node::TYPE_SPLIT:
++ {
++ ++level;
++ DebugLogAllAllocationNode(node->split.leftChild, level);
++ DebugLogAllAllocationNode(node->split.leftChild->buddy, level);
++ }
++ break;
++ default:
++ VMA_ASSERT(0);
++ }
++}
++
++#if VMA_STATS_STRING_ENABLED
++void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
++{
++ switch (node->type)
++ {
++ case Node::TYPE_FREE:
++ PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
++ break;
++ case Node::TYPE_ALLOCATION:
++ PrintDetailedMap_Allocation(json, node->offset, levelNodeSize, node->allocation.userData);
++ break;
++ case Node::TYPE_SPLIT:
++ {
++ const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
++ const Node* const leftChild = node->split.leftChild;
++ PrintDetailedMapNode(json, leftChild, childrenNodeSize);
++ const Node* const rightChild = leftChild->buddy;
++ PrintDetailedMapNode(json, rightChild, childrenNodeSize);
++ }
++ break;
++ default:
++ VMA_ASSERT(0);
++ }
++}
++#endif // VMA_STATS_STRING_ENABLED
++#endif // _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS
++#endif // _VMA_BLOCK_METADATA_BUDDY
++#endif // #if 0
++
++#ifndef _VMA_BLOCK_METADATA_TLSF
++// To avoid searching the current, larger region when the first allocation attempt
++// fails and to skip straight to a smaller range, use
++// VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT as the strategy in CreateAllocationRequest().
++// When fragmentation and reuse of previous blocks do not matter, use
++// VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT for the fastest possible allocation time.
++class VmaBlockMetadata_TLSF : public VmaBlockMetadata
++{
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_TLSF)
++public:
++ VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
++ VkDeviceSize bufferImageGranularity, bool isVirtual);
++ virtual ~VmaBlockMetadata_TLSF();
++
++ size_t GetAllocationCount() const override { return m_AllocCount; }
++ size_t GetFreeRegionsCount() const override { return m_BlocksFreeCount + 1; }
++ VkDeviceSize GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; }
++ bool IsEmpty() const override { return m_NullBlock->offset == 0; }
++ VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; }
++
++ void Init(VkDeviceSize size) override;
++ bool Validate() const override;
++
++ void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
++ void AddStatistics(VmaStatistics& inoutStats) const override;
++
++#if VMA_STATS_STRING_ENABLED
++ void PrintDetailedMap(class VmaJsonWriter& json) const override;
++#endif
++
++ bool CreateAllocationRequest(
++ VkDeviceSize allocSize,
++ VkDeviceSize allocAlignment,
++ bool upperAddress,
++ VmaSuballocationType allocType,
++ uint32_t strategy,
++ VmaAllocationRequest* pAllocationRequest) override;
++
++ VkResult CheckCorruption(const void* pBlockData) override;
++ void Alloc(
++ const VmaAllocationRequest& request,
++ VmaSuballocationType type,
++ void* userData) override;
++
++ void Free(VmaAllocHandle allocHandle) override;
++ void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
++ void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
++ VmaAllocHandle GetAllocationListBegin() const override;
++ VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
++ VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
++ void Clear() override;
++ void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
++ void DebugLogAllAllocations() const override;
++
++private:
++    // According to the original paper, preferable values are 4 or 5:
++ // M. Masmano, I. Ripoll, A. Crespo, and J. Real "TLSF: a New Dynamic Memory Allocator for Real-Time Systems"
++ // http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf
++ static const uint8_t SECOND_LEVEL_INDEX = 5;
++ static const uint16_t SMALL_BUFFER_SIZE = 256;
++ static const uint32_t INITIAL_BLOCK_ALLOC_COUNT = 16;
++ static const uint8_t MEMORY_CLASS_SHIFT = 7;
++ static const uint8_t MAX_MEMORY_CLASSES = 65 - MEMORY_CLASS_SHIFT;
++
++ class Block
++ {
++ public:
++ VkDeviceSize offset;
++ VkDeviceSize size;
++ Block* prevPhysical;
++ Block* nextPhysical;
++
++ void MarkFree() { prevFree = VMA_NULL; }
++ void MarkTaken() { prevFree = this; }
++ bool IsFree() const { return prevFree != this; }
++ void*& UserData() { VMA_HEAVY_ASSERT(!IsFree()); return userData; }
++ Block*& PrevFree() { return prevFree; }
++ Block*& NextFree() { VMA_HEAVY_ASSERT(IsFree()); return nextFree; }
++
++ private:
++ Block* prevFree; // Address of the same block here indicates that block is taken
++ union
++ {
++ Block* nextFree;
++ void* userData;
++ };
++ };
++
++ size_t m_AllocCount;
++ // Total number of free blocks besides null block
++ size_t m_BlocksFreeCount;
++ // Total size of free blocks excluding null block
++ VkDeviceSize m_BlocksFreeSize;
++ uint32_t m_IsFreeBitmap;
++ uint8_t m_MemoryClasses;
++ uint32_t m_InnerIsFreeBitmap[MAX_MEMORY_CLASSES];
++ uint32_t m_ListsCount;
++    /*
++    * Free list layout:
++    *   memory class 0: 4 lists for small buffers (2^SECOND_LEVEL_INDEX when virtual)
++    *   memory class 1+: 2^SECOND_LEVEL_INDEX lists per class for normal buffers
++    */
++ Block** m_FreeList;
++ VmaPoolAllocator<Block> m_BlockAllocator;
++ Block* m_NullBlock;
++ VmaBlockBufferImageGranularity m_GranularityHandler;
++
++ uint8_t SizeToMemoryClass(VkDeviceSize size) const;
++ uint16_t SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const;
++ uint32_t GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const;
++ uint32_t GetListIndex(VkDeviceSize size) const;
++
++ void RemoveFreeBlock(Block* block);
++ void InsertFreeBlock(Block* block);
++ void MergeBlock(Block* block, Block* prev);
++
++ Block* FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const;
++ bool CheckBlock(
++ Block& block,
++ uint32_t listIndex,
++ VkDeviceSize allocSize,
++ VkDeviceSize allocAlignment,
++ VmaSuballocationType allocType,
++ VmaAllocationRequest* pAllocationRequest);
++};
++
++#ifndef _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
++VmaBlockMetadata_TLSF::VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
++ VkDeviceSize bufferImageGranularity, bool isVirtual)
++ : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
++ m_AllocCount(0),
++ m_BlocksFreeCount(0),
++ m_BlocksFreeSize(0),
++ m_IsFreeBitmap(0),
++ m_MemoryClasses(0),
++ m_ListsCount(0),
++ m_FreeList(VMA_NULL),
++ m_BlockAllocator(pAllocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT),
++ m_NullBlock(VMA_NULL),
++ m_GranularityHandler(bufferImageGranularity) {}
++
++VmaBlockMetadata_TLSF::~VmaBlockMetadata_TLSF()
++{
++ if (m_FreeList)
++ vma_delete_array(GetAllocationCallbacks(), m_FreeList, m_ListsCount);
++ m_GranularityHandler.Destroy(GetAllocationCallbacks());
++}
++
++void VmaBlockMetadata_TLSF::Init(VkDeviceSize size)
++{
++ VmaBlockMetadata::Init(size);
++
++ if (!IsVirtual())
++ m_GranularityHandler.Init(GetAllocationCallbacks(), size);
++
++ m_NullBlock = m_BlockAllocator.Alloc();
++ m_NullBlock->size = size;
++ m_NullBlock->offset = 0;
++ m_NullBlock->prevPhysical = VMA_NULL;
++ m_NullBlock->nextPhysical = VMA_NULL;
++ m_NullBlock->MarkFree();
++ m_NullBlock->NextFree() = VMA_NULL;
++ m_NullBlock->PrevFree() = VMA_NULL;
++ uint8_t memoryClass = SizeToMemoryClass(size);
++ uint16_t sli = SizeToSecondIndex(size, memoryClass);
++ m_ListsCount = (memoryClass == 0 ? 0 : (memoryClass - 1) * (1UL << SECOND_LEVEL_INDEX) + sli) + 1;
++ if (IsVirtual())
++ m_ListsCount += 1UL << SECOND_LEVEL_INDEX;
++ else
++ m_ListsCount += 4;
++
++ m_MemoryClasses = memoryClass + uint8_t(2);
++ memset(m_InnerIsFreeBitmap, 0, MAX_MEMORY_CLASSES * sizeof(uint32_t));
++
++ m_FreeList = vma_new_array(GetAllocationCallbacks(), Block*, m_ListsCount);
++ memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
++}
++
++bool VmaBlockMetadata_TLSF::Validate() const
++{
++ VMA_VALIDATE(GetSumFreeSize() <= GetSize());
++
++ VkDeviceSize calculatedSize = m_NullBlock->size;
++ VkDeviceSize calculatedFreeSize = m_NullBlock->size;
++ size_t allocCount = 0;
++ size_t freeCount = 0;
++
++ // Check integrity of free lists
++ for (uint32_t list = 0; list < m_ListsCount; ++list)
++ {
++ Block* block = m_FreeList[list];
++ if (block != VMA_NULL)
++ {
++ VMA_VALIDATE(block->IsFree());
++ VMA_VALIDATE(block->PrevFree() == VMA_NULL);
++ while (block->NextFree())
++ {
++ VMA_VALIDATE(block->NextFree()->IsFree());
++ VMA_VALIDATE(block->NextFree()->PrevFree() == block);
++ block = block->NextFree();
++ }
++ }
++ }
++
++ VkDeviceSize nextOffset = m_NullBlock->offset;
++ auto validateCtx = m_GranularityHandler.StartValidation(GetAllocationCallbacks(), IsVirtual());
++
++ VMA_VALIDATE(m_NullBlock->nextPhysical == VMA_NULL);
++ if (m_NullBlock->prevPhysical)
++ {
++ VMA_VALIDATE(m_NullBlock->prevPhysical->nextPhysical == m_NullBlock);
++ }
++ // Check all blocks
++ for (Block* prev = m_NullBlock->prevPhysical; prev != VMA_NULL; prev = prev->prevPhysical)
++ {
++ VMA_VALIDATE(prev->offset + prev->size == nextOffset);
++ nextOffset = prev->offset;
++ calculatedSize += prev->size;
++
++ uint32_t listIndex = GetListIndex(prev->size);
++ if (prev->IsFree())
++ {
++ ++freeCount;
++ // Check if free block belongs to free list
++ Block* freeBlock = m_FreeList[listIndex];
++ VMA_VALIDATE(freeBlock != VMA_NULL);
++
++ bool found = false;
++ do
++ {
++ if (freeBlock == prev)
++ found = true;
++
++ freeBlock = freeBlock->NextFree();
++ } while (!found && freeBlock != VMA_NULL);
++
++ VMA_VALIDATE(found);
++ calculatedFreeSize += prev->size;
++ }
++ else
++ {
++ ++allocCount;
++ // Check if taken block is not on a free list
++ Block* freeBlock = m_FreeList[listIndex];
++ while (freeBlock)
++ {
++ VMA_VALIDATE(freeBlock != prev);
++ freeBlock = freeBlock->NextFree();
++ }
++
++ if (!IsVirtual())
++ {
++ VMA_VALIDATE(m_GranularityHandler.Validate(validateCtx, prev->offset, prev->size));
++ }
++ }
++
++ if (prev->prevPhysical)
++ {
++ VMA_VALIDATE(prev->prevPhysical->nextPhysical == prev);
++ }
++ }
++
++ if (!IsVirtual())
++ {
++ VMA_VALIDATE(m_GranularityHandler.FinishValidation(validateCtx));
++ }
++
++ VMA_VALIDATE(nextOffset == 0);
++ VMA_VALIDATE(calculatedSize == GetSize());
++ VMA_VALIDATE(calculatedFreeSize == GetSumFreeSize());
++ VMA_VALIDATE(allocCount == m_AllocCount);
++ VMA_VALIDATE(freeCount == m_BlocksFreeCount);
++
++ return true;
++}
++
++void VmaBlockMetadata_TLSF::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
++{
++ inoutStats.statistics.blockCount++;
++ inoutStats.statistics.blockBytes += GetSize();
++ if (m_NullBlock->size > 0)
++ VmaAddDetailedStatisticsUnusedRange(inoutStats, m_NullBlock->size);
++
++ for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
++ {
++ if (block->IsFree())
++ VmaAddDetailedStatisticsUnusedRange(inoutStats, block->size);
++ else
++ VmaAddDetailedStatisticsAllocation(inoutStats, block->size);
++ }
++}
++
++void VmaBlockMetadata_TLSF::AddStatistics(VmaStatistics& inoutStats) const
++{
++ inoutStats.blockCount++;
++ inoutStats.allocationCount += (uint32_t)m_AllocCount;
++ inoutStats.blockBytes += GetSize();
++ inoutStats.allocationBytes += GetSize() - GetSumFreeSize();
++}
++
++#if VMA_STATS_STRING_ENABLED
++void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const
++{
++ size_t blockCount = m_AllocCount + m_BlocksFreeCount;
++ VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
++ VmaVector<Block*, VmaStlAllocator<Block*>> blockList(blockCount, allocator);
++
++ size_t i = blockCount;
++ for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
++ {
++ blockList[--i] = block;
++ }
++ VMA_ASSERT(i == 0);
++
++ VmaDetailedStatistics stats;
++ VmaClearDetailedStatistics(stats);
++ AddDetailedStatistics(stats);
++
++ PrintDetailedMap_Begin(json,
++ stats.statistics.blockBytes - stats.statistics.allocationBytes,
++ stats.statistics.allocationCount,
++ stats.unusedRangeCount);
++
++ for (; i < blockCount; ++i)
++ {
++ Block* block = blockList[i];
++ if (block->IsFree())
++ PrintDetailedMap_UnusedRange(json, block->offset, block->size);
++ else
++ PrintDetailedMap_Allocation(json, block->offset, block->size, block->UserData());
++ }
++ if (m_NullBlock->size > 0)
++ PrintDetailedMap_UnusedRange(json, m_NullBlock->offset, m_NullBlock->size);
++
++ PrintDetailedMap_End(json);
++}
++#endif
++
++bool VmaBlockMetadata_TLSF::CreateAllocationRequest(
++ VkDeviceSize allocSize,
++ VkDeviceSize allocAlignment,
++ bool upperAddress,
++ VmaSuballocationType allocType,
++ uint32_t strategy,
++ VmaAllocationRequest* pAllocationRequest)
++{
++ VMA_ASSERT(allocSize > 0 && "Cannot allocate empty block!");
++ VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
++
++    // For small granularity, round the request up
++ if (!IsVirtual())
++ m_GranularityHandler.RoundupAllocRequest(allocType, allocSize, allocAlignment);
++
++ allocSize += GetDebugMargin();
++ // Quick check for too small pool
++ if (allocSize > GetSumFreeSize())
++ return false;
++
++ // If no free blocks in pool then check only null block
++ if (m_BlocksFreeCount == 0)
++ return CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest);
++
++    // Round up to the size of the next free-list bucket
++ VkDeviceSize sizeForNextList = allocSize;
++ VkDeviceSize smallSizeStep = VkDeviceSize(SMALL_BUFFER_SIZE / (IsVirtual() ? 1 << SECOND_LEVEL_INDEX : 4));
++ if (allocSize > SMALL_BUFFER_SIZE)
++ {
++ sizeForNextList += (1ULL << (VMA_BITSCAN_MSB(allocSize) - SECOND_LEVEL_INDEX));
++ }
++ else if (allocSize > SMALL_BUFFER_SIZE - smallSizeStep)
++ sizeForNextList = SMALL_BUFFER_SIZE + 1;
++ else
++ sizeForNextList += smallSizeStep;
++
++ uint32_t nextListIndex = m_ListsCount;
++ uint32_t prevListIndex = m_ListsCount;
++ Block* nextListBlock = VMA_NULL;
++ Block* prevListBlock = VMA_NULL;
++
++ // Check blocks according to strategies
++ if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT)
++ {
++ // Quick check for larger block first
++ nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
++ if (nextListBlock != VMA_NULL && CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
++ return true;
++
++        // If it did not fit, try the null block
++ if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
++ return true;
++
++ // Null block failed, search larger bucket
++ while (nextListBlock)
++ {
++ if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
++ return true;
++ nextListBlock = nextListBlock->NextFree();
++ }
++
++ // Failed again, check best fit bucket
++ prevListBlock = FindFreeBlock(allocSize, prevListIndex);
++ while (prevListBlock)
++ {
++ if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
++ return true;
++ prevListBlock = prevListBlock->NextFree();
++ }
++ }
++ else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT)
++ {
++ // Check best fit bucket
++ prevListBlock = FindFreeBlock(allocSize, prevListIndex);
++ while (prevListBlock)
++ {
++ if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
++ return true;
++ prevListBlock = prevListBlock->NextFree();
++ }
++
++ // If failed check null block
++ if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
++ return true;
++
++ // Check larger bucket
++ nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
++ while (nextListBlock)
++ {
++ if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
++ return true;
++ nextListBlock = nextListBlock->NextFree();
++ }
++ }
++ else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT )
++ {
++ // Perform search from the start
++ VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
++ VmaVector<Block*, VmaStlAllocator<Block*>> blockList(m_BlocksFreeCount, allocator);
++
++ size_t i = m_BlocksFreeCount;
++ for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
++ {
++ if (block->IsFree() && block->size >= allocSize)
++ blockList[--i] = block;
++ }
++
++ for (; i < m_BlocksFreeCount; ++i)
++ {
++ Block& block = *blockList[i];
++ if (CheckBlock(block, GetListIndex(block.size), allocSize, allocAlignment, allocType, pAllocationRequest))
++ return true;
++ }
++
++ // If failed check null block
++ if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
++ return true;
++
++ // Whole range searched, no more memory
++ return false;
++ }
++ else
++ {
++ // Check larger bucket
++ nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
++ while (nextListBlock)
++ {
++ if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
++ return true;
++ nextListBlock = nextListBlock->NextFree();
++ }
++
++ // If failed check null block
++ if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
++ return true;
++
++ // Check best fit bucket
++ prevListBlock = FindFreeBlock(allocSize, prevListIndex);
++ while (prevListBlock)
++ {
++ if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
++ return true;
++ prevListBlock = prevListBlock->NextFree();
++ }
++ }
++
++ // Worst case, full search has to be done
++ while (++nextListIndex < m_ListsCount)
++ {
++ nextListBlock = m_FreeList[nextListIndex];
++ while (nextListBlock)
++ {
++ if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
++ return true;
++ nextListBlock = nextListBlock->NextFree();
++ }
++ }
++
++ // No more memory sadly
++ return false;
++}
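++
++// Editor's summary of the search order above (derived from this function, not
++// from upstream documentation):
++//   MIN_TIME:   larger bucket -> null block -> rest of larger bucket -> best-fit bucket
++//   MIN_MEMORY: best-fit bucket -> null block -> larger bucket
++//   MIN_OFFSET: all fitting free blocks in physical order -> null block (no final scan)
++//   default:    larger bucket -> null block -> best-fit bucket
++// All strategies except MIN_OFFSET fall back to a full scan of the remaining
++// larger free lists.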
++
++VkResult VmaBlockMetadata_TLSF::CheckCorruption(const void* pBlockData)
++{
++ for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
++ {
++ if (!block->IsFree())
++ {
++ if (!VmaValidateMagicValue(pBlockData, block->offset + block->size))
++ {
++ VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
++                return VK_ERROR_UNKNOWN;
++ }
++ }
++ }
++
++ return VK_SUCCESS;
++}
++
++void VmaBlockMetadata_TLSF::Alloc(
++ const VmaAllocationRequest& request,
++ VmaSuballocationType type,
++ void* userData)
++{
++ VMA_ASSERT(request.type == VmaAllocationRequestType::TLSF);
++
++ // Get block and pop it from the free list
++ Block* currentBlock = (Block*)request.allocHandle;
++ VkDeviceSize offset = request.algorithmData;
++ VMA_ASSERT(currentBlock != VMA_NULL);
++ VMA_ASSERT(currentBlock->offset <= offset);
++
++ if (currentBlock != m_NullBlock)
++ RemoveFreeBlock(currentBlock);
++
++ VkDeviceSize debugMargin = GetDebugMargin();
++    VkDeviceSize missingAlignment = offset - currentBlock->offset;
++
++ // Append missing alignment to prev block or create new one
++    if (missingAlignment)
++ {
++ Block* prevBlock = currentBlock->prevPhysical;
++ VMA_ASSERT(prevBlock != VMA_NULL && "There should be no missing alignment at offset 0!");
++
++ if (prevBlock->IsFree() && prevBlock->size != debugMargin)
++ {
++ uint32_t oldList = GetListIndex(prevBlock->size);
++            prevBlock->size += missingAlignment;
++ // Check if new size crosses list bucket
++ if (oldList != GetListIndex(prevBlock->size))
++ {
++                prevBlock->size -= missingAlignment;
++                RemoveFreeBlock(prevBlock);
++                prevBlock->size += missingAlignment;
++ InsertFreeBlock(prevBlock);
++ }
++ else
++                m_BlocksFreeSize += missingAlignment;
++ }
++ else
++ {
++ Block* newBlock = m_BlockAllocator.Alloc();
++ currentBlock->prevPhysical = newBlock;
++ prevBlock->nextPhysical = newBlock;
++ newBlock->prevPhysical = prevBlock;
++ newBlock->nextPhysical = currentBlock;
++            newBlock->size = missingAlignment;
++ newBlock->offset = currentBlock->offset;
++ newBlock->MarkTaken();
++
++ InsertFreeBlock(newBlock);
++ }
++
++        currentBlock->size -= missingAlignment;
++        currentBlock->offset += missingAlignment;
++ }
++
++ VkDeviceSize size = request.size + debugMargin;
++ if (currentBlock->size == size)
++ {
++ if (currentBlock == m_NullBlock)
++ {
++ // Setup new null block
++ m_NullBlock = m_BlockAllocator.Alloc();
++ m_NullBlock->size = 0;
++ m_NullBlock->offset = currentBlock->offset + size;
++ m_NullBlock->prevPhysical = currentBlock;
++ m_NullBlock->nextPhysical = VMA_NULL;
++ m_NullBlock->MarkFree();
++ m_NullBlock->PrevFree() = VMA_NULL;
++ m_NullBlock->NextFree() = VMA_NULL;
++ currentBlock->nextPhysical = m_NullBlock;
++ currentBlock->MarkTaken();
++ }
++ }
++ else
++ {
++ VMA_ASSERT(currentBlock->size > size && "Proper block already found, shouldn't find smaller one!");
++
++ // Create new free block
++ Block* newBlock = m_BlockAllocator.Alloc();
++ newBlock->size = currentBlock->size - size;
++ newBlock->offset = currentBlock->offset + size;
++ newBlock->prevPhysical = currentBlock;
++ newBlock->nextPhysical = currentBlock->nextPhysical;
++ currentBlock->nextPhysical = newBlock;
++ currentBlock->size = size;
++
++ if (currentBlock == m_NullBlock)
++ {
++ m_NullBlock = newBlock;
++ m_NullBlock->MarkFree();
++ m_NullBlock->NextFree() = VMA_NULL;
++ m_NullBlock->PrevFree() = VMA_NULL;
++ currentBlock->MarkTaken();
++ }
++ else
++ {
++ newBlock->nextPhysical->prevPhysical = newBlock;
++ newBlock->MarkTaken();
++ InsertFreeBlock(newBlock);
++ }
++ }
++ currentBlock->UserData() = userData;
++
++ if (debugMargin > 0)
++ {
++ currentBlock->size -= debugMargin;
++ Block* newBlock = m_BlockAllocator.Alloc();
++ newBlock->size = debugMargin;
++ newBlock->offset = currentBlock->offset + currentBlock->size;
++ newBlock->prevPhysical = currentBlock;
++ newBlock->nextPhysical = currentBlock->nextPhysical;
++ newBlock->MarkTaken();
++ currentBlock->nextPhysical->prevPhysical = newBlock;
++ currentBlock->nextPhysical = newBlock;
++ InsertFreeBlock(newBlock);
++ }
++
++ if (!IsVirtual())
++ m_GranularityHandler.AllocPages((uint8_t)(uintptr_t)request.customData,
++ currentBlock->offset, currentBlock->size);
++ ++m_AllocCount;
++}
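++
++// Worked example (editor's note, not part of upstream VMA): if the chosen free
++// block starts at offset 100 and the request needs 64-byte alignment,
++// CheckBlock stored algorithmData = 128, so missingAlignment = 28 here. Those
++// 28 bytes are either appended to a free predecessor (re-bucketed if its size
++// crosses a list boundary) or split off as a tiny new block, and currentBlock
++// then starts at the aligned offset 128.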
++
++void VmaBlockMetadata_TLSF::Free(VmaAllocHandle allocHandle)
++{
++ Block* block = (Block*)allocHandle;
++ Block* next = block->nextPhysical;
++ VMA_ASSERT(!block->IsFree() && "Block is already free!");
++
++ if (!IsVirtual())
++ m_GranularityHandler.FreePages(block->offset, block->size);
++ --m_AllocCount;
++
++ VkDeviceSize debugMargin = GetDebugMargin();
++ if (debugMargin > 0)
++ {
++ RemoveFreeBlock(next);
++ MergeBlock(next, block);
++ block = next;
++ next = next->nextPhysical;
++ }
++
++ // Try merging
++ Block* prev = block->prevPhysical;
++ if (prev != VMA_NULL && prev->IsFree() && prev->size != debugMargin)
++ {
++ RemoveFreeBlock(prev);
++ MergeBlock(block, prev);
++ }
++
++ if (!next->IsFree())
++ InsertFreeBlock(block);
++ else if (next == m_NullBlock)
++ MergeBlock(m_NullBlock, block);
++ else
++ {
++ RemoveFreeBlock(next);
++ MergeBlock(next, block);
++ InsertFreeBlock(next);
++ }
++}
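++
++// Worked example (editor's note, not part of upstream VMA): freeing a block
++// whose physical predecessor is free first merges the predecessor into it; if
++// the successor is free too, the result is then merged into the successor (or
++// into the null block when it borders the end), so two adjacent free blocks
++// never persist.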
++
++void VmaBlockMetadata_TLSF::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
++{
++ Block* block = (Block*)allocHandle;
++ VMA_ASSERT(!block->IsFree() && "Cannot get allocation info for free block!");
++ outInfo.offset = block->offset;
++ outInfo.size = block->size;
++ outInfo.pUserData = block->UserData();
++}
++
++void* VmaBlockMetadata_TLSF::GetAllocationUserData(VmaAllocHandle allocHandle) const
++{
++ Block* block = (Block*)allocHandle;
++ VMA_ASSERT(!block->IsFree() && "Cannot get user data for free block!");
++ return block->UserData();
++}
++
++VmaAllocHandle VmaBlockMetadata_TLSF::GetAllocationListBegin() const
++{
++ if (m_AllocCount == 0)
++ return VK_NULL_HANDLE;
++
++ for (Block* block = m_NullBlock->prevPhysical; block; block = block->prevPhysical)
++ {
++ if (!block->IsFree())
++ return (VmaAllocHandle)block;
++ }
++ VMA_ASSERT(false && "If m_AllocCount > 0 then should find any allocation!");
++ return VK_NULL_HANDLE;
++}
++
++VmaAllocHandle VmaBlockMetadata_TLSF::GetNextAllocation(VmaAllocHandle prevAlloc) const
++{
++ Block* startBlock = (Block*)prevAlloc;
++ VMA_ASSERT(!startBlock->IsFree() && "Incorrect block!");
++
++ for (Block* block = startBlock->prevPhysical; block; block = block->prevPhysical)
++ {
++ if (!block->IsFree())
++ return (VmaAllocHandle)block;
++ }
++ return VK_NULL_HANDLE;
++}
++
++VkDeviceSize VmaBlockMetadata_TLSF::GetNextFreeRegionSize(VmaAllocHandle alloc) const
++{
++ Block* block = (Block*)alloc;
++ VMA_ASSERT(!block->IsFree() && "Incorrect block!");
++
++ if (block->prevPhysical)
++ return block->prevPhysical->IsFree() ? block->prevPhysical->size : 0;
++ return 0;
++}
++
++void VmaBlockMetadata_TLSF::Clear()
++{
++ m_AllocCount = 0;
++ m_BlocksFreeCount = 0;
++ m_BlocksFreeSize = 0;
++ m_IsFreeBitmap = 0;
++ m_NullBlock->offset = 0;
++ m_NullBlock->size = GetSize();
++ Block* block = m_NullBlock->prevPhysical;
++ m_NullBlock->prevPhysical = VMA_NULL;
++ while (block)
++ {
++ Block* prev = block->prevPhysical;
++ m_BlockAllocator.Free(block);
++ block = prev;
++ }
++ memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
++ memset(m_InnerIsFreeBitmap, 0, m_MemoryClasses * sizeof(uint32_t));
++ m_GranularityHandler.Clear();
++}
++
++void VmaBlockMetadata_TLSF::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
++{
++ Block* block = (Block*)allocHandle;
++ VMA_ASSERT(!block->IsFree() && "Trying to set user data for not allocated block!");
++ block->UserData() = userData;
++}
++
++void VmaBlockMetadata_TLSF::DebugLogAllAllocations() const
++{
++ for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
++ if (!block->IsFree())
++ DebugLogAllocation(block->offset, block->size, block->UserData());
++}
++
++uint8_t VmaBlockMetadata_TLSF::SizeToMemoryClass(VkDeviceSize size) const
++{
++ if (size > SMALL_BUFFER_SIZE)
++ return uint8_t(VMA_BITSCAN_MSB(size) - MEMORY_CLASS_SHIFT);
++ return 0;
++}
++
++uint16_t VmaBlockMetadata_TLSF::SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const
++{
++ if (memoryClass == 0)
++ {
++ if (IsVirtual())
++ return static_cast<uint16_t>((size - 1) / 8);
++ else
++ return static_cast<uint16_t>((size - 1) / 64);
++ }
++ return static_cast<uint16_t>((size >> (memoryClass + MEMORY_CLASS_SHIFT - SECOND_LEVEL_INDEX)) ^ (1U << SECOND_LEVEL_INDEX));
++}
++
++uint32_t VmaBlockMetadata_TLSF::GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const
++{
++ if (memoryClass == 0)
++ return secondIndex;
++
++ const uint32_t index = static_cast<uint32_t>(memoryClass - 1) * (1 << SECOND_LEVEL_INDEX) + secondIndex;
++ if (IsVirtual())
++ return index + (1 << SECOND_LEVEL_INDEX);
++ else
++ return index + 4;
++}
++
++uint32_t VmaBlockMetadata_TLSF::GetListIndex(VkDeviceSize size) const
++{
++ uint8_t memoryClass = SizeToMemoryClass(size);
++ return GetListIndex(memoryClass, SizeToSecondIndex(size, memoryClass));
++}
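++
++// Worked example (editor's note, not part of upstream VMA), non-virtual block,
++// size = 1000: VMA_BITSCAN_MSB(1000) == 9, so the memory class is 9 - 7 = 2;
++// the second index is (1000 >> (2 + 7 - 5)) ^ (1 << 5) = 62 ^ 32 = 30; the
++// final list index is (2 - 1) * 32 + 30 + 4 = 66.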
++
++void VmaBlockMetadata_TLSF::RemoveFreeBlock(Block* block)
++{
++ VMA_ASSERT(block != m_NullBlock);
++ VMA_ASSERT(block->IsFree());
++
++ if (block->NextFree() != VMA_NULL)
++ block->NextFree()->PrevFree() = block->PrevFree();
++ if (block->PrevFree() != VMA_NULL)
++ block->PrevFree()->NextFree() = block->NextFree();
++ else
++ {
++ uint8_t memClass = SizeToMemoryClass(block->size);
++ uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
++ uint32_t index = GetListIndex(memClass, secondIndex);
++ VMA_ASSERT(m_FreeList[index] == block);
++ m_FreeList[index] = block->NextFree();
++ if (block->NextFree() == VMA_NULL)
++ {
++ m_InnerIsFreeBitmap[memClass] &= ~(1U << secondIndex);
++ if (m_InnerIsFreeBitmap[memClass] == 0)
++ m_IsFreeBitmap &= ~(1UL << memClass);
++ }
++ }
++ block->MarkTaken();
++ block->UserData() = VMA_NULL;
++ --m_BlocksFreeCount;
++ m_BlocksFreeSize -= block->size;
++}
++
++void VmaBlockMetadata_TLSF::InsertFreeBlock(Block* block)
++{
++ VMA_ASSERT(block != m_NullBlock);
++ VMA_ASSERT(!block->IsFree() && "Cannot insert block twice!");
++
++ uint8_t memClass = SizeToMemoryClass(block->size);
++ uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
++ uint32_t index = GetListIndex(memClass, secondIndex);
++ VMA_ASSERT(index < m_ListsCount);
++ block->PrevFree() = VMA_NULL;
++ block->NextFree() = m_FreeList[index];
++ m_FreeList[index] = block;
++ if (block->NextFree() != VMA_NULL)
++ block->NextFree()->PrevFree() = block;
++ else
++ {
++ m_InnerIsFreeBitmap[memClass] |= 1U << secondIndex;
++ m_IsFreeBitmap |= 1UL << memClass;
++ }
++ ++m_BlocksFreeCount;
++ m_BlocksFreeSize += block->size;
++}
++
++void VmaBlockMetadata_TLSF::MergeBlock(Block* block, Block* prev)
++{
++ VMA_ASSERT(block->prevPhysical == prev && "Cannot merge separate physical regions!");
++ VMA_ASSERT(!prev->IsFree() && "Cannot merge block that belongs to free list!");
++
++ block->offset = prev->offset;
++ block->size += prev->size;
++ block->prevPhysical = prev->prevPhysical;
++ if (block->prevPhysical)
++ block->prevPhysical->nextPhysical = block;
++ m_BlockAllocator.Free(prev);
++}
++
++VmaBlockMetadata_TLSF::Block* VmaBlockMetadata_TLSF::FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const
++{
++ uint8_t memoryClass = SizeToMemoryClass(size);
++ uint32_t innerFreeMap = m_InnerIsFreeBitmap[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass));
++ if (!innerFreeMap)
++ {
++ // Check higher levels for available blocks
++ uint32_t freeMap = m_IsFreeBitmap & (~0UL << (memoryClass + 1));
++ if (!freeMap)
++ return VMA_NULL; // No more memory available
++
++ // Find lowest free region
++ memoryClass = VMA_BITSCAN_LSB(freeMap);
++ innerFreeMap = m_InnerIsFreeBitmap[memoryClass];
++ VMA_ASSERT(innerFreeMap != 0);
++ }
++ // Find lowest free subregion
++ listIndex = GetListIndex(memoryClass, VMA_BITSCAN_LSB(innerFreeMap));
++ VMA_ASSERT(m_FreeList[listIndex]);
++ return m_FreeList[listIndex];
++}
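++
++// Worked example (editor's note, not part of upstream VMA): for size = 1000
++// (memory class 2, second index 30), innerFreeMap keeps only bits 30 and 31 of
++// m_InnerIsFreeBitmap[2]. If both are clear, m_IsFreeBitmap is masked down to
++// classes 3 and above and VMA_BITSCAN_LSB picks the lowest non-empty class;
++// the lowest set bit of that class's inner bitmap then selects the first
++// candidate list, so the search costs two bit scans instead of a walk over all
++// lists.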
++
++bool VmaBlockMetadata_TLSF::CheckBlock(
++ Block& block,
++ uint32_t listIndex,
++ VkDeviceSize allocSize,
++ VkDeviceSize allocAlignment,
++ VmaSuballocationType allocType,
++ VmaAllocationRequest* pAllocationRequest)
++{
++ VMA_ASSERT(block.IsFree() && "Block is already taken!");
++
++ VkDeviceSize alignedOffset = VmaAlignUp(block.offset, allocAlignment);
++ if (block.size < allocSize + alignedOffset - block.offset)
++ return false;
++
++ // Check for granularity conflicts
++ if (!IsVirtual() &&
++ m_GranularityHandler.CheckConflictAndAlignUp(alignedOffset, allocSize, block.offset, block.size, allocType))
++ return false;
++
++ // Alloc successful
++ pAllocationRequest->type = VmaAllocationRequestType::TLSF;
++ pAllocationRequest->allocHandle = (VmaAllocHandle)&block;
++ pAllocationRequest->size = allocSize - GetDebugMargin();
++ pAllocationRequest->customData = (void*)allocType;
++ pAllocationRequest->algorithmData = alignedOffset;
++
++    // Move the block to the front of its free list if it is a normal block (not the null block)
++ if (listIndex != m_ListsCount && block.PrevFree())
++ {
++ block.PrevFree()->NextFree() = block.NextFree();
++ if (block.NextFree())
++ block.NextFree()->PrevFree() = block.PrevFree();
++ block.PrevFree() = VMA_NULL;
++ block.NextFree() = m_FreeList[listIndex];
++ m_FreeList[listIndex] = &block;
++ if (block.NextFree())
++ block.NextFree()->PrevFree() = &block;
++ }
++
++ return true;
++}
++#endif // _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
++#endif // _VMA_BLOCK_METADATA_TLSF
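++
++#if 0 // Editor's illustration only, deliberately compiled out: a minimal use
++      // of the public virtual-allocation API, which exercises the TLSF
++      // metadata above (the default algorithm, see VmaVirtualBlock_T below).
++static void ExampleVirtualBlockUsage()
++{
++    VmaVirtualBlockCreateInfo blockCreateInfo = {};
++    blockCreateInfo.size = 1048576; // 1 MiB of virtual space, no real memory
++
++    VmaVirtualBlock block;
++    VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
++    VMA_ASSERT(res == VK_SUCCESS);
++
++    VmaVirtualAllocationCreateInfo allocCreateInfo = {};
++    allocCreateInfo.size = 4096;
++    allocCreateInfo.flags = VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
++
++    VmaVirtualAllocation alloc;
++    VkDeviceSize offset;
++    res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
++    VMA_ASSERT(res == VK_SUCCESS);
++
++    vmaVirtualFree(block, alloc);
++    vmaDestroyVirtualBlock(block);
++}
++#endif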
++
++#ifndef _VMA_BLOCK_VECTOR
++/*
++Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
++Vulkan memory type.
++
++Synchronized internally with a mutex.
++*/
++class VmaBlockVector
++{
++ friend struct VmaDefragmentationContext_T;
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockVector)
++public:
++ VmaBlockVector(
++ VmaAllocator hAllocator,
++ VmaPool hParentPool,
++ uint32_t memoryTypeIndex,
++ VkDeviceSize preferredBlockSize,
++ size_t minBlockCount,
++ size_t maxBlockCount,
++ VkDeviceSize bufferImageGranularity,
++ bool explicitBlockSize,
++ uint32_t algorithm,
++ float priority,
++ VkDeviceSize minAllocationAlignment,
++ void* pMemoryAllocateNext);
++ ~VmaBlockVector();
++
++ VmaAllocator GetAllocator() const { return m_hAllocator; }
++ VmaPool GetParentPool() const { return m_hParentPool; }
++ bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
++ uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
++ VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
++ VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
++ uint32_t GetAlgorithm() const { return m_Algorithm; }
++ bool HasExplicitBlockSize() const { return m_ExplicitBlockSize; }
++ float GetPriority() const { return m_Priority; }
++ const void* GetAllocationNextPtr() const { return m_pMemoryAllocateNext; }
++ // To be used only while the m_Mutex is locked. Used during defragmentation.
++ size_t GetBlockCount() const { return m_Blocks.size(); }
++ // To be used only while the m_Mutex is locked. Used during defragmentation.
++ VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
++ VMA_RW_MUTEX &GetMutex() { return m_Mutex; }
++
++ VkResult CreateMinBlocks();
++ void AddStatistics(VmaStatistics& inoutStats);
++ void AddDetailedStatistics(VmaDetailedStatistics& inoutStats);
++ bool IsEmpty();
++ bool IsCorruptionDetectionEnabled() const;
++
++ VkResult Allocate(
++ VkDeviceSize size,
++ VkDeviceSize alignment,
++ const VmaAllocationCreateInfo& createInfo,
++ VmaSuballocationType suballocType,
++ size_t allocationCount,
++ VmaAllocation* pAllocations);
++
++ void Free(const VmaAllocation hAllocation);
++
++#if VMA_STATS_STRING_ENABLED
++ void PrintDetailedMap(class VmaJsonWriter& json);
++#endif
++
++ VkResult CheckCorruption();
++
++private:
++ const VmaAllocator m_hAllocator;
++ const VmaPool m_hParentPool;
++ const uint32_t m_MemoryTypeIndex;
++ const VkDeviceSize m_PreferredBlockSize;
++ const size_t m_MinBlockCount;
++ const size_t m_MaxBlockCount;
++ const VkDeviceSize m_BufferImageGranularity;
++ const bool m_ExplicitBlockSize;
++ const uint32_t m_Algorithm;
++ const float m_Priority;
++ const VkDeviceSize m_MinAllocationAlignment;
++
++ void* const m_pMemoryAllocateNext;
++ VMA_RW_MUTEX m_Mutex;
++ // Incrementally sorted by sumFreeSize, ascending.
++ VmaVector<VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*>> m_Blocks;
++ uint32_t m_NextBlockId;
++ bool m_IncrementalSort = true;
++
++ void SetIncrementalSort(bool val) { m_IncrementalSort = val; }
++
++ VkDeviceSize CalcMaxBlockSize() const;
++ // Finds and removes given block from vector.
++ void Remove(VmaDeviceMemoryBlock* pBlock);
++ // Performs single step in sorting m_Blocks. They may not be fully sorted
++ // after this call.
++ void IncrementallySortBlocks();
++ void SortByFreeSize();
++
++ VkResult AllocatePage(
++ VkDeviceSize size,
++ VkDeviceSize alignment,
++ const VmaAllocationCreateInfo& createInfo,
++ VmaSuballocationType suballocType,
++ VmaAllocation* pAllocation);
++
++ VkResult AllocateFromBlock(
++ VmaDeviceMemoryBlock* pBlock,
++ VkDeviceSize size,
++ VkDeviceSize alignment,
++ VmaAllocationCreateFlags allocFlags,
++ void* pUserData,
++ VmaSuballocationType suballocType,
++ uint32_t strategy,
++ VmaAllocation* pAllocation);
++
++ VkResult CommitAllocationRequest(
++ VmaAllocationRequest& allocRequest,
++ VmaDeviceMemoryBlock* pBlock,
++ VkDeviceSize alignment,
++ VmaAllocationCreateFlags allocFlags,
++ void* pUserData,
++ VmaSuballocationType suballocType,
++ VmaAllocation* pAllocation);
++
++ VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
++ bool HasEmptyBlock();
++};
++#endif // _VMA_BLOCK_VECTOR
++
++#ifndef _VMA_DEFRAGMENTATION_CONTEXT
++struct VmaDefragmentationContext_T
++{
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaDefragmentationContext_T)
++public:
++ VmaDefragmentationContext_T(
++ VmaAllocator hAllocator,
++ const VmaDefragmentationInfo& info);
++ ~VmaDefragmentationContext_T();
++
++ void GetStats(VmaDefragmentationStats& outStats) { outStats = m_GlobalStats; }
++
++ VkResult DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo);
++ VkResult DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo);
++
++private:
++ // Max number of allocations to ignore due to size constraints before ending single pass
++ static const uint8_t MAX_ALLOCS_TO_IGNORE = 16;
++ enum class CounterStatus { Pass, Ignore, End };
++
++ struct FragmentedBlock
++ {
++ uint32_t data;
++ VmaDeviceMemoryBlock* block;
++ };
++ struct StateBalanced
++ {
++ VkDeviceSize avgFreeSize = 0;
++ VkDeviceSize avgAllocSize = UINT64_MAX;
++ };
++ struct StateExtensive
++ {
++ enum class Operation : uint8_t
++ {
++ FindFreeBlockBuffer, FindFreeBlockTexture, FindFreeBlockAll,
++ MoveBuffers, MoveTextures, MoveAll,
++ Cleanup, Done
++ };
++
++ Operation operation = Operation::FindFreeBlockTexture;
++ size_t firstFreeBlock = SIZE_MAX;
++ };
++ struct MoveAllocationData
++ {
++ VkDeviceSize size;
++ VkDeviceSize alignment;
++ VmaSuballocationType type;
++ VmaAllocationCreateFlags flags;
++ VmaDefragmentationMove move = {};
++ };
++
++ const VkDeviceSize m_MaxPassBytes;
++ const uint32_t m_MaxPassAllocations;
++ const PFN_vmaCheckDefragmentationBreakFunction m_BreakCallback;
++ void* m_BreakCallbackUserData;
++
++ VmaStlAllocator<VmaDefragmentationMove> m_MoveAllocator;
++ VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>> m_Moves;
++
++ uint8_t m_IgnoredAllocs = 0;
++ uint32_t m_Algorithm;
++ uint32_t m_BlockVectorCount;
++ VmaBlockVector* m_PoolBlockVector;
++ VmaBlockVector** m_pBlockVectors;
++ size_t m_ImmovableBlockCount = 0;
++ VmaDefragmentationStats m_GlobalStats = { 0 };
++ VmaDefragmentationStats m_PassStats = { 0 };
++ void* m_AlgorithmState = VMA_NULL;
++
++ static MoveAllocationData GetMoveData(VmaAllocHandle handle, VmaBlockMetadata* metadata);
++ CounterStatus CheckCounters(VkDeviceSize bytes);
++ bool IncrementCounters(VkDeviceSize bytes);
++ bool ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block);
++ bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector);
++
++ bool ComputeDefragmentation(VmaBlockVector& vector, size_t index);
++ bool ComputeDefragmentation_Fast(VmaBlockVector& vector);
++ bool ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update);
++ bool ComputeDefragmentation_Full(VmaBlockVector& vector);
++ bool ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index);
++
++ void UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state);
++ bool MoveDataToFreeBlocks(VmaSuballocationType currentType,
++ VmaBlockVector& vector, size_t firstFreeBlock,
++ bool& texturePresent, bool& bufferPresent, bool& otherPresent);
++};
++#endif // _VMA_DEFRAGMENTATION_CONTEXT
++
++#ifndef _VMA_POOL_T
++struct VmaPool_T
++{
++ friend struct VmaPoolListItemTraits;
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaPool_T)
++public:
++ VmaBlockVector m_BlockVector;
++ VmaDedicatedAllocationList m_DedicatedAllocations;
++
++ VmaPool_T(
++ VmaAllocator hAllocator,
++ const VmaPoolCreateInfo& createInfo,
++ VkDeviceSize preferredBlockSize);
++ ~VmaPool_T();
++
++ uint32_t GetId() const { return m_Id; }
++ void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
++
++ const char* GetName() const { return m_Name; }
++ void SetName(const char* pName);
++
++#if VMA_STATS_STRING_ENABLED
++ //void PrintDetailedMap(class VmaStringBuilder& sb);
++#endif
++
++private:
++ uint32_t m_Id;
++ char* m_Name;
++ VmaPool_T* m_PrevPool = VMA_NULL;
++ VmaPool_T* m_NextPool = VMA_NULL;
++};
++
++struct VmaPoolListItemTraits
++{
++ typedef VmaPool_T ItemType;
++
++ static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; }
++ static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; }
++ static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; }
++ static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; }
++};
++#endif // _VMA_POOL_T
++
++#ifndef _VMA_CURRENT_BUDGET_DATA
++struct VmaCurrentBudgetData
++{
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaCurrentBudgetData)
++public:
++
++ VMA_ATOMIC_UINT32 m_BlockCount[VK_MAX_MEMORY_HEAPS];
++ VMA_ATOMIC_UINT32 m_AllocationCount[VK_MAX_MEMORY_HEAPS];
++ VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
++ VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
++
++#if VMA_MEMORY_BUDGET
++ VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
++ VMA_RW_MUTEX m_BudgetMutex;
++ uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
++ uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
++ uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
++#endif // VMA_MEMORY_BUDGET
++
++ VmaCurrentBudgetData();
++
++ void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize);
++ void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize);
++};
++
++#ifndef _VMA_CURRENT_BUDGET_DATA_FUNCTIONS
++VmaCurrentBudgetData::VmaCurrentBudgetData()
++{
++ for (uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
++ {
++ m_BlockCount[heapIndex] = 0;
++ m_AllocationCount[heapIndex] = 0;
++ m_BlockBytes[heapIndex] = 0;
++ m_AllocationBytes[heapIndex] = 0;
++#if VMA_MEMORY_BUDGET
++ m_VulkanUsage[heapIndex] = 0;
++ m_VulkanBudget[heapIndex] = 0;
++ m_BlockBytesAtBudgetFetch[heapIndex] = 0;
++#endif
++ }
++
++#if VMA_MEMORY_BUDGET
++ m_OperationsSinceBudgetFetch = 0;
++#endif
++}
++
++void VmaCurrentBudgetData::AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
++{
++ m_AllocationBytes[heapIndex] += allocationSize;
++ ++m_AllocationCount[heapIndex];
++#if VMA_MEMORY_BUDGET
++ ++m_OperationsSinceBudgetFetch;
++#endif
++}
++
++void VmaCurrentBudgetData::RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
++{
++ VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize);
++ m_AllocationBytes[heapIndex] -= allocationSize;
++ VMA_ASSERT(m_AllocationCount[heapIndex] > 0);
++ --m_AllocationCount[heapIndex];
++#if VMA_MEMORY_BUDGET
++ ++m_OperationsSinceBudgetFetch;
++#endif
++}
++#endif // _VMA_CURRENT_BUDGET_DATA_FUNCTIONS
++#endif // _VMA_CURRENT_BUDGET_DATA
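++
++// Note on the counters above: m_BlockBytes/m_AllocationBytes track what VMA itself
++// has allocated, while m_VulkanUsage/m_VulkanBudget cache values queried through
++// VK_EXT_memory_budget. m_OperationsSinceBudgetFetch counts allocations and frees
++// performed since the last query, so stale cached budget values can be detected
++// and refreshed before they are reported back to the user.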
++
++#ifndef _VMA_ALLOCATION_OBJECT_ALLOCATOR
++/*
++Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
++*/
++class VmaAllocationObjectAllocator
++{
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocationObjectAllocator)
++public:
++ VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks)
++ : m_Allocator(pAllocationCallbacks, 1024) {}
++
++ template<typename... Types> VmaAllocation Allocate(Types&&... args);
++ void Free(VmaAllocation hAlloc);
++
++private:
++ VMA_MUTEX m_Mutex;
++ VmaPoolAllocator<VmaAllocation_T> m_Allocator;
++};
++
++template<typename... Types>
++VmaAllocation VmaAllocationObjectAllocator::Allocate(Types&&... args)
++{
++ VmaMutexLock mutexLock(m_Mutex);
++ return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
++}
++
++void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
++{
++ VmaMutexLock mutexLock(m_Mutex);
++ m_Allocator.Free(hAlloc);
++}
++#endif // _VMA_ALLOCATION_OBJECT_ALLOCATOR
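++
++// Rationale: VmaAllocation_T objects are small and churn constantly, so they come
++// from a free-list pool (1024 items per pool block, per the constructor above)
++// instead of the general-purpose heap; the mutex makes the pool usable from
++// multiple threads, which a plain VmaPoolAllocator is not.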
++
++#ifndef _VMA_VIRTUAL_BLOCK_T
++struct VmaVirtualBlock_T
++{
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaVirtualBlock_T)
++public:
++ const bool m_AllocationCallbacksSpecified;
++ const VkAllocationCallbacks m_AllocationCallbacks;
++
++ VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo);
++ ~VmaVirtualBlock_T();
++
++ VkResult Init() { return VK_SUCCESS; }
++ bool IsEmpty() const { return m_Metadata->IsEmpty(); }
++ void Free(VmaVirtualAllocation allocation) { m_Metadata->Free((VmaAllocHandle)allocation); }
++ void SetAllocationUserData(VmaVirtualAllocation allocation, void* userData) { m_Metadata->SetAllocationUserData((VmaAllocHandle)allocation, userData); }
++ void Clear() { m_Metadata->Clear(); }
++
++ const VkAllocationCallbacks* GetAllocationCallbacks() const;
++ void GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo);
++ VkResult Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation,
++ VkDeviceSize* outOffset);
++ void GetStatistics(VmaStatistics& outStats) const;
++ void CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const;
++#if VMA_STATS_STRING_ENABLED
++ void BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const;
++#endif
++
++private:
++ VmaBlockMetadata* m_Metadata;
++};
++
++#ifndef _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
++VmaVirtualBlock_T::VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo)
++ : m_AllocationCallbacksSpecified(createInfo.pAllocationCallbacks != VMA_NULL),
++ m_AllocationCallbacks(createInfo.pAllocationCallbacks != VMA_NULL ? *createInfo.pAllocationCallbacks : VmaEmptyAllocationCallbacks)
++{
++ const uint32_t algorithm = createInfo.flags & VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK;
++ switch (algorithm)
++ {
++ case 0:
++ m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true);
++ break;
++ case VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT:
++ m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Linear)(VK_NULL_HANDLE, 1, true);
++ break;
++ default:
++ VMA_ASSERT(0);
++ m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true);
++ }
++
++ m_Metadata->Init(createInfo.size);
++}
++
++VmaVirtualBlock_T::~VmaVirtualBlock_T()
++{
++ // Define macro VMA_DEBUG_LOG_FORMAT to receive the list of unfreed allocations
++ if (!m_Metadata->IsEmpty())
++ m_Metadata->DebugLogAllAllocations();
++ // This is the most important assert in the entire library.
++ // Hitting it means you have some memory leak - unreleased virtual allocations.
++ VMA_ASSERT(m_Metadata->IsEmpty() && "Some virtual allocations were not freed before destruction of this virtual block!");
++
++ vma_delete(GetAllocationCallbacks(), m_Metadata);
++}
++
++const VkAllocationCallbacks* VmaVirtualBlock_T::GetAllocationCallbacks() const
++{
++ return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL;
++}
++
++void VmaVirtualBlock_T::GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo)
++{
++ m_Metadata->GetAllocationInfo((VmaAllocHandle)allocation, outInfo);
++}
++
++VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation,
++ VkDeviceSize* outOffset)
++{
++ VmaAllocationRequest request = {};
++ if (m_Metadata->CreateAllocationRequest(
++ createInfo.size, // allocSize
++ VMA_MAX(createInfo.alignment, (VkDeviceSize)1), // allocAlignment
++ (createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, // upperAddress
++ VMA_SUBALLOCATION_TYPE_UNKNOWN, // allocType - unimportant
++ createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK, // strategy
++ &request))
++ {
++ m_Metadata->Alloc(request,
++ VMA_SUBALLOCATION_TYPE_UNKNOWN, // type - unimportant
++ createInfo.pUserData);
++ outAllocation = (VmaVirtualAllocation)request.allocHandle;
++ if(outOffset)
++ *outOffset = m_Metadata->GetAllocationOffset(request.allocHandle);
++ return VK_SUCCESS;
++ }
++ outAllocation = (VmaVirtualAllocation)VK_NULL_HANDLE;
++ if (outOffset)
++ *outOffset = UINT64_MAX;
++ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
++}
++
++void VmaVirtualBlock_T::GetStatistics(VmaStatistics& outStats) const
++{
++ VmaClearStatistics(outStats);
++ m_Metadata->AddStatistics(outStats);
++}
++
++void VmaVirtualBlock_T::CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const
++{
++ VmaClearDetailedStatistics(outStats);
++ m_Metadata->AddDetailedStatistics(outStats);
++}
++
++#if VMA_STATS_STRING_ENABLED
++void VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const
++{
++ VmaJsonWriter json(GetAllocationCallbacks(), sb);
++ json.BeginObject();
++
++ VmaDetailedStatistics stats;
++ CalculateDetailedStatistics(stats);
++
++ json.WriteString("Stats");
++ VmaPrintDetailedStatistics(json, stats);
++
++ if (detailedMap)
++ {
++ json.WriteString("Details");
++ json.BeginObject();
++ m_Metadata->PrintDetailedMap(json);
++ json.EndObject();
++ }
++
++ json.EndObject();
++}
++#endif // VMA_STATS_STRING_ENABLED
++#endif // _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
++#endif // _VMA_VIRTUAL_BLOCK_T
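++
++// Illustrative sketch (editorial, not part of the upstream source): VmaVirtualBlock_T
++// backs the public virtual-allocation API, which sub-allocates an abstract range with
++// no VkDeviceMemory behind it. Assuming the VMA 3.x entry points declared earlier in
++// this header, typical use looks like:
++//
++//   VmaVirtualBlockCreateInfo blockCreateInfo = {};
++//   blockCreateInfo.size = 1048576; // manage a 1 MiB range
++//   VmaVirtualBlock block;
++//   VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
++//
++//   VmaVirtualAllocationCreateInfo allocCreateInfo = {};
++//   allocCreateInfo.size = 4096;
++//   VmaVirtualAllocation alloc;
++//   VkDeviceSize offset;
++//   res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
++//
++//   vmaVirtualFree(block, alloc);
++//   vmaDestroyVirtualBlock(block);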
++
++
++// Main allocator object.
++struct VmaAllocator_T
++{
++ VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocator_T)
++public:
++ bool m_UseMutex;
++ uint32_t m_VulkanApiVersion;
++ bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
++ bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
++ bool m_UseExtMemoryBudget;
++ bool m_UseAmdDeviceCoherentMemory;
++ bool m_UseKhrBufferDeviceAddress;
++ bool m_UseExtMemoryPriority;
++ VkDevice m_hDevice;
++ VkInstance m_hInstance;
++ bool m_AllocationCallbacksSpecified;
++ VkAllocationCallbacks m_AllocationCallbacks;
++ VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
++ VmaAllocationObjectAllocator m_AllocationObjectAllocator;
++
++ // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so allocations cannot exceed the heap size.
++ uint32_t m_HeapSizeLimitMask;
++
++ VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
++ VkPhysicalDeviceMemoryProperties m_MemProps;
++
++ // Default pools.
++ VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
++ VmaDedicatedAllocationList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES];
++
++ VmaCurrentBudgetData m_Budget;
++ VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects.
++
++ VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
++ VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
++ ~VmaAllocator_T();
++
++ const VkAllocationCallbacks* GetAllocationCallbacks() const
++ {
++ return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL;
++ }
++ const VmaVulkanFunctions& GetVulkanFunctions() const
++ {
++ return m_VulkanFunctions;
++ }
++
++ VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
++
++ VkDeviceSize GetBufferImageGranularity() const
++ {
++ return VMA_MAX(
++ static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
++ m_PhysicalDeviceProperties.limits.bufferImageGranularity);
++ }
++
++ uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
++ uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
++
++ uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
++ {
++ VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
++ return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
++ }
++ // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
++ bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
++ {
++ return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
++ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
++ }
++ // Minimum alignment for all allocations in specific memory type.
++ VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
++ {
++ return IsMemoryTypeNonCoherent(memTypeIndex) ?
++ VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
++ (VkDeviceSize)VMA_MIN_ALIGNMENT;
++ }
++
++ bool IsIntegratedGpu() const
++ {
++ return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
++ }
++
++ uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
++
++ void GetBufferMemoryRequirements(
++ VkBuffer hBuffer,
++ VkMemoryRequirements& memReq,
++ bool& requiresDedicatedAllocation,
++ bool& prefersDedicatedAllocation) const;
++ void GetImageMemoryRequirements(
++ VkImage hImage,
++ VkMemoryRequirements& memReq,
++ bool& requiresDedicatedAllocation,
++ bool& prefersDedicatedAllocation) const;
++ VkResult FindMemoryTypeIndex(
++ uint32_t memoryTypeBits,
++ const VmaAllocationCreateInfo* pAllocationCreateInfo,
++ VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown.
++ uint32_t* pMemoryTypeIndex) const;
++
++ // Main allocation function.
++ VkResult AllocateMemory(
++ const VkMemoryRequirements& vkMemReq,
++ bool requiresDedicatedAllocation,
++ bool prefersDedicatedAllocation,
++ VkBuffer dedicatedBuffer,
++ VkImage dedicatedImage,
++ VkFlags dedicatedBufferImageUsage, // UINT32_MAX if unknown.
++ const VmaAllocationCreateInfo& createInfo,
++ VmaSuballocationType suballocType,
++ size_t allocationCount,
++ VmaAllocation* pAllocations);
++
++ // Main deallocation function.
++ void FreeMemory(
++ size_t allocationCount,
++ const VmaAllocation* pAllocations);
++
++ void CalculateStatistics(VmaTotalStatistics* pStats);
++
++ void GetHeapBudgets(
++ VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount);
++
++#if VMA_STATS_STRING_ENABLED
++ void PrintDetailedMap(class VmaJsonWriter& json);
++#endif
++
++ void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
++
++ VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
++ void DestroyPool(VmaPool pool);
++ void GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats);
++ void CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats);
++
++ void SetCurrentFrameIndex(uint32_t frameIndex);
++ uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
++
++ VkResult CheckPoolCorruption(VmaPool hPool);
++ VkResult CheckCorruption(uint32_t memoryTypeBits);
++
++ // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
++ VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
++ // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
++ void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
++ // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
++ VkResult BindVulkanBuffer(
++ VkDeviceMemory memory,
++ VkDeviceSize memoryOffset,
++ VkBuffer buffer,
++ const void* pNext);
++ // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
++ VkResult BindVulkanImage(
++ VkDeviceMemory memory,
++ VkDeviceSize memoryOffset,
++ VkImage image,
++ const void* pNext);
++
++ VkResult Map(VmaAllocation hAllocation, void** ppData);
++ void Unmap(VmaAllocation hAllocation);
++
++ VkResult BindBufferMemory(
++ VmaAllocation hAllocation,
++ VkDeviceSize allocationLocalOffset,
++ VkBuffer hBuffer,
++ const void* pNext);
++ VkResult BindImageMemory(
++ VmaAllocation hAllocation,
++ VkDeviceSize allocationLocalOffset,
++ VkImage hImage,
++ const void* pNext);
++
++ VkResult FlushOrInvalidateAllocation(
++ VmaAllocation hAllocation,
++ VkDeviceSize offset, VkDeviceSize size,
++ VMA_CACHE_OPERATION op);
++ VkResult FlushOrInvalidateAllocations(
++ uint32_t allocationCount,
++ const VmaAllocation* allocations,
++ const VkDeviceSize* offsets, const VkDeviceSize* sizes,
++ VMA_CACHE_OPERATION op);
++
++ void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
++
++ /*
++ Returns bit mask of memory types that can support defragmentation on GPU as
++ they support creation of required buffer for copy operations.
++ */
++ uint32_t GetGpuDefragmentationMemoryTypeBits();
++
++#if VMA_EXTERNAL_MEMORY
++ VkExternalMemoryHandleTypeFlagsKHR GetExternalMemoryHandleTypeFlags(uint32_t memTypeIndex) const
++ {
++ return m_TypeExternalMemoryHandleTypes[memTypeIndex];
++ }
++#endif // #if VMA_EXTERNAL_MEMORY
++
++private:
++ VkDeviceSize m_PreferredLargeHeapBlockSize;
++
++ VkPhysicalDevice m_PhysicalDevice;
++ VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
++ VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
++#if VMA_EXTERNAL_MEMORY
++ VkExternalMemoryHandleTypeFlagsKHR m_TypeExternalMemoryHandleTypes[VK_MAX_MEMORY_TYPES];
++#endif // #if VMA_EXTERNAL_MEMORY
++
++ VMA_RW_MUTEX m_PoolsMutex;
++ typedef VmaIntrusiveLinkedList<VmaPoolListItemTraits> PoolList;
++ // Protected by m_PoolsMutex.
++ PoolList m_Pools;
++ uint32_t m_NextPoolId;
++
++ VmaVulkanFunctions m_VulkanFunctions;
++
++ // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
++ uint32_t m_GlobalMemoryTypeBits;
++
++ void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
++
++#if VMA_STATIC_VULKAN_FUNCTIONS == 1
++ void ImportVulkanFunctions_Static();
++#endif
++
++ void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
++
++#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
++ void ImportVulkanFunctions_Dynamic();
++#endif
++
++ void ValidateVulkanFunctions();
++
++ VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
++
++ VkResult AllocateMemoryOfType(
++ VmaPool pool,
++ VkDeviceSize size,
++ VkDeviceSize alignment,
++ bool dedicatedPreferred,
++ VkBuffer dedicatedBuffer,
++ VkImage dedicatedImage,
++ VkFlags dedicatedBufferImageUsage,
++ const VmaAllocationCreateInfo& createInfo,
++ uint32_t memTypeIndex,
++ VmaSuballocationType suballocType,
++ VmaDedicatedAllocationList& dedicatedAllocations,
++ VmaBlockVector& blockVector,
++ size_t allocationCount,
++ VmaAllocation* pAllocations);
++
++ // Helper function only to be used inside AllocateDedicatedMemory.
++ VkResult AllocateDedicatedMemoryPage(
++ VmaPool pool,
++ VkDeviceSize size,
++ VmaSuballocationType suballocType,
++ uint32_t memTypeIndex,
++ const VkMemoryAllocateInfo& allocInfo,
++ bool map,
++ bool isUserDataString,
++ bool isMappingAllowed,
++ void* pUserData,
++ VmaAllocation* pAllocation);
++
++ // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
++ VkResult AllocateDedicatedMemory(
++ VmaPool pool,
++ VkDeviceSize size,
++ VmaSuballocationType suballocType,
++ VmaDedicatedAllocationList& dedicatedAllocations,
++ uint32_t memTypeIndex,
++ bool map,
++ bool isUserDataString,
++ bool isMappingAllowed,
++ bool canAliasMemory,
++ void* pUserData,
++ float priority,
++ VkBuffer dedicatedBuffer,
++ VkImage dedicatedImage,
++ VkFlags dedicatedBufferImageUsage,
++ size_t allocationCount,
++ VmaAllocation* pAllocations,
++ const void* pNextChain = nullptr);
++
++ void FreeDedicatedMemory(const VmaAllocation allocation);
++
++ VkResult CalcMemTypeParams(
++ VmaAllocationCreateInfo& outCreateInfo,
++ uint32_t memTypeIndex,
++ VkDeviceSize size,
++ size_t allocationCount);
++ VkResult CalcAllocationParams(
++ VmaAllocationCreateInfo& outCreateInfo,
++ bool dedicatedRequired,
++ bool dedicatedPreferred);
++
++ /*
++ Calculates and returns bit mask of memory types that can support defragmentation
++ on GPU as they support creation of required buffer for copy operations.
++ */
++ uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
++ uint32_t CalculateGlobalMemoryTypeBits() const;
++
++ bool GetFlushOrInvalidateRange(
++ VmaAllocation allocation,
++ VkDeviceSize offset, VkDeviceSize size,
++ VkMappedMemoryRange& outRange) const;
++
++#if VMA_MEMORY_BUDGET
++ void UpdateVulkanBudget();
++#endif // #if VMA_MEMORY_BUDGET
++};
++
++
++#ifndef _VMA_MEMORY_FUNCTIONS
++static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
++{
++ return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
++}
++
++static void VmaFree(VmaAllocator hAllocator, void* ptr)
++{
++ VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
++}
++
++template<typename T>
++static T* VmaAllocate(VmaAllocator hAllocator)
++{
++ return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
++}
++
++template<typename T>
++static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
++{
++ return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
++}
++
++template<typename T>
++static void vma_delete(VmaAllocator hAllocator, T* ptr)
++{
++ if(ptr != VMA_NULL)
++ {
++ ptr->~T();
++ VmaFree(hAllocator, ptr);
++ }
++}
++
++template<typename T>
++static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
++{
++ if(ptr != VMA_NULL)
++ {
++ for(size_t i = count; i--; )
++ ptr[i].~T();
++ VmaFree(hAllocator, ptr);
++ }
++}
++#endif // _VMA_MEMORY_FUNCTIONS
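++
++// These helpers pair manual construction/destruction with the user-supplied
++// VkAllocationCallbacks: vma_delete/vma_delete_array run the destructor(s)
++// explicitly and then return the raw storage through VmaFree, so VMA's internal
++// objects never touch global operator new/delete.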
++
++#ifndef _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS
++VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator)
++ : m_pMetadata(VMA_NULL),
++ m_MemoryTypeIndex(UINT32_MAX),
++ m_Id(0),
++ m_hMemory(VK_NULL_HANDLE),
++ m_MapCount(0),
++ m_pMappedData(VMA_NULL) {}
++
++VmaDeviceMemoryBlock::~VmaDeviceMemoryBlock()
++{
++ VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
++ VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
++}
++
++void VmaDeviceMemoryBlock::Init(
++ VmaAllocator hAllocator,
++ VmaPool hParentPool,
++ uint32_t newMemoryTypeIndex,
++ VkDeviceMemory newMemory,
++ VkDeviceSize newSize,
++ uint32_t id,
++ uint32_t algorithm,
++ VkDeviceSize bufferImageGranularity)
++{
++ VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
++
++ m_hParentPool = hParentPool;
++ m_MemoryTypeIndex = newMemoryTypeIndex;
++ m_Id = id;
++ m_hMemory = newMemory;
++
++ switch (algorithm)
++ {
++ case 0:
++ m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(),
++ bufferImageGranularity, false); // isVirtual
++ break;
++ case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
++ m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator->GetAllocationCallbacks(),
++ bufferImageGranularity, false); // isVirtual
++ break;
++ default:
++ VMA_ASSERT(0);
++ m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(),
++ bufferImageGranularity, false); // isVirtual
++ }
++ m_pMetadata->Init(newSize);
++}
++
++void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
++{
++ // Define macro VMA_DEBUG_LOG_FORMAT to receive the list of unfreed allocations
++ if (!m_pMetadata->IsEmpty())
++ m_pMetadata->DebugLogAllAllocations();
++ // This is the most important assert in the entire library.
++ // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
++ VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
++
++ VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
++ allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
++ m_hMemory = VK_NULL_HANDLE;
++
++ vma_delete(allocator, m_pMetadata);
++ m_pMetadata = VMA_NULL;
++}
++
++void VmaDeviceMemoryBlock::PostAlloc(VmaAllocator hAllocator)
++{
++ VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
++ m_MappingHysteresis.PostAlloc();
++}
++
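++// PostAlloc()/PostFree() drive m_MappingHysteresis: after the user map count drops
++// to zero, the block may keep an "extra mapping" alive for a few more alloc/free
++// cycles so workloads that map and unmap in quick succession do not pay for a
++// vkMapMemory/vkUnmapMemory round-trip each time. The unmap below happens only
++// once the hysteresis releases that extra mapping and m_MapCount is zero.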
++void VmaDeviceMemoryBlock::PostFree(VmaAllocator hAllocator)
++{
++ VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
++ if(m_MappingHysteresis.PostFree())
++ {
++ VMA_ASSERT(m_MappingHysteresis.GetExtraMapping() == 0);
++ if (m_MapCount == 0)
++ {
++ m_pMappedData = VMA_NULL;
++ (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
++ }
++ }
++}
++
++bool VmaDeviceMemoryBlock::Validate() const
++{
++ VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
++ (m_pMetadata->GetSize() != 0));
++
++ return m_pMetadata->Validate();
++}
++
++VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
++{
++ void* pData = nullptr;
++ VkResult res = Map(hAllocator, 1, &pData);
++ if (res != VK_SUCCESS)
++ {
++ return res;
++ }
++
++ res = m_pMetadata->CheckCorruption(pData);
++
++ Unmap(hAllocator, 1);
++
++ return res;
++}
++
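++// Map() reference-counts mappings of the whole VkDeviceMemory block: only the
++// transition from zero total mappings (user maps plus the hysteresis extra
++// mapping) calls vkMapMemory; later callers just receive the cached
++// m_pMappedData. Unmap() mirrors this and calls vkUnmapMemory only when the
++// total count returns to zero.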
++VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
++{
++ if (count == 0)
++ {
++ return VK_SUCCESS;
++ }
++
++ VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
++ const uint32_t oldTotalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping();
++ m_MappingHysteresis.PostMap();
++ if (oldTotalMapCount != 0)
++ {
++ m_MapCount += count;
++ VMA_ASSERT(m_pMappedData != VMA_NULL);
++ if (ppData != VMA_NULL)
++ {
++ *ppData = m_pMappedData;
++ }
++ return VK_SUCCESS;
++ }
++ else
++ {
++ VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
++ hAllocator->m_hDevice,
++ m_hMemory,
++ 0, // offset
++ VK_WHOLE_SIZE,
++ 0, // flags
++ &m_pMappedData);
++ if (result == VK_SUCCESS)
++ {
++ if (ppData != VMA_NULL)
++ {
++ *ppData = m_pMappedData;
++ }
++ m_MapCount = count;
++ }
++ return result;
++ }
++}
++
++void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
++{
++ if (count == 0)
++ {
++ return;
++ }
++
++ VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
++ if (m_MapCount >= count)
++ {
++ m_MapCount -= count;
++ const uint32_t totalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping();
++ if (totalMapCount == 0)
++ {
++ m_pMappedData = VMA_NULL;
++ (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
++ }
++ m_MappingHysteresis.PostUnmap();
++ }
++ else
++ {
++ VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
++ }
++}
++
++VkResult VmaDeviceMemoryBlock::WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
++{
++ VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
++
++ void* pData;
++ VkResult res = Map(hAllocator, 1, &pData);
++ if (res != VK_SUCCESS)
++ {
++ return res;
++ }
++
++ VmaWriteMagicValue(pData, allocOffset + allocSize);
++
++ Unmap(hAllocator, 1);
++ return VK_SUCCESS;
++}
++
++VkResult VmaDeviceMemoryBlock::ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
++{
++ VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
++
++ void* pData;
++ VkResult res = Map(hAllocator, 1, &pData);
++ if (res != VK_SUCCESS)
++ {
++ return res;
++ }
++
++ if (!VmaValidateMagicValue(pData, allocOffset + allocSize))
++ {
++ VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
++ }
++
++ Unmap(hAllocator, 1);
++ return VK_SUCCESS;
++}
++
++VkResult VmaDeviceMemoryBlock::BindBufferMemory(
++ const VmaAllocator hAllocator,
++ const VmaAllocation hAllocation,
++ VkDeviceSize allocationLocalOffset,
++ VkBuffer hBuffer,
++ const void* pNext)
++{
++ VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
++ hAllocation->GetBlock() == this);
++ VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
++ "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
++ const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
++ // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
++ VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
++ return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
++}
++
++VkResult VmaDeviceMemoryBlock::BindImageMemory(
++ const VmaAllocator hAllocator,
++ const VmaAllocation hAllocation,
++ VkDeviceSize allocationLocalOffset,
++ VkImage hImage,
++ const void* pNext)
++{
++ VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
++ hAllocation->GetBlock() == this);
++ VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
++ "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
++ const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
++ // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
++ VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
++ return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
++}
++#endif // _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS
++
++#ifndef _VMA_ALLOCATION_T_FUNCTIONS
++VmaAllocation_T::VmaAllocation_T(bool mappingAllowed)
++ : m_Alignment{ 1 },
++ m_Size{ 0 },
++ m_pUserData{ VMA_NULL },
++ m_pName{ VMA_NULL },
++ m_MemoryTypeIndex{ 0 },
++ m_Type{ (uint8_t)ALLOCATION_TYPE_NONE },
++ m_SuballocationType{ (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN },
++ m_MapCount{ 0 },
++ m_Flags{ 0 }
++{
++ if(mappingAllowed)
++ m_Flags |= (uint8_t)FLAG_MAPPING_ALLOWED;
++
++#if VMA_STATS_STRING_ENABLED
++ m_BufferImageUsage = 0;
++#endif
++}
++
++VmaAllocation_T::~VmaAllocation_T()
++{
++ VMA_ASSERT(m_MapCount == 0 && "Allocation was not unmapped before destruction.");
++
++ // Check if owned string was freed.
++ VMA_ASSERT(m_pName == VMA_NULL);
++}
++
++void VmaAllocation_T::InitBlockAllocation(
++ VmaDeviceMemoryBlock* block,
++ VmaAllocHandle allocHandle,
++ VkDeviceSize alignment,
++ VkDeviceSize size,
++ uint32_t memoryTypeIndex,
++ VmaSuballocationType suballocationType,
++ bool mapped)
++{
++ VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
++ VMA_ASSERT(block != VMA_NULL);
++ m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
++ m_Alignment = alignment;
++ m_Size = size;
++ m_MemoryTypeIndex = memoryTypeIndex;
++ if(mapped)
++ {
++ VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
++ m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP;
++ }
++ m_SuballocationType = (uint8_t)suballocationType;
++ m_BlockAllocation.m_Block = block;
++ m_BlockAllocation.m_AllocHandle = allocHandle;
++}
++
++void VmaAllocation_T::InitDedicatedAllocation(
++ VmaPool hParentPool,
++ uint32_t memoryTypeIndex,
++ VkDeviceMemory hMemory,
++ VmaSuballocationType suballocationType,
++ void* pMappedData,
++ VkDeviceSize size)
++{
++ VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
++ VMA_ASSERT(hMemory != VK_NULL_HANDLE);
++ m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
++ m_Alignment = 0;
++ m_Size = size;
++ m_MemoryTypeIndex = memoryTypeIndex;
++ m_SuballocationType = (uint8_t)suballocationType;
++ if(pMappedData != VMA_NULL)
++ {
++ VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
++ m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP;
++ }
++ m_DedicatedAllocation.m_hParentPool = hParentPool;
++ m_DedicatedAllocation.m_hMemory = hMemory;
++ m_DedicatedAllocation.m_pMappedData = pMappedData;
++ m_DedicatedAllocation.m_Prev = VMA_NULL;
++ m_DedicatedAllocation.m_Next = VMA_NULL;
++}
++
++void VmaAllocation_T::SetName(VmaAllocator hAllocator, const char* pName)
++{
++ VMA_ASSERT(pName == VMA_NULL || pName != m_pName);
++
++ FreeName(hAllocator);
++
++ if (pName != VMA_NULL)
++ m_pName = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), pName);
++}
++
++uint8_t VmaAllocation_T::SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation)
++{
++ VMA_ASSERT(allocation != VMA_NULL);
++ VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
++ VMA_ASSERT(allocation->m_Type == ALLOCATION_TYPE_BLOCK);
++
++ if (m_MapCount != 0)
++ m_BlockAllocation.m_Block->Unmap(hAllocator, m_MapCount);
++
++ m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, allocation);
++ VMA_SWAP(m_BlockAllocation, allocation->m_BlockAllocation);
++ m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, this);
++
++#if VMA_STATS_STRING_ENABLED
++ VMA_SWAP(m_BufferImageUsage, allocation->m_BufferImageUsage);
++#endif
++ return m_MapCount;
++}
++
++VmaAllocHandle VmaAllocation_T::GetAllocHandle() const
++{
++ switch (m_Type)
++ {
++ case ALLOCATION_TYPE_BLOCK:
++ return m_BlockAllocation.m_AllocHandle;
++ case ALLOCATION_TYPE_DEDICATED:
++ return VK_NULL_HANDLE;
++ default:
++ VMA_ASSERT(0);
++ return VK_NULL_HANDLE;
++ }
++}
++
++VkDeviceSize VmaAllocation_T::GetOffset() const
++{
++ switch (m_Type)
++ {
++ case ALLOCATION_TYPE_BLOCK:
++ return m_BlockAllocation.m_Block->m_pMetadata->GetAllocationOffset(m_BlockAllocation.m_AllocHandle);
++ case ALLOCATION_TYPE_DEDICATED:
++ return 0;
++ default:
++ VMA_ASSERT(0);
++ return 0;
++ }
++}
++
++VmaPool VmaAllocation_T::GetParentPool() const
++{
++ switch (m_Type)
++ {
++ case ALLOCATION_TYPE_BLOCK:
++ return m_BlockAllocation.m_Block->GetParentPool();
++ case ALLOCATION_TYPE_DEDICATED:
++ return m_DedicatedAllocation.m_hParentPool;
++ default:
++ VMA_ASSERT(0);
++ return VK_NULL_HANDLE;
++ }
++}
++
++VkDeviceMemory VmaAllocation_T::GetMemory() const
++{
++ switch (m_Type)
++ {
++ case ALLOCATION_TYPE_BLOCK:
++ return m_BlockAllocation.m_Block->GetDeviceMemory();
++ case ALLOCATION_TYPE_DEDICATED:
++ return m_DedicatedAllocation.m_hMemory;
++ default:
++ VMA_ASSERT(0);
++ return VK_NULL_HANDLE;
++ }
++}
++
++void* VmaAllocation_T::GetMappedData() const
++{
++ switch (m_Type)
++ {
++ case ALLOCATION_TYPE_BLOCK:
++ if (m_MapCount != 0 || IsPersistentMap())
++ {
++ void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
++ VMA_ASSERT(pBlockData != VMA_NULL);
++ return (char*)pBlockData + GetOffset();
++ }
++ else
++ {
++ return VMA_NULL;
++ }
++ break;
++ case ALLOCATION_TYPE_DEDICATED:
++ VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0 || IsPersistentMap()));
++ return m_DedicatedAllocation.m_pMappedData;
++ default:
++ VMA_ASSERT(0);
++ return VMA_NULL;
++ }
++}
++
++void VmaAllocation_T::BlockAllocMap()
++{
++ VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
++ VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
++
++ if (m_MapCount < 0xFF)
++ {
++ ++m_MapCount;
++ }
++ else
++ {
++ VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
++ }
++}
++
++void VmaAllocation_T::BlockAllocUnmap()
++{
++ VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
++
++ if (m_MapCount > 0)
++ {
++ --m_MapCount;
++ }
++ else
++ {
++ VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
++ }
++}
++
++VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
++{
++ VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
++ VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
++
++ if (m_MapCount != 0 || IsPersistentMap())
++ {
++ if (m_MapCount < 0xFF)
++ {
++ VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
++ *ppData = m_DedicatedAllocation.m_pMappedData;
++ ++m_MapCount;
++ return VK_SUCCESS;
++ }
++ else
++ {
++ VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
++ return VK_ERROR_MEMORY_MAP_FAILED;
++ }
++ }
++ else
++ {
++ VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
++ hAllocator->m_hDevice,
++ m_DedicatedAllocation.m_hMemory,
++ 0, // offset
++ VK_WHOLE_SIZE,
++ 0, // flags
++ ppData);
++ if (result == VK_SUCCESS)
++ {
++ m_DedicatedAllocation.m_pMappedData = *ppData;
++ m_MapCount = 1;
++ }
++ return result;
++ }
++}
++
++void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
++{
++ VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
++
++ if (m_MapCount > 0)
++ {
++ --m_MapCount;
++ if (m_MapCount == 0 && !IsPersistentMap())
++ {
++ m_DedicatedAllocation.m_pMappedData = VMA_NULL;
++ (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
++ hAllocator->m_hDevice,
++ m_DedicatedAllocation.m_hMemory);
++ }
++ }
++ else
++ {
++ VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
++ }
++}
++
++#if VMA_STATS_STRING_ENABLED
++void VmaAllocation_T::InitBufferImageUsage(uint32_t bufferImageUsage)
++{
++ VMA_ASSERT(m_BufferImageUsage == 0);
++ m_BufferImageUsage = bufferImageUsage;
++}
++
++void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
++{
++ json.WriteString("Type");
++ json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
++
++ json.WriteString("Size");
++ json.WriteNumber(m_Size);
++ json.WriteString("Usage");
++ json.WriteNumber(m_BufferImageUsage);
++
++ if (m_pUserData != VMA_NULL)
++ {
++ json.WriteString("CustomData");
++ json.BeginString();
++ json.ContinueString_Pointer(m_pUserData);
++ json.EndString();
++ }
++ if (m_pName != VMA_NULL)
++ {
++ json.WriteString("Name");
++ json.WriteString(m_pName);
++ }
++}
++#endif // VMA_STATS_STRING_ENABLED
++
++void VmaAllocation_T::FreeName(VmaAllocator hAllocator)
++{
++ if(m_pName)
++ {
++ VmaFreeString(hAllocator->GetAllocationCallbacks(), m_pName);
++ m_pName = VMA_NULL;
++ }
++}
++#endif // _VMA_ALLOCATION_T_FUNCTIONS
++
++#ifndef _VMA_BLOCK_VECTOR_FUNCTIONS
++VmaBlockVector::VmaBlockVector(
++ VmaAllocator hAllocator,
++ VmaPool hParentPool,
++ uint32_t memoryTypeIndex,
++ VkDeviceSize preferredBlockSize,
++ size_t minBlockCount,
++ size_t maxBlockCount,
++ VkDeviceSize bufferImageGranularity,
++ bool explicitBlockSize,
++ uint32_t algorithm,
++ float priority,
++ VkDeviceSize minAllocationAlignment,
++ void* pMemoryAllocateNext)
++ : m_hAllocator(hAllocator),
++ m_hParentPool(hParentPool),
++ m_MemoryTypeIndex(memoryTypeIndex),
++ m_PreferredBlockSize(preferredBlockSize),
++ m_MinBlockCount(minBlockCount),
++ m_MaxBlockCount(maxBlockCount),
++ m_BufferImageGranularity(bufferImageGranularity),
++ m_ExplicitBlockSize(explicitBlockSize),
++ m_Algorithm(algorithm),
++ m_Priority(priority),
++ m_MinAllocationAlignment(minAllocationAlignment),
++ m_pMemoryAllocateNext(pMemoryAllocateNext),
++ m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
++ m_NextBlockId(0) {}
++
++VmaBlockVector::~VmaBlockVector()
++{
++ for (size_t i = m_Blocks.size(); i--; )
++ {
++ m_Blocks[i]->Destroy(m_hAllocator);
++ vma_delete(m_hAllocator, m_Blocks[i]);
++ }
++}
++
++VkResult VmaBlockVector::CreateMinBlocks()
++{
++ for (size_t i = 0; i < m_MinBlockCount; ++i)
++ {
++ VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
++ if (res != VK_SUCCESS)
++ {
++ return res;
++ }
++ }
++ return VK_SUCCESS;
++}
++
++void VmaBlockVector::AddStatistics(VmaStatistics& inoutStats)
++{
++ VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
++
++ const size_t blockCount = m_Blocks.size();
++ for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
++ {
++ const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
++ VMA_ASSERT(pBlock);
++ VMA_HEAVY_ASSERT(pBlock->Validate());
++ pBlock->m_pMetadata->AddStatistics(inoutStats);
++ }
++}
++
++void VmaBlockVector::AddDetailedStatistics(VmaDetailedStatistics& inoutStats)
++{
++ VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
++
++ const size_t blockCount = m_Blocks.size();
++ for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
++ {
++ const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
++ VMA_ASSERT(pBlock);
++ VMA_HEAVY_ASSERT(pBlock->Validate());
++ pBlock->m_pMetadata->AddDetailedStatistics(inoutStats);
++ }
++}
++
++bool VmaBlockVector::IsEmpty()
++{
++ VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
++ return m_Blocks.empty();
++}
++
++bool VmaBlockVector::IsCorruptionDetectionEnabled() const
++{
++ const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
++ return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
++ (VMA_DEBUG_MARGIN > 0) &&
++ (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
++ (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
++}
++
++VkResult VmaBlockVector::Allocate(
++ VkDeviceSize size,
++ VkDeviceSize alignment,
++ const VmaAllocationCreateInfo& createInfo,
++ VmaSuballocationType suballocType,
++ size_t allocationCount,
++ VmaAllocation* pAllocations)
++{
++ size_t allocIndex;
++ VkResult res = VK_SUCCESS;
++
++ alignment = VMA_MAX(alignment, m_MinAllocationAlignment);
++
++ if (IsCorruptionDetectionEnabled())
++ {
++ size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
++ alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
++ }
++
++ {
++ VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
++ for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
++ {
++ res = AllocatePage(
++ size,
++ alignment,
++ createInfo,
++ suballocType,
++ pAllocations + allocIndex);
++ if (res != VK_SUCCESS)
++ {
++ break;
++ }
++ }
++ }
++
++ if (res != VK_SUCCESS)
++ {
++ // Free all already created allocations.
++ while (allocIndex--)
++ Free(pAllocations[allocIndex]);
++ memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
++ }
++
++ return res;
++}
++
++VkResult VmaBlockVector::AllocatePage(
++ VkDeviceSize size,
++ VkDeviceSize alignment,
++ const VmaAllocationCreateInfo& createInfo,
++ VmaSuballocationType suballocType,
++ VmaAllocation* pAllocation)
++{
++ const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
++
++ VkDeviceSize freeMemory;
++ {
++ const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
++ VmaBudget heapBudget = {};
++ m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1);
++ freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
++ }
++
++ const bool canFallbackToDedicated = !HasExplicitBlockSize() &&
++ (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0;
++ const bool canCreateNewBlock =
++ ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
++ (m_Blocks.size() < m_MaxBlockCount) &&
++ (freeMemory >= size || !canFallbackToDedicated);
++ uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
++
++ // Upper address can only be used with the linear allocator and within a single memory block.
++ if (isUpperAddress &&
++ (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
++ {
++ return VK_ERROR_FEATURE_NOT_PRESENT;
++ }
++
++ // Early reject: requested allocation size is larger than the maximum block size for this block vector.
++ if (size + VMA_DEBUG_MARGIN > m_PreferredBlockSize)
++ {
++ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
++ }
++
++ // 1. Search existing allocations. Try to allocate.
++ if (m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
++ {
++ // Use only last block.
++ if (!m_Blocks.empty())
++ {
++ VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
++ VMA_ASSERT(pCurrBlock);
++ VkResult res = AllocateFromBlock(
++ pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
++ if (res == VK_SUCCESS)
++ {
++ VMA_DEBUG_LOG_FORMAT(" Returned from last block #%u", pCurrBlock->GetId());
++ IncrementallySortBlocks();
++ return VK_SUCCESS;
++ }
++ }
++ }
++ else
++ {
++ if (strategy != VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT) // MIN_MEMORY or default
++ {
++ const bool isHostVisible =
++ (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
++ if(isHostVisible)
++ {
++ const bool isMappingAllowed = (createInfo.flags &
++ (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0;
++ /*
++ For non-mappable allocations, check blocks that are not mapped first.
++ For mappable allocations, check blocks that are already mapped first.
++ This way, with many blocks, mappable and non-mappable allocations end up separated,
++ hopefully limiting the number of blocks that are mapped, which helps tools like RenderDoc.
++ */
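++ // Concretely: pass 0 (mappingI == 0) visits only blocks whose mapped state
++ // already matches what this allocation wants (mapped blocks for mappable
++ // allocations, unmapped blocks otherwise); pass 1 falls back to the rest.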
++ for(size_t mappingI = 0; mappingI < 2; ++mappingI)
++ {
++ // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
++ for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
++ {
++ VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
++ VMA_ASSERT(pCurrBlock);
++ const bool isBlockMapped = pCurrBlock->GetMappedData() != VMA_NULL;
++ if((mappingI == 0) == (isMappingAllowed == isBlockMapped))
++ {
++ VkResult res = AllocateFromBlock(
++ pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
++ if (res == VK_SUCCESS)
++ {
++ VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%u", pCurrBlock->GetId());
++ IncrementallySortBlocks();
++ return VK_SUCCESS;
++ }
++ }
++ }
++ }
++ }
++ else
++ {
++ // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
++ for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
++ {
++ VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
++ VMA_ASSERT(pCurrBlock);
++ VkResult res = AllocateFromBlock(
++ pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
++ if (res == VK_SUCCESS)
++ {
++ VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%u", pCurrBlock->GetId());
++ IncrementallySortBlocks();
++ return VK_SUCCESS;
++ }
++ }
++ }
++ }
++ else // VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
++ {
++ // Backward order in m_Blocks - prefer blocks with largest amount of free space.
++ for (size_t blockIndex = m_Blocks.size(); blockIndex--; )
++ {
++ VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
++ VMA_ASSERT(pCurrBlock);
++ VkResult res = AllocateFromBlock(pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
++ if (res == VK_SUCCESS)
++ {
++ VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%u", pCurrBlock->GetId());
++ IncrementallySortBlocks();
++ return VK_SUCCESS;
++ }
++ }
++ }
++ }
++
++ // 2. Try to create new block.
++ if (canCreateNewBlock)
++ {
++ // Calculate optimal size for new block.
++ VkDeviceSize newBlockSize = m_PreferredBlockSize;
++ uint32_t newBlockSizeShift = 0;
++ const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
++
++ if (!m_ExplicitBlockSize)
++ {
++ // Allocate 1/8, 1/4, 1/2 as first blocks.
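++ // Worked example: with m_PreferredBlockSize = 256 MiB, no existing blocks and a
++ // small request, the loop below settles on a 32 MiB (1/8) first block; each new
++ // block after that may double toward the full preferred size as the largest
++ // existing block grows.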
++ const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
++ for (uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
++ {
++ const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
++ if (smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
++ {
++ newBlockSize = smallerNewBlockSize;
++ ++newBlockSizeShift;
++ }
++ else
++ {
++ break;
++ }
++ }
++ }
++
++ size_t newBlockIndex = 0;
++ VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
++ CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
++ // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
++ if (!m_ExplicitBlockSize)
++ {
++ while (res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
++ {
++ const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
++ if (smallerNewBlockSize >= size)
++ {
++ newBlockSize = smallerNewBlockSize;
++ ++newBlockSizeShift;
++ res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
++ CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
++ }
++ else
++ {
++ break;
++ }
++ }
++ }
++
++ if (res == VK_SUCCESS)
++ {
++ VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
++ VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
++
++ res = AllocateFromBlock(
++ pBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
++ if (res == VK_SUCCESS)
++ {
++ VMA_DEBUG_LOG_FORMAT(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
++ IncrementallySortBlocks();
++ return VK_SUCCESS;
++ }
++ else
++ {
++ // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
++ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
++ }
++ }
++ }
++
++ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
++}
++
++void VmaBlockVector::Free(const VmaAllocation hAllocation)
++{
++ VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
++
++ bool budgetExceeded = false;
++ {
++ const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
++ VmaBudget heapBudget = {};
++ m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1);
++ budgetExceeded = heapBudget.usage >= heapBudget.budget;
++ }
++
++ // Scope for lock.
++ {
++ VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
++
++ VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
++
++ if (IsCorruptionDetectionEnabled())
++ {
++ VkResult res = pBlock->ValidateMagicValueAfterAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
++ VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
++ }
++
++ if (hAllocation->IsPersistentMap())
++ {
++ pBlock->Unmap(m_hAllocator, 1);
++ }
++
++ const bool hadEmptyBlockBeforeFree = HasEmptyBlock();
++ pBlock->m_pMetadata->Free(hAllocation->GetAllocHandle());
++ pBlock->PostFree(m_hAllocator);
++ VMA_HEAVY_ASSERT(pBlock->Validate());
++
++ VMA_DEBUG_LOG_FORMAT(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
++
++ const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
++ // pBlock became empty after this deallocation.
++ if (pBlock->m_pMetadata->IsEmpty())
++ {
++ // Already had empty block. We don't want to have two, so delete this one.
++ if ((hadEmptyBlockBeforeFree || budgetExceeded) && canDeleteBlock)
++ {
++ pBlockToDelete = pBlock;
++ Remove(pBlock);
++ }
++ // else: We now have one empty block - leave it. This hysteresis avoids allocating and freeing a whole block back and forth.
++ }
++ // pBlock didn't become empty, but we have another empty block - find and free that one.
++ // (This is optional; purely a heuristic.)
++ else if (hadEmptyBlockBeforeFree && canDeleteBlock)
++ {
++ VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
++ if (pLastBlock->m_pMetadata->IsEmpty())
++ {
++ pBlockToDelete = pLastBlock;
++ m_Blocks.pop_back();
++ }
++ }
++
++ IncrementallySortBlocks();
++ }
++
++ // Destruction of a free block. Deferred until this point, outside of mutex
++ // lock, for performance reasons.
++ if (pBlockToDelete != VMA_NULL)
++ {
++ VMA_DEBUG_LOG_FORMAT(" Deleted empty block #%u", pBlockToDelete->GetId());
++ pBlockToDelete->Destroy(m_hAllocator);
++ vma_delete(m_hAllocator, pBlockToDelete);
++ }
++
++ m_hAllocator->m_Budget.RemoveAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), hAllocation->GetSize());
++ m_hAllocator->m_AllocationObjectAllocator.Free(hAllocation);
++}
++
++VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
++{
++ VkDeviceSize result = 0;
++ for (size_t i = m_Blocks.size(); i--; )
++ {
++ result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
++ if (result >= m_PreferredBlockSize)
++ {
++ break;
++ }
++ }
++ return result;
++}
++
++void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
++{
++ for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
++ {
++ if (m_Blocks[blockIndex] == pBlock)
++ {
++ VmaVectorRemove(m_Blocks, blockIndex);
++ return;
++ }
++ }
++ VMA_ASSERT(0);
++}
++
++void VmaBlockVector::IncrementallySortBlocks()
++{
++ if (!m_IncrementalSort)
++ return;
++ if (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
++ {
++ // Bubble sort only until first swap.
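++ // One adjacent swap per call keeps m_Blocks approximately sorted by free
++ // space without paying for a full sort on the hot allocation/free path;
++ // SortByFreeSize() below performs the full ordering when defragmentation
++ // asks for it.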
++ for (size_t i = 1; i < m_Blocks.size(); ++i)
++ {
++ if (m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
++ {
++ VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
++ return;
++ }
++ }
++ }
++}
++
++void VmaBlockVector::SortByFreeSize()
++{
++ VMA_SORT(m_Blocks.begin(), m_Blocks.end(),
++ [](VmaDeviceMemoryBlock* b1, VmaDeviceMemoryBlock* b2) -> bool
++ {
++ return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize();
++ });
++}
++
++VkResult VmaBlockVector::AllocateFromBlock(
++ VmaDeviceMemoryBlock* pBlock,
++ VkDeviceSize size,
++ VkDeviceSize alignment,
++ VmaAllocationCreateFlags allocFlags,
++ void* pUserData,
++ VmaSuballocationType suballocType,
++ uint32_t strategy,
++ VmaAllocation* pAllocation)
++{
++ const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
++
++ VmaAllocationRequest currRequest = {};
++ if (pBlock->m_pMetadata->CreateAllocationRequest(
++ size,
++ alignment,
++ isUpperAddress,
++ suballocType,
++ strategy,
++ &currRequest))
++ {
++ return CommitAllocationRequest(currRequest, pBlock, alignment, allocFlags, pUserData, suballocType, pAllocation);
++ }
++ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
++}
++
++VkResult VmaBlockVector::CommitAllocationRequest(
++ VmaAllocationRequest& allocRequest,
++ VmaDeviceMemoryBlock* pBlock,
++ VkDeviceSize alignment,
++ VmaAllocationCreateFlags allocFlags,
++ void* pUserData,
++ VmaSuballocationType suballocType,
++ VmaAllocation* pAllocation)
++{
++ const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
++ const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
++ const bool isMappingAllowed = (allocFlags &
++ (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0;
++
++ pBlock->PostAlloc(m_hAllocator);
++ // Allocate from pBlock.
++ if (mapped)
++ {
++ VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
++ if (res != VK_SUCCESS)
++ {
++ return res;
++ }
++ }
++
++ *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(isMappingAllowed);
++ pBlock->m_pMetadata->Alloc(allocRequest, suballocType, *pAllocation);
++ (*pAllocation)->InitBlockAllocation(
++ pBlock,
++ allocRequest.allocHandle,
++ alignment,
++ allocRequest.size, // Not size, as actual allocation size may be larger than requested!
++ m_MemoryTypeIndex,
++ suballocType,
++ mapped);
++ VMA_HEAVY_ASSERT(pBlock->Validate());
++ if (isUserDataString)
++ (*pAllocation)->SetName(m_hAllocator, (const char*)pUserData);
++ else
++ (*pAllocation)->SetUserData(m_hAllocator, pUserData);
++ m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), allocRequest.size);
++ if (VMA_DEBUG_INITIALIZE_ALLOCATIONS)
++ {
++ m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
++ }
++ if (IsCorruptionDetectionEnabled())
++ {
++ VkResult res = pBlock->WriteMagicValueAfterAllocation(m_hAllocator, (*pAllocation)->GetOffset(), allocRequest.size);
++ VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
++ }
++ return VK_SUCCESS;
++}
++
++VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
++{
++ VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
++ allocInfo.pNext = m_pMemoryAllocateNext;
++ allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
++ allocInfo.allocationSize = blockSize;
++
++#if VMA_BUFFER_DEVICE_ADDRESS
++ // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
++ VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
++ if (m_hAllocator->m_UseKhrBufferDeviceAddress)
++ {
++ allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
++ VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
++ }
++#endif // VMA_BUFFER_DEVICE_ADDRESS
++
++#if VMA_MEMORY_PRIORITY
++ VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
++ if (m_hAllocator->m_UseExtMemoryPriority)
++ {
++ VMA_ASSERT(m_Priority >= 0.f && m_Priority <= 1.f);
++ priorityInfo.priority = m_Priority;
++ VmaPnextChainPushFront(&allocInfo, &priorityInfo);
++ }
++#endif // VMA_MEMORY_PRIORITY
++
++#if VMA_EXTERNAL_MEMORY
++ // Attach VkExportMemoryAllocateInfoKHR if necessary.
++ VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
++ exportMemoryAllocInfo.handleTypes = m_hAllocator->GetExternalMemoryHandleTypeFlags(m_MemoryTypeIndex);
++ if (exportMemoryAllocInfo.handleTypes != 0)
++ {
++ VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
++ }
++#endif // VMA_EXTERNAL_MEMORY
++
++ VkDeviceMemory mem = VK_NULL_HANDLE;
++ VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
++ if (res < 0)
++ {
++ return res;
++ }
++
++ // New VkDeviceMemory successfully created.
++
++ // Create new Allocation for it.
++ VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
++ pBlock->Init(
++ m_hAllocator,
++ m_hParentPool,
++ m_MemoryTypeIndex,
++ mem,
++ allocInfo.allocationSize,
++ m_NextBlockId++,
++ m_Algorithm,
++ m_BufferImageGranularity);
++
++ m_Blocks.push_back(pBlock);
++ if (pNewBlockIndex != VMA_NULL)
++ {
++ *pNewBlockIndex = m_Blocks.size() - 1;
++ }
++
++ return VK_SUCCESS;
++}
++
++bool VmaBlockVector::HasEmptyBlock()
++{
++ for (size_t index = 0, count = m_Blocks.size(); index < count; ++index)
++ {
++ VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
++ if (pBlock->m_pMetadata->IsEmpty())
++ {
++ return true;
++ }
++ }
++ return false;
++}
++
++#if VMA_STATS_STRING_ENABLED
++void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
++{
++ VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
++
++ json.BeginObject();
++ for (size_t i = 0; i < m_Blocks.size(); ++i)
++ {
++ json.BeginString();
++ json.ContinueString(m_Blocks[i]->GetId());
++ json.EndString();
++
++ json.BeginObject();
++ json.WriteString("MapRefCount");
++ json.WriteNumber(m_Blocks[i]->GetMapRefCount());
++
++ m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
++ json.EndObject();
++ }
++ json.EndObject();
++}
++#endif // VMA_STATS_STRING_ENABLED
++
++VkResult VmaBlockVector::CheckCorruption()
++{
++ if (!IsCorruptionDetectionEnabled())
++ {
++ return VK_ERROR_FEATURE_NOT_PRESENT;
++ }
++
++ VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
++ for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
++ {
++ VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
++ VMA_ASSERT(pBlock);
++ VkResult res = pBlock->CheckCorruption(m_hAllocator);
++ if (res != VK_SUCCESS)
++ {
++ return res;
++ }
++ }
++ return VK_SUCCESS;
++}
++
++#endif // _VMA_BLOCK_VECTOR_FUNCTIONS
++
++#ifndef _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
++VmaDefragmentationContext_T::VmaDefragmentationContext_T(
++ VmaAllocator hAllocator,
++ const VmaDefragmentationInfo& info)
++ : m_MaxPassBytes(info.maxBytesPerPass == 0 ? VK_WHOLE_SIZE : info.maxBytesPerPass),
++ m_MaxPassAllocations(info.maxAllocationsPerPass == 0 ? UINT32_MAX : info.maxAllocationsPerPass),
++ m_BreakCallback(info.pfnBreakCallback),
++ m_BreakCallbackUserData(info.pBreakCallbackUserData),
++ m_MoveAllocator(hAllocator->GetAllocationCallbacks()),
++ m_Moves(m_MoveAllocator)
++{
++ m_Algorithm = info.flags & VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK;
++
++ if (info.pool != VMA_NULL)
++ {
++ m_BlockVectorCount = 1;
++ m_PoolBlockVector = &info.pool->m_BlockVector;
++ m_pBlockVectors = &m_PoolBlockVector;
++ m_PoolBlockVector->SetIncrementalSort(false);
++ m_PoolBlockVector->SortByFreeSize();
++ }
++ else
++ {
++ m_BlockVectorCount = hAllocator->GetMemoryTypeCount();
++ m_PoolBlockVector = VMA_NULL;
++ m_pBlockVectors = hAllocator->m_pBlockVectors;
++ for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
++ {
++ VmaBlockVector* vector = m_pBlockVectors[i];
++ if (vector != VMA_NULL)
++ {
++ vector->SetIncrementalSort(false);
++ vector->SortByFreeSize();
++ }
++ }
++ }
++
++ switch (m_Algorithm)
++ {
++ case 0: // Default algorithm
++ m_Algorithm = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT;
++ m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount);
++ break;
++ case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
++ m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount);
++ break;
++ case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
++ if (hAllocator->GetBufferImageGranularity() > 1)
++ {
++ m_AlgorithmState = vma_new_array(hAllocator, StateExtensive, m_BlockVectorCount);
++ }
++ break;
++ }
++}
++
++VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
++{
++ if (m_PoolBlockVector != VMA_NULL)
++ {
++ m_PoolBlockVector->SetIncrementalSort(true);
++ }
++ else
++ {
++ for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
++ {
++ VmaBlockVector* vector = m_pBlockVectors[i];
++ if (vector != VMA_NULL)
++ vector->SetIncrementalSort(true);
++ }
++ }
++
++ if (m_AlgorithmState)
++ {
++ switch (m_Algorithm)
++ {
++ case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
++ vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateBalanced*>(m_AlgorithmState), m_BlockVectorCount);
++ break;
++ case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
++ vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateExtensive*>(m_AlgorithmState), m_BlockVectorCount);
++ break;
++ default:
++ VMA_ASSERT(0);
++ }
++ }
++}
++
++VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo)
++{
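++ // Computes the set of moves for this pass. Returns VK_INCOMPLETE when moves
++ // were produced for the caller to execute, or VK_SUCCESS when nothing is left to do.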
++ if (m_PoolBlockVector != VMA_NULL)
++ {
++ VmaMutexLockWrite lock(m_PoolBlockVector->GetMutex(), m_PoolBlockVector->GetAllocator()->m_UseMutex);
++
++ if (m_PoolBlockVector->GetBlockCount() > 1)
++ ComputeDefragmentation(*m_PoolBlockVector, 0);
++ else if (m_PoolBlockVector->GetBlockCount() == 1)
++ ReallocWithinBlock(*m_PoolBlockVector, m_PoolBlockVector->GetBlock(0));
++ }
++ else
++ {
++ for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
++ {
++ if (m_pBlockVectors[i] != VMA_NULL)
++ {
++ VmaMutexLockWrite lock(m_pBlockVectors[i]->GetMutex(), m_pBlockVectors[i]->GetAllocator()->m_UseMutex);
++
++ if (m_pBlockVectors[i]->GetBlockCount() > 1)
++ {
++ if (ComputeDefragmentation(*m_pBlockVectors[i], i))
++ break;
++ }
++ else if (m_pBlockVectors[i]->GetBlockCount() == 1)
++ {
++ if (ReallocWithinBlock(*m_pBlockVectors[i], m_pBlockVectors[i]->GetBlock(0)))
++ break;
++ }
++ }
++ }
++ }
++
++ moveInfo.moveCount = static_cast<uint32_t>(m_Moves.size());
++ if (moveInfo.moveCount > 0)
++ {
++ moveInfo.pMoves = m_Moves.data();
++ return VK_INCOMPLETE;
++ }
++
++ moveInfo.pMoves = VMA_NULL;
++ return VK_SUCCESS;
++}
++
++VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo)
++{
++ VMA_ASSERT(moveInfo.moveCount > 0 ? moveInfo.pMoves != VMA_NULL : true);
++
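++ // For each move reported by DefragmentPassBegin the caller has set move.operation:
++ // COPY commits the move, IGNORE keeps the allocation in its original place, and
++ // DESTROY frees the source without copying. This pass-end step applies those choices.
++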
++ VkResult result = VK_SUCCESS;
++ VmaStlAllocator<FragmentedBlock> blockAllocator(m_MoveAllocator.m_pCallbacks);
++ VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> immovableBlocks(blockAllocator);
++ VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> mappedBlocks(blockAllocator);
++
++ VmaAllocator allocator = VMA_NULL;
++ for (uint32_t i = 0; i < moveInfo.moveCount; ++i)
++ {
++ VmaDefragmentationMove& move = moveInfo.pMoves[i];
++ size_t prevCount = 0, currentCount = 0;
++ VkDeviceSize freedBlockSize = 0;
++
++ uint32_t vectorIndex;
++ VmaBlockVector* vector;
++ if (m_PoolBlockVector != VMA_NULL)
++ {
++ vectorIndex = 0;
++ vector = m_PoolBlockVector;
++ }
++ else
++ {
++ vectorIndex = move.srcAllocation->GetMemoryTypeIndex();
++ vector = m_pBlockVectors[vectorIndex];
++ VMA_ASSERT(vector != VMA_NULL);
++ }
++
++ switch (move.operation)
++ {
++ case VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY:
++ {
++ uint8_t mapCount = move.srcAllocation->SwapBlockAllocation(vector->m_hAllocator, move.dstTmpAllocation);
++ if (mapCount > 0)
++ {
++ allocator = vector->m_hAllocator;
++ VmaDeviceMemoryBlock* newMapBlock = move.srcAllocation->GetBlock();
++ bool notPresent = true;
++ for (FragmentedBlock& block : mappedBlocks)
++ {
++ if (block.block == newMapBlock)
++ {
++ notPresent = false;
++ block.data += mapCount;
++ break;
++ }
++ }
++ if (notPresent)
++ mappedBlocks.push_back({ mapCount, newMapBlock });
++ }
++
++ // Scope for locks; Free() takes its own lock.
++ {
++ VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
++ prevCount = vector->GetBlockCount();
++ freedBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
++ }
++ vector->Free(move.dstTmpAllocation);
++ {
++ VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
++ currentCount = vector->GetBlockCount();
++ }
++
++ result = VK_INCOMPLETE;
++ break;
++ }
++ case VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE:
++ {
++ m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
++ --m_PassStats.allocationsMoved;
++ vector->Free(move.dstTmpAllocation);
++
++ VmaDeviceMemoryBlock* newBlock = move.srcAllocation->GetBlock();
++ bool notPresent = true;
++ for (const FragmentedBlock& block : immovableBlocks)
++ {
++ if (block.block == newBlock)
++ {
++ notPresent = false;
++ break;
++ }
++ }
++ if (notPresent)
++ immovableBlocks.push_back({ vectorIndex, newBlock });
++ break;
++ }
++ case VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY:
++ {
++ m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
++ --m_PassStats.allocationsMoved;
++ // Scope for locks; Free() takes its own lock.
++ {
++ VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
++ prevCount = vector->GetBlockCount();
++ freedBlockSize = move.srcAllocation->GetBlock()->m_pMetadata->GetSize();
++ }
++ vector->Free(move.srcAllocation);
++ {
++ VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
++ currentCount = vector->GetBlockCount();
++ }
++ freedBlockSize *= prevCount - currentCount;
++
++ VkDeviceSize dstBlockSize;
++ {
++ VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
++ dstBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
++ }
++ vector->Free(move.dstTmpAllocation);
++ {
++ VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
++ freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount());
++ currentCount = vector->GetBlockCount();
++ }
++
++ result = VK_INCOMPLETE;
++ break;
++ }
++ default:
++ VMA_ASSERT(0);
++ }
++
++ if (prevCount > currentCount)
++ {
++ size_t freedBlocks = prevCount - currentCount;
++ m_PassStats.deviceMemoryBlocksFreed += static_cast<uint32_t>(freedBlocks);
++ m_PassStats.bytesFreed += freedBlockSize;
++ }
++
++ if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT &&
++ m_AlgorithmState != VMA_NULL)
++ {
++ // Avoid unnecessary allocation attempts when a new free block is available.
++ StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[vectorIndex];
++ if (state.firstFreeBlock != SIZE_MAX)
++ {
++ const size_t diff = prevCount - currentCount;
++ if (state.firstFreeBlock >= diff)
++ {
++ state.firstFreeBlock -= diff;
++ if (state.firstFreeBlock != 0)
++ state.firstFreeBlock -= vector->GetBlock(state.firstFreeBlock - 1)->m_pMetadata->IsEmpty();
++ }
++ else
++ state.firstFreeBlock = 0;
++ }
++ }
++ }
++ moveInfo.moveCount = 0;
++ moveInfo.pMoves = VMA_NULL;
++ m_Moves.clear();
++
++ // Update stats
++ m_GlobalStats.allocationsMoved += m_PassStats.allocationsMoved;
++ m_GlobalStats.bytesFreed += m_PassStats.bytesFreed;
++ m_GlobalStats.bytesMoved += m_PassStats.bytesMoved;
++ m_GlobalStats.deviceMemoryBlocksFreed += m_PassStats.deviceMemoryBlocksFreed;
++ m_PassStats = { 0 };
++
++ // Move blocks with immovable allocations according to the chosen algorithm.
++ if (immovableBlocks.size() > 0)
++ {
++ do
++ {
++ if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT)
++ {
++ if (m_AlgorithmState != VMA_NULL)
++ {
++ bool swapped = false;
++ // Move to the start of the free-block range
++ for (const FragmentedBlock& block : immovableBlocks)
++ {
++ StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[block.data];
++ if (state.operation != StateExtensive::Operation::Cleanup)
++ {
++ VmaBlockVector* vector = m_pBlockVectors[block.data];
++ VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
++
++ for (size_t i = 0, count = vector->GetBlockCount() - m_ImmovableBlockCount; i < count; ++i)
++ {
++ if (vector->GetBlock(i) == block.block)
++ {
++ VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[vector->GetBlockCount() - ++m_ImmovableBlockCount]);
++ if (state.firstFreeBlock != SIZE_MAX)
++ {
++ if (i + 1 < state.firstFreeBlock)
++ {
++ if (state.firstFreeBlock > 1)
++ VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[--state.firstFreeBlock]);
++ else
++ --state.firstFreeBlock;
++ }
++ }
++ swapped = true;
++ break;
++ }
++ }
++ }
++ }
++ if (swapped)
++ result = VK_INCOMPLETE;
++ break;
++ }
++ }
++
++ // Move to the beginning
++ for (const FragmentedBlock& block : immovableBlocks)
++ {
++ VmaBlockVector* vector = m_pBlockVectors[block.data];
++ VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
++
++ for (size_t i = m_ImmovableBlockCount; i < vector->GetBlockCount(); ++i)
++ {
++ if (vector->GetBlock(i) == block.block)
++ {
++ VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[m_ImmovableBlockCount++]);
++ break;
++ }
++ }
++ }
++ } while (false);
++ }
++
++ // Bulk-map destination blocks
++ for (const FragmentedBlock& block : mappedBlocks)
++ {
++ VkResult res = block.block->Map(allocator, block.data, VMA_NULL);
++ VMA_ASSERT(res == VK_SUCCESS);
++ }
++ return result;
++}
++
++bool VmaDefragmentationContext_T::ComputeDefragmentation(VmaBlockVector& vector, size_t index)
++{
++ switch (m_Algorithm)
++ {
++ case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT:
++ return ComputeDefragmentation_Fast(vector);
++ case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
++ return ComputeDefragmentation_Balanced(vector, index, true);
++ case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT:
++ return ComputeDefragmentation_Full(vector);
++ case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
++ return ComputeDefragmentation_Extensive(vector, index);
++ default:
++ VMA_ASSERT(0);
++ return ComputeDefragmentation_Balanced(vector, index, true);
++ }
++}
++
++VmaDefragmentationContext_T::MoveAllocationData VmaDefragmentationContext_T::GetMoveData(
++ VmaAllocHandle handle, VmaBlockMetadata* metadata)
++{
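++ // Collects the size, alignment, suballocation type, and mapping flags of the source
++ // allocation so it can be re-created elsewhere with matching properties.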
++ MoveAllocationData moveData;
++ moveData.move.srcAllocation = (VmaAllocation)metadata->GetAllocationUserData(handle);
++ moveData.size = moveData.move.srcAllocation->GetSize();
++ moveData.alignment = moveData.move.srcAllocation->GetAlignment();
++ moveData.type = moveData.move.srcAllocation->GetSuballocationType();
++ moveData.flags = 0;
++
++ if (moveData.move.srcAllocation->IsPersistentMap())
++ moveData.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
++ if (moveData.move.srcAllocation->IsMappingAllowed())
++ moveData.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
++
++ return moveData;
++}
++
++VmaDefragmentationContext_T::CounterStatus VmaDefragmentationContext_T::CheckCounters(VkDeviceSize bytes)
++{
++ // Check the custom break criterion if one was provided.
++ if (m_BreakCallback && m_BreakCallback(m_BreakCallbackUserData))
++ return CounterStatus::End;
++
++ // Ignore this allocation if it would exceed the maximum bytes to move per pass.
++ if (m_PassStats.bytesMoved + bytes > m_MaxPassBytes)
++ {
++ if (++m_IgnoredAllocs < MAX_ALLOCS_TO_IGNORE)
++ return CounterStatus::Ignore;
++ else
++ return CounterStatus::End;
++ }
++ else
++ m_IgnoredAllocs = 0;
++ return CounterStatus::Pass;
++}
++
++bool VmaDefragmentationContext_T::IncrementCounters(VkDeviceSize bytes)
++{
++ m_PassStats.bytesMoved += bytes;
++ // Early return once a per-pass maximum is reached.
++ if (++m_PassStats.allocationsMoved >= m_MaxPassAllocations || m_PassStats.bytesMoved >= m_MaxPassBytes)
++ {
++ VMA_ASSERT((m_PassStats.allocationsMoved == m_MaxPassAllocations ||
++ m_PassStats.bytesMoved == m_MaxPassBytes) && "Exceeded maximal pass threshold!");
++ return true;
++ }
++ return false;
++}
++
++bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block)
++{
++ VmaBlockMetadata* metadata = block->m_pMetadata;
++
++ for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
++ handle != VK_NULL_HANDLE;
++ handle = metadata->GetNextAllocation(handle))
++ {
++ MoveAllocationData moveData = GetMoveData(handle, metadata);
++ // Ignore allocations newly created by the defragmentation algorithm.
++ if (moveData.move.srcAllocation->GetUserData() == this)
++ continue;
++ switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
++ {
++ case CounterStatus::Ignore:
++ continue;
++ case CounterStatus::End:
++ return true;
++ case CounterStatus::Pass:
++ break;
++ default:
++ VMA_ASSERT(0);
++ }
++
++ VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
++ if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
++ {
++ VmaAllocationRequest request = {};
++ if (metadata->CreateAllocationRequest(
++ moveData.size,
++ moveData.alignment,
++ false,
++ moveData.type,
++ VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
++ &request))
++ {
++ if (metadata->GetAllocationOffset(request.allocHandle) < offset)
++ {
++ if (vector.CommitAllocationRequest(
++ request,
++ block,
++ moveData.alignment,
++ moveData.flags,
++ this,
++ moveData.type,
++ &moveData.move.dstTmpAllocation) == VK_SUCCESS)
++ {
++ m_Moves.push_back(moveData.move);
++ if (IncrementCounters(moveData.size))
++ return true;
++ }
++ }
++ }
++ }
++ }
++ return false;
++}
++
++bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector)
++{
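++ // Tries each destination block in [start, end) in order; the first block with enough
++ // total free space that accepts the allocation records the move and stops the search.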
++ for (; start < end; ++start)
++ {
++ VmaDeviceMemoryBlock* dstBlock = vector.GetBlock(start);
++ if (dstBlock->m_pMetadata->GetSumFreeSize() >= data.size)
++ {
++ if (vector.AllocateFromBlock(dstBlock,
++ data.size,
++ data.alignment,
++ data.flags,
++ this,
++ data.type,
++ 0,
++ &data.move.dstTmpAllocation) == VK_SUCCESS)
++ {
++ m_Moves.push_back(data.move);
++ if (IncrementCounters(data.size))
++ return true;
++ break;
++ }
++ }
++ }
++ return false;
++}
++
++bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector& vector)
++{
++ // Move only between blocks.
++
++ // Go through allocations in the last blocks and try to fit them into the first ones.
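++ // Blocks at index <= m_ImmovableBlockCount are never emptied; they serve only as
++ // move destinations, so sources are walked from the back of the vector.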
++ for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
++ {
++ VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;
++
++ for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
++ handle != VK_NULL_HANDLE;
++ handle = metadata->GetNextAllocation(handle))
++ {
++ MoveAllocationData moveData = GetMoveData(handle, metadata);
++ // Ignore allocations newly created by the defragmentation algorithm.
++ if (moveData.move.srcAllocation->GetUserData() == this)
++ continue;
++ switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
++ {
++ case CounterStatus::Ignore:
++ continue;
++ case CounterStatus::End:
++ return true;
++ case CounterStatus::Pass:
++ break;
++ default:
++ VMA_ASSERT(0);
++ }
++
++ // Check all previous blocks for free space
++ if (AllocInOtherBlock(0, i, moveData, vector))
++ return true;
++ }
++ }
++ return false;
++}
++
++bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update)
++{
++ // Go over every allocation and try to fit it in previous blocks at the lowest offsets;
++ // if not possible, realloc within a single block to minimize offset (excluding offset == 0),
++ // but only if there are noticeable gaps between allocations (heuristic, e.g. average allocation size in the block).
++ VMA_ASSERT(m_AlgorithmState != VMA_NULL);
++
++ StateBalanced& vectorState = reinterpret_cast<StateBalanced*>(m_AlgorithmState)[index];
++ if (update && vectorState.avgAllocSize == UINT64_MAX)
++ UpdateVectorStatistics(vector, vectorState);
++
++ const size_t startMoveCount = m_Moves.size();
++ VkDeviceSize minimalFreeRegion = vectorState.avgFreeSize / 2;
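++ // Heuristic threshold: a realloc within the block is only considered worthwhile when an
++ // adjacent free region is at least half of the average free-region size (see the check below).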
++ for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
++ {
++ VmaDeviceMemoryBlock* block = vector.GetBlock(i);
++ VmaBlockMetadata* metadata = block->m_pMetadata;
++ VkDeviceSize prevFreeRegionSize = 0;
++
++ for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
++ handle != VK_NULL_HANDLE;
++ handle = metadata->GetNextAllocation(handle))
++ {
++ MoveAllocationData moveData = GetMoveData(handle, metadata);
++ // Ignore allocations newly created by the defragmentation algorithm.
++ if (moveData.move.srcAllocation->GetUserData() == this)
++ continue;
++ switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
++ {
++ case CounterStatus::Ignore:
++ continue;
++ case CounterStatus::End:
++ return true;
++ case CounterStatus::Pass:
++ break;
++ default:
++ VMA_ASSERT(0);
++ }
++
++ // Check all previous blocks for free space
++ const size_t prevMoveCount = m_Moves.size();
++ if (AllocInOtherBlock(0, i, moveData, vector))
++ return true;
++
++ VkDeviceSize nextFreeRegionSize = metadata->GetNextFreeRegionSize(handle);
++ // If no room was found, realloc within the block to a lower offset.
++ VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
++ if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
++ {
++ // Check if realloc will make sense
++ if (prevFreeRegionSize >= minimalFreeRegion ||
++ nextFreeRegionSize >= minimalFreeRegion ||
++ moveData.size <= vectorState.avgFreeSize ||
++ moveData.size <= vectorState.avgAllocSize)
++ {
++ VmaAllocationRequest request = {};
++ if (metadata->CreateAllocationRequest(
++ moveData.size,
++ moveData.alignment,
++ false,
++ moveData.type,
++ VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
++ &request))
++ {
++ if (metadata->GetAllocationOffset(request.allocHandle) < offset)
++ {
++ if (vector.CommitAllocationRequest(
++ request,
++ block,
++ moveData.alignment,
++ moveData.flags,
++ this,
++ moveData.type,
++ &moveData.move.dstTmpAllocation) == VK_SUCCESS)
++ {
++ m_Moves.push_back(moveData.move);
++ if (IncrementCounters(moveData.size))
++ return true;
++ }
++ }
++ }
++ }
++ }
++ prevFreeRegionSize = nextFreeRegionSize;
++ }
++ }
++
++ // No moves performed; update statistics to the current vector state.
++ if (startMoveCount == m_Moves.size() && !update)
++ {
++ vectorState.avgAllocSize = UINT64_MAX;
++ return ComputeDefragmentation_Balanced(vector, index, false);
++ }
++ return false;
++}
++
++bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& vector)
++{
++ // Go over every allocation and try to fit it in previous blocks at the lowest offsets;
++ // if not possible, realloc within a single block to minimize offset (excluding offset == 0).
++
++ for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
++ {
++ VmaDeviceMemoryBlock* block = vector.GetBlock(i);
++ VmaBlockMetadata* metadata = block->m_pMetadata;
++
++ for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
++ handle != VK_NULL_HANDLE;
++ handle = metadata->GetNextAllocation(handle))
++ {
++ MoveAllocationData moveData = GetMoveData(handle, metadata);
++ // Ignore allocations newly created by the defragmentation algorithm.
++ if (moveData.move.srcAllocation->GetUserData() == this)
++ continue;
++ switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
++ {
++ case CounterStatus::Ignore:
++ continue;
++ case CounterStatus::End:
++ return true;
++ case CounterStatus::Pass:
++ break;
++ default:
++ VMA_ASSERT(0);
++ }
++
++ // Check all previous blocks for free space
++ const size_t prevMoveCount = m_Moves.size();
++ if (AllocInOtherBlock(0, i, moveData, vector))
++ return true;
++
++ // If no room was found, realloc within the block to a lower offset.
++ VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
++ if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
++ {
++ VmaAllocationRequest request = {};
++ if (metadata->CreateAllocationRequest(
++ moveData.size,
++ moveData.alignment,
++ false,
++ moveData.type,
++ VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
++ &request))
++ {
++ if (metadata->GetAllocationOffset(request.allocHandle) < offset)
++ {
++ if (vector.CommitAllocationRequest(
++ request,
++ block,
++ moveData.alignment,
++ moveData.flags,
++ this,
++ moveData.type,
++ &moveData.move.dstTmpAllocation) == VK_SUCCESS)
++ {
++ m_Moves.push_back(moveData.move);
++ if (IncrementCounters(moveData.size))
++ return true;
++ }
++ }
++ }
++ }
++ }
++ }
++ return false;
++}
++
++bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index)
++{
++ // First free a single block, then populate it to the brim, then free another block, and so on.
++
++ // Fall back to the previous algorithm, since without granularity conflicts it can achieve maximal packing.
++ if (vector.m_BufferImageGranularity == 1)
++ return ComputeDefragmentation_Full(vector);
++
++ VMA_ASSERT(m_AlgorithmState != VMA_NULL);
++
++ StateExtensive& vectorState = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[index];
++
++ bool texturePresent = false, bufferPresent = false, otherPresent = false;
++ switch (vectorState.operation)
++ {
++ case StateExtensive::Operation::Done: // Vector defragmented
++ return false;
++ case StateExtensive::Operation::FindFreeBlockBuffer:
++ case StateExtensive::Operation::FindFreeBlockTexture:
++ case StateExtensive::Operation::FindFreeBlockAll:
++ {
++ // No more blocks to free, just perform fast realloc and move to cleanup
++ if (vectorState.firstFreeBlock == 0)
++ {
++ vectorState.operation = StateExtensive::Operation::Cleanup;
++ return ComputeDefragmentation_Fast(vector);
++ }
++
++ // No recorded free block yet (SIZE_MAX): clear the last block; otherwise clear the block just before the free range.
++ size_t last = (vectorState.firstFreeBlock == SIZE_MAX ? vector.GetBlockCount() : vectorState.firstFreeBlock) - 1;
++ VmaBlockMetadata* freeMetadata = vector.GetBlock(last)->m_pMetadata;
++
++ const size_t prevMoveCount = m_Moves.size();
++ for (VmaAllocHandle handle = freeMetadata->GetAllocationListBegin();
++ handle != VK_NULL_HANDLE;
++ handle = freeMetadata->GetNextAllocation(handle))
++ {
++ MoveAllocationData moveData = GetMoveData(handle, freeMetadata);
++ switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
++ {
++ case CounterStatus::Ignore:
++ continue;
++ case CounterStatus::End:
++ return true;
++ case CounterStatus::Pass:
++ break;
++ default:
++ VMA_ASSERT(0);
++ }
++
++ // Check all previous blocks for free space
++ if (AllocInOtherBlock(0, last, moveData, vector))
++ {
++ // Full clear performed already
++ if (prevMoveCount != m_Moves.size() && freeMetadata->GetNextAllocation(handle) == VK_NULL_HANDLE)
++ vectorState.firstFreeBlock = last;
++ return true;
++ }
++ }
++
++ if (prevMoveCount == m_Moves.size())
++ {
++ // Cannot perform full clear, have to move data in other blocks around
++ if (last != 0)
++ {
++ for (size_t i = last - 1; i; --i)
++ {
++ if (ReallocWithinBlock(vector, vector.GetBlock(i)))
++ return true;
++ }
++ }
++
++ if (prevMoveCount == m_Moves.size())
++ {
++ // No possible reallocs within blocks, try to move them around fast
++ return ComputeDefragmentation_Fast(vector);
++ }
++ }
++ else
++ {
++ switch (vectorState.operation)
++ {
++ case StateExtensive::Operation::FindFreeBlockBuffer:
++ vectorState.operation = StateExtensive::Operation::MoveBuffers;
++ break;
++ case StateExtensive::Operation::FindFreeBlockTexture:
++ vectorState.operation = StateExtensive::Operation::MoveTextures;
++ break;
++ case StateExtensive::Operation::FindFreeBlockAll:
++ vectorState.operation = StateExtensive::Operation::MoveAll;
++ break;
++ default:
++ VMA_ASSERT(0);
++ vectorState.operation = StateExtensive::Operation::MoveTextures;
++ }
++ vectorState.firstFreeBlock = last;
++ // Nothing was moved: a free block was found without reallocations, so further reallocs can run in the same pass.
++ return ComputeDefragmentation_Extensive(vector, index);
++ }
++ break;
++ }
++ case StateExtensive::Operation::MoveTextures:
++ {
++ if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL, vector,
++ vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
++ {
++ if (texturePresent)
++ {
++ vectorState.operation = StateExtensive::Operation::FindFreeBlockTexture;
++ return ComputeDefragmentation_Extensive(vector, index);
++ }
++
++ if (!bufferPresent && !otherPresent)
++ {
++ vectorState.operation = StateExtensive::Operation::Cleanup;
++ break;
++ }
++
++ // No more textures to move, check buffers
++ vectorState.operation = StateExtensive::Operation::MoveBuffers;
++ bufferPresent = false;
++ otherPresent = false;
++ }
++ else
++ break;
++ VMA_FALLTHROUGH; // Fallthrough
++ }
++ case StateExtensive::Operation::MoveBuffers:
++ {
++ if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_BUFFER, vector,
++ vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
++ {
++ if (bufferPresent)
++ {
++ vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer;
++ return ComputeDefragmentation_Extensive(vector, index);
++ }
++
++ if (!otherPresent)
++ {
++ vectorState.operation = StateExtensive::Operation::Cleanup;
++ break;
++ }
++
++ // No more buffers to move, check all others
++ vectorState.operation = StateExtensive::Operation::MoveAll;
++ otherPresent = false;
++ }
++ else
++ break;
++ VMA_FALLTHROUGH; // Fallthrough
++ }
++ case StateExtensive::Operation::MoveAll:
++ {
++ if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_FREE, vector,
++ vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
++ {
++ if (otherPresent)
++ {
++ vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer;
++ return ComputeDefragmentation_Extensive(vector, index);
++ }
++ // Everything moved
++ vectorState.operation = StateExtensive::Operation::Cleanup;
++ }
++ break;
++ }
++ case StateExtensive::Operation::Cleanup:
++ // Cleanup is handled below so that other operations may reuse the cleanup code. This case is here to prevent the unhandled enum value warning (C4062).
++ break;
++ }
++
++ if (vectorState.operation == StateExtensive::Operation::Cleanup)
++ {
++ // All other work done, pack data in blocks even tighter if possible
++ const size_t prevMoveCount = m_Moves.size();
++ for (size_t i = 0; i < vector.GetBlockCount(); ++i)
++ {
++ if (ReallocWithinBlock(vector, vector.GetBlock(i)))
++ return true;
++ }
++
++ if (prevMoveCount == m_Moves.size())
++ vectorState.operation = StateExtensive::Operation::Done;
++ }
++ return false;
++}
++
++void VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state)
++{
++ size_t allocCount = 0;
++ size_t freeCount = 0;
++ state.avgFreeSize = 0;
++ state.avgAllocSize = 0;
++
++ for (size_t i = 0; i < vector.GetBlockCount(); ++i)
++ {
++ VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;
++
++ allocCount += metadata->GetAllocationCount();
++ freeCount += metadata->GetFreeRegionsCount();
++ state.avgFreeSize += metadata->GetSumFreeSize();
++ state.avgAllocSize += metadata->GetSize();
++ }
++
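++ // avgAllocSize accumulated whole block sizes above; subtracting the accumulated free
++ // bytes leaves the total allocated bytes, which are then averaged per allocation.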
++ state.avgAllocSize = (state.avgAllocSize - state.avgFreeSize) / allocCount;
++ state.avgFreeSize /= freeCount;
++}
++
++bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType currentType,
++ VmaBlockVector& vector, size_t firstFreeBlock,
++ bool& texturePresent, bool& bufferPresent, bool& otherPresent)
++{
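++ // Walks blocks before firstFreeBlock from back to front, moving allocations of
++ // currentType into the free blocks at and after firstFreeBlock; the flags report
++ // which resource types were encountered. Returns true when no new moves were recorded.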
++ const size_t prevMoveCount = m_Moves.size();
++ for (size_t i = firstFreeBlock; i;)
++ {
++ VmaDeviceMemoryBlock* block = vector.GetBlock(--i);
++ VmaBlockMetadata* metadata = block->m_pMetadata;
++
++ for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
++ handle != VK_NULL_HANDLE;
++ handle = metadata->GetNextAllocation(handle))
++ {
++ MoveAllocationData moveData = GetMoveData(handle, metadata);
++ // Ignore allocations newly created by the defragmentation algorithm.
++ if (moveData.move.srcAllocation->GetUserData() == this)
++ continue;
++ switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
++ {
++ case CounterStatus::Ignore:
++ continue;
++ case CounterStatus::End:
++ return true;
++ case CounterStatus::Pass:
++ break;
++ default:
++ VMA_ASSERT(0);
++ }
++
++ // Move only a single type of resource at once.
++ if (!VmaIsBufferImageGranularityConflict(moveData.type, currentType))
++ {
++ // Try to fit allocation into free blocks
++ if (AllocInOtherBlock(firstFreeBlock, vector.GetBlockCount(), moveData, vector))
++ return false;
++ }
++
++ if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL))
++ texturePresent = true;
++ else if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_BUFFER))
++ bufferPresent = true;
++ else
++ otherPresent = true;
++ }
++ }
++ return prevMoveCount == m_Moves.size();
++}
++#endif // _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
++
++#ifndef _VMA_POOL_T_FUNCTIONS
++VmaPool_T::VmaPool_T(
++ VmaAllocator hAllocator,
++ const VmaPoolCreateInfo& createInfo,
++ VkDeviceSize preferredBlockSize)
++ : m_BlockVector(
++ hAllocator,
++ this, // hParentPool
++ createInfo.memoryTypeIndex,
++ createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
++ createInfo.minBlockCount,
++ createInfo.maxBlockCount,
++ (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
++ createInfo.blockSize != 0, // explicitBlockSize
++ createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm
++ createInfo.priority,
++ VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment),
++ createInfo.pMemoryAllocateNext),
++ m_Id(0),
++ m_Name(VMA_NULL) {}
++
++VmaPool_T::~VmaPool_T()
++{
++ VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL);
++}
++
++void VmaPool_T::SetName(const char* pName)
++{
++ const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
++ VmaFreeString(allocs, m_Name);
++
++ if (pName != VMA_NULL)
++ {
++ m_Name = VmaCreateStringCopy(allocs, pName);
++ }
++ else
++ {
++ m_Name = VMA_NULL;
++ }
++}
++#endif // _VMA_POOL_T_FUNCTIONS
++
++#ifndef _VMA_ALLOCATOR_T_FUNCTIONS
++VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
++ m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
++ m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
++ m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
++ m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
++ m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
++ m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
++ m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
++ m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0),
++ m_hDevice(pCreateInfo->device),
++ m_hInstance(pCreateInfo->instance),
++ m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
++ m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
++ *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
++ m_AllocationObjectAllocator(&m_AllocationCallbacks),
++ m_HeapSizeLimitMask(0),
++ m_DeviceMemoryCount(0),
++ m_PreferredLargeHeapBlockSize(0),
++ m_PhysicalDevice(pCreateInfo->physicalDevice),
++ m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
++ m_NextPoolId(0),
++ m_GlobalMemoryTypeBits(UINT32_MAX)
++{
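++ // On Vulkan 1.1+ dedicated allocation and bind_memory2 are core functionality,
++ // so the KHR extension paths are not needed.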
++ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
++ {
++ m_UseKhrDedicatedAllocation = false;
++ m_UseKhrBindMemory2 = false;
++ }
++
++ if(VMA_DEBUG_DETECT_CORRUPTION)
++ {
++ // Needs to be a multiple of sizeof(uint32_t) because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
++ VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
++ }
++
++ VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);
++
++ if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
++ {
++#if !(VMA_DEDICATED_ALLOCATION)
++ if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
++ {
++ VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
++ }
++#endif
++#if !(VMA_BIND_MEMORY2)
++ if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
++ {
++ VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
++ }
++#endif
++ }
++#if !(VMA_MEMORY_BUDGET)
++ if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
++ {
++ VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
++ }
++#endif
++#if !(VMA_BUFFER_DEVICE_ADDRESS)
++ if(m_UseKhrBufferDeviceAddress)
++ {
++ VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
++ }
++#endif
++#if VMA_VULKAN_VERSION < 1003000
++ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
++ {
++ VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_3 but required Vulkan version is disabled by preprocessor macros.");
++ }
++#endif
++#if VMA_VULKAN_VERSION < 1002000
++ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
++ {
++ VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
++ }
++#endif
++#if VMA_VULKAN_VERSION < 1001000
++ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
++ {
++ VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
++ }
++#endif
++#if !(VMA_MEMORY_PRIORITY)
++ if(m_UseExtMemoryPriority)
++ {
++ VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
++ }
++#endif
++
++ memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
++ memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
++ memset(&m_MemProps, 0, sizeof(m_MemProps));
++
++ memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
++ memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
++
++#if VMA_EXTERNAL_MEMORY
++ memset(&m_TypeExternalMemoryHandleTypes, 0, sizeof(m_TypeExternalMemoryHandleTypes));
++#endif // #if VMA_EXTERNAL_MEMORY
++
++ if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
++ {
++ m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
++ m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
++ m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
++ }
++
++ ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
++
++ (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
++ (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
++
++ VMA_ASSERT(VmaIsPow2(VMA_MIN_ALIGNMENT));
++ VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
++ VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
++ VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
++
++ m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
++ pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
++
++ m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
++
++#if VMA_EXTERNAL_MEMORY
++ if(pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL)
++ {
++ memcpy(m_TypeExternalMemoryHandleTypes, pCreateInfo->pTypeExternalMemoryHandleTypes,
++ sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount());
++ }
++#endif // #if VMA_EXTERNAL_MEMORY
++
++ if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
++ {
++ for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
++ {
++ const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
++ if(limit != VK_WHOLE_SIZE)
++ {
++ m_HeapSizeLimitMask |= 1u << heapIndex;
++ if(limit < m_MemProps.memoryHeaps[heapIndex].size)
++ {
++ m_MemProps.memoryHeaps[heapIndex].size = limit;
++ }
++ }
++ }
++ }
++
++ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
++ {
++ // Create only supported types
++ if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0)
++ {
++ const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
++ m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
++ this,
++ VK_NULL_HANDLE, // hParentPool
++ memTypeIndex,
++ preferredBlockSize,
++ 0,
++ SIZE_MAX,
++ GetBufferImageGranularity(),
++ false, // explicitBlockSize
++ 0, // algorithm
++ 0.5f, // priority (0.5 is the default per Vulkan spec)
++ GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment
++ VMA_NULL); // pMemoryAllocateNext
++ // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
++ // because minBlockCount is 0.
++ }
++ }
++}
++
++VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
++{
++ VkResult res = VK_SUCCESS;
++
++#if VMA_MEMORY_BUDGET
++ if(m_UseExtMemoryBudget)
++ {
++ UpdateVulkanBudget();
++ }
++#endif // #if VMA_MEMORY_BUDGET
++
++ return res;
++}
++
++VmaAllocator_T::~VmaAllocator_T()
++{
++ VMA_ASSERT(m_Pools.IsEmpty());
++
++ for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
++ {
++ vma_delete(this, m_pBlockVectors[memTypeIndex]);
++ }
++}
++
++void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
++{
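++ // Import order establishes precedence: statically linked entry points first (if enabled),
++ // then caller-provided overrides, then dynamic fetch for anything still null.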
++#if VMA_STATIC_VULKAN_FUNCTIONS == 1
++ ImportVulkanFunctions_Static();
++#endif
++
++ if(pVulkanFunctions != VMA_NULL)
++ {
++ ImportVulkanFunctions_Custom(pVulkanFunctions);
++ }
++
++#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
++ ImportVulkanFunctions_Dynamic();
++#endif
++
++ ValidateVulkanFunctions();
++}
++
++#if VMA_STATIC_VULKAN_FUNCTIONS == 1
++
++void VmaAllocator_T::ImportVulkanFunctions_Static()
++{
++ // Vulkan 1.0
++ m_VulkanFunctions.vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)vkGetInstanceProcAddr;
++ m_VulkanFunctions.vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)vkGetDeviceProcAddr;
++ m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
++ m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
++ m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
++ m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
++ m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
++ m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
++ m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
++ m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
++ m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
++ m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
++ m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
++ m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
++ m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
++ m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
++ m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
++ m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
++ m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
++
++ // Vulkan 1.1
++#if VMA_VULKAN_VERSION >= 1001000
++ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
++ {
++ m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
++ m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
++ m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
++ m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
++ }
++#endif
++
++#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
++ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
++ {
++ m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
++ }
++#endif
++
++#if VMA_VULKAN_VERSION >= 1003000
++ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
++ {
++ m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)vkGetDeviceBufferMemoryRequirements;
++ m_VulkanFunctions.vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)vkGetDeviceImageMemoryRequirements;
++ }
++#endif
++}
++
++#endif // VMA_STATIC_VULKAN_FUNCTIONS == 1
++
++void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
++{
++ VMA_ASSERT(pVulkanFunctions != VMA_NULL);
++
++#define VMA_COPY_IF_NOT_NULL(funcName) \
++ if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
++
++ VMA_COPY_IF_NOT_NULL(vkGetInstanceProcAddr);
++ VMA_COPY_IF_NOT_NULL(vkGetDeviceProcAddr);
++ VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
++ VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
++ VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
++ VMA_COPY_IF_NOT_NULL(vkFreeMemory);
++ VMA_COPY_IF_NOT_NULL(vkMapMemory);
++ VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
++ VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
++ VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
++ VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
++ VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
++ VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
++ VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
++ VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
++ VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
++ VMA_COPY_IF_NOT_NULL(vkCreateImage);
++ VMA_COPY_IF_NOT_NULL(vkDestroyImage);
++ VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
++
++#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
++ VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
++ VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
++#endif
++
++#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
++ VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
++ VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
++#endif
++
++#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
++ VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
++#endif
++
++#if VMA_VULKAN_VERSION >= 1003000
++ VMA_COPY_IF_NOT_NULL(vkGetDeviceBufferMemoryRequirements);
++ VMA_COPY_IF_NOT_NULL(vkGetDeviceImageMemoryRequirements);
++#endif
++
++#undef VMA_COPY_IF_NOT_NULL
++}
++
++#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
++
++void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
++{
++ VMA_ASSERT(m_VulkanFunctions.vkGetInstanceProcAddr && m_VulkanFunctions.vkGetDeviceProcAddr &&
++ "To use VMA_DYNAMIC_VULKAN_FUNCTIONS in new versions of VMA you now have to pass "
++ "VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as VmaAllocatorCreateInfo::pVulkanFunctions. "
++ "Other members can be null.");
++
++#define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
++ if(m_VulkanFunctions.memberName == VMA_NULL) \
++ m_VulkanFunctions.memberName = \
++ (functionPointerType)m_VulkanFunctions.vkGetInstanceProcAddr(m_hInstance, functionNameString);
++#define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
++ if(m_VulkanFunctions.memberName == VMA_NULL) \
++ m_VulkanFunctions.memberName = \
++ (functionPointerType)m_VulkanFunctions.vkGetDeviceProcAddr(m_hDevice, functionNameString);
++
++ VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
++ VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
++ VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
++ VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
++ VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
++ VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
++ VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
++ VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
++ VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
++ VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
++ VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
++ VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
++ VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
++ VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
++ VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
++ VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
++ VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");
++
++#if VMA_VULKAN_VERSION >= 1001000
++ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
++ {
++ VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2");
++ VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2");
++ VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2");
++ VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2");
++ }
++#endif
++
++#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
++ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
++ {
++ VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2");
++ }
++ else if(m_UseExtMemoryBudget)
++ {
++ VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2KHR");
++ }
++#endif
++
++#if VMA_DEDICATED_ALLOCATION
++ if(m_UseKhrDedicatedAllocation)
++ {
++ VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
++ VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
++ }
++#endif
++
++#if VMA_BIND_MEMORY2
++ if(m_UseKhrBindMemory2)
++ {
++ VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
++ VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
++ }
++#endif // #if VMA_BIND_MEMORY2
++
++#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
++ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
++ {
++ VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2");
++ }
++ else if(m_UseExtMemoryBudget)
++ {
++ VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
++ }
++#endif // #if VMA_MEMORY_BUDGET
++
++#if VMA_VULKAN_VERSION >= 1003000
++ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
++ {
++ VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirements, "vkGetDeviceBufferMemoryRequirements");
++ VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirements, "vkGetDeviceImageMemoryRequirements");
++ }
++#endif
++
++#undef VMA_FETCH_DEVICE_FUNC
++#undef VMA_FETCH_INSTANCE_FUNC
++}
++
++#endif // VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
++
++void VmaAllocator_T::ValidateVulkanFunctions()
++{
++ VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
++ VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
++ VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
++ VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
++ VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
++ VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
++ VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
++ VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
++ VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
++ VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
++ VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
++ VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
++ VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
++ VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
++ VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
++ VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
++ VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
++
++#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
++ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
++ {
++ VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
++ VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
++ }
++#endif
++
++#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
++ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
++ {
++ VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
++ VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
++ }
++#endif
++
++#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
++ if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
++ {
++ VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
++ }
++#endif
++
++#if VMA_VULKAN_VERSION >= 1003000
++ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
++ {
++ VMA_ASSERT(m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements != VMA_NULL);
++ VMA_ASSERT(m_VulkanFunctions.vkGetDeviceImageMemoryRequirements != VMA_NULL);
++ }
++#endif
++}
++
++VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
++{
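++ // Heaps no larger than VMA_SMALL_HEAP_MAX_SIZE get blocks of 1/8 of the heap size;
++ // larger heaps use the configured preferred size. The result is aligned up to 32 bytes.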
++ const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
++ const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
++ const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
++ return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
++}
++
++VkResult VmaAllocator_T::AllocateMemoryOfType(
++ VmaPool pool,
++ VkDeviceSize size,
++ VkDeviceSize alignment,
++ bool dedicatedPreferred,
++ VkBuffer dedicatedBuffer,
++ VkImage dedicatedImage,
++ VkFlags dedicatedBufferImageUsage,
++ const VmaAllocationCreateInfo& createInfo,
++ uint32_t memTypeIndex,
++ VmaSuballocationType suballocType,
++ VmaDedicatedAllocationList& dedicatedAllocations,
++ VmaBlockVector& blockVector,
++ size_t allocationCount,
++ VmaAllocation* pAllocations)
++{
++ VMA_ASSERT(pAllocations != VMA_NULL);
++ VMA_DEBUG_LOG_FORMAT(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
++
++ VmaAllocationCreateInfo finalCreateInfo = createInfo;
++ VkResult res = CalcMemTypeParams(
++ finalCreateInfo,
++ memTypeIndex,
++ size,
++ allocationCount);
++ if(res != VK_SUCCESS)
++ return res;
++
++ if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
++ {
++ return AllocateDedicatedMemory(
++ pool,
++ size,
++ suballocType,
++ dedicatedAllocations,
++ memTypeIndex,
++ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
++ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
++ (finalCreateInfo.flags &
++ (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
++ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
++ finalCreateInfo.pUserData,
++ finalCreateInfo.priority,
++ dedicatedBuffer,
++ dedicatedImage,
++ dedicatedBufferImageUsage,
++ allocationCount,
++ pAllocations,
++ blockVector.GetAllocationNextPtr());
++ }
++ else
++ {
++ const bool canAllocateDedicated =
++ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
++ (pool == VK_NULL_HANDLE || !blockVector.HasExplicitBlockSize());
++
++ if(canAllocateDedicated)
++ {
++ // Heuristic: Allocate dedicated memory if the requested size is greater than half of the preferred block size.
++ if(size > blockVector.GetPreferredBlockSize() / 2)
++ {
++ dedicatedPreferred = true;
++ }
++ // Protection against creating each allocation as dedicated when we reach or exceed heap size/budget,
++ // which can quickly deplete maxMemoryAllocationCount: Don't prefer dedicated allocations when above
++ // 3/4 of the maximum allocation count.
++ if(m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount < UINT32_MAX / 4 &&
++ m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4)
++ {
++ dedicatedPreferred = false;
++ }
++
++ if(dedicatedPreferred)
++ {
++ res = AllocateDedicatedMemory(
++ pool,
++ size,
++ suballocType,
++ dedicatedAllocations,
++ memTypeIndex,
++ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
++ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
++ (finalCreateInfo.flags &
++ (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
++ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
++ finalCreateInfo.pUserData,
++ finalCreateInfo.priority,
++ dedicatedBuffer,
++ dedicatedImage,
++ dedicatedBufferImageUsage,
++ allocationCount,
++ pAllocations,
++ blockVector.GetAllocationNextPtr());
++ if(res == VK_SUCCESS)
++ {
++ // Succeeded: AllocateDedicatedMemory has already filled pAllocations, nothing more to do here.
++ VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
++ return VK_SUCCESS;
++ }
++ }
++ }
++
++ res = blockVector.Allocate(
++ size,
++ alignment,
++ finalCreateInfo,
++ suballocType,
++ allocationCount,
++ pAllocations);
++ if(res == VK_SUCCESS)
++ return VK_SUCCESS;
++
++ // Try dedicated memory.
++ if(canAllocateDedicated && !dedicatedPreferred)
++ {
++ res = AllocateDedicatedMemory(
++ pool,
++ size,
++ suballocType,
++ dedicatedAllocations,
++ memTypeIndex,
++ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
++ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
++ (finalCreateInfo.flags &
++ (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
++ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
++ finalCreateInfo.pUserData,
++ finalCreateInfo.priority,
++ dedicatedBuffer,
++ dedicatedImage,
++ dedicatedBufferImageUsage,
++ allocationCount,
++ pAllocations,
++ blockVector.GetAllocationNextPtr());
++ if(res == VK_SUCCESS)
++ {
++ // Succeeded: AllocateDedicatedMemory has already filled pAllocations, nothing more to do here.
++ VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
++ return VK_SUCCESS;
++ }
++ }
++ // Everything failed: Return error code.
++ VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
++ return res;
++ }
++}
++
++VkResult VmaAllocator_T::AllocateDedicatedMemory(
++ VmaPool pool,
++ VkDeviceSize size,
++ VmaSuballocationType suballocType,
++ VmaDedicatedAllocationList& dedicatedAllocations,
++ uint32_t memTypeIndex,
++ bool map,
++ bool isUserDataString,
++ bool isMappingAllowed,
++ bool canAliasMemory,
++ void* pUserData,
++ float priority,
++ VkBuffer dedicatedBuffer,
++ VkImage dedicatedImage,
++ VkFlags dedicatedBufferImageUsage,
++ size_t allocationCount,
++ VmaAllocation* pAllocations,
++ const void* pNextChain)
++{
++ VMA_ASSERT(allocationCount > 0 && pAllocations);
++
++ VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
++ allocInfo.memoryTypeIndex = memTypeIndex;
++ allocInfo.allocationSize = size;
++ allocInfo.pNext = pNextChain;
++
++#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
++ VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
++ if(!canAliasMemory)
++ {
++ if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
++ {
++ if(dedicatedBuffer != VK_NULL_HANDLE)
++ {
++ VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
++ dedicatedAllocInfo.buffer = dedicatedBuffer;
++ VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
++ }
++ else if(dedicatedImage != VK_NULL_HANDLE)
++ {
++ dedicatedAllocInfo.image = dedicatedImage;
++ VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
++ }
++ }
++ }
++#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
++
++#if VMA_BUFFER_DEVICE_ADDRESS
++ VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
++ if(m_UseKhrBufferDeviceAddress)
++ {
++ bool canContainBufferWithDeviceAddress = true;
++ if(dedicatedBuffer != VK_NULL_HANDLE)
++ {
++ canContainBufferWithDeviceAddress = dedicatedBufferImageUsage == UINT32_MAX || // Usage flags unknown
++ (dedicatedBufferImageUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
++ }
++ else if(dedicatedImage != VK_NULL_HANDLE)
++ {
++ canContainBufferWithDeviceAddress = false;
++ }
++ if(canContainBufferWithDeviceAddress)
++ {
++ allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
++ VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
++ }
++ }
++#endif // #if VMA_BUFFER_DEVICE_ADDRESS
++
++#if VMA_MEMORY_PRIORITY
++ VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
++ if(m_UseExtMemoryPriority)
++ {
++ VMA_ASSERT(priority >= 0.f && priority <= 1.f);
++ priorityInfo.priority = priority;
++ VmaPnextChainPushFront(&allocInfo, &priorityInfo);
++ }
++#endif // #if VMA_MEMORY_PRIORITY
++
++#if VMA_EXTERNAL_MEMORY
++ // Attach VkExportMemoryAllocateInfoKHR if necessary.
++ VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
++ exportMemoryAllocInfo.handleTypes = GetExternalMemoryHandleTypeFlags(memTypeIndex);
++ if(exportMemoryAllocInfo.handleTypes != 0)
++ {
++ VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
++ }
++#endif // #if VMA_EXTERNAL_MEMORY
++
++ size_t allocIndex;
++ VkResult res = VK_SUCCESS;
++ for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
++ {
++ res = AllocateDedicatedMemoryPage(
++ pool,
++ size,
++ suballocType,
++ memTypeIndex,
++ allocInfo,
++ map,
++ isUserDataString,
++ isMappingAllowed,
++ pUserData,
++ pAllocations + allocIndex);
++ if(res != VK_SUCCESS)
++ {
++ break;
++ }
++ }
++
++ if(res == VK_SUCCESS)
++ {
++ for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
++ {
++ dedicatedAllocations.Register(pAllocations[allocIndex]);
++ }
++ VMA_DEBUG_LOG_FORMAT(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
++ }
++ else
++ {
++ // Free all already created allocations.
++ while(allocIndex--)
++ {
++ VmaAllocation currAlloc = pAllocations[allocIndex];
++ VkDeviceMemory hMemory = currAlloc->GetMemory();
++
++ /*
++            There is no need to call this, because the Vulkan spec allows skipping
++            vkUnmapMemory before vkFreeMemory.
++
++ if(currAlloc->GetMappedData() != VMA_NULL)
++ {
++ (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
++ }
++ */
++
++ FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
++ m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
++ m_AllocationObjectAllocator.Free(currAlloc);
++ }
++
++ memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
++ }
++
++ return res;
++}
++
++VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
++ VmaPool pool,
++ VkDeviceSize size,
++ VmaSuballocationType suballocType,
++ uint32_t memTypeIndex,
++ const VkMemoryAllocateInfo& allocInfo,
++ bool map,
++ bool isUserDataString,
++ bool isMappingAllowed,
++ void* pUserData,
++ VmaAllocation* pAllocation)
++{
++ VkDeviceMemory hMemory = VK_NULL_HANDLE;
++ VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
++ if(res < 0)
++ {
++ VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
++ return res;
++ }
++
++ void* pMappedData = VMA_NULL;
++ if(map)
++ {
++ res = (*m_VulkanFunctions.vkMapMemory)(
++ m_hDevice,
++ hMemory,
++ 0,
++ VK_WHOLE_SIZE,
++ 0,
++ &pMappedData);
++ if(res < 0)
++ {
++ VMA_DEBUG_LOG(" vkMapMemory FAILED");
++ FreeVulkanMemory(memTypeIndex, size, hMemory);
++ return res;
++ }
++ }
++
++ *pAllocation = m_AllocationObjectAllocator.Allocate(isMappingAllowed);
++ (*pAllocation)->InitDedicatedAllocation(pool, memTypeIndex, hMemory, suballocType, pMappedData, size);
++ if (isUserDataString)
++ (*pAllocation)->SetName(this, (const char*)pUserData);
++ else
++ (*pAllocation)->SetUserData(this, pUserData);
++ m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
++ if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
++ {
++ FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
++ }
++
++ return VK_SUCCESS;
++}
++
++void VmaAllocator_T::GetBufferMemoryRequirements(
++ VkBuffer hBuffer,
++ VkMemoryRequirements& memReq,
++ bool& requiresDedicatedAllocation,
++ bool& prefersDedicatedAllocation) const
++{
++#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
++ if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
++ {
++ VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
++ memReqInfo.buffer = hBuffer;
++
++ VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
++
++ VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
++ VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
++
++ (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
++
++ memReq = memReq2.memoryRequirements;
++ requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
++ prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
++ }
++ else
++#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
++ {
++ (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
++ requiresDedicatedAllocation = false;
++ prefersDedicatedAllocation = false;
++ }
++}
++
++void VmaAllocator_T::GetImageMemoryRequirements(
++ VkImage hImage,
++ VkMemoryRequirements& memReq,
++ bool& requiresDedicatedAllocation,
++ bool& prefersDedicatedAllocation) const
++{
++#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
++ if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
++ {
++ VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
++ memReqInfo.image = hImage;
++
++ VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
++
++ VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
++ VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
++
++ (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
++
++ memReq = memReq2.memoryRequirements;
++ requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
++ prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
++ }
++ else
++#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
++ {
++ (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
++ requiresDedicatedAllocation = false;
++ prefersDedicatedAllocation = false;
++ }
++}
++
++VkResult VmaAllocator_T::FindMemoryTypeIndex(
++ uint32_t memoryTypeBits,
++ const VmaAllocationCreateInfo* pAllocationCreateInfo,
++ VkFlags bufImgUsage,
++ uint32_t* pMemoryTypeIndex) const
++{
++ memoryTypeBits &= GetGlobalMemoryTypeBits();
++
++ if(pAllocationCreateInfo->memoryTypeBits != 0)
++ {
++ memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
++ }
++
++ VkMemoryPropertyFlags requiredFlags = 0, preferredFlags = 0, notPreferredFlags = 0;
++ if(!FindMemoryPreferences(
++ IsIntegratedGpu(),
++ *pAllocationCreateInfo,
++ bufImgUsage,
++ requiredFlags, preferredFlags, notPreferredFlags))
++ {
++ return VK_ERROR_FEATURE_NOT_PRESENT;
++ }
++
++ *pMemoryTypeIndex = UINT32_MAX;
++ uint32_t minCost = UINT32_MAX;
++ for(uint32_t memTypeIndex = 0, memTypeBit = 1;
++ memTypeIndex < GetMemoryTypeCount();
++ ++memTypeIndex, memTypeBit <<= 1)
++ {
++ // This memory type is acceptable according to memoryTypeBits bitmask.
++ if((memTypeBit & memoryTypeBits) != 0)
++ {
++ const VkMemoryPropertyFlags currFlags =
++ m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
++ // This memory type contains requiredFlags.
++ if((requiredFlags & ~currFlags) == 0)
++ {
++                    // Cost = number of preferredFlags bits missing from this memory
++                    // type, plus the number of notPreferredFlags bits present in it.
++ uint32_t currCost = VMA_COUNT_BITS_SET(preferredFlags & ~currFlags) +
++ VMA_COUNT_BITS_SET(currFlags & notPreferredFlags);
++ // Remember memory type with lowest cost.
++ if(currCost < minCost)
++ {
++ *pMemoryTypeIndex = memTypeIndex;
++ if(currCost == 0)
++ {
++ return VK_SUCCESS;
++ }
++ minCost = currCost;
++ }
++ }
++ }
++ }
++ return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
++}
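++
++// Worked example of the cost metric above, for hypothetical memory types with
++// preferredFlags = HOST_VISIBLE | HOST_COHERENT and notPreferredFlags = 0:
++//   type A: DEVICE_LOCAL                 -> cost 2 (both preferred bits missing)
++//   type B: HOST_VISIBLE                 -> cost 1 (HOST_COHERENT missing)
++//   type C: HOST_VISIBLE | HOST_COHERENT -> cost 0 (returned immediately)
++// The first type reaching cost 0 wins; otherwise the minimum-cost type is kept.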
++
++VkResult VmaAllocator_T::CalcMemTypeParams(
++ VmaAllocationCreateInfo& inoutCreateInfo,
++ uint32_t memTypeIndex,
++ VkDeviceSize size,
++ size_t allocationCount)
++{
++ // If memory type is not HOST_VISIBLE, disable MAPPED.
++ if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
++ (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
++ {
++ inoutCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
++ }
++
++ if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
++ (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0)
++ {
++ const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
++ VmaBudget heapBudget = {};
++ GetHeapBudgets(&heapBudget, heapIndex, 1);
++ if(heapBudget.usage + size * allocationCount > heapBudget.budget)
++ {
++ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
++ }
++ }
++ return VK_SUCCESS;
++}
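++
++// Budget check illustration for the DEDICATED + WITHIN_BUDGET path above:
++// with heapBudget.usage = 3 GiB and heapBudget.budget = 4 GiB, a request of
++// size = 256 MiB with allocationCount = 8 gives 3 GiB + 2 GiB > 4 GiB, so the
++// function fails early with VK_ERROR_OUT_OF_DEVICE_MEMORY instead of calling
++// vkAllocateMemory.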
++
++VkResult VmaAllocator_T::CalcAllocationParams(
++ VmaAllocationCreateInfo& inoutCreateInfo,
++ bool dedicatedRequired,
++ bool dedicatedPreferred)
++{
++ VMA_ASSERT((inoutCreateInfo.flags &
++ (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) !=
++ (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) &&
++ "Specifying both flags VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT and VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT is incorrect.");
++ VMA_ASSERT((((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) == 0 ||
++ (inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0)) &&
++ "Specifying VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT requires also VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.");
++ if(inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST)
++ {
++ if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0)
++ {
++ VMA_ASSERT((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0 &&
++ "When using VMA_ALLOCATION_CREATE_MAPPED_BIT and usage = VMA_MEMORY_USAGE_AUTO*, you must also specify VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.");
++ }
++ }
++
++    // If memory is lazily allocated, it should always be dedicated.
++ if(dedicatedRequired ||
++ inoutCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
++ {
++ inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
++ }
++
++ if(inoutCreateInfo.pool != VK_NULL_HANDLE)
++ {
++ if(inoutCreateInfo.pool->m_BlockVector.HasExplicitBlockSize() &&
++ (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
++ {
++ VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations.");
++ return VK_ERROR_FEATURE_NOT_PRESENT;
++ }
++ inoutCreateInfo.priority = inoutCreateInfo.pool->m_BlockVector.GetPriority();
++ }
++
++ if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
++ (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
++ {
++ VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
++ return VK_ERROR_FEATURE_NOT_PRESENT;
++ }
++
++ if(VMA_DEBUG_ALWAYS_DEDICATED_MEMORY &&
++ (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
++ {
++ inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
++ }
++
++    // Non-auto USAGE values imply HOST_ACCESS flags, and so does
++    // VMA_MEMORY_USAGE_UNKNOWN because it is used with custom pools.
++    // Which specific flag is used doesn't matter: the flags change behavior only
++    // when used with VMA_MEMORY_USAGE_AUTO*; otherwise they just protect against
++    // an assert on mapping.
++ if(inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO &&
++ inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE &&
++ inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_HOST)
++ {
++ if((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) == 0)
++ {
++ inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
++ }
++ }
++
++ return VK_SUCCESS;
++}
++
++VkResult VmaAllocator_T::AllocateMemory(
++ const VkMemoryRequirements& vkMemReq,
++ bool requiresDedicatedAllocation,
++ bool prefersDedicatedAllocation,
++ VkBuffer dedicatedBuffer,
++ VkImage dedicatedImage,
++ VkFlags dedicatedBufferImageUsage,
++ const VmaAllocationCreateInfo& createInfo,
++ VmaSuballocationType suballocType,
++ size_t allocationCount,
++ VmaAllocation* pAllocations)
++{
++ memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
++
++ VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
++
++ if(vkMemReq.size == 0)
++ {
++ return VK_ERROR_INITIALIZATION_FAILED;
++ }
++
++ VmaAllocationCreateInfo createInfoFinal = createInfo;
++ VkResult res = CalcAllocationParams(createInfoFinal, requiresDedicatedAllocation, prefersDedicatedAllocation);
++ if(res != VK_SUCCESS)
++ return res;
++
++ if(createInfoFinal.pool != VK_NULL_HANDLE)
++ {
++ VmaBlockVector& blockVector = createInfoFinal.pool->m_BlockVector;
++ return AllocateMemoryOfType(
++ createInfoFinal.pool,
++ vkMemReq.size,
++ vkMemReq.alignment,
++ prefersDedicatedAllocation,
++ dedicatedBuffer,
++ dedicatedImage,
++ dedicatedBufferImageUsage,
++ createInfoFinal,
++ blockVector.GetMemoryTypeIndex(),
++ suballocType,
++ createInfoFinal.pool->m_DedicatedAllocations,
++ blockVector,
++ allocationCount,
++ pAllocations);
++ }
++ else
++ {
++ // Bit mask of memory Vulkan types acceptable for this allocation.
++ uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
++ uint32_t memTypeIndex = UINT32_MAX;
++ res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex);
++ // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
++ if(res != VK_SUCCESS)
++ return res;
++ do
++ {
++ VmaBlockVector* blockVector = m_pBlockVectors[memTypeIndex];
++ VMA_ASSERT(blockVector && "Trying to use unsupported memory type!");
++ res = AllocateMemoryOfType(
++ VK_NULL_HANDLE,
++ vkMemReq.size,
++ vkMemReq.alignment,
++ requiresDedicatedAllocation || prefersDedicatedAllocation,
++ dedicatedBuffer,
++ dedicatedImage,
++ dedicatedBufferImageUsage,
++ createInfoFinal,
++ memTypeIndex,
++ suballocType,
++ m_DedicatedAllocations[memTypeIndex],
++ *blockVector,
++ allocationCount,
++ pAllocations);
++ // Allocation succeeded
++ if(res == VK_SUCCESS)
++ return VK_SUCCESS;
++
++ // Remove old memTypeIndex from list of possibilities.
++ memoryTypeBits &= ~(1u << memTypeIndex);
++ // Find alternative memTypeIndex.
++ res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex);
++ } while(res == VK_SUCCESS);
++
++ // No other matching memory type index could be found.
++ // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
++ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
++ }
++}
++
++void VmaAllocator_T::FreeMemory(
++ size_t allocationCount,
++ const VmaAllocation* pAllocations)
++{
++ VMA_ASSERT(pAllocations);
++
++ for(size_t allocIndex = allocationCount; allocIndex--; )
++ {
++ VmaAllocation allocation = pAllocations[allocIndex];
++
++ if(allocation != VK_NULL_HANDLE)
++ {
++ if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
++ {
++ FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
++ }
++
++ allocation->FreeName(this);
++
++ switch(allocation->GetType())
++ {
++ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
++ {
++ VmaBlockVector* pBlockVector = VMA_NULL;
++ VmaPool hPool = allocation->GetParentPool();
++ if(hPool != VK_NULL_HANDLE)
++ {
++ pBlockVector = &hPool->m_BlockVector;
++ }
++ else
++ {
++ const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
++ pBlockVector = m_pBlockVectors[memTypeIndex];
++ VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!");
++ }
++ pBlockVector->Free(allocation);
++ }
++ break;
++ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
++ FreeDedicatedMemory(allocation);
++ break;
++ default:
++ VMA_ASSERT(0);
++ }
++ }
++ }
++}
++
++void VmaAllocator_T::CalculateStatistics(VmaTotalStatistics* pStats)
++{
++ // Initialize.
++ VmaClearDetailedStatistics(pStats->total);
++ for(uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
++ VmaClearDetailedStatistics(pStats->memoryType[i]);
++ for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
++ VmaClearDetailedStatistics(pStats->memoryHeap[i]);
++
++ // Process default pools.
++ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
++ {
++ VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
++ if (pBlockVector != VMA_NULL)
++ pBlockVector->AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
++ }
++
++ // Process custom pools.
++ {
++ VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
++ for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
++ {
++ VmaBlockVector& blockVector = pool->m_BlockVector;
++ const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex();
++ blockVector.AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
++ pool->m_DedicatedAllocations.AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
++ }
++ }
++
++ // Process dedicated allocations.
++ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
++ {
++ m_DedicatedAllocations[memTypeIndex].AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
++ }
++
++ // Sum from memory types to memory heaps.
++ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
++ {
++ const uint32_t memHeapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex;
++ VmaAddDetailedStatistics(pStats->memoryHeap[memHeapIndex], pStats->memoryType[memTypeIndex]);
++ }
++
++ // Sum from memory heaps to total.
++ for(uint32_t memHeapIndex = 0; memHeapIndex < GetMemoryHeapCount(); ++memHeapIndex)
++ VmaAddDetailedStatistics(pStats->total, pStats->memoryHeap[memHeapIndex]);
++
++ VMA_ASSERT(pStats->total.statistics.allocationCount == 0 ||
++ pStats->total.allocationSizeMax >= pStats->total.allocationSizeMin);
++ VMA_ASSERT(pStats->total.unusedRangeCount == 0 ||
++ pStats->total.unusedRangeSizeMax >= pStats->total.unusedRangeSizeMin);
++}
++
++void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount)
++{
++#if VMA_MEMORY_BUDGET
++ if(m_UseExtMemoryBudget)
++ {
++ if(m_Budget.m_OperationsSinceBudgetFetch < 30)
++ {
++ VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
++ for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets)
++ {
++ const uint32_t heapIndex = firstHeap + i;
++
++ outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex];
++ outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex];
++ outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex];
++ outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
++
++ if(m_Budget.m_VulkanUsage[heapIndex] + outBudgets->statistics.blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
++ {
++ outBudgets->usage = m_Budget.m_VulkanUsage[heapIndex] +
++ outBudgets->statistics.blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
++ }
++ else
++ {
++ outBudgets->usage = 0;
++ }
++
++ // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
++ outBudgets->budget = VMA_MIN(
++ m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
++ }
++ }
++ else
++ {
++ UpdateVulkanBudget(); // Outside of mutex lock
++ GetHeapBudgets(outBudgets, firstHeap, heapCount); // Recursion
++ }
++ }
++ else
++#endif
++ {
++ for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets)
++ {
++ const uint32_t heapIndex = firstHeap + i;
++
++ outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex];
++ outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex];
++ outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex];
++ outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
++
++ outBudgets->usage = outBudgets->statistics.blockBytes;
++            outBudgets->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
++ }
++ }
++}
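++
++// Without VK_EXT_memory_budget, the fallback above can only approximate: usage
++// counts the bytes of blocks allocated by this allocator, and budget is 80% of
++// the heap size (e.g. an 8 GiB heap yields a 6.4 GiB budget), so memory used by
++// other applications or APIs on the same heap is invisible here.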
++
++void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
++{
++ pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
++ pAllocationInfo->deviceMemory = hAllocation->GetMemory();
++ pAllocationInfo->offset = hAllocation->GetOffset();
++ pAllocationInfo->size = hAllocation->GetSize();
++ pAllocationInfo->pMappedData = hAllocation->GetMappedData();
++ pAllocationInfo->pUserData = hAllocation->GetUserData();
++ pAllocationInfo->pName = hAllocation->GetName();
++}
++
++VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
++{
++ VMA_DEBUG_LOG_FORMAT(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
++
++ VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
++
++    // Protection against an uninitialized new structure member. If garbage data were left there, this pointer dereference could crash.
++ if(pCreateInfo->pMemoryAllocateNext)
++ {
++ VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0);
++ }
++
++ if(newCreateInfo.maxBlockCount == 0)
++ {
++ newCreateInfo.maxBlockCount = SIZE_MAX;
++ }
++ if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
++ {
++ return VK_ERROR_INITIALIZATION_FAILED;
++ }
++ // Memory type index out of range or forbidden.
++ if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
++ ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
++ {
++ return VK_ERROR_FEATURE_NOT_PRESENT;
++ }
++ if(newCreateInfo.minAllocationAlignment > 0)
++ {
++ VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment));
++ }
++
++ const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
++
++ *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
++
++ VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
++ if(res != VK_SUCCESS)
++ {
++ vma_delete(this, *pPool);
++ *pPool = VMA_NULL;
++ return res;
++ }
++
++ // Add to m_Pools.
++ {
++ VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
++ (*pPool)->SetId(m_NextPoolId++);
++ m_Pools.PushBack(*pPool);
++ }
++
++ return VK_SUCCESS;
++}
++
++void VmaAllocator_T::DestroyPool(VmaPool pool)
++{
++ // Remove from m_Pools.
++ {
++ VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
++ m_Pools.Remove(pool);
++ }
++
++ vma_delete(this, pool);
++}
++
++void VmaAllocator_T::GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats)
++{
++ VmaClearStatistics(*pPoolStats);
++ pool->m_BlockVector.AddStatistics(*pPoolStats);
++ pool->m_DedicatedAllocations.AddStatistics(*pPoolStats);
++}
++
++void VmaAllocator_T::CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats)
++{
++ VmaClearDetailedStatistics(*pPoolStats);
++ pool->m_BlockVector.AddDetailedStatistics(*pPoolStats);
++ pool->m_DedicatedAllocations.AddDetailedStatistics(*pPoolStats);
++}
++
++void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
++{
++ m_CurrentFrameIndex.store(frameIndex);
++
++#if VMA_MEMORY_BUDGET
++ if(m_UseExtMemoryBudget)
++ {
++ UpdateVulkanBudget();
++ }
++#endif // #if VMA_MEMORY_BUDGET
++}
++
++VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
++{
++ return hPool->m_BlockVector.CheckCorruption();
++}
++
++VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
++{
++ VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
++
++ // Process default pools.
++ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
++ {
++ VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
++ if(pBlockVector != VMA_NULL)
++ {
++ VkResult localRes = pBlockVector->CheckCorruption();
++ switch(localRes)
++ {
++ case VK_ERROR_FEATURE_NOT_PRESENT:
++ break;
++ case VK_SUCCESS:
++ finalRes = VK_SUCCESS;
++ break;
++ default:
++ return localRes;
++ }
++ }
++ }
++
++ // Process custom pools.
++ {
++ VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
++ for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
++ {
++ if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
++ {
++ VkResult localRes = pool->m_BlockVector.CheckCorruption();
++ switch(localRes)
++ {
++ case VK_ERROR_FEATURE_NOT_PRESENT:
++ break;
++ case VK_SUCCESS:
++ finalRes = VK_SUCCESS;
++ break;
++ default:
++ return localRes;
++ }
++ }
++ }
++ }
++
++ return finalRes;
++}
++
++VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
++{
++ AtomicTransactionalIncrement<VMA_ATOMIC_UINT32> deviceMemoryCountIncrement;
++ const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount);
++#if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
++ if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount)
++ {
++ return VK_ERROR_TOO_MANY_OBJECTS;
++ }
++#endif
++
++ const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
++
++ // HeapSizeLimit is in effect for this heap.
++ if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
++ {
++ const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
++ VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
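++        // On CAS failure, compare_exchange_strong reloads the current value into
++        // blockBytes, so each iteration of this loop re-checks the heap limit
++        // against the latest value published by concurrent allocations.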
++ for(;;)
++ {
++ const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
++ if(blockBytesAfterAllocation > heapSize)
++ {
++ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
++ }
++ if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
++ {
++ break;
++ }
++ }
++ }
++ else
++ {
++ m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
++ }
++ ++m_Budget.m_BlockCount[heapIndex];
++
++ // VULKAN CALL vkAllocateMemory.
++ VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
++
++ if(res == VK_SUCCESS)
++ {
++#if VMA_MEMORY_BUDGET
++ ++m_Budget.m_OperationsSinceBudgetFetch;
++#endif
++
++ // Informative callback.
++ if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
++ {
++ (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
++ }
++
++ deviceMemoryCountIncrement.Commit();
++ }
++ else
++ {
++ --m_Budget.m_BlockCount[heapIndex];
++ m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
++ }
++
++ return res;
++}
++
++void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
++{
++ // Informative callback.
++ if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
++ {
++ (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
++ }
++
++ // VULKAN CALL vkFreeMemory.
++ (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
++
++ const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
++ --m_Budget.m_BlockCount[heapIndex];
++ m_Budget.m_BlockBytes[heapIndex] -= size;
++
++ --m_DeviceMemoryCount;
++}
++
++VkResult VmaAllocator_T::BindVulkanBuffer(
++ VkDeviceMemory memory,
++ VkDeviceSize memoryOffset,
++ VkBuffer buffer,
++ const void* pNext)
++{
++ if(pNext != VMA_NULL)
++ {
++#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
++ if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
++ m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
++ {
++ VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
++ bindBufferMemoryInfo.pNext = pNext;
++ bindBufferMemoryInfo.buffer = buffer;
++ bindBufferMemoryInfo.memory = memory;
++ bindBufferMemoryInfo.memoryOffset = memoryOffset;
++ return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
++ }
++ else
++#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
++ {
++ return VK_ERROR_EXTENSION_NOT_PRESENT;
++ }
++ }
++ else
++ {
++ return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
++ }
++}
++
++VkResult VmaAllocator_T::BindVulkanImage(
++ VkDeviceMemory memory,
++ VkDeviceSize memoryOffset,
++ VkImage image,
++ const void* pNext)
++{
++ if(pNext != VMA_NULL)
++ {
++#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
++ if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
++ m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
++ {
++            VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
++            bindImageMemoryInfo.pNext = pNext;
++            bindImageMemoryInfo.image = image;
++            bindImageMemoryInfo.memory = memory;
++            bindImageMemoryInfo.memoryOffset = memoryOffset;
++            return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);
++ }
++ else
++#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
++ {
++ return VK_ERROR_EXTENSION_NOT_PRESENT;
++ }
++ }
++ else
++ {
++ return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
++ }
++}
++
++VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
++{
++ switch(hAllocation->GetType())
++ {
++ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
++ {
++ VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
++ char *pBytes = VMA_NULL;
++ VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
++ if(res == VK_SUCCESS)
++ {
++ *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
++ hAllocation->BlockAllocMap();
++ }
++ return res;
++ }
++ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
++ return hAllocation->DedicatedAllocMap(this, ppData);
++ default:
++ VMA_ASSERT(0);
++ return VK_ERROR_MEMORY_MAP_FAILED;
++ }
++}
++
++void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
++{
++ switch(hAllocation->GetType())
++ {
++ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
++ {
++ VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
++ hAllocation->BlockAllocUnmap();
++ pBlock->Unmap(this, 1);
++ }
++ break;
++ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
++ hAllocation->DedicatedAllocUnmap(this);
++ break;
++ default:
++ VMA_ASSERT(0);
++ }
++}
++
++VkResult VmaAllocator_T::BindBufferMemory(
++ VmaAllocation hAllocation,
++ VkDeviceSize allocationLocalOffset,
++ VkBuffer hBuffer,
++ const void* pNext)
++{
++ VkResult res = VK_ERROR_UNKNOWN;
++ switch(hAllocation->GetType())
++ {
++ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
++ res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
++ break;
++ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
++ {
++ VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
++ VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block.");
++ res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
++ break;
++ }
++ default:
++ VMA_ASSERT(0);
++ }
++ return res;
++}
++
++VkResult VmaAllocator_T::BindImageMemory(
++ VmaAllocation hAllocation,
++ VkDeviceSize allocationLocalOffset,
++ VkImage hImage,
++ const void* pNext)
++{
++ VkResult res = VK_ERROR_UNKNOWN;
++ switch(hAllocation->GetType())
++ {
++ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
++ res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
++ break;
++ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
++ {
++ VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
++ VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block.");
++ res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
++ break;
++ }
++ default:
++ VMA_ASSERT(0);
++ }
++ return res;
++}
++
++VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
++ VmaAllocation hAllocation,
++ VkDeviceSize offset, VkDeviceSize size,
++ VMA_CACHE_OPERATION op)
++{
++ VkResult res = VK_SUCCESS;
++
++ VkMappedMemoryRange memRange = {};
++ if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
++ {
++ switch(op)
++ {
++ case VMA_CACHE_FLUSH:
++ res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
++ break;
++ case VMA_CACHE_INVALIDATE:
++ res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
++ break;
++ default:
++ VMA_ASSERT(0);
++ }
++ }
++ // else: Just ignore this call.
++ return res;
++}
++
++VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
++ uint32_t allocationCount,
++ const VmaAllocation* allocations,
++ const VkDeviceSize* offsets, const VkDeviceSize* sizes,
++ VMA_CACHE_OPERATION op)
++{
++ typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
++ typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
++ RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
++
++ for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
++ {
++ const VmaAllocation alloc = allocations[allocIndex];
++ const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
++ const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
++ VkMappedMemoryRange newRange;
++ if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
++ {
++ ranges.push_back(newRange);
++ }
++ }
++
++ VkResult res = VK_SUCCESS;
++ if(!ranges.empty())
++ {
++ switch(op)
++ {
++ case VMA_CACHE_FLUSH:
++ res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
++ break;
++ case VMA_CACHE_INVALIDATE:
++ res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
++ break;
++ default:
++ VMA_ASSERT(0);
++ }
++ }
++ // else: Just ignore this call.
++ return res;
++}
++
++void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
++{
++ VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
++
++ const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
++ VmaPool parentPool = allocation->GetParentPool();
++ if(parentPool == VK_NULL_HANDLE)
++ {
++ // Default pool
++ m_DedicatedAllocations[memTypeIndex].Unregister(allocation);
++ }
++ else
++ {
++ // Custom pool
++ parentPool->m_DedicatedAllocations.Unregister(allocation);
++ }
++
++ VkDeviceMemory hMemory = allocation->GetMemory();
++
++ /*
++    There is no need to call this, because the Vulkan spec allows skipping
++    vkUnmapMemory before vkFreeMemory.
++
++ if(allocation->GetMappedData() != VMA_NULL)
++ {
++ (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
++ }
++ */
++
++ FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
++
++ m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
++ m_AllocationObjectAllocator.Free(allocation);
++
++ VMA_DEBUG_LOG_FORMAT(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
++}
++
++uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
++{
++ VkBufferCreateInfo dummyBufCreateInfo;
++ VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
++
++ uint32_t memoryTypeBits = 0;
++
++ // Create buffer.
++ VkBuffer buf = VK_NULL_HANDLE;
++ VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
++ m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
++ if(res == VK_SUCCESS)
++ {
++ // Query for supported memory types.
++ VkMemoryRequirements memReq;
++ (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
++ memoryTypeBits = memReq.memoryTypeBits;
++
++ // Destroy buffer.
++ (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
++ }
++
++ return memoryTypeBits;
++}
++
++uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
++{
++ // Make sure memory information is already fetched.
++ VMA_ASSERT(GetMemoryTypeCount() > 0);
++
++ uint32_t memoryTypeBits = UINT32_MAX;
++
++ if(!m_UseAmdDeviceCoherentMemory)
++ {
++ // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
++ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
++ {
++ if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
++ {
++ memoryTypeBits &= ~(1u << memTypeIndex);
++ }
++ }
++ }
++
++ return memoryTypeBits;
++}
++
++bool VmaAllocator_T::GetFlushOrInvalidateRange(
++ VmaAllocation allocation,
++ VkDeviceSize offset, VkDeviceSize size,
++ VkMappedMemoryRange& outRange) const
++{
++ const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
++ if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
++ {
++ const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
++ const VkDeviceSize allocationSize = allocation->GetSize();
++ VMA_ASSERT(offset <= allocationSize);
++
++ outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
++ outRange.pNext = VMA_NULL;
++ outRange.memory = allocation->GetMemory();
++
++ switch(allocation->GetType())
++ {
++ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
++ outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
++ if(size == VK_WHOLE_SIZE)
++ {
++ outRange.size = allocationSize - outRange.offset;
++ }
++ else
++ {
++ VMA_ASSERT(offset + size <= allocationSize);
++ outRange.size = VMA_MIN(
++ VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
++ allocationSize - outRange.offset);
++ }
++ break;
++ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
++ {
++ // 1. Still within this allocation.
++ outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
++ if(size == VK_WHOLE_SIZE)
++ {
++ size = allocationSize - offset;
++ }
++ else
++ {
++ VMA_ASSERT(offset + size <= allocationSize);
++ }
++ outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);
++
++ // 2. Adjust to whole block.
++ const VkDeviceSize allocationOffset = allocation->GetOffset();
++ VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
++ const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
++ outRange.offset += allocationOffset;
++ outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);
++
++ break;
++ }
++ default:
++ VMA_ASSERT(0);
++ }
++ return true;
++ }
++ return false;
++}
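++
++// Alignment example for the function above: with nonCoherentAtomSize = 64,
++// offset = 100 and size = 8 give outRange.offset = VmaAlignDown(100, 64) = 64
++// and outRange.size = VmaAlignUp(8 + (100 - 64), 64) = 64, then clamped so the
++// range never extends past the allocation (or, for block allocations, the block).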
++
++#if VMA_MEMORY_BUDGET
++void VmaAllocator_T::UpdateVulkanBudget()
++{
++ VMA_ASSERT(m_UseExtMemoryBudget);
++
++ VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
++
++ VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
++ VmaPnextChainPushFront(&memProps, &budgetProps);
++
++ GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
++
++ {
++ VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
++
++ for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
++ {
++ m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
++ m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
++ m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
++
++            // Some buggy drivers return the budget incorrectly, e.g. 0 or much bigger than the heap size.
++ if(m_Budget.m_VulkanBudget[heapIndex] == 0)
++ {
++                m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
++ }
++ else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
++ {
++ m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
++ }
++ if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
++ {
++ m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
++ }
++ }
++ m_Budget.m_OperationsSinceBudgetFetch = 0;
++ }
++}
++#endif // VMA_MEMORY_BUDGET
++
++void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
++{
++ if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
++ hAllocation->IsMappingAllowed() &&
++ (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
++ {
++ void* pData = VMA_NULL;
++ VkResult res = Map(hAllocation, &pData);
++ if(res == VK_SUCCESS)
++ {
++ memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
++ FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
++ Unmap(hAllocation);
++ }
++ else
++ {
++ VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
++ }
++ }
++}
++
++uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
++{
++ uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
++ if(memoryTypeBits == UINT32_MAX)
++ {
++ memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
++ m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
++ }
++ return memoryTypeBits;
++}
++
++#if VMA_STATS_STRING_ENABLED
++void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
++{
++ json.WriteString("DefaultPools");
++ json.BeginObject();
++ {
++ for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
++ {
++ VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex];
++ VmaDedicatedAllocationList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex];
++ if (pBlockVector != VMA_NULL)
++ {
++ json.BeginString("Type ");
++ json.ContinueString(memTypeIndex);
++ json.EndString();
++ json.BeginObject();
++ {
++ json.WriteString("PreferredBlockSize");
++ json.WriteNumber(pBlockVector->GetPreferredBlockSize());
++
++ json.WriteString("Blocks");
++ pBlockVector->PrintDetailedMap(json);
++
++ json.WriteString("DedicatedAllocations");
++ dedicatedAllocList.BuildStatsString(json);
++ }
++ json.EndObject();
++ }
++ }
++ }
++ json.EndObject();
++
++ json.WriteString("CustomPools");
++ json.BeginObject();
++ {
++ VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
++ if (!m_Pools.IsEmpty())
++ {
++ for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
++ {
++ bool displayType = true;
++ size_t index = 0;
++ for (VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
++ {
++ VmaBlockVector& blockVector = pool->m_BlockVector;
++ if (blockVector.GetMemoryTypeIndex() == memTypeIndex)
++ {
++ if (displayType)
++ {
++ json.BeginString("Type ");
++ json.ContinueString(memTypeIndex);
++ json.EndString();
++ json.BeginArray();
++ displayType = false;
++ }
++
++ json.BeginObject();
++ {
++ json.WriteString("Name");
++ json.BeginString();
++ json.ContinueString((uint64_t)index++);
++ if (pool->GetName())
++ {
++ json.ContinueString(" - ");
++ json.ContinueString(pool->GetName());
++ }
++ json.EndString();
++
++ json.WriteString("PreferredBlockSize");
++ json.WriteNumber(blockVector.GetPreferredBlockSize());
++
++ json.WriteString("Blocks");
++ blockVector.PrintDetailedMap(json);
++
++ json.WriteString("DedicatedAllocations");
++ pool->m_DedicatedAllocations.BuildStatsString(json);
++ }
++ json.EndObject();
++ }
++ }
++
++ if (!displayType)
++ json.EndArray();
++ }
++ }
++ }
++ json.EndObject();
++}
++#endif // VMA_STATS_STRING_ENABLED
++#endif // _VMA_ALLOCATOR_T_FUNCTIONS
++
++
++#ifndef _VMA_PUBLIC_INTERFACE
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
++ const VmaAllocatorCreateInfo* pCreateInfo,
++ VmaAllocator* pAllocator)
++{
++ VMA_ASSERT(pCreateInfo && pAllocator);
++ VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
++ (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 3));
++ VMA_DEBUG_LOG("vmaCreateAllocator");
++ *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
++ VkResult result = (*pAllocator)->Init(pCreateInfo);
++ if(result < 0)
++ {
++ vma_delete(pCreateInfo->pAllocationCallbacks, *pAllocator);
++ *pAllocator = VK_NULL_HANDLE;
++ }
++ return result;
++}
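++
++/* Minimal usage sketch for the entry point above (illustrative; assumes the
++   caller already created `instance`, `physicalDevice` and `device`, and that
++   the VMA_STATIC/DYNAMIC_VULKAN_FUNCTIONS configuration makes supplying
++   pVulkanFunctions unnecessary):
++
++       VmaAllocatorCreateInfo createInfo = {};
++       createInfo.vulkanApiVersion = VK_API_VERSION_1_1;
++       createInfo.instance = instance;
++       createInfo.physicalDevice = physicalDevice;
++       createInfo.device = device;
++
++       VmaAllocator allocator = VK_NULL_HANDLE;
++       VkResult res = vmaCreateAllocator(&createInfo, &allocator);
++       // ... use the allocator ...
++       vmaDestroyAllocator(allocator);
++*/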
++
++VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
++ VmaAllocator allocator)
++{
++ if(allocator != VK_NULL_HANDLE)
++ {
++ VMA_DEBUG_LOG("vmaDestroyAllocator");
++ VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks; // Have to copy the callbacks when destroying.
++ vma_delete(&allocationCallbacks, allocator);
++ }
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
++{
++ VMA_ASSERT(allocator && pAllocatorInfo);
++ pAllocatorInfo->instance = allocator->m_hInstance;
++ pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
++ pAllocatorInfo->device = allocator->m_hDevice;
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
++ VmaAllocator allocator,
++ const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
++{
++ VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
++ *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
++ VmaAllocator allocator,
++ const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
++{
++ VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
++ *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
++ VmaAllocator allocator,
++ uint32_t memoryTypeIndex,
++ VkMemoryPropertyFlags* pFlags)
++{
++ VMA_ASSERT(allocator && pFlags);
++ VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
++ *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
++ VmaAllocator allocator,
++ uint32_t frameIndex)
++{
++ VMA_ASSERT(allocator);
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ allocator->SetCurrentFrameIndex(frameIndex);
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics(
++ VmaAllocator allocator,
++ VmaTotalStatistics* pStats)
++{
++ VMA_ASSERT(allocator && pStats);
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++ allocator->CalculateStatistics(pStats);
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets(
++ VmaAllocator allocator,
++ VmaBudget* pBudgets)
++{
++ VMA_ASSERT(allocator && pBudgets);
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++ allocator->GetHeapBudgets(pBudgets, 0, allocator->GetMemoryHeapCount());
++}
++
++#if VMA_STATS_STRING_ENABLED
++
++VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
++ VmaAllocator allocator,
++ char** ppStatsString,
++ VkBool32 detailedMap)
++{
++ VMA_ASSERT(allocator && ppStatsString);
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ VmaStringBuilder sb(allocator->GetAllocationCallbacks());
++ {
++ VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
++ allocator->GetHeapBudgets(budgets, 0, allocator->GetMemoryHeapCount());
++
++ VmaTotalStatistics stats;
++ allocator->CalculateStatistics(&stats);
++
++ VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
++ json.BeginObject();
++ {
++ json.WriteString("General");
++ json.BeginObject();
++ {
++ const VkPhysicalDeviceProperties& deviceProperties = allocator->m_PhysicalDeviceProperties;
++ const VkPhysicalDeviceMemoryProperties& memoryProperties = allocator->m_MemProps;
++
++ json.WriteString("API");
++ json.WriteString("Vulkan");
++
++ json.WriteString("apiVersion");
++ json.BeginString();
++ json.ContinueString(VK_VERSION_MAJOR(deviceProperties.apiVersion));
++ json.ContinueString(".");
++ json.ContinueString(VK_VERSION_MINOR(deviceProperties.apiVersion));
++ json.ContinueString(".");
++ json.ContinueString(VK_VERSION_PATCH(deviceProperties.apiVersion));
++ json.EndString();
++
++ json.WriteString("GPU");
++ json.WriteString(deviceProperties.deviceName);
++ json.WriteString("deviceType");
++ json.WriteNumber(static_cast<uint32_t>(deviceProperties.deviceType));
++
++ json.WriteString("maxMemoryAllocationCount");
++ json.WriteNumber(deviceProperties.limits.maxMemoryAllocationCount);
++ json.WriteString("bufferImageGranularity");
++ json.WriteNumber(deviceProperties.limits.bufferImageGranularity);
++ json.WriteString("nonCoherentAtomSize");
++ json.WriteNumber(deviceProperties.limits.nonCoherentAtomSize);
++
++ json.WriteString("memoryHeapCount");
++ json.WriteNumber(memoryProperties.memoryHeapCount);
++ json.WriteString("memoryTypeCount");
++ json.WriteNumber(memoryProperties.memoryTypeCount);
++ }
++ json.EndObject();
++ }
++ {
++ json.WriteString("Total");
++ VmaPrintDetailedStatistics(json, stats.total);
++ }
++ {
++ json.WriteString("MemoryInfo");
++ json.BeginObject();
++ {
++ for (uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
++ {
++ json.BeginString("Heap ");
++ json.ContinueString(heapIndex);
++ json.EndString();
++ json.BeginObject();
++ {
++ const VkMemoryHeap& heapInfo = allocator->m_MemProps.memoryHeaps[heapIndex];
++ json.WriteString("Flags");
++ json.BeginArray(true);
++ {
++ if (heapInfo.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)
++ json.WriteString("DEVICE_LOCAL");
++ #if VMA_VULKAN_VERSION >= 1001000
++ if (heapInfo.flags & VK_MEMORY_HEAP_MULTI_INSTANCE_BIT)
++ json.WriteString("MULTI_INSTANCE");
++ #endif
++
++ VkMemoryHeapFlags flags = heapInfo.flags &
++ ~(VK_MEMORY_HEAP_DEVICE_LOCAL_BIT
++ #if VMA_VULKAN_VERSION >= 1001000
++ | VK_MEMORY_HEAP_MULTI_INSTANCE_BIT
++ #endif
++ );
++ if (flags != 0)
++ json.WriteNumber(flags);
++ }
++ json.EndArray();
++
++ json.WriteString("Size");
++ json.WriteNumber(heapInfo.size);
++
++ json.WriteString("Budget");
++ json.BeginObject();
++ {
++ json.WriteString("BudgetBytes");
++ json.WriteNumber(budgets[heapIndex].budget);
++ json.WriteString("UsageBytes");
++ json.WriteNumber(budgets[heapIndex].usage);
++ }
++ json.EndObject();
++
++ json.WriteString("Stats");
++ VmaPrintDetailedStatistics(json, stats.memoryHeap[heapIndex]);
++
++ json.WriteString("MemoryPools");
++ json.BeginObject();
++ {
++ for (uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
++ {
++ if (allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
++ {
++ json.BeginString("Type ");
++ json.ContinueString(typeIndex);
++ json.EndString();
++ json.BeginObject();
++ {
++ json.WriteString("Flags");
++ json.BeginArray(true);
++ {
++ VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
++ if (flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
++ json.WriteString("DEVICE_LOCAL");
++ if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
++ json.WriteString("HOST_VISIBLE");
++ if (flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
++ json.WriteString("HOST_COHERENT");
++ if (flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT)
++ json.WriteString("HOST_CACHED");
++ if (flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT)
++ json.WriteString("LAZILY_ALLOCATED");
++ #if VMA_VULKAN_VERSION >= 1001000
++ if (flags & VK_MEMORY_PROPERTY_PROTECTED_BIT)
++ json.WriteString("PROTECTED");
++ #endif
++ #if VK_AMD_device_coherent_memory
++ if (flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY)
++ json.WriteString("DEVICE_COHERENT_AMD");
++ if (flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)
++ json.WriteString("DEVICE_UNCACHED_AMD");
++ #endif
++
++ flags &= ~(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
++ #if VMA_VULKAN_VERSION >= 1001000
++ | VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT
++ #endif
++ #if VK_AMD_device_coherent_memory
++ | VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY
++ | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY
++ #endif
++ | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
++ | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
++ | VK_MEMORY_PROPERTY_HOST_CACHED_BIT);
++ if (flags != 0)
++ json.WriteNumber(flags);
++ }
++ json.EndArray();
++
++ json.WriteString("Stats");
++ VmaPrintDetailedStatistics(json, stats.memoryType[typeIndex]);
++ }
++ json.EndObject();
++ }
++ }
++
++ }
++ json.EndObject();
++ }
++ json.EndObject();
++ }
++ }
++ json.EndObject();
++ }
++
++ if (detailedMap == VK_TRUE)
++ allocator->PrintDetailedMap(json);
++
++ json.EndObject();
++ }
++
++ *ppStatsString = VmaCreateStringCopy(allocator->GetAllocationCallbacks(), sb.GetData(), sb.GetLength());
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
++ VmaAllocator allocator,
++ char* pStatsString)
++{
++ if(pStatsString != VMA_NULL)
++ {
++ VMA_ASSERT(allocator);
++ VmaFreeString(allocator->GetAllocationCallbacks(), pStatsString);
++ }
++}
++
++#endif // VMA_STATS_STRING_ENABLED
++
++/*
++This function is not protected by any mutex because it just reads immutable data.
++*/
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
++ VmaAllocator allocator,
++ uint32_t memoryTypeBits,
++ const VmaAllocationCreateInfo* pAllocationCreateInfo,
++ uint32_t* pMemoryTypeIndex)
++{
++ VMA_ASSERT(allocator != VK_NULL_HANDLE);
++ VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
++ VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
++
++ return allocator->FindMemoryTypeIndex(memoryTypeBits, pAllocationCreateInfo, UINT32_MAX, pMemoryTypeIndex);
++}
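++
++/* Illustrative call (assumes `memReq` came from vkGetBufferMemoryRequirements):
++
++       VmaAllocationCreateInfo allocCreateInfo = {};
++       allocCreateInfo.usage = VMA_MEMORY_USAGE_UNKNOWN;
++       allocCreateInfo.requiredFlags =
++           VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
++
++       uint32_t memTypeIndex = 0;
++       VkResult res = vmaFindMemoryTypeIndex(
++           allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
++
++   Since bufImgUsage is passed as UINT32_MAX (unknown) here, the
++   VMA_MEMORY_USAGE_AUTO* values generally cannot be resolved by this function;
++   prefer vmaFindMemoryTypeIndexForBufferInfo or vmaFindMemoryTypeIndexForImageInfo
++   below when a VkBufferCreateInfo or VkImageCreateInfo is available.
++*/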
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
++ VmaAllocator allocator,
++ const VkBufferCreateInfo* pBufferCreateInfo,
++ const VmaAllocationCreateInfo* pAllocationCreateInfo,
++ uint32_t* pMemoryTypeIndex)
++{
++ VMA_ASSERT(allocator != VK_NULL_HANDLE);
++ VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
++ VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
++ VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
++
++ const VkDevice hDev = allocator->m_hDevice;
++ const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions();
++ VkResult res;
++
++#if VMA_VULKAN_VERSION >= 1003000
++ if(funcs->vkGetDeviceBufferMemoryRequirements)
++ {
++ // Can query straight from VkBufferCreateInfo :)
++ VkDeviceBufferMemoryRequirements devBufMemReq = {VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS};
++ devBufMemReq.pCreateInfo = pBufferCreateInfo;
++
++ VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2};
++ (*funcs->vkGetDeviceBufferMemoryRequirements)(hDev, &devBufMemReq, &memReq);
++
++ res = allocator->FindMemoryTypeIndex(
++ memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex);
++ }
++ else
++#endif // #if VMA_VULKAN_VERSION >= 1003000
++ {
++ // Must create a dummy buffer to query :(
++ VkBuffer hBuffer = VK_NULL_HANDLE;
++ res = funcs->vkCreateBuffer(
++ hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
++ if(res == VK_SUCCESS)
++ {
++ VkMemoryRequirements memReq = {};
++ funcs->vkGetBufferMemoryRequirements(hDev, hBuffer, &memReq);
++
++ res = allocator->FindMemoryTypeIndex(
++ memReq.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex);
++
++ funcs->vkDestroyBuffer(
++ hDev, hBuffer, allocator->GetAllocationCallbacks());
++ }
++ }
++ return res;
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
++ VmaAllocator allocator,
++ const VkImageCreateInfo* pImageCreateInfo,
++ const VmaAllocationCreateInfo* pAllocationCreateInfo,
++ uint32_t* pMemoryTypeIndex)
++{
++ VMA_ASSERT(allocator != VK_NULL_HANDLE);
++ VMA_ASSERT(pImageCreateInfo != VMA_NULL);
++ VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
++ VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
++
++ const VkDevice hDev = allocator->m_hDevice;
++ const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions();
++ VkResult res;
++
++#if VMA_VULKAN_VERSION >= 1003000
++ if(funcs->vkGetDeviceImageMemoryRequirements)
++ {
++ // Can query straight from VkImageCreateInfo :)
++ VkDeviceImageMemoryRequirements devImgMemReq = {VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS};
++ devImgMemReq.pCreateInfo = pImageCreateInfo;
++ VMA_ASSERT(pImageCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY && (pImageCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT_COPY) == 0 &&
++ "Cannot use this VkImageCreateInfo with vmaFindMemoryTypeIndexForImageInfo as I don't know what to pass as VkDeviceImageMemoryRequirements::planeAspect.");
++
++ VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2};
++ (*funcs->vkGetDeviceImageMemoryRequirements)(hDev, &devImgMemReq, &memReq);
++
++ res = allocator->FindMemoryTypeIndex(
++ memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex);
++ }
++ else
++#endif // #if VMA_VULKAN_VERSION >= 1003000
++ {
++ // Must create a dummy image to query :(
++ VkImage hImage = VK_NULL_HANDLE;
++ res = funcs->vkCreateImage(
++ hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
++ if(res == VK_SUCCESS)
++ {
++ VkMemoryRequirements memReq = {};
++ funcs->vkGetImageMemoryRequirements(hDev, hImage, &memReq);
++
++ res = allocator->FindMemoryTypeIndex(
++ memReq.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex);
++
++ funcs->vkDestroyImage(
++ hDev, hImage, allocator->GetAllocationCallbacks());
++ }
++ }
++ return res;
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
++ VmaAllocator allocator,
++ const VmaPoolCreateInfo* pCreateInfo,
++ VmaPool* pPool)
++{
++ VMA_ASSERT(allocator && pCreateInfo && pPool);
++
++ VMA_DEBUG_LOG("vmaCreatePool");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ return allocator->CreatePool(pCreateInfo, pPool);
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
++ VmaAllocator allocator,
++ VmaPool pool)
++{
++ VMA_ASSERT(allocator);
++
++ if(pool == VK_NULL_HANDLE)
++ {
++ return;
++ }
++
++ VMA_DEBUG_LOG("vmaDestroyPool");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ allocator->DestroyPool(pool);
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics(
++ VmaAllocator allocator,
++ VmaPool pool,
++ VmaStatistics* pPoolStats)
++{
++ VMA_ASSERT(allocator && pool && pPoolStats);
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ allocator->GetPoolStatistics(pool, pPoolStats);
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics(
++ VmaAllocator allocator,
++ VmaPool pool,
++ VmaDetailedStatistics* pPoolStats)
++{
++ VMA_ASSERT(allocator && pool && pPoolStats);
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ allocator->CalculatePoolStatistics(pool, pPoolStats);
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
++{
++ VMA_ASSERT(allocator && pool);
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ VMA_DEBUG_LOG("vmaCheckPoolCorruption");
++
++ return allocator->CheckPoolCorruption(pool);
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
++ VmaAllocator allocator,
++ VmaPool pool,
++ const char** ppName)
++{
++ VMA_ASSERT(allocator && pool && ppName);
++
++ VMA_DEBUG_LOG("vmaGetPoolName");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ *ppName = pool->GetName();
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
++ VmaAllocator allocator,
++ VmaPool pool,
++ const char* pName)
++{
++ VMA_ASSERT(allocator && pool);
++
++ VMA_DEBUG_LOG("vmaSetPoolName");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ pool->SetName(pName);
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
++ VmaAllocator allocator,
++ const VkMemoryRequirements* pVkMemoryRequirements,
++ const VmaAllocationCreateInfo* pCreateInfo,
++ VmaAllocation* pAllocation,
++ VmaAllocationInfo* pAllocationInfo)
++{
++ VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
++
++ VMA_DEBUG_LOG("vmaAllocateMemory");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ VkResult result = allocator->AllocateMemory(
++ *pVkMemoryRequirements,
++ false, // requiresDedicatedAllocation
++ false, // prefersDedicatedAllocation
++ VK_NULL_HANDLE, // dedicatedBuffer
++ VK_NULL_HANDLE, // dedicatedImage
++ UINT32_MAX, // dedicatedBufferImageUsage
++ *pCreateInfo,
++ VMA_SUBALLOCATION_TYPE_UNKNOWN,
++ 1, // allocationCount
++ pAllocation);
++
++ if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
++ {
++ allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
++ }
++
++ return result;
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
++ VmaAllocator allocator,
++ const VkMemoryRequirements* pVkMemoryRequirements,
++ const VmaAllocationCreateInfo* pCreateInfo,
++ size_t allocationCount,
++ VmaAllocation* pAllocations,
++ VmaAllocationInfo* pAllocationInfo)
++{
++ if(allocationCount == 0)
++ {
++ return VK_SUCCESS;
++ }
++
++ VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
++
++ VMA_DEBUG_LOG("vmaAllocateMemoryPages");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ VkResult result = allocator->AllocateMemory(
++ *pVkMemoryRequirements,
++ false, // requiresDedicatedAllocation
++ false, // prefersDedicatedAllocation
++ VK_NULL_HANDLE, // dedicatedBuffer
++ VK_NULL_HANDLE, // dedicatedImage
++ UINT32_MAX, // dedicatedBufferImageUsage
++ *pCreateInfo,
++ VMA_SUBALLOCATION_TYPE_UNKNOWN,
++ allocationCount,
++ pAllocations);
++
++ if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
++ {
++ for(size_t i = 0; i < allocationCount; ++i)
++ {
++ allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
++ }
++ }
++
++ return result;
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
++ VmaAllocator allocator,
++ VkBuffer buffer,
++ const VmaAllocationCreateInfo* pCreateInfo,
++ VmaAllocation* pAllocation,
++ VmaAllocationInfo* pAllocationInfo)
++{
++ VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
++
++ VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ VkMemoryRequirements vkMemReq = {};
++ bool requiresDedicatedAllocation = false;
++ bool prefersDedicatedAllocation = false;
++ allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
++ requiresDedicatedAllocation,
++ prefersDedicatedAllocation);
++
++ VkResult result = allocator->AllocateMemory(
++ vkMemReq,
++ requiresDedicatedAllocation,
++ prefersDedicatedAllocation,
++ buffer, // dedicatedBuffer
++ VK_NULL_HANDLE, // dedicatedImage
++ UINT32_MAX, // dedicatedBufferImageUsage
++ *pCreateInfo,
++ VMA_SUBALLOCATION_TYPE_BUFFER,
++ 1, // allocationCount
++ pAllocation);
++
++ if(pAllocationInfo && result == VK_SUCCESS)
++ {
++ allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
++ }
++
++ return result;
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
++ VmaAllocator allocator,
++ VkImage image,
++ const VmaAllocationCreateInfo* pCreateInfo,
++ VmaAllocation* pAllocation,
++ VmaAllocationInfo* pAllocationInfo)
++{
++ VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
++
++ VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ VkMemoryRequirements vkMemReq = {};
++ bool requiresDedicatedAllocation = false;
++ bool prefersDedicatedAllocation = false;
++ allocator->GetImageMemoryRequirements(image, vkMemReq,
++ requiresDedicatedAllocation, prefersDedicatedAllocation);
++
++ VkResult result = allocator->AllocateMemory(
++ vkMemReq,
++ requiresDedicatedAllocation,
++ prefersDedicatedAllocation,
++ VK_NULL_HANDLE, // dedicatedBuffer
++ image, // dedicatedImage
++ UINT32_MAX, // dedicatedBufferImageUsage
++ *pCreateInfo,
++ VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
++ 1, // allocationCount
++ pAllocation);
++
++ if(pAllocationInfo && result == VK_SUCCESS)
++ {
++ allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
++ }
++
++ return result;
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
++ VmaAllocator allocator,
++ VmaAllocation allocation)
++{
++ VMA_ASSERT(allocator);
++
++ if(allocation == VK_NULL_HANDLE)
++ {
++ return;
++ }
++
++ VMA_DEBUG_LOG("vmaFreeMemory");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ allocator->FreeMemory(
++ 1, // allocationCount
++ &allocation);
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
++ VmaAllocator allocator,
++ size_t allocationCount,
++ const VmaAllocation* pAllocations)
++{
++ if(allocationCount == 0)
++ {
++ return;
++ }
++
++ VMA_ASSERT(allocator);
++
++ VMA_DEBUG_LOG("vmaFreeMemoryPages");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ allocator->FreeMemory(allocationCount, pAllocations);
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
++ VmaAllocator allocator,
++ VmaAllocation allocation,
++ VmaAllocationInfo* pAllocationInfo)
++{
++ VMA_ASSERT(allocator && allocation && pAllocationInfo);
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ allocator->GetAllocationInfo(allocation, pAllocationInfo);
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
++ VmaAllocator allocator,
++ VmaAllocation allocation,
++ void* pUserData)
++{
++ VMA_ASSERT(allocator && allocation);
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ allocation->SetUserData(allocator, pUserData);
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocation VMA_NOT_NULL allocation,
++ const char* VMA_NULLABLE pName)
++{
++ allocation->SetName(allocator, pName);
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocation VMA_NOT_NULL allocation,
++ VkMemoryPropertyFlags* VMA_NOT_NULL pFlags)
++{
++ VMA_ASSERT(allocator && allocation && pFlags);
++ const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
++ *pFlags = allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
++ VmaAllocator allocator,
++ VmaAllocation allocation,
++ void** ppData)
++{
++ VMA_ASSERT(allocator && allocation && ppData);
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ return allocator->Map(allocation, ppData);
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
++ VmaAllocator allocator,
++ VmaAllocation allocation)
++{
++ VMA_ASSERT(allocator && allocation);
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ allocator->Unmap(allocation);
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
++ VmaAllocator allocator,
++ VmaAllocation allocation,
++ VkDeviceSize offset,
++ VkDeviceSize size)
++{
++ VMA_ASSERT(allocator && allocation);
++
++ VMA_DEBUG_LOG("vmaFlushAllocation");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
++
++ return res;
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
++ VmaAllocator allocator,
++ VmaAllocation allocation,
++ VkDeviceSize offset,
++ VkDeviceSize size)
++{
++ VMA_ASSERT(allocator && allocation);
++
++ VMA_DEBUG_LOG("vmaInvalidateAllocation");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
++
++ return res;
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
++ VmaAllocator allocator,
++ uint32_t allocationCount,
++ const VmaAllocation* allocations,
++ const VkDeviceSize* offsets,
++ const VkDeviceSize* sizes)
++{
++ VMA_ASSERT(allocator);
++
++ if(allocationCount == 0)
++ {
++ return VK_SUCCESS;
++ }
++
++ VMA_ASSERT(allocations);
++
++ VMA_DEBUG_LOG("vmaFlushAllocations");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);
++
++ return res;
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
++ VmaAllocator allocator,
++ uint32_t allocationCount,
++ const VmaAllocation* allocations,
++ const VkDeviceSize* offsets,
++ const VkDeviceSize* sizes)
++{
++ VMA_ASSERT(allocator);
++
++ if(allocationCount == 0)
++ {
++ return VK_SUCCESS;
++ }
++
++ VMA_ASSERT(allocations);
++
++ VMA_DEBUG_LOG("vmaInvalidateAllocations");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);
++
++ return res;
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(
++ VmaAllocator allocator,
++ uint32_t memoryTypeBits)
++{
++ VMA_ASSERT(allocator);
++
++ VMA_DEBUG_LOG("vmaCheckCorruption");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ return allocator->CheckCorruption(memoryTypeBits);
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation(
++ VmaAllocator allocator,
++ const VmaDefragmentationInfo* pInfo,
++ VmaDefragmentationContext* pContext)
++{
++ VMA_ASSERT(allocator && pInfo && pContext);
++
++ VMA_DEBUG_LOG("vmaBeginDefragmentation");
++
++ if (pInfo->pool != VMA_NULL)
++ {
++ // Check if run on supported algorithms
++ if (pInfo->pool->m_BlockVector.GetAlgorithm() & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
++ return VK_ERROR_FEATURE_NOT_PRESENT;
++ }
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ *pContext = vma_new(allocator, VmaDefragmentationContext_T)(allocator, *pInfo);
++ return VK_SUCCESS;
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation(
++ VmaAllocator allocator,
++ VmaDefragmentationContext context,
++ VmaDefragmentationStats* pStats)
++{
++ VMA_ASSERT(allocator && context);
++
++ VMA_DEBUG_LOG("vmaEndDefragmentation");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ if (pStats)
++ context->GetStats(*pStats);
++ vma_delete(allocator, context);
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaDefragmentationContext VMA_NOT_NULL context,
++ VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo)
++{
++ VMA_ASSERT(context && pPassInfo);
++
++ VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ return context->DefragmentPassBegin(*pPassInfo);
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaDefragmentationContext VMA_NOT_NULL context,
++ VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo)
++{
++ VMA_ASSERT(context && pPassInfo);
++
++ VMA_DEBUG_LOG("vmaEndDefragmentationPass");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ return context->DefragmentPassEnd(*pPassInfo);
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
++ VmaAllocator allocator,
++ VmaAllocation allocation,
++ VkBuffer buffer)
++{
++ VMA_ASSERT(allocator && allocation && buffer);
++
++ VMA_DEBUG_LOG("vmaBindBufferMemory");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
++ VmaAllocator allocator,
++ VmaAllocation allocation,
++ VkDeviceSize allocationLocalOffset,
++ VkBuffer buffer,
++ const void* pNext)
++{
++ VMA_ASSERT(allocator && allocation && buffer);
++
++ VMA_DEBUG_LOG("vmaBindBufferMemory2");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
++ VmaAllocator allocator,
++ VmaAllocation allocation,
++ VkImage image)
++{
++ VMA_ASSERT(allocator && allocation && image);
++
++ VMA_DEBUG_LOG("vmaBindImageMemory");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
++ VmaAllocator allocator,
++ VmaAllocation allocation,
++ VkDeviceSize allocationLocalOffset,
++ VkImage image,
++ const void* pNext)
++{
++ VMA_ASSERT(allocator && allocation && image);
++
++ VMA_DEBUG_LOG("vmaBindImageMemory2");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
++ VmaAllocator allocator,
++ const VkBufferCreateInfo* pBufferCreateInfo,
++ const VmaAllocationCreateInfo* pAllocationCreateInfo,
++ VkBuffer* pBuffer,
++ VmaAllocation* pAllocation,
++ VmaAllocationInfo* pAllocationInfo)
++{
++ VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
++
++ if(pBufferCreateInfo->size == 0)
++ {
++ return VK_ERROR_INITIALIZATION_FAILED;
++ }
++ if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
++ !allocator->m_UseKhrBufferDeviceAddress)
++ {
++ VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
++ return VK_ERROR_INITIALIZATION_FAILED;
++ }
++
++ VMA_DEBUG_LOG("vmaCreateBuffer");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ *pBuffer = VK_NULL_HANDLE;
++ *pAllocation = VK_NULL_HANDLE;
++
++ // 1. Create VkBuffer.
++ VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
++ allocator->m_hDevice,
++ pBufferCreateInfo,
++ allocator->GetAllocationCallbacks(),
++ pBuffer);
++ if(res >= 0)
++ {
++ // 2. vkGetBufferMemoryRequirements.
++ VkMemoryRequirements vkMemReq = {};
++ bool requiresDedicatedAllocation = false;
++ bool prefersDedicatedAllocation = false;
++ allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
++ requiresDedicatedAllocation, prefersDedicatedAllocation);
++
++ // 3. Allocate memory using allocator.
++ res = allocator->AllocateMemory(
++ vkMemReq,
++ requiresDedicatedAllocation,
++ prefersDedicatedAllocation,
++ *pBuffer, // dedicatedBuffer
++ VK_NULL_HANDLE, // dedicatedImage
++ pBufferCreateInfo->usage, // dedicatedBufferImageUsage
++ *pAllocationCreateInfo,
++ VMA_SUBALLOCATION_TYPE_BUFFER,
++ 1, // allocationCount
++ pAllocation);
++
++ if(res >= 0)
++ {
++            // 4. Bind buffer with memory.
++ if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
++ {
++ res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
++ }
++ if(res >= 0)
++ {
++ // All steps succeeded.
++ #if VMA_STATS_STRING_ENABLED
++ (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
++ #endif
++ if(pAllocationInfo != VMA_NULL)
++ {
++ allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
++ }
++
++ return VK_SUCCESS;
++ }
++ allocator->FreeMemory(
++ 1, // allocationCount
++ pAllocation);
++ *pAllocation = VK_NULL_HANDLE;
++ (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
++ *pBuffer = VK_NULL_HANDLE;
++ return res;
++ }
++ (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
++ *pBuffer = VK_NULL_HANDLE;
++ return res;
++ }
++ return res;
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
++ VmaAllocator allocator,
++ const VkBufferCreateInfo* pBufferCreateInfo,
++ const VmaAllocationCreateInfo* pAllocationCreateInfo,
++ VkDeviceSize minAlignment,
++ VkBuffer* pBuffer,
++ VmaAllocation* pAllocation,
++ VmaAllocationInfo* pAllocationInfo)
++{
++ VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && VmaIsPow2(minAlignment) && pBuffer && pAllocation);
++
++ if(pBufferCreateInfo->size == 0)
++ {
++ return VK_ERROR_INITIALIZATION_FAILED;
++ }
++ if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
++ !allocator->m_UseKhrBufferDeviceAddress)
++ {
++ VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
++ return VK_ERROR_INITIALIZATION_FAILED;
++ }
++
++ VMA_DEBUG_LOG("vmaCreateBufferWithAlignment");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ *pBuffer = VK_NULL_HANDLE;
++ *pAllocation = VK_NULL_HANDLE;
++
++ // 1. Create VkBuffer.
++ VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
++ allocator->m_hDevice,
++ pBufferCreateInfo,
++ allocator->GetAllocationCallbacks(),
++ pBuffer);
++ if(res >= 0)
++ {
++ // 2. vkGetBufferMemoryRequirements.
++ VkMemoryRequirements vkMemReq = {};
++ bool requiresDedicatedAllocation = false;
++ bool prefersDedicatedAllocation = false;
++ allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
++ requiresDedicatedAllocation, prefersDedicatedAllocation);
++
++ // 2a. Include minAlignment
++ vkMemReq.alignment = VMA_MAX(vkMemReq.alignment, minAlignment);
++
++ // 3. Allocate memory using allocator.
++ res = allocator->AllocateMemory(
++ vkMemReq,
++ requiresDedicatedAllocation,
++ prefersDedicatedAllocation,
++ *pBuffer, // dedicatedBuffer
++ VK_NULL_HANDLE, // dedicatedImage
++ pBufferCreateInfo->usage, // dedicatedBufferImageUsage
++ *pAllocationCreateInfo,
++ VMA_SUBALLOCATION_TYPE_BUFFER,
++ 1, // allocationCount
++ pAllocation);
++
++ if(res >= 0)
++ {
++            // 4. Bind buffer with memory.
++ if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
++ {
++ res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
++ }
++ if(res >= 0)
++ {
++ // All steps succeeded.
++ #if VMA_STATS_STRING_ENABLED
++ (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
++ #endif
++ if(pAllocationInfo != VMA_NULL)
++ {
++ allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
++ }
++
++ return VK_SUCCESS;
++ }
++ allocator->FreeMemory(
++ 1, // allocationCount
++ pAllocation);
++ *pAllocation = VK_NULL_HANDLE;
++ (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
++ *pBuffer = VK_NULL_HANDLE;
++ return res;
++ }
++ (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
++ *pBuffer = VK_NULL_HANDLE;
++ return res;
++ }
++ return res;
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocation VMA_NOT_NULL allocation,
++ const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
++ VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer)
++{
++ return vmaCreateAliasingBuffer2(allocator, allocation, 0, pBufferCreateInfo, pBuffer);
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocation VMA_NOT_NULL allocation,
++ VkDeviceSize allocationLocalOffset,
++ const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
++ VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer)
++{
++ VMA_ASSERT(allocator && pBufferCreateInfo && pBuffer && allocation);
++ VMA_ASSERT(allocationLocalOffset + pBufferCreateInfo->size <= allocation->GetSize());
++
++ VMA_DEBUG_LOG("vmaCreateAliasingBuffer2");
++
++ *pBuffer = VK_NULL_HANDLE;
++
++ if (pBufferCreateInfo->size == 0)
++ {
++ return VK_ERROR_INITIALIZATION_FAILED;
++ }
++ if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
++ !allocator->m_UseKhrBufferDeviceAddress)
++ {
++ VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
++ return VK_ERROR_INITIALIZATION_FAILED;
++ }
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ // 1. Create VkBuffer.
++ VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
++ allocator->m_hDevice,
++ pBufferCreateInfo,
++ allocator->GetAllocationCallbacks(),
++ pBuffer);
++ if (res >= 0)
++ {
++ // 2. Bind buffer with memory.
++ res = allocator->BindBufferMemory(allocation, allocationLocalOffset, *pBuffer, VMA_NULL);
++ if (res >= 0)
++ {
++ return VK_SUCCESS;
++ }
++ (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
++ }
++ return res;
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
++ VmaAllocator allocator,
++ VkBuffer buffer,
++ VmaAllocation allocation)
++{
++ VMA_ASSERT(allocator);
++
++ if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
++ {
++ return;
++ }
++
++ VMA_DEBUG_LOG("vmaDestroyBuffer");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ if(buffer != VK_NULL_HANDLE)
++ {
++ (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
++ }
++
++ if(allocation != VK_NULL_HANDLE)
++ {
++ allocator->FreeMemory(
++ 1, // allocationCount
++ &allocation);
++ }
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
++ VmaAllocator allocator,
++ const VkImageCreateInfo* pImageCreateInfo,
++ const VmaAllocationCreateInfo* pAllocationCreateInfo,
++ VkImage* pImage,
++ VmaAllocation* pAllocation,
++ VmaAllocationInfo* pAllocationInfo)
++{
++ VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
++
++ if(pImageCreateInfo->extent.width == 0 ||
++ pImageCreateInfo->extent.height == 0 ||
++ pImageCreateInfo->extent.depth == 0 ||
++ pImageCreateInfo->mipLevels == 0 ||
++ pImageCreateInfo->arrayLayers == 0)
++ {
++ return VK_ERROR_INITIALIZATION_FAILED;
++ }
++
++ VMA_DEBUG_LOG("vmaCreateImage");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ *pImage = VK_NULL_HANDLE;
++ *pAllocation = VK_NULL_HANDLE;
++
++ // 1. Create VkImage.
++ VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
++ allocator->m_hDevice,
++ pImageCreateInfo,
++ allocator->GetAllocationCallbacks(),
++ pImage);
++ if(res >= 0)
++ {
++ VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
++ VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
++ VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
++
++ // 2. Allocate memory using allocator.
++ VkMemoryRequirements vkMemReq = {};
++ bool requiresDedicatedAllocation = false;
++ bool prefersDedicatedAllocation = false;
++ allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
++ requiresDedicatedAllocation, prefersDedicatedAllocation);
++
++ res = allocator->AllocateMemory(
++ vkMemReq,
++ requiresDedicatedAllocation,
++ prefersDedicatedAllocation,
++ VK_NULL_HANDLE, // dedicatedBuffer
++ *pImage, // dedicatedImage
++ pImageCreateInfo->usage, // dedicatedBufferImageUsage
++ *pAllocationCreateInfo,
++ suballocType,
++ 1, // allocationCount
++ pAllocation);
++
++ if(res >= 0)
++ {
++ // 3. Bind image with memory.
++ if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
++ {
++ res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
++ }
++ if(res >= 0)
++ {
++ // All steps succeeded.
++ #if VMA_STATS_STRING_ENABLED
++ (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
++ #endif
++ if(pAllocationInfo != VMA_NULL)
++ {
++ allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
++ }
++
++ return VK_SUCCESS;
++ }
++ allocator->FreeMemory(
++ 1, // allocationCount
++ pAllocation);
++ *pAllocation = VK_NULL_HANDLE;
++ (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
++ *pImage = VK_NULL_HANDLE;
++ return res;
++ }
++ (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
++ *pImage = VK_NULL_HANDLE;
++ return res;
++ }
++ return res;
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocation VMA_NOT_NULL allocation,
++ const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
++ VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage)
++{
++ return vmaCreateAliasingImage2(allocator, allocation, 0, pImageCreateInfo, pImage);
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VmaAllocation VMA_NOT_NULL allocation,
++ VkDeviceSize allocationLocalOffset,
++ const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
++ VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage)
++{
++ VMA_ASSERT(allocator && pImageCreateInfo && pImage && allocation);
++
++ *pImage = VK_NULL_HANDLE;
++
++    VMA_DEBUG_LOG("vmaCreateAliasingImage2");
++
++ if (pImageCreateInfo->extent.width == 0 ||
++ pImageCreateInfo->extent.height == 0 ||
++ pImageCreateInfo->extent.depth == 0 ||
++ pImageCreateInfo->mipLevels == 0 ||
++ pImageCreateInfo->arrayLayers == 0)
++ {
++ return VK_ERROR_INITIALIZATION_FAILED;
++ }
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ // 1. Create VkImage.
++ VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
++ allocator->m_hDevice,
++ pImageCreateInfo,
++ allocator->GetAllocationCallbacks(),
++ pImage);
++ if (res >= 0)
++ {
++ // 2. Bind image with memory.
++ res = allocator->BindImageMemory(allocation, allocationLocalOffset, *pImage, VMA_NULL);
++ if (res >= 0)
++ {
++ return VK_SUCCESS;
++ }
++ (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
++ }
++ return res;
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
++ VmaAllocator VMA_NOT_NULL allocator,
++ VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
++ VmaAllocation VMA_NULLABLE allocation)
++{
++ VMA_ASSERT(allocator);
++
++ if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
++ {
++ return;
++ }
++
++ VMA_DEBUG_LOG("vmaDestroyImage");
++
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK
++
++ if(image != VK_NULL_HANDLE)
++ {
++ (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
++ }
++ if(allocation != VK_NULL_HANDLE)
++ {
++ allocator->FreeMemory(
++ 1, // allocationCount
++ &allocation);
++ }
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(
++ const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
++ VmaVirtualBlock VMA_NULLABLE * VMA_NOT_NULL pVirtualBlock)
++{
++ VMA_ASSERT(pCreateInfo && pVirtualBlock);
++ VMA_ASSERT(pCreateInfo->size > 0);
++ VMA_DEBUG_LOG("vmaCreateVirtualBlock");
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
++ *pVirtualBlock = vma_new(pCreateInfo->pAllocationCallbacks, VmaVirtualBlock_T)(*pCreateInfo);
++ VkResult res = (*pVirtualBlock)->Init();
++ if(res < 0)
++ {
++ vma_delete(pCreateInfo->pAllocationCallbacks, *pVirtualBlock);
++ *pVirtualBlock = VK_NULL_HANDLE;
++ }
++ return res;
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(VmaVirtualBlock VMA_NULLABLE virtualBlock)
++{
++ if(virtualBlock != VK_NULL_HANDLE)
++ {
++ VMA_DEBUG_LOG("vmaDestroyVirtualBlock");
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
++ VkAllocationCallbacks allocationCallbacks = virtualBlock->m_AllocationCallbacks; // Have to copy the callbacks when destroying.
++ vma_delete(&allocationCallbacks, virtualBlock);
++ }
++}
++
++VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(VmaVirtualBlock VMA_NOT_NULL virtualBlock)
++{
++ VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
++ VMA_DEBUG_LOG("vmaIsVirtualBlockEmpty");
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
++ return virtualBlock->IsEmpty() ? VK_TRUE : VK_FALSE;
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
++ VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo)
++{
++ VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pVirtualAllocInfo != VMA_NULL);
++ VMA_DEBUG_LOG("vmaGetVirtualAllocationInfo");
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
++ virtualBlock->GetAllocationInfo(allocation, *pVirtualAllocInfo);
++}
++
++VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
++ const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,
++ VkDeviceSize* VMA_NULLABLE pOffset)
++{
++ VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pCreateInfo != VMA_NULL && pAllocation != VMA_NULL);
++ VMA_DEBUG_LOG("vmaVirtualAllocate");
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
++ return virtualBlock->Allocate(*pCreateInfo, *pAllocation, pOffset);
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation)
++{
++ if(allocation != VK_NULL_HANDLE)
++ {
++ VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
++ VMA_DEBUG_LOG("vmaVirtualFree");
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
++ virtualBlock->Free(allocation);
++ }
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(VmaVirtualBlock VMA_NOT_NULL virtualBlock)
++{
++ VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
++ VMA_DEBUG_LOG("vmaClearVirtualBlock");
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
++ virtualBlock->Clear();
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
++ VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, void* VMA_NULLABLE pUserData)
++{
++ VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
++ VMA_DEBUG_LOG("vmaSetVirtualAllocationUserData");
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
++ virtualBlock->SetAllocationUserData(allocation, pUserData);
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
++ VmaStatistics* VMA_NOT_NULL pStats)
++{
++ VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL);
++ VMA_DEBUG_LOG("vmaGetVirtualBlockStatistics");
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
++ virtualBlock->GetStatistics(*pStats);
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
++ VmaDetailedStatistics* VMA_NOT_NULL pStats)
++{
++ VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL);
++ VMA_DEBUG_LOG("vmaCalculateVirtualBlockStatistics");
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
++ virtualBlock->CalculateDetailedStatistics(*pStats);
++}
++
++#if VMA_STATS_STRING_ENABLED
++
++VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
++ char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString, VkBool32 detailedMap)
++{
++ VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && ppStatsString != VMA_NULL);
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
++ const VkAllocationCallbacks* allocationCallbacks = virtualBlock->GetAllocationCallbacks();
++ VmaStringBuilder sb(allocationCallbacks);
++ virtualBlock->BuildStatsString(detailedMap != VK_FALSE, sb);
++ *ppStatsString = VmaCreateStringCopy(allocationCallbacks, sb.GetData(), sb.GetLength());
++}
++
++VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
++ char* VMA_NULLABLE pStatsString)
++{
++ if(pStatsString != VMA_NULL)
++ {
++ VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
++ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
++ VmaFreeString(virtualBlock->GetAllocationCallbacks(), pStatsString);
++ }
++}
++#endif // VMA_STATS_STRING_ENABLED
++#endif // _VMA_PUBLIC_INTERFACE
++#endif // VMA_IMPLEMENTATION
++
++/**
++\page quick_start Quick start
++
++\section quick_start_project_setup Project setup
++
++Vulkan Memory Allocator comes in the form of a "stb-style" single header file.
++You don't need to build it as a separate library project.
++You can add this file directly to your project and commit it to your code repository next to your other source files.
++
++"Single header" doesn't mean that everything is contained in C/C++ declarations,
++as it tends to be with inline functions or C++ templates.
++It means that the implementation is bundled with the interface in a single file and needs to be enabled using a preprocessor macro.
++If you don't do this properly, you will get linker errors.
++
++To do it properly:
++
++-# Include the "vk_mem_alloc.h" file in each CPP file where you want to use the library.
++   This includes declarations of all members of the library.
++-# In exactly one CPP file define the following macro before this include.
++   It also enables internal definitions.
++
++\code
++#define VMA_IMPLEMENTATION
++#include "vk_mem_alloc.h"
++\endcode
++
++It may be a good idea to create a dedicated CPP file just for this purpose.
++
++This library includes the header `<vulkan/vulkan.h>`, which in turn
++includes `<windows.h>` on Windows. If you need specific macros defined
++before including these headers (like `WIN32_LEAN_AND_MEAN` or
++`WINVER` for Windows, or `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define
++them before every `#include` of this library.
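++
++For example, a dedicated implementation file for a Windows build might look like this minimal sketch (the chosen macros are illustrative):
++
++\code
++#define WIN32_LEAN_AND_MEAN
++#define VK_USE_PLATFORM_WIN32_KHR
++#define VMA_IMPLEMENTATION
++#include "vk_mem_alloc.h"
++\endcode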
++
++This library is written in C++, but it has a C-compatible interface.
++Thus you can include and use vk_mem_alloc.h in C or C++ code, but the full
++implementation with the `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C.
++Some features of C++14 are used; STL containers, RTTI, and C++ exceptions are not.
++
++
++\section quick_start_initialization Initialization
++
++At program startup:
++
++-# Initialize Vulkan to have `VkPhysicalDevice`, `VkDevice`, and `VkInstance` objects.
++-# Fill the VmaAllocatorCreateInfo structure and create a #VmaAllocator object by
++   calling vmaCreateAllocator().
++
++Only the members `physicalDevice`, `device`, and `instance` are required.
++However, you should inform the library which Vulkan version you use by setting
++VmaAllocatorCreateInfo::vulkanApiVersion and which extensions you enabled
++by setting VmaAllocatorCreateInfo::flags (like #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT for VK_KHR_buffer_device_address).
++Otherwise, VMA uses only the features of Vulkan 1.0 core with no extensions.
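++
++A minimal sketch of such a setup, assuming VK_KHR_buffer_device_address was enabled on the device (the remaining required members are filled as in the full example below):
++
++\code
++VmaAllocatorCreateInfo allocatorCreateInfo = {};
++allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;
++allocatorCreateInfo.flags = VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT;
++\endcode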
++
++\subsection quick_start_initialization_selecting_vulkan_version Selecting Vulkan version
++
++VMA supports Vulkan versions down to 1.0, for backward compatibility.
++If you want to use a higher version, you need to inform the library about it.
++This is a two-step process.
++
++<b>Step 1: Compile time.</b> By default, VMA compiles with code supporting the highest
++Vulkan version found in the included `<vulkan/vulkan.h>` that is also supported by the library.
++If this is OK, you don't need to do anything.
++However, if you want to compile VMA as if only a lower Vulkan version was available,
++define the macro `VMA_VULKAN_VERSION` before every `#include "vk_mem_alloc.h"`.
++It should have a decimal numeric value of the form ABBBCCC, where A = major, BBB = minor, and CCC = patch Vulkan version.
++For example, to compile against Vulkan 1.2:
++
++\code
++#define VMA_VULKAN_VERSION 1002000 // Vulkan 1.2
++#include "vk_mem_alloc.h"
++\endcode
++
++<b>Step 2: Runtime.</b> Even when compiled with a higher Vulkan version available,
++VMA can be restricted to use only the features of a lower version; this is configurable during creation of the #VmaAllocator object.
++By default, only Vulkan 1.0 is used.
++To initialize the allocator with support for a higher Vulkan version, you need to set the member
++VmaAllocatorCreateInfo::vulkanApiVersion to an appropriate value, e.g. using constants like `VK_API_VERSION_1_2`.
++See the code sample below.
++
++\subsection quick_start_initialization_importing_vulkan_functions Importing Vulkan functions
++
++You may need to configure importing Vulkan functions. There are 3 ways to do this:
++
++-# **If you link with Vulkan static library** (e.g. "vulkan-1.lib" on Windows):
++ - You don't need to do anything.
++ - VMA will use these, as macro `VMA_STATIC_VULKAN_FUNCTIONS` is defined to 1 by default.
++-# **If you want VMA to fetch pointers to Vulkan functions dynamically** using `vkGetInstanceProcAddr`,
++ `vkGetDeviceProcAddr` (this is the option presented in the example below):
++ - Define `VMA_STATIC_VULKAN_FUNCTIONS` to 0, `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 1.
++ - Provide pointers to these two functions via VmaVulkanFunctions::vkGetInstanceProcAddr,
++ VmaVulkanFunctions::vkGetDeviceProcAddr.
++ - The library will fetch pointers to all other functions it needs internally.
++-# **If you fetch pointers to all Vulkan functions in a custom way**, e.g. using some loader like
++ [Volk](https://github.com/zeux/volk):
++ - Define `VMA_STATIC_VULKAN_FUNCTIONS` and `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 0.
++ - Pass these pointers via structure #VmaVulkanFunctions.
++
++Example for case 2:
++
++\code
++#define VMA_STATIC_VULKAN_FUNCTIONS 0
++#define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
++#include "vk_mem_alloc.h"
++
++...
++
++VmaVulkanFunctions vulkanFunctions = {};
++vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr;
++vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr;
++
++VmaAllocatorCreateInfo allocatorCreateInfo = {};
++allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;
++allocatorCreateInfo.physicalDevice = physicalDevice;
++allocatorCreateInfo.device = device;
++allocatorCreateInfo.instance = instance;
++allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
++
++VmaAllocator allocator;
++vmaCreateAllocator(&allocatorCreateInfo, &allocator);
++\endcode
++
++
++\section quick_start_resource_allocation Resource allocation
++
++When you want to create a buffer or image:
++
++-# Fill the `VkBufferCreateInfo` / `VkImageCreateInfo` structure.
++-# Fill the VmaAllocationCreateInfo structure.
++-# Call vmaCreateBuffer() / vmaCreateImage() to get a `VkBuffer`/`VkImage` with memory
++   already allocated and bound to it, plus a #VmaAllocation object that represents its underlying memory.
++
++\code
++VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
++bufferInfo.size = 65536;
++bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
++
++VmaAllocationCreateInfo allocInfo = {};
++allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
++
++VkBuffer buffer;
++VmaAllocation allocation;
++vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
++\endcode
++
++Don't forget to destroy your objects when no longer needed:
++
++\code
++vmaDestroyBuffer(allocator, buffer, allocation);
++vmaDestroyAllocator(allocator);
++\endcode
++
++
++\page choosing_memory_type Choosing memory type
++
++Physical devices in Vulkan support various combinations of memory heaps and
++types. Help with choosing the correct and optimal memory type for your specific
++resource is one of the key features of this library. You can use it by filling
++the appropriate members of the VmaAllocationCreateInfo structure, as described below.
++You can also combine multiple methods.
++
++-# If you just want to find a memory type index that meets your requirements, you
++   can use one of the functions: vmaFindMemoryTypeIndexForBufferInfo(),
++   vmaFindMemoryTypeIndexForImageInfo(), vmaFindMemoryTypeIndex().
++-# If you want to allocate a region of device memory without association with any
++   specific image or buffer, you can use the function vmaAllocateMemory(). Usage of
++   this function is not recommended and usually not needed.
++   The vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once,
++   which may be useful for sparse binding.
++-# If you already have a buffer or an image created and want to allocate memory
++   for it that you will then bind yourself, you can use the functions
++   vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage().
++   For binding you should use the functions vmaBindBufferMemory(), vmaBindImageMemory()
++   or their extended versions vmaBindBufferMemory2(), vmaBindImageMemory2().
++-# **This is the easiest and recommended way to use this library:**
++   If you want to create a buffer or an image, allocate memory for it, and bind
++   them together, all in one call, you can use the functions vmaCreateBuffer(),
++   vmaCreateImage().
++
++When using method 3 or 4, the library internally queries Vulkan for the memory types
++supported for that buffer or image (e.g. via `vkGetBufferMemoryRequirements()`)
++and uses only one of these types.
++
++If no memory type can be found that meets all the requirements, these functions
++return `VK_ERROR_FEATURE_NOT_PRESENT`.
++
++You can leave the VmaAllocationCreateInfo structure completely filled with zeros.
++It means no requirements are specified for the memory type.
++This is valid, although not very useful.
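++
++For example, method 1 can be used as in this minimal sketch, assuming `bufferInfo` is an already-filled `VkBufferCreateInfo`:
++
++\code
++VmaAllocationCreateInfo allocInfo = {};
++allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
++
++uint32_t memTypeIndex;
++VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
++    allocator, &bufferInfo, &allocInfo, &memTypeIndex);
++// res == VK_ERROR_FEATURE_NOT_PRESENT if no memory type meets the requirements.
++\endcode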
++
++\section choosing_memory_type_usage Usage
++
++The easiest way to specify memory requirements is to fill the member
++VmaAllocationCreateInfo::usage using one of the values of the enum #VmaMemoryUsage.
++It defines high-level, common usage types.
++Since version 3 of the library, it is recommended to use #VMA_MEMORY_USAGE_AUTO to let it select the best memory type for your resource automatically.
++
++For example, if you want to create a uniform buffer that will be filled using
++transfer only once or infrequently and then used for rendering every frame, you can
++do it using the following code. The buffer will most likely end up in a memory type with
++`VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`, which is fast for the GPU to access.
++
++\code
++VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
++bufferInfo.size = 65536;
++bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
++
++VmaAllocationCreateInfo allocInfo = {};
++allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
++
++VkBuffer buffer;
++VmaAllocation allocation;
++vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
++\endcode
++
++If you have a preference for putting the resource in GPU (device) memory or CPU (host) memory
++on systems with a discrete graphics card that has separate memories, you can use
++#VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST.
++
++When using `VMA_MEMORY_USAGE_AUTO*` and you want to map the allocated memory,
++you also need to specify one of the host access flags:
++#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
++This helps the library decide on a preferred memory type and ensure it has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
++so you can map it.
++
++For example, a staging buffer that will be filled via a mapped pointer and then
++used as a transfer source for the buffer described previously can be created like this.
++It will likely end up in a memory type that is `HOST_VISIBLE` and `HOST_COHERENT`
++but not `HOST_CACHED` (meaning uncached, write-combined) and not `DEVICE_LOCAL` (meaning system RAM).
++
++\code
++VkBufferCreateInfo stagingBufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
++stagingBufferInfo.size = 65536;
++stagingBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
++
++VmaAllocationCreateInfo stagingAllocInfo = {};
++stagingAllocInfo.usage = VMA_MEMORY_USAGE_AUTO;
++stagingAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
++
++VkBuffer stagingBuffer;
++VmaAllocation stagingAllocation;
++vmaCreateBuffer(allocator, &stagingBufferInfo, &stagingAllocInfo, &stagingBuffer, &stagingAllocation, nullptr);
++\endcode
++
++For more examples of creating different kinds of resources, see chapter \ref usage_patterns.
++
++The usage values `VMA_MEMORY_USAGE_AUTO*` are legal to use only when the library knows
++about the resource being created, i.e. when a `VkBufferCreateInfo` / `VkImageCreateInfo` is passed,
++so they work with functions like vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), etc.
++If you allocate raw memory using the function vmaAllocateMemory(), you have to use other means of selecting
++a memory type, as described below.
++
++\note
++Old usage values (`VMA_MEMORY_USAGE_GPU_ONLY`, `VMA_MEMORY_USAGE_CPU_ONLY`,
++`VMA_MEMORY_USAGE_CPU_TO_GPU`, `VMA_MEMORY_USAGE_GPU_TO_CPU`, `VMA_MEMORY_USAGE_CPU_COPY`)
++are still available and work the same way as in previous versions of the library
++for backward compatibility, but they are not recommended.
++
++\section choosing_memory_type_required_preferred_flags Required and preferred flags
++
++You can specify more detailed requirements by filling the members
++VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags
++with a combination of bits from the enum `VkMemoryPropertyFlags`. For example,
++if you want to create a buffer that will be persistently mapped on the host (so it
++must be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`,
++use the following code:
++
++\code
++VmaAllocationCreateInfo allocInfo = {};
++allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
++allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
++allocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;
++
++VkBuffer buffer;
++VmaAllocation allocation;
++vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
++\endcode
++
++A memory type is chosen that has all the required flags and as many preferred
++flags set as possible.
++
++The value passed in VmaAllocationCreateInfo::usage is internally converted to a set of required and preferred flags,
++plus some extra "magic" (heuristics).
++
++\section choosing_memory_type_explicit_memory_types Explicit memory types
++
++If you inspected the memory types available on the physical device and you have
++a preference for the memory types that you want to use, you can fill the member
++VmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each set bit
++means that a memory type with that index is allowed to be used for the
++allocation. The special value 0, just like `UINT32_MAX`, means there are no
++restrictions on the memory type index.
++
++Please note that this member is NOT just a memory type index.
++Still, you can use it to choose just one specific memory type.
++For example, if you have already determined that your buffer should be created in
++memory type 2, use the following code:
++
++\code
++uint32_t memoryTypeIndex = 2;
++
++VmaAllocationCreateInfo allocInfo = {};
++allocInfo.memoryTypeBits = 1u << memoryTypeIndex;
++
++VkBuffer buffer;
++VmaAllocation allocation;
++vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
++\endcode
++
++
++\section choosing_memory_type_custom_memory_pools Custom memory pools
++
++If you allocate from a custom memory pool, none of the ways of specifying memory
++requirements described above are applicable, and the aforementioned members
++of the VmaAllocationCreateInfo structure are ignored. The memory type is selected
++explicitly when creating the pool and is then used to make all the allocations from
++that pool. For further details, see \ref custom_memory_pools.
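++
++A minimal sketch of this flow, assuming `memTypeIndex` was obtained earlier, e.g. with vmaFindMemoryTypeIndex():
++
++\code
++VmaPoolCreateInfo poolCreateInfo = {};
++poolCreateInfo.memoryTypeIndex = memTypeIndex;
++
++VmaPool pool;
++vmaCreatePool(allocator, &poolCreateInfo, &pool);
++
++VmaAllocationCreateInfo allocInfo = {};
++allocInfo.pool = pool; // Memory type selection members are ignored when a pool is set.
++\endcode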
++
++\section choosing_memory_type_dedicated_allocations Dedicated allocations
++
++Memory for allocations is reserved out of a larger block of `VkDeviceMemory`
++allocated from Vulkan internally. That is the main feature of this whole library.
++You can still request a separate memory block to be created for an allocation,
++just like you would do in a trivial solution without using any allocator.
++In that case, a buffer or image is always bound to that memory at offset 0.
++This is called a "dedicated allocation".
++You can explicitly request it by using the flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT, as shown in the sketch after the list below.
++The library can also internally decide to use a dedicated allocation in some cases, e.g.:
++
++- When the size of the allocation is large.
++- When [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension is enabled
++ and it reports that dedicated allocation is required or recommended for the resource.
++- When allocation of the next big memory block fails due to insufficient device memory,
++  but an allocation with the exact requested size succeeds.
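++
++A minimal sketch of requesting a dedicated allocation explicitly:
++
++\code
++VmaAllocationCreateInfo allocInfo = {};
++allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
++allocInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
++\endcode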
++
++
++\page memory_mapping Memory mapping
++
++To "map memory" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`,
++to be able to read from it or write to it in CPU code.
++Mapping is possible only for memory allocated from a memory type that has the
++`VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag.
++The functions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose.
++You can use them directly with memory allocated by this library,
++but it is not recommended because of the following issue:
++Mapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed.
++This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan.
++Because of this, Vulkan Memory Allocator provides the following facilities:
++
++\note If you want to be able to map an allocation, you need to specify one of the flags
++#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
++in VmaAllocationCreateInfo::flags. These flags are required for an allocation to be mappable
++when using #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` enum values.
++For other usage values they are ignored and every such allocation made in a `HOST_VISIBLE` memory type is mappable,
++but they can still be used for consistency.
++
++\section memory_mapping_mapping_functions Mapping functions
++
++The library provides the following functions for mapping a specific #VmaAllocation: vmaMapMemory(), vmaUnmapMemory().
++They are safer and more convenient to use than the standard Vulkan functions.
++You can map an allocation multiple times simultaneously - mapping is reference-counted internally.
++You can also map different allocations simultaneously, regardless of whether they use the same `VkDeviceMemory` block.
++The library is implemented such that it always maps the entire memory block, not just the region of the allocation.
++For further details, see the description of the vmaMapMemory() function.
++Example:
++
++\code
++// Having these objects initialized:
++struct ConstantBuffer
++{
++ ...
++};
++ConstantBuffer constantBufferData = ...
++
++VmaAllocator allocator = ...
++VkBuffer constantBuffer = ...
++VmaAllocation constantBufferAllocation = ...
++
++// You can map and fill your buffer using following code:
++
++void* mappedData;
++vmaMapMemory(allocator, constantBufferAllocation, &mappedData);
++memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
++vmaUnmapMemory(allocator, constantBufferAllocation);
++\endcode
++
++When mapping, you may see a warning from the Vulkan validation layer similar to this one:
++
++<i>Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.</i>
++
++It happens because the library maps the entire `VkDeviceMemory` block, where different
++types of images and buffers may end up together, especially on GPUs with unified memory like Intel.
++You can safely ignore it if you are sure you access only the memory of the intended
++object that you wanted to map.
++
++
++\section memory_mapping_persistently_mapped_memory Persistently mapped memory
++
++Keeping your memory persistently mapped is generally OK in Vulkan.
++You don't need to unmap it before using its data on the GPU.
++The library provides a special feature designed for that:
++Allocations made with the #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in
++VmaAllocationCreateInfo::flags stay mapped all the time,
++so you can access the CPU pointer to it at any time
++without needing to call any "map" or "unmap" function.
++Example:
++
++\code
++VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
++bufCreateInfo.size = sizeof(ConstantBuffer);
++bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
++
++VmaAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
++allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
++ VMA_ALLOCATION_CREATE_MAPPED_BIT;
++
++VkBuffer buf;
++VmaAllocation alloc;
++VmaAllocationInfo allocInfo;
++vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
++
++// Buffer is already mapped. You can access its memory.
++memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
++\endcode
++
++\note #VMA_ALLOCATION_CREATE_MAPPED_BIT by itself doesn't guarantee that the allocation will end up
++in a mappable memory type.
++For this, you need to also specify #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or
++#VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
++#VMA_ALLOCATION_CREATE_MAPPED_BIT only guarantees that if the memory is `HOST_VISIBLE`, the allocation will be mapped on creation.
++For an example of how to make use of this fact, see section \ref usage_patterns_advanced_data_uploading.
++
++\section memory_mapping_cache_control Cache flush and invalidate
++
++Memory in Vulkan doesn't need to be unmapped before using it on the GPU,
++but unless a memory type has the `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set,
++you need to manually **invalidate** the cache before reading from a mapped pointer
++and **flush** the cache after writing to a mapped pointer.
++Map/unmap operations don't do that automatically.
++Vulkan provides the following functions for this purpose: `vkFlushMappedMemoryRanges()` and
++`vkInvalidateMappedMemoryRanges()`, but this library provides more convenient
++functions that operate on a given allocation object: vmaFlushAllocation(),
++vmaInvalidateAllocation(),
++or on multiple objects at once: vmaFlushAllocations(), vmaInvalidateAllocations().
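++
++For example, after writing to a mapped allocation that may be in non-coherent memory (a minimal sketch, reusing `constantBufferAllocation` and `mappedData` from the mapping example earlier on this page):
++
++\code
++memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
++// Flush the written range so the device sees the data even on non-HOST_COHERENT memory.
++// Offset and size are relative to the allocation; VK_WHOLE_SIZE covers all of it.
++vmaFlushAllocation(allocator, constantBufferAllocation, 0, VK_WHOLE_SIZE);
++\endcode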
++
++Regions of memory specified for flush/invalidate must be aligned to
++`VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library.
++In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations
++within blocks are aligned to this value, so their offsets are always a multiple of
++`nonCoherentAtomSize` and two different allocations never share the same "line" of this size.
++
++Also, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA)
++currently provide the `HOST_COHERENT` flag on all memory types that are
++`HOST_VISIBLE`, so on PC you may not need to bother.
++
++
++\page staying_within_budget Staying within budget
++
++When developing a graphics-intensive game or program, it is important to avoid allocating
++more GPU memory than is physically available. When memory is over-committed,
++various bad things can happen, depending on the specific GPU, graphics driver, and
++operating system:
++
++- It may just work without any problems.
++- The application may slow down because some memory blocks are moved to system RAM
++ and the GPU has to access them through the PCI Express bus.
++- A new allocation may take a very long time to complete, even a few seconds, and possibly
++ freeze the entire system.
++- The new allocation may fail with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
++- It may even result in a GPU crash (TDR), observed as `VK_ERROR_DEVICE_LOST`
++ returned somewhere later.
++
++\section staying_within_budget_querying_for_budget Querying for budget
++
++To query for current memory usage and available budget, use function vmaGetHeapBudgets().
++The returned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap.
++
++Please note that this function returns different information and works faster than
++vmaCalculateStatistics(). vmaGetHeapBudgets() can be called every frame or even before every
++allocation, while vmaCalculateStatistics() is intended to be used rarely,
++only to obtain statistical information, e.g. for debugging purposes.
++
++It is recommended to use the <b>VK_EXT_memory_budget</b> device extension to obtain information
++about the budget from the Vulkan device. VMA is able to use this extension automatically.
++When not enabled, the allocator behaves the same way, but then it estimates current usage
++and available budget based on its internal information and Vulkan memory heap sizes,
++which may be less precise. In order to use this extension (see the sketch after these steps):
++
++1. Make sure extensions VK_EXT_memory_budget and VK_KHR_get_physical_device_properties2
++ required by it are available and enable them. Please note that the first is a device
++ extension and the second is an instance extension!
++2. Use flag #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT when creating #VmaAllocator object.
++3. Make sure to call vmaSetCurrentFrameIndex() every frame. Budget is queried from
++ Vulkan inside of it to avoid the overhead of querying it with every allocation.
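++
++A minimal sketch of these steps (assuming the two extensions have already been enabled, and that `physicalDevice`, `device`, `instance`, and the frame counter `frameIndex` are your existing objects):
++
++\code
++VmaAllocatorCreateInfo allocatorCreateInfo = {};
++allocatorCreateInfo.physicalDevice = physicalDevice;
++allocatorCreateInfo.device = device;
++allocatorCreateInfo.instance = instance;
++allocatorCreateInfo.flags = VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT;
++
++VmaAllocator allocator;
++vmaCreateAllocator(&allocatorCreateInfo, &allocator);
++
++// Later, once per frame:
++vmaSetCurrentFrameIndex(allocator, frameIndex);
++\endcode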
++
++\section staying_within_budget_controlling_memory_usage Controlling memory usage
++
++There are many ways in which you can try to stay within the budget.
++
++First, when making a new allocation requires allocating a new memory block, the library
++tries not to exceed the budget automatically. If a block with the default recommended size
++(e.g. 256 MB) would go over budget, a smaller block is allocated, possibly even
++dedicated memory for just this resource.
++
++If the size of the requested resource plus current memory usage is more than the
++budget, by default the library still tries to create it, leaving it to the Vulkan
++implementation whether the allocation succeeds or fails. You can change this behavior
++by using #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag. With it, the allocation is
++not made if it would exceed the budget or if the budget is already exceeded.
++VMA then tries to make the allocation from the next eligible Vulkan memory type.
++If all of them fail, the call fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
++An example usage pattern is to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag
++when creating resources that are not essential for the application (e.g. the texture
++of a specific object) and not to pass it when creating critically important resources
++(e.g. render targets).
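++
++For example, an optional texture could be created like this (a minimal sketch; `imgCreateInfo` is assumed to be a filled `VkImageCreateInfo`):
++
++\code
++VmaAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
++allocCreateInfo.flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT;
++
++VkImage img;
++VmaAllocation alloc;
++VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
++if(res == VK_ERROR_OUT_OF_DEVICE_MEMORY)
++{
++ // Over budget - skip loading this non-essential texture.
++}
++\endcode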
++
++On AMD graphics cards there is a custom vendor extension available: <b>VK_AMD_memory_overallocation_behavior</b>
++that allows controlling the behavior of the Vulkan implementation in out-of-memory cases -
++whether it should fail with an error code or still allow the allocation.
++Usage of this extension involves only passing an extra structure at Vulkan device creation,
++so it is out of the scope of this library.
++
++Finally, you can also use the #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure
++a new allocation is created only when it fits inside one of the existing memory blocks.
++If it would require allocating a new block, it fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
++This also ensures that the function call is very fast because it never goes to Vulkan
++to obtain a new block.
++
++\note Creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount
++set to more than 0 will currently try to allocate memory blocks without checking whether they
++fit within budget.
++
++
++\page resource_aliasing Resource aliasing (overlap)
++
++New explicit graphics APIs (Vulkan and Direct3D 12), thanks to manual memory
++management, give an opportunity to alias (overlap) multiple resources in the
++same region of memory - a feature not available in the old APIs (Direct3D 11, OpenGL).
++It can be useful to save video memory, but it must be used with caution.
++
++For example, if you know the flow of your whole render frame in advance, you know
++that some intermediate textures or buffers are used only during a small range of render passes,
++and these ranges don't overlap in time, so you can bind these resources to
++the same place in memory, even if they have completely different parameters (width, height, format, etc.).
++
++![Resource aliasing (overlap)](../gfx/Aliasing.png)
++
++Such a scenario is possible using VMA, but you need to create your images manually.
++Then you need to calculate the parameters of the allocation to be made using this formula:
++
++- allocation size = max(size of each image)
++- allocation alignment = max(alignment of each image)
++- allocation memoryTypeBits = bitwise AND(memoryTypeBits of each image)
++
++The following example shows two different images bound to the same place in memory,
++allocated to fit the largest of them.
++
++\code
++// A 512x512 texture to be sampled.
++VkImageCreateInfo img1CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
++img1CreateInfo.imageType = VK_IMAGE_TYPE_2D;
++img1CreateInfo.extent.width = 512;
++img1CreateInfo.extent.height = 512;
++img1CreateInfo.extent.depth = 1;
++img1CreateInfo.mipLevels = 10;
++img1CreateInfo.arrayLayers = 1;
++img1CreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
++img1CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
++img1CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
++img1CreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
++img1CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
++
++// A full screen texture to be used as color attachment.
++VkImageCreateInfo img2CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
++img2CreateInfo.imageType = VK_IMAGE_TYPE_2D;
++img2CreateInfo.extent.width = 1920;
++img2CreateInfo.extent.height = 1080;
++img2CreateInfo.extent.depth = 1;
++img2CreateInfo.mipLevels = 1;
++img2CreateInfo.arrayLayers = 1;
++img2CreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
++img2CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
++img2CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
++img2CreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
++img2CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
++
++VkImage img1;
++VkResult res = vkCreateImage(device, &img1CreateInfo, nullptr, &img1);
++VkImage img2;
++res = vkCreateImage(device, &img2CreateInfo, nullptr, &img2);
++
++VkMemoryRequirements img1MemReq;
++vkGetImageMemoryRequirements(device, img1, &img1MemReq);
++VkMemoryRequirements img2MemReq;
++vkGetImageMemoryRequirements(device, img2, &img2MemReq);
++
++VkMemoryRequirements finalMemReq = {};
++finalMemReq.size = std::max(img1MemReq.size, img2MemReq.size);
++finalMemReq.alignment = std::max(img1MemReq.alignment, img2MemReq.alignment);
++finalMemReq.memoryTypeBits = img1MemReq.memoryTypeBits & img2MemReq.memoryTypeBits;
++// Validate if(finalMemReq.memoryTypeBits != 0)
++
++VmaAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
++
++VmaAllocation alloc;
++res = vmaAllocateMemory(allocator, &finalMemReq, &allocCreateInfo, &alloc, nullptr);
++
++res = vmaBindImageMemory(allocator, alloc, img1);
++res = vmaBindImageMemory(allocator, alloc, img2);
++
++// You can use img1, img2 here, but not at the same time!
++
++vmaFreeMemory(allocator, alloc);
++vkDestroyImage(device, img2, nullptr);
++vkDestroyImage(device, img1, nullptr);
++\endcode
++
++VMA also provides convenience functions that create a buffer or image and bind it to memory
++represented by an existing #VmaAllocation:
++vmaCreateAliasingBuffer(), vmaCreateAliasingBuffer2(),
++vmaCreateAliasingImage(), vmaCreateAliasingImage2().
++Versions with "2" offer additional parameter `allocationLocalOffset`.
++
++Remember that using resources that alias in memory requires proper synchronization.
++You need to issue a memory barrier to make sure commands that use `img1` and `img2`
++don't overlap on GPU timeline.
++You also need to treat a resource after aliasing as uninitialized - containing garbage data.
++For example, if you use `img1` and then want to use `img2`, you need to issue
++an image memory barrier for `img2` with `oldLayout` = `VK_IMAGE_LAYOUT_UNDEFINED`.
++
++Additional considerations:
++
++- Vulkan also allows interpreting the contents of memory between aliasing resources consistently in some cases.
++See chapter 11.8. "Memory Aliasing" of the Vulkan specification or the `VK_IMAGE_CREATE_ALIAS_BIT` flag.
++- You can create a more complex layout where different images and buffers are bound
++at different offsets inside one large allocation. For example, one can imagine
++a big texture used in some render passes, aliasing with a set of many small buffers
++used in some further passes in between. To bind a resource at a non-zero offset in an allocation,
++use vmaBindBufferMemory2() / vmaBindImageMemory2() (see the sketch after this list).
++- Before allocating memory for the resources you want to alias, check the `memoryTypeBits`
++returned in the memory requirements of each resource to make sure the bits overlap.
++Some GPUs may expose multiple memory types suitable e.g. only for buffers or
++images with `COLOR_ATTACHMENT` usage, so the sets of memory types supported by your
++resources may be disjoint. Aliasing them is not possible in that case.
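++
++For the non-zero-offset case mentioned above, binding could look like this (a minimal sketch; `alloc` is assumed to be a sufficiently large #VmaAllocation, and `img1` and `smallBuf` are hypothetical resources whose memory requirements it satisfies):
++
++\code
++// Bind the image at the beginning of the allocation.
++res = vmaBindImageMemory2(allocator, alloc, 0, img1, nullptr);
++// Bind a small buffer at a non-zero offset within the same allocation.
++res = vmaBindBufferMemory2(allocator, alloc, 65536, smallBuf, nullptr);
++\endcode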
++
++
++\page custom_memory_pools Custom memory pools
++
++A memory pool contains a number of `VkDeviceMemory` blocks.
++The library automatically creates and manages a default pool for each memory type available on the device.
++A default memory pool automatically grows in size.
++The size of allocated blocks is also variable and managed automatically.
++
++You can create a custom pool and allocate memory out of it.
++It can be useful if you want to:
++
++- Keep certain kinds of allocations separate from others.
++- Enforce a particular, fixed size of Vulkan memory blocks.
++- Limit the maximum amount of Vulkan memory allocated for that pool.
++- Reserve a minimum or fixed amount of Vulkan memory always preallocated for that pool.
++- Use extra parameters for a set of your allocations that are available in #VmaPoolCreateInfo but not in
++ #VmaAllocationCreateInfo - e.g., custom minimum alignment, custom `pNext` chain.
++- Perform defragmentation on a specific subset of your allocations.
++
++To use custom memory pools:
++
++-# Fill VmaPoolCreateInfo structure.
++-# Call vmaCreatePool() to obtain #VmaPool handle.
++-# When making an allocation, set VmaAllocationCreateInfo::pool to this handle.
++ You don't need to specify any other parameters of this structure, like `usage`.
++
++Example:
++
++\code
++// Find memoryTypeIndex for the pool.
++VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
++sampleBufCreateInfo.size = 0x10000; // Doesn't matter.
++sampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
++
++VmaAllocationCreateInfo sampleAllocCreateInfo = {};
++sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
++
++uint32_t memTypeIndex;
++VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator,
++ &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex);
++// Check res...
++
++// Create a pool that can have at most 2 blocks, 128 MiB each.
++VmaPoolCreateInfo poolCreateInfo = {};
++poolCreateInfo.memoryTypeIndex = memTypeIndex;
++poolCreateInfo.blockSize = 128ull * 1024 * 1024;
++poolCreateInfo.maxBlockCount = 2;
++
++VmaPool pool;
++res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
++// Check res...
++
++// Allocate a buffer out of it.
++VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
++bufCreateInfo.size = 1024;
++bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
++
++VmaAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.pool = pool;
++
++VkBuffer buf;
++VmaAllocation alloc;
++res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
++// Check res...
++\endcode
++
++You have to free all allocations made from this pool before destroying it.
++
++\code
++vmaDestroyBuffer(allocator, buf, alloc);
++vmaDestroyPool(allocator, pool);
++\endcode
++
++New versions of this library support creating dedicated allocations in custom pools.
++It is supported only when VmaPoolCreateInfo::blockSize = 0.
++To use this feature, set VmaAllocationCreateInfo::pool to your custom pool and
++VmaAllocationCreateInfo::flags to #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT, as sketched below.
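++
++A minimal sketch of such an allocation (assuming `pool` was created with `blockSize` = 0, and reusing `bufCreateInfo` from the example above):
++
++\code
++VmaAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.pool = pool;
++allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
++
++VkBuffer buf;
++VmaAllocation alloc;
++res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
++\endcode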
++
++\note Excessive use of custom pools is a common mistake when using this library.
++Custom pools may be useful for special purposes - when you want to
++keep certain types of resources separate, e.g. to reserve a minimum amount of memory
++for them or to limit the maximum amount of memory they can occupy. For most
++resources this is not needed and so it is not recommended to create #VmaPool
++objects and allocations out of them. Allocating from the default pool is sufficient.
++
++
++\section custom_memory_pools_MemTypeIndex Choosing memory type index
++
++When creating a pool, you must explicitly specify a memory type index.
++To find the one suitable for your buffers or images, you can use helper functions
++vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo().
++You need to provide structures with example parameters of buffers or images
++that you are going to create in that pool.
++
++\code
++VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
++exampleBufCreateInfo.size = 1024; // Doesn't matter
++exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
++
++VmaAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
++
++uint32_t memTypeIndex;
++vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex);
++
++VmaPoolCreateInfo poolCreateInfo = {};
++poolCreateInfo.memoryTypeIndex = memTypeIndex;
++// ...
++\endcode
++
++When creating buffers/images allocated in that pool, provide the following parameters:
++
++- `VkBufferCreateInfo`: Prefer to pass the same parameters as above.
++ Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior.
++ Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers
++ or the other way around.
++- VmaAllocationCreateInfo: You don't need to pass the same parameters. Fill only the `pool` member.
++ Other members are ignored anyway.
++
++\section linear_algorithm Linear allocation algorithm
++
++Each Vulkan memory block managed by this library has accompanying metadata that
++keeps track of used and unused regions. By default, the metadata structure and
++algorithm try to find the best place for new allocations among free regions to
++optimize memory usage. This way you can allocate and free objects in any order.
++
++![Default allocation algorithm](../gfx/Linear_allocator_1_algo_default.png)
++
++Sometimes there is a need to use a simpler, linear allocation algorithm. You can
++create a custom pool that uses this algorithm by adding flag
++#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating the
++#VmaPool object. Then an alternative metadata management is used. It always
++creates new allocations after the last one and doesn't reuse free regions left by
++allocations freed in the middle. This results in better allocation performance and
++less memory consumed by metadata.
++
++![Linear allocation algorithm](../gfx/Linear_allocator_2_algo_linear.png)
++
++With this one flag, you can create a custom pool that can be used in many ways:
++free-at-once, stack, double stack, and ring buffer. See below for details.
++You don't need to specify explicitly which of these options you are going to use - it is detected automatically.
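++
++Creating such a pool could look like this (a minimal sketch, with `memTypeIndex` found as described in \ref custom_memory_pools_MemTypeIndex):
++
++\code
++VmaPoolCreateInfo poolCreateInfo = {};
++poolCreateInfo.memoryTypeIndex = memTypeIndex;
++poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
++
++VmaPool pool;
++VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
++\endcode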
++
++\subsection linear_algorithm_free_at_once Free-at-once
++
++In a pool that uses linear algorithm, you still need to free all the allocations
++individually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free
++them in any order. New allocations are always made after the last one - free space
++in the middle is not reused. However, when you release all the allocations and
++the pool becomes empty, allocation starts from the beginning again. This way you
++can use linear algorithm to speed up creation of allocations that you are going
++to release all at once.
++
++![Free-at-once](../gfx/Linear_allocator_3_free_at_once.png)
++
++This mode is also available for pools created with a VmaPoolCreateInfo::maxBlockCount
++value that allows multiple memory blocks.
++
++\subsection linear_algorithm_stack Stack
++
++When you free an allocation that was created last, its space can be reused.
++Thanks to this, if you always release allocations in the order opposite to their
++creation (LIFO - Last In First Out), you can achieve behavior of a stack.
++
++![Stack](../gfx/Linear_allocator_4_stack.png)
++
++This mode is also available for pools created with a VmaPoolCreateInfo::maxBlockCount
++value that allows multiple memory blocks.
++
++\subsection linear_algorithm_double_stack Double stack
++
++The space reserved by a custom pool with the linear algorithm may be used by two
++stacks:
++
++- The first, default one, growing up from offset 0.
++- The second, "upper" one, growing down from the end towards lower offsets.
++
++To make an allocation from the upper stack, add flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
++to VmaAllocationCreateInfo::flags.
++
++![Double stack](../gfx/Linear_allocator_7_double_stack.png)
++
++Double stack is available only in pools with one memory block -
++VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.
++
++When the two stacks' ends meet so there is not enough space between them for a
++new allocation, such an allocation fails with the usual
++`VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
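++
++A minimal sketch of an upper-stack allocation (assuming `pool` is such a linear pool with a single block):
++
++\code
++VmaAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.pool = pool;
++allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
++\endcode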
++
++\subsection linear_algorithm_ring_buffer Ring buffer
++
++When you free some allocations from the beginning and there is not enough free space
++for a new one at the end of a pool, the allocator's "cursor" wraps around to the
++beginning and starts allocation there. Thanks to this, if you always release
++allocations in the same order as you created them (FIFO - First In First Out),
++you can achieve behavior of a ring buffer / queue.
++
++![Ring buffer](../gfx/Linear_allocator_5_ring_buffer.png)
++
++Ring buffer is available only in pools with one memory block -
++VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.
++
++\note \ref defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
++
++
++\page defragmentation Defragmentation
++
++Interleaved allocations and deallocations of many objects of varying size can
++cause fragmentation over time, which can lead to a situation where the library is unable
++to find a contiguous range of free memory for a new allocation even though there is
++enough free space in total, just scattered across many small free ranges between existing
++allocations.
++
++To mitigate this problem, you can use the defragmentation feature.
++It doesn't happen automatically, though, and needs your cooperation,
++because VMA is a low-level library that only allocates memory.
++It cannot recreate buffers and images in a new place as it doesn't remember the contents of `VkBufferCreateInfo` / `VkImageCreateInfo` structures.
++It cannot copy their contents as it doesn't record any commands to a command buffer.
++
++Example:
++
++\code
++VmaDefragmentationInfo defragInfo = {};
++defragInfo.pool = myPool;
++defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT;
++
++VmaDefragmentationContext defragCtx;
++VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
++// Check res...
++
++for(;;)
++{
++ VmaDefragmentationPassMoveInfo pass;
++ res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
++ if(res == VK_SUCCESS)
++ break;
++ else if(res != VK_INCOMPLETE)
++ {
++ // Handle error...
++ }
++
++ for(uint32_t i = 0; i < pass.moveCount; ++i)
++ {
++ // Inspect pass.pMoves[i].srcAllocation, identify what buffer/image it represents.
++ VmaAllocationInfo allocInfo;
++ vmaGetAllocationInfo(allocator, pass.pMoves[i].srcAllocation, &allocInfo);
++ MyEngineResourceData* resData = (MyEngineResourceData*)allocInfo.pUserData;
++
++ // Recreate and bind this buffer/image at: pass.pMoves[i].dstMemory, pass.pMoves[i].dstOffset.
++ VkImageCreateInfo imgCreateInfo = ...
++ VkImage newImg;
++ res = vkCreateImage(device, &imgCreateInfo, nullptr, &newImg);
++ // Check res...
++ res = vmaBindImageMemory(allocator, pass.pMoves[i].dstTmpAllocation, newImg);
++ // Check res...
++
++ // Issue a vkCmdCopyBuffer/vkCmdCopyImage to copy its content to the new place.
++ vkCmdCopyImage(cmdBuf, resData->img, ..., newImg, ...);
++ }
++
++ // Make sure the copy commands finished executing.
++ vkWaitForFences(...);
++
++ // Destroy old buffers/images bound with pass.pMoves[i].srcAllocation.
++ for(uint32_t i = 0; i < pass.moveCount; ++i)
++ {
++ // ...
++ vkDestroyImage(device, resData->img, nullptr);
++ }
++
++ // Update appropriate descriptors to point to the new places...
++
++ res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
++ if(res == VK_SUCCESS)
++ break;
++ else if(res != VK_INCOMPLETE)
++ {
++ // Handle error...
++ }
++}
++
++vmaEndDefragmentation(allocator, defragCtx, nullptr);
++\endcode
++
++Although functions like vmaCreateBuffer(), vmaCreateImage(), vmaDestroyBuffer(), vmaDestroyImage()
++create/destroy an allocation and a buffer/image at once, these are just a shortcut for
++creating the resource, allocating memory, and binding them together.
++Defragmentation works on memory allocations only. You must handle the rest manually.
++Defragmentation is an iterative process that should repeat "passes" as long as related functions
++return `VK_INCOMPLETE`, not `VK_SUCCESS`.
++In each pass:
++
++1. vmaBeginDefragmentationPass() function call:
++ - Calculates and returns the list of allocations to be moved in this pass.
++ Note this can be a time-consuming process.
++ - Reserves destination memory for them by creating temporary destination allocations
++ that you can query for their `VkDeviceMemory` + offset using vmaGetAllocationInfo().
++2. Inside the pass, **you should**:
++ - Inspect the returned list of allocations to be moved.
++ - Create new buffers/images and bind them at the returned destination temporary allocations.
++ - Copy data from source to destination resources if necessary.
++ - Destroy the source buffers/images, but NOT their allocations.
++3. vmaEndDefragmentationPass() function call:
++ - Frees the source memory reserved for the allocations that are moved.
++ - Modifies source #VmaAllocation objects that are moved to point to the destination reserved memory.
++ - Frees `VkDeviceMemory` blocks that became empty.
++
++Unlike in previous iterations of the defragmentation API, there is no list of "movable" allocations passed as a parameter.
++Defragmentation algorithm tries to move all suitable allocations.
++You can, however, refuse to move some of them inside a defragmentation pass, by setting
++`pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
++This is not recommended and may result in suboptimal packing of the allocations after defragmentation.
++If you cannot ensure any allocation can be moved, it is better to keep movable allocations separate in a custom pool.
++
++Inside a pass, for each allocation that should be moved:
++
++- You should copy its data from the source to the destination place by calling e.g. `vkCmdCopyBuffer()`, `vkCmdCopyImage()`.
++ - You need to make sure these commands finished executing before destroying the source buffers/images and before calling vmaEndDefragmentationPass().
++- If a resource doesn't contain any meaningful data, e.g. it is a transient color attachment image to be cleared,
++ filled, and used temporarily in each rendering frame, you can just recreate this image
++ without copying its data.
++- If the resource is in `HOST_VISIBLE` and `HOST_CACHED` memory, you can copy its data on the CPU
++ using `memcpy()`.
++- If you cannot move the allocation, you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
++ This will cancel the move.
++ - vmaEndDefragmentationPass() will then free the destination memory, not the source memory,
++ of the allocation, leaving it unchanged.
++- If you decide the allocation is unimportant and can be destroyed instead of moved (e.g. it wasn't used for long time),
++ you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
++ - vmaEndDefragmentationPass() will then free both source and destination memory, and will destroy the source #VmaAllocation object.
++
++You can defragment a specific custom pool by setting VmaDefragmentationInfo::pool
++(like in the example above) or all the default pools by setting this member to null.
++
++Defragmentation is always performed in each pool separately.
++Allocations are never moved between different Vulkan memory types.
++The size of the destination memory reserved for a moved allocation is the same as the original one.
++Alignment of an allocation as it was determined using `vkGetBufferMemoryRequirements()` etc. is also respected after defragmentation.
++Buffers/images should be recreated with the same `VkBufferCreateInfo` / `VkImageCreateInfo` parameters as the original ones.
++
++You can perform the defragmentation incrementally to limit the number of allocations and bytes to be moved
++in each pass, e.g. calling it in sync with render frames so as not to experience too big hitches.
++See members: VmaDefragmentationInfo::maxBytesPerPass, VmaDefragmentationInfo::maxAllocationsPerPass.
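++
++For example (a minimal sketch, with illustrative limits):
++
++\code
++VmaDefragmentationInfo defragInfo = {};
++defragInfo.pool = myPool;
++defragInfo.maxBytesPerPass = 16ull * 1024 * 1024; // Move at most 16 MiB per pass.
++defragInfo.maxAllocationsPerPass = 64; // Move at most 64 allocations per pass.
++\endcode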
++
++It is also safe to perform the defragmentation asynchronously to render frames and other Vulkan and VMA
++usage, possibly from multiple threads, with the exception that allocations
++returned in VmaDefragmentationPassMoveInfo::pMoves shouldn't be destroyed until the defragmentation pass is ended.
++
++<b>Mapping</b> is preserved on allocations that are moved during defragmentation.
++Whether through #VMA_ALLOCATION_CREATE_MAPPED_BIT or vmaMapMemory(), the allocations
++are mapped at their new place. Of course, the pointer to the mapped data changes, so it needs to be queried
++using VmaAllocationInfo::pMappedData.
++
++\note Defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
++
++
++\page statistics Statistics
++
++This library contains several functions that return information about its internal state,
++especially the amount of memory allocated from Vulkan.
++
++\section statistics_numeric_statistics Numeric statistics
++
++If you need to obtain basic statistics about memory usage per heap, together with current budget,
++you can call function vmaGetHeapBudgets() and inspect structure #VmaBudget.
++This is useful to keep track of memory usage and stay within budget
++(see also \ref staying_within_budget).
++Example:
++
++\code
++uint32_t heapIndex = ...
++
++VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
++vmaGetHeapBudgets(allocator, budgets);
++
++printf("My heap currently has %u allocations taking %llu B,\n",
++ budgets[heapIndex].statistics.allocationCount,
++ budgets[heapIndex].statistics.allocationBytes);
++printf("allocated out of %u Vulkan device memory blocks taking %llu B,\n",
++ budgets[heapIndex].statistics.blockCount,
++ budgets[heapIndex].statistics.blockBytes);
++printf("Vulkan reports total usage %llu B with budget %llu B.\n",
++ budgets[heapIndex].usage,
++ budgets[heapIndex].budget);
++\endcode
++
++You can query for more detailed statistics per memory heap, type, and totals,
++including minimum and maximum allocation size and unused range size,
++by calling function vmaCalculateStatistics() and inspecting structure #VmaTotalStatistics.
++This function is slower though, as it has to traverse all the internal data structures,
++so it should be used only for debugging purposes.
++
++You can query for statistics of a custom pool using function vmaGetPoolStatistics()
++or vmaCalculatePoolStatistics().
++
++You can query for information about a specific allocation using function vmaGetAllocationInfo().
++It fills structure #VmaAllocationInfo.
++
++\section statistics_json_dump JSON dump
++
++You can dump internal state of the allocator to a string in JSON format using function vmaBuildStatsString().
++The result is guaranteed to be correct JSON.
++It uses ANSI encoding.
++Any strings provided by the user (see [Allocation names](@ref allocation_names))
++are copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2 or any other encoding,
++this JSON string can be treated as using this encoding.
++It must be freed using function vmaFreeStatsString().
++
++The format of this JSON string is not part of official documentation of the library,
++but it will not change in a backward-incompatible way without increasing the library's major version number
++and an appropriate mention in the changelog.
++
++The JSON string contains all the data that can be obtained using vmaCalculateStatistics().
++It can also contain a detailed map of allocated memory blocks and their regions -
++free and occupied by allocations.
++This allows, e.g., visualizing the memory or assessing fragmentation.
++
++
++\page allocation_annotation Allocation names and user data
++
++\section allocation_user_data Allocation user data
++
++You can annotate allocations with your own information, e.g. for debugging purposes.
++To do that, fill the VmaAllocationCreateInfo::pUserData field when creating
++an allocation. It is an opaque `void*` pointer. You can use it e.g. as a pointer,
++some handle, index, key, ordinal number or any other value that would associate
++the allocation with your custom metadata.
++It is useful to identify appropriate data structures in your engine given #VmaAllocation,
++e.g. when doing \ref defragmentation.
++
++\code
++VkBufferCreateInfo bufCreateInfo = ...
++
++MyBufferMetadata* pMetadata = CreateBufferMetadata();
++
++VmaAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
++allocCreateInfo.pUserData = pMetadata;
++
++VkBuffer buffer;
++VmaAllocation allocation;
++vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
++\endcode
++
++The pointer may be later retrieved as VmaAllocationInfo::pUserData:
++
++\code
++VmaAllocationInfo allocInfo;
++vmaGetAllocationInfo(allocator, allocation, &allocInfo);
++MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData;
++\endcode
++
++It can also be changed using function vmaSetAllocationUserData().
++
++Values of (non-zero) allocations' `pUserData` are printed in the JSON report created by
++vmaBuildStatsString() in hexadecimal form.
++
++\section allocation_names Allocation names
++
++An allocation can also carry a null-terminated string, giving a name to the allocation.
++To set it, call vmaSetAllocationName().
++The library creates an internal copy of the string, so the pointer you pass doesn't need
++to be valid for the whole lifetime of the allocation. You can free it after the call.
++
++\code
++std::string imageName = "Texture: ";
++imageName += fileName;
++vmaSetAllocationName(allocator, allocation, imageName.c_str());
++\endcode
++
++The string can be later retrieved by inspecting VmaAllocationInfo::pName.
++It is also printed in the JSON report created by vmaBuildStatsString().
++
++\note Setting a string name on a VMA allocation doesn't automatically set it on the Vulkan buffer or image created with it.
++You must do it manually using an extension like VK_EXT_debug_utils, which is independent of this library.
++
++
++\page virtual_allocator Virtual allocator
++
++As an extra feature, the core allocation algorithm of the library is exposed through a simple and convenient API of "virtual allocator".
++It doesn't allocate any real GPU memory. It just keeps track of used and free regions of a "virtual block".
++You can use it to allocate your own memory or other objects, even completely unrelated to Vulkan.
++A common use case is sub-allocation of pieces of one large GPU buffer.
++
++\section virtual_allocator_creating_virtual_block Creating virtual block
++
++To use this functionality, there is no need for a main "allocator" object.
++You don't have to have a #VmaAllocator object created.
++All you need to do is to create a separate #VmaVirtualBlock object for each block of memory you want to be managed by the allocator:
++
++-# Fill in #VmaVirtualBlockCreateInfo structure.
++-# Call vmaCreateVirtualBlock(). Get new #VmaVirtualBlock object.
++
++Example:
++
++\code
++VmaVirtualBlockCreateInfo blockCreateInfo = {};
++blockCreateInfo.size = 1048576; // 1 MB
++
++VmaVirtualBlock block;
++VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
++\endcode
++
++\section virtual_allocator_making_virtual_allocations Making virtual allocations
++
++A #VmaVirtualBlock object contains an internal data structure that keeps track of free and occupied regions
++using the same code as the main Vulkan memory allocator.
++Similarly to #VmaAllocation for standard GPU allocations, there is #VmaVirtualAllocation type
++that represents an opaque handle to an allocation within the virtual block.
++
++In order to make such allocation:
++
++-# Fill in #VmaVirtualAllocationCreateInfo structure.
++-# Call vmaVirtualAllocate(). Get new #VmaVirtualAllocation object that represents the allocation.
++ You can also receive `VkDeviceSize offset` that was assigned to the allocation.
++
++Example:
++
++\code
++VmaVirtualAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.size = 4096; // 4 KB
++
++VmaVirtualAllocation alloc;
++VkDeviceSize offset;
++res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
++if(res == VK_SUCCESS)
++{
++ // Use the 4 KB of your memory starting at offset.
++}
++else
++{
++ // Allocation failed - no space for it could be found. Handle this error!
++}
++\endcode
++
++\section virtual_allocator_deallocation Deallocation
++
++When no longer needed, an allocation can be freed by calling vmaVirtualFree().
++You can only pass to this function an allocation that was previously returned by vmaVirtualAllocate()
++called for the same #VmaVirtualBlock.
++
++When the whole block is no longer needed, the block object can be released by calling vmaDestroyVirtualBlock().
++All allocations must be freed before the block is destroyed, which is checked internally by an assert.
++However, if you don't want to call vmaVirtualFree() for each allocation, you can use vmaClearVirtualBlock() to free them all at once -
++a feature not available in the normal Vulkan memory allocator. Example:
++
++\code
++vmaVirtualFree(block, alloc);
++vmaDestroyVirtualBlock(block);
++\endcode
++
++\section virtual_allocator_allocation_parameters Allocation parameters
++
++You can attach a custom pointer to each allocation by using vmaSetVirtualAllocationUserData().
++Its default value is null.
++It can be used to store any data that needs to be associated with that allocation - e.g. an index, a handle, or a pointer to some
++larger data structure containing more information. Example:
++
++\code
++struct CustomAllocData
++{
++ std::string m_AllocName;
++};
++CustomAllocData* allocData = new CustomAllocData();
++allocData->m_AllocName = "My allocation 1";
++vmaSetVirtualAllocationUserData(block, alloc, allocData);
++\endcode
++
++The pointer can later be fetched, along with allocation offset and size, by passing the allocation handle to function
++vmaGetVirtualAllocationInfo() and inspecting returned structure #VmaVirtualAllocationInfo.
++If you allocated a new object to be used as the custom pointer, don't forget to delete that object before freeing the allocation!
++Example:
++
++\code
++VmaVirtualAllocationInfo allocInfo;
++vmaGetVirtualAllocationInfo(block, alloc, &allocInfo);
++delete (CustomAllocData*)allocInfo.pUserData;
++
++vmaVirtualFree(block, alloc);
++\endcode
++
++\section virtual_allocator_alignment_and_units Alignment and units
++
++It feels natural to express sizes and offsets in bytes.
++If an offset of an allocation needs to be aligned to a multiple of some number (e.g. 4 bytes), you can fill the optional member
++VmaVirtualAllocationCreateInfo::alignment to request it. Example:
++
++\code
++VmaVirtualAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.size = 4096; // 4 KB
++allocCreateInfo.alignment = 4; // Returned offset must be a multiple of 4 B
++
++VmaVirtualAllocation alloc;
++res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, nullptr);
++\endcode
++
++Alignments of different allocations made from one block may vary.
++However, if all alignments and sizes are always a multiple of some size, e.g. 4 B or `sizeof(MyDataStruct)`,
++you can express all sizes, alignments, and offsets in multiples of that size instead of individual bytes.
++It might be more convenient, but you need to make sure to use this new unit consistently in all the places:
++
++- VmaVirtualBlockCreateInfo::size
++- VmaVirtualAllocationCreateInfo::size and VmaVirtualAllocationCreateInfo::alignment
++- Using offset returned by vmaVirtualAllocate() or in VmaVirtualAllocationInfo::offset
++
++\section virtual_allocator_statistics Statistics
++
++You can obtain statistics of a virtual block using vmaGetVirtualBlockStatistics()
++(to get brief statistics that are fast to calculate)
++or vmaCalculateVirtualBlockStatistics() (to get more detailed statistics, slower to calculate).
++The functions fill structures #VmaStatistics, #VmaDetailedStatistics respectively - same as used by the normal Vulkan memory allocator.
++Example:
++
++\code
++VmaStatistics stats;
++vmaGetVirtualBlockStatistics(block, &stats);
++printf("My virtual block has %llu bytes used by %u virtual allocations\n",
++ stats.allocationBytes, stats.allocationCount);
++\endcode
++
++You can also request a full list of allocations and free regions as a string in JSON format by calling
++vmaBuildVirtualBlockStatsString().
++The returned string must later be freed using vmaFreeVirtualBlockStatsString().
++The format of this string differs from the one returned by the main Vulkan allocator, but it is similar.
++
++\section virtual_allocator_additional_considerations Additional considerations
++
++The "virtual allocator" functionality is implemented on a level of individual memory blocks.
++Keeping track of a whole collection of blocks, allocating new ones when out of free space,
++deleting empty ones, and deciding which one to try first for a new allocation must be implemented by the user.
++
++Alternative allocation algorithms are supported, just like in custom pools of the real GPU memory.
++See enum #VmaVirtualBlockCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT).
++You can find their description in chapter \ref custom_memory_pools.
++Allocation strategies are also supported.
++See enum #VmaVirtualAllocationCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT).
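++
++For example, a virtual block using the linear algorithm could be created like this (a minimal sketch):
++
++\code
++VmaVirtualBlockCreateInfo blockCreateInfo = {};
++blockCreateInfo.size = 1048576; // 1 MB
++blockCreateInfo.flags = VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT;
++
++VmaVirtualBlock block;
++VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
++\endcode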
++
++The following features are supported only by the allocator of the real GPU memory and not by virtual allocations:
++buffer-image granularity, `VMA_DEBUG_MARGIN`, `VMA_MIN_ALIGNMENT`.
++
++
++\page debugging_memory_usage Debugging incorrect memory usage
++
++If you suspect a bug with memory usage, like usage of uninitialized memory or
++memory being overwritten out of bounds of an allocation,
++you can use debug features of this library to verify this.
++
++\section debugging_memory_usage_initialization Memory initialization
++
++If you experience a bug with incorrect and nondeterministic data in your program and you suspect uninitialized memory to be used,
++you can enable automatic memory initialization to verify this.
++To do it, define macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1.
++
++\code
++#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
++#include "vk_mem_alloc.h"
++\endcode
++
++It makes the memory of new allocations initialized to the bit pattern `0xDCDCDCDC`.
++Before an allocation is destroyed, its memory is filled with the bit pattern `0xEFEFEFEF`.
++Memory is automatically mapped and unmapped if necessary.
++
++If you find these values while debugging your program, there is a good chance that you incorrectly
++read Vulkan memory that is allocated but not initialized, or already freed, respectively.
++
++Memory initialization works only with memory types that are `HOST_VISIBLE` and with allocations that can be mapped.
++It works also with dedicated allocations.
++
++\section debugging_memory_usage_margins Margins
++
++By default, allocations are laid out in memory blocks next to each other if possible
++(considering required alignment, `bufferImageGranularity`, and `nonCoherentAtomSize`).
++
++![Allocations without margin](../gfx/Margins_1.png)
++
++Define macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce the specified
++number of bytes as a margin after every allocation.
++
++\code
++#define VMA_DEBUG_MARGIN 16
++#include "vk_mem_alloc.h"
++\endcode
++
++![Allocations with margin](../gfx/Margins_2.png)
++
++If your bug goes away after enabling margins, it means it may be caused by memory
++being overwritten outside of allocation boundaries. It is not 100% certain, though.
++A change in application behavior may also be caused by a different order and distribution
++of allocations across memory blocks after margins are applied.
++
++Margins work with all types of memory.
++
++The margin is applied only to allocations made out of memory blocks and not to dedicated
++allocations, which have their own memory block of a specific size.
++It is thus not applied to allocations made using the #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag
++or those automatically put into dedicated allocations, e.g. due to their
++large size or as recommended by the VK_KHR_dedicated_allocation extension.
++
++Margins appear in [JSON dump](@ref statistics_json_dump) as part of free space.
++
++Note that enabling margins increases memory usage and fragmentation.
++
++Margins do not apply to \ref virtual_allocator.
++
++\section debugging_memory_usage_corruption_detection Corruption detection
++
++You can additionally define macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation
++of contents of the margins.
++
++\code
++#define VMA_DEBUG_MARGIN 16
++#define VMA_DEBUG_DETECT_CORRUPTION 1
++#include "vk_mem_alloc.h"
++\endcode
++
++When this feature is enabled, the number of bytes specified as `VMA_DEBUG_MARGIN`
++(it must be a multiple of 4) after every allocation is filled with a magic number.
++This idea is also known as a "canary".
++Memory is automatically mapped and unmapped if necessary.
++
++This number is validated automatically when the allocation is destroyed.
++If it is not equal to the expected value, `VMA_ASSERT()` is executed.
++It clearly means that either the CPU or the GPU overwrote the memory outside the boundaries of the allocation,
++which indicates a serious bug.
++
++You can also explicitly request checking margins of all allocations in all memory blocks
++that belong to specified memory types by using function vmaCheckCorruption(),
++or in memory blocks that belong to a specified custom pool, by using function
++vmaCheckPoolCorruption().
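++
++For example (a minimal sketch; `pool` is assumed to be an existing custom pool):
++
++\code
++// Check margins of all allocations in all memory blocks of all memory types.
++VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
++// Or check only the memory blocks of one custom pool.
++res = vmaCheckPoolCorruption(allocator, pool);
++\endcode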
++
++Margin validation (corruption detection) works only for memory types that are
++`HOST_VISIBLE` and `HOST_COHERENT`.
++
++
++\page opengl_interop OpenGL Interop
++
++VMA provides some features that help with interoperability with OpenGL.
++
++\section opengl_interop_exporting_memory Exporting memory
++
++If you want to attach a `VkExportMemoryAllocateInfoKHR` structure to the `pNext` chain of memory allocations made by the library:
++
++It is recommended to create \ref custom_memory_pools for such allocations.
++Define and fill in your `VkExportMemoryAllocateInfoKHR` structure and attach it to VmaPoolCreateInfo::pMemoryAllocateNext
++while creating the custom pool.
++Please note that the structure must remain alive and unchanged for the whole lifetime of the #VmaPool,
++not only while creating it, as no copy of the structure is made,
++but its original pointer is used for each allocation instead.
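++
++A minimal sketch of that setup (assuming the required external memory extensions are enabled and `memTypeIndex` was found as described in \ref custom_memory_pools_MemTypeIndex; the handle type is platform-specific and used here only as an example):
++
++\code
++// This structure must stay alive and unchanged as long as the pool exists.
++VkExportMemoryAllocateInfoKHR exportMemAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
++exportMemAllocInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
++
++VmaPoolCreateInfo poolCreateInfo = {};
++poolCreateInfo.memoryTypeIndex = memTypeIndex;
++poolCreateInfo.pMemoryAllocateNext = &exportMemAllocInfo;
++
++VmaPool pool;
++VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
++\endcode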
++
++If you want to export all memory allocated by the library from certain memory types,
++including dedicated allocations and other allocations made from default pools,
++an alternative solution is to fill in VmaAllocatorCreateInfo::pTypeExternalMemoryHandleTypes.
++It should point to an array with `VkExternalMemoryHandleTypeFlagsKHR` to be automatically passed by the library
++through `VkExportMemoryAllocateInfoKHR` on each allocation made from a specific memory type.
++Please note that new versions of the library also support dedicated allocations created in custom pools.
++
++You should not mix these two methods in a way that would apply both to the same memory type.
++Otherwise, the `VkExportMemoryAllocateInfoKHR` structure would be attached twice to the `pNext` chain of `VkMemoryAllocateInfo`.
++
++
++\section opengl_interop_custom_alignment Custom alignment
++
++Buffers or images exported to a different API like OpenGL may require a different alignment,
++higher than the one used by the library automatically, queried from functions like `vkGetBufferMemoryRequirements`.
++To impose such alignment:
++
++It is recommended to create \ref custom_memory_pools for such allocations.
++Set VmaPoolCreateInfo::minAllocationAlignment member to the minimum alignment required for each allocation
++to be made out of this pool.
++The alignment actually used will be the maximum of this member and the alignment returned for the specific buffer or image
++from a function like `vkGetBufferMemoryRequirements`, which is called by VMA automatically.
++
++If you want to create a buffer with a specific minimum alignment out of default pools,
++use special function vmaCreateBufferWithAlignment(), which takes additional parameter `minAlignment`.
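++
++For example (a minimal sketch; `bufCreateInfo` and `allocCreateInfo` are assumed to be filled as usual, and 4096 is an arbitrary alignment that an external API might require):
++
++\code
++VkBuffer buf;
++VmaAllocation alloc;
++VkResult res = vmaCreateBufferWithAlignment(allocator, &bufCreateInfo, &allocCreateInfo,
++ 4096, // minAlignment
++ &buf, &alloc, nullptr);
++\endcode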
++
++Note that the problem of alignment affects only resources placed inside bigger `VkDeviceMemory` blocks and not dedicated
++allocations, as these, by definition, always have alignment = 0 because the resource is bound to the beginning of its dedicated block.
++Contrary to Direct3D 12, Vulkan doesn't have a concept of alignment of the entire memory block passed on its allocation.
++
++
++\page usage_patterns Recommended usage patterns
++
++Vulkan gives great flexibility in memory allocation.
++This chapter shows the most common patterns.
++
++See also slides from talk:
++[Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New)
++
++
++\section usage_patterns_gpu_only GPU-only resource
++
++<b>When:</b>
++Any resources that you frequently write and read on GPU,
++e.g. images used as color attachments (aka "render targets"), depth-stencil attachments,
++images/buffers used as storage image/buffer (aka "Unordered Access View (UAV)").
++
++<b>What to do:</b>
++Let the library select the optimal memory type, which will likely have `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
++
++\code
++VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
++imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
++imgCreateInfo.extent.width = 3840;
++imgCreateInfo.extent.height = 2160;
++imgCreateInfo.extent.depth = 1;
++imgCreateInfo.mipLevels = 1;
++imgCreateInfo.arrayLayers = 1;
++imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
++imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
++imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
++imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
++imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
++
++VmaAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
++allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
++allocCreateInfo.priority = 1.0f;
++
++VkImage img;
++VmaAllocation alloc;
++vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
++\endcode
++
++<b>Also consider:</b>
++Consider creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
++especially if they are large or if you plan to destroy and recreate them with different sizes
++e.g. when display resolution changes.
++Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later.
++When the VK_EXT_memory_priority extension is enabled, it is also worth setting a high priority on such allocations
++to decrease the chance of their being evicted to system memory by the operating system.
++
++\section usage_patterns_staging_copy_upload Staging copy for upload
++
++<b>When:</b>
++A "staging" buffer than you want to map and fill from CPU code, then use as a source of transfer
++to some GPU resource.
++
++<b>What to do:</b>
++Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT.
++Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`.
++
++\code
++VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
++bufCreateInfo.size = 65536;
++bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
++
++VmaAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
++allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
++ VMA_ALLOCATION_CREATE_MAPPED_BIT;
++
++VkBuffer buf;
++VmaAllocation alloc;
++VmaAllocationInfo allocInfo;
++vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
++
++...
++
++memcpy(allocInfo.pMappedData, myData, myDataSize);
++\endcode
++
++<b>Also consider:</b>
++You can map the allocation using vmaMapMemory() or you can create it as persistently mapped
++using #VMA_ALLOCATION_CREATE_MAPPED_BIT, as in the example above.
++
++
++\section usage_patterns_readback Readback
++
++<b>When:</b>
++Buffers for data written by or transferred from the GPU that you want to read back on the CPU,
++e.g. results of some computations.
++
++<b>What to do:</b>
++Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
++Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
++and `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
++
++\code
++VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
++bufCreateInfo.size = 65536;
++bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
++
++VmaAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
++allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
++ VMA_ALLOCATION_CREATE_MAPPED_BIT;
++
++VkBuffer buf;
++VmaAllocation alloc;
++VmaAllocationInfo allocInfo;
++vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
++
++...
++
++const float* downloadedData = (const float*)allocInfo.pMappedData;
++\endcode
++
++
++\section usage_patterns_advanced_data_uploading Advanced data uploading
++
++For resources that you frequently write on the CPU via a mapped pointer and
++frequently read on the GPU, e.g. as a uniform buffer (also called "dynamic"), multiple options are possible:
++
++-# Easiest solution is to have one copy of the resource in `HOST_VISIBLE` memory,
++ even if it means system RAM (not `DEVICE_LOCAL`) on systems with a discrete graphics card,
++ and make the device reach out to that resource directly.
++ - Reads performed by the device will then go through PCI Express bus.
++ The performance of this access may be limited, but it may be fine depending on the size
++ of this resource (whether it is small enough to quickly end up in GPU cache) and the sparsity
++ of access.
++-# On systems with unified memory (e.g. AMD APU or Intel integrated graphics, mobile chips),
++ a memory type may be available that is both `HOST_VISIBLE` (available for mapping) and `DEVICE_LOCAL`
++ (fast to access from the GPU). Then, it is likely the best choice for such type of resource.
++-# Systems with a discrete graphics card and separate video memory may or may not expose
++ a memory type that is both `HOST_VISIBLE` and `DEVICE_LOCAL`, also known as Base Address Register (BAR).
++ If they do, it represents a piece of VRAM (or entire VRAM, if ReBAR is enabled in the motherboard BIOS)
++ that is available to CPU for mapping.
++ - Writes performed by the host to that memory go through PCI Express bus.
++ The performance of these writes may be limited, but it may be fine, especially on PCIe 4.0,
++ as long as rules of using uncached and write-combined memory are followed - only sequential writes and no reads.
++-# Finally, you may need or prefer to create a separate copy of the resource in `DEVICE_LOCAL` memory,
++ a separate "staging" copy in `HOST_VISIBLE` memory and perform an explicit transfer command between them.
++
++Thankfully, VMA offers an aid to create and use such resources in the way optimal
++for the current Vulkan device. To help the library make the best choice,
++use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT together with
++#VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT.
++It will then prefer a memory type that is both `DEVICE_LOCAL` and `HOST_VISIBLE` (integrated memory or BAR),
++but if no such memory type is available or allocation from it fails
++(PC graphics cards have only 256 MB of BAR by default, unless ReBAR is supported and enabled in BIOS),
++it will fall back to `DEVICE_LOCAL` memory for fast GPU access.
++It is then up to you to detect that the allocation ended up in a memory type that is not `HOST_VISIBLE`,
++so you need to create another "staging" allocation and perform explicit transfers.
++
++\code
++VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
++bufCreateInfo.size = 65536;
++bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
++
++VmaAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
++allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
++ VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |
++ VMA_ALLOCATION_CREATE_MAPPED_BIT;
++
++VkBuffer buf;
++VmaAllocation alloc;
++VmaAllocationInfo allocInfo;
++vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
++
++VkMemoryPropertyFlags memPropFlags;
++vmaGetAllocationMemoryProperties(allocator, alloc, &memPropFlags);
++
++if(memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
++{
++ // Allocation ended up in a mappable memory and is already mapped - write to it directly.
++
++ // [Executed in runtime]:
++ memcpy(allocInfo.pMappedData, myData, myDataSize);
++}
++else
++{
++ // Allocation ended up in a non-mappable memory - need to transfer.
++ VkBufferCreateInfo stagingBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
++ stagingBufCreateInfo.size = 65536;
++ stagingBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
++
++ VmaAllocationCreateInfo stagingAllocCreateInfo = {};
++ stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
++ stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
++ VMA_ALLOCATION_CREATE_MAPPED_BIT;
++
++ VkBuffer stagingBuf;
++ VmaAllocation stagingAlloc;
++ VmaAllocationInfo stagingAllocInfo;
++ vmaCreateBuffer(allocator, &stagingBufCreateInfo, &stagingAllocCreateInfo,
++ &stagingBuf, &stagingAlloc, &stagingAllocInfo);
++
++ // [Executed in runtime]:
++ memcpy(stagingAllocInfo.pMappedData, myData, myDataSize);
++ vmaFlushAllocation(allocator, stagingAlloc, 0, VK_WHOLE_SIZE);
++ //vkCmdPipelineBarrier: VK_ACCESS_HOST_WRITE_BIT --> VK_ACCESS_TRANSFER_READ_BIT
++ VkBufferCopy bufCopy = {
++ 0, // srcOffset
++ 0, // dstOffset
++ myDataSize }; // size
++ vkCmdCopyBuffer(cmdBuf, stagingBuf, buf, 1, &bufCopy);
++}
++\endcode
++
++\section usage_patterns_other_use_cases Other use cases
++
++Here are some other, less obvious use cases and their recommended settings:
++
++- An image that is used only as a transfer source and destination, but should stay on the device,
++ as it is used to temporarily store a copy of some texture, e.g. from the current frame to the next,
++ for temporal antialiasing or other temporal effects.
++ - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
++ - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO
++- An image that is used only as a transfer source and destination, but should be placed
++ in system RAM even though it doesn't need to be mapped, because it serves as a "swap" copy to evict
++ least recently used textures from VRAM.
++ - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
++ - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_HOST,
++ as VMA needs a hint here to differentiate from the previous case.
++- A buffer that you want to map and write from the CPU and directly read from the GPU
++ (e.g. as a uniform or vertex buffer), but for which you have a clear preference to place it in device or
++ host memory due to its large size - see the sketch after this list.
++ - Use `VkBufferCreateInfo::usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT`
++ - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST
++ - Use VmaAllocationCreateInfo::flags = #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT
++
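++For instance, a minimal sketch of the last case, assuming a large vertex buffer that you
++prefer to keep in device memory (the specific size and usage here are illustrative assumptions):
++
++\code
++VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
++bufCreateInfo.size = 64ull * 1024 * 1024; // Hypothetical large size.
++bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
++
++VmaAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
++allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
++ VMA_ALLOCATION_CREATE_MAPPED_BIT;
++
++VkBuffer buf;
++VmaAllocation alloc;
++VmaAllocationInfo allocInfo;
++vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
++\endcode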
++
++\page configuration Configuration
++
++Please check "CONFIGURATION SECTION" in the code to find macros that you can define
++before each include of this file, or change directly in this file, to provide
++your own implementation of basic facilities like assert, `min()` and `max()` functions,
++mutexes, atomics, etc.
++The library uses its own implementation of containers by default, but you can switch to using
++STL containers instead.
++
++For example, define `VMA_ASSERT(expr)` before including the library to provide
++a custom implementation of the assertion, compatible with your project.
++By default it is defined to the standard C `assert(expr)` in the `_DEBUG` configuration
++and expands to nothing otherwise.
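++
++A minimal sketch of such a configuration, assuming a hypothetical project-specific
++`MyProjectCheck()` handler (not part of this library):
++
++\code
++#include <cstdio>
++#include <cstdlib>
++
++// Hypothetical project-specific failure handler.
++static void MyProjectCheck(bool ok, const char* expr)
++{
++ if(!ok)
++ {
++ std::fprintf(stderr, "VMA assertion failed: %s\n", expr);
++ std::abort();
++ }
++}
++
++#define VMA_ASSERT(expr) MyProjectCheck(!!(expr), #expr)
++
++#define VMA_IMPLEMENTATION
++#include "vk_mem_alloc.h"
++\endcode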
++
++\section config_Vulkan_functions Pointers to Vulkan functions
++
++There are multiple ways to import pointers to Vulkan functions in the library.
++In the simplest case you don't need to do anything.
++If the compilation or linking of your program or the initialization of the #VmaAllocator
++doesn't work for you, you can try to reconfigure it.
++
++First, the allocator tries to fetch pointers to Vulkan functions linked statically,
++like this:
++
++\code
++m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
++\endcode
++
++If you want to disable this feature, set the configuration macro: `#define VMA_STATIC_VULKAN_FUNCTIONS 0`.
++
++Second, you can provide the pointers yourself by setting the member VmaAllocatorCreateInfo::pVulkanFunctions.
++You can fetch them e.g. using functions `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` or
++by using a helper library like [volk](https://github.com/zeux/volk).
++
++Third, VMA tries to fetch the remaining pointers that are still null by calling
++`vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` on its own.
++You only need to fill in VmaVulkanFunctions::vkGetInstanceProcAddr and VmaVulkanFunctions::vkGetDeviceProcAddr;
++other pointers will be fetched automatically.
++If you want to disable this feature, set the configuration macro: `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0`.
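++
++For example, a minimal sketch combining the second and third approaches, assuming `instance`,
++`physicalDevice`, and `device` were created elsewhere:
++
++\code
++VmaVulkanFunctions vulkanFunctions = {};
++vulkanFunctions.vkGetInstanceProcAddr = vkGetInstanceProcAddr;
++vulkanFunctions.vkGetDeviceProcAddr = vkGetDeviceProcAddr;
++
++VmaAllocatorCreateInfo allocatorCreateInfo = {};
++allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;
++allocatorCreateInfo.instance = instance;
++allocatorCreateInfo.physicalDevice = physicalDevice;
++allocatorCreateInfo.device = device;
++allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
++
++VmaAllocator allocator;
++vmaCreateAllocator(&allocatorCreateInfo, &allocator);
++\endcode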
++
++Finally, all the function pointers required by the library (considering the selected
++Vulkan version and enabled extensions) are checked with `VMA_ASSERT` to ensure they are not null.
++
++
++\section custom_memory_allocator Custom host memory allocator
++
++If you use a custom allocator for CPU memory rather than the default C++ operators
++`new` and `delete`, you can make this library use your allocator as well
++by filling the optional member VmaAllocatorCreateInfo::pAllocationCallbacks. These
++functions will be passed to Vulkan, as well as used by the library itself to
++make any CPU-side allocations.
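++
++A minimal sketch, assuming hypothetical `MyAlloc()`, `MyRealloc()`, and `MyFree()` functions
++from your own allocator that honor the requested alignment, and an `allocatorCreateInfo`
++being filled as in the earlier sketches:
++
++\code
++static VKAPI_ATTR void* VKAPI_CALL AllocCb(void* pUserData, size_t size,
++ size_t alignment, VkSystemAllocationScope allocationScope)
++{
++ return MyAlloc(size, alignment); // Hypothetical aligned allocation.
++}
++static VKAPI_ATTR void* VKAPI_CALL ReallocCb(void* pUserData, void* pOriginal,
++ size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
++{
++ return MyRealloc(pOriginal, size, alignment); // Hypothetical aligned reallocation.
++}
++static VKAPI_ATTR void VKAPI_CALL FreeCb(void* pUserData, void* pMemory)
++{
++ MyFree(pMemory); // Hypothetical free.
++}
++
++VkAllocationCallbacks cpuAllocationCallbacks = {};
++cpuAllocationCallbacks.pfnAllocation = &AllocCb;
++cpuAllocationCallbacks.pfnReallocation = &ReallocCb;
++cpuAllocationCallbacks.pfnFree = &FreeCb;
++
++allocatorCreateInfo.pAllocationCallbacks = &cpuAllocationCallbacks;
++\endcode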
++
++\section allocation_callbacks Device memory allocation callbacks
++
++The library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally.
++You can set up callbacks to be informed about these calls, e.g. for the purpose
++of gathering statistics. To do it, fill the optional member
++VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
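++
++A minimal sketch, hypothetically tracking the total number of device memory bytes allocated
++(the callback signatures match PFN_vmaAllocateDeviceMemoryFunction and
++PFN_vmaFreeDeviceMemoryFunction of this library):
++
++\code
++static void VKAPI_CALL OnDeviceMemAlloc(VmaAllocator allocator, uint32_t memoryType,
++ VkDeviceMemory memory, VkDeviceSize size, void* pUserData)
++{
++ // Note: real code may need synchronization around this counter.
++ *static_cast<VkDeviceSize*>(pUserData) += size;
++}
++static void VKAPI_CALL OnDeviceMemFree(VmaAllocator allocator, uint32_t memoryType,
++ VkDeviceMemory memory, VkDeviceSize size, void* pUserData)
++{
++ *static_cast<VkDeviceSize*>(pUserData) -= size;
++}
++
++VkDeviceSize totalDeviceMemoryBytes = 0;
++VmaDeviceMemoryCallbacks deviceMemoryCallbacks = {};
++deviceMemoryCallbacks.pfnAllocate = &OnDeviceMemAlloc;
++deviceMemoryCallbacks.pfnFree = &OnDeviceMemFree;
++deviceMemoryCallbacks.pUserData = &totalDeviceMemoryBytes;
++
++allocatorCreateInfo.pDeviceMemoryCallbacks = &deviceMemoryCallbacks;
++\endcode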
++
++\section heap_memory_limit Device heap memory limit
++
++When device memory of a certain heap runs out of free space, new allocations may
++fail (returning an error code) or they may succeed, silently pushing some existing
++memory blocks from GPU VRAM to system RAM (which degrades performance). This
++behavior is implementation-dependent - it depends on the GPU vendor and graphics
++driver.
++
++On AMD cards it can be controlled while creating the Vulkan device object by using the
++VK_AMD_memory_overallocation_behavior extension, if available.
++
++Alternatively, if you want to test how your program behaves with a limited amount of Vulkan device
++memory available, without switching to a graphics card that really has
++smaller VRAM, you can use a feature of this library intended for this purpose.
++To do it, fill the optional member VmaAllocatorCreateInfo::pHeapSizeLimit.
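++
++A minimal sketch, hypothetically limiting heap 0 to 256 MiB while leaving the other heaps
++unrestricted:
++
++\code
++VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
++for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
++ heapSizeLimit[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE means no limit on that heap.
++heapSizeLimit[0] = 256ull * 1024 * 1024;
++
++allocatorCreateInfo.pHeapSizeLimit = heapSizeLimit;
++\endcode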
++
++
++
++\page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation
++
++VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve
++performance on some GPUs. It augments the Vulkan API with the possibility to query
++the driver whether it prefers a particular buffer or image to have its own, dedicated
++allocation (a separate `VkDeviceMemory` block) for better efficiency - to be able
++to do some internal optimizations. The extension is supported by this library.
++It will be used automatically when enabled.
++
++It has been promoted to core Vulkan 1.1, so if you use an eligible Vulkan version
++and inform VMA about it by setting VmaAllocatorCreateInfo::vulkanApiVersion,
++you are all set.
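++
++For example, a minimal sketch of that case, assuming `instance`, `physicalDevice`, and `device`
++were created elsewhere:
++
++\code
++VmaAllocatorCreateInfo allocatorInfo = {};
++allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_1;
++allocatorInfo.instance = instance;
++allocatorInfo.physicalDevice = physicalDevice;
++allocatorInfo.device = device;
++
++VmaAllocator allocator;
++vmaCreateAllocator(&allocatorInfo, &allocator);
++\endcode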
++
++Otherwise, if you want to use it as an extension:
++
++1 . When creating the Vulkan device, check if the following 2 device extensions are
++supported (call `vkEnumerateDeviceExtensionProperties()`).
++If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).
++
++- VK_KHR_get_memory_requirements2
++- VK_KHR_dedicated_allocation
++
++If you enabled these extensions:
++
++2 . Use the #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating
++your #VmaAllocator to inform the library that you enabled the required extensions
++and that you want the library to use them.
++
++\code
++allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
++
++vmaCreateAllocator(&allocatorInfo, &allocator);
++\endcode
++
++That is all. The extension will be used automatically whenever you create a
++buffer using vmaCreateBuffer() or an image using vmaCreateImage().
++
++When using the extension together with the Vulkan Validation Layer, you may receive
++warnings like this:
++
++_vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer._
++
++This is OK - just ignore it. It happens because the library uses
++`vkGetBufferMemoryRequirements2KHR()` instead of the standard
++`vkGetBufferMemoryRequirements()`, while the validation layer seems to be
++unaware of it.
++
++To learn more about this extension, see:
++
++- [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap50.html#VK_KHR_dedicated_allocation)
++- [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)
++
++
++
++\page vk_ext_memory_priority VK_EXT_memory_priority
++
++VK_EXT_memory_priority is a device extension that allows passing an additional "priority"
++value to Vulkan memory allocations, which the implementation may use to prefer certain
++buffers and images that are critical for performance to stay in device-local memory
++in cases when the memory is over-subscribed, while some others may be moved to system memory.
++
++VMA offers convenient usage of this extension.
++If you enable it, you can pass a "priority" parameter when creating allocations or custom pools,
++and the library automatically passes the value to Vulkan using this extension.
++
++If you want to use this extension in connection with VMA, follow these steps:
++
++\section vk_ext_memory_priority_initialization Initialization
++
++1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
++Check if the extension is supported: the returned array of `VkExtensionProperties` should contain "VK_EXT_memory_priority".
++
++2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of the old `vkGetPhysicalDeviceFeatures`.
++Attach the additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
++Check if the device feature is really supported: check whether `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority` is true.
++
++3) While creating the device with `vkCreateDevice`, enable this extension - add "VK_EXT_memory_priority"
++to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
++
++4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
++Fill in the `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
++Enable this device feature - attach the additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to
++the `VkPhysicalDeviceFeatures2::pNext` chain and set its member `memoryPriority` to `VK_TRUE`.
++
++5) While creating the #VmaAllocator with vmaCreateAllocator(), inform VMA that you
++have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
++to VmaAllocatorCreateInfo::flags.
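++
++Putting steps 2) and 4) together, a minimal sketch, assuming `physicalDevice` was picked
++elsewhere and `deviceCreateInfo` is otherwise filled as usual:
++
++\code
++VkPhysicalDeviceMemoryPriorityFeaturesEXT memoryPriorityFeatures = {
++ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT };
++VkPhysicalDeviceFeatures2 features2 = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 };
++features2.pNext = &memoryPriorityFeatures;
++vkGetPhysicalDeviceFeatures2(physicalDevice, &features2);
++
++if(memoryPriorityFeatures.memoryPriority == VK_TRUE)
++{
++ // Re-use the same chain to enable the feature at device creation.
++ // A real application would typically trim features2.features to what it needs.
++ deviceCreateInfo.pEnabledFeatures = nullptr;
++ deviceCreateInfo.pNext = &features2;
++}
++\endcode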
++
++\section vk_ext_memory_priority_usage Usage
++
++When using this extension, you should initialize the following members:
++
++- VmaAllocationCreateInfo::priority when creating a dedicated allocation with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
++- VmaPoolCreateInfo::priority when creating a custom pool.
++
++It should be a floating-point value between `0.0f` and `1.0f`, where the recommended default is `0.5f`.
++Memory allocated with a higher value can be treated by the Vulkan implementation as higher priority,
++so it has a lower chance of being pushed out to system memory and experiencing degraded performance.
++
++It might be a good idea to create performance-critical resources like color-attachment or depth-stencil images
++as dedicated allocations and set a high priority on them. For example:
++
++\code
++VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
++imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
++imgCreateInfo.extent.width = 3840;
++imgCreateInfo.extent.height = 2160;
++imgCreateInfo.extent.depth = 1;
++imgCreateInfo.mipLevels = 1;
++imgCreateInfo.arrayLayers = 1;
++imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
++imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
++imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
++imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
++imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
++
++VmaAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
++allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
++allocCreateInfo.priority = 1.0f;
++
++VkImage img;
++VmaAllocation alloc;
++vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
++\endcode
++
++The `priority` member is ignored in the following situations:
++
++- Allocations created in custom pools: They inherit the priority, along with all other allocation parameters,
++ from the parameters passed in #VmaPoolCreateInfo when the pool was created.
++- Allocations created in default pools: They inherit the priority from the parameters
++ VMA used when creating the default pools, which means `priority == 0.5f`.
++
++
++\page vk_amd_device_coherent_memory VK_AMD_device_coherent_memory
++
++VK_AMD_device_coherent_memory is a device extension that enables access to
++additional memory types with the `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and
++`VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. It is useful mostly for
++the allocation of buffers intended for writing "breadcrumb markers" between passes
++or draw calls, which in turn are useful for debugging GPU crash/hang/TDR cases.
++
++When the extension is available but has not been enabled, the Vulkan physical device
++still exposes those memory types, but their usage is forbidden. VMA automatically
++takes care of that - it returns `VK_ERROR_FEATURE_NOT_PRESENT` when an attempt
++is made to allocate memory of such a type.
++
++If you want to use this extension in connection with VMA, follow these steps:
++
++\section vk_amd_device_coherent_memory_initialization Initialization
++
++1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
++Check if the extension is supported: the returned array of `VkExtensionProperties` should contain "VK_AMD_device_coherent_memory".
++
++2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of the old `vkGetPhysicalDeviceFeatures`.
++Attach the additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
++Check if the device feature is really supported: check whether `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true.
++
++3) While creating the device with `vkCreateDevice`, enable this extension - add "VK_AMD_device_coherent_memory"
++to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
++
++4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
++Fill in the `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
++Enable this device feature - attach the additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to
++`VkPhysicalDeviceFeatures2::pNext` and set its member `deviceCoherentMemory` to `VK_TRUE`.
++
++5) While creating the #VmaAllocator with vmaCreateAllocator(), inform VMA that you
++have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
++to VmaAllocatorCreateInfo::flags.
++
++\section vk_amd_device_coherent_memory_usage Usage
++
++After following the steps described above, you can create VMA allocations and custom pools
++out of the special `DEVICE_COHERENT` and `DEVICE_UNCACHED` memory types on eligible
++devices. There are multiple ways to do it, for example:
++
++- You can request or prefer to allocate out of such memory types by adding
++ `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags
++ or VmaAllocationCreateInfo::preferredFlags. Those flags can be freely mixed with
++ other ways of \ref choosing_memory_type, like setting VmaAllocationCreateInfo::usage.
++- If you manually found the memory type index to use for this purpose, force allocation
++ from this specific index by setting VmaAllocationCreateInfo::memoryTypeBits `= 1u << index`.
++
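++For instance, a minimal sketch of the first approach for a hypothetical "breadcrumb" buffer:
++
++\code
++VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
++bufCreateInfo.size = 4096; // Hypothetical size of the breadcrumb buffer.
++bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
++
++VmaAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
++allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD;
++
++VkBuffer buf;
++VmaAllocation alloc;
++vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
++\endcode
++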
++\section vk_amd_device_coherent_memory_more_information More information
++
++To learn more about this extension, see [VK_AMD_device_coherent_memory in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VK_AMD_device_coherent_memory.html)
++
++Example use of this extension can be found in the code of the sample and test suite
++accompanying this library.
++
++
++\page enabling_buffer_device_address Enabling buffer device address
++
++The device extension VK_KHR_buffer_device_address
++allows fetching a raw GPU pointer to a buffer and passing it for use in shader code.
++It has been promoted to core Vulkan 1.2.
++
++If you want to use this feature in connection with VMA, follow these steps:
++
++\section enabling_buffer_device_address_initialization Initialization
++
++1) (For Vulkan version < 1.2) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
++Check if the extension is supported: the returned array of `VkExtensionProperties` should contain
++"VK_KHR_buffer_device_address".
++
++2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of the old `vkGetPhysicalDeviceFeatures`.
++Attach the additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
++Check if the device feature is really supported: check whether `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress` is true.
++
++3) (For Vulkan version < 1.2) While creating the device with `vkCreateDevice`, enable this extension - add
++"VK_KHR_buffer_device_address" to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
++
++4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
++Fill in the `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
++Enable this device feature - attach the additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to
++`VkPhysicalDeviceFeatures2::pNext` and set its member `bufferDeviceAddress` to `VK_TRUE`.
++
++5) While creating the #VmaAllocator with vmaCreateAllocator(), inform VMA that you
++have enabled this feature - add #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
++to VmaAllocatorCreateInfo::flags.
++
++\section enabling_buffer_device_address_usage Usage
++
++After following the steps described above, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*` using VMA.
++The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT*` to
++allocated memory blocks wherever it might be needed.
++
++Please note that the library supports only `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*`.
++The second part of this functionality, related to "capture and replay", is not supported,
++as it is intended for use in debugging tools like RenderDoc, not for everyday Vulkan usage.
++
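++A minimal sketch, assuming the feature was enabled as described above and `device` and
++`allocator` exist:
++
++\code
++VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
++bufCreateInfo.size = 4096; // Hypothetical size.
++bufCreateInfo.usage = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT |
++ VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
++
++VmaAllocationCreateInfo allocCreateInfo = {};
++allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
++
++VkBuffer buf;
++VmaAllocation alloc;
++vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
++
++VkBufferDeviceAddressInfo addrInfo = { VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO };
++addrInfo.buffer = buf;
++VkDeviceAddress gpuAddress = vkGetBufferDeviceAddress(device, &addrInfo);
++\endcode
++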
++\section enabling_buffer_device_address_more_information More information
++
++To learn more about this extension, see [VK_KHR_buffer_device_address in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap46.html#VK_KHR_buffer_device_address)
++
++Example use of this extension can be found in the code of the sample and test suite
++accompanying this library.
++
++\page general_considerations General considerations
++
++\section general_considerations_thread_safety Thread safety
++
++- The library has no global state, so separate #VmaAllocator objects can be used
++ independently.
++ There should be no need to create multiple such objects though - one per `VkDevice` is enough.
++- By default, all calls to functions that take #VmaAllocator as the first parameter
++ are safe to call from multiple threads simultaneously because they are
++ synchronized internally when needed.
++ This includes allocation and deallocation from the default memory pool, as well as from a custom #VmaPool.
++- When the allocator is created with #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
++ flag, calls to functions that take such #VmaAllocator object must be
++ synchronized externally.
++- Access to a #VmaAllocation object must be externally synchronized. For example,
++ you must not call vmaGetAllocationInfo() and vmaMapMemory() from different
++ threads at the same time if you pass the same #VmaAllocation object to these
++ functions.
++- #VmaVirtualBlock is not safe to be used from multiple threads simultaneously.
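++
++For example, a minimal sketch of guarding a single #VmaAllocation shared between threads
++(the mutex and helper function are assumptions of this example, not part of the library):
++
++\code
++#include <cstring>
++#include <mutex>
++
++std::mutex gAllocMutex; // Protects gSharedAlloc, created elsewhere.
++VmaAllocation gSharedAlloc;
++
++void WriteFromAnyThread(VmaAllocator allocator, const void* data, size_t size)
++{
++ std::lock_guard<std::mutex> lock(gAllocMutex);
++ void* mapped = nullptr;
++ if(vmaMapMemory(allocator, gSharedAlloc, &mapped) == VK_SUCCESS)
++ {
++ memcpy(mapped, data, size);
++ vmaUnmapMemory(allocator, gSharedAlloc);
++ }
++}
++\endcode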
++
++\section general_considerations_versioning_and_compatibility Versioning and compatibility
++
++The library uses [**Semantic Versioning**](https://semver.org/),
++which means version numbers follow the convention Major.Minor.Patch (e.g. 2.3.0), where:
++
++- An incremented Patch version means a release is backward- and forward-compatible,
++ introducing only some internal improvements, bug fixes, optimizations etc.
++ or changes that are out of scope of the official API described in this documentation.
++- An incremented Minor version means a release is backward-compatible,
++ so existing code that uses the library should continue to work, while some new
++ symbols could have been added: new structures, functions, new values in existing
++ enums and bit flags, new structure members, but not new function parameters.
++- An incremented Major version means a release could break some backward compatibility.
++
++All changes between official releases are documented in the file "CHANGELOG.md".
++
++\warning Backward compatibility is considered on the level of C++ source code, not binary linkage.
++Adding new members to existing structures is treated as backward compatible if initializing
++the new members to binary zero results in the old behavior.
++You should always fully initialize all library structures to zeros and not rely on their
++exact binary size.
++
++\section general_considerations_validation_layer_warnings Validation layer warnings
++
++When using this library, you may encounter the following types of warnings issued by
++the Vulkan validation layer. They don't necessarily indicate a bug, so you may
++simply ignore them.
++
++- *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.*
++ - It happens when the VK_KHR_dedicated_allocation extension is enabled.
++ The `vkGetBufferMemoryRequirements2KHR` function is used instead, while the validation layer seems to be unaware of it.
++- *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.*
++ - It happens when you map a buffer or image, because the library maps the entire
++ `VkDeviceMemory` block, where different types of images and buffers may end
++ up together, especially on GPUs with unified memory like Intel.
++- *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.*
++ - It may happen when you use [defragmentation](@ref defragmentation).
++
++\section general_considerations_allocation_algorithm Allocation algorithm
++
++The library uses the following algorithm for allocation, in order:
++
++-# Try to find a free range of memory in existing blocks.
++-# If failed, try to create a new block of `VkDeviceMemory`, with the preferred block size.
++-# If failed, try to create such a block with size / 2, size / 4, size / 8.
++-# If failed, try to allocate a separate `VkDeviceMemory` for this allocation,
++ just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
++-# If failed, choose another memory type that meets the requirements specified in
++ VmaAllocationCreateInfo and go to point 1.
++-# If failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
++
++\section general_considerations_features_not_supported Features not supported
++
++Features deliberately excluded from the scope of this library:
++
++-# **Data transfer.** Uploading (streaming) and downloading data of buffers and images
++ between CPU and GPU memory and the related synchronization is the responsibility of the user.
++ Defining some "texture" object that would automatically stream its data from a
++ staging copy in CPU memory to GPU memory would rather be a feature of another,
++ higher-level library implemented on top of VMA.
++ VMA doesn't record any commands to a `VkCommandBuffer`. It just allocates memory.
++-# **Recreation of buffers and images.** Although the library has functions for
++ buffer and image creation: vmaCreateBuffer(), vmaCreateImage(), you need to
++ recreate these objects yourself after defragmentation. That is because the big
++ structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in the
++ #VmaAllocation object.
++-# **Handling CPU memory allocation failures.** When dynamically creating small C++
++ objects in CPU memory (not Vulkan memory), allocation failures are not checked
++ and handled gracefully, because that would complicate code significantly and
++ is usually not needed in desktop PC applications anyway.
++ Success of an allocation is just checked with an assert.
++-# **Code free of any compiler warnings.** Maintaining the library to compile and
++ work correctly on so many different platforms is hard enough. Being free of
++ any warnings, on any version of any compiler, is simply not feasible.
++ There are many preprocessor macros that make some variables unused, function parameters unreferenced,
++ or conditional expressions constant in some configurations.
++ The code of this library should not be bigger or more complicated just to silence these warnings.
++ It is recommended to disable such warnings instead.
++-# This is a C++ library with C interface. **Bindings or ports to any other programming languages** are welcome as external projects but
++ are not going to be included into this repository.
++*/
diff --git a/www/ungoogled-chromium/files/patch-third__party_webrtc_rtc__base_physical__socket__server.cc b/www/ungoogled-chromium/files/patch-third__party_webrtc_rtc__base_physical__socket__server.cc
index beba61d290aa..d9b52fe603d6 100644
--- a/www/ungoogled-chromium/files/patch-third__party_webrtc_rtc__base_physical__socket__server.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_webrtc_rtc__base_physical__socket__server.cc
@@ -1,6 +1,6 @@
---- third_party/webrtc/rtc_base/physical_socket_server.cc.orig 2023-05-05 12:12:41 UTC
+--- third_party/webrtc/rtc_base/physical_socket_server.cc.orig 2023-12-23 12:33:28 UTC
+++ third_party/webrtc/rtc_base/physical_socket_server.cc
-@@ -56,7 +56,7 @@
+@@ -53,7 +53,7 @@
#include "rtc_base/time_utils.h"
#include "system_wrappers/include/field_trial.h"
@@ -9,7 +9,7 @@
#include <linux/sockios.h>
#endif
-@@ -75,7 +75,7 @@ typedef void* SockOptArg;
+@@ -73,7 +73,7 @@ typedef void* SockOptArg;
#endif // WEBRTC_POSIX
@@ -18,7 +18,7 @@
int64_t GetSocketRecvTimestamp(int socket) {
struct timeval tv_ioctl;
-@@ -310,7 +310,7 @@ int PhysicalSocket::GetOption(Option opt, int* value)
+@@ -307,7 +307,7 @@ int PhysicalSocket::GetOption(Option opt, int* value)
return -1;
}
if (opt == OPT_DONTFRAGMENT) {
@@ -27,7 +27,7 @@
*value = (*value != IP_PMTUDISC_DONT) ? 1 : 0;
#endif
} else if (opt == OPT_DSCP) {
-@@ -328,7 +328,7 @@ int PhysicalSocket::SetOption(Option opt, int value) {
+@@ -325,7 +325,7 @@ int PhysicalSocket::SetOption(Option opt, int value) {
if (TranslateOption(opt, &slevel, &sopt) == -1)
return -1;
if (opt == OPT_DONTFRAGMENT) {
@@ -36,7 +36,7 @@
value = (value) ? IP_PMTUDISC_DO : IP_PMTUDISC_DONT;
#endif
} else if (opt == OPT_DSCP) {
-@@ -356,7 +356,7 @@ int PhysicalSocket::SetOption(Option opt, int value) {
+@@ -353,7 +353,7 @@ int PhysicalSocket::SetOption(Option opt, int value) {
int PhysicalSocket::Send(const void* pv, size_t cb) {
int sent = DoSend(
s_, reinterpret_cast<const char*>(pv), static_cast<int>(cb),
@@ -45,7 +45,7 @@
// Suppress SIGPIPE. Without this, attempting to send on a socket whose
// other end is closed will result in a SIGPIPE signal being raised to
// our process, which by default will terminate the process, which we
-@@ -385,7 +385,7 @@ int PhysicalSocket::SendTo(const void* buffer,
+@@ -382,7 +382,7 @@ int PhysicalSocket::SendTo(const void* buffer,
size_t len = addr.ToSockAddrStorage(&saddr);
int sent =
DoSendTo(s_, static_cast<const char*>(buffer), static_cast<int>(length),
@@ -54,7 +54,7 @@
// Suppress SIGPIPE. See above for explanation.
MSG_NOSIGNAL,
#else
-@@ -643,7 +643,7 @@ int PhysicalSocket::TranslateOption(Option opt, int* s
+@@ -641,7 +641,7 @@ int PhysicalSocket::TranslateOption(Option opt, int* s
*slevel = IPPROTO_IP;
*sopt = IP_DONTFRAGMENT;
break;
diff --git a/www/ungoogled-chromium/files/patch-third__party_webrtc_rtc__base_physical__socket__server.h b/www/ungoogled-chromium/files/patch-third__party_webrtc_rtc__base_physical__socket__server.h
index 2bc7c485bc7d..b17e2638813a 100644
--- a/www/ungoogled-chromium/files/patch-third__party_webrtc_rtc__base_physical__socket__server.h
+++ b/www/ungoogled-chromium/files/patch-third__party_webrtc_rtc__base_physical__socket__server.h
@@ -1,11 +1,11 @@
---- third_party/webrtc/rtc_base/physical_socket_server.h.orig 2023-05-05 12:12:41 UTC
+--- third_party/webrtc/rtc_base/physical_socket_server.h.orig 2023-12-23 12:33:28 UTC
+++ third_party/webrtc/rtc_base/physical_socket_server.h
-@@ -14,7 +14,7 @@
- #include "api/units/time_delta.h"
+@@ -18,7 +18,7 @@
+ #include "rtc_base/third_party/sigslot/sigslot.h"
#if defined(WEBRTC_POSIX)
-#if defined(WEBRTC_LINUX)
+#if defined(WEBRTC_LINUX) && !defined(WEBRTC_BSD)
// On Linux, use epoll.
#include <sys/epoll.h>
- #define WEBRTC_USE_EPOLL 1
+
diff --git a/www/ungoogled-chromium/files/patch-third__party_widevine_cdm_widevine.gni b/www/ungoogled-chromium/files/patch-third__party_widevine_cdm_widevine.gni
index dc9023388ddc..acdef5824d49 100644
--- a/www/ungoogled-chromium/files/patch-third__party_widevine_cdm_widevine.gni
+++ b/www/ungoogled-chromium/files/patch-third__party_widevine_cdm_widevine.gni
@@ -1,6 +1,6 @@
---- third_party/widevine/cdm/widevine.gni.orig 2023-10-13 13:20:35 UTC
+--- third_party/widevine/cdm/widevine.gni.orig 2023-12-23 12:33:28 UTC
+++ third_party/widevine/cdm/widevine.gni
-@@ -27,6 +27,8 @@ library_widevine_cdm_available =
+@@ -28,6 +28,8 @@ library_widevine_cdm_available =
(is_chromeos &&
(target_cpu == "x64" || target_cpu == "arm" || target_cpu == "arm64")) ||
(target_os == "linux" && target_cpu == "x64") ||
diff --git a/www/ungoogled-chromium/files/patch-tools_perf_chrome__telemetry__build_BUILD.gn b/www/ungoogled-chromium/files/patch-tools_perf_chrome__telemetry__build_BUILD.gn
index 9f443db504c6..352c05b4cbd2 100644
--- a/www/ungoogled-chromium/files/patch-tools_perf_chrome__telemetry__build_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-tools_perf_chrome__telemetry__build_BUILD.gn
@@ -1,4 +1,4 @@
---- tools/perf/chrome_telemetry_build/BUILD.gn.orig 2023-06-05 19:39:05 UTC
+--- tools/perf/chrome_telemetry_build/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ tools/perf/chrome_telemetry_build/BUILD.gn
@@ -47,7 +47,7 @@ group("telemetry_chrome_test") {
data_deps += [ "//chrome:reorder_imports" ]
@@ -9,7 +9,7 @@
data_deps += [ "//third_party/breakpad:dump_syms($host_toolchain)" ]
# CrOS currently has issues with the locally compiled version of
-@@ -172,7 +172,7 @@ group("telemetry_chrome_test_without_chrome") {
+@@ -176,7 +176,7 @@ group("telemetry_chrome_test_without_chrome") {
]
# Cr-Fuchsia doesn't support breakpad.
diff --git a/www/ungoogled-chromium/files/patch-ui_base_clipboard_clipboard__constants.cc b/www/ungoogled-chromium/files/patch-ui_base_clipboard_clipboard__constants.cc
index b99554228c95..68e64251824d 100644
--- a/www/ungoogled-chromium/files/patch-ui_base_clipboard_clipboard__constants.cc
+++ b/www/ungoogled-chromium/files/patch-ui_base_clipboard_clipboard__constants.cc
@@ -1,6 +1,6 @@
---- ui/base/clipboard/clipboard_constants.cc.orig 2022-10-01 07:40:07 UTC
+--- ui/base/clipboard/clipboard_constants.cc.orig 2023-12-23 12:33:28 UTC
+++ ui/base/clipboard/clipboard_constants.cc
-@@ -24,7 +24,7 @@ const char kMimeTypeOctetStream[] = "application/octet
+@@ -25,7 +25,7 @@ const char kMimeTypeOctetStream[] = "application/octet
// Used for window dragging on some platforms.
const char kMimeTypeWindowDrag[] = "chromium/x-window-drag";
diff --git a/www/ungoogled-chromium/files/patch-ui_base_clipboard_clipboard__constants.h b/www/ungoogled-chromium/files/patch-ui_base_clipboard_clipboard__constants.h
index 6fff34143de3..d8e4d3e67022 100644
--- a/www/ungoogled-chromium/files/patch-ui_base_clipboard_clipboard__constants.h
+++ b/www/ungoogled-chromium/files/patch-ui_base_clipboard_clipboard__constants.h
@@ -1,6 +1,6 @@
---- ui/base/clipboard/clipboard_constants.h.orig 2023-02-11 09:11:04 UTC
+--- ui/base/clipboard/clipboard_constants.h.orig 2023-12-23 12:33:28 UTC
+++ ui/base/clipboard/clipboard_constants.h
-@@ -47,7 +47,7 @@ extern const char kMimeTypeDataTransferEndpoint[];
+@@ -48,7 +48,7 @@ extern const char kMimeTypeDataTransferEndpoint[];
// ----- LINUX & CHROMEOS & FUCHSIA MIME TYPES -----
diff --git a/www/ungoogled-chromium/files/patch-ui_base_clipboard_clipboard__non__backed.cc b/www/ungoogled-chromium/files/patch-ui_base_clipboard_clipboard__non__backed.cc
index 55be9e936bc3..c6400abb3dd5 100644
--- a/www/ungoogled-chromium/files/patch-ui_base_clipboard_clipboard__non__backed.cc
+++ b/www/ungoogled-chromium/files/patch-ui_base_clipboard_clipboard__non__backed.cc
@@ -1,6 +1,6 @@
---- ui/base/clipboard/clipboard_non_backed.cc.orig 2023-11-04 07:08:51 UTC
+--- ui/base/clipboard/clipboard_non_backed.cc.orig 2023-12-23 12:33:28 UTC
+++ ui/base/clipboard/clipboard_non_backed.cc
-@@ -455,7 +455,7 @@ ClipboardNonBacked::ClipboardNonBacked() {
+@@ -459,7 +459,7 @@ ClipboardNonBacked::ClipboardNonBacked() {
// so create internal clipboards for platform supported clipboard buffers.
constexpr ClipboardBuffer kClipboardBuffers[] = {
ClipboardBuffer::kCopyPaste,
diff --git a/www/ungoogled-chromium/files/patch-ui_base_test_ui__controls.h b/www/ungoogled-chromium/files/patch-ui_base_test_ui__controls.h
new file mode 100644
index 000000000000..ffe966c6c848
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-ui_base_test_ui__controls.h
@@ -0,0 +1,11 @@
+--- ui/base/test/ui_controls.h.orig 2023-12-23 12:33:28 UTC
++++ ui/base/test/ui_controls.h
+@@ -183,7 +183,7 @@ bool SendTouchEventsNotifyWhenDone(int action,
+ base::OnceClosure task);
+ #endif
+
+-#if BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ // Forces the platform implementation to use screen coordinates, even if they're
+ // not really available, the next time that ui_controls::SendMouseMove() or
+ // ui_controls::SendMouseMoveNotifyWhenDone() is called, or some other method
diff --git a/www/ungoogled-chromium/files/patch-ui_base_ui__base__features.cc b/www/ungoogled-chromium/files/patch-ui_base_ui__base__features.cc
index 9690c6bdf897..26db96fa7a18 100644
--- a/www/ungoogled-chromium/files/patch-ui_base_ui__base__features.cc
+++ b/www/ungoogled-chromium/files/patch-ui_base_ui__base__features.cc
@@ -1,6 +1,6 @@
---- ui/base/ui_base_features.cc.orig 2023-11-04 07:08:51 UTC
+--- ui/base/ui_base_features.cc.orig 2023-12-23 12:33:28 UTC
+++ ui/base/ui_base_features.cc
-@@ -216,7 +216,7 @@ BASE_FEATURE(kExperimentalFlingAnimation,
+@@ -224,7 +224,7 @@ BASE_FEATURE(kExperimentalFlingAnimation,
"ExperimentalFlingAnimation",
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
@@ -9,21 +9,12 @@
(BUILDFLAG(IS_LINUX) && !BUILDFLAG(IS_CHROMEOS_ASH) && \
!BUILDFLAG(IS_CHROMEOS_LACROS))
base::FEATURE_ENABLED_BY_DEFAULT
-@@ -328,7 +328,7 @@ bool IsForcedColorsEnabled() {
- // milestones.
+@@ -337,7 +337,7 @@ bool IsForcedColorsEnabled() {
BASE_FEATURE(kEyeDropper,
"EyeDropper",
--#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
-+#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ #if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
+- BUILDFLAG(IS_CHROMEOS)
++ BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
base::FEATURE_ENABLED_BY_DEFAULT
#else
base::FEATURE_DISABLED_BY_DEFAULT
-@@ -604,7 +604,7 @@ BASE_FEATURE(kBubbleMetricsApi,
- "BubbleMetricsApi",
- base::FEATURE_DISABLED_BY_DEFAULT);
-
--#if !BUILDFLAG(IS_LINUX)
-+#if !BUILDFLAG(IS_LINUX) && !BUILDFLAG(IS_BSD)
- BASE_FEATURE(kWebUiSystemFont,
- "WebUiSystemFont",
- base::FEATURE_ENABLED_BY_DEFAULT);
diff --git a/www/ungoogled-chromium/files/patch-ui_base_ui__base__features.h b/www/ungoogled-chromium/files/patch-ui_base_ui__base__features.h
deleted file mode 100644
index c3d5da8cf3f5..000000000000
--- a/www/ungoogled-chromium/files/patch-ui_base_ui__base__features.h
+++ /dev/null
@@ -1,11 +0,0 @@
---- ui/base/ui_base_features.h.orig 2023-11-04 07:08:51 UTC
-+++ ui/base/ui_base_features.h
-@@ -287,7 +287,7 @@ ChromeRefresh2023Level GetChromeRefresh2023Level();
- COMPONENT_EXPORT(UI_BASE_FEATURES)
- BASE_DECLARE_FEATURE(kBubbleMetricsApi);
-
--#if !BUILDFLAG(IS_LINUX)
-+#if !BUILDFLAG(IS_LINUX) && !BUILDFLAG(IS_BSD)
- COMPONENT_EXPORT(UI_BASE_FEATURES) BASE_DECLARE_FEATURE(kWebUiSystemFont);
- #endif
-
diff --git a/www/ungoogled-chromium/files/patch-ui_base_webui_web__ui__util.cc b/www/ungoogled-chromium/files/patch-ui_base_webui_web__ui__util.cc
index 6714b4d9c8a3..37d7817d23f5 100644
--- a/www/ungoogled-chromium/files/patch-ui_base_webui_web__ui__util.cc
+++ b/www/ungoogled-chromium/files/patch-ui_base_webui_web__ui__util.cc
@@ -1,15 +1,15 @@
---- ui/base/webui/web_ui_util.cc.orig 2023-06-05 19:39:05 UTC
+--- ui/base/webui/web_ui_util.cc.orig 2023-12-23 12:33:28 UTC
+++ ui/base/webui/web_ui_util.cc
-@@ -39,7 +39,7 @@ namespace {
+@@ -38,7 +38,7 @@ namespace {
constexpr float kMaxScaleFactor = 1000.0f;
std::string GetFontFamilyMd() {
--#if !BUILDFLAG(IS_LINUX)
-+#if !BUILDFLAG(IS_LINUX) && !BUILDFLAG(IS_BSD)
- if (base::FeatureList::IsEnabled(features::kWebUiSystemFont)) {
- return GetFontFamily();
- }
-@@ -219,7 +219,7 @@ std::string GetFontFamily() {
+-#if BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ return "Roboto, " + GetFontFamily();
+ #else
+ return GetFontFamily();
+@@ -216,7 +216,7 @@ std::string GetFontFamily() {
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
diff --git a/www/ungoogled-chromium/files/patch-ui_color_color__id.h b/www/ungoogled-chromium/files/patch-ui_color_color__id.h
index 3bcfce38253a..8654d8ef42a8 100644
--- a/www/ungoogled-chromium/files/patch-ui_color_color__id.h
+++ b/www/ungoogled-chromium/files/patch-ui_color_color__id.h
@@ -1,6 +1,6 @@
---- ui/color/color_id.h.orig 2023-11-04 07:08:51 UTC
+--- ui/color/color_id.h.orig 2023-12-23 12:33:28 UTC
+++ ui/color/color_id.h
-@@ -600,7 +600,7 @@
+@@ -606,7 +606,7 @@
E_CPONLY(kColorNativeColor6) \
E_CPONLY(kColorNativeBaseColor) \
E_CPONLY(kColorNativeSecondaryColor)
diff --git a/www/ungoogled-chromium/files/patch-ui_color_color__provider__utils.cc b/www/ungoogled-chromium/files/patch-ui_color_color__provider__utils.cc
index b13bd7492775..31d57072a8b8 100644
--- a/www/ungoogled-chromium/files/patch-ui_color_color__provider__utils.cc
+++ b/www/ungoogled-chromium/files/patch-ui_color_color__provider__utils.cc
@@ -1,6 +1,6 @@
---- ui/color/color_provider_utils.cc.orig 2023-11-04 07:08:51 UTC
+--- ui/color/color_provider_utils.cc.orig 2023-12-23 12:33:28 UTC
+++ ui/color/color_provider_utils.cc
-@@ -179,7 +179,7 @@ base::StringPiece SystemThemeName(ui::SystemTheme syst
+@@ -187,7 +187,7 @@ base::StringPiece SystemThemeName(ui::SystemTheme syst
switch (system_theme) {
case ui::SystemTheme::kDefault:
return "kDefault";
diff --git a/www/ungoogled-chromium/files/patch-ui_compositor_compositor.cc b/www/ungoogled-chromium/files/patch-ui_compositor_compositor.cc
deleted file mode 100644
index 379c150f02a3..000000000000
--- a/www/ungoogled-chromium/files/patch-ui_compositor_compositor.cc
+++ /dev/null
@@ -1,11 +0,0 @@
---- ui/compositor/compositor.cc.orig 2023-09-17 07:59:53 UTC
-+++ ui/compositor/compositor.cc
-@@ -896,7 +896,7 @@ void Compositor::OnResume() {
-
- // TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
- // of lacros-chrome is complete.
--#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_BSD)
- void Compositor::OnCompleteSwapWithNewSize(const gfx::Size& size) {
- for (auto& observer : observer_list_)
- observer.OnCompositingCompleteSwapWithNewSize(this, size);
diff --git a/www/ungoogled-chromium/files/patch-ui_compositor_compositor.h b/www/ungoogled-chromium/files/patch-ui_compositor_compositor.h
deleted file mode 100644
index 965e7ccde0dd..000000000000
--- a/www/ungoogled-chromium/files/patch-ui_compositor_compositor.h
+++ /dev/null
@@ -1,11 +0,0 @@
---- ui/compositor/compositor.h.orig 2023-06-05 19:39:05 UTC
-+++ ui/compositor/compositor.h
-@@ -458,7 +458,7 @@ class COMPOSITOR_EXPORT Compositor : public base::Powe
-
- // TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
- // of lacros-chrome is complete.
--#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_BSD)
- void OnCompleteSwapWithNewSize(const gfx::Size& size);
- #endif
-
diff --git a/www/ungoogled-chromium/files/patch-ui_compositor_compositor__observer.h b/www/ungoogled-chromium/files/patch-ui_compositor_compositor__observer.h
deleted file mode 100644
index 9a6e8cc9c92e..000000000000
--- a/www/ungoogled-chromium/files/patch-ui_compositor_compositor__observer.h
+++ /dev/null
@@ -1,11 +0,0 @@
---- ui/compositor/compositor_observer.h.orig 2022-10-01 07:40:07 UTC
-+++ ui/compositor/compositor_observer.h
-@@ -48,7 +48,7 @@ class COMPOSITOR_EXPORT CompositorObserver {
-
- // TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
- // of lacros-chrome is complete.
--#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_BSD)
- // Called when a swap with new size is completed.
- virtual void OnCompositingCompleteSwapWithNewSize(ui::Compositor* compositor,
- const gfx::Size& size) {}
diff --git a/www/ungoogled-chromium/files/patch-ui_gfx_BUILD.gn b/www/ungoogled-chromium/files/patch-ui_gfx_BUILD.gn
index 0972e8688890..3efb07eb0846 100644
--- a/www/ungoogled-chromium/files/patch-ui_gfx_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-ui_gfx_BUILD.gn
@@ -1,6 +1,6 @@
---- ui/gfx/BUILD.gn.orig 2023-11-04 07:08:51 UTC
+--- ui/gfx/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ ui/gfx/BUILD.gn
-@@ -660,7 +660,7 @@ source_set("memory_buffer_sources") {
+@@ -664,7 +664,7 @@ source_set("memory_buffer_sources") {
deps += [ "//build/config/linux/libdrm" ]
}
diff --git a/www/ungoogled-chromium/files/patch-ui_gfx_linux_gbm__wrapper.cc b/www/ungoogled-chromium/files/patch-ui_gfx_linux_gbm__wrapper.cc
new file mode 100644
index 000000000000..174a9c5c6b19
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-ui_gfx_linux_gbm__wrapper.cc
@@ -0,0 +1,11 @@
+--- ui/gfx/linux/gbm_wrapper.cc.orig 2023-12-23 12:33:28 UTC
++++ ui/gfx/linux/gbm_wrapper.cc
+@@ -316,7 +316,7 @@ class Device final : public ui::GbmDevice {
+ // of 1x1 BOs which are destroyed before creating the final BO creation used
+ // to instantiate the returned GbmBuffer.
+ gfx::Size size =
+-#if BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ gfx::Size(1, 1);
+ #else
+ requested_size;
diff --git a/www/ungoogled-chromium/files/patch-ui_gfx_x_generated__protos_dri3.cc b/www/ungoogled-chromium/files/patch-ui_gfx_x_generated__protos_dri3.cc
index ea91c6ea5843..256e7991a54c 100644
--- a/www/ungoogled-chromium/files/patch-ui_gfx_x_generated__protos_dri3.cc
+++ b/www/ungoogled-chromium/files/patch-ui_gfx_x_generated__protos_dri3.cc
@@ -1,6 +1,6 @@
---- ui/gfx/x/generated_protos/dri3.cc.orig 2022-10-01 07:40:07 UTC
+--- ui/gfx/x/generated_protos/dri3.cc.orig 2023-12-23 12:33:28 UTC
+++ ui/gfx/x/generated_protos/dri3.cc
-@@ -44,6 +44,8 @@
+@@ -27,6 +27,8 @@
#include <xcb/xcb.h>
#include <xcb/xcbext.h>
diff --git a/www/ungoogled-chromium/files/patch-ui_gfx_x_generated__protos_shm.cc b/www/ungoogled-chromium/files/patch-ui_gfx_x_generated__protos_shm.cc
index e9d1bd18c3b9..23e9250c94f2 100644
--- a/www/ungoogled-chromium/files/patch-ui_gfx_x_generated__protos_shm.cc
+++ b/www/ungoogled-chromium/files/patch-ui_gfx_x_generated__protos_shm.cc
@@ -1,6 +1,6 @@
---- ui/gfx/x/generated_protos/shm.cc.orig 2022-10-01 07:40:07 UTC
+--- ui/gfx/x/generated_protos/shm.cc.orig 2023-12-23 12:33:28 UTC
+++ ui/gfx/x/generated_protos/shm.cc
-@@ -44,6 +44,8 @@
+@@ -27,6 +27,8 @@
#include <xcb/xcb.h>
#include <xcb/xcbext.h>
diff --git a/www/ungoogled-chromium/files/patch-ui_gl_BUILD.gn b/www/ungoogled-chromium/files/patch-ui_gl_BUILD.gn
index 68f3c9526d5d..51b1d5a791d5 100644
--- a/www/ungoogled-chromium/files/patch-ui_gl_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-ui_gl_BUILD.gn
@@ -1,4 +1,4 @@
---- ui/gl/BUILD.gn.orig 2023-11-04 07:08:51 UTC
+--- ui/gl/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ ui/gl/BUILD.gn
@@ -151,8 +151,6 @@ component("gl") {
defines += [ "GPU_ENABLE_SERVICE_LOGGING" ]
@@ -25,7 +25,7 @@
]
if (enable_swiftshader) {
data_deps += [
-@@ -578,7 +574,6 @@ test("gl_unittests") {
+@@ -582,7 +578,6 @@ test("gl_unittests") {
data_deps = [
"//testing/buildbot/filters:gl_unittests_filters",
diff --git a/www/ungoogled-chromium/files/patch-ui_gl_gl__context.cc b/www/ungoogled-chromium/files/patch-ui_gl_gl__context.cc
index c1556ba6061e..6e4275291b8a 100644
--- a/www/ungoogled-chromium/files/patch-ui_gl_gl__context.cc
+++ b/www/ungoogled-chromium/files/patch-ui_gl_gl__context.cc
@@ -1,6 +1,6 @@
---- ui/gl/gl_context.cc.orig 2023-11-04 07:08:51 UTC
+--- ui/gl/gl_context.cc.orig 2023-12-23 12:33:28 UTC
+++ ui/gl/gl_context.cc
-@@ -440,7 +440,7 @@ bool GLContext::MakeVirtuallyCurrent(
+@@ -486,7 +486,7 @@ bool GLContext::MakeVirtuallyCurrent(
DCHECK(virtual_context->IsCurrent(surface));
if (switched_real_contexts || virtual_context != current_virtual_context_) {
diff --git a/www/ungoogled-chromium/files/patch-ui_gtk_gtk__ui.cc b/www/ungoogled-chromium/files/patch-ui_gtk_gtk__ui.cc
index 2779192e0fe1..608a45dbfc35 100644
--- a/www/ungoogled-chromium/files/patch-ui_gtk_gtk__ui.cc
+++ b/www/ungoogled-chromium/files/patch-ui_gtk_gtk__ui.cc
@@ -1,6 +1,6 @@
---- ui/gtk/gtk_ui.cc.orig 2023-11-04 07:08:51 UTC
+--- ui/gtk/gtk_ui.cc.orig 2023-12-23 12:33:28 UTC
+++ ui/gtk/gtk_ui.cc
-@@ -993,11 +993,19 @@ ui::DisplayConfig GtkUi::GetDisplayConfig() const {
+@@ -1013,11 +1013,19 @@ ui::DisplayConfig GtkUi::GetDisplayConfig() const {
GdkRectangle geometry;
gdk_monitor_get_geometry(monitor, &geometry);
int monitor_scale = std::max(1, gdk_monitor_get_scale_factor(monitor));
diff --git a/www/ungoogled-chromium/files/patch-ui_message__center_views_message__popup__view.cc b/www/ungoogled-chromium/files/patch-ui_message__center_views_message__popup__view.cc
index 8f82a0328458..3ff0f1ce127e 100644
--- a/www/ungoogled-chromium/files/patch-ui_message__center_views_message__popup__view.cc
+++ b/www/ungoogled-chromium/files/patch-ui_message__center_views_message__popup__view.cc
@@ -1,6 +1,6 @@
---- ui/message_center/views/message_popup_view.cc.orig 2022-10-29 17:50:56 UTC
+--- ui/message_center/views/message_popup_view.cc.orig 2023-12-23 12:33:28 UTC
+++ ui/message_center/views/message_popup_view.cc
-@@ -118,7 +118,7 @@ void MessagePopupView::Show() {
+@@ -135,7 +135,7 @@ void MessagePopupView::Show() {
params.z_order = ui::ZOrderLevel::kFloatingWindow;
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
diff --git a/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_emulate_wayland__input__emulate.cc b/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_emulate_wayland__input__emulate.cc
new file mode 100644
index 000000000000..ffaeeba1457f
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_emulate_wayland__input__emulate.cc
@@ -0,0 +1,11 @@
+--- ui/ozone/platform/wayland/emulate/wayland_input_emulate.cc.orig 2023-12-23 12:33:28 UTC
++++ ui/ozone/platform/wayland/emulate/wayland_input_emulate.cc
+@@ -230,7 +230,7 @@ void WaylandInputEmulate::EmulateTouch(int action,
+ wayland_proxy->FlushForTesting();
+ }
+
+-#if BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ void WaylandInputEmulate::ForceUseScreenCoordinatesOnce() {
+ force_use_screen_coordinates_once_ = true;
+ }
diff --git a/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_emulate_wayland__input__emulate.h b/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_emulate_wayland__input__emulate.h
new file mode 100644
index 000000000000..bcbe2cdb2353
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_emulate_wayland__input__emulate.h
@@ -0,0 +1,11 @@
+--- ui/ozone/platform/wayland/emulate/wayland_input_emulate.h.orig 2023-12-23 12:33:28 UTC
++++ ui/ozone/platform/wayland/emulate/wayland_input_emulate.h
+@@ -61,7 +61,7 @@ class WaylandInputEmulate : public wl::WaylandProxy::D
+ int touch_id,
+ uint32_t request_id);
+
+-#if BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ void ForceUseScreenCoordinatesOnce();
+ #endif
+
diff --git a/www/ungoogled-chromium/files/patch-ui_views_controls_textfield_textfield.cc b/www/ungoogled-chromium/files/patch-ui_views_controls_textfield_textfield.cc
index 8f3407a24d54..c68be6c24865 100644
--- a/www/ungoogled-chromium/files/patch-ui_views_controls_textfield_textfield.cc
+++ b/www/ungoogled-chromium/files/patch-ui_views_controls_textfield_textfield.cc
@@ -1,4 +1,4 @@
---- ui/views/controls/textfield/textfield.cc.orig 2023-11-04 07:08:51 UTC
+--- ui/views/controls/textfield/textfield.cc.orig 2023-12-23 12:33:28 UTC
+++ ui/views/controls/textfield/textfield.cc
@@ -81,7 +81,7 @@
#include "base/win/win_util.h"
@@ -7,9 +7,9 @@
-#if BUILDFLAG(IS_LINUX)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
#include "ui/base/ime/linux/text_edit_command_auralinux.h"
+ #include "ui/base/ime/text_input_flags.h"
#include "ui/linux/linux_ui.h"
- #endif
-@@ -175,7 +175,7 @@ bool IsControlKeyModifier(int flags) {
+@@ -176,7 +176,7 @@ bool IsControlKeyModifier(int flags) {
// Control-modified key combination, but we cannot extend it to other platforms
// as Control has different meanings and behaviors.
// https://crrev.com/2580483002/#msg46
@@ -18,7 +18,7 @@
return flags & ui::EF_CONTROL_DOWN;
#else
return false;
-@@ -746,7 +746,7 @@ bool Textfield::OnKeyPressed(const ui::KeyEvent& event
+@@ -747,7 +747,7 @@ bool Textfield::OnKeyPressed(const ui::KeyEvent& event
if (!textfield)
return handled;
@@ -27,7 +27,7 @@
auto* linux_ui = ui::LinuxUi::instance();
std::vector<ui::TextEditCommandAuraLinux> commands;
if (!handled && linux_ui &&
-@@ -928,7 +928,7 @@ void Textfield::AboutToRequestFocusFromTabTraversal(bo
+@@ -930,7 +930,7 @@ void Textfield::AboutToRequestFocusFromTabTraversal(bo
}
bool Textfield::SkipDefaultKeyEventProcessing(const ui::KeyEvent& event) {
@@ -36,7 +36,7 @@
// Skip any accelerator handling that conflicts with custom keybindings.
auto* linux_ui = ui::LinuxUi::instance();
std::vector<ui::TextEditCommandAuraLinux> commands;
-@@ -1933,7 +1933,7 @@ bool Textfield::ShouldDoLearning() {
+@@ -1941,7 +1941,7 @@ bool Textfield::ShouldDoLearning() {
return false;
}
@@ -45,7 +45,7 @@
// TODO(https://crbug.com/952355): Implement this method to support Korean IME
// reconversion feature on native text fields (e.g. find bar).
bool Textfield::SetCompositionFromExistingText(
-@@ -2446,14 +2446,14 @@ ui::TextEditCommand Textfield::GetCommandForKeyEvent(
+@@ -2454,14 +2454,14 @@ ui::TextEditCommand Textfield::GetCommandForKeyEvent(
#endif
return ui::TextEditCommand::DELETE_BACKWARD;
}
diff --git a/www/ungoogled-chromium/files/patch-ui_views_test_ui__controls__factory__desktop__aura__ozone.cc b/www/ungoogled-chromium/files/patch-ui_views_test_ui__controls__factory__desktop__aura__ozone.cc
new file mode 100644
index 000000000000..94aa59be1174
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-ui_views_test_ui__controls__factory__desktop__aura__ozone.cc
@@ -0,0 +1,11 @@
+--- ui/views/test/ui_controls_factory_desktop_aura_ozone.cc.orig 2023-12-23 12:33:28 UTC
++++ ui/views/test/ui_controls_factory_desktop_aura_ozone.cc
+@@ -284,7 +284,7 @@ bool SendTouchEventsNotifyWhenDone(int action,
+ }
+ #endif
+
+-#if BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ // static
+ void ForceUseScreenCoordinatesOnce() {
+ g_ozone_ui_controls_test_helper->ForceUseScreenCoordinatesOnce();
diff --git a/www/ungoogled-chromium/files/patch-ui_views_widget_widget.cc b/www/ungoogled-chromium/files/patch-ui_views_widget_widget.cc
index 9ce29b7fda7e..d481fe04d903 100644
--- a/www/ungoogled-chromium/files/patch-ui_views_widget_widget.cc
+++ b/www/ungoogled-chromium/files/patch-ui_views_widget_widget.cc
@@ -1,4 +1,4 @@
---- ui/views/widget/widget.cc.orig 2023-11-04 07:08:51 UTC
+--- ui/views/widget/widget.cc.orig 2023-12-23 12:33:28 UTC
+++ ui/views/widget/widget.cc
@@ -54,7 +54,7 @@
#include "ui/views/window/custom_frame_view.h"
@@ -9,7 +9,7 @@
#include "ui/linux/linux_ui.h"
#endif
-@@ -2061,7 +2061,7 @@ const ui::NativeTheme* Widget::GetNativeTheme() const
+@@ -2068,7 +2068,7 @@ const ui::NativeTheme* Widget::GetNativeTheme() const
if (parent_)
return parent_->GetNativeTheme();
diff --git a/www/ungoogled-chromium/files/patch-ui_views_window_dialog__delegate.cc b/www/ungoogled-chromium/files/patch-ui_views_window_dialog__delegate.cc
index 24a6be245b4b..83315a09556d 100644
--- a/www/ungoogled-chromium/files/patch-ui_views_window_dialog__delegate.cc
+++ b/www/ungoogled-chromium/files/patch-ui_views_window_dialog__delegate.cc
@@ -1,6 +1,6 @@
---- ui/views/window/dialog_delegate.cc.orig 2023-10-13 13:20:35 UTC
+--- ui/views/window/dialog_delegate.cc.orig 2023-12-23 12:33:28 UTC
+++ ui/views/window/dialog_delegate.cc
-@@ -78,7 +78,7 @@ Widget* DialogDelegate::CreateDialogWidget(
+@@ -79,7 +79,7 @@ Widget* DialogDelegate::CreateDialogWidget(
// static
bool DialogDelegate::CanSupportCustomFrame(gfx::NativeView parent) {
diff --git a/www/ungoogled-chromium/files/patch-v8_BUILD.gn b/www/ungoogled-chromium/files/patch-v8_BUILD.gn
index ec289a9e62d5..bf064b2fdce7 100644
--- a/www/ungoogled-chromium/files/patch-v8_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-v8_BUILD.gn
@@ -1,6 +1,6 @@
---- v8/BUILD.gn.orig 2023-11-04 07:08:51 UTC
+--- v8/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+++ v8/BUILD.gn
-@@ -1412,6 +1412,14 @@ config("toolchain") {
+@@ -1425,6 +1425,14 @@ config("toolchain") {
} else if (target_os == "chromeos") {
defines += [ "V8_HAVE_TARGET_OS" ]
defines += [ "V8_TARGET_OS_CHROMEOS" ]
@@ -15,7 +15,7 @@
}
# TODO(infra): Support v8_enable_prof on Windows.
-@@ -2375,6 +2383,12 @@ template("run_mksnapshot") {
+@@ -2388,6 +2396,12 @@ template("run_mksnapshot") {
if (!v8_enable_builtins_profiling && v8_enable_builtins_reordering) {
args += [ "--reorder-builtins" ]
}
@@ -28,7 +28,7 @@
}
# This is needed to distinguish between generating code for the simulator
-@@ -6156,7 +6170,7 @@ v8_component("v8_libbase") {
+@@ -6248,7 +6262,7 @@ v8_component("v8_libbase") {
}
}
@@ -37,7 +37,7 @@
sources += [
"src/base/debug/stack_trace_posix.cc",
"src/base/platform/platform-linux.cc",
-@@ -6167,6 +6181,18 @@ v8_component("v8_libbase") {
+@@ -6259,6 +6273,18 @@ v8_component("v8_libbase") {
"dl",
"rt",
]
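The first BUILD.gn hunk inserts eight lines after the target_os == "chromeos" branch; the added lines themselves are elided, but the visible context establishes the scheme: when GN knows the target OS it defines V8_HAVE_TARGET_OS plus one V8_TARGET_OS_* macro, and C++ code keys off those instead of probing the host compiler. An illustrative consumer, with V8_TARGET_OS_FREEBSD assumed for the example rather than read from the patch:

    #include <cstdio>

    #if defined(V8_HAVE_TARGET_OS)
    # if defined(V8_TARGET_OS_CHROMEOS)
    static const char kTargetOs[] = "chromeos";
    # elif defined(V8_TARGET_OS_FREEBSD)  // assumed macro name, for illustration
    static const char kTargetOs[] = "freebsd";
    # else
    static const char kTargetOs[] = "other";
    # endif
    #else
    // Without the GN-provided defines, v8config.h falls back to
    // detecting the host toolchain's predefined macros.
    static const char kTargetOs[] = "host-detected";
    #endif

    int main() { std::printf("target os: %s\n", kTargetOs); }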
diff --git a/www/ungoogled-chromium/files/patch-v8_include_v8-internal.h b/www/ungoogled-chromium/files/patch-v8_include_v8-internal.h
index b464ddc06cbb..a835631b3f4d 100644
--- a/www/ungoogled-chromium/files/patch-v8_include_v8-internal.h
+++ b/www/ungoogled-chromium/files/patch-v8_include_v8-internal.h
@@ -1,6 +1,6 @@
---- v8/include/v8-internal.h.orig 2023-08-18 10:26:52 UTC
+--- v8/include/v8-internal.h.orig 2023-12-23 12:33:28 UTC
+++ v8/include/v8-internal.h
-@@ -172,7 +172,7 @@ using SandboxedPointer_t = Address;
+@@ -175,7 +175,7 @@ using SandboxedPointer_t = Address;
#ifdef V8_ENABLE_SANDBOX
// Size of the sandbox, excluding the guard regions surrounding it.
diff --git a/www/ungoogled-chromium/files/patch-v8_src_api_api.cc b/www/ungoogled-chromium/files/patch-v8_src_api_api.cc
index 4545fc96af3e..014cd7d19c25 100644
--- a/www/ungoogled-chromium/files/patch-v8_src_api_api.cc
+++ b/www/ungoogled-chromium/files/patch-v8_src_api_api.cc
@@ -1,4 +1,4 @@
---- v8/src/api/api.cc.orig 2023-11-04 07:08:51 UTC
+--- v8/src/api/api.cc.orig 2023-12-23 12:33:28 UTC
+++ v8/src/api/api.cc
@@ -141,7 +141,7 @@
#include "src/wasm/wasm-serialization.h"
@@ -9,7 +9,7 @@
#include <signal.h>
#include <unistd.h>
-@@ -6363,7 +6363,7 @@ bool v8::V8::Initialize(const int build_config) {
+@@ -6356,7 +6356,7 @@ bool v8::V8::Initialize(const int build_config) {
return true;
}
diff --git a/www/ungoogled-chromium/files/patch-v8_src_base_platform_platform-posix.cc b/www/ungoogled-chromium/files/patch-v8_src_base_platform_platform-posix.cc
index fd6e3551409f..605c96253f71 100644
--- a/www/ungoogled-chromium/files/patch-v8_src_base_platform_platform-posix.cc
+++ b/www/ungoogled-chromium/files/patch-v8_src_base_platform_platform-posix.cc
@@ -1,6 +1,6 @@
---- v8/src/base/platform/platform-posix.cc.orig 2023-11-04 07:08:51 UTC
+--- v8/src/base/platform/platform-posix.cc.orig 2023-12-23 12:33:28 UTC
+++ v8/src/base/platform/platform-posix.cc
-@@ -55,7 +55,7 @@
+@@ -54,7 +54,7 @@
#if V8_OS_DARWIN
#include <mach/mach.h>
#include <malloc/malloc.h>
@@ -9,7 +9,7 @@
#include <malloc.h>
#endif
-@@ -73,7 +73,7 @@
+@@ -72,7 +72,7 @@
#include <sys/syscall.h>
#endif
@@ -18,7 +18,7 @@
#define MAP_ANONYMOUS MAP_ANON
#endif
-@@ -305,8 +305,15 @@ void OS::SetRandomMmapSeed(int64_t seed) {
+@@ -303,8 +303,15 @@ void OS::SetRandomMmapSeed(int64_t seed) {
}
}
@@ -34,7 +34,7 @@
uintptr_t raw_addr;
{
MutexGuard guard(rng_mutex.Pointer());
-@@ -401,6 +408,7 @@ void* OS::GetRandomMmapAddr() {
+@@ -399,6 +406,7 @@ void* OS::GetRandomMmapAddr() {
#endif
return reinterpret_cast<void*>(raw_addr);
}
@@ -42,7 +42,7 @@
// TODO(bbudge) Move Cygwin and Fuchsia stuff into platform-specific files.
#if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
-@@ -674,7 +682,7 @@ void OS::DestroySharedMemoryHandle(PlatformSharedMemor
+@@ -672,7 +680,7 @@ void OS::DestroySharedMemoryHandle(PlatformSharedMemor
// static
bool OS::HasLazyCommits() {
@@ -51,7 +51,7 @@
return true;
#else
// TODO(bbudge) Return true for all POSIX platforms.
-@@ -1300,7 +1308,7 @@ void Thread::SetThreadLocal(LocalStorageKey key, void*
+@@ -1303,7 +1311,7 @@ void Thread::SetThreadLocal(LocalStorageKey key, void*
// keep this version in POSIX as most Linux-compatible derivatives will
// support it. MacOS and FreeBSD are different here.
#if !defined(V8_OS_FREEBSD) && !defined(V8_OS_DARWIN) && !defined(_AIX) && \
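Several of the platform-posix.cc hunks revolve around the MAP_ANONYMOUS/MAP_ANON split: BSD-derived headers historically spell the flag MAP_ANON, and the context above shows the shim that maps the modern name onto it. The same shim around a stand-alone anonymous mapping (AllocatePages is a hypothetical helper for this sketch, not V8's API):

    #include <sys/mman.h>
    #include <cstddef>

    // Older BSD-derived systems only define MAP_ANON; map the modern
    // spelling onto it, exactly as the patched file does.
    #if !defined(MAP_ANONYMOUS)
    #define MAP_ANONYMOUS MAP_ANON
    #endif

    // Reserve read-write anonymous memory, returning nullptr on failure.
    void* AllocatePages(size_t size) {
      void* result = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, /*fd=*/-1, /*offset=*/0);
      return result == MAP_FAILED ? nullptr : result;
    }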
diff --git a/www/ungoogled-chromium/files/patch-v8_src_builtins_x64_builtins-x64.cc b/www/ungoogled-chromium/files/patch-v8_src_builtins_x64_builtins-x64.cc
index 672dea30ce49..f327aba22943 100644
--- a/www/ungoogled-chromium/files/patch-v8_src_builtins_x64_builtins-x64.cc
+++ b/www/ungoogled-chromium/files/patch-v8_src_builtins_x64_builtins-x64.cc
@@ -1,4 +1,4 @@
---- v8/src/builtins/x64/builtins-x64.cc.orig 2023-11-04 07:08:51 UTC
+--- v8/src/builtins/x64/builtins-x64.cc.orig 2023-12-23 12:33:28 UTC
+++ v8/src/builtins/x64/builtins-x64.cc
@@ -44,6 +44,8 @@ namespace internal {
#define __ ACCESS_MASM(masm)
@@ -18,7 +18,7 @@
// Store the current pc as the handler offset. It's used later to create the
// handler table.
-@@ -3324,6 +3326,9 @@ void SwitchBackAndReturnPromise(MacroAssembler* masm,
+@@ -3339,6 +3341,9 @@ void SwitchBackAndReturnPromise(MacroAssembler* masm,
void GenerateExceptionHandlingLandingPad(MacroAssembler* masm,
Label* return_promise) {
int catch_handler = __ pc_offset();
@@ -28,7 +28,7 @@
// Restore rsp to free the reserved stack slots for the sections.
__ leaq(rsp, MemOperand(rbp, StackSwitchFrameConstants::kLastSpillOffset));
-@@ -3655,6 +3660,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* ma
+@@ -3696,6 +3701,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* ma
LoadJumpBuffer(masm, jmpbuf, true);
__ Trap();
__ bind(&resume);
@@ -36,7 +36,7 @@
__ LeaveFrame(StackFrame::STACK_SWITCH);
__ ret(0);
}
-@@ -3787,6 +3793,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, w
+@@ -3828,6 +3834,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, w
}
__ Trap();
__ bind(&suspend);
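The builtins-x64.cc hunks splice extra instructions into code written through V8's `__` shorthand, visible in the first hunk's `#define __ ACCESS_MASM(masm)` context line. A toy version of that idiom (this MacroAssembler is a printf stand-in, not V8's class, which emits real machine code):

    #include <cstdio>

    // Stand-in for V8's MacroAssembler.
    struct MacroAssembler {
      void movq(const char* dst, const char* src) {
        std::printf("movq %s, %s\n", dst, src);
      }
      void ret(int n) { std::printf("ret %d\n", n); }
    };

    // V8 defines ACCESS_MASM(masm) as masm-> so builtin generators
    // read like assembly listings.
    #define ACCESS_MASM(masm) masm->
    #define __ ACCESS_MASM(masm)

    void GenerateStub(MacroAssembler* masm) {
      __ movq("rax", "rbx");
      __ ret(0);
    }

    int main() { MacroAssembler masm; GenerateStub(&masm); }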
diff --git a/www/ungoogled-chromium/files/patch-v8_src_codegen_x64_macro-assembler-x64.cc b/www/ungoogled-chromium/files/patch-v8_src_codegen_x64_macro-assembler-x64.cc
index eceb79e8159e..0389b3197f8c 100644
--- a/www/ungoogled-chromium/files/patch-v8_src_codegen_x64_macro-assembler-x64.cc
+++ b/www/ungoogled-chromium/files/patch-v8_src_codegen_x64_macro-assembler-x64.cc
@@ -1,4 +1,4 @@
---- v8/src/codegen/x64/macro-assembler-x64.cc.orig 2023-11-04 07:08:51 UTC
+--- v8/src/codegen/x64/macro-assembler-x64.cc.orig 2023-12-23 12:33:28 UTC
+++ v8/src/codegen/x64/macro-assembler-x64.cc
@@ -51,6 +51,8 @@ Operand StackArgumentsAccessor::GetArgumentOperand(int
return Operand(rsp, kPCOnStackSize + index * kSystemPointerSize);
@@ -9,7 +9,7 @@
void MacroAssembler::Load(Register destination, ExternalReference source) {
if (root_array_available_ && options().enable_root_relative_access) {
intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source);
-@@ -2145,7 +2147,7 @@ void MacroAssembler::Switch(Register scratch, Register
+@@ -2144,7 +2146,7 @@ void MacroAssembler::Switch(Register scratch, Register
cmpq(reg, Immediate(num_labels));
j(above_equal, &fallthrough);
leaq(table, MemOperand(&jump_table));
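The Switch hunk's context lines show the emitted dispatch pattern: compare the selector against the label count, branch to the fallthrough on overflow, otherwise index a table of code addresses. The same shape in portable C++, with function pointers standing in for emitted labels (a sketch of the pattern, not the generated code):

    #include <cstddef>

    using Handler = void (*)();

    // Mirrors the emitted sequence: cmpq/j(above_equal) is the bounds
    // check, leaq plus the indexed jump is the table dispatch.
    void Dispatch(const Handler* table, size_t num_labels, size_t index,
                  Handler fallthrough) {
      if (index >= num_labels) {  // j(above_equal, &fallthrough)
        fallthrough();
        return;
      }
      table[index]();             // indexed jump through the table
    }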
diff --git a/www/ungoogled-chromium/files/patch-v8_src_compiler_backend_x64_code-generator-x64.cc b/www/ungoogled-chromium/files/patch-v8_src_compiler_backend_x64_code-generator-x64.cc
index 48b2a714e751..7d822e389c42 100644
--- a/www/ungoogled-chromium/files/patch-v8_src_compiler_backend_x64_code-generator-x64.cc
+++ b/www/ungoogled-chromium/files/patch-v8_src_compiler_backend_x64_code-generator-x64.cc
@@ -1,6 +1,6 @@
---- v8/src/compiler/backend/x64/code-generator-x64.cc.orig 2023-11-04 07:08:51 UTC
+--- v8/src/compiler/backend/x64/code-generator-x64.cc.orig 2023-12-23 12:33:28 UTC
+++ v8/src/compiler/backend/x64/code-generator-x64.cc
-@@ -6852,11 +6852,11 @@ void CodeGenerator::AssembleArchTableSwitch(Instructio
+@@ -6850,11 +6850,11 @@ void CodeGenerator::AssembleArchTableSwitch(Instructio
// target = table + (target - table)
__ addq(input, kScratchRegister);
// Jump to the target.
diff --git a/www/ungoogled-chromium/files/patch-v8_src_execution_isolate.cc b/www/ungoogled-chromium/files/patch-v8_src_execution_isolate.cc
index 7b5daad37d88..6504d9d7b9b4 100644
--- a/www/ungoogled-chromium/files/patch-v8_src_execution_isolate.cc
+++ b/www/ungoogled-chromium/files/patch-v8_src_execution_isolate.cc
@@ -1,4 +1,4 @@
---- v8/src/execution/isolate.cc.orig 2023-11-04 07:08:51 UTC
+--- v8/src/execution/isolate.cc.orig 2023-12-23 12:33:28 UTC
+++ v8/src/execution/isolate.cc
@@ -147,6 +147,10 @@
#include "src/execution/simulator-base.h"
@@ -11,7 +11,7 @@
extern "C" const uint8_t v8_Default_embedded_blob_code_[];
extern "C" uint32_t v8_Default_embedded_blob_code_size_;
extern "C" const uint8_t v8_Default_embedded_blob_data_[];
-@@ -4083,6 +4087,11 @@ void Isolate::InitializeDefaultEmbeddedBlob() {
+@@ -4190,6 +4194,11 @@ void Isolate::InitializeDefaultEmbeddedBlob() {
uint32_t code_size = DefaultEmbeddedBlobCodeSize();
const uint8_t* data = DefaultEmbeddedBlobData();
uint32_t data_size = DefaultEmbeddedBlobDataSize();
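The isolate.cc change lands inside InitializeDefaultEmbeddedBlob(), which consumes the snapshot symbols declared extern "C" in the first hunk's context. A self-contained sketch of that linkage pattern; in V8 the definitions are generated by mksnapshot, while here they are ordinary arrays with invented contents so the example links on its own:

    #include <cstdint>
    #include <cstdio>

    // Declarations as they appear in isolate.cc.
    extern "C" const uint8_t v8_Default_embedded_blob_code_[];
    extern "C" uint32_t v8_Default_embedded_blob_code_size_;

    // Stand-in definitions; V8 emits these from the snapshot build.
    extern "C" const uint8_t v8_Default_embedded_blob_code_[] = {0x90, 0xc3};
    extern "C" uint32_t v8_Default_embedded_blob_code_size_ = 2;

    int main() {
      std::printf("embedded blob: %u bytes, first byte 0x%02x\n",
                  static_cast<unsigned>(v8_Default_embedded_blob_code_size_),
                  v8_Default_embedded_blob_code_[0]);
    }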
diff --git a/www/ungoogled-chromium/files/patch-v8_src_maglev_x64_maglev-assembler-x64-inl.h b/www/ungoogled-chromium/files/patch-v8_src_maglev_x64_maglev-assembler-x64-inl.h
index edeee58e8ed2..dd699baba2f2 100644
--- a/www/ungoogled-chromium/files/patch-v8_src_maglev_x64_maglev-assembler-x64-inl.h
+++ b/www/ungoogled-chromium/files/patch-v8_src_maglev_x64_maglev-assembler-x64-inl.h
@@ -1,6 +1,6 @@
---- v8/src/maglev/x64/maglev-assembler-x64-inl.h.orig 2023-11-04 07:08:51 UTC
+--- v8/src/maglev/x64/maglev-assembler-x64-inl.h.orig 2023-12-23 12:33:28 UTC
+++ v8/src/maglev/x64/maglev-assembler-x64-inl.h
-@@ -231,7 +231,10 @@ void MaglevAssembler::PushReverse(T... vals) {
+@@ -232,7 +232,10 @@ void MaglevAssembler::PushReverse(T... vals) {
detail::PushAllHelper<T...>::PushReverse(this, vals...);
}
diff --git a/www/ungoogled-chromium/files/patch-v8_src_trap-handler_handler-inside-posix.cc b/www/ungoogled-chromium/files/patch-v8_src_trap-handler_handler-inside-posix.cc
index 527f0713117d..0bbb10231616 100644
--- a/www/ungoogled-chromium/files/patch-v8_src_trap-handler_handler-inside-posix.cc
+++ b/www/ungoogled-chromium/files/patch-v8_src_trap-handler_handler-inside-posix.cc
@@ -1,4 +1,4 @@
---- v8/src/trap-handler/handler-inside-posix.cc.orig 2023-11-04 07:08:51 UTC
+--- v8/src/trap-handler/handler-inside-posix.cc.orig 2023-12-23 12:33:28 UTC
+++ v8/src/trap-handler/handler-inside-posix.cc
@@ -61,6 +61,8 @@ namespace trap_handler {
#define CONTEXT_REG(reg, REG) &uc->uc_mcontext->__ss.__##reg
@@ -9,7 +9,7 @@
#else
#error "Unsupported platform."
#endif
-@@ -70,8 +72,12 @@ bool IsKernelGeneratedSignal(siginfo_t* info) {
+@@ -78,8 +80,12 @@ bool IsKernelGeneratedSignal(siginfo_t* info) {
// si_code at its default of 0 for signals that don’t originate in hardware.
// The other conditions are only relevant for Linux.
return info->si_code > 0 && info->si_code != SI_USER &&
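The trap-handler hunk extends IsKernelGeneratedSignal(); its visible context distinguishes hardware faults from user-sent signals via siginfo_t::si_code, and the codes the patch additionally admits sit in the elided lines. A minimal SA_SIGINFO handler around that predicate, reduced to the two conditions actually shown in the context:

    #include <signal.h>
    #include <cstdlib>

    // Per the upstream comment: a positive si_code that is not SI_USER
    // marks a signal raised by the kernel rather than by a process.
    static bool IsKernelGeneratedSignal(const siginfo_t* info) {
      return info->si_code > 0 && info->si_code != SI_USER;
    }

    static void SegvHandler(int, siginfo_t* info, void*) {
      if (!IsKernelGeneratedSignal(info)) std::_Exit(1);
      std::_Exit(2);  // A real handler would recover or re-raise here.
    }

    int main() {
      struct sigaction sa = {};
      sa.sa_sigaction = SegvHandler;
      sa.sa_flags = SA_SIGINFO;  // Deliver siginfo_t to the handler.
      sigaction(SIGSEGV, &sa, nullptr);
    }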
diff --git a/www/ungoogled-chromium/files/patch-v8_src_wasm_baseline_ia32_liftoff-assembler-ia32.h b/www/ungoogled-chromium/files/patch-v8_src_wasm_baseline_ia32_liftoff-assembler-ia32-inl.h
index 4189676383a3..573e4f35e54d 100644
--- a/www/ungoogled-chromium/files/patch-v8_src_wasm_baseline_ia32_liftoff-assembler-ia32.h
+++ b/www/ungoogled-chromium/files/patch-v8_src_wasm_baseline_ia32_liftoff-assembler-ia32-inl.h
@@ -1,6 +1,6 @@
---- v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h.orig 2023-11-04 07:08:51 UTC
-+++ v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
-@@ -492,7 +492,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst
+--- v8/src/wasm/baseline/ia32/liftoff-assembler-ia32-inl.h.orig 2023-12-23 12:33:28 UTC
++++ v8/src/wasm/baseline/ia32/liftoff-assembler-ia32-inl.h
+@@ -487,7 +487,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
@@ -9,7 +9,7 @@
LoadType type, uint32_t* protected_load_pc,
bool /* is_load_mem */, bool /* i64_offset */,
bool needs_shift) {
-@@ -572,7 +572,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Regis
+@@ -567,7 +567,7 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Regis
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
@@ -18,7 +18,7 @@
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc,
bool /* is_store_mem */, bool /* i64_offset */) {
-@@ -652,7 +652,7 @@ void LiftoffAssembler::Store(Register dst_addr, Regist
+@@ -647,7 +647,7 @@ void LiftoffAssembler::Store(Register dst_addr, Regist
}
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
@@ -27,7 +27,7 @@
LoadType type, LiftoffRegList /* pinned */,
bool /* i64_offset */) {
if (type.value() != LoadType::kI64Load) {
-@@ -672,7 +672,7 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst,
+@@ -667,7 +667,7 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst,
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
@@ -36,7 +36,7 @@
StoreType type, LiftoffRegList pinned,
bool /* i64_offset */) {
DCHECK_NE(offset_reg, no_reg);
-@@ -742,7 +742,7 @@ enum Binop { kAdd, kSub, kAnd, kOr, kXor, kExchange };
+@@ -737,7 +737,7 @@ enum Binop { kAdd, kSub, kAnd, kOr, kXor, kExchange };
inline void AtomicAddOrSubOrExchange32(LiftoffAssembler* lasm, Binop binop,
Register dst_addr, Register offset_reg,
@@ -45,7 +45,7 @@
LiftoffRegister value,
LiftoffRegister result, StoreType type) {
DCHECK_EQ(value, result);
-@@ -809,7 +809,7 @@ inline void AtomicAddOrSubOrExchange32(LiftoffAssemble
+@@ -804,7 +804,7 @@ inline void AtomicAddOrSubOrExchange32(LiftoffAssemble
}
inline void AtomicBinop32(LiftoffAssembler* lasm, Binop op, Register dst_addr,
@@ -54,7 +54,7 @@
LiftoffRegister value, LiftoffRegister result,
StoreType type) {
DCHECK_EQ(value, result);
-@@ -921,7 +921,7 @@ inline void AtomicBinop32(LiftoffAssembler* lasm, Bino
+@@ -916,7 +916,7 @@ inline void AtomicBinop32(LiftoffAssembler* lasm, Bino
}
inline void AtomicBinop64(LiftoffAssembler* lasm, Binop op, Register dst_addr,
@@ -63,7 +63,7 @@
LiftoffRegister value, LiftoffRegister result) {
// We need {ebx} here, which is the root register. As the root register it
// needs special treatment. As we use {ebx} directly in the code below, we
-@@ -1013,7 +1013,7 @@ inline void AtomicBinop64(LiftoffAssembler* lasm, Bino
+@@ -1008,7 +1008,7 @@ inline void AtomicBinop64(LiftoffAssembler* lasm, Bino
} // namespace liftoff
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
@@ -72,7 +72,7 @@
LiftoffRegister result, StoreType type,
bool /* i64_offset */) {
if (type.value() == StoreType::kI64Store) {
-@@ -1027,7 +1027,7 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Re
+@@ -1022,7 +1022,7 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Re
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
@@ -81,7 +81,7 @@
LiftoffRegister result, StoreType type,
bool /* i64_offset */) {
if (type.value() == StoreType::kI64Store) {
-@@ -1040,7 +1040,7 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Re
+@@ -1035,7 +1035,7 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Re
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
@@ -90,7 +90,7 @@
LiftoffRegister result, StoreType type,
bool /* i64_offset */) {
if (type.value() == StoreType::kI64Store) {
-@@ -1054,7 +1054,7 @@ void LiftoffAssembler::AtomicAnd(Register dst_addr, Re
+@@ -1049,7 +1049,7 @@ void LiftoffAssembler::AtomicAnd(Register dst_addr, Re
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
@@ -99,7 +99,7 @@
LiftoffRegister result, StoreType type,
bool /* i64_offset */) {
if (type.value() == StoreType::kI64Store) {
-@@ -1068,7 +1068,7 @@ void LiftoffAssembler::AtomicOr(Register dst_addr, Reg
+@@ -1063,7 +1063,7 @@ void LiftoffAssembler::AtomicOr(Register dst_addr, Reg
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
@@ -108,7 +108,7 @@
LiftoffRegister result, StoreType type,
bool /* i64_offset */) {
if (type.value() == StoreType::kI64Store) {
-@@ -1082,7 +1082,7 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Re
+@@ -1077,7 +1077,7 @@ void LiftoffAssembler::AtomicXor(Register dst_addr, Re
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
@@ -117,7 +117,7 @@
LiftoffRegister value,
LiftoffRegister result, StoreType type,
bool /* i64_offset */) {
-@@ -1097,7 +1097,7 @@ void LiftoffAssembler::AtomicExchange(Register dst_add
+@@ -1092,7 +1092,7 @@ void LiftoffAssembler::AtomicExchange(Register dst_add
}
void LiftoffAssembler::AtomicCompareExchange(