#include "entities.hpp" #include "ecs.hpp" #include "game-settings.hpp" #include "math-helpers.hpp" #include "physics.hpp" #include "pk.h" #include "plugins.hpp" #include "static-missing-texture.hpp" #include "vendor-cgltf-include.hpp" #include "vendor-stb-image-include.h" #include "window.hpp" #include #include #include #include struct EntityTypeMaster { pk_membucket *bkt; pk_bkt_arr_t bc{}; } et_mstr; struct EntToTeardown { EntityHandle handle = EntityHandle_MAX; CompGrBinds *grBinds[1] = {nullptr}; uint8_t ticksToWait = 0; }; pk_arr_t EntityTypesToTeardown{}; void EntityType_Init() { et_mstr.bkt = pk_mem_bucket_create("pk_bkt_arr enttype", 1024 * 1024, PK_MEMBUCKET_FLAG_NONE); new (&et_mstr.bc) pk_bkt_arr_t{ pk_bkt_arr_handle_MAX_constexpr, et_mstr.bkt, et_mstr.bkt }; pk_arr_reserve(&EntityTypesToTeardown, 16); } EntityType *EntityType_Create(pk_uuid uuid) { EntityTypeHandle entTypeHandle{pk_bkt_arr_new_handle(&et_mstr.bc)}; EntityType &entityType = et_mstr.bc[entTypeHandle]; new (&entityType) EntityType{}; entityType.uuid = uuid; ECS_CreateEntity(&entityType); return &entityType; } Entity_Base *EntityType_CreateGenericInstance(EntityType *et, Entity_Base *levelEnt, CompInstance *srcInstance, InstPos *instPos) { assert(et != nullptr); pk_uuid uuid = pk_uuid_zed; Entity_Base *genericEntity = ECS_CreateGenericEntity(); ECS_CreateEntity(genericEntity, levelEnt); if (srcInstance != nullptr) { uuid = srcInstance->uuid; } for (int64_t i = 0; i < et->detailsCount; ++i) { auto &etd = et->details[i]; auto *compInst = ECS_CreateInstance(genericEntity, uuid, etd.grBinds, nullptr); btVector3 scaling{1.f,1.f,1.f}; btTransform posRot{}; btScalar mass = 1.f; if (srcInstance != nullptr) { if (srcInstance->collisionCallback.name[0] != '\0') { strncpy(compInst->collisionCallback.name, srcInstance->collisionCallback.name, CallbackSignatureLength); PkePlugin_SetSignatureFunc(&compInst->collisionCallback); } compInst->physicsLayer = srcInstance->physicsLayer; compInst->physicsMask = srcInstance->physicsMask; } else { compInst->physicsLayer = etd.bt.startingCollisionLayer; compInst->physicsMask = etd.bt.startingCollisionMask; posRot.setIdentity(); } if (instPos != nullptr) { posRot = instPos->posRot; mass = instPos->mass; scaling = instPos->scale; } else { mass = etd.bt.startingMass; posRot.setIdentity(); } btVector3 localInertia(0, 0, 0); etd.bt.shape->calculateLocalInertia(mass, localInertia); compInst->bt.motionState = pk_new(MemBkt_Bullet); new (compInst->bt.motionState) btDefaultMotionState(posRot); compInst->bt.rigidBody = pk_new(MemBkt_Bullet); new (compInst->bt.rigidBody) btRigidBody(mass, compInst->bt.motionState, etd.bt.shape, localInertia); compInst->bt.rigidBody->setLinearVelocity(btVector3(0,0,0)); compInst->bt.rigidBody->setAngularVelocity(btVector3(0,0,0)); compInst->bt.rigidBody->getCollisionShape()->setLocalScaling(scaling); BtDynamicsWorld->addRigidBody(compInst->bt.rigidBody); compInst->bt.rigidBody->getBroadphaseProxy()->m_collisionFilterGroup = static_cast(compInst->physicsLayer); compInst->bt.rigidBody->getBroadphaseProxy()->m_collisionFilterMask = static_cast(compInst->physicsMask); compInst->bt.rigidBody->setUserPointer(reinterpret_cast(compInst)); } return genericEntity; } EntityType *EntityType_FindByTypeCode(const char *typeCode) { auto et_find_cb = [](void *user_data, const void *user_obj_data, const void *arr_obj_data) { (void)user_data; const char *typeCode = reinterpret_cast(user_obj_data); const EntityType &entityType = *reinterpret_cast(arr_obj_data); if (entityType.handle == 
EntityHandle_MAX) { return false; } return strcmp(typeCode, entityType.entityTypeCode.val) == 0; }; EntityTypeHandle handle { pk_bkt_arr_find_first_handle(&et_mstr.bc, et_find_cb, NULL, typeCode) }; if (handle == EntityTypeHandle_MAX) { return nullptr; } return &et_mstr.bc[handle]; } EntityType *EntityType_FindByEntityHandle_Inner(EntityHandle handle) { if (pk_bkt_arr_handle_validate(ECS_GetEntities(), handle) != PK_BKT_ARR_HANDLE_VALIDATION_VALID) { return nullptr; } auto et_find_cb = [](void *user_data, const void *user_obj_data, const void *arr_obj_data) { (void)user_data; const EntityHandle &handle = *reinterpret_cast(user_obj_data); const EntityType &entityType = *reinterpret_cast(arr_obj_data); return entityType.handle == handle; }; EntityTypeHandle found_handle { pk_bkt_arr_find_first_handle(&et_mstr.bc, et_find_cb, NULL, &handle) }; if (found_handle == EntityTypeHandle_MAX) { return nullptr; } return &et_mstr.bc[found_handle]; } EntityType *EntityType_FindByEntityHandle(EntityHandle handle) { Entity_Base *base_entity; EntityType *ret = nullptr; EntityHandle h = handle; while (ret == nullptr) { ret = EntityType_FindByEntityHandle_Inner(h); if (ret != nullptr) return ret; base_entity = ECS_GetEntity(h); if (base_entity == nullptr) break; h = base_entity->parentHandle; if (h == EntityHandle_MAX) break; } return nullptr; } struct EntityTypeDetails_LoadHelperStruct { EntityTypeDetails *etd = nullptr; AssetHandle textureAssetHandle = AssetHandle_MAX; const Asset *textureAsset = nullptr; struct { pk_arr_t vertexes; pk_arr_t normals; pk_arr_t uv; pk_arr_t indexes; } physDbg; }; struct EntityType_LoadHelperStruct { struct pk_membucket *bkt = nullptr; EntityType &et; pk_arr_t etdHelpers; const cgltf_data *gltfData = nullptr; const Asset *modelBinAsset = nullptr; pk_arr_t vertMemoryRequirements; pk_arr_t instMemoryRequirements; pk_arr_t physVertMemoryRequirements; pk_arr_t textureMemoryRequirements; VkMemoryRequirements vertMemoryRequirementsCombined{}; VkMemoryRequirements instMemoryRequirementsCombined{}; VkMemoryRequirements physVertMemoryRequirementsCombined{}; VkMemoryRequirements textureMemoryRequirementsCombined{}; }; void EntityType_Inner_DestroyDescriptors(EntityType *et) { assert(et != nullptr); for (long k = 0; k < et->detailsCount; ++k) { if (et->details[k].grBinds == nullptr || et->details[k].grBinds == CAFE_BABE(CompGrBinds)) { continue; } if (et->details[k].grBinds->vkDescriptorSets != nullptr && et->details[k].vkDescriptorPool != VK_NULL_HANDLE) { // 2023-09-27 - JCB // We are not setting the pool flag for allowing freeing descriptor sets // so all we need to do is destroy the pool // If we switch to a global pool, we will need to free here, and // destroy the pool outside of this loop vkDestroyDescriptorPool(vkDevice, et->details[k].vkDescriptorPool, vkAllocator); pk_delete_arr(et->details[k].grBinds->vkDescriptorSets, prevSwapchainLength, MemBkt_Vulkan); et->details[k].grBinds->vkDescriptorSets = CAFE_BABE(VkDescriptorSet); } } } void EntityType_Inner_UpdateDescriptorSets(EntityType *et) { // descriptor pool & sets VkDescriptorPoolSize descriptorPoolSizes[2]; descriptorPoolSizes[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptorPoolSizes[0].descriptorCount = swapchainLength; descriptorPoolSizes[1].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptorPoolSizes[1].descriptorCount = swapchainLength; VkDescriptorPoolCreateInfo vkDescriptorPoolCreateInfo; vkDescriptorPoolCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; vkDescriptorPoolCreateInfo.pNext 
= nullptr; vkDescriptorPoolCreateInfo.flags = 0; vkDescriptorPoolCreateInfo.maxSets = swapchainLength; vkDescriptorPoolCreateInfo.poolSizeCount = (uint32_t)2; vkDescriptorPoolCreateInfo.pPoolSizes = descriptorPoolSizes; VkDescriptorSetLayout *descriptorSets = pk_new_arr(swapchainLength, pkeSettings.mem_bkt.game_transient); for (long i = 0; i < swapchainLength; ++i) { descriptorSets[i] = pkePipelines.descr_layouts.named.ubo_txtr; } VkDescriptorSetAllocateInfo vkDescriptorSetAllocateInfo; vkDescriptorSetAllocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; vkDescriptorSetAllocateInfo.pNext = nullptr; vkDescriptorSetAllocateInfo.descriptorSetCount = swapchainLength; vkDescriptorSetAllocateInfo.pSetLayouts = descriptorSets; VkWriteDescriptorSet *writeDescriptorSets = pk_new_arr(2 * swapchainLength, pkeSettings.mem_bkt.game_transient); for (long i = 0; i < 2 * swapchainLength; ++i) { writeDescriptorSets[i].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; writeDescriptorSets[i].pNext = nullptr; writeDescriptorSets[i].dstSet = nullptr; writeDescriptorSets[i].dstBinding = i % 2; writeDescriptorSets[i].dstArrayElement = 0; writeDescriptorSets[i].descriptorCount = 1; writeDescriptorSets[i].descriptorType = (i % 2) == 0 ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; writeDescriptorSets[i].pImageInfo = nullptr; writeDescriptorSets[i].pBufferInfo = nullptr; writeDescriptorSets[i].pTexelBufferView = nullptr; } VkDescriptorImageInfo textureDescriptorInfo; textureDescriptorInfo.sampler = global_sampler; textureDescriptorInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkDescriptorBufferInfo *vkDescriptorBufferInfo = pk_new_arr(swapchainLength, pkeSettings.mem_bkt.game_transient); for (long i = 0; i < et->detailsCount; ++i) { EntityTypeDetails *etd = &et->details[i]; // consider making me a global pool auto vkResult = vkCreateDescriptorPool(vkDevice, &vkDescriptorPoolCreateInfo, vkAllocator, &etd->vkDescriptorPool); assert(vkResult == VK_SUCCESS); vkDescriptorSetAllocateInfo.descriptorPool = etd->vkDescriptorPool; etd->grBinds->vkDescriptorSets = pk_new_arr(swapchainLength, MemBkt_Vulkan); for (long i = 0; i < swapchainLength; ++i) { etd->grBinds->vkDescriptorSets[i] = VkDescriptorSet{}; } vkResult = vkAllocateDescriptorSets(vkDevice, &vkDescriptorSetAllocateInfo, etd->grBinds->vkDescriptorSets); assert(vkResult == VK_SUCCESS); textureDescriptorInfo.imageView = etd->textureImageView; for (long i = 0; i < swapchainLength; ++i) { vkDescriptorBufferInfo[i].buffer = UniformBuffers[i]; vkDescriptorBufferInfo[i].offset = 0; vkDescriptorBufferInfo[i].range = sizeof(UniformBufferObject); long uboIndex = i * 2; long samplerIndex = uboIndex + 1; writeDescriptorSets[uboIndex].pBufferInfo = &vkDescriptorBufferInfo[i]; writeDescriptorSets[uboIndex].dstSet = etd->grBinds->vkDescriptorSets[i]; writeDescriptorSets[samplerIndex].pImageInfo = &textureDescriptorInfo; writeDescriptorSets[samplerIndex].dstSet = etd->grBinds->vkDescriptorSets[i]; } vkUpdateDescriptorSets(vkDevice, 2 * swapchainLength, writeDescriptorSets, 0, nullptr); } } void EntityType_Inner_UpdateDescriptorSets_EvCallabck(void *mgr_data, void *entity_data, void *ev_data) { (void)mgr_data; (void)ev_data; EntityHandle eh; uint64_t id = reinterpret_cast(entity_data); eh.b = (pk_handle_bucket_index_T)(id >> 32); eh.i = (pk_handle_item_index_T)((id << 32) >> 32); EntityType *et = EntityType_FindByEntityHandle(eh); assert(et != nullptr); EntityType_Inner_DestroyDescriptors(et); 
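    // Rebuild the per-detail descriptor pool and sets against the current swapchainLength;
    // this callback is registered for pke_ev_id_framebuffer_length_changed in EntityType_Load.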
EntityType_Inner_UpdateDescriptorSets(et); } void EntityType_PreLoad(EntityType_LoadHelperStruct &helper) { const long expectedBufferCount = 4; pk_arr_reserve(&helper.vertMemoryRequirements, helper.et.detailsCount * expectedBufferCount); pk_arr_reserve(&helper.instMemoryRequirements, helper.et.detailsCount); pk_arr_reserve(&helper.physVertMemoryRequirements, helper.et.detailsCount * expectedBufferCount); pk_arr_reserve(&helper.textureMemoryRequirements, helper.et.detailsCount); VkBuffer buffer; VkBufferCreateInfo bufferCI; bufferCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bufferCI.pNext = nullptr; bufferCI.flags = {}; bufferCI.sharingMode = VK_SHARING_MODE_EXCLUSIVE; bufferCI.queueFamilyIndexCount = 1; bufferCI.pQueueFamilyIndices = &graphicsFamilyIndex; VkImage image; VkImageCreateInfo vkImageCreateInfo; vkImageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; vkImageCreateInfo.pNext = nullptr; vkImageCreateInfo.flags = 0; vkImageCreateInfo.imageType = VK_IMAGE_TYPE_2D; vkImageCreateInfo.mipLevels = 1; vkImageCreateInfo.arrayLayers = 1; vkImageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; vkImageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; vkImageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; vkImageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; vkImageCreateInfo.queueFamilyIndexCount = 0; vkImageCreateInfo.pQueueFamilyIndices = nullptr; vkImageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; for (int64_t i = 0; i < helper.et.detailsCount; ++i) { EntityTypeDetails &etd = helper.et.details[i]; { EntityTypeDetails_LoadHelperStruct tmp{}; tmp.etd = &etd; etd.grBinds = ECS_CreateGrBinds(&helper.et);; tmp.physDbg.vertexes.bkt = helper.bkt; tmp.physDbg.normals.bkt = helper.bkt; tmp.physDbg.uv.bkt = helper.bkt; tmp.physDbg.indexes.bkt = helper.bkt; pk_arr_append_t(&helper.etdHelpers, tmp); } EntityTypeDetails_LoadHelperStruct &etdHelper = helper.etdHelpers[helper.etdHelpers.next-1]; /* * phys */ btConvexHullShape *shape; { shape = pk_new(MemBkt_Bullet); btScalar *vertDataPointer = reinterpret_cast(const_cast(helper.modelBinAsset->ptr)); /* TODO JCB - 2024-01-02 * we shouldn't assume that the first index is the vertexes */ uint64_t vertIndex = (i * expectedBufferCount); vertDataPointer += helper.gltfData->accessors[vertIndex].buffer_view->offset; new (shape) btConvexHullShape(vertDataPointer, helper.gltfData->accessors[vertIndex].count, helper.gltfData->accessors[vertIndex].stride); shape->optimizeConvexHull(); shape->initializePolyhedralFeatures(); etd.bt.shape = shape; } assert(shape != nullptr); // convex hull debug const btConvexPolyhedron *pol = shape->getConvexPolyhedron(); int count = pol->m_vertices.size(); pk_arr_reserve(&etdHelper.physDbg.vertexes, count); pk_arr_reserve(&etdHelper.physDbg.normals, count); pk_arr_reserve(&etdHelper.physDbg.uv, count); pk_arr_reserve(&etdHelper.physDbg.indexes, count); for (long k = 0; k < pol->m_vertices.size(); ++k) { btVector3 norm = pol->m_vertices[k]; pk_arr_append_t(&etdHelper.physDbg.vertexes, {}); glm::vec3 &glmVert = etdHelper.physDbg.vertexes[etdHelper.physDbg.vertexes.next-1]; BulletToGlm(norm, glmVert); norm.safeNormalize(); pk_arr_append_t(&etdHelper.physDbg.normals, {}); glm::vec3 &glmNorm = etdHelper.physDbg.normals[etdHelper.physDbg.normals.next-1]; BulletToGlm(norm, glmNorm); pk_arr_append_t(&etdHelper.physDbg.uv, {norm.x(), norm.y()}); } for (long ii = 0; ii < pol->m_faces.size(); ++ii) { for (long k = 2; k < pol->m_faces[ii].m_indices.size(); ++k) { 
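                // Fan-triangulate the face: every emitted triangle shares the face's first
                // index, giving (indices[0], indices[k - 1], indices[k]).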
pk_arr_append_t(&etdHelper.physDbg.indexes, (uint16_t)pol->m_faces[ii].m_indices[0]); pk_arr_append_t(&etdHelper.physDbg.indexes, (uint16_t)pol->m_faces[ii].m_indices[k - 1]); pk_arr_append_t(&etdHelper.physDbg.indexes, (uint16_t)pol->m_faces[ii].m_indices[k]); } } for (int64_t index = 0; index < expectedBufferCount; ++index) { /* TODO JCB - 2024-01-02 * lol */ if (index == 0) { bufferCI.size = sizeof(glm::vec3) * etdHelper.physDbg.vertexes.next; } else if (index == 1) { bufferCI.size = sizeof(glm::vec3) * etdHelper.physDbg.normals.next; } else if (index == 2) { bufferCI.size = sizeof(glm::vec2) * etdHelper.physDbg.uv.next; } else if (index == 3) { bufferCI.size = sizeof(uint16_t) * etdHelper.physDbg.indexes.next; } if (index == 3) { bufferCI.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT; } else { bufferCI.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; } vkCreateBuffer(vkDevice, &bufferCI, vkAllocator, &buffer); pk_arr_append_t(&helper.physVertMemoryRequirements, {}); vkGetBufferMemoryRequirements(vkDevice, buffer, &helper.physVertMemoryRequirements[helper.physVertMemoryRequirements.next-1]); vkDestroyBuffer(vkDevice, buffer, vkAllocator); } /* * verts */ for (int64_t index = 0; index < expectedBufferCount; ++index) { const auto &acc = helper.gltfData->accessors[(expectedBufferCount * i) + index]; bufferCI.size = acc.buffer_view->size; /* TODO JCB - 2024-01-02 * If our bindings ever change, == 3 isn't good enough to determine * which of all the buffers is the index buffer */ if (index == 3) { bufferCI.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT; } else { bufferCI.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; } vkCreateBuffer(vkDevice, &bufferCI, vkAllocator, &buffer); pk_arr_append_t(&helper.vertMemoryRequirements, {}); vkGetBufferMemoryRequirements(vkDevice, buffer, &helper.vertMemoryRequirements[helper.vertMemoryRequirements.next-1]); vkDestroyBuffer(vkDevice, buffer, vkAllocator); } /* * instance */ uint32_t instBufferStartingCount = etd.startingInstanceCount; instBufferStartingCount = instBufferStartingCount < 1 ? 
1 : instBufferStartingCount; bufferCI.size = sizeof(glm::mat4) * instBufferStartingCount; bufferCI.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; vkCreateBuffer(vkDevice, &bufferCI, vkAllocator, &buffer); pk_arr_append_t(&helper.instMemoryRequirements, {}); vkGetBufferMemoryRequirements(vkDevice, buffer, &helper.instMemoryRequirements[helper.instMemoryRequirements.next-1]); vkDestroyBuffer(vkDevice, buffer, vkAllocator); /* * texture */ int32_t pixelWidth, pixelHeight, pixelChannels; etdHelper.textureAssetHandle = AssetHandle{AM_GetHandle(etd.textureAssetKey)}; if (etdHelper.textureAssetHandle != AssetHandle_MAX) { etdHelper.textureAsset = AM_Get(etdHelper.textureAssetHandle); stbi_uc *pixels = stbi_load_from_memory(static_cast(const_cast(etdHelper.textureAsset->ptr)), etdHelper.textureAsset->size, &pixelWidth, &pixelHeight, &pixelChannels, STBI_rgb_alpha); assert(pixels != nullptr && "sbti_load failed to load image."); stbi_image_free(pixels); } else { pixelWidth = 2; pixelHeight = 2; pixelChannels = 4; } VkFormat imageFormat = VK_FORMAT_R8G8B8A8_SRGB; if (pixelChannels == 3) { imageFormat = VK_FORMAT_R8G8B8_SRGB; } else if (pixelChannels == 2) { imageFormat = VK_FORMAT_R8G8_SRGB; } else if (pixelChannels == 1) { imageFormat = VK_FORMAT_R8_SRGB; } else { assert(pixelChannels != 0 && pixelChannels < 5); } vkImageCreateInfo.format = imageFormat; vkImageCreateInfo.extent = VkExtent3D { .width = static_cast(pixelWidth), .height = static_cast(pixelHeight), .depth = 1 }; vkCreateImage(vkDevice, &vkImageCreateInfo, vkAllocator, &image); pk_arr_append_t(&helper.textureMemoryRequirements, {}); vkGetImageMemoryRequirements(vkDevice, image, &helper.textureMemoryRequirements[helper.textureMemoryRequirements.next-1]); vkDestroyImage(vkDevice, image, vkAllocator); } VkMemoryAllocateInfo vkMemoryAllocateInfo; vkMemoryAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; vkMemoryAllocateInfo.pNext = nullptr; /* * phys */ CalculateCombinedMemReqs(helper.physVertMemoryRequirements.next, reinterpret_cast(helper.physVertMemoryRequirements.data), helper.physVertMemoryRequirementsCombined); vkMemoryAllocateInfo.allocationSize = helper.physVertMemoryRequirementsCombined.size; vkMemoryAllocateInfo.memoryTypeIndex = FindMemoryTypeIndex(helper.physVertMemoryRequirementsCombined.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); vkAllocateMemory(vkDevice, &vkMemoryAllocateInfo, vkAllocator, &helper.et.deviceMemoryPhysVert); /* * verts */ CalculateCombinedMemReqs(helper.vertMemoryRequirements.next, reinterpret_cast(helper.vertMemoryRequirements.data), helper.vertMemoryRequirementsCombined); vkMemoryAllocateInfo.allocationSize = helper.vertMemoryRequirementsCombined.size; vkMemoryAllocateInfo.memoryTypeIndex = FindMemoryTypeIndex(helper.vertMemoryRequirementsCombined.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); vkAllocateMemory(vkDevice, &vkMemoryAllocateInfo, vkAllocator, &helper.et.deviceMemoryVert); /* * instance */ CalculateCombinedMemReqs(helper.instMemoryRequirements.next, reinterpret_cast(helper.instMemoryRequirements.data), helper.instMemoryRequirementsCombined); vkMemoryAllocateInfo.allocationSize = helper.instMemoryRequirementsCombined.size; vkMemoryAllocateInfo.memoryTypeIndex = FindMemoryTypeIndex(helper.instMemoryRequirementsCombined.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); vkAllocateMemory(vkDevice, &vkMemoryAllocateInfo, vkAllocator, &helper.et.deviceMemoryInst); /* * texture */ 
CalculateCombinedMemReqs(helper.textureMemoryRequirements.next, reinterpret_cast(helper.textureMemoryRequirements.data), helper.textureMemoryRequirementsCombined); vkMemoryAllocateInfo.allocationSize = helper.textureMemoryRequirementsCombined.size; vkMemoryAllocateInfo.memoryTypeIndex = FindMemoryTypeIndex(helper.textureMemoryRequirementsCombined.memoryTypeBits, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT); if (vkMemoryAllocateInfo.memoryTypeIndex == 0) { vkMemoryAllocateInfo.memoryTypeIndex = FindMemoryTypeIndex(helper.textureMemoryRequirementsCombined.memoryTypeBits, 0); } vkAllocateMemory(vkDevice, &vkMemoryAllocateInfo, vkAllocator, &helper.et.deviceMemoryTexture); } void EntityType_LoadTexture(EntityType_LoadHelperStruct &helper, const int64_t index) { PKVK_TmpBufferDetails tmpBufferDetails{}; EntityTypeDetails_LoadHelperStruct &etdHelper = helper.etdHelpers[index]; int32_t pixelWidth, pixelHeight, pixelChannels; stbi_uc *pixels = nullptr; if (etdHelper.textureAssetHandle != AssetHandle_MAX) { pixels = stbi_load_from_memory(static_cast(const_cast(etdHelper.textureAsset->ptr)), etdHelper.textureAsset->size, &pixelWidth, &pixelHeight, &pixelChannels, STBI_rgb_alpha); assert(pixels != nullptr && "sbti_load failed to load image."); } else { pixelWidth = 2; pixelHeight = 2; pixelChannels = 4; pixels = const_cast(&PKE_MISSING_TEXTURE_DATA[0]); } uint32_t imageSizeBytes = pixelWidth * pixelHeight * pixelChannels; VkFormat imageFormat = VK_FORMAT_R8G8B8A8_SRGB; if (pixelChannels == 3) { imageFormat = VK_FORMAT_R8G8B8_SRGB; } else if (pixelChannels == 2) { imageFormat = VK_FORMAT_R8G8_SRGB; } else if (pixelChannels == 1) { imageFormat = VK_FORMAT_R8_SRGB; } else { assert(pixelChannels != 0 && pixelChannels < 5); } VkImageCreateInfo vkImageCreateInfo; vkImageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; vkImageCreateInfo.pNext = nullptr; vkImageCreateInfo.flags = 0; vkImageCreateInfo.imageType = VK_IMAGE_TYPE_2D; vkImageCreateInfo.format = imageFormat; vkImageCreateInfo.extent = VkExtent3D { .width = static_cast(pixelWidth), .height = static_cast(pixelHeight), .depth = 1 }; vkImageCreateInfo.mipLevels = 1; vkImageCreateInfo.arrayLayers = 1; vkImageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; vkImageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; vkImageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; vkImageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; vkImageCreateInfo.queueFamilyIndexCount = 0; vkImageCreateInfo.pQueueFamilyIndices = nullptr; vkImageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; vkCreateImage(vkDevice, &vkImageCreateInfo, vkAllocator, &etdHelper.etd->textureImage); // TODO calculate padding vkBindImageMemory(vkDevice, etdHelper.etd->textureImage, helper.et.deviceMemoryTexture, 0); VkImageViewCreateInfo vkImageViewCreateInfo; vkImageViewCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; vkImageViewCreateInfo.pNext = nullptr; vkImageViewCreateInfo.flags = 0; vkImageViewCreateInfo.image = etdHelper.etd->textureImage; // TODO animated textures vkImageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D; vkImageViewCreateInfo.format = imageFormat; vkImageViewCreateInfo.components = VkComponentMapping { .r = VK_COMPONENT_SWIZZLE_IDENTITY, .g = VK_COMPONENT_SWIZZLE_IDENTITY, .b = VK_COMPONENT_SWIZZLE_IDENTITY, .a = VK_COMPONENT_SWIZZLE_IDENTITY, }; vkImageViewCreateInfo.subresourceRange = VkImageSubresourceRange { .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0, // TODO MapMap .levelCount = 1, 
.baseArrayLayer = 0, // TODO animated textures .layerCount = 1, }; vkCreateImageView(vkDevice, &vkImageViewCreateInfo, vkAllocator, &etdHelper.etd->textureImageView); // transition image layout and copy to buffer PKVK_BeginBuffer(transferFamilyIndex, imageSizeBytes, tmpBufferDetails); memcpy(tmpBufferDetails.deviceData, pixels, imageSizeBytes); { VkImageMemoryBarrier vkImageMemoryBarrier; vkImageMemoryBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; vkImageMemoryBarrier.pNext = nullptr; vkImageMemoryBarrier.srcAccessMask = {}; vkImageMemoryBarrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; vkImageMemoryBarrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; vkImageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; vkImageMemoryBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; vkImageMemoryBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; vkImageMemoryBarrier.image = etdHelper.etd->textureImage; vkImageMemoryBarrier.subresourceRange = VkImageSubresourceRange { .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0, // TODO MipMap .levelCount = 1, .baseArrayLayer = 0, // TODO animated textures .layerCount = 1, }; VkCommandBufferBeginInfo vkCommandBufferBeginInfo; vkCommandBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkCommandBufferBeginInfo.pNext = nullptr; vkCommandBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT; vkCommandBufferBeginInfo.pInheritanceInfo = nullptr; vkBeginCommandBuffer(tmpBufferDetails.cmdBuffer, &vkCommandBufferBeginInfo); vkCmdPipelineBarrier(tmpBufferDetails.cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &vkImageMemoryBarrier); // TODO animated textures / texture array - make this an array VkBufferImageCopy vkBufferImageCopy; vkBufferImageCopy.bufferOffset = 0; vkBufferImageCopy.bufferRowLength = pixelWidth; vkBufferImageCopy.bufferImageHeight = pixelHeight; vkBufferImageCopy.imageSubresource = VkImageSubresourceLayers { .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .mipLevel = 0, // TODO animated textures .baseArrayLayer = 0, .layerCount = 1, }; vkBufferImageCopy.imageOffset = VkOffset3D { .x = 0, .y = 0, .z = 0, }; vkBufferImageCopy.imageExtent = VkExtent3D { .width = static_cast(pixelWidth), .height = static_cast(pixelHeight), .depth = 1, }; vkCmdCopyBufferToImage(tmpBufferDetails.cmdBuffer, tmpBufferDetails.buffer, etdHelper.etd->textureImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &vkBufferImageCopy); vkEndCommandBuffer(tmpBufferDetails.cmdBuffer); VkSubmitInfo submitInfo; submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submitInfo.pNext = nullptr; submitInfo.waitSemaphoreCount = 0; submitInfo.pWaitSemaphores = nullptr; submitInfo.pWaitDstStageMask = nullptr; submitInfo.commandBufferCount = 1; submitInfo.pCommandBuffers = &tmpBufferDetails.cmdBuffer; submitInfo.signalSemaphoreCount = 0; submitInfo.pSignalSemaphores = nullptr; vkQueueSubmit(tmpBufferDetails.queue, 1, &submitInfo, nullptr); vkQueueWaitIdle(tmpBufferDetails.queue); vkResetCommandBuffer(tmpBufferDetails.cmdBuffer, 0); } PKVK_EndBuffer(tmpBufferDetails); PKVK_BeginBuffer(graphicsFamilyIndex, 0, tmpBufferDetails); { VkImageMemoryBarrier vkImageMemoryBarrier; vkImageMemoryBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; vkImageMemoryBarrier.pNext = nullptr; vkImageMemoryBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; vkImageMemoryBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; vkImageMemoryBarrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; 
vkImageMemoryBarrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; vkImageMemoryBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; vkImageMemoryBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; vkImageMemoryBarrier.image = etdHelper.etd->textureImage; vkImageMemoryBarrier.subresourceRange = VkImageSubresourceRange { .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0, // TODO MipMap .levelCount = 1, .baseArrayLayer = 0, // TODO animated textures .layerCount = 1, }; VkCommandBufferBeginInfo vkCommandBufferBeginInfo; vkCommandBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkCommandBufferBeginInfo.pNext = nullptr; vkCommandBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT; vkCommandBufferBeginInfo.pInheritanceInfo = nullptr; vkBeginCommandBuffer(tmpBufferDetails.cmdBuffer, &vkCommandBufferBeginInfo); vkCmdPipelineBarrier(tmpBufferDetails.cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, nullptr, 0, nullptr, 1, &vkImageMemoryBarrier); vkEndCommandBuffer(tmpBufferDetails.cmdBuffer); VkSubmitInfo submitInfo; submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submitInfo.pNext = nullptr; submitInfo.waitSemaphoreCount = 0; submitInfo.pWaitSemaphores = nullptr; submitInfo.pWaitDstStageMask = nullptr; submitInfo.commandBufferCount = 1; submitInfo.pCommandBuffers = &tmpBufferDetails.cmdBuffer; submitInfo.signalSemaphoreCount = 0; submitInfo.pSignalSemaphores = nullptr; vkQueueSubmit(tmpBufferDetails.queue, 1, &submitInfo, nullptr); vkQueueWaitIdle(tmpBufferDetails.queue); vkResetCommandBuffer(tmpBufferDetails.cmdBuffer, 0); } PKVK_EndBuffer(tmpBufferDetails); // TODO double-check this? if (etdHelper.textureAssetHandle != AssetHandle_MAX) { stbi_image_free(pixels); AM_Release(etdHelper.textureAssetHandle); } EntityType_Inner_UpdateDescriptorSets(&helper.et); } void EntityType_LoadMesh(EntityType_LoadHelperStruct &helper, const int64_t meshIndex) { PKVK_TmpBufferDetails tmpBufferDetails{}; EntityTypeDetails_LoadHelperStruct &etdHelper = helper.etdHelpers[meshIndex]; // create and bind buffers // TODO load me from gltf data const long expectedBufferCount = 4; int64_t accessorIndexVertex = (meshIndex * expectedBufferCount) + 0; int64_t accessorIndexNormal = (meshIndex * expectedBufferCount) + 1; int64_t accessorIndexUV = (meshIndex * expectedBufferCount) + 2; int64_t accessorIndexIndex = (meshIndex * expectedBufferCount) + 3; VkBufferCreateInfo bufferCI; bufferCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bufferCI.pNext = nullptr; bufferCI.flags = {}; bufferCI.sharingMode = VK_SHARING_MODE_EXCLUSIVE; bufferCI.queueFamilyIndexCount = 1; bufferCI.pQueueFamilyIndices = &graphicsFamilyIndex; long index = 0; uint32_t runningOffset = 0; uint32_t alignmentPadding = 0; // vertex const auto &accVert = helper.gltfData->accessors[accessorIndexVertex]; uint32_t offsetVert = runningOffset; uint32_t sizeVert = accVert.buffer_view->size; etdHelper.etd->grBinds->vertexBD.firstBinding = index; etdHelper.etd->grBinds->vertexBD.bindingCount = 1; alignmentPadding = sizeVert % helper.vertMemoryRequirementsCombined.alignment; alignmentPadding = alignmentPadding == 0 ? 
0 : helper.vertMemoryRequirementsCombined.alignment - alignmentPadding; sizeVert += alignmentPadding; bufferCI.size = sizeVert; bufferCI.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; vkCreateBuffer(vkDevice, &bufferCI, vkAllocator, &etdHelper.etd->grBinds->vertexBD.buffer); vkBindBufferMemory(vkDevice, etdHelper.etd->grBinds->vertexBD.buffer, helper.et.deviceMemoryVert, offsetVert); runningOffset += sizeVert; index += 1; // normals const auto &accNorm = helper.gltfData->accessors[accessorIndexNormal]; uint32_t offsetNorm = runningOffset; uint32_t sizeNorm = accNorm.buffer_view->size; etdHelper.etd->grBinds->normalsBD.firstBinding = index; etdHelper.etd->grBinds->normalsBD.bindingCount = 1; alignmentPadding = sizeNorm % helper.vertMemoryRequirementsCombined.alignment; alignmentPadding = alignmentPadding == 0 ? 0 : helper.vertMemoryRequirementsCombined.alignment - alignmentPadding; sizeNorm += alignmentPadding; bufferCI.size = sizeNorm; vkCreateBuffer(vkDevice, &bufferCI, vkAllocator, &etdHelper.etd->grBinds->normalsBD.buffer); vkBindBufferMemory(vkDevice, etdHelper.etd->grBinds->normalsBD.buffer, helper.et.deviceMemoryVert, offsetNorm); runningOffset += sizeNorm; index += 1; // uv const auto &accUV = helper.gltfData->accessors[accessorIndexUV]; uint32_t offsetUV = runningOffset; uint32_t sizeUV = accUV.buffer_view->size; etdHelper.etd->grBinds->uvBD.firstBinding = index; etdHelper.etd->grBinds->uvBD.bindingCount = 1; alignmentPadding = sizeUV % helper.vertMemoryRequirementsCombined.alignment; alignmentPadding = alignmentPadding == 0 ? 0 : helper.vertMemoryRequirementsCombined.alignment - alignmentPadding; sizeUV += alignmentPadding; bufferCI.size = sizeUV; vkCreateBuffer(vkDevice, &bufferCI, vkAllocator, &etdHelper.etd->grBinds->uvBD.buffer); vkBindBufferMemory(vkDevice, etdHelper.etd->grBinds->uvBD.buffer, helper.et.deviceMemoryVert, offsetUV); runningOffset += sizeUV; index += 1; // 2023-09-27 - JCB // I don't know where else to put this etdHelper.etd->grBinds->instanceBD.firstBinding = index; etdHelper.etd->grBinds->instanceBD.bindingCount = 1; // no index += 1 because index just happens to be the right value here for // the binding index, whereas binding the IndexBuffer doesn't need a binding index. // index const auto &accIndex = helper.gltfData->accessors[accessorIndexIndex]; uint32_t offsetIndex = runningOffset; uint32_t sizeIndex = accIndex.buffer_view->size; etdHelper.etd->grBinds->indexBD.bindingCount = 1; etdHelper.etd->grBinds->indexCount = accIndex.count; alignmentPadding = sizeIndex % helper.vertMemoryRequirementsCombined.alignment; alignmentPadding = alignmentPadding == 0 ? 
0 : helper.vertMemoryRequirementsCombined.alignment - alignmentPadding; sizeIndex += alignmentPadding; bufferCI.size = sizeIndex; bufferCI.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT; vkCreateBuffer(vkDevice, &bufferCI, vkAllocator, &etdHelper.etd->grBinds->indexBD.buffer); vkBindBufferMemory(vkDevice, etdHelper.etd->grBinds->indexBD.buffer, helper.et.deviceMemoryVert, offsetIndex); runningOffset += sizeIndex; // index += 1; assert(runningOffset == helper.vertMemoryRequirementsCombined.size); // create transfer items && transfer { PKVK_BeginBuffer(transferFamilyIndex, helper.vertMemoryRequirementsCombined.size, tmpBufferDetails); memset(tmpBufferDetails.deviceData, '\0', helper.vertMemoryRequirementsCombined.size); char *dstPtr = nullptr; char *srcPtr = nullptr; dstPtr = static_cast(tmpBufferDetails.deviceData) + offsetVert; srcPtr = static_cast(const_cast(helper.modelBinAsset->ptr)) + accVert.buffer_view->offset; memcpy(dstPtr, srcPtr, accVert.buffer_view->size); dstPtr = static_cast(tmpBufferDetails.deviceData) + offsetNorm; srcPtr = static_cast(const_cast(helper.modelBinAsset->ptr)) + accNorm.buffer_view->offset; memcpy(dstPtr, srcPtr, accNorm.buffer_view->size); dstPtr = static_cast(tmpBufferDetails.deviceData) + offsetUV; srcPtr = static_cast(const_cast(helper.modelBinAsset->ptr)) + accUV.buffer_view->offset; memcpy(dstPtr, srcPtr, accUV.buffer_view->size); dstPtr = static_cast(tmpBufferDetails.deviceData) + offsetIndex; srcPtr = static_cast(const_cast(helper.modelBinAsset->ptr)) + accIndex.buffer_view->offset; memcpy(dstPtr, srcPtr, accIndex.buffer_view->size); VkCommandBufferBeginInfo vkCommandBufferBeginInfo; vkCommandBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkCommandBufferBeginInfo.pNext = nullptr; // TODO consider single-use? 
vkCommandBufferBeginInfo.flags = 0; vkCommandBufferBeginInfo.pInheritanceInfo = nullptr; vkBeginCommandBuffer(tmpBufferDetails.cmdBuffer, &vkCommandBufferBeginInfo); VkBufferCopy bufferCopys[expectedBufferCount]; for (long i = 0; i < expectedBufferCount; ++i) { bufferCopys[i].dstOffset = 0; } index = 0; bufferCopys[index].srcOffset = offsetVert; bufferCopys[index].size = sizeVert; vkCmdCopyBuffer(tmpBufferDetails.cmdBuffer, tmpBufferDetails.buffer, etdHelper.etd->grBinds->vertexBD.buffer, 1, &bufferCopys[index]); index+=1; bufferCopys[index].srcOffset = offsetNorm; bufferCopys[index].size = sizeNorm; vkCmdCopyBuffer(tmpBufferDetails.cmdBuffer, tmpBufferDetails.buffer, etdHelper.etd->grBinds->normalsBD.buffer, 1, &bufferCopys[index]); index+=1; bufferCopys[index].srcOffset = offsetUV; bufferCopys[index].size = sizeUV; vkCmdCopyBuffer(tmpBufferDetails.cmdBuffer, tmpBufferDetails.buffer, etdHelper.etd->grBinds->uvBD.buffer, 1, &bufferCopys[index]); index+=1; bufferCopys[index].srcOffset = offsetIndex; bufferCopys[index].size = sizeIndex; vkCmdCopyBuffer(tmpBufferDetails.cmdBuffer, tmpBufferDetails.buffer, etdHelper.etd->grBinds->indexBD.buffer, 1, &bufferCopys[index]); // index+=1; vkEndCommandBuffer(tmpBufferDetails.cmdBuffer); VkSubmitInfo submitInfo; submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submitInfo.pNext = nullptr; submitInfo.waitSemaphoreCount = 0; submitInfo.pWaitSemaphores = nullptr; submitInfo.pWaitDstStageMask = nullptr; submitInfo.commandBufferCount = 1; submitInfo.pCommandBuffers = &tmpBufferDetails.cmdBuffer; submitInfo.signalSemaphoreCount = 0; submitInfo.pSignalSemaphores = nullptr; vkQueueSubmit(tmpBufferDetails.queue, 1, &submitInfo, nullptr); vkQueueWaitIdle(tmpBufferDetails.queue); PKVK_EndBuffer(tmpBufferDetails); } // set up instance buffer etdHelper.etd->grBinds->instanceBufferMaxCount = etdHelper.etd->startingInstanceCount; etdHelper.etd->grBinds->instanceBufferMaxCount = etdHelper.etd->grBinds->instanceBufferMaxCount < 1 ? 1 : etdHelper.etd->grBinds->instanceBufferMaxCount; bufferCI.size = sizeof(glm::mat4) * etdHelper.etd->grBinds->instanceBufferMaxCount; bufferCI.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; vkCreateBuffer(vkDevice, &bufferCI, vkAllocator, &etdHelper.etd->grBinds->instanceBD.buffer); vkBindBufferMemory(vkDevice, etdHelper.etd->grBinds->instanceBD.buffer, helper.et.deviceMemoryInst, 0); // bullet // set up convex hull debug { index = 0; runningOffset = 0; // vertex offsetVert = runningOffset; sizeVert = sizeof(glm::vec3) * etdHelper.physDbg.vertexes.next; etdHelper.etd->grBinds->physVertBD.firstBinding = index; etdHelper.etd->grBinds->physVertBD.bindingCount = 1; alignmentPadding = sizeVert % helper.physVertMemoryRequirementsCombined.alignment; alignmentPadding = alignmentPadding == 0 ? 
0 : helper.physVertMemoryRequirementsCombined.alignment - alignmentPadding; sizeVert += alignmentPadding; bufferCI.size = sizeVert; bufferCI.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; vkCreateBuffer(vkDevice, &bufferCI, vkAllocator, &etdHelper.etd->grBinds->physVertBD.buffer); vkBindBufferMemory(vkDevice, etdHelper.etd->grBinds->physVertBD.buffer, helper.et.deviceMemoryPhysVert, offsetVert); runningOffset += sizeVert; // norm index = 1; offsetNorm = runningOffset; sizeNorm = sizeof(glm::vec3) * etdHelper.physDbg.normals.next; etdHelper.etd->grBinds->physNormBD.firstBinding = index; etdHelper.etd->grBinds->physNormBD.bindingCount = 1; alignmentPadding = sizeNorm % helper.physVertMemoryRequirementsCombined.alignment; alignmentPadding = alignmentPadding == 0 ? 0 : helper.physVertMemoryRequirementsCombined.alignment - alignmentPadding; sizeNorm += alignmentPadding; bufferCI.size = sizeNorm; bufferCI.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; vkCreateBuffer(vkDevice, &bufferCI, vkAllocator, &etdHelper.etd->grBinds->physNormBD.buffer); vkBindBufferMemory(vkDevice, etdHelper.etd->grBinds->physNormBD.buffer, helper.et.deviceMemoryPhysVert, offsetNorm); runningOffset += sizeNorm; // uv index = 2; offsetUV = runningOffset; sizeUV = sizeof(glm::vec2) * etdHelper.physDbg.uv.next; etdHelper.etd->grBinds->physUvBD.firstBinding = index; etdHelper.etd->grBinds->physUvBD.bindingCount = 1; alignmentPadding = sizeUV % helper.physVertMemoryRequirementsCombined.alignment; alignmentPadding = alignmentPadding == 0 ? 0 : helper.physVertMemoryRequirementsCombined.alignment - alignmentPadding; sizeUV += alignmentPadding; bufferCI.size = sizeUV; bufferCI.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; vkCreateBuffer(vkDevice, &bufferCI, vkAllocator, &etdHelper.etd->grBinds->physUvBD.buffer); vkBindBufferMemory(vkDevice, etdHelper.etd->grBinds->physUvBD.buffer, helper.et.deviceMemoryPhysVert, offsetUV); runningOffset += sizeUV; // index index = 3; offsetIndex = runningOffset; sizeIndex = sizeof(uint16_t) * etdHelper.physDbg.indexes.next; etdHelper.etd->grBinds->physIndxBD.firstBinding = index; etdHelper.etd->grBinds->physIndxBD.bindingCount = 1; alignmentPadding = sizeIndex % helper.physVertMemoryRequirementsCombined.alignment; alignmentPadding = alignmentPadding == 0 ? 
0 : helper.physVertMemoryRequirementsCombined.alignment - alignmentPadding; sizeIndex += alignmentPadding; bufferCI.size = sizeIndex; bufferCI.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT; vkCreateBuffer(vkDevice, &bufferCI, vkAllocator, &etdHelper.etd->grBinds->physIndxBD.buffer); vkBindBufferMemory(vkDevice, etdHelper.etd->grBinds->physIndxBD.buffer, helper.et.deviceMemoryPhysVert, offsetIndex); runningOffset += sizeIndex; assert(runningOffset == helper.physVertMemoryRequirementsCombined.size); // create transfer items && transfer { PKVK_BeginBuffer(transferFamilyIndex, helper.physVertMemoryRequirementsCombined.size, tmpBufferDetails); memset(tmpBufferDetails.deviceData, '\0', helper.physVertMemoryRequirementsCombined.size); runningOffset = 0; char *dstPtr = nullptr; char *srcPtr = nullptr; dstPtr = static_cast(tmpBufferDetails.deviceData) + runningOffset; srcPtr = reinterpret_cast(etdHelper.physDbg.vertexes.data); memcpy(dstPtr, srcPtr, sizeVert); runningOffset += sizeVert; dstPtr = static_cast(tmpBufferDetails.deviceData) + runningOffset; srcPtr = reinterpret_cast(etdHelper.physDbg.normals.data); memcpy(dstPtr, srcPtr, sizeNorm); runningOffset += sizeNorm; dstPtr = static_cast(tmpBufferDetails.deviceData) + runningOffset; srcPtr = reinterpret_cast(etdHelper.physDbg.uv.data); memcpy(dstPtr, srcPtr, sizeUV); runningOffset += sizeUV; dstPtr = static_cast(tmpBufferDetails.deviceData) + runningOffset; srcPtr = reinterpret_cast(etdHelper.physDbg.indexes.data); memcpy(dstPtr, srcPtr, sizeof(uint16_t) * etdHelper.physDbg.indexes.next); VkCommandBufferBeginInfo vkCommandBufferBeginInfo; vkCommandBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkCommandBufferBeginInfo.pNext = nullptr; // TODO consider single-use? 
vkCommandBufferBeginInfo.flags = 0; vkCommandBufferBeginInfo.pInheritanceInfo = nullptr; vkBeginCommandBuffer(tmpBufferDetails.cmdBuffer, &vkCommandBufferBeginInfo); VkBufferCopy bufferCopys[expectedBufferCount]; for (long i = 0; i < expectedBufferCount; ++i) { bufferCopys[i].dstOffset = 0; } index = 0; runningOffset = 0; bufferCopys[index].srcOffset = runningOffset; bufferCopys[index].size = sizeVert; vkCmdCopyBuffer(tmpBufferDetails.cmdBuffer, tmpBufferDetails.buffer, etdHelper.etd->grBinds->physVertBD.buffer, 1, &bufferCopys[index]); runningOffset += sizeVert; index+=1; bufferCopys[index].srcOffset = runningOffset; bufferCopys[index].size = sizeNorm; vkCmdCopyBuffer(tmpBufferDetails.cmdBuffer, tmpBufferDetails.buffer, etdHelper.etd->grBinds->physNormBD.buffer, 1, &bufferCopys[index]); runningOffset += sizeNorm; index+=1; bufferCopys[index].srcOffset = runningOffset; bufferCopys[index].size = sizeUV; vkCmdCopyBuffer(tmpBufferDetails.cmdBuffer, tmpBufferDetails.buffer, etdHelper.etd->grBinds->physUvBD.buffer, 1, &bufferCopys[index]); runningOffset += sizeUV; index+=1; bufferCopys[index].srcOffset = runningOffset; bufferCopys[index].size = sizeIndex; vkCmdCopyBuffer(tmpBufferDetails.cmdBuffer, tmpBufferDetails.buffer, etdHelper.etd->grBinds->physIndxBD.buffer, 1, &bufferCopys[index]); // runningOffset += sizeIndex; // index+=1; vkEndCommandBuffer(tmpBufferDetails.cmdBuffer); VkSubmitInfo submitInfo; submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submitInfo.pNext = nullptr; submitInfo.waitSemaphoreCount = 0; submitInfo.pWaitSemaphores = nullptr; submitInfo.pWaitDstStageMask = nullptr; submitInfo.commandBufferCount = 1; submitInfo.pCommandBuffers = &tmpBufferDetails.cmdBuffer; submitInfo.signalSemaphoreCount = 0; submitInfo.pSignalSemaphores = nullptr; vkQueueSubmit(tmpBufferDetails.queue, 1, &submitInfo, nullptr); vkQueueWaitIdle(tmpBufferDetails.queue); PKVK_EndBuffer(tmpBufferDetails); } } } void EntityType_Load(EntityType &et) { AssetHandle assetHandle{AM_GetHandle(et.modelAssetKey)}; assert(assetHandle != AssetHandle_MAX); const Asset *asset = AM_Get(assetHandle); cgltf_options options{}; // TODO allocator cgltf_data *gltfData = nullptr; cgltf_result result = cgltf_parse(&options, asset->ptr, asset->size, &gltfData); assert(result == cgltf_result_success); result = cgltf_validate(gltfData); assert(result == cgltf_result_success); assert(gltfData->buffers_count == 1); // make sure cgltf can interpret our model for (unsigned long i = 0; i < gltfData->accessors_count; ++i) { assert(gltfData->accessors[i].type != cgltf_type_invalid); } for (unsigned long i = 0; i < gltfData->buffers_count; ++i) { assert(gltfData->buffer_views[i].type != cgltf_buffer_view_type_invalid); } et.detailsCount = gltfData->nodes_count; assert(et.detailsCount <= EntityTypeDetails_MAX && "Maximum supported number of meshes in gltf is hard-coded, update _MAX if not unreasonable"); std::filesystem::path gltfPath{asset->basePath}; gltfPath.replace_filename(gltfData->buffers[0].uri); AssetHandle modelBinHandle = AM_Register(gltfPath.c_str(), PKE_ASSET_TYPE_UNSET); const Asset *modelBinAsset = AM_Get(modelBinHandle); struct pk_membucket *entLoaderBkt = pk_mem_bucket_create("entities", PK_MEM_DEFAULT_BUCKET_SIZE, PK_MEMBUCKET_FLAG_NONE); EntityType_LoadHelperStruct helper { .bkt = entLoaderBkt, .et = et, .etdHelpers = {}, .gltfData = gltfData, .modelBinAsset = modelBinAsset, .vertMemoryRequirements = {}, .instMemoryRequirements = {}, .physVertMemoryRequirements = {}, .textureMemoryRequirements = {}, }; 
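    // Every transient load-time array in the helper is backed by entLoaderBkt, so the single
    // pk_mem_bucket_destroy(entLoaderBkt) at the end of this function releases them all at once.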
helper.etdHelpers.bkt = entLoaderBkt; helper.vertMemoryRequirements.bkt = entLoaderBkt; helper.instMemoryRequirements.bkt = entLoaderBkt; helper.physVertMemoryRequirements.bkt = entLoaderBkt; helper.textureMemoryRequirements.bkt = entLoaderBkt; EntityType_PreLoad(helper); for (int64_t i = 0; i < et.detailsCount; ++i) { EntityTypeDetails_LoadHelperStruct &etdHelper = helper.etdHelpers[i]; /* * 2023-09-13 - JCB * I don't like that we're just copying this. * This should be moved to window.cpp. */ etdHelper.etd->grBinds->vkPipelineLayout = pkePipelines.pipe_layouts.named.ubo_txtr; etdHelper.etd->grBinds->graphicsPipeline = pkePipelines.pipelines.named.entity_standard; // handle texture EntityType_LoadTexture(helper, i); //handle mesh EntityType_LoadMesh(helper, i); } uint64_t id = 0; id |= ((uint64_t)helper.et.handle.b << 32); id |= ((uint64_t)helper.et.handle.i); helper.et.pke_ev_cb_id_framebuffer_resized = pk_ev_register_cb(pke_ev_mgr_id_window, pke_ev_id_framebuffer_length_changed, EntityType_Inner_UpdateDescriptorSets_EvCallabck, reinterpret_cast(id)); // TODO DeviceMemory // cleanup AM_Release(modelBinHandle); AM_Release(assetHandle); cgltf_free(const_cast(helper.gltfData)); helper.etdHelpers.bkt = nullptr; helper.vertMemoryRequirements.bkt = nullptr; helper.instMemoryRequirements.bkt = nullptr; helper.physVertMemoryRequirements.bkt = nullptr; helper.textureMemoryRequirements.bkt = nullptr; pk_mem_bucket_destroy(entLoaderBkt); } void EntityType_Unload(EntityType &et, CompGrBinds *grBindsArr[1]) { if (et.modelAssetKey[0] == '\0') return; et.modelAssetKey[0] = '\0'; pk_ev_unregister_cb(pke_ev_mgr_id_window, pke_ev_id_framebuffer_length_changed, et.pke_ev_cb_id_framebuffer_resized); EntityType_Inner_DestroyDescriptors(&et); for (long k = 0; k < et.detailsCount; ++k) { EntityTypeDetails &etd = et.details[k]; // TODO maybe i should just change `shape` to the actual type rather than a parent type? 
if (etd.bt.shape != nullptr) pk_delete_bkt(etd.bt.shape, sizeof(btConvexHullShape), MemBkt_Bullet); auto *grBinds = grBindsArr[k]; if (grBinds != nullptr) { if (grBinds->vertexBD.buffer != VK_NULL_HANDLE) vkDestroyBuffer(vkDevice, grBinds->vertexBD.buffer, vkAllocator); grBinds->vertexBD.buffer = VK_NULL_HANDLE; grBinds->vertexBD.firstBinding = 0; grBinds->vertexBD.bindingCount = 0; grBinds->vertexBD.offsets[0] = 0; if (grBinds->normalsBD.buffer != VK_NULL_HANDLE) vkDestroyBuffer(vkDevice, grBinds->normalsBD.buffer, vkAllocator); grBinds->normalsBD.buffer = VK_NULL_HANDLE; grBinds->normalsBD.firstBinding = 0; grBinds->normalsBD.bindingCount = 0; grBinds->normalsBD.offsets[0] = 0; if (grBinds->uvBD.buffer != VK_NULL_HANDLE) vkDestroyBuffer(vkDevice, grBinds->uvBD.buffer, vkAllocator); grBinds->uvBD.buffer = VK_NULL_HANDLE; grBinds->uvBD.firstBinding = 0; grBinds->uvBD.bindingCount = 0; grBinds->uvBD.offsets[0] = 0; if (grBinds->indexBD.buffer != VK_NULL_HANDLE) vkDestroyBuffer(vkDevice, grBinds->indexBD.buffer, vkAllocator); grBinds->indexBD.buffer = VK_NULL_HANDLE; grBinds->indexBD.bindingCount = 0; grBinds->indexBD.offsets[0] = 0; grBinds->indexCount = 0; if (grBinds->physVertBD.buffer != VK_NULL_HANDLE) vkDestroyBuffer(vkDevice, grBinds->physVertBD.buffer, vkAllocator); grBinds->physVertBD.buffer = VK_NULL_HANDLE; grBinds->physVertBD.firstBinding = 0; grBinds->physVertBD.bindingCount = 0; grBinds->physVertBD.offsets[0] = 0; if (grBinds->physNormBD.buffer != VK_NULL_HANDLE) vkDestroyBuffer(vkDevice, grBinds->physNormBD.buffer, vkAllocator); grBinds->physNormBD.buffer = VK_NULL_HANDLE; grBinds->physNormBD.firstBinding = 0; grBinds->physNormBD.bindingCount = 0; grBinds->physNormBD.offsets[0] = 0; if (grBinds->physUvBD.buffer != VK_NULL_HANDLE) vkDestroyBuffer(vkDevice, grBinds->physUvBD.buffer, vkAllocator); grBinds->physUvBD.buffer = VK_NULL_HANDLE; grBinds->physUvBD.firstBinding = 0; grBinds->physUvBD.bindingCount = 0; grBinds->physUvBD.offsets[0] = 0; if (grBinds->physIndxBD.buffer != VK_NULL_HANDLE) vkDestroyBuffer(vkDevice, grBinds->physIndxBD.buffer, vkAllocator); grBinds->physIndxBD.buffer = VK_NULL_HANDLE; grBinds->physIndxBD.firstBinding = 0; grBinds->physIndxBD.bindingCount = 0; grBinds->physIndxBD.offsets[0] = 0; if (grBinds->instanceBD.buffer != VK_NULL_HANDLE) vkDestroyBuffer(vkDevice, grBinds->instanceBD.buffer, vkAllocator); grBinds->instanceBD.buffer = VK_NULL_HANDLE; grBinds->instanceBD.firstBinding = 0; grBinds->instanceBD.bindingCount = 0; grBinds->instanceCounter = 0; grBinds->instanceBufferMaxCount = 0; grBinds->instanceBD.offsets[0] = 0; } if (etd.textureImageView != VK_NULL_HANDLE) vkDestroyImageView(vkDevice, etd.textureImageView, vkAllocator); etd.textureImageView = VK_NULL_HANDLE; if (etd.textureImage != VK_NULL_HANDLE) vkDestroyImage(vkDevice, etd.textureImage, vkAllocator); etd.textureImage = VK_NULL_HANDLE; } if (et.deviceMemoryInst != VK_NULL_HANDLE) vkFreeMemory(vkDevice, et.deviceMemoryInst, vkAllocator); et.deviceMemoryInst = VK_NULL_HANDLE; if (et.deviceMemoryVert != VK_NULL_HANDLE) vkFreeMemory(vkDevice, et.deviceMemoryVert, vkAllocator); et.deviceMemoryVert = VK_NULL_HANDLE; if (et.deviceMemoryPhysVert != VK_NULL_HANDLE) vkFreeMemory(vkDevice, et.deviceMemoryPhysVert, vkAllocator); et.deviceMemoryPhysVert = VK_NULL_HANDLE; if (et.deviceMemoryTexture != VK_NULL_HANDLE) vkFreeMemory(vkDevice, et.deviceMemoryTexture, vkAllocator); et.deviceMemoryTexture = VK_NULL_HANDLE; if (et.entityTypeCode.reserved != 0) pk_delete_arr(et.entityTypeCode.val, 
et.entityTypeCode.reserved); et.entityTypeCode.val = CAFE_BABE(char); et.entityTypeCode.length = 0; et.entityTypeCode.reserved = 0; } void EntityType_Tick(double delta) { (void)delta; pk_iter_t iter_ent_type; bool b; b = pk_bkt_arr_iter_begin(&et_mstr.bc, &iter_ent_type); while (b == true) { if (iter_ent_type->isMarkedForRemoval == true) { EntToTeardown td{}; td.handle = iter_ent_type->handle; td.ticksToWait = 1; for (long k = 0; k < iter_ent_type->detailsCount; ++k) { td.grBinds[k] = iter_ent_type->details[k].grBinds; } pk_arr_append_t(&EntityTypesToTeardown, td); } b = pk_bkt_arr_iter_increment(&et_mstr.bc, &iter_ent_type); } } void EntityType_Tick_Late(double delta) { (void)delta; while (EntitiesWithExcessInstances.next != 0) { auto *entity = EntitiesWithExcessInstances[EntitiesWithExcessInstances.next-1]; auto *etPtr = EntityType_FindByEntityHandle(entity->handle); assert(etPtr != nullptr); auto &et = *etPtr; for (int64_t i = 0; i < et.detailsCount; ++i) { auto &etd = et.details[i]; assert(etd.grBinds != nullptr); EntityType_RolloverInstances(et, *etd.grBinds); } } for (uint32_t i = EntityTypesToTeardown.next; i > 0; --i) { auto &td = EntityTypesToTeardown[i-1]; td.ticksToWait -= 1; if (td.ticksToWait == 0) { auto *entityType = EntityType_FindByEntityHandle(td.handle); assert(entityType != nullptr); EntityType_Unload(*entityType, td.grBinds); pk_arr_remove_at(&EntityTypesToTeardown, i-1); } } } void EntityType_RolloverInstances(EntityType &et, CompGrBinds &grBinds) { PKVK_TmpBufferDetails tmpBufferDetails{}; int32_t oldCount = grBinds.instanceBufferMaxCount; int32_t newCount = std::ceil(grBinds.instanceBufferMaxCount * 1.5); newCount = newCount < 4 ? 4 : newCount; grBinds.instanceBufferMaxCount = newCount; uint32_t oldSize = sizeof(glm::mat4) * oldCount; VkDeviceMemory oldMemory(et.deviceMemoryInst); VkBuffer oldBuffer(grBinds.instanceBD.buffer); VkBufferCreateInfo vkBufferCreateInfo{}; vkBufferCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; vkBufferCreateInfo.pNext = nullptr; vkBufferCreateInfo.flags = 0; vkBufferCreateInfo.size = sizeof(glm::mat4) * newCount; vkBufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; vkBufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; vkBufferCreateInfo.queueFamilyIndexCount = 1; vkBufferCreateInfo.pQueueFamilyIndices = &graphicsFamilyIndex; vkCreateBuffer(vkDevice, &vkBufferCreateInfo, vkAllocator, &grBinds.instanceBD.buffer); VkMemoryRequirements vkMemoryRequirementsInst; vkGetBufferMemoryRequirements(vkDevice, grBinds.instanceBD.buffer, &vkMemoryRequirementsInst); VkMemoryAllocateInfo vkMemoryAllocateInfo; vkMemoryAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; vkMemoryAllocateInfo.pNext = nullptr; vkMemoryAllocateInfo.allocationSize = vkMemoryRequirementsInst.size; vkMemoryAllocateInfo.memoryTypeIndex = FindMemoryTypeIndex(vkMemoryRequirementsInst.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); vkAllocateMemory(vkDevice, &vkMemoryAllocateInfo, vkAllocator, &et.deviceMemoryInst); vkBindBufferMemory(vkDevice, grBinds.instanceBD.buffer, et.deviceMemoryInst, 0); // copy data PKVK_BeginBuffer(graphicsFamilyIndex, 0, tmpBufferDetails); { vkResetCommandBuffer(tmpBufferDetails.cmdBuffer, 0); VkBufferMemoryBarrier memBarriers[2]; for (long i = 0; i < 2; ++i) { memBarriers[i].sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER; memBarriers[i].pNext = nullptr; } memBarriers[0].srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT; memBarriers[0].dstAccessMask = 
{}; memBarriers[0].srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; memBarriers[0].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; memBarriers[0].buffer = oldBuffer; memBarriers[0].offset = 0; memBarriers[0].size = oldSize; memBarriers[1].srcAccessMask = {}; memBarriers[1].dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; memBarriers[1].srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; memBarriers[1].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; memBarriers[1].buffer = grBinds.instanceBD.buffer; memBarriers[1].offset = 0; memBarriers[1].size = vkMemoryRequirementsInst.size; VkCommandBufferBeginInfo vkCommandBufferBeginInfo; vkCommandBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkCommandBufferBeginInfo.pNext = nullptr; // TODO consider single-use? vkCommandBufferBeginInfo.flags = 0; vkCommandBufferBeginInfo.pInheritanceInfo = nullptr; vkBeginCommandBuffer(tmpBufferDetails.cmdBuffer, &vkCommandBufferBeginInfo); vkCmdPipelineBarrier(tmpBufferDetails.cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 2, memBarriers, 0, nullptr); VkBufferCopy vkBufferCopy; vkBufferCopy.srcOffset = 0; vkBufferCopy.dstOffset = 0; vkBufferCopy.size = oldSize; vkCmdCopyBuffer(tmpBufferDetails.cmdBuffer, oldBuffer, grBinds.instanceBD.buffer, 1, &vkBufferCopy); vkEndCommandBuffer(tmpBufferDetails.cmdBuffer); VkSubmitInfo submitInfo; submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submitInfo.pNext = nullptr; submitInfo.waitSemaphoreCount = 0; submitInfo.pWaitSemaphores = nullptr; submitInfo.pWaitDstStageMask = nullptr; submitInfo.commandBufferCount = 1; submitInfo.pCommandBuffers = &tmpBufferDetails.cmdBuffer; submitInfo.signalSemaphoreCount = 0; submitInfo.pSignalSemaphores = nullptr; vkQueueSubmit(tmpBufferDetails.queue, 1, &submitInfo, nullptr); vkQueueWaitIdle(tmpBufferDetails.queue); vkResetCommandBuffer(tmpBufferDetails.cmdBuffer, 0); } PKVK_EndBuffer(tmpBufferDetails); // cleanup vkDestroyBuffer(vkDevice, oldBuffer, vkAllocator); vkFreeMemory(vkDevice, oldMemory, vkAllocator); } pk_bkt_arr *EntityType_GetEntityTypes() { return &et_mstr.bc; } void EntityType_Teardown() { bool b; pk_iter_t iter_ent_type{}; b = pk_bkt_arr_iter_end(&et_mstr.bc, &iter_ent_type); while (b == true) { if (iter_ent_type->modelAssetKey[0] == '\0') { b = pk_bkt_arr_iter_decrement(&et_mstr.bc, &iter_ent_type); continue; } CompGrBinds *grBindsArr[EntityTypeDetails_MAX] = {nullptr}; for (long k = 0; k < iter_ent_type->detailsCount; ++k) { grBindsArr[k] = iter_ent_type->details[k].grBinds; } EntityType_Unload(*iter_ent_type, grBindsArr); b = pk_bkt_arr_iter_decrement(&et_mstr.bc, &iter_ent_type); } pk_bkt_arr_teardown(&et_mstr.bc); pk_arr_reset(&EntityTypesToTeardown); pk_mem_bucket_destroy(et_mstr.bkt); }
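/*
 * Usage sketch (illustrative only, not part of the module): the expected lifecycle of the
 * entity-type API defined above, assuming the engine has already initialised the ECS, asset
 * manager, Vulkan context, and Bullet world these functions depend on. The uuid, asset keys,
 * and tick delta below are placeholders.
 *
 *   EntityType_Init();
 *   EntityType *et = EntityType_Create(pk_uuid_zed);      // placeholder uuid
 *   // ... fill in et->modelAssetKey, et->entityTypeCode, per-detail settings ...
 *   EntityType_Load(*et);                                  // builds GPU buffers, textures, Bullet hulls
 *   Entity_Base *inst = EntityType_CreateGenericInstance(et, nullptr, nullptr, nullptr);
 *   (void)inst;
 *
 *   // per frame:
 *   EntityType_Tick(1.0 / 60.0);
 *   EntityType_Tick_Late(1.0 / 60.0);
 *
 *   // at shutdown:
 *   EntityType_Teardown();
 *
 * Loading must happen before instances are created, because EntityType_CreateGenericInstance
 * reads EntityTypeDetails::bt.shape and the grBinds that EntityType_Load sets up.
 */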