LCOV - code coverage report
Current view: top level - source/renderer - VertexBuffer.cpp (source / functions)
Test: 0 A.D. test coverage report
Date: 2023-01-19 00:18:29
Coverage: Lines: 79 of 131 hit (60.3 %) | Functions: 10 of 17 hit (58.8 %)

          Line data    Source code
       1             : /* Copyright (C) 2022 Wildfire Games.
       2             :  * This file is part of 0 A.D.
       3             :  *
       4             :  * 0 A.D. is free software: you can redistribute it and/or modify
       5             :  * it under the terms of the GNU General Public License as published by
       6             :  * the Free Software Foundation, either version 2 of the License, or
       7             :  * (at your option) any later version.
       8             :  *
       9             :  * 0 A.D. is distributed in the hope that it will be useful,
      10             :  * but WITHOUT ANY WARRANTY; without even the implied warranty of
      11             :  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
      12             :  * GNU General Public License for more details.
      13             :  *
      14             :  * You should have received a copy of the GNU General Public License
      15             :  * along with 0 A.D.  If not, see <http://www.gnu.org/licenses/>.
      16             :  */
      17             : 
      18             : #include "precompiled.h"
      19             : 
      20             : #include "VertexBuffer.h"
      21             : 
      22             : #include "lib/sysdep/cpu.h"
      23             : #include "ps/CLogger.h"
      24             : #include "ps/Errors.h"
      25             : #include "ps/VideoMode.h"
      26             : #include "renderer/backend/IDevice.h"
      27             : #include "renderer/Renderer.h"
      28             : 
      29             : #include <algorithm>
      30             : #include <cstring>
      31             : #include <iterator>
      32             : 
       33             : // Absolute maximum size in bytes of each backend vertex buffer object.
       34             : // Make it large enough for the maximum feasible mesh size (64K vertices,
      35             : // 64 bytes per vertex in InstancingModelRenderer).
      36             : // TODO: measure what influence this has on performance
      37             : constexpr std::size_t MAX_VB_SIZE_BYTES = 4 * 1024 * 1024;
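
As a quick sanity check on the sizing comment above (a sketch, not part of the instrumented file): 64K vertices at 64 bytes each is exactly the 4 MiB chosen for MAX_VB_SIZE_BYTES.

    // Sketch only: 65536 vertices * 64 bytes/vertex == 4 MiB.
    static_assert(MAX_VB_SIZE_BYTES == std::size_t{65536} * 64,
        "4 MiB covers the maximum feasible mesh size");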
      38             : 
      39           1 : CVertexBuffer::CVertexBuffer(
      40             :     const char* name, const size_t vertexSize,
      41           1 :     const Renderer::Backend::IBuffer::Type type, const bool dynamic)
      42           1 :     : CVertexBuffer(name, vertexSize, type, dynamic, MAX_VB_SIZE_BYTES)
      43             : {
      44           1 : }
      45             : 
      46           1 : CVertexBuffer::CVertexBuffer(
      47             :     const char* name, const size_t vertexSize,
      48             :     const Renderer::Backend::IBuffer::Type type, const bool dynamic,
      49           1 :     const size_t maximumBufferSize)
      50           1 :     : m_VertexSize(vertexSize), m_HasNeededChunks(false)
      51             : {
      52           1 :     size_t size = maximumBufferSize;
      53             : 
      54           1 :     if (type == Renderer::Backend::IBuffer::Type::VERTEX)
      55             :     {
      56             :         // We want to store 16-bit indices to any vertex in a buffer, so the
      57             :         // buffer must never be bigger than vertexSize*64K bytes since we can
       58             :         // address at most 64K of them with 16-bit indices (see the sketch after this constructor).
      59           0 :         size = std::min(size, vertexSize * 65536);
      60             :     }
      61           1 :     else if (type == Renderer::Backend::IBuffer::Type::INDEX)
      62             :     {
      63           1 :         ENSURE(vertexSize == sizeof(u16));
      64             :     }
      65             : 
      66             :     // store max/free vertex counts
      67           1 :     m_MaxVertices = m_FreeVertices = size / vertexSize;
      68             : 
      69           5 :     m_Buffer = g_VideoMode.GetBackendDevice()->CreateBuffer(
      70           4 :         name, type, m_MaxVertices * m_VertexSize, dynamic);
      71             : 
      72             :     // create sole free chunk
      73           1 :     VBChunk* chunk = new VBChunk;
      74           1 :     chunk->m_Owner = this;
      75           1 :     chunk->m_Count = m_FreeVertices;
      76           1 :     chunk->m_Index = 0;
      77           1 :     m_FreeList.emplace_back(chunk);
      78           1 : }
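
A minimal sketch of what the 16-bit-index clamp in the constructor above implies, using a hypothetical 32-byte vertex format (the concrete sizes are assumptions for illustration only):

    // With vertexSize == 32, std::min(4 MiB, 32 * 65536) yields 2 MiB,
    // i.e. exactly 65536 vertices - the most a u16 index can address.
    constexpr std::size_t exampleVertexSize = 32; // hypothetical format
    constexpr std::size_t clamped =
        std::min<std::size_t>(4 * 1024 * 1024, exampleVertexSize * 65536);
    static_assert(clamped == 2 * 1024 * 1024);
    static_assert(clamped / exampleVertexSize == 65536);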
      79             : 
      80           2 : CVertexBuffer::~CVertexBuffer()
      81             : {
      82             :     // Must have released all chunks before destroying the buffer
      83           1 :     ENSURE(m_AllocList.empty());
      84             : 
      85           1 :     m_Buffer.reset();
      86             : 
      87           2 :     for (VBChunk* const& chunk : m_FreeList)
      88           1 :         delete chunk;
      89           1 : }
      90             : 
      91           6 : bool CVertexBuffer::CompatibleVertexType(
      92             :     const size_t vertexSize, const Renderer::Backend::IBuffer::Type type,
      93             :     const bool dynamic) const
      94             : {
      95           6 :     ENSURE(m_Buffer);
      96           6 :     return type == m_Buffer->GetType() && dynamic == m_Buffer->IsDynamic() && vertexSize == m_VertexSize;
      97             : }
      98             : 
      99             : ///////////////////////////////////////////////////////////////////////////////
      100             : // Allocate: try to allocate a chunk of the given number of vertices (each of
      101             : // the given size) from this buffer, with the given type - return null
      102             : // if no large enough free chunk is available
     103           6 : CVertexBuffer::VBChunk* CVertexBuffer::Allocate(
     104             :     const size_t vertexSize, const size_t numberOfVertices,
     105             :     const Renderer::Backend::IBuffer::Type type, const bool dynamic,
     106             :     void* backingStore)
     107             : {
     108             :     // check this is the right kind of buffer
     109           6 :     if (!CompatibleVertexType(vertexSize, type, dynamic))
     110           0 :         return nullptr;
     111             : 
     112           6 :     if (UseStreaming(dynamic))
     113           0 :         ENSURE(backingStore != nullptr);
     114             : 
     115             :     // quick check there's enough vertices spare to allocate
     116           6 :     if (numberOfVertices > m_FreeVertices)
     117           0 :         return nullptr;
     118             : 
      119             :     // trawl the free list for an exact-size chunk, else the largest chunk with enough space
     120           6 :     std::vector<VBChunk*>::iterator best_iter = m_FreeList.end();
     121          12 :     for (std::vector<VBChunk*>::iterator iter = m_FreeList.begin(); iter != m_FreeList.end(); ++iter)
     122             :     {
     123           6 :         if (numberOfVertices == (*iter)->m_Count)
     124             :         {
     125           0 :             best_iter = iter;
     126           0 :             break;
     127             :         }
     128           6 :         else if (numberOfVertices < (*iter)->m_Count && (best_iter == m_FreeList.end() || (*best_iter)->m_Count < (*iter)->m_Count))
     129           6 :             best_iter = iter;
     130             :     }
     131             : 
     132             :     // We could not find a large enough chunk.
     133           6 :     if (best_iter == m_FreeList.end())
     134           0 :         return nullptr;
     135             : 
     136           6 :     VBChunk* chunk = *best_iter;
     137           6 :     m_FreeList.erase(best_iter);
     138           6 :     m_FreeVertices -= chunk->m_Count;
     139             : 
     140           6 :     chunk->m_BackingStore = backingStore;
     141           6 :     chunk->m_Dirty = false;
     142           6 :     chunk->m_Needed = false;
     143             : 
      144             :     // split the chunk in two: allocate a new chunk from the unused vertices in the
      145             :     // found chunk, and add it to the free list
     146           6 :     if (chunk->m_Count > numberOfVertices)
     147             :     {
     148           6 :         VBChunk* newchunk = new VBChunk;
     149           6 :         newchunk->m_Owner = this;
     150           6 :         newchunk->m_Count = chunk->m_Count - numberOfVertices;
     151           6 :         newchunk->m_Index = chunk->m_Index + numberOfVertices;
     152           6 :         m_FreeList.emplace_back(newchunk);
     153           6 :         m_FreeVertices += newchunk->m_Count;
     154             : 
     155             :         // resize given chunk
     156           6 :         chunk->m_Count = numberOfVertices;
     157             :     }
     158             : 
     159             :     // return found chunk
     160           6 :     m_AllocList.push_back(chunk);
     161           6 :     return chunk;
     162             : }
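
A minimal usage sketch of the non-streaming path, assuming a hypothetical 24-byte vertex format and caller-prepared data; the buffer, chunk and data names are illustrative, and in the engine these calls are normally reached through a higher-level buffer manager rather than made directly:

    // Allocate 100 vertices from a static vertex buffer. Allocate() returns
    // nullptr when no large enough free chunk exists, so callers must check.
    CVertexBuffer buffer("ExampleVB", 24,
        Renderer::Backend::IBuffer::Type::VERTEX, /*dynamic*/ false);
    CVertexBuffer::VBChunk* chunk = buffer.Allocate(
        24, 100, Renderer::Backend::IBuffer::Type::VERTEX, false, nullptr);
    if (chunk)
    {
        // Non-streaming buffers upload immediately via the command context.
        buffer.UpdateChunkVertices(chunk, exampleVertexData);
        // ... draw using chunk->m_Index and chunk->m_Count ...
        buffer.Release(chunk);
    }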
     163             : 
     164             : ///////////////////////////////////////////////////////////////////////////////
     165             : // Release: return given chunk to this buffer
     166           6 : void CVertexBuffer::Release(VBChunk* chunk)
     167             : {
     168             :     // Update total free count before potentially modifying this chunk's count
     169           6 :     m_FreeVertices += chunk->m_Count;
     170             : 
     171           6 :     m_AllocList.erase(std::find(m_AllocList.begin(), m_AllocList.end(), chunk));
     172             : 
      173             :     // Sorting in O(n log n) should perform close to O(n) here, because
      174             :     // the container is already mostly sorted.
     175           6 :     std::sort(
     176             :         m_FreeList.begin(), m_FreeList.end(),
     177           0 :         [](const VBChunk* chunk1, const VBChunk* chunk2) -> bool
     178             :         {
     179           0 :             return chunk1->m_Index < chunk2->m_Index;
     180             :         });
     181             : 
      182             :     // Coalesce the released chunk with any adjacent free-list chunks:
      183             :     // merge each adjacent chunk into the released one and remove the
      184             :     // merged chunk from the list.
     185          12 :     for (std::vector<VBChunk*>::iterator iter = m_FreeList.begin(); iter != m_FreeList.end();)
     186             :     {
     187          12 :         if ((*iter)->m_Index == chunk->m_Index + chunk->m_Count
     188           6 :          || (*iter)->m_Index + (*iter)->m_Count == chunk->m_Index)
     189             :         {
     190           6 :             chunk->m_Index = std::min(chunk->m_Index, (*iter)->m_Index);
     191           6 :             chunk->m_Count += (*iter)->m_Count;
     192           6 :             delete *iter;
     193           6 :             iter = m_FreeList.erase(iter);
     194           6 :             if (!m_FreeList.empty() && iter != m_FreeList.begin())
     195           0 :                 iter = std::prev(iter);
     196             :         }
     197             :         else
     198             :         {
     199           0 :             ++iter;
     200             :         }
     201             :     }
     202             : 
     203           6 :     m_FreeList.emplace_back(chunk);
     204           6 : }
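
A short worked illustration of the coalescing loop above (the chunk ranges are hypothetical):

    // Suppose the free list holds chunks covering vertices [0, 100) and
    // [150, 200), and the chunk being released covers [100, 150).  The loop
    // first merges [0, 100) into it (giving [0, 150)), then merges [150, 200)
    // (giving [0, 200)); both old free chunks are deleted and the single
    // merged chunk is appended to the free list.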
     205             : 
     206             : ///////////////////////////////////////////////////////////////////////////////
     207             : // UpdateChunkVertices: update vertex data for given chunk
     208           6 : void CVertexBuffer::UpdateChunkVertices(VBChunk* chunk, void* data)
     209             : {
     210           6 :     ENSURE(m_Buffer);
     211           6 :     if (UseStreaming(m_Buffer->IsDynamic()))
     212             :     {
     213             :         // The backend buffer is now out of sync with the backing store.
     214           0 :         chunk->m_Dirty = true;
     215             : 
     216             :         // Sanity check: Make sure the caller hasn't tried to reallocate
     217             :         // their backing store.
     218           0 :         ENSURE(data == chunk->m_BackingStore);
     219             :     }
     220             :     else
     221             :     {
     222           6 :         ENSURE(data);
     223          30 :         g_Renderer.GetDeviceCommandContext()->UploadBufferRegion(
     224          24 :             m_Buffer.get(), data, chunk->m_Index * m_VertexSize, chunk->m_Count * m_VertexSize);
     225             :     }
     226           6 : }
     227             : 
     228           0 : void CVertexBuffer::UploadIfNeeded(
     229             :     Renderer::Backend::IDeviceCommandContext* deviceCommandContext)
     230             : {
     231           0 :     if (UseStreaming(m_Buffer->IsDynamic()))
     232             :     {
     233           0 :         if (!m_HasNeededChunks)
     234           0 :             return;
     235             : 
     236             :         // If any chunks are out of sync with the current backend buffer, and are
     237             :         // needed for rendering this frame, we'll need to re-upload the backend buffer.
     238           0 :         bool needUpload = false;
     239           0 :         for (VBChunk* const& chunk : m_AllocList)
     240             :         {
     241           0 :             if (chunk->m_Dirty && chunk->m_Needed)
     242             :             {
     243           0 :                 needUpload = true;
     244           0 :                 break;
     245             :             }
     246             :         }
     247             : 
     248           0 :         if (needUpload)
     249             :         {
     250           0 :             deviceCommandContext->UploadBuffer(m_Buffer.get(), [&](u8* mappedData)
     251             :             {
     252             : #ifndef NDEBUG
     253             :                 // To help detect bugs where PrepareForRendering() was not called,
     254             :                 // force all not-needed data to 0, so things won't get rendered
     255             :                 // with undefined (but possibly still correct-looking) data.
     256           0 :                 memset(mappedData, 0, m_MaxVertices * m_VertexSize);
     257             : #endif
     258             : 
     259             :                 // Copy only the chunks we need. (This condition is helpful when
     260             :                 // the backend buffer contains data for every unit in the world,
     261             :                 // but only a handful are visible on screen and we don't need to
     262             :                 // bother copying the rest.)
     263           0 :                 for (VBChunk* const& chunk : m_AllocList)
     264           0 :                     if (chunk->m_Needed)
     265           0 :                         std::memcpy(mappedData + chunk->m_Index * m_VertexSize, chunk->m_BackingStore, chunk->m_Count * m_VertexSize);
     266           0 :             });
     267             : 
     268             :             // Anything we just uploaded is clean; anything else is dirty
     269             :             // since the rest of the backend buffer content is now undefined
     270           0 :             for (VBChunk* const& chunk : m_AllocList)
     271             :             {
     272           0 :                 if (chunk->m_Needed)
     273             :                 {
     274           0 :                     chunk->m_Dirty = false;
     275           0 :                     chunk->m_Needed = false;
     276             :                 }
     277             :                 else
     278           0 :                     chunk->m_Dirty = true;
     279             :             }
     280             :         }
     281             :         else
     282             :         {
     283             :             // Reset the flags for the next phase.
     284           0 :             for (VBChunk* const& chunk : m_AllocList)
     285           0 :                 chunk->m_Needed = false;
     286             :         }
     287             : 
     288           0 :         m_HasNeededChunks = false;
     289             :     }
     290             : }
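
For the streaming (dynamic) path, a minimal per-frame sketch of how the pieces above fit together, assuming a dynamic buffer with a caller-owned backing store; the variable names are illustrative:

    // 1. CPU-side data changed: mark the chunk dirty, no GPU work yet.
    dynamicBuffer.UpdateChunkVertices(chunk, backingStore);  // sets m_Dirty

    // 2. The chunk is known to be drawn this frame.
    dynamicBuffer.PrepareForRendering(chunk);                // sets m_Needed

    // 3. Just before drawing, re-upload the backend buffer only if some
    //    needed chunk is out of sync with its backing store.
    dynamicBuffer.UploadIfNeeded(deviceCommandContext);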
     291             : 
     292           0 : size_t CVertexBuffer::GetBytesReserved() const
     293             : {
     294           0 :     return MAX_VB_SIZE_BYTES;
     295             : }
     296             : 
     297           0 : size_t CVertexBuffer::GetBytesAllocated() const
     298             : {
     299           0 :     return (m_MaxVertices - m_FreeVertices) * m_VertexSize;
     300             : }
     301             : 
     302           0 : void CVertexBuffer::DumpStatus() const
     303             : {
     304           0 :     debug_printf("freeverts = %d\n", static_cast<int>(m_FreeVertices));
     305             : 
     306           0 :     size_t maxSize = 0;
     307           0 :     for (VBChunk* const& chunk : m_FreeList)
     308             :     {
     309           0 :         debug_printf("free chunk %p: size=%d\n", static_cast<void *>(chunk), static_cast<int>(chunk->m_Count));
     310           0 :         maxSize = std::max(chunk->m_Count, maxSize);
     311             :     }
     312           0 :     debug_printf("max size = %d\n", static_cast<int>(maxSize));
     313           0 : }
     314             : 
     315          24 : bool CVertexBuffer::UseStreaming(const bool dynamic)
     316             : {
     317          24 :     return dynamic;
     318             : }
     319             : 
     320           0 : void CVertexBuffer::PrepareForRendering(VBChunk* chunk)
     321             : {
     322           0 :     chunk->m_Needed = true;
     323           0 :     m_HasNeededChunks = true;
     324           3 : }

Generated by: LCOV version 1.13