Line data Source code
1 : /* Copyright (C) 2022 Wildfire Games.
2 : * This file is part of 0 A.D.
3 : *
4 : * 0 A.D. is free software: you can redistribute it and/or modify
5 : * it under the terms of the GNU General Public License as published by
6 : * the Free Software Foundation, either version 2 of the License, or
7 : * (at your option) any later version.
8 : *
9 : * 0 A.D. is distributed in the hope that it will be useful,
10 : * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 : * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 : * GNU General Public License for more details.
13 : *
14 : * You should have received a copy of the GNU General Public License
15 : * along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
16 : */
17 :
18 : #include "precompiled.h"
19 :
20 : #include "VertexBuffer.h"
21 :
22 : #include "lib/ogl.h"
23 : #include "lib/sysdep/cpu.h"
24 : #include "ps/CLogger.h"
25 : #include "ps/Errors.h"
26 : #include "ps/VideoMode.h"
27 : #include "renderer/backend/gl/Device.h"
28 : #include "renderer/Renderer.h"
29 :
30 : #include <algorithm>
31 : #include <cstring>
32 : #include <iterator>
33 :
// Absolute maximum (bytewise) size of each GL vertex buffer object.
// Make it large enough for the maximum feasible mesh size (64K vertexes,
// 64 bytes per vertex in InstancingModelRenderer).
// This is also the value reported by CVertexBuffer::GetBytesReserved().
// TODO: measure what influence this has on performance
constexpr std::size_t MAX_VB_SIZE_BYTES = 4 * 1024 * 1024;
39 :
40 0 : CVertexBuffer::CVertexBuffer(
41 : const char* name, const size_t vertexSize,
42 0 : const Renderer::Backend::GL::CBuffer::Type type, const bool dynamic)
43 0 : : CVertexBuffer(name, vertexSize, type, dynamic, MAX_VB_SIZE_BYTES)
44 : {
45 0 : }
46 :
47 0 : CVertexBuffer::CVertexBuffer(
48 : const char* name, const size_t vertexSize,
49 : const Renderer::Backend::GL::CBuffer::Type type, const bool dynamic,
50 0 : const size_t maximumBufferSize)
51 0 : : m_VertexSize(vertexSize), m_HasNeededChunks(false)
52 : {
53 0 : size_t size = maximumBufferSize;
54 :
55 0 : if (type == Renderer::Backend::GL::CBuffer::Type::VERTEX)
56 : {
57 : // We want to store 16-bit indices to any vertex in a buffer, so the
58 : // buffer must never be bigger than vertexSize*64K bytes since we can
59 : // address at most 64K of them with 16-bit indices
60 0 : size = std::min(size, vertexSize * 65536);
61 : }
62 0 : else if (type == Renderer::Backend::GL::CBuffer::Type::INDEX)
63 : {
64 0 : ENSURE(vertexSize == sizeof(u16));
65 : }
66 :
67 : // store max/free vertex counts
68 0 : m_MaxVertices = m_FreeVertices = size / vertexSize;
69 :
70 0 : m_Buffer = g_VideoMode.GetBackendDevice()->CreateBuffer(
71 0 : name, type, m_MaxVertices * m_VertexSize, dynamic);
72 :
73 : // create sole free chunk
74 0 : VBChunk* chunk = new VBChunk;
75 0 : chunk->m_Owner = this;
76 0 : chunk->m_Count = m_FreeVertices;
77 0 : chunk->m_Index = 0;
78 0 : m_FreeList.emplace_back(chunk);
79 0 : }
80 :
81 0 : CVertexBuffer::~CVertexBuffer()
82 : {
83 : // Must have released all chunks before destroying the buffer
84 0 : ENSURE(m_AllocList.empty());
85 :
86 0 : m_Buffer.reset();
87 :
88 0 : for (VBChunk* const& chunk : m_FreeList)
89 0 : delete chunk;
90 0 : }
91 :
92 0 : bool CVertexBuffer::CompatibleVertexType(
93 : const size_t vertexSize, const Renderer::Backend::GL::CBuffer::Type type,
94 : const bool dynamic) const
95 : {
96 0 : ENSURE(m_Buffer);
97 0 : return type == m_Buffer->GetType() && dynamic == m_Buffer->IsDynamic() && vertexSize == m_VertexSize;
98 : }
99 :
///////////////////////////////////////////////////////////////////////////////
// Allocate: try to allocate a chunk of the given number of vertices (each of
// the given size) with the given type/dynamic flag - returns null if this
// buffer is incompatible or no sufficiently large free chunk is available.
// backingStore is required (and retained) for streamed/dynamic buffers.
CVertexBuffer::VBChunk* CVertexBuffer::Allocate(
	const size_t vertexSize, const size_t numberOfVertices,
	const Renderer::Backend::GL::CBuffer::Type type, const bool dynamic,
	void* backingStore)
{
	// check this is the right kind of buffer
	if (!CompatibleVertexType(vertexSize, type, dynamic))
		return nullptr;

	// Streamed buffers are re-uploaded from the caller's backing store in
	// UploadIfNeeded(), so a backing store is mandatory in that case.
	if (UseStreaming(dynamic))
		ENSURE(backingStore != nullptr);

	// quick check there's enough vertices spare to allocate
	// (the free space may still be too fragmented; that is detected below)
	if (numberOfVertices > m_FreeVertices)
		return nullptr;

	// Search the free list: prefer an exact-size chunk (no split needed);
	// otherwise pick the LARGEST chunk that fits (worst-fit), so that the
	// remainder left after splitting stays as large and useful as possible.
	std::vector<VBChunk*>::iterator best_iter = m_FreeList.end();
	for (std::vector<VBChunk*>::iterator iter = m_FreeList.begin(); iter != m_FreeList.end(); ++iter)
	{
		if (numberOfVertices == (*iter)->m_Count)
		{
			best_iter = iter;
			break;
		}
		else if (numberOfVertices < (*iter)->m_Count && (best_iter == m_FreeList.end() || (*best_iter)->m_Count < (*iter)->m_Count))
			best_iter = iter;
	}

	// We could not find a large enough chunk.
	if (best_iter == m_FreeList.end())
		return nullptr;

	// Claim the chosen chunk: remove it from the free list and bookkeeping.
	VBChunk* chunk = *best_iter;
	m_FreeList.erase(best_iter);
	m_FreeVertices -= chunk->m_Count;

	chunk->m_BackingStore = backingStore;
	chunk->m_Dirty = false;
	chunk->m_Needed = false;

	// split chunk into two; - allocate a new chunk using all unused vertices in the
	// found chunk, and add it to the free list
	if (chunk->m_Count > numberOfVertices)
	{
		VBChunk* newchunk = new VBChunk;
		newchunk->m_Owner = this;
		newchunk->m_Count = chunk->m_Count - numberOfVertices;
		newchunk->m_Index = chunk->m_Index + numberOfVertices;
		m_FreeList.emplace_back(newchunk);
		m_FreeVertices += newchunk->m_Count;

		// resize given chunk
		chunk->m_Count = numberOfVertices;
	}

	// return found chunk
	m_AllocList.push_back(chunk);
	return chunk;
}
164 :
///////////////////////////////////////////////////////////////////////////////
// Release: return given chunk to this buffer's free list, coalescing it with
// any adjacent free chunks to limit fragmentation.
void CVertexBuffer::Release(VBChunk* chunk)
{
	// Update total free count before potentially modifying this chunk's count
	// (coalescing below grows chunk->m_Count).
	m_FreeVertices += chunk->m_Count;

	m_AllocList.erase(std::find(m_AllocList.begin(), m_AllocList.end(), chunk));

	// Sort the free list by buffer offset so adjacency can be detected.
	// Sorting O(nlogn) shouldn't be too far from O(n) by performance, because
	// the container is partly sorted already.
	std::sort(
		m_FreeList.begin(), m_FreeList.end(),
		[](const VBChunk* chunk1, const VBChunk* chunk2) -> bool
		{
			return chunk1->m_Index < chunk2->m_Index;
		});

	// Coalesce with any free-list items that are adjacent to this chunk;
	// merge the found chunk with the new one, and remove the old one
	// from the list. After a merge the released chunk has grown, so the
	// iterator steps back one element to re-test a neighbour that may now
	// have become adjacent.
	for (std::vector<VBChunk*>::iterator iter = m_FreeList.begin(); iter != m_FreeList.end();)
	{
		if ((*iter)->m_Index == chunk->m_Index + chunk->m_Count
			|| (*iter)->m_Index + (*iter)->m_Count == chunk->m_Index)
		{
			chunk->m_Index = std::min(chunk->m_Index, (*iter)->m_Index);
			chunk->m_Count += (*iter)->m_Count;
			delete *iter;
			iter = m_FreeList.erase(iter);
			if (!m_FreeList.empty() && iter != m_FreeList.begin())
				iter = std::prev(iter);
		}
		else
		{
			++iter;
		}
	}

	// Finally put the (possibly merged) chunk back on the free list.
	m_FreeList.emplace_back(chunk);
}
206 :
207 : ///////////////////////////////////////////////////////////////////////////////
208 : // UpdateChunkVertices: update vertex data for given chunk
209 0 : void CVertexBuffer::UpdateChunkVertices(VBChunk* chunk, void* data)
210 : {
211 0 : ENSURE(m_Buffer);
212 0 : if (UseStreaming(m_Buffer->IsDynamic()))
213 : {
214 : // The VBO is now out of sync with the backing store
215 0 : chunk->m_Dirty = true;
216 :
217 : // Sanity check: Make sure the caller hasn't tried to reallocate
218 : // their backing store
219 0 : ENSURE(data == chunk->m_BackingStore);
220 : }
221 : else
222 : {
223 0 : ENSURE(data);
224 0 : g_Renderer.GetDeviceCommandContext()->UploadBufferRegion(
225 0 : m_Buffer.get(), data, chunk->m_Index * m_VertexSize, chunk->m_Count * m_VertexSize);
226 : }
227 0 : }
228 :
// Re-uploads streamed buffer content from the chunks' backing stores, but
// only when at least one chunk flagged via PrepareForRendering() is dirty.
// Non-streamed buffers were already uploaded in UpdateChunkVertices(), so
// this is a no-op for them.
void CVertexBuffer::UploadIfNeeded(
	Renderer::Backend::GL::CDeviceCommandContext* deviceCommandContext)
{
	if (UseStreaming(m_Buffer->IsDynamic()))
	{
		// Nothing was flagged via PrepareForRendering() since the last upload.
		if (!m_HasNeededChunks)
			return;

		// If any chunks are out of sync with the current VBO, and are
		// needed for rendering this frame, we'll need to re-upload the VBO
		bool needUpload = false;
		for (VBChunk* const& chunk : m_AllocList)
		{
			if (chunk->m_Dirty && chunk->m_Needed)
			{
				needUpload = true;
				break;
			}
		}

		if (needUpload)
		{
			deviceCommandContext->UploadBuffer(m_Buffer.get(), [&](u8* mappedData)
			{
#ifndef NDEBUG
				// To help detect bugs where PrepareForRendering() was not called,
				// force all not-needed data to 0, so things won't get rendered
				// with undefined (but possibly still correct-looking) data.
				memset(mappedData, 0, m_MaxVertices * m_VertexSize);
#endif

				// Copy only the chunks we need. (This condition is helpful when
				// the VBO contains data for every unit in the world, but only a
				// handful are visible on screen and we don't need to bother copying
				// the rest.)
				for (VBChunk* const& chunk : m_AllocList)
					if (chunk->m_Needed)
						std::memcpy(mappedData + chunk->m_Index * m_VertexSize, chunk->m_BackingStore, chunk->m_Count * m_VertexSize);
			});

			// Anything we just uploaded is clean; anything else is dirty
			// since the rest of the VBO content is now undefined
			for (VBChunk* const& chunk : m_AllocList)
			{
				if (chunk->m_Needed)
				{
					chunk->m_Dirty = false;
					chunk->m_Needed = false;
				}
				else
					chunk->m_Dirty = true;
			}
		}
		else
		{
			// Reset the flags for the next phase.
			for (VBChunk* const& chunk : m_AllocList)
				chunk->m_Needed = false;
		}

		m_HasNeededChunks = false;
	}
}
292 :
293 : // Bind: bind to this buffer; return pointer to address required as parameter
294 : // to glVertexPointer ( + etc) calls
295 : u8* CVertexBuffer::Bind(
296 : Renderer::Backend::GL::CDeviceCommandContext* deviceCommandContext)
297 : {
298 0 : UploadIfNeeded(deviceCommandContext);
299 0 : deviceCommandContext->BindBuffer(m_Buffer->GetType(), m_Buffer.get());
300 0 : return nullptr;
301 0 : }
302 :
303 : void CVertexBuffer::Unbind(
304 : Renderer::Backend::GL::CDeviceCommandContext* deviceCommandContext)
305 0 : {
306 : deviceCommandContext->BindBuffer(
307 0 : Renderer::Backend::GL::CBuffer::Type::VERTEX, nullptr);
308 : deviceCommandContext->BindBuffer(
309 0 : Renderer::Backend::GL::CBuffer::Type::INDEX, nullptr);
310 0 : }
311 :
312 : size_t CVertexBuffer::GetBytesReserved() const
313 0 : {
314 : return MAX_VB_SIZE_BYTES;
315 : }
316 :
317 : size_t CVertexBuffer::GetBytesAllocated() const
318 : {
319 0 : return (m_MaxVertices - m_FreeVertices) * m_VertexSize;
320 0 : }
321 :
322 : void CVertexBuffer::DumpStatus() const
323 0 : {
324 : debug_printf("freeverts = %d\n", static_cast<int>(m_FreeVertices));
325 :
326 : size_t maxSize = 0;
327 : for (VBChunk* const& chunk : m_FreeList)
328 : {
329 0 : debug_printf("free chunk %p: size=%d\n", static_cast<void *>(chunk), static_cast<int>(chunk->m_Count));
330 : maxSize = std::max(chunk->m_Count, maxSize);
331 : }
332 0 : debug_printf("max size = %d\n", static_cast<int>(maxSize));
333 0 : }
334 0 :
335 : bool CVertexBuffer::UseStreaming(const bool dynamic)
336 : {
337 0 : return dynamic;
338 : }
339 :
340 0 : void CVertexBuffer::PrepareForRendering(VBChunk* chunk)
341 : {
342 0 : chunk->m_Needed = true;
343 : m_HasNeededChunks = true;
344 0 : }
|