 
 #include "impeller/core/host_buffer.h"
 
-#include <algorithm>
 #include <cstring>
-
-#include "flutter/fml/logging.h"
+#include <tuple>
 
 #include "impeller/core/allocator.h"
 #include "impeller/core/buffer_view.h"
 #include "impeller/core/device_buffer.h"
+#include "impeller/core/device_buffer_descriptor.h"
+#include "impeller/core/formats.h"
 
 namespace impeller {
 
-std::shared_ptr<HostBuffer> HostBuffer::Create() {
-  return std::shared_ptr<HostBuffer>(new HostBuffer());
+constexpr size_t kAllocatorBlockSize = 1024000;  // 1024 KB.
+
+std::shared_ptr<HostBuffer> HostBuffer::Create(
+    const std::shared_ptr<Allocator>& allocator) {
+  return std::shared_ptr<HostBuffer>(new HostBuffer(allocator));
 }
 
-HostBuffer::HostBuffer() = default;
+HostBuffer::HostBuffer(const std::shared_ptr<Allocator>& allocator)
+    : allocator_(allocator) {
+  DeviceBufferDescriptor desc;
+  desc.size = kAllocatorBlockSize;
+  desc.storage_mode = StorageMode::kHostVisible;
+  for (auto i = 0u; i < kHostBufferArenaSize; i++) {
+    device_buffers_[i].push_back(allocator->CreateBuffer(desc));
+  }
+}
 
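The constructor pre-allocates one host-visible block for each of the kHostBufferArenaSize arena slots. For orientation, here is a minimal sketch of the member state this file assumes on the header side; the names are taken from the usages in this diff, and the arena size value is an assumption (the authoritative declarations live in impeller/core/host_buffer.h):

// Sketch of the assumed HostBuffer members (illustrative, not the committed
// header; the arena size shown is an assumption).
static constexpr size_t kHostBufferArenaSize = 3u;

std::array<std::vector<std::shared_ptr<DeviceBuffer>>, kHostBufferArenaSize>
    device_buffers_;  // One growable list of blocks per in-flight frame.
size_t current_buffer_ = 0u;  // Index of the block currently written to.
size_t offset_ = 0u;          // Write offset within the current block.
size_t frame_index_ = 0u;     // Arena slot used by the current frame.
std::shared_ptr<Allocator> allocator_;
std::string label_;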
 
 HostBuffer::~HostBuffer() = default;
 
 void HostBuffer::SetLabel(std::string label) {
-  state_->label = std::move(label);
+  label_ = std::move(label);
 }
 
 
 BufferView HostBuffer::Emplace(const void* buffer,
                                size_t length,
                                size_t align) {
-  auto [device_buffer, range] = state_->Emplace(buffer, length, align);
+  auto [data, range, device_buffer] = EmplaceInternal(buffer, length, align);
   if (!device_buffer) {
     return {};
   }
-  return BufferView{state_, device_buffer, range};
+  return BufferView{std::move(device_buffer), data, range};
 }
 
 BufferView HostBuffer::Emplace(const void* buffer, size_t length) {
-  auto [device_buffer, range] = state_->Emplace(buffer, length);
+  auto [data, range, device_buffer] = EmplaceInternal(buffer, length);
   if (!device_buffer) {
     return {};
   }
-  return BufferView{state_, device_buffer, range};
+  return BufferView{std::move(device_buffer), data, range};
 }
 
 BufferView HostBuffer::Emplace(size_t length,
                                size_t align,
                                const EmplaceProc& cb) {
-  auto [buffer, range] = state_->Emplace(length, align, cb);
-  if (!buffer) {
+  auto [data, range, device_buffer] = EmplaceInternal(length, align, cb);
+  if (!device_buffer) {
     return {};
   }
-  return BufferView{state_, buffer, range};
-}
-
-std::shared_ptr<const DeviceBuffer> HostBuffer::GetDeviceBuffer(
-    Allocator& allocator) const {
-  return state_->GetDeviceBuffer(allocator);
+  return BufferView{std::move(device_buffer), data, range};
 }
 
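All three Emplace overloads now return a BufferView that carries the backing DeviceBuffer directly, rather than a handle to the old shared HostBufferState. A rough sketch of the shape this implies for BufferView, inferred only from the brace-initialization above (the real definition is in impeller/core/buffer_view.h):

// Assumed field order, matching BufferView{std::move(device_buffer), data,
// range} above. Illustrative only.
struct BufferView {
  std::shared_ptr<const DeviceBuffer> buffer;  // Owns the backing allocation.
  uint8_t* contents = nullptr;                 // Host-visible mapping.
  Range range;                                 // Slice of |buffer| for this view.
};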
-void HostBuffer::Reset() {
-  state_->Reset();
+HostBuffer::TestStateQuery HostBuffer::GetStateForTest() {
+  return HostBuffer::TestStateQuery{
+      .current_frame = frame_index_,
+      .current_buffer = current_buffer_,
+      .total_buffer_count = device_buffers_[frame_index_].size(),
+  };
 }
 
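GetStateForTest exposes the arena bookkeeping so unit tests can assert on it. An illustrative assertion sequence, assuming googletest and a hypothetical |allocator| and |bytes| payload:

// Illustrative test usage (googletest assumed; |allocator| and |bytes| are
// hypothetical stand-ins).
auto host_buffer = HostBuffer::Create(allocator);
host_buffer->Emplace(bytes, 1024u);
auto state = host_buffer->GetStateForTest();
EXPECT_EQ(state.current_frame, 0u);       // No Reset() calls yet.
EXPECT_EQ(state.current_buffer, 0u);      // Still inside the first block.
EXPECT_EQ(state.total_buffer_count, 1u);  // One block pre-allocated per slot.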
-size_t HostBuffer::GetSize() const {
-  return state_->GetReservedLength();
-}
-
-size_t HostBuffer::GetLength() const {
-  return state_->GetLength();
+void HostBuffer::MaybeCreateNewBuffer(size_t required_size) {
+  current_buffer_++;
+  if (current_buffer_ >= device_buffers_[frame_index_].size()) {
+    FML_DCHECK(required_size <= kAllocatorBlockSize);
+    DeviceBufferDescriptor desc;
+    desc.size = kAllocatorBlockSize;
+    desc.storage_mode = StorageMode::kHostVisible;
+    device_buffers_[frame_index_].push_back(allocator_->CreateBuffer(desc));
+  }
+  offset_ = 0;
 }
 
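MaybeCreateNewBuffer advances to the next block in the current frame's list, growing the list only when no spare block exists. The helpers it leans on, GetLength and GetCurrentBuffer, are no longer defined in this file (the old GetLength definition is deleted above), so they presumably moved inline into the header. Plausible one-liners consistent with the bookkeeping in this diff, offered as assumptions rather than the committed code:

// Assumed helpers, inferred from how offset_ and current_buffer_ are used.
size_t HostBuffer::GetLength() const {
  return offset_;  // Bytes already written into the current block.
}

std::shared_ptr<DeviceBuffer> HostBuffer::GetCurrentBuffer() const {
  return device_buffers_[frame_index_][current_buffer_];
}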
-std::pair<uint8_t*, Range> HostBuffer::HostBufferState::Emplace(
-    size_t length,
-    size_t align,
-    const EmplaceProc& cb) {
+std::tuple<uint8_t*, Range, std::shared_ptr<DeviceBuffer>>
+HostBuffer::EmplaceInternal(size_t length,
+                            size_t align,
+                            const EmplaceProc& cb) {
   if (!cb) {
     return {};
   }
+
+  // If the requested allocation is bigger than the block size, create a
+  // one-off device buffer and write to that.
+  if (length > kAllocatorBlockSize) {
+    DeviceBufferDescriptor desc;
+    desc.size = length;
+    desc.storage_mode = StorageMode::kHostVisible;
+    auto device_buffer = allocator_->CreateBuffer(desc);
+    if (!device_buffer) {
+      return {};
+    }
+    if (cb) {
+      cb(device_buffer->OnGetContents());
+      device_buffer->Flush(Range{0, length});
+    }
+    return std::make_tuple(device_buffer->OnGetContents(), Range{0, length},
+                           device_buffer);
+  }
+
   auto old_length = GetLength();
-  if (!Truncate(old_length + length)) {
-    return {};
+  if (old_length + length > kAllocatorBlockSize) {
+    MaybeCreateNewBuffer(length);
   }
-  generation++;
-  cb(GetBuffer() + old_length);
+  old_length = GetLength();
+
+  auto current_buffer = GetCurrentBuffer();
+  cb(current_buffer->OnGetContents() + old_length);
+  current_buffer->Flush(Range{old_length, length});
 
-  return std::make_pair(GetBuffer(), Range{old_length, length});
+  offset_ += length;
+  auto contents = current_buffer->OnGetContents();
+  return std::make_tuple(contents, Range{old_length, length},
+                         std::move(current_buffer));
 }
 
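The callback overload lets callers write straight into the mapped block, avoiding a staging copy through host memory. A hypothetical caller, where the UniformData type and its values are purely illustrative:

// Hypothetical usage of the callback-based Emplace; UniformData is made up.
struct UniformData {
  float scale = 1.0f;
  float opacity = 1.0f;
};

UniformData data = {2.0f, 0.5f};
BufferView view = host_buffer->Emplace(
    sizeof(UniformData), alignof(UniformData),
    [&data](uint8_t* contents) {
      std::memcpy(contents, &data, sizeof(UniformData));  // Write in place.
    });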
-std::shared_ptr<const DeviceBuffer>
-HostBuffer::HostBufferState::GetDeviceBuffer(Allocator& allocator) const {
-  if (generation == device_buffer_generation) {
-    return device_buffer;
-  }
-  auto new_buffer = allocator.CreateBufferWithCopy(GetBuffer(), GetLength());
-  if (!new_buffer) {
-    return nullptr;
+std::tuple<uint8_t*, Range, std::shared_ptr<DeviceBuffer>>
+HostBuffer::EmplaceInternal(const void* buffer, size_t length) {
+  // If the requested allocation is bigger than the block size, create a
+  // one-off device buffer and write to that.
+  if (length > kAllocatorBlockSize) {
+    DeviceBufferDescriptor desc;
+    desc.size = length;
+    desc.storage_mode = StorageMode::kHostVisible;
+    auto device_buffer = allocator_->CreateBuffer(desc);
+    if (!device_buffer) {
+      return {};
+    }
+    if (buffer) {
+      if (!device_buffer->CopyHostBuffer(static_cast<const uint8_t*>(buffer),
+                                         Range{0, length})) {
+        return {};
+      }
+    }
+    return std::make_tuple(device_buffer->OnGetContents(), Range{0, length},
+                           device_buffer);
   }
-  new_buffer->SetLabel(label);
-  device_buffer_generation = generation;
-  device_buffer = std::move(new_buffer);
-  return device_buffer;
-}
 
-std::pair<uint8_t*, Range> HostBuffer::HostBufferState::Emplace(
-    const void* buffer,
-    size_t length) {
   auto old_length = GetLength();
-  if (!Truncate(old_length + length)) {
-    return {};
+  if (old_length + length > kAllocatorBlockSize) {
+    MaybeCreateNewBuffer(length);
   }
-  generation++;
+  old_length = GetLength();
+
+  auto current_buffer = GetCurrentBuffer();
   if (buffer) {
-    ::memmove(GetBuffer() + old_length, buffer, length);
+    ::memmove(current_buffer->OnGetContents() + old_length, buffer, length);
+    current_buffer->Flush(Range{old_length, length});
   }
-  return std::make_pair(GetBuffer(), Range{old_length, length});
+  offset_ += length;
+  auto contents = current_buffer->OnGetContents();
+  return std::make_tuple(contents, Range{old_length, length},
+                         std::move(current_buffer));
 }
 
-std::pair<uint8_t*, Range> HostBuffer::HostBufferState::Emplace(
-    const void* buffer,
-    size_t length,
-    size_t align) {
+std::tuple<uint8_t*, Range, std::shared_ptr<DeviceBuffer>>
+HostBuffer::EmplaceInternal(const void* buffer, size_t length, size_t align) {
   if (align == 0 || (GetLength() % align) == 0) {
-    return Emplace(buffer, length);
+    return EmplaceInternal(buffer, length);
   }
 
   {
-    auto [buffer, range] = Emplace(nullptr, align - (GetLength() % align));
+    auto [buffer, range, device_buffer] =
+        EmplaceInternal(nullptr, align - (GetLength() % align));
     if (!buffer) {
       return {};
     }
   }
 
-  return Emplace(buffer, length);
+  return EmplaceInternal(buffer, length);
 }
 
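The aligned overload first emplaces a throwaway padding run to move the write cursor to a multiple of align, then emplaces the payload. A quick worked example with illustrative numbers:

// Worked example (illustrative numbers): suppose GetLength() == 37 and
// align == 16. Then 37 % 16 == 5, so EmplaceInternal(nullptr, 16 - 5)
// reserves 11 bytes of padding without copying anything, moving the cursor
// to offset 48. The payload then lands at 48, which is 16-byte aligned.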
-void HostBuffer::HostBufferState::Reset() {
-  generation += 1;
-  device_buffer = nullptr;
-  bool did_truncate = Truncate(0);
-  FML_CHECK(did_truncate);
+void HostBuffer::Reset() {
+  // When resetting the host buffer state at the end of the frame, check if
+  // there are any unused buffers and remove them.
+  while (device_buffers_[frame_index_].size() > current_buffer_ + 1) {
+    device_buffers_[frame_index_].pop_back();
+  }
+
+  offset_ = 0u;
+  current_buffer_ = 0u;
+  frame_index_ = (frame_index_ + 1) % kHostBufferArenaSize;
 }
 
 }  // namespace impeller
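Reset trims any blocks the frame never reached, then rotates to the next arena slot, so a slot is only rewritten after kHostBufferArenaSize further Reset calls, presumably giving the GPU time to finish reading the previous contents. A hedged sketch of the per-frame lifecycle this enables (the surrounding context object and data are assumptions):

// Illustrative per-frame lifecycle; |context| and |data| are hypothetical.
auto host_buffer = HostBuffer::Create(context->GetResourceAllocator());
for (;;) {  // Once per frame:
  BufferView uniforms = host_buffer->Emplace(&data, sizeof(data), 16u);
  // ... encode and submit commands that read from |uniforms| ...
  host_buffer->Reset();  // Rotate to the next arena slot.
}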