// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may
//     be used to endorse or promote products derived from this software
//     without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_POOL_MANAGER_IMPL_H_
#define VIXL_POOL_MANAGER_IMPL_H_

#include <algorithm>
#include <limits>

#include "assembler-base-vixl.h"
#include "pool-manager.h"

namespace vixl {


template <typename T>
T PoolManager<T>::Emit(MacroAssemblerInterface* masm,
                       T pc,
                       int num_bytes,
                       ForwardReference<T>* new_reference,
                       LocationBase<T>* new_object,
                       EmitOption option) {
  // Make sure that the buffer still has the alignment we think it does.
  VIXL_ASSERT(IsAligned(masm->AsAssemblerBase()
                            ->GetBuffer()
                            ->GetStartAddress<uintptr_t>(),
                        buffer_alignment_));

  // We should not call this method when the pools are blocked.
  VIXL_ASSERT(!IsBlocked());
  if (objects_.empty()) return pc;

  // Emit header.
  if (option == kBranchRequired) {
    masm->EmitPoolHeader();
    // TODO: The pc at this point might not actually be aligned according to
    // alignment_. This is to support the current AArch32 MacroAssembler,
    // which does not have a fixed-size instruction set. In practice, the pc
    // will be aligned to the alignment that instructions of the current
    // instruction set need, so we do not need to align it here. All other
    // calculations do take the alignment into account, which only makes the
    // checkpoint calculations more conservative when we use T32. Uncomment
    // the following assertion if the AArch32 MacroAssembler is modified to
    // only support one ISA at a time.
    // VIXL_ASSERT(pc == AlignUp(pc, alignment_));
    pc += header_size_;
  } else {
    // If the header is optional, we might need to add some extra padding to
    // meet the minimum location of the first object.
    if (pc < objects_[0].min_location_) {
      int32_t padding = objects_[0].min_location_ - pc;
      masm->EmitNopBytes(padding);
      pc += padding;
    }
  }

  PoolObject<T>* existing_object = GetObjectIfTracked(new_object);

  // Go through all objects and emit one by one.
  for (objects_iter iter = objects_.begin(); iter != objects_.end();) {
    PoolObject<T>& current = *iter;
    if (ShouldSkipObject(&current,
                         pc,
                         num_bytes,
                         new_reference,
                         new_object,
                         existing_object)) {
      ++iter;
      continue;
    }
    LocationBase<T>* label_base = current.label_base_;
    T aligned_pc = AlignUp(pc, current.alignment_);
    masm->EmitPaddingBytes(aligned_pc - pc);
    pc = aligned_pc;
    VIXL_ASSERT(pc >= current.min_location_);
    VIXL_ASSERT(pc <= current.max_location_);
    // First call SetLocation, which will also resolve the references, and then
    // call EmitPoolObject, which might add a new reference.
    label_base->SetLocation(masm->AsAssemblerBase(), pc);
    label_base->EmitPoolObject(masm);
    int object_size = label_base->GetPoolObjectSizeInBytes();
    if (label_base->ShouldDeletePoolObjectOnPlacement()) {
      label_base->MarkBound();
      iter = RemoveAndDelete(iter);
    } else {
      VIXL_ASSERT(!current.label_base_->ShouldDeletePoolObjectOnPlacement());
      current.label_base_->UpdatePoolObject(&current);
      VIXL_ASSERT(current.alignment_ >= label_base->GetPoolObjectAlignment());
      ++iter;
    }
    pc += object_size;
  }

  // Recalculate the checkpoint before emitting the footer. The footer might
  // call Bind() which will check if we need to emit.
  RecalculateCheckpoint();

  // Always emit footer - this might add some padding.
  masm->EmitPoolFooter();
  pc = AlignUp(pc, alignment_);

  return pc;
}
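
// Illustrative driver loop (a sketch under assumed usage, not a statement of
// the actual VIXL macro-assembler integration): a macro-assembler would
// typically query MustEmit() before generating code that adds a new forward
// reference, and call Emit() when the pools become due. kInstrSize, ref and
// literal below are hypothetical names used only for the example:
//
//   if (pool_manager.MustEmit(pc, kInstrSize, &ref, &literal)) {
//     pc = pool_manager.Emit(masm, pc, kInstrSize, &ref, &literal,
//                            kBranchRequired);
//   }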

template <typename T>
bool PoolManager<T>::ShouldSkipObject(PoolObject<T>* pool_object,
                                      T pc,
                                      int num_bytes,
                                      ForwardReference<T>* new_reference,
                                      LocationBase<T>* new_object,
                                      PoolObject<T>* existing_object) const {
  // We assume that all objects before this have been skipped and all objects
  // after this will be emitted, therefore we will emit the whole pool. Add
  // the header size and alignment, as well as the number of bytes we are
  // planning to emit.
  T max_actual_location = pc + num_bytes + max_pool_size_;

  if (new_reference != NULL) {
    // If we're adding a new object, also assume that it will have to be
    // emitted before the object we are considering skipping.
    VIXL_ASSERT(new_object != NULL);
    T new_object_alignment = std::max(new_reference->object_alignment_,
                                      new_object->GetPoolObjectAlignment());
    if ((existing_object != NULL) &&
        (existing_object->alignment_ > new_object_alignment)) {
      new_object_alignment = existing_object->alignment_;
    }
    max_actual_location +=
        (new_object->GetPoolObjectSizeInBytes() + new_object_alignment - 1);
  }

  // Hard limit.
  if (max_actual_location >= pool_object->max_location_) return false;

  // Use heuristic.
  return (pc < pool_object->skip_until_location_hint_);
}
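
// Worked example with made-up numbers: with pc == 100, num_bytes == 4 and
// max_pool_size_ == 60, max_actual_location is 164 before accounting for any
// new reference. An object that must be placed at or before location 160
// cannot be skipped (164 >= 160 hits the hard limit above), while an object
// reachable up to location 200 may still be skipped if pc has not yet passed
// its skip_until_location_hint_.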

template <typename T>
T PoolManager<T>::UpdateCheckpointForObject(T checkpoint,
                                            const PoolObject<T>* object) {
  checkpoint -= object->label_base_->GetPoolObjectSizeInBytes();
  if (checkpoint > object->max_location_) checkpoint = object->max_location_;
  checkpoint = AlignDown(checkpoint, object->alignment_);
  return checkpoint;
}
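
// For example (made-up numbers): starting from checkpoint == 1000, an object
// of size 8 with max_location_ == 900 and alignment_ == 4 first brings the
// checkpoint down to 992, which is then capped at 900 and finally aligned
// down to 900 (already a multiple of 4).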

template <typename T>
static T MaxCheckpoint() {
  return std::numeric_limits<T>::max();
}

template <typename T>
static inline bool CheckCurrentPC(T pc, T checkpoint) {
  VIXL_ASSERT(pc <= checkpoint);
  // We must emit the pools if we are at the checkpoint now.
  return pc == checkpoint;
}

template <typename T>
static inline bool CheckFuturePC(T pc, T checkpoint) {
  // We do not need to emit the pools now if the projected future PC will be
  // equal to the checkpoint (we will need to emit the pools then).
  return pc > checkpoint;
}
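
// The asymmetry between the two helpers above is deliberate: being exactly
// at the checkpoint (pc == checkpoint) forces emission now, while a future
// pc that merely lands on the checkpoint (pc + num_bytes == checkpoint) does
// not - the pools will be emitted once the pc actually gets there.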

template <typename T>
bool PoolManager<T>::MustEmit(T pc,
                              int num_bytes,
                              ForwardReference<T>* reference,
                              LocationBase<T>* label_base) const {
  // Check if we are at or past the checkpoint.
  if (CheckCurrentPC(pc, checkpoint_)) return true;

  // Check if the future PC will be past the checkpoint.
  pc += num_bytes;
  if (CheckFuturePC(pc, checkpoint_)) return true;

  // No new reference - nothing to do.
  if (reference == NULL) {
    VIXL_ASSERT(label_base == NULL);
    return false;
  }

  if (objects_.empty()) {
    // Basic assertions that restrictions on the new (and only) reference are
    // possible to satisfy.
    VIXL_ASSERT(AlignUp(pc + header_size_, alignment_) >=
                reference->min_object_location_);
    VIXL_ASSERT(pc <= reference->max_object_location_);
    return false;
  }

  // Check if the object is already being tracked.
  const PoolObject<T>* existing_object = GetObjectIfTracked(label_base);
  if (existing_object != NULL) {
    // If the existing_object is already in objects_ and its new alignment
    // and new location restrictions are not stricter, skip the more
    // expensive check.
    if ((reference->min_object_location_ <= existing_object->min_location_) &&
        (reference->max_object_location_ >= existing_object->max_location_) &&
        (reference->object_alignment_ <= existing_object->alignment_)) {
      return false;
    }
  }

  // Create a temporary object.
  PoolObject<T> temp(label_base);
  temp.RestrictRange(reference->min_object_location_,
                     reference->max_object_location_);
  temp.RestrictAlignment(reference->object_alignment_);
  if (existing_object != NULL) {
    temp.RestrictRange(existing_object->min_location_,
                       existing_object->max_location_);
    temp.RestrictAlignment(existing_object->alignment_);
  }

  // Check if the new reference can be added after the end of the current pool.
  // If yes, we don't need to emit.
  T last_reachable = AlignDown(temp.max_location_, temp.alignment_);
  const PoolObject<T>& last = objects_.back();
  T after_pool = AlignDown(last.max_location_, last.alignment_) +
                 last.label_base_->GetPoolObjectSizeInBytes();
  // The current object can be placed at the end of the pool, even if the last
  // object is placed at the last possible location.
  if (last_reachable >= after_pool) return false;
  // The current object can be placed after the code we are about to emit and
  // after the existing pool (with a pessimistic size estimate).
  if (last_reachable >= pc + num_bytes + max_pool_size_) return false;

  // We're not in a trivial case, so we need to recalculate the checkpoint.

  // Check (conservatively) if we can fit it into the objects_ array, without
  // breaking our assumptions. Here we want to recalculate the checkpoint as
  // if the new reference was added to the PoolManager but without actually
  // adding it (as removing it is non-trivial).

  T checkpoint = MaxCheckpoint<T>();
  // Will temp be the last object in objects_?
  if (PoolObjectLessThan(last, temp)) {
    checkpoint = UpdateCheckpointForObject(checkpoint, &temp);
    if (checkpoint < temp.min_location_) return true;
  }

  bool temp_not_placed_yet = true;
  for (int i = static_cast<int>(objects_.size()) - 1; i >= 0; --i) {
    const PoolObject<T>& current = objects_[i];
    if (temp_not_placed_yet && PoolObjectLessThan(current, temp)) {
      checkpoint = UpdateCheckpointForObject(checkpoint, &temp);
      if (checkpoint < temp.min_location_) return true;
      if (CheckFuturePC(pc, checkpoint)) return true;
      temp_not_placed_yet = false;
    }
    if (current.label_base_ == label_base) continue;
    checkpoint = UpdateCheckpointForObject(checkpoint, &current);
    if (checkpoint < current.min_location_) return true;
    if (CheckFuturePC(pc, checkpoint)) return true;
  }
  // temp is the object with the smallest max_location_.
  if (temp_not_placed_yet) {
    checkpoint = UpdateCheckpointForObject(checkpoint, &temp);
    if (checkpoint < temp.min_location_) return true;
  }

  // Take the header into account.
  checkpoint -= header_size_;
  checkpoint = AlignDown(checkpoint, alignment_);

  return CheckFuturePC(pc, checkpoint);
}

template <typename T>
void PoolManager<T>::RecalculateCheckpoint(SortOption sort_option) {
  // TODO: Improve the max_pool_size_ estimate by starting from the
  // min_location_ of the first object, calculating the end of the pool as if
  // all objects were placed starting from there, and in the end adding the
  // maximum object alignment found minus one (which is the maximum extra
  // padding we would need if we were to relocate the pool to a different
  // address).
  max_pool_size_ = 0;

  if (objects_.empty()) {
    checkpoint_ = MaxCheckpoint<T>();
    return;
  }

  // Sort objects by their max_location_.
  if (sort_option == kSortRequired) {
    std::sort(objects_.begin(), objects_.end(), PoolObjectLessThan);
  }

  // Add the header size and the maximum header and footer alignment padding
  // to the maximum pool size.
  max_pool_size_ += header_size_ + 2 * (alignment_ - 1);

  T checkpoint = MaxCheckpoint<T>();
  int last_object_index = static_cast<int>(objects_.size()) - 1;
  for (int i = last_object_index; i >= 0; --i) {
    // Bring back the checkpoint by the size of the current object, unless
    // we need to bring it back more, then align.
    PoolObject<T>& current = objects_[i];
    checkpoint = UpdateCheckpointForObject(checkpoint, &current);
    VIXL_ASSERT(checkpoint >= current.min_location_);
    max_pool_size_ += (current.alignment_ - 1 +
                       current.label_base_->GetPoolObjectSizeInBytes());
  }
  // Take the header into account.
  checkpoint -= header_size_;
  checkpoint = AlignDown(checkpoint, alignment_);

  // Update the checkpoint of the pool manager.
  checkpoint_ = checkpoint;

  // NOTE: To handle min_location_ in the generic case, we could make a second
  // pass over the objects_ vector, increasing the checkpoint as needed, while
  // maintaining the alignment requirements.
  // It should not be possible to have any issues with min_location_ with
  // actual code, since there should always be some kind of branch over the
  // pool, whether introduced by the pool emission or by the user, which will
  // make sure the min_location_ requirement is satisfied. It's possible that
  // the user could emit code in the literal pool and intentionally load the
  // first value and then fall through into the pool, but that is not a
  // supported use of VIXL and we will assert in that case.
}
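
// Worked example of the pessimistic size estimate (made-up numbers): with
// header_size_ == 4 and alignment_ == 4, the base cost is 4 + 2 * 3 = 10
// bytes. Two tracked objects of sizes 4 and 8, each with alignment 4, add
// (3 + 4) and (3 + 8) bytes respectively, for a max_pool_size_ of 28 bytes.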

template <typename T>
bool PoolManager<T>::PoolObjectLessThan(const PoolObject<T>& a,
                                        const PoolObject<T>& b) {
  if (a.max_location_ != b.max_location_)
    return (a.max_location_ < b.max_location_);
  int a_size = a.label_base_->GetPoolObjectSizeInBytes();
  int b_size = b.label_base_->GetPoolObjectSizeInBytes();
  if (a_size != b_size) return (a_size < b_size);
  if (a.alignment_ != b.alignment_) return (a.alignment_ < b.alignment_);
  if (a.min_location_ != b.min_location_)
    return (a.min_location_ < b.min_location_);
  return false;
}
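
// Keeping objects_ sorted with this comparison puts the object with the
// nearest deadline (smallest max_location_) first, so emission visits
// objects in deadline order and the backwards loop in
// RecalculateCheckpoint() walks from the farthest deadline to the nearest;
// the size, alignment and min_location_ tie-breakers only make the order
// deterministic.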

template <typename T>
void PoolManager<T>::AddObjectReference(const ForwardReference<T>* reference,
                                        LocationBase<T>* label_base) {
  VIXL_ASSERT(reference->object_alignment_ <= buffer_alignment_);
  VIXL_ASSERT(label_base->GetPoolObjectAlignment() <= buffer_alignment_);

  PoolObject<T>* object = GetObjectIfTracked(label_base);

  if (object == NULL) {
    PoolObject<T> new_object(label_base);
    new_object.RestrictRange(reference->min_object_location_,
                             reference->max_object_location_);
    new_object.RestrictAlignment(reference->object_alignment_);
    Insert(new_object);
  } else {
    object->RestrictRange(reference->min_object_location_,
                          reference->max_object_location_);
    object->RestrictAlignment(reference->object_alignment_);

    // Move the object, if needed.
    if (objects_.size() != 1) {
      PoolObject<T> new_object(*object);
      ptrdiff_t distance = std::distance(objects_.data(), object);
      objects_.erase(objects_.begin() + distance);
      Insert(new_object);
    }
  }
  // No need to sort; we inserted the object into an already sorted array.
  RecalculateCheckpoint(kNoSortRequired);
}

template <typename T>
void PoolManager<T>::Insert(const PoolObject<T>& new_object) {
  bool inserted = false;
  // Place the object in the right position.
  for (objects_iter iter = objects_.begin(); iter != objects_.end(); ++iter) {
    PoolObject<T>& current = *iter;
    if (!PoolObjectLessThan(current, new_object)) {
      objects_.insert(iter, new_object);
      inserted = true;
      break;
    }
  }
  if (!inserted) {
    objects_.push_back(new_object);
  }
}
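
// The linear scan above amounts to a lower-bound insertion into the sorted
// vector. A minimal sketch of the same operation using <algorithm> (an
// equivalent formulation, not the upstream implementation):
//
//   objects_.insert(std::lower_bound(objects_.begin(),
//                                    objects_.end(),
//                                    new_object,
//                                    PoolObjectLessThan),
//                   new_object);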

template <typename T>
void PoolManager<T>::RemoveAndDelete(PoolObject<T>* object) {
  for (objects_iter iter = objects_.begin(); iter != objects_.end(); ++iter) {
    PoolObject<T>& current = *iter;
    if (current.label_base_ == object->label_base_) {
      (void)RemoveAndDelete(iter);
      return;
    }
  }
  VIXL_UNREACHABLE();
}

template <typename T>
typename PoolManager<T>::objects_iter PoolManager<T>::RemoveAndDelete(
    objects_iter iter) {
  PoolObject<T>& object = *iter;
  LocationBase<T>* label_base = object.label_base_;

  // Check if we also need to delete the LocationBase object.
  if (label_base->ShouldBeDeletedOnPoolManagerDestruction()) {
    delete_on_destruction_.push_back(label_base);
  }
  if (label_base->ShouldBeDeletedOnPlacementByPoolManager()) {
    VIXL_ASSERT(!label_base->ShouldBeDeletedOnPoolManagerDestruction());
    delete label_base;
  }

  return objects_.erase(iter);
}

template <typename T>
T PoolManager<T>::Bind(MacroAssemblerInterface* masm,
                       LocationBase<T>* object,
                       T location) {
  PoolObject<T>* existing_object = GetObjectIfTracked(object);
  int alignment;
  T min_location;
  if (existing_object == NULL) {
    alignment = object->GetMaxAlignment();
    min_location = object->GetMinLocation();
  } else {
    alignment = existing_object->alignment_;
    min_location = existing_object->min_location_;
  }

  // Align if needed, and add necessary padding to reach the min_location_.
  T aligned_location = AlignUp(location, alignment);
  masm->EmitNopBytes(aligned_location - location);
  location = aligned_location;
  while (location < min_location) {
    masm->EmitNopBytes(alignment);
    location += alignment;
  }

  object->SetLocation(masm->AsAssemblerBase(), location);
  object->MarkBound();

  if (existing_object != NULL) {
    RemoveAndDelete(existing_object);
    // No need to sort; we removed the object from a sorted array.
    RecalculateCheckpoint(kNoSortRequired);
  }

  // We assume that the maximum padding we can possibly add here is less
  // than the header alignment - hence that we're not going to go past our
  // checkpoint.
  VIXL_ASSERT(!CheckFuturePC(location, checkpoint_));
  return location;
}
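
// For example (made-up numbers): binding at location == 10 with alignment ==
// 4 and min_location == 20 first pads with 2 nop bytes to reach 12, then
// emits two further 4-byte nop blocks to reach 20, where the object is bound.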

template <typename T>
void PoolManager<T>::Release(T pc) {
  USE(pc);
  if (--monitor_ == 0) {
    // Ensure the pool has not been blocked for too long.
    VIXL_ASSERT(pc <= checkpoint_);
  }
}

template <typename T>
PoolManager<T>::~PoolManager() VIXL_NEGATIVE_TESTING_ALLOW_EXCEPTION {
#ifdef VIXL_DEBUG
  // Check for unbound objects.
  for (objects_iter iter = objects_.begin(); iter != objects_.end(); ++iter) {
    // There should not be any bound objects left in the pool. For unbound
    // objects, we will check in the destructor of the object itself.
    VIXL_ASSERT(!(*iter).label_base_->IsBound());
  }
#endif
  // Delete objects the pool manager owns.
  for (typename std::vector<LocationBase<T>*>::iterator
           iter = delete_on_destruction_.begin(),
           end = delete_on_destruction_.end();
       iter != end;
       ++iter) {
    delete *iter;
  }
}

template <typename T>
int PoolManager<T>::GetPoolSizeForTest() const {
  // Iterate over objects and return their cumulative size. This does not take
  // any padding into account, just the size of the objects themselves.
  int size = 0;
  for (const_objects_iter iter = objects_.begin(); iter != objects_.end();
       ++iter) {
    size += (*iter).label_base_->GetPoolObjectSizeInBytes();
  }
  return size;
}
}  // namespace vixl

#endif  // VIXL_POOL_MANAGER_IMPL_H_