pallet_parachain_staking/migrations.rs

// Copyright 2019-2025 PureStake Inc.
// This file is part of Moonbeam.

// Moonbeam is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Moonbeam is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Moonbeam.  If not, see <http://www.gnu.org/licenses/>.

extern crate alloc;

use alloc::vec::Vec;

use frame_support::{
	migrations::{SteppedMigration, SteppedMigrationError},
	pallet_prelude::{ConstU32, Zero},
	traits::Get,
	weights::{Weight, WeightMeter},
};
use parity_scale_codec::Decode;
use sp_io;

use crate::*;

#[derive(
	Clone,
	PartialEq,
	Eq,
	parity_scale_codec::Decode,
	parity_scale_codec::Encode,
	sp_runtime::RuntimeDebug,
)]
/// Reserve information { account, percent_of_inflation }
pub struct OldParachainBondConfig<AccountId> {
	/// Account which receives funds intended for parachain bond
	pub account: AccountId,
	/// Percent of inflation set aside for parachain bond account
	pub percent: sp_runtime::Percent,
}

/// Migration to move `DelegationScheduledRequests` from a single `StorageMap` keyed by collator
/// into a `StorageDoubleMap` keyed by (collator, delegator) and to initialize the per-collator
/// counter `DelegationScheduledRequestsPerCollator`.
///
/// This assumes the on-chain data was written with the old layout where:
/// - Storage key: ParachainStaking::DelegationScheduledRequests
/// - Value type: BoundedVec<ScheduledRequest<..>, AddGet<MaxTop, MaxBottom>>
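///
/// As a rough sketch (the exact names, value types and bounds are those declared in this pallet
/// and may differ from what is shown here), the two layouts look like:
///
/// ```ignore
/// // Old: one entry per collator; the delegator is stored inside each request.
/// StorageMap<Blake2_128Concat, AccountId, BoundedVec<ScheduledRequest, AddGet<MaxTop, MaxBottom>>>
///
/// // New: one entry per (collator, delegator) pair, plus a per-collator counter.
/// StorageDoubleMap<Blake2_128Concat, AccountId, Blake2_128Concat, AccountId, BoundedVec<ScheduledRequest, _>>
/// StorageMap<Blake2_128Concat, AccountId, u32> // DelegationScheduledRequestsPerCollator
/// ```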
pub struct MigrateDelegationScheduledRequestsToDoubleMap<T>(sp_std::marker::PhantomData<T>);

impl<T> SteppedMigration for MigrateDelegationScheduledRequestsToDoubleMap<T>
where
	T: Config,
{
	/// Cursor keeps track of the last processed legacy storage key (the full
	/// storage key bytes for the legacy single-map entry). `None` means we have
	/// not processed any key yet.
	///
	/// Using a bounded vector keeps the on-chain cursor small while still being
	/// large enough to store the full key (prefix + hash + AccountId).
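	///
	/// For example, with a 32-byte AccountId the legacy key is
	/// 32 (pallet + storage prefix) + 16 (Blake2_128 hash) + 32 (AccountId) = 80 bytes,
	/// comfortably below the 128-byte bound.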
	type Cursor = frame_support::BoundedVec<u8, ConstU32<128>>;

	/// Identifier for this migration. Must be unique across all migrations.
	type Identifier = [u8; 16];

	fn id() -> Self::Identifier {
		// Arbitrary but fixed 16-byte identifier.
		*b"MB-DSR-MIG-00001"
	}

	fn step(
		cursor: Option<Self::Cursor>,
		meter: &mut WeightMeter,
	) -> Result<Option<Self::Cursor>, SteppedMigrationError> {
		// NOTE: High-level algorithm
		// --------------------------
		// - We treat each invocation of `step` as having a fixed "budget"
		//   equal to at most 50% of the remaining weight in the `WeightMeter`.
		// - Within that budget we migrate as many *collators* (legacy map
		//   entries) as we can.
		// - For every collator we enforce two properties:
		//   1. Before we even read the legacy value from storage we ensure the
		//      remaining budget can pay for a *worst-case* collator.
		//   2. Once we know exactly how many requests `n` that collator has,
		//      we re-check the remaining budget against the *precise* cost for
		//      those `n` requests.
		// - Progress is tracked only by:
		//   * Removing legacy keys as they are migrated, and
		//   * Persisting the last processed legacy key in the `Cursor`. The
		//     next `step` resumes scanning directly after that key.
		/// Legacy scheduled request type used *only* for decoding the old single-map
		/// storage layout where the delegator was stored inside the value.
		#[derive(
			Clone,
			PartialEq,
			Eq,
			parity_scale_codec::Decode,
			parity_scale_codec::Encode,
			sp_runtime::RuntimeDebug,
		)]
		struct LegacyScheduledRequest<AccountId, Balance> {
			delegator: AccountId,
			when_executable: RoundIndex,
			action: DelegationAction<Balance>,
		}

		// Legacy value type under `ParachainStaking::DelegationScheduledRequests`.
		type OldScheduledRequests<T> = frame_support::BoundedVec<
			LegacyScheduledRequest<<T as frame_system::Config>::AccountId, BalanceOf<T>>,
			AddGet<
				<T as pallet::Config>::MaxTopDelegationsPerCandidate,
				<T as pallet::Config>::MaxBottomDelegationsPerCandidate,
			>,
		>;

		// Upper bound for the number of legacy requests that can exist for a single
		// collator in the old layout.
		let max_requests_per_collator: u64 = <AddGet<
			<T as pallet::Config>::MaxTopDelegationsPerCandidate,
			<T as pallet::Config>::MaxBottomDelegationsPerCandidate,
		> as frame_support::traits::Get<u32>>::get() as u64;

		// Conservatively estimate the worst-case DB weight for migrating a single
		// legacy entry (one collator):
		//
		// - 1 read for the old value.
		// - For each request (up to max_requests_per_collator):
		//   - 1 read + 1 write for `DelegationScheduledRequests` (mutate).
		// - After migration of this collator:
		//   - Up to `max_requests_per_collator` reads when iterating the new
		//     double-map to compute the per-collator counter.
		//   - 1 write to set `DelegationScheduledRequestsPerCollator`.
		//   - 1 write to kill the old key.
		//
		// The constants below are deliberately more generous than this itemized
		// accounting: the extra reads and writes leave headroom for the `next_key`
		// scans that skip over freshly written double-map entries and for any
		// other incidental accesses.
		let db_weight = <T as frame_system::Config>::DbWeight::get();
		let worst_reads = 1 + 3 * max_requests_per_collator;
		let worst_writes = 2 * max_requests_per_collator + 2;
		let worst_per_collator = db_weight.reads_writes(worst_reads, worst_writes);
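		// Purely illustrative numbers (the real bounds come from the runtime
		// configuration): with a hypothetical MaxTop + MaxBottom = 350, this gives
		// worst_reads = 1 + 3 * 350 = 1051 and worst_writes = 2 * 350 + 2 = 702 per collator.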

		// Safety margin baseline for this step: we will try to spend at most 50%
		// of the remaining block weight on this migration, but we only require
		// that the *full* remaining budget is sufficient to migrate one
		// worst-case collator. This avoids the situation where the 50% margin is
		// smaller than `worst_per_collator` (e.g. in production where
		// MaxTop/MaxBottom are much larger than in tests) and the migration
		// could never even start.
		let remaining = meter.remaining();
		if remaining.all_lt(worst_per_collator) {
			return Err(SteppedMigrationError::InsufficientWeight {
				required: worst_per_collator,
			});
		}
		let step_budget = remaining.saturating_div(2);

		// Hard cap on the number of collators we are willing to migrate in a
		// single step, regardless of the theoretical weight budget. This
		// prevents a single step from doing unbounded work even if the
		// `WeightMeter` is configured with a very large limit (for example in
		// testing), and keeps block execution times predictable on mainnet.
		const MAX_COLLATORS_PER_STEP: u32 = 8;

		let prefix = frame_support::storage::storage_prefix(
			b"ParachainStaking",
			b"DelegationScheduledRequests",
		);

		// Helper: find the next legacy (single-map) key after `start_from`.
		//
		// The key space is shared between the old single-map and the new
		// double-map under the same storage prefix:
		// - legacy:   Blake2_128Concat(collator)
		// - new:      Blake2_128Concat(collator) ++ Blake2_128Concat(delegator)
		//
		// We use the fact that legacy keys have *no* trailing bytes after the
		// collator AccountId, while new keys have at least one more encoded
		// component.
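		//
		// For illustration only (the layout follows from the Blake2_128Concat
		// hasher; sizes assume a 32-byte AccountId):
		//   legacy: twox128("ParachainStaking") ++ twox128("DelegationScheduledRequests")
		//           ++ blake2_128(collator) ++ collator                  (32 + 16 + 32 bytes)
		//   new:    <legacy key> ++ blake2_128(delegator) ++ delegator   (80 + 16 + 32 bytes)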
		fn next_legacy_key<T: Config>(
			prefix: &[u8],
			start_from: &[u8],
		) -> Option<(Vec<u8>, <T as frame_system::Config>::AccountId)> {
			let mut current = sp_io::storage::next_key(start_from)?;

			while current.starts_with(prefix) {
				// Strip the prefix and decode the first Blake2_128Concat-encoded key
				// which should correspond to the collator AccountId.
				let mut key_bytes = &current[prefix.len()..];

				// Must contain at least the 16 bytes of Blake2_128 hash.
				if key_bytes.len() < 16 {
					current = sp_io::storage::next_key(&current)?;
					continue;
				}

				// Skip the hash and decode the AccountId.
				key_bytes = &key_bytes[16..];
				let mut decoder = key_bytes;
				let maybe_collator =
					<<T as frame_system::Config>::AccountId as Decode>::decode(&mut decoder);

				if let Ok(collator) = maybe_collator {
					// If there are no remaining bytes, then this key corresponds to the
					// legacy single-map layout (one key per collator). If there *are*
					// remaining bytes, it is a new double-map key which we must skip.
					if decoder.is_empty() {
						return Some((current.clone(), collator));
					}
				}

				current = sp_io::storage::next_key(&current)?;
			}

			None
		}

		// Process as many legacy entries as possible within the per-step weight
		// budget. Progress is tracked by removing legacy keys from storage and
		// by persisting the last processed legacy key in the cursor, so the
		// next step can resume in O(1) reads.
		let mut used_in_step = Weight::zero();
		let mut processed_collators: u32 = 0;
		let mut start_from: Vec<u8> = cursor
			.map(|c| c.to_vec())
			.unwrap_or_else(|| prefix.to_vec());

		loop {
			let Some((full_key, collator)) = next_legacy_key::<T>(&prefix, &start_from) else {
				// No more legacy entries to migrate – we are done. Account for
				// the weight we actually used in this step.
				if !used_in_step.is_zero() {
					meter.consume(used_in_step);
				}
				return Ok(None);
			};

			// Read and decode the legacy value for this collator.
			let Some(bytes) = sp_io::storage::get(&full_key) else {
				// Nothing to migrate for this key; try the next one.
				start_from = full_key;
				continue;
			};

			let old_requests: OldScheduledRequests<T> =
				OldScheduledRequests::<T>::decode(&mut &bytes[..]).unwrap_or_default();

			let n = old_requests.len() as u64;
			// More precise weight estimate for this specific collator based on
			// the actual number of legacy requests `n`.
			let reads = 1 + 3 * n;
			let writes = 2 * n + 2;
			let weight_for_collator = db_weight.reads_writes(reads, writes);

			// Recompute remaining budget now that we know the precise weight
			// for this collator, and ensure we do not exceed the 50% per-step
			// safety margin.
			let remaining_budget = step_budget.saturating_sub(used_in_step);
			if weight_for_collator.any_gt(remaining_budget) {
				// Cannot fit this collator into the current block's budget.
				// Stop here and let the next step handle it.
				break;
			}

			// Rebuild storage using the new double-map layout for this collator.
			for request in old_requests.into_iter() {
				let delegator = request.delegator.clone();

				DelegationScheduledRequests::<T>::mutate(&collator, &delegator, |scheduled| {
					// Ignoring this error is safe: in the legacy layout each delegator has
					// at most one scheduled request per collator, so the push cannot exceed
					// the bound.
					let _ = scheduled.try_push(ScheduledRequest {
						when_executable: request.when_executable,
						action: request.action,
					});
				});
			}

			// Remove the legacy single-map key for this collator. This does *not* touch
			// the new double-map entries, which use longer keys under the same prefix.
			sp_io::storage::clear(&full_key);

			// Initialize the per-collator counter from the freshly migrated data: each
			// `(collator, delegator)` queued in the double map corresponds to one
			// delegator with at least one pending request towards this collator.
			let delegator_queues =
				DelegationScheduledRequests::<T>::iter_prefix(&collator).count() as u32;
			if delegator_queues > 0 {
				DelegationScheduledRequestsPerCollator::<T>::insert(&collator, delegator_queues);
			}
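			// For example, a collator whose legacy entry contained requests from three
			// distinct delegators ends up with a counter value of 3.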

			used_in_step = used_in_step.saturating_add(weight_for_collator);
			start_from = full_key;
			processed_collators = processed_collators.saturating_add(1);

			// Always stop after a bounded number of collators, even if the
			// weight budget would allow more. The remaining work will be picked
			// up in the next step.
			if processed_collators >= MAX_COLLATORS_PER_STEP {
				break;
			}
		}

		if !used_in_step.is_zero() {
			meter.consume(used_in_step);
			let bounded_key =
				frame_support::BoundedVec::<u8, ConstU32<128>>::truncate_from(start_from);
			Ok(Some(bounded_key))
		} else {
			// We had enough theoretical budget but could not fit even a single
			// collator with the more precise estimate. Signal insufficient weight.
			Err(SteppedMigrationError::InsufficientWeight {
				required: worst_per_collator,
			})
		}
	}
}
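
// For reference, a stepped migration like this is typically scheduled through the multi-block
// migrations pallet. A minimal sketch, assuming a runtime named `Runtime` that already includes
// `pallet_migrations` (the surrounding names are illustrative, not part of this pallet):
//
//     impl pallet_migrations::Config for Runtime {
//         type Migrations =
//             (pallet_parachain_staking::migrations::MigrateDelegationScheduledRequestsToDoubleMap<Runtime>,);
//         // ... remaining associated types as required by the runtime.
//     }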