From 2c8a2b5000cfcf508b1d55861e106a6821040af9 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Thu, 15 Jul 2021 16:51:04 +0200 Subject: [PATCH 01/14] overseer gen minor chore fixes (#3479) --- node/overseer/Cargo.toml | 4 +--- node/overseer/overseer-gen/proc-macro/src/impl_misc.rs | 4 ++-- node/overseer/overseer-gen/proc-macro/src/impl_overseer.rs | 2 +- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/node/overseer/Cargo.toml b/node/overseer/Cargo.toml index 76f96e0a4182..6f436c0d33fb 100644 --- a/node/overseer/Cargo.toml +++ b/node/overseer/Cargo.toml @@ -21,10 +21,8 @@ tracing = "0.1.26" lru = "0.6" [dev-dependencies] -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -polkadot-node-network-protocol = { path = "../network/protocol" } -polkadot-node-metrics = { path = "../metrics" } metered-channel = { path = "../metered-channel" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } futures = { version = "0.3.15", features = ["thread-pool"] } femme = "2.1.1" kv-log-macro = "1.0.7" diff --git a/node/overseer/overseer-gen/proc-macro/src/impl_misc.rs b/node/overseer/overseer-gen/proc-macro/src/impl_misc.rs index 652337faeaea..248d6b0fc37f 100644 --- a/node/overseer/overseer-gen/proc-macro/src/impl_misc.rs +++ b/node/overseer/overseer-gen/proc-macro/src/impl_misc.rs @@ -118,7 +118,7 @@ pub(crate) fn impl_misc(info: &OverseerInfo) -> proc_macro2::TokenStream { signals: #support_crate ::metered::MeteredReceiver< #signal >, messages: SubsystemIncomingMessages, to_subsystems: ChannelsOut, - to_overseer: #support_crate ::metered::UnboundedMeteredSender, + to_overseer: #support_crate ::metered::UnboundedMeteredSender<#support_crate:: ToOverseer>, ) -> Self { let signals_received = SignalsReceived::default(); #subsystem_ctx_name { @@ -136,7 +136,7 @@ pub(crate) fn impl_misc(info: &OverseerInfo) -> proc_macro2::TokenStream { } #[#support_crate ::async_trait] - impl SubsystemContext for #subsystem_ctx_name + impl #support_crate ::SubsystemContext for #subsystem_ctx_name where #subsystem_sender_name: #support_crate ::SubsystemSender< #wrapper_message >, #wrapper_message: From, diff --git a/node/overseer/overseer-gen/proc-macro/src/impl_overseer.rs b/node/overseer/overseer-gen/proc-macro/src/impl_overseer.rs index accb006f36bc..d30f8f6991c1 100644 --- a/node/overseer/overseer-gen/proc-macro/src/impl_overseer.rs +++ b/node/overseer/overseer-gen/proc-macro/src/impl_overseer.rs @@ -90,7 +90,7 @@ pub(crate) fn impl_overseer_struct(info: &OverseerInfo) -> proc_macro2::TokenStr /// Gather running subsystems' outbound streams into one. to_overseer_rx: #support_crate ::stream::Fuse< - #support_crate ::metered::UnboundedMeteredReceiver< ToOverseer > + #support_crate ::metered::UnboundedMeteredReceiver< #support_crate ::ToOverseer > >, /// Events that are sent to the overseer from the outside world. 
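A short aside on the patch above: the generated `SubsystemContext` code is changed to spell out `#support_crate ::ToOverseer` and `#support_crate ::SubsystemContext` rather than bare names, because proc-macro output is expanded in the caller's crate and only fully-qualified paths are guaranteed to resolve there. A minimal sketch of that pattern, assuming a `support_crate` path has already been resolved; the helper name and field are illustrative, not the actual overseer-gen internals:

```rust
// Sketch: emit fully-qualified paths from a proc macro so the expansion
// compiles without the caller importing `ToOverseer` themselves.
use proc_macro2::TokenStream;
use quote::quote;

fn to_overseer_field(support_crate: &syn::Path) -> TokenStream {
    quote! {
        // Fully qualified via the support crate; a bare `ToOverseer` would
        // only resolve if the expansion site happened to have it in scope.
        to_overseer: #support_crate ::metered::UnboundedMeteredSender< #support_crate ::ToOverseer >,
    }
}
```
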
From 4c509058b1a74ff9543195c3cc444702318da621 Mon Sep 17 00:00:00 2001 From: Chevdor Date: Thu, 15 Jul 2021 19:32:55 +0200 Subject: [PATCH 02/14] ci: use srtool-actions to build runtimes (#3423) * ci: use chevdor/srtool-actions to build runtimes * cleanup --- .github/workflows/publish-draft-release.yml | 109 ++++++++++---------- 1 file changed, 55 insertions(+), 54 deletions(-) diff --git a/.github/workflows/publish-draft-release.yml b/.github/workflows/publish-draft-release.yml index b40bc8ba9274..5b1855c32cc3 100644 --- a/.github/workflows/publish-draft-release.yml +++ b/.github/workflows/publish-draft-release.yml @@ -24,38 +24,27 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - runtime: ['polkadot', 'kusama'] - container: - image: paritytech/srtool:nightly-2021-03-15 - volumes: - - ${{ github.workspace }}:/build - env: - PACKAGE: ${{ matrix.runtime }}-runtime - RUSTC_VERSION: nightly-2020-10-27 + runtime: ["polkadot", "kusama", "westend"] steps: - uses: actions/checkout@v2 - name: Cache target dir uses: actions/cache@v2 with: - path: '${{ github.workspace }}/runtime/${{ matrix.runtime }}/target' + path: "${{ github.workspace }}/runtime/${{ matrix.runtime }}/target" key: srtool-target-${{ matrix.runtime }}-${{ github.sha }} restore-keys: | srtool-target-${{ matrix.runtime }}- srtool-target- - name: Build ${{ matrix.runtime }} runtime - id: build-runtime - shell: bash - env: - srtool_output_filename: ${{ matrix.runtime }}_srtool_output.json + id: srtool_build + uses: chevdor/srtool-actions@v0.3.0 + with: + # This is the default with chevdor/srtool-actions@v0.3.0 but we make it clear + image: paritytech/srtool + chain: ${{ matrix.runtime }} + - name: Store srtool digest to disk run: | - cd /build - pwd - ls -la - build --json | tee $srtool_output_filename - cat $srtool_output_filename - while IFS= read -r line; do - echo "::set-output name=$line::$(jq -r ".$line" < $srtool_output_filename)" - done <<< "$(jq -r 'keys[]' < $srtool_output_filename)" + echo '${{ steps.srtool_build.outputs.json }}' | jq > ${{ matrix.runtime }}_srtool_output.json - name: Upload ${{ matrix.runtime }} srtool json uses: actions/upload-artifact@v2 with: @@ -65,49 +54,52 @@ jobs: uses: actions/upload-artifact@v2 with: name: ${{ matrix.runtime }}-runtime - path: "${{ steps.build-runtime.outputs.wasm }}" + path: | + ${{ steps.srtool_build.outputs.wasm }} + ${{ steps.srtool_build.outputs.wasm_compressed }} publish-draft-release: runs-on: ubuntu-latest - needs: ['get-rust-versions', 'build-runtimes'] + needs: ["get-rust-versions", "build-runtimes"] outputs: release_url: ${{ steps.create-release.outputs.html_url }} asset_upload_url: ${{ steps.create-release.outputs.upload_url }} steps: - - uses: actions/checkout@v2 - with: - fetch-depth: 0 - path: polkadot - - name: Set up Ruby 2.7 - uses: actions/setup-ruby@v1 - with: - ruby-version: 2.7 - - name: Download srtool json output - uses: actions/download-artifact@v2 - - name: Generate release text - env: - RUSTC_STABLE: ${{ needs.get-rust-versions.outputs.rustc-stable }} - RUSTC_NIGHTLY: ${{ needs.get-rust-versions.outputs.rustc-nightly }} - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - gem install changelogerator git toml - ruby $GITHUB_WORKSPACE/polkadot/scripts/github/generate_release_text.rb | tee release_text.md - - name: Create draft release - id: create-release - uses: actions/create-release@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - tag_name: ${{ github.ref }} - release_name: Polkadot ${{ github.ref }} - body_path: ./release_text.md - 
draft: true + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + path: polkadot + - name: Set up Ruby 2.7 + uses: actions/setup-ruby@v1 + with: + ruby-version: 2.7 + - name: Download srtool json output + uses: actions/download-artifact@v2 + - name: Generate release text + env: + RUSTC_STABLE: ${{ needs.get-rust-versions.outputs.rustc-stable }} + RUSTC_NIGHTLY: ${{ needs.get-rust-versions.outputs.rustc-nightly }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gem install changelogerator git toml + ruby $GITHUB_WORKSPACE/polkadot/scripts/github/generate_release_text.rb | tee release_text.md + - name: Create draft release + id: create-release + uses: actions/create-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: ${{ github.ref }} + release_name: Polkadot ${{ github.ref }} + body_path: ./release_text.md + draft: true + publish-runtimes: runs-on: ubuntu-latest - needs: ['publish-draft-release'] + needs: ["publish-draft-release"] strategy: matrix: - runtime: ['polkadot', 'kusama'] + runtime: ["polkadot", "kusama", "westend"] steps: - uses: actions/checkout@v2 - uses: actions/download-artifact@v2 @@ -122,7 +114,7 @@ jobs: ls "${{ matrix.runtime }}-runtime" runtime_ver="$(ruby -e 'require "./scripts/github/lib.rb"; puts get_runtime("${{ matrix.runtime }}")')" echo "::set-output name=runtime_ver::$runtime_ver" - - name: Upload ${{ matrix.runtime }} wasm + - name: Upload compact ${{ matrix.runtime }} wasm uses: actions/upload-release-asset@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -131,6 +123,15 @@ jobs: asset_path: "${{ matrix.runtime }}-runtime/${{ matrix.runtime }}_runtime.compact.wasm" asset_name: ${{ matrix.runtime }}_runtime-v${{ steps.get-runtime-ver.outputs.runtime_ver }}.compact.wasm asset_content_type: application/wasm + - name: Upload compressed ${{ matrix.runtime }} wasm + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ needs.publish-draft-release.outputs.asset_upload_url }} + asset_path: "${{ matrix.runtime }}-runtime/${{ matrix.runtime }}_runtime.compact.compressed.wasm" + asset_name: ${{ matrix.runtime }}_runtime-v${{ steps.get-runtime-ver.outputs.runtime_ver }}.compact.compressed.wasm + asset_content_type: application/wasm post_to_matrix: runs-on: ubuntu-latest From 956d6ae183a0b7f662d37bbbd251b64423f9701c Mon Sep 17 00:00:00 2001 From: Seun Lanlege Date: Thu, 15 Jul 2021 21:12:15 +0100 Subject: [PATCH 03/14] more verbose asserts (#3476) Co-authored-by: CI system <> --- node/test/polkadot-simnet/common/src/lib.rs | 76 +++++++++++++-------- node/test/polkadot-simnet/test/src/main.rs | 15 +++- 2 files changed, 58 insertions(+), 33 deletions(-) diff --git a/node/test/polkadot-simnet/common/src/lib.rs b/node/test/polkadot-simnet/common/src/lib.rs index 2ac986ae7fd7..5527de0ba39d 100644 --- a/node/test/polkadot-simnet/common/src/lib.rs +++ b/node/test/polkadot-simnet/common/src/lib.rs @@ -99,10 +99,10 @@ pub async fn dispatch_with_root(call: impl Into< { type DemocracyCall = democracy::Call; - type CouncilCollectiveEvent = collective::Event::; + type CouncilCollectiveEvent = collective::Event; type CouncilCollectiveCall = collective::Call; type TechnicalCollectiveCall = collective::Call; - type TechnicalCollectiveEvent = collective::Event::; + type TechnicalCollectiveEvent = collective::Event; // here lies a black mirror esque copy of on chain whales. 
let whales = vec![ @@ -126,14 +126,15 @@ pub async fn dispatch_with_root(call: impl Into< Some(proposal_hash), + Event::Democracy(democracy::Event::PreimageNoted(ref proposal_hash, _, _)) + => Some(proposal_hash.clone()), _ => None }) .next() - .ok_or_else(|| "failed to note pre-image")? + .ok_or_else(|| format!("democracy::Event::PreimageNoted not found in events: {:#?}", events))? }; // submit external_propose call through council collective @@ -151,16 +152,17 @@ pub async fn dispatch_with_root(call: impl Into< Some((index, hash)), + Event::Council(CouncilCollectiveEvent::Proposed(_, index, ref hash, _)) => + Some((index, hash.clone())), _ => None } }) .next() - .ok_or_else(|| "failed to execute council::Call::propose(democracy::Call::external_propose_majority)")?; + .ok_or_else(|| format!("CouncilCollectiveEvent::Proposed not found in events: {:#?}", events))?; // vote for member in &council_collective[1..] { @@ -179,16 +181,20 @@ pub async fn dispatch_with_root(call: impl Into< true, + Event::Council(CouncilCollectiveEvent::Closed(_hash, _, _)) if hash == _hash => true, + Event::Council(CouncilCollectiveEvent::Approved(_hash, )) if hash == _hash => true, + Event::Council(CouncilCollectiveEvent::Executed(_hash, Ok(()))) if hash == _hash => true, _ => false, } }) .collect::>(); // make sure all 3 events are in state - assert_eq!(events.len(), 3); + assert_eq!( + events.len(), 3, + "CouncilCollectiveEvent::{{Closed, Approved, Executed}} not found in events: {:#?}", + node.events(), + ); } // next technical collective must fast track the proposal. @@ -205,16 +211,17 @@ pub async fn dispatch_with_root(call: impl Into< Some((index, hash)), + Event::TechnicalCommittee(TechnicalCollectiveEvent::Proposed(_, index, ref hash, _)) + => Some((index, hash.clone())), _ => None } }) .next() - .ok_or_else(|| "failed to execute council::Call::propose(democracy::Call::fast_track))")?; + .ok_or_else(|| format!("TechnicalCollectiveEvent::Proposed not found in events: {:#?}", events))?; // vote for member in &technical_collective[1..] { @@ -233,29 +240,34 @@ pub async fn dispatch_with_root(call: impl Into< true, + Event::TechnicalCommittee(TechnicalCollectiveEvent::Closed(_hash, _, _)) if hash == _hash => true, + Event::TechnicalCommittee(TechnicalCollectiveEvent::Approved(_hash)) if hash == _hash => true, + Event::TechnicalCommittee(TechnicalCollectiveEvent::Executed(_hash, Ok(()))) if hash == _hash => true, _ => false, } }) .collect::>(); // make sure all 3 events are in state - assert_eq!(events.len(), 3); + assert_eq!( + events.len(), 3, + "TechnicalCollectiveEvent::{{Closed, Approved, Executed}} not found in events: {:#?}", + node.events(), + ); } // now runtime upgrade proposal is a fast-tracked referendum we can vote for. 
- let referendum_index = node.events() + let ref_index = node.events() .into_iter() .filter_map(|event| match event.event { - Event::Democracy(democracy::Event::::Started(index, _)) => Some(index), + Event::Democracy(democracy::Event::Started(index, _)) => Some(index), _ => None, }) .next() - .ok_or_else(|| "failed to execute council::Call::close")?; + .ok_or_else(|| format!("democracy::Event::Started not found in events: {:#?}", node.events()))?; + let call = DemocracyCall::vote( - referendum_index, + ref_index, AccountVote::Standard { vote: Vote { aye: true, conviction: Conviction::Locked1x }, // 10 DOTS @@ -274,16 +286,20 @@ pub async fn dispatch_with_root(call: impl Into< true, + Event::Democracy(democracy::Event::Passed(_index)) if _index == ref_index => true, + Event::Democracy(democracy::Event::PreimageUsed(_hash, _, _)) if _hash == proposal_hash => true, + Event::Democracy(democracy::Event::Executed(_index, true)) if _index == ref_index => true, _ => false, } }) .collect::>(); // make sure all events were emitted - assert_eq!(events.len(), 3); + assert_eq!( + events.len(), 3, + "democracy::Event::{{Passed, PreimageUsed, Executed}} not found in events: {:#?}", + node.events(), + ); Ok(()) } diff --git a/node/test/polkadot-simnet/test/src/main.rs b/node/test/polkadot-simnet/test/src/main.rs index 3b57c40a6079..fccffc6b0e6b 100644 --- a/node/test/polkadot-simnet/test/src/main.rs +++ b/node/test/polkadot-simnet/test/src/main.rs @@ -48,13 +48,18 @@ fn main() -> Result<(), Box> { .collect::>(); // make sure event was emitted - assert_eq!(events.len(), 1); + assert_eq!(events.len(), 1, "system::Event::CodeUpdate not found in events: {:#?}", node.events()); let new_runtime_version = node.client() .executor() .runtime_version(&BlockId::Hash(node.client().info().best_hash))? .spec_version; // just confirming - assert!(new_runtime_version > old_runtime_version); + assert!( + new_runtime_version > old_runtime_version, + "Invariant, spec_version of new runtime: {} not greater than spec_version of old runtime: {}", + new_runtime_version, + old_runtime_version, + ); let (from, dest, balance) = ( AccountId32::from_str("15j4dg5GzsL1bw2U2AWgeyAk6QTxq43V7ZPbXdAmbVLjvDCK")?, @@ -76,7 +81,11 @@ fn main() -> Result<(), Box> { }) .collect::>(); // make sure transfer went through - assert_eq!(events.len(), 1); + assert_eq!( + events.len(), 1, + "balances::Call::transfer failed to execute, balances::Event::Transfer not found in events: {:#?}", + node.events() + ); // we're done, drop node. drop(node); From 0c670d826c7ce80b26e6214c411dc7320af58854 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 16 Jul 2021 15:13:20 +0200 Subject: [PATCH 04/14] Update BEEFY+MMR integration. (#3480) * Update MMR leaf. * Revert to older substrate. * Add version docs. * Fix spellcheck. 
--- Cargo.lock | 34 +++++- runtime/common/Cargo.toml | 8 +- runtime/common/src/lib.rs | 29 +---- runtime/common/src/mmr.rs | 226 -------------------------------------- runtime/rococo/Cargo.toml | 2 + runtime/rococo/src/lib.rs | 60 +++++++--- 6 files changed, 81 insertions(+), 278 deletions(-) delete mode 100644 runtime/common/src/mmr.rs diff --git a/Cargo.lock b/Cargo.lock index 57f71119322e..41056ca89f19 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -539,6 +539,11 @@ dependencies = [ "sp-runtime", ] +[[package]] +name = "beefy-merkle-tree" +version = "0.1.0" +source = "git+https://github.com/paritytech/grandpa-bridge-gadget?branch=master#2e450ac733d55b2f5e42a304afa287be6abcc53b" + [[package]] name = "beefy-primitives" version = "0.1.0" @@ -4725,6 +4730,30 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-beefy-mmr" +version = "0.1.0" +source = "git+https://github.com/paritytech/grandpa-bridge-gadget?branch=master#2e450ac733d55b2f5e42a304afa287be6abcc53b" +dependencies = [ + "beefy-merkle-tree", + "beefy-primitives", + "frame-support", + "frame-system", + "hex", + "libsecp256k1", + "log", + "pallet-beefy", + "pallet-mmr", + "pallet-mmr-primitives", + "pallet-session", + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-bounties" version = "4.0.0-dev" @@ -6778,7 +6807,6 @@ dependencies = [ name = "polkadot-runtime-common" version = "0.9.8" dependencies = [ - "beefy-primitives", "bitvec", "frame-benchmarking", "frame-support", @@ -6791,9 +6819,8 @@ dependencies = [ "pallet-authorship", "pallet-babe", "pallet-balances", - "pallet-beefy", + "pallet-beefy-mmr", "pallet-election-provider-multi-phase", - "pallet-mmr", "pallet-offences", "pallet-session", "pallet-staking", @@ -7909,6 +7936,7 @@ dependencies = [ "pallet-babe", "pallet-balances", "pallet-beefy", + "pallet-beefy-mmr", "pallet-bridge-dispatch", "pallet-bridge-grandpa", "pallet-bridge-messages", diff --git a/runtime/common/Cargo.toml b/runtime/common/Cargo.toml index 1b5e7c2ae18b..cd259361956d 100644 --- a/runtime/common/Cargo.toml +++ b/runtime/common/Cargo.toml @@ -14,7 +14,6 @@ serde = { version = "1.0.123", default-features = false } serde_derive = { version = "1.0.117", optional = true } static_assertions = "1.1.0" -beefy-primitives = { git = "https://github.com/paritytech/grandpa-bridge-gadget", branch = "master", default-features = false } sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-std = { package = "sp-std", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } @@ -36,8 +35,7 @@ pallet-offences = { git = "https://github.com/paritytech/substrate", branch = "m pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-election-provider-multi-phase = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-beefy = { git = "https://github.com/paritytech/grandpa-bridge-gadget", branch = "master", default-features = false } -pallet-mmr = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-beefy-mmr = { git = 
"https://github.com/paritytech/grandpa-bridge-gadget", branch = "master", default-features = false } frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features=false, optional = true } @@ -67,7 +65,6 @@ libsecp256k1 = "0.3.5" default = ["std"] no_std = [] std = [ - "beefy-primitives/std", "bitvec/std", "parity-scale-codec/std", "log/std", @@ -83,8 +80,7 @@ std = [ "frame-support/std", "pallet-authorship/std", "pallet-balances/std", - "pallet-beefy/std", - "pallet-mmr/std", + "pallet-beefy-mmr/std", "pallet-session/std", "pallet-staking/std", "pallet-timestamp/std", diff --git a/runtime/common/src/lib.rs b/runtime/common/src/lib.rs index 725cf95b8b40..06047593c22b 100644 --- a/runtime/common/src/lib.rs +++ b/runtime/common/src/lib.rs @@ -24,7 +24,6 @@ pub mod auctions; pub mod crowdloan; pub mod purchase; pub mod impls; -pub mod mmr; pub mod paras_sudo_wrapper; pub mod paras_registrar; pub mod slot_range; @@ -37,8 +36,7 @@ mod mock; #[cfg(test)] mod integration_tests; -use beefy_primitives::crypto::AuthorityId as BeefyId; -use primitives::v1::{AccountId, AssignmentId, BlockNumber, ValidatorId}; +use primitives::v1::{AssignmentId, BlockNumber, ValidatorId}; use sp_runtime::{Perquintill, Perbill, FixedPointNumber}; use frame_system::limits; use frame_support::{ @@ -181,20 +179,6 @@ impl OneSessionHandler for AssignmentSe fn on_disabled(_: usize) { } } -/// Generates a `BeefyId` from the given `AccountId`. The resulting `BeefyId` is -/// a dummy value and this is a utility function meant to be used when migration -/// session keys. -pub fn dummy_beefy_id_from_account_id(a: AccountId) -> BeefyId { - let mut id = BeefyId::default(); - let id_raw: &mut [u8] = id.as_mut(); - - // NOTE: AccountId is 32 bytes, whereas BeefyId is 33 bytes. - id_raw[1..].copy_from_slice(a.as_ref()); - id_raw[0..4].copy_from_slice(b"beef"); - - id -} - #[cfg(test)] mod multiplier_tests { use super::*; @@ -297,15 +281,4 @@ mod multiplier_tests { println!("block = {} multiplier {:?}", blocks, multiplier); } } - - #[test] - fn generate_dummy_unique_beefy_id_from_account_id() { - let acc1 = AccountId::new([0; 32]); - let acc2 = AccountId::new([1; 32]); - - let beefy_id1 = dummy_beefy_id_from_account_id(acc1); - let beefy_id2 = dummy_beefy_id_from_account_id(acc2); - - assert_ne!(beefy_id1, beefy_id2); - } } diff --git a/runtime/common/src/mmr.rs b/runtime/common/src/mmr.rs deleted file mode 100644 index 6ba20bb04654..000000000000 --- a/runtime/common/src/mmr.rs +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! A pallet responsible for creating Merkle Mountain Range (MMR) leaf for current block. 
- -use beefy_primitives::ValidatorSetId; -use sp_core::H256; -use sp_runtime::traits::Convert; -use sp_std::prelude::*; -use frame_support::RuntimeDebug; -use pallet_mmr::primitives::LeafDataProvider; -use parity_scale_codec::{Encode, Decode}; -use runtime_parachains::paras; -pub use pallet::*; - -/// A BEEFY consensus digest item with MMR root hash. -pub struct DepositBeefyDigest(sp_std::marker::PhantomData); - -impl pallet_mmr::primitives::OnNewRoot for DepositBeefyDigest where - T: pallet_mmr::Config, - T: pallet_beefy::Config, -{ - fn on_new_root(root: &::Hash) { - let digest = sp_runtime::generic::DigestItem::Consensus( - beefy_primitives::BEEFY_ENGINE_ID, - parity_scale_codec::Encode::encode( - &beefy_primitives::ConsensusLog::<::BeefyId>::MmrRoot(*root) - ), - ); - >::deposit_log(digest); - } -} - -/// Convert BEEFY `secp256k1` public keys into uncompressed form -pub struct UncompressBeefyEcdsaKeys; -impl Convert> for UncompressBeefyEcdsaKeys { - fn convert(a: beefy_primitives::crypto::AuthorityId) -> Vec { - use sp_core::crypto::Public; - let compressed_key = a.as_slice(); - // TODO [ToDr] Temporary workaround until we have a better way to get uncompressed keys. - secp256k1::PublicKey::parse_slice(compressed_key, Some(secp256k1::PublicKeyFormat::Compressed)) - .map(|pub_key| pub_key.serialize().to_vec()) - .map_err(|_| { - log::error!(target: "runtime::beefy", "Invalid BEEFY PublicKey format!"); - }) - .unwrap_or_default() - } -} - -/// A leaf that gets added every block to the MMR constructed by `[pallet_mmr]`. -#[derive(RuntimeDebug, PartialEq, Eq, Clone, Encode, Decode)] -pub struct MmrLeaf { - /// Current block parent number and hash. - pub parent_number_and_hash: (BlockNumber, Hash), - /// A merkle root of all registered parachain heads. - pub parachain_heads: MerkleRoot, - /// A merkle root of the next BEEFY authority set. - pub beefy_next_authority_set: BeefyNextAuthoritySet, -} - -/// Details of the next BEEFY authority set. -#[derive(RuntimeDebug, Default, PartialEq, Eq, Clone, Encode, Decode)] -pub struct BeefyNextAuthoritySet { - /// Id of the next set. - /// - /// Id is required to correlate BEEFY signed commitments with the validator set. - /// Light Client can easily verify that the commitment witness it is getting is - /// produced by the latest validator set. - pub id: ValidatorSetId, - /// Number of validators in the set. - /// - /// Some BEEFY Light Clients may use an interactive protocol to verify only subset - /// of signatures. We put set length here, so that these clients can verify the minimal - /// number of required signatures. - pub len: u32, - /// Merkle Root Hash build from BEEFY `AuthorityIds`. - /// - /// This is used by Light Clients to confirm that the commitments are signed by the correct - /// validator set. Light Clients using interactive protocol, might verify only subset of - /// signatures, hence don't require the full list here (will receive inclusion proofs). - pub root: MerkleRoot, -} - -type MerkleRootOf = ::Hash; - -/// A type that is able to return current list of parachain heads that end up in the MMR leaf. -pub trait ParachainHeadsProvider { - /// Return a list of encoded parachain heads. - fn encoded_heads() -> Vec>; -} - -/// A default implementation for runtimes without parachains. 
-impl ParachainHeadsProvider for () { - fn encoded_heads() -> Vec> { - Default::default() - } -} - -impl ParachainHeadsProvider for paras::Pallet { - fn encoded_heads() -> Vec> { - paras::Pallet::::parachains() - .into_iter() - .map(paras::Pallet::::para_head) - .map(|maybe_para_head| maybe_para_head.encode()) - .collect() - } -} - -#[frame_support::pallet] -pub mod pallet { - use frame_support::pallet_prelude::*; - use super::*; - - #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] - pub struct Pallet(_); - - /// The module's configuration trait. - #[pallet::config] - #[pallet::disable_frame_system_supertrait_check] - pub trait Config: pallet_mmr::Config + pallet_beefy::Config { - /// Convert BEEFY `AuthorityId` to a form that would end up in the Merkle Tree. - /// - /// For instance for ECDSA (`secp256k1`) we want to store uncompressed public keys (65 bytes) - /// to simplify using them on Ethereum chain, but the rest of the Substrate codebase - /// is storing them compressed (33 bytes) for efficiency reasons. - type BeefyAuthorityToMerkleLeaf: Convert<::BeefyId, Vec>; - - /// Retrieve a list of current parachain heads. - /// - /// The trait is implemented for `paras` module, but since not all chains might have parachains, - /// and we want to keep the MMR leaf structure uniform, it's possible to use `()` as well to - /// simply put dummy data to the leaf. - type ParachainHeads: ParachainHeadsProvider; - } - - /// Details of next BEEFY authority set. - /// - /// This storage entry is used as cache for calls to [`update_beefy_next_authority_set`]. - #[pallet::storage] - #[pallet::getter(fn beefy_next_authorities)] - pub type BeefyNextAuthorities = StorageValue< - _, - BeefyNextAuthoritySet>, - ValueQuery, - >; -} - -impl LeafDataProvider for Pallet where - MerkleRootOf: From, -{ - type LeafData = MmrLeaf< - ::BlockNumber, - ::Hash, - MerkleRootOf, - >; - - fn leaf_data() -> Self::LeafData { - MmrLeaf { - parent_number_and_hash: frame_system::Pallet::::leaf_data(), - parachain_heads: Pallet::::parachain_heads_merkle_root(), - beefy_next_authority_set: Pallet::::update_beefy_next_authority_set(), - } - } -} - -impl Pallet where - MerkleRootOf: From, - ::BeefyId: -{ - /// Returns latest root hash of a merkle tree constructed from all registered parachain headers. - /// - /// NOTE this does not include parathreads - only parachains are part of the merkle tree. - /// - /// NOTE This is an initial and inefficient implementation, which re-constructs - /// the merkle tree every block. Instead we should update the merkle root in `[Self::on_initialize]` - /// call of this pallet and update the merkle tree efficiently (use on-chain storage to persist inner nodes). - fn parachain_heads_merkle_root() -> MerkleRootOf { - let para_heads = T::ParachainHeads::encoded_heads(); - sp_io::trie::keccak_256_ordered_root(para_heads).into() - } - - /// Returns details of the next BEEFY authority set. - /// - /// Details contain authority set id, authority set length and a merkle root, - /// constructed from uncompressed `secp256k1` public keys of the next BEEFY authority set. - /// - /// This function will use a storage-cached entry in case the set didn't change, or compute and cache - /// new one in case it did. - fn update_beefy_next_authority_set() -> BeefyNextAuthoritySet> { - let id = pallet_beefy::Pallet::::validator_set_id() + 1; - let current_next = Self::beefy_next_authorities(); - // avoid computing the merkle tree if validator set id didn't change. 
- if id == current_next.id { - return current_next; - } - - let beefy_public_keys = pallet_beefy::Pallet::::next_authorities() - .into_iter() - .map(T::BeefyAuthorityToMerkleLeaf::convert) - .collect::>(); - let len = beefy_public_keys.len() as u32; - let root: MerkleRootOf = sp_io::trie::keccak_256_ordered_root(beefy_public_keys).into(); - let next_set = BeefyNextAuthoritySet { - id, - len, - root, - }; - // cache the result - BeefyNextAuthorities::::put(&next_set); - next_set - } -} diff --git a/runtime/rococo/Cargo.toml b/runtime/rococo/Cargo.toml index 0924fa8dab7f..abee70185aad 100644 --- a/runtime/rococo/Cargo.toml +++ b/runtime/rococo/Cargo.toml @@ -35,6 +35,7 @@ pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-beefy = { git = "https://github.com/paritytech/grandpa-bridge-gadget", branch = "master", default-features = false } +pallet-beefy-mmr = { git = "https://github.com/paritytech/grandpa-bridge-gadget", branch = "master", default-features = false } pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-collective = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } @@ -104,6 +105,7 @@ std = [ "pallet-bridge-messages/std", "pallet-collective/std", "pallet-beefy/std", + "pallet-beefy-mmr/std", "pallet-grandpa/std", "pallet-sudo/std", "pallet-membership/std", diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index a416d87fe89d..06dbd19ed853 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -32,7 +32,6 @@ use primitives::v1::{ SessionInfo as SessionInfoData, }; use runtime_common::{ - mmr as mmr_common, SlowAdjustingFeeUpdate, impls::ToAuthor, BlockHashCount, BlockWeights, BlockLength, RocksDbWeight, }; use runtime_parachains::{ @@ -65,6 +64,7 @@ use sp_core::{OpaqueMetadata, RuntimeDebug}; use sp_staking::SessionIndex; use pallet_session::historical as session_historical; use beefy_primitives::crypto::AuthorityId as BeefyId; +use beefy_primitives::mmr::MmrLeafVersion; use pallet_mmr_primitives as mmr; use frame_system::EnsureRoot; use runtime_common::{paras_sudo_wrapper, paras_registrar, xcm_sender, auctions, crowdloan, slots}; @@ -237,7 +237,7 @@ construct_runtime! { // Bridges support. Mmr: pallet_mmr::{Pallet, Storage}, Beefy: pallet_beefy::{Pallet, Config, Storage}, - MmrLeaf: mmr_common::{Pallet, Storage}, + MmrLeaf: pallet_beefy_mmr::{Pallet, Storage}, // It might seem strange that we add both sides of the bridge to the same runtime. We do this because this // runtime as shared by both the Rococo and Wococo chains. 
When running as Rococo we only use @@ -825,27 +825,57 @@ impl pallet_mmr::Config for Runtime { const INDEXING_PREFIX: &'static [u8] = b"mmr"; type Hashing = Keccak256; type Hash = ::Output; - type OnNewRoot = mmr_common::DepositBeefyDigest; + type OnNewRoot = pallet_beefy_mmr::DepositBeefyDigest; type WeightInfo = (); - type LeafData = mmr_common::Pallet; + type LeafData = pallet_beefy_mmr::Pallet; } -impl mmr_common::Config for Runtime { - type BeefyAuthorityToMerkleLeaf = mmr_common::UncompressBeefyEcdsaKeys; - type ParachainHeads = Paras; +pub struct ParasProvider; +impl pallet_beefy_mmr::ParachainHeadsProvider for ParasProvider { + fn parachain_heads() -> Vec<(u32, Vec)> { + Paras::parachains() + .into_iter() + .filter_map(|id| { + Paras::para_head(&id).map(|head| (id.into(), head.0)) + }) + .collect() + } +} + +parameter_types! { + /// Version of the produced MMR leaf. + /// + /// The version consists of two parts; + /// - `major` (3 bits) + /// - `minor` (5 bits) + /// + /// `major` should be updated only if decoding the previous MMR Leaf format from the payload + /// is not possible (i.e. backward incompatible change). + /// `minor` should be updated if fields are added to the previous MMR Leaf, which given SCALE + /// encoding does not prevent old leafs from being decoded. + /// + /// Hence we expect `major` to be changed really rarely (think never). + /// See [`MmrLeafVersion`] type documentation for more details. + pub LeafVersion: MmrLeafVersion = MmrLeafVersion::new(0, 0); +} + +impl pallet_beefy_mmr::Config for Runtime { + type LeafVersion = LeafVersion; + type BeefyAuthorityToMerkleLeaf = pallet_beefy_mmr::BeefyEcdsaToEthereum; + type ParachainHeads = ParasProvider; } parameter_types! { - // This is a pretty unscientific cap. - // - // Note that once this is hit the pallet will essentially throttle incoming requests down to one - // call per block. + /// This is a pretty unscientific cap. + /// + /// Note that once this is hit the pallet will essentially throttle incoming requests down to one + /// call per block. pub const MaxRequests: u32 = 4 * HOURS as u32; - // Number of headers to keep. - // - // Assuming the worst case of every header being finalized, we will keep headers at least for a - // week. + /// Number of headers to keep. + /// + /// Assuming the worst case of every header being finalized, we will keep headers at least for a + /// week. 
pub const HeadersToKeep: u32 = 7 * DAYS as u32; } From bcb04dd3f77c5b5a66029e568bffe026ab729fd7 Mon Sep 17 00:00:00 2001 From: Denis Tsai Date: Sun, 18 Jul 2021 23:52:27 -0700 Subject: [PATCH 05/14] added pallet-proxy in rococo feature dependencies (#3486) --- runtime/rococo/Cargo.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/runtime/rococo/Cargo.toml b/runtime/rococo/Cargo.toml index abee70185aad..183c20cb14ad 100644 --- a/runtime/rococo/Cargo.toml +++ b/runtime/rococo/Cargo.toml @@ -116,6 +116,7 @@ std = [ "pallet-session/std", "pallet-staking/std", "pallet-offences/std", + "pallet-proxy/std", "pallet-timestamp/std", "pallet-transaction-payment/std", "pallet-transaction-payment-rpc-runtime-api/std", @@ -163,6 +164,7 @@ runtime-benchmarks = [ "pallet-grandpa/runtime-benchmarks", "pallet-im-online/runtime-benchmarks", "pallet-indices/runtime-benchmarks", + "pallet-proxy/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", @@ -183,6 +185,7 @@ try-runtime = [ "pallet-im-online/try-runtime", "pallet-membership/try-runtime", "pallet-session/try-runtime", + "pallet-proxy/try-runtime", "pallet-staking/try-runtime", "pallet-offences/try-runtime", "pallet-timestamp/try-runtime", From 5480ecf69c323c84f3c16989e9d715ef99b2d1ad Mon Sep 17 00:00:00 2001 From: Pierre Besson Date: Mon, 19 Jul 2021 09:32:48 +0200 Subject: [PATCH 06/14] remove the kubernetes helm chart (#3483) --- scripts/kubernetes/Chart.yaml | 12 -- scripts/kubernetes/README.md | 47 ------ .../templates/poddisruptionbudget.yaml | 10 -- scripts/kubernetes/templates/service.yaml | 54 ------- .../kubernetes/templates/serviceaccount.yaml | 10 -- scripts/kubernetes/templates/statefulset.yaml | 139 ------------------ scripts/kubernetes/values.yaml | 42 ------ 7 files changed, 314 deletions(-) delete mode 100644 scripts/kubernetes/Chart.yaml delete mode 100644 scripts/kubernetes/README.md delete mode 100644 scripts/kubernetes/templates/poddisruptionbudget.yaml delete mode 100644 scripts/kubernetes/templates/service.yaml delete mode 100644 scripts/kubernetes/templates/serviceaccount.yaml delete mode 100644 scripts/kubernetes/templates/statefulset.yaml delete mode 100644 scripts/kubernetes/values.yaml diff --git a/scripts/kubernetes/Chart.yaml b/scripts/kubernetes/Chart.yaml deleted file mode 100644 index 91652cef543e..000000000000 --- a/scripts/kubernetes/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: polkadot -version: 0.2 -appVersion: 0.2.0 -description: Polkadot Node Implementation -home: https://polkadot.network/ -icon: https://polkadot.network/favicon.ico -sources: - - https://github.com/paritytech/polkadot/ -maintainers: - - name: Paritytech Devops Team - email: devops-team@parity.io -tillerVersion: ">=2.8.0" diff --git a/scripts/kubernetes/README.md b/scripts/kubernetes/README.md deleted file mode 100644 index 1ae9ff79c05e..000000000000 --- a/scripts/kubernetes/README.md +++ /dev/null @@ -1,47 +0,0 @@ - - -# Polkadot Kubernetes Helm Chart - -This [Helm Chart](https://helm.sh/) can be used for deploying containerized -**Polkadot** to a [Kubernetes](https://kubernetes.io/) cluster. 
- - -## Prerequisites - -- Tested on Kubernetes 1.10.7-gke.6 - -## Installation - -To install the chart with the release name `my-release` into namespace -`my-namespace` from within this directory: - -```console -$ helm install --namespace my-namespace --name my-release --values values.yaml ./ -``` - -The command deploys Polkadot on the Kubernetes cluster in the configuration -given in `values.yaml`. When the namespace is omitted it'll be installed in -the default one. - - -## Removal of the Chart - -To uninstall/delete the `my-release` deployment: - -```console -$ helm delete --namespace my-namespace my-release -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. - - -## Upgrading - -Once the chart is installed and a new version should be deployed helm takes -care of this by - -```console -$ helm upgrade --namespace my-namespace --values values.yaml my-release ./ -``` - - diff --git a/scripts/kubernetes/templates/poddisruptionbudget.yaml b/scripts/kubernetes/templates/poddisruptionbudget.yaml deleted file mode 100644 index 56958b1fbafd..000000000000 --- a/scripts/kubernetes/templates/poddisruptionbudget.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: {{ .Values.GitlabEnvSlug | default .Values.app }} -spec: - selector: - matchLabels: - app: {{ .Values.GitlabEnvSlug | default .Values.app }} - maxUnavailable: 1 - diff --git a/scripts/kubernetes/templates/service.yaml b/scripts/kubernetes/templates/service.yaml deleted file mode 100644 index 01ba9d5a567c..000000000000 --- a/scripts/kubernetes/templates/service.yaml +++ /dev/null @@ -1,54 +0,0 @@ -# see: -# https://kubernetes.io/docs/tutorials/services/ -# https://kubernetes.io/docs/concepts/services-networking/service/ -# headless service for rpc -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.app }}-rpc -spec: - ports: - - port: 9933 - name: http-rpc - - port: 9944 - name: websocket-rpc - selector: - app: {{ .Values.GitlabEnvSlug | default .Values.app }} - sessionAffinity: None - type: ClusterIP - clusterIP: None ---- -{{- if .Values.listen_node_port }} -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.app }} -spec: - ports: - - port: 30333 - name: p2p - nodePort: 30333 - protocol: TCP - selector: - app: {{ .Values.GitlabEnvSlug | default .Values.app }} - sessionAffinity: None - type: NodePort - # don't route exteral traffic to non-local pods - externalTrafficPolicy: Local -{{- else if .Values.validator.keys }} -{{- $root := . -}} -{{- range until (int .Values.nodes.replicas) }} ---- -kind: Service -apiVersion: v1 -metadata: - name: {{ $root.Values.app }}-{{ . }} -spec: - selector: - statefulset.kubernetes.io/pod-name: {{ $root.Values.app }}-{{ . 
}} - ports: - - port: 30333 - targetPort: 30333 - protocol: TCP -{{- end }} -{{- end }} diff --git a/scripts/kubernetes/templates/serviceaccount.yaml b/scripts/kubernetes/templates/serviceaccount.yaml deleted file mode 100644 index b603ad13ddb4..000000000000 --- a/scripts/kubernetes/templates/serviceaccount.yaml +++ /dev/null @@ -1,10 +0,0 @@ -{{- if .Values.rbac.enable }} -# service account for polkadot pods themselves -# no permissions for the API are required -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app: {{ .Values.GitlabEnvSlug | default .Values.app }} - name: {{ .Values.rbac.name }} -{{- end }} diff --git a/scripts/kubernetes/templates/statefulset.yaml b/scripts/kubernetes/templates/statefulset.yaml deleted file mode 100644 index 2f400bb32eb9..000000000000 --- a/scripts/kubernetes/templates/statefulset.yaml +++ /dev/null @@ -1,139 +0,0 @@ -# https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/ -# https://cloud.google.com/kubernetes-engine/docs/concepts/statefulset -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ .Values.app }} -spec: - selector: - matchLabels: - app: {{ .Values.GitlabEnvSlug | default .Values.app }} - serviceName: {{ .Values.app }} - replicas: {{ .Values.nodes.replicas }} - updateStrategy: - type: RollingUpdate - podManagementPolicy: Parallel - template: - metadata: - labels: - app: {{ .Values.GitlabEnvSlug | default .Values.app }} - spec: - {{- if .Values.rbac.enable }} - serviceAccountName: {{ .Values.rbac.name }} - {{- else }} - serviceAccountName: default - {{- end }} - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: node - operator: In - values: - - {{ .Values.node_group }} - {{- if .Values.listen_node_port }} - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: "app" - operator: In - values: - - {{ .Values.app }} - topologyKey: "kubernetes.io/hostname" - {{- end }} - terminationGracePeriodSeconds: 300 - {{- if .Values.validator.keys }} - volumes: - - name: {{ .Values.app }}-validator-secrets - secret: - secretName: {{ .Values.app }}-secrets - initContainers: - - name: prepare-secrets - image: busybox - command: [ "/bin/sh" ] - args: - - -c - - sed -n -r "s/^${POD_NAME}-key ([^ ]+)$/\1/p" /etc/validator/secrets > {{ .Values.image.basepath }}/key; - sed -n -r "s/^${POD_NAME}-node-key ([^ ]+)$/\1/p" /etc/validator/secrets > {{ .Values.image.basepath }}/node-key; - sed -n -r "s/^${POD_NAME}-name ([^ ]+)$/\1/p" /etc/validator/secrets > {{ .Values.image.basepath }}/name; - test -s {{ .Values.image.basepath }}/name || echo "${POD_NAME}" > {{ .Values.image.basepath }}/name - env: - # from (workaround for hostname) - # https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/ - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - volumeMounts: - - name: {{ .Values.app }}-validator-secrets - readOnly: true - mountPath: "/etc/validator" - - name: {{ .Values.app }}dir - mountPath: {{ .Values.image.basepath }} - {{- end }} - containers: - - name: {{ .Values.app }} - imagePullPolicy: "{{ .Values.image.pullPolicy }}" - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - {{- if .Values.resources }} - resources: - requests: - memory: {{ .Values.resources.memory }} - cpu: {{ .Values.resources.cpu }} - {{- end }} - ports: - - containerPort: 30333 - name: p2p - - containerPort: 9933 - name: http-rpc - - 
containerPort: 9944 - name: websocket-rpc - command: ["/bin/sh"] - args: - - -c - - exec {{ .Values.image.executable }} - --base-path {{ .Values.image.basepath }} - {{- if .Values.validator.keys }} - --validator - --name $(cat {{ .Values.image.basepath }}/name) - --key $(cat {{ .Values.image.basepath }}/key) - --node-key $(cat {{ .Values.image.basepath }}/node-key) - {{- else }} - --name $(POD_NAME) - {{- end }} - {{- range .Values.nodes.args }} {{ . }} {{- end }} - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - volumeMounts: - - name: {{ .Values.app }}dir - mountPath: {{ .Values.image.basepath }} - readinessProbe: - httpGet: - path: /health - port: http-rpc - initialDelaySeconds: 10 - periodSeconds: 10 - livenessProbe: - httpGet: - path: /health - port: http-rpc - initialDelaySeconds: 10 - periodSeconds: 10 - securityContext: - runAsUser: 1000 - fsGroup: 1000 - volumeClaimTemplates: - - metadata: - name: {{ .Values.app }}dir - spec: - accessModes: [ "ReadWriteOnce" ] - storageClassName: ssd - resources: - requests: - storage: 32Gi - diff --git a/scripts/kubernetes/values.yaml b/scripts/kubernetes/values.yaml deleted file mode 100644 index 98b81b0e1df2..000000000000 --- a/scripts/kubernetes/values.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# set tag manually --set image.tag=latest -image: - repository: parity/polkadot - tag: latest - pullPolicy: Always - basepath: /polkadot - executable: /usr/local/bin/polkadot - - -# if set to true a service account for polkadot will be created -rbac: - enable: true - name: polkadot - -# name of the statefulset -app: polkadot -node_group: polkadot -listen_node_port: true - -nodes: - replicas: 2 - args: - - --chain - - alexander - # serve rpc within the local network - # - fenced off the world via firewall - # - used for health checks - - --rpc-external - - --ws-external - # - --log - # - sub-libp2p=trace - - -validator: {} -# providing 'keys' string via --set commandline parameter will run the nodes -# in validator mode (--validator). - -# maybe adopt resource limits here to the nodes of the pool -# resources: -# memory: "5Gi" -# cpu: "1.5" - From 7f79897415a86f2568465499ecc6a862bf777dd7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Jul 2021 11:27:15 -0500 Subject: [PATCH 07/14] Bump async-process from 1.0.1 to 1.1.0 (#3122) Bumps [async-process](https://github.com/smol-rs/async-process) from 1.0.1 to 1.1.0. 
- [Release notes](https://github.com/smol-rs/async-process/releases) - [Changelog](https://github.com/smol-rs/async-process/blob/master/CHANGELOG.md) - [Commits](https://github.com/smol-rs/async-process/compare/v1.0.1...v1.1.0) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 22 ++++++++-------------- node/core/pvf/Cargo.toml | 2 +- 2 files changed, 9 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 41056ca89f19..c698ec3077f4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -155,12 +155,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "arc-swap" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034" - [[package]] name = "arrayref" version = "0.3.6" @@ -304,15 +298,16 @@ dependencies = [ [[package]] name = "async-process" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8cea09c1fb10a317d1b5af8024eeba256d6554763e85ecd90ff8df31c7bbda" +checksum = "a8f38756dd9ac84671c428afbf7c9f7495feff9ec5b0710f17100098e5b354ac" dependencies = [ "async-io", "blocking", - "cfg-if 0.1.10", + "cfg-if 1.0.0", "event-listener", "futures-lite", + "libc", "once_cell", "signal-hook", "winapi 0.3.9", @@ -9425,9 +9420,9 @@ checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" [[package]] name = "signal-hook" -version = "0.1.16" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "604508c1418b99dfe1925ca9224829bb2a8a9a04dda655cc01fcad46f4ab05ed" +checksum = "470c5a6397076fae0094aaf06a08e6ba6f37acb77d3b1b91ea92b4d6c8650c39" dependencies = [ "libc", "signal-hook-registry", @@ -9435,11 +9430,10 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.2.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94f478ede9f64724c5d173d7bb56099ec3e2d9fc2774aac65d34b8b890405f41" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" dependencies = [ - "arc-swap", "libc", ] diff --git a/node/core/pvf/Cargo.toml b/node/core/pvf/Cargo.toml index 23388b28daa4..cfd60314feae 100644 --- a/node/core/pvf/Cargo.toml +++ b/node/core/pvf/Cargo.toml @@ -11,7 +11,7 @@ path = "bin/puppet_worker.rs" [dependencies] always-assert = "0.1" async-std = { version = "1.8.0", features = ["attributes"] } -async-process = "1.0.1" +async-process = "1.1.0" assert_matches = "1.4.0" futures = "0.3.15" futures-timer = "3.0.2" From 219f0a4efaeb34fd9ca7a7d56da6ca0097c385ab Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 19 Jul 2021 11:49:19 -0500 Subject: [PATCH 08/14] Disputes runtime (#2947) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * disputes module skeleton and storage * implement dispute module initialization logic * implement disputes session change logic * provide dispute skeletons * deduplication & ancient check * fix a couple of warnings * begin provide_dispute_data impl * flesh out statement set import somewhat * move ApprovalVote to shared primitives * add a signing-payload API to explicit dispute statements * implement statement signature checking * some bitflags glue for observing changes in disputes * implement dispute vote import logic * flesh out everything except slashing * guide: tweaks * declare and use punishment trait * punish validators for 
inconclusive disputes * guide: tiny fix * guide: update docs * add disputes getter fn * guide: small change to spam slots handling * improve spam slots handling and fix some bugs * finish API of disputes runtime * define and deposit `RevertTo` log * begin integrating disputes into para_inherent * use precomputed slash_for/against * return candidate hash from process_bitfields * implement inclusion::collect_disputed * finish integration into rest of runtime * add Disputes to initializer * address suggestions * use pallet macro * fix typo * Update runtime/parachains/src/disputes.rs * add test: fix pruning * document specific behavior * deposit events on dispute changes * add an allow(unused) on fn disputes * add a dummy PunishValidators implementation * add disputes module to Rococo * add disputes module to westend runtime * add disputes module to test runtime * add disputes module to kusama runtime * guide: prepare for runtime API for checking frozenness * remove revert digests in favor of state variable * merge reversions * Update runtime/parachains/src/disputes.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Update runtime/parachains/src/disputes.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Update runtime/parachains/src/disputes.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * add byzantine_threshold and supermajority_threshold utilities to primitives * use primitive helpers * deposit revert event when freezing chain * deposit revert log when freezing chain * test revert event and log are generated when freezing * add trait to decouple disputes handling from paras inherent handling * runtime: fix compilation and setup dispute handler * disputes: add hook for filtering out dispute statements * disputes: add initializer hooks to DisputesHandler * runtime: remove disputes pallet from all runtimes * tag TODOs * don't import any dispute statements just yet... * address grumbles * fix spellcheck, hopefully * maybe now? * last spellcheck round * fix runtime tests * fix test-runtime Co-authored-by: thiolliere Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> Co-authored-by: André Silva --- Cargo.lock | 1 + primitives/src/v1/mod.rs | 2 +- .../src/runtime/disputes.md | 42 +- .../implementers-guide/src/types/disputes.md | 14 +- runtime/kusama/src/lib.rs | 1 + runtime/parachains/Cargo.toml | 1 + runtime/parachains/src/disputes.rs | 2081 +++++++++++++++++ runtime/parachains/src/inclusion.rs | 35 +- runtime/parachains/src/initializer.rs | 20 +- runtime/parachains/src/lib.rs | 1 + runtime/parachains/src/mock.rs | 58 +- runtime/parachains/src/paras_inherent.rs | 66 +- runtime/rococo/src/lib.rs | 1 + runtime/test-runtime/src/lib.rs | 9 + runtime/westend/src/lib.rs | 1 + 15 files changed, 2291 insertions(+), 42 deletions(-) create mode 100644 runtime/parachains/src/disputes.rs diff --git a/Cargo.lock b/Cargo.lock index c698ec3077f4..9284c66eb3d9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6853,6 +6853,7 @@ dependencies = [ name = "polkadot-runtime-parachains" version = "0.9.8" dependencies = [ + "bitflags", "bitvec", "derive_more", "frame-benchmarking", diff --git a/primitives/src/v1/mod.rs b/primitives/src/v1/mod.rs index 0d9f429d469a..ed082707a8b0 100644 --- a/primitives/src/v1/mod.rs +++ b/primitives/src/v1/mod.rs @@ -1186,7 +1186,7 @@ pub struct DisputeStatementSet { pub type MultiDisputeStatementSet = Vec; /// The entire state of a dispute. 
-#[derive(Encode, Decode, Clone, RuntimeDebug)] +#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq)] pub struct DisputeState { /// A bitfield indicating all validators for the candidate. pub validators_for: BitVec, // one bit per validator. diff --git a/roadmap/implementers-guide/src/runtime/disputes.md b/roadmap/implementers-guide/src/runtime/disputes.md index 4faece7cb092..79015e4a740f 100644 --- a/roadmap/implementers-guide/src/runtime/disputes.md +++ b/roadmap/implementers-guide/src/runtime/disputes.md @@ -6,7 +6,7 @@ However, this isn't the end of the story. We are working in a forkful blockchain 1. For security, validators that misbehave shouldn't only be slashed on one fork, but on all possible forks. Validators that misbehave shouldn't be able to create a new fork of the chain when caught and get away with their misbehavior. 1. It is possible (and likely) that the parablock being contested has not appeared on all forks. -1. If a block author believes that there is a disputed parablock on a specific fork that will resolve to a reversion of the fork, that block author is better incentivized to build on a different fork which does not include that parablock. +1. If a block author believes that there is a disputed parablock on a specific fork that will resolve to a reversion of the fork, that block author has more incentive to build on a different fork which does not include that parablock. This means that in all likelihood, there is the possibility of disputes that are started on one fork of the relay chain, and as soon as the dispute resolution process starts to indicate that the parablock is indeed invalid, that fork of the relay chain will be abandoned and the dispute will never be fully resolved on that chain. @@ -42,11 +42,12 @@ Included: double_map (SessionIndex, CandidateHash) -> Option, // fewer than `byzantine_threshold + 1` validators. // // The i'th entry of the vector corresponds to the i'th validator in the session. -SpamSlots: map SessionIndex -> Vec, -// Whether the chain is frozen or not. Starts as `false`. When this is `true`, -// the chain will not accept any new parachain blocks for backing or inclusion. -// It can only be set back to `false` by governance intervention. -Frozen: bool, +SpamSlots: map SessionIndex -> Option>, +// Whether the chain is frozen or not. Starts as `None`. When this is `Some`, +// the chain will not accept any new parachain blocks for backing or inclusion, +// and its value indicates the last valid block number in the chain. +// It can only be set back to `None` by governance intervention. +Frozen: Option, ``` > `byzantine_threshold` refers to the maximum number `f` of validators which may be byzantine. The total number of validators is `n = 3f + e` where `e in { 1, 2, 3 }`. @@ -54,7 +55,8 @@ Frozen: bool, ## Session Change 1. If the current session is not greater than `config.dispute_period + 1`, nothing to do here. -1. Set `pruning_target = current_session - config.dispute_period - 1`. We add the extra `1` because we want to keep things for `config.dispute_period` _full_ sessions. The stuff at the end of the most recent session has been around for ~0 sessions, not ~1. +1. Set `pruning_target = current_session - config.dispute_period - 1`. We add the extra `1` because we want to keep things for `config.dispute_period` _full_ sessions. + The stuff at the end of the most recent session has been around for a little over 0 sessions, not a little over 1. 1. 
If `LastPrunedSession` is `None`, then set `LastPrunedSession` to `Some(pruning_target)` and return. 1. Otherwise, clear out all disputes, included candidates, and `SpamSlots` entries in the range `last_pruned..=pruning_target` and set `LastPrunedSession` to `Some(pruning_target)`. @@ -65,7 +67,6 @@ Frozen: bool, ## Routines * `provide_multi_dispute_data(MultiDisputeStatementSet) -> Vec<(SessionIndex, Hash)>`: - 1. Fail if any disputes in the set are duplicate or concluded before the `config.dispute_post_conclusion_acceptance_period` window relative to now. 1. Pass on each dispute statement set to `provide_dispute_data`, propagating failure. 1. Return a list of all candidates who just had disputes initiated. @@ -75,29 +76,30 @@ Frozen: bool, 1. If there is no dispute under `Disputes`, create a new `DisputeState` with blank bitfields. 1. If `concluded_at` is `Some`, and is `concluded_at + config.post_conclusion_acceptance_period < now`, return false. 1. If the overlap of the validators in the `DisputeStatementSet` and those already present in the `DisputeState` is fewer in number than `byzantine_threshold + 1` and the candidate is not present in the `Included` map - 1. increment `SpamSlots` for each validator in the `DisputeStatementSet` which is not already in the `DisputeState`. Initialize the `SpamSlots` to a zeroed vector first, if necessary. - 1. If the value for any spam slot exceeds `config.dispute_max_spam_slots`, return false. - 1. If the overlap of the validators in the `DisputeStatementSet` and those already present in the `DisputeState` is at least `byzantine_threshold + 1`, the `DisputeState` has fewer than `byzantine_threshold + 1` validators, and the candidate is not present in the `Included` map, decrement `SpamSlots` for each validator in the `DisputeState`. - 1. Import all statements into the dispute. This should fail if any statements are duplicate; if the corresponding bit for the corresponding validator is set in the dispute already. - 1. If `concluded_at` is `None`, reward all statements slightly less. + 1. increment `SpamSlots` for each validator in the `DisputeStatementSet` which is not already in the `DisputeState`. Initialize the `SpamSlots` to a zeroed vector first, if necessary. do not increment `SpamSlots` if the candidate is local. + 1. If the value for any spam slot exceeds `config.dispute_max_spam_slots`, return false. + 1. If the overlap of the validators in the `DisputeStatementSet` and those already present in the `DisputeState` is at least `byzantine_threshold + 1`, the `DisputeState` has fewer than `byzantine_threshold + 1` validators, and the candidate is not present in the `Included` map, then decrease `SpamSlots` by 1 for each validator in the `DisputeState`. + 1. Import all statements into the dispute. This should fail if any statements are duplicate or if the corresponding bit for the corresponding validator is set in the dispute already. + 1. If `concluded_at` is `None`, reward all statements. 1. If `concluded_at` is `Some`, reward all statements slightly less. - 1. If either side now has supermajority, slash the other side. This may be both sides, and we support this possibility in code, but note that this requires validators to participate on both sides which has negative expected value. Set `concluded_at` to `Some(now)`. + 1. If either side now has supermajority and did not previously, slash the other side. 
This may be both sides, and we support this possibility in code, but note that this requires validators to participate on both sides which has negative expected value. Set `concluded_at` to `Some(now)` if it was `None`. 1. If just concluded against the candidate and the `Included` map contains `(session, candidate)`: invoke `revert_and_freeze` with the stored block number. 1. Return true if just initiated, false otherwise. * `disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState)>`: Get a list of all disputes and info about dispute state. - 1. Iterate over all disputes in `Disputes`. Set the flag according to `concluded`. + 1. Iterate over all disputes in `Disputes` and collect into a vector. * `note_included(SessionIndex, CandidateHash, included_in: BlockNumber)`: 1. Add `(SessionIndex, CandidateHash)` to the `Included` map with `included_in - 1` as the value. - 1. If there is a dispute under `(Sessionindex, CandidateHash)` with fewer than `byzantine_threshold + 1` participating validators, decrement `SpamSlots` for each validator in the `DisputeState`. + 1. If there is a dispute under `(Sessionindex, CandidateHash)` with fewer than `byzantine_threshold + 1` participating validators, decrease `SpamSlots` by 1 for each validator in the `DisputeState`. 1. If there is a dispute under `(SessionIndex, CandidateHash)` that has concluded against the candidate, invoke `revert_and_freeze` with the stored block number. * `could_be_invalid(SessionIndex, CandidateHash) -> bool`: Returns whether a candidate has a live dispute ongoing or a dispute which has already concluded in the negative. -* `is_frozen()`: Load the value of `Frozen` from storage. +* `is_frozen()`: Load the value of `Frozen` from storage. Return true if `Some` and false if `None`. -* `revert_and_freeze(BlockNumber): +* `last_valid_block()`: Load the value of `Frozen` from storage and return. None indicates that all blocks in the chain are potentially valid. + +* `revert_and_freeze(BlockNumber)`: 1. If `is_frozen()` return. - 1. issue a digest in the block header which indicates the chain is to be abandoned back to the stored block number. - 1. Set `Frozen` to true. + 1. Set `Frozen` to `Some(BlockNumber)` to indicate a rollback to the given block number is necessary. diff --git a/roadmap/implementers-guide/src/types/disputes.md b/roadmap/implementers-guide/src/types/disputes.md index becace642dfe..3043b7615abd 100644 --- a/roadmap/implementers-guide/src/types/disputes.md +++ b/roadmap/implementers-guide/src/types/disputes.md @@ -1,6 +1,6 @@ # Disputes -## DisputeStatementSet +## `DisputeStatementSet` ```rust /// A set of statements about a specific candidate. @@ -11,7 +11,7 @@ struct DisputeStatementSet { } ``` -## DisputeStatement +## `DisputeStatement` ```rust /// A statement about a candidate, to be used within some dispute resolution process. @@ -33,8 +33,8 @@ Kinds of dispute statements. Each of these can be combined with a candidate hash ```rust enum ValidDisputeStatementKind { Explicit, - BackingSeconded, - BackingValid, + BackingSeconded(Hash), + BackingValid(Hash), ApprovalChecking, } @@ -43,7 +43,7 @@ enum InvalidDisputeStatementKind { } ``` -## ExplicitDisputeStatement +## `ExplicitDisputeStatement` ```rust struct ExplicitDisputeStatement { @@ -53,7 +53,7 @@ struct ExplicitDisputeStatement { } ``` -## MultiDisputeStatementSet +## `MultiDisputeStatementSet` Sets of statements for many (zero or more) disputes. @@ -61,7 +61,7 @@ Sets of statements for many (zero or more) disputes. 
type MultiDisputeStatementSet = Vec; ``` -## DisputeState +## `DisputeState` ```rust struct DisputeState { diff --git a/runtime/kusama/src/lib.rs b/runtime/kusama/src/lib.rs index 21968806e88b..fe41607dda43 100644 --- a/runtime/kusama/src/lib.rs +++ b/runtime/kusama/src/lib.rs @@ -1092,6 +1092,7 @@ impl parachains_session_info::Config for Runtime {} impl parachains_inclusion::Config for Runtime { type Event = Event; + type DisputesHandler = (); type RewardValidators = parachains_reward_points::RewardValidatorsWithEraPoints; } diff --git a/runtime/parachains/Cargo.toml b/runtime/parachains/Cargo.toml index 1447e6b878bb..0a766af14c66 100644 --- a/runtime/parachains/Cargo.toml +++ b/runtime/parachains/Cargo.toml @@ -11,6 +11,7 @@ log = { version = "0.4.14", default-features = false } rustc-hex = { version = "2.1.0", default-features = false } serde = { version = "1.0.123", features = [ "derive" ], optional = true } derive_more = "0.99.14" +bitflags = "1" sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } diff --git a/runtime/parachains/src/disputes.rs b/runtime/parachains/src/disputes.rs new file mode 100644 index 000000000000..f7327ad2bd4f --- /dev/null +++ b/runtime/parachains/src/disputes.rs @@ -0,0 +1,2081 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Runtime component for handling disputes of parachain candidates. + +use sp_std::prelude::*; +use primitives::v1::{ + byzantine_threshold, supermajority_threshold, ApprovalVote, CandidateHash, CompactStatement, + ConsensusLog, DisputeState, DisputeStatement, DisputeStatementSet, ExplicitDisputeStatement, + InvalidDisputeStatementKind, MultiDisputeStatementSet, SessionIndex, SigningContext, + ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, +}; +use sp_runtime::{ + traits::{One, Zero, Saturating, AppVerify}, + DispatchError, RuntimeDebug, SaturatedConversion, +}; +use frame_support::{ensure, traits::Get, weights::Weight}; +use parity_scale_codec::{Encode, Decode}; +use bitvec::{bitvec, order::Lsb0 as BitOrderLsb0}; +use crate::{ + configuration::{self, HostConfiguration}, + initializer::SessionChangeNotification, + session_info, +}; + +/// Whether the dispute is local or remote. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +pub enum DisputeLocation { + Local, + Remote, +} + +/// The result of a dispute, whether the candidate is deemed valid (for) or invalid (against). +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +pub enum DisputeResult { + Valid, + Invalid, +} + +/// Reward hooks for disputes. +pub trait RewardValidators { + // Give each validator a reward, likely small, for participating in the dispute. 
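+	// In the pallet implementation below this is invoked once per imported statement set,
+	// with the validators that newly participated in that dispute.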
+ fn reward_dispute_statement(session: SessionIndex, validators: impl IntoIterator); +} + +impl RewardValidators for () { + fn reward_dispute_statement(_: SessionIndex, _: impl IntoIterator) { } +} + +/// Punishment hooks for disputes. +pub trait PunishValidators { + /// Punish a series of validators who were for an invalid parablock. This is expected to be a major + /// punishment. + fn punish_for_invalid(session: SessionIndex, validators: impl IntoIterator); + + /// Punish a series of validators who were against a valid parablock. This is expected to be a minor + /// punishment. + fn punish_against_valid(session: SessionIndex, validators: impl IntoIterator); + + /// Punish a series of validators who were part of a dispute which never concluded. This is expected + /// to be a minor punishment. + fn punish_inconclusive(session: SessionIndex, validators: impl IntoIterator); +} + +impl PunishValidators for () { + fn punish_for_invalid(_: SessionIndex, _: impl IntoIterator) { + + } + + fn punish_against_valid(_: SessionIndex, _: impl IntoIterator) { + + } + + fn punish_inconclusive(_: SessionIndex, _: impl IntoIterator) { + + } +} + +/// Hook into disputes handling. +/// +/// Allows decoupling parachains handling from disputes so that it can +/// potentially be disabled when instantiating a specific runtime. +pub trait DisputesHandler { + /// Whether the chain is frozen, if the chain is frozen it will not accept + /// any new parachain blocks for backing or inclusion. + fn is_frozen() -> bool; + + /// Handler for filtering any dispute statements before including them as part + /// of inherent data. This can be useful to filter out ancient and duplicate + /// dispute statements. + fn filter_multi_dispute_data(statement_sets: &mut MultiDisputeStatementSet); + + /// Handle sets of dispute statements corresponding to 0 or more candidates. + /// Returns a vector of freshly created disputes. + fn provide_multi_dispute_data( + statement_sets: MultiDisputeStatementSet, + ) -> Result, DispatchError>; + + /// Note that the given candidate has been included. + fn note_included( + session: SessionIndex, + candidate_hash: CandidateHash, + included_in: BlockNumber, + ); + + /// Whether the given candidate could be invalid, i.e. there is an ongoing + /// or concluded dispute with supermajority-against. + fn could_be_invalid(session: SessionIndex, candidate_hash: CandidateHash) -> bool; + + /// Called by the initializer to initialize the configuration module. + fn initializer_initialize(now: BlockNumber) -> Weight; + + /// Called by the initializer to finalize the configuration module. + fn initializer_finalize(); + + /// Called by the initializer to note that a new session has started. 
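+	/// For the pallet implementation this is also where dispute data older than
+	/// `dispute_period` sessions is pruned.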
+ fn initializer_on_new_session(notification: &SessionChangeNotification); +} + +impl DisputesHandler for () { + fn is_frozen() -> bool { + false + } + + fn filter_multi_dispute_data(statement_sets: &mut MultiDisputeStatementSet) { + statement_sets.clear() + } + + fn provide_multi_dispute_data( + _statement_sets: MultiDisputeStatementSet, + ) -> Result, DispatchError> { + Ok(Vec::new()) + } + + fn note_included( + _session: SessionIndex, + _candidate_hash: CandidateHash, + _included_in: BlockNumber, + ) { + + } + + fn could_be_invalid(_session: SessionIndex, _candidate_hash: CandidateHash) -> bool { + false + } + + fn initializer_initialize(_now: BlockNumber) -> Weight { + 0 + } + + fn initializer_finalize() { + + } + + fn initializer_on_new_session(_notification: &SessionChangeNotification) { + + } +} + +impl DisputesHandler for pallet::Pallet { + fn is_frozen() -> bool { + pallet::Pallet::::is_frozen() + } + + fn filter_multi_dispute_data(statement_sets: &mut MultiDisputeStatementSet) { + // TODO: filter duplicate and ancient dispute statements. For now, don't import anything + // because there will be redundancies. + // + // https://github.com/paritytech/polkadot/issues/3472 + statement_sets.clear(); + } + + fn provide_multi_dispute_data( + statement_sets: MultiDisputeStatementSet, + ) -> Result, DispatchError> { + pallet::Pallet::::provide_multi_dispute_data(statement_sets) + } + + fn note_included( + session: SessionIndex, + candidate_hash: CandidateHash, + included_in: T::BlockNumber, + ) { + pallet::Pallet::::note_included(session, candidate_hash, included_in) + } + + fn could_be_invalid(session: SessionIndex, candidate_hash: CandidateHash) -> bool { + pallet::Pallet::::could_be_invalid(session, candidate_hash) + } + + fn initializer_initialize(now: T::BlockNumber) -> Weight { + pallet::Pallet::::initializer_initialize(now) + } + + fn initializer_finalize() { + pallet::Pallet::::initializer_finalize() + } + + fn initializer_on_new_session(notification: &SessionChangeNotification) { + pallet::Pallet::::initializer_on_new_session(notification) + } +} + +pub use pallet::*; +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use super::*; + + #[pallet::config] + pub trait Config: + frame_system::Config + + configuration::Config + + session_info::Config + { + type Event: From> + IsType<::Event>; + type RewardValidators: RewardValidators; + type PunishValidators: PunishValidators; + } + + #[pallet::pallet] + pub struct Pallet(_); + + /// The last pruned session, if any. All data stored by this module + /// references sessions. + #[pallet::storage] + pub(super) type LastPrunedSession = StorageValue<_, SessionIndex>; + + /// All ongoing or concluded disputes for the last several sessions. + #[pallet::storage] + pub(super) type Disputes = StorageDoubleMap< + _, + Twox64Concat, SessionIndex, + Blake2_128Concat, CandidateHash, + DisputeState, + >; + + /// All included blocks on the chain, as well as the block number in this chain that + /// should be reverted back to if the candidate is disputed and determined to be invalid. + #[pallet::storage] + pub(super) type Included = StorageDoubleMap< + _, + Twox64Concat, SessionIndex, + Blake2_128Concat, CandidateHash, + T::BlockNumber, + >; + + /// Maps session indices to a vector indicating the number of potentially-spam disputes + /// each validator is participating in. Potentially-spam disputes are remote disputes which have + /// fewer than `byzantine_threshold + 1` validators. 
+ /// + /// The i'th entry of the vector corresponds to the i'th validator in the session. + #[pallet::storage] + pub(super) type SpamSlots = StorageMap<_, Twox64Concat, SessionIndex, Vec>; + + /// Whether the chain is frozen. Starts as `None`. When this is `Some`, + /// the chain will not accept any new parachain blocks for backing or inclusion, + /// and its value indicates the last valid block number in the chain. + /// It can only be set back to `None` by governance intervention. + #[pallet::storage] + #[pallet::getter(fn last_valid_block)] + pub(super) type Frozen = StorageValue<_, Option, ValueQuery>; + + #[pallet::event] + #[pallet::generate_deposit(pub fn deposit_event)] + pub enum Event { + /// A dispute has been initiated. \[candidate hash, dispute location\] + DisputeInitiated(CandidateHash, DisputeLocation), + /// A dispute has concluded for or against a candidate. + /// `\[para id, candidate hash, dispute result\]` + DisputeConcluded(CandidateHash, DisputeResult), + /// A dispute has timed out due to insufficient participation. + /// `\[para id, candidate hash\]` + DisputeTimedOut(CandidateHash), + /// A dispute has concluded with supermajority against a candidate. + /// Block authors should no longer build on top of this head and should + /// instead revert to the block at the given height which is the last + /// known valid block in this chain. + Revert(T::BlockNumber), + } + + #[pallet::error] + pub enum Error { + /// Duplicate dispute statement sets provided. + DuplicateDisputeStatementSets, + /// Ancient dispute statement provided. + AncientDisputeStatement, + /// Validator index on statement is out of bounds for session. + ValidatorIndexOutOfBounds, + /// Invalid signature on statement. + InvalidSignature, + /// Validator vote submitted more than once to dispute. + DuplicateStatement, + /// Too many spam slots used by some specific validator. + PotentialSpam, + } +} + +bitflags::bitflags! { + #[derive(Default)] + struct DisputeStateFlags: u8 { + const CONFIRMED = 0b0001; + const FOR_SUPERMAJORITY = 0b0010; + const AGAINST_SUPERMAJORITY = 0b0100; + } +} + +impl DisputeStateFlags { + fn from_state( + state: &DisputeState, + ) -> Self { + let n = state.validators_for.len(); + + let byzantine_threshold = byzantine_threshold(n); + let supermajority_threshold = supermajority_threshold(n); + + let mut flags = DisputeStateFlags::default(); + let all_participants = { + let mut a = state.validators_for.clone(); + *a |= state.validators_against.iter().by_val(); + a + }; + if all_participants.count_ones() > byzantine_threshold { + flags |= DisputeStateFlags::CONFIRMED; + } + + if state.validators_for.count_ones() >= supermajority_threshold { + flags |= DisputeStateFlags::FOR_SUPERMAJORITY; + } + + if state.validators_against.count_ones() >= supermajority_threshold { + flags |= DisputeStateFlags::AGAINST_SUPERMAJORITY; + } + + flags + } +} + +#[derive(PartialEq, RuntimeDebug)] +enum SpamSlotChange { + Inc, + Dec, +} + +struct ImportSummary { + // The new state, with all votes imported. + state: DisputeState, + // Changes to spam slots. Validator index paired with directional change. + spam_slot_changes: Vec<(ValidatorIndex, SpamSlotChange)>, + // Validators to slash for being (wrongly) on the AGAINST side. + slash_against: Vec, + // Validators to slash for being (wrongly) on the FOR side. + slash_for: Vec, + // New participants in the dispute. + new_participants: bitvec::vec::BitVec, + // Difference in state flags from previous. 
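+	// The caller inspects this to detect a fresh supermajority in either direction,
+	// deposit `DisputeConcluded` events and, for local candidates, trigger `revert_and_freeze`.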
+ new_flags: DisputeStateFlags, +} + +#[derive(RuntimeDebug, PartialEq, Eq)] +enum VoteImportError { + ValidatorIndexOutOfBounds, + DuplicateStatement, +} + +impl From for Error { + fn from(e: VoteImportError) -> Self { + match e { + VoteImportError::ValidatorIndexOutOfBounds => Error::::ValidatorIndexOutOfBounds, + VoteImportError::DuplicateStatement => Error::::DuplicateStatement, + } + } +} + +struct DisputeStateImporter { + state: DisputeState, + now: BlockNumber, + new_participants: bitvec::vec::BitVec, + pre_flags: DisputeStateFlags, +} + +impl DisputeStateImporter { + fn new( + state: DisputeState, + now: BlockNumber, + ) -> Self { + let pre_flags = DisputeStateFlags::from_state(&state); + let new_participants = bitvec::bitvec![BitOrderLsb0, u8; 0; state.validators_for.len()]; + + DisputeStateImporter { + state, + now, + new_participants, + pre_flags, + } + } + + fn import(&mut self, validator: ValidatorIndex, valid: bool) + -> Result<(), VoteImportError> + { + let (bits, other_bits) = if valid { + (&mut self.state.validators_for, &mut self.state.validators_against) + } else { + (&mut self.state.validators_against, &mut self.state.validators_for) + }; + + // out of bounds or already participated + match bits.get(validator.0 as usize).map(|b| *b) { + None => return Err(VoteImportError::ValidatorIndexOutOfBounds), + Some(true) => return Err(VoteImportError::DuplicateStatement), + Some(false) => {} + } + + // inefficient, and just for extra sanity. + if validator.0 as usize >= self.new_participants.len() { + return Err(VoteImportError::ValidatorIndexOutOfBounds); + } + + bits.set(validator.0 as usize, true); + + // New participants tracks those which didn't appear on either + // side of the dispute until now. So we check the other side + // and checked the first side before. + if other_bits.get(validator.0 as usize).map_or(false, |b| !*b) { + self.new_participants.set(validator.0 as usize, true); + } + + Ok(()) + } + + fn finish(mut self) -> ImportSummary { + let pre_flags = self.pre_flags; + let post_flags = DisputeStateFlags::from_state(&self.state); + + let pre_post_contains = |flags| (pre_flags.contains(flags), post_flags.contains(flags)); + + // 1. Act on confirmed flag state to inform spam slots changes. + let spam_slot_changes: Vec<_> = match pre_post_contains(DisputeStateFlags::CONFIRMED) { + (false, false) => { + // increment spam slots for all new participants. + self.new_participants.iter_ones() + .map(|i| (ValidatorIndex(i as _), SpamSlotChange::Inc)) + .collect() + } + (false, true) => { + let prev_participants = { + // all participants + let mut a = self.state.validators_for.clone(); + *a |= self.state.validators_against.iter().by_val(); + + // which are not new participants + *a &= self.new_participants.iter().by_val().map(|b| !b); + + a + }; + + prev_participants.iter_ones() + .map(|i| (ValidatorIndex(i as _), SpamSlotChange::Dec)) + .collect() + } + (true, true) | (true, false) => { + // nothing to do. (true, false) is also impossible. + Vec::new() + } + }; + + // 2. Check for fresh FOR supermajority. Only if not already concluded. + let slash_against = if let (false, true) = pre_post_contains(DisputeStateFlags::FOR_SUPERMAJORITY) { + if self.state.concluded_at.is_none() { + self.state.concluded_at = Some(self.now.clone()); + } + + // provide AGAINST voters to slash. + self.state.validators_against.iter_ones() + .map(|i| ValidatorIndex(i as _)) + .collect() + } else { + Vec::new() + }; + + // 3. Check for fresh AGAINST supermajority. 
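+		// As with step 2, this only fires on the `(false, true)` transition, so each
+		// direction can conclude at most once and `concluded_at` keeps its first value.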
+ let slash_for = if let (false, true) = pre_post_contains(DisputeStateFlags::AGAINST_SUPERMAJORITY) { + if self.state.concluded_at.is_none() { + self.state.concluded_at = Some(self.now.clone()); + } + + // provide FOR voters to slash. + self.state.validators_for.iter_ones() + .map(|i| ValidatorIndex(i as _)) + .collect() + } else { + Vec::new() + }; + + ImportSummary { + state: self.state, + spam_slot_changes, + slash_against, + slash_for, + new_participants: self.new_participants, + new_flags: post_flags - pre_flags, + } + } +} + +impl Pallet { + /// Called by the initializer to initialize the disputes module. + pub(crate) fn initializer_initialize(now: T::BlockNumber) -> Weight { + let config = >::config(); + + let mut weight = 0; + for (session_index, candidate_hash, mut dispute) in >::iter() { + weight += T::DbWeight::get().reads_writes(1, 0); + + if dispute.concluded_at.is_none() + && dispute.start + config.dispute_conclusion_by_time_out_period < now + { + Self::deposit_event(Event::DisputeTimedOut(candidate_hash)); + + dispute.concluded_at = Some(now); + >::insert(session_index, candidate_hash, &dispute); + + if >::contains_key(&session_index, &candidate_hash) { + // Local disputes don't count towards spam. + + weight += T::DbWeight::get().reads_writes(1, 1); + continue; + } + + // mildly punish all validators involved. they've failed to make + // data available to others, so this is most likely spam. + SpamSlots::::mutate(session_index, |spam_slots| { + let spam_slots = match spam_slots { + Some(ref mut s) => s, + None => return, + }; + + // also reduce spam slots for all validators involved, if the dispute was unconfirmed. + // this does open us up to more spam, but only for validators who are willing + // to be punished more. + // + // it would be unexpected for any change here to occur when the dispute has not concluded + // in time, as a dispute guaranteed to have at least one honest participant should + // conclude quickly. + let participating = decrement_spam(spam_slots, &dispute); + + // Slight punishment as these validators have failed to make data available to + // others in a timely manner. + T::PunishValidators::punish_inconclusive( + session_index, + participating.iter_ones().map(|i| ValidatorIndex(i as _)), + ); + }); + + weight += T::DbWeight::get().reads_writes(2, 2); + } + } + + weight + } + + /// Called by the initializer to finalize the disputes module. + pub(crate) fn initializer_finalize() { } + + /// Called by the initializer to note a new session in the disputes module. + pub(crate) fn initializer_on_new_session(notification: &SessionChangeNotification) { + let config = >::config(); + + if notification.session_index <= config.dispute_period + 1 { + return + } + + let pruning_target = notification.session_index - config.dispute_period - 1; + + LastPrunedSession::::mutate(|last_pruned| { + let to_prune = if let Some(last_pruned) = last_pruned { + *last_pruned + 1 ..= pruning_target + } else { + pruning_target ..= pruning_target + }; + + for to_prune in to_prune { + // This should be small, as disputes are rare, so `None` is fine. + >::remove_prefix(to_prune, None); + + // This is larger, and will be extracted to the `shared` module for more proper pruning. + // TODO: https://github.com/paritytech/polkadot/issues/3469 + >::remove_prefix(to_prune, None); + SpamSlots::::remove(to_prune); + } + + *last_pruned = Some(pruning_target); + }); + } + + /// Handle sets of dispute statements corresponding to 0 or more candidates. 
+ /// Returns a vector of freshly created disputes. + /// + /// # Warning + /// + /// This functions modifies the state when failing. It is expected to be called in inherent, + /// and to fail the extrinsic on error. As invalid inherents are not allowed, the dirty state + /// is not commited. + pub(crate) fn provide_multi_dispute_data(statement_sets: MultiDisputeStatementSet) + -> Result, DispatchError> + { + let config = >::config(); + + // Deduplicate. + { + let mut targets: Vec<_> = statement_sets.iter() + .map(|set| (set.candidate_hash.0, set.session)) + .collect(); + + targets.sort(); + + let submitted = targets.len(); + targets.dedup(); + + ensure!(submitted == targets.len(), Error::::DuplicateDisputeStatementSets); + } + + let mut fresh = Vec::with_capacity(statement_sets.len()); + for statement_set in statement_sets { + let dispute_target = (statement_set.session, statement_set.candidate_hash); + if Self::provide_dispute_data(&config, statement_set)? { + fresh.push(dispute_target); + } + } + + Ok(fresh) + } + + /// Handle a set of dispute statements corresponding to a single candidate. + /// + /// Fails if the dispute data is invalid. Returns a boolean indicating whether the + /// dispute is fresh. + fn provide_dispute_data(config: &HostConfiguration, set: DisputeStatementSet) + -> Result + { + // Dispute statement sets on any dispute which concluded + // before this point are to be rejected. + let now = >::block_number(); + let oldest_accepted = now.saturating_sub(config.dispute_post_conclusion_acceptance_period); + + // Load session info to access validators + let session_info = match >::session_info(set.session) { + Some(s) => s, + None => return Err(Error::::AncientDisputeStatement.into()), + }; + + let n_validators = session_info.validators.len(); + + // Check for ancient. + let (fresh, dispute_state) = { + if let Some(dispute_state) = >::get(&set.session, &set.candidate_hash) { + ensure!( + dispute_state.concluded_at.as_ref().map_or(true, |c| c >= &oldest_accepted), + Error::::AncientDisputeStatement, + ); + + (false, dispute_state) + } else { + ( + true, + DisputeState { + validators_for: bitvec![BitOrderLsb0, u8; 0; n_validators], + validators_against: bitvec![BitOrderLsb0, u8; 0; n_validators], + start: now, + concluded_at: None, + } + ) + } + }; + + // Check and import all votes. + let summary = { + let mut importer = DisputeStateImporter::new(dispute_state, now); + for (statement, validator_index, signature) in &set.statements { + let validator_public = session_info.validators.get(validator_index.0 as usize) + .ok_or(Error::::ValidatorIndexOutOfBounds)?; + + // Check signature before importing. + check_signature( + &validator_public, + set.candidate_hash, + set.session, + statement, + signature, + ).map_err(|()| Error::::InvalidSignature)?; + + let valid = match statement { + DisputeStatement::Valid(_) => true, + DisputeStatement::Invalid(_) => false, + }; + + importer.import(*validator_index, valid).map_err(Error::::from)?; + } + + importer.finish() + }; + + // Apply spam slot changes. Bail early if too many occupied. 
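+		// Only remote candidates are subject to spam accounting: increments are capped at
+		// `config.dispute_max_spam_slots` (rejecting the set as `PotentialSpam`), while
+		// decrements saturate at zero.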
+ let is_local = >::contains_key(&set.session, &set.candidate_hash); + if !is_local { + let mut spam_slots: Vec = SpamSlots::::get(&set.session) + .unwrap_or_else(|| vec![0; n_validators]); + + for (validator_index, spam_slot_change) in summary.spam_slot_changes { + let spam_slot = spam_slots.get_mut(validator_index.0 as usize) + .expect("index is in-bounds, as checked above; qed"); + + match spam_slot_change { + SpamSlotChange::Inc => { + ensure!( + *spam_slot < config.dispute_max_spam_slots, + Error::::PotentialSpam, + ); + + *spam_slot += 1; + } + SpamSlotChange::Dec => { + *spam_slot = spam_slot.saturating_sub(1); + } + } + } + + SpamSlots::::insert(&set.session, spam_slots); + } + + if fresh { + Self::deposit_event(Event::DisputeInitiated( + set.candidate_hash, + if is_local { DisputeLocation::Local } else { DisputeLocation::Remote }, + )); + } + + { + if summary.new_flags.contains(DisputeStateFlags::FOR_SUPERMAJORITY) { + Self::deposit_event(Event::DisputeConcluded( + set.candidate_hash, + DisputeResult::Valid, + )); + } + + // It is possible, although unexpected, for a dispute to conclude twice. + // This would require f+1 validators to vote in both directions. + // A dispute cannot conclude more than once in each direction. + + if summary.new_flags.contains(DisputeStateFlags::AGAINST_SUPERMAJORITY) { + Self::deposit_event(Event::DisputeConcluded( + set.candidate_hash, + DisputeResult::Invalid, + )); + } + } + + // Reward statements. + T::RewardValidators::reward_dispute_statement( + set.session, + summary.new_participants.iter_ones().map(|i| ValidatorIndex(i as _)), + ); + + // Slash participants on a losing side. + { + // a valid candidate, according to 2/3. Punish those on the 'against' side. + T::PunishValidators::punish_against_valid( + set.session, + summary.slash_against, + ); + + // an invalid candidate, according to 2/3. Punish those on the 'for' side. + T::PunishValidators::punish_for_invalid( + set.session, + summary.slash_for, + ); + } + + >::insert(&set.session, &set.candidate_hash, &summary.state); + + // Freeze if just concluded against some local candidate + if summary.new_flags.contains(DisputeStateFlags::AGAINST_SUPERMAJORITY) { + if let Some(revert_to) = >::get(&set.session, &set.candidate_hash) { + Self::revert_and_freeze(revert_to); + } + } + + Ok(fresh) + } + + #[allow(unused)] + pub(crate) fn disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState)> { + >::iter().collect() + } + + pub(crate) fn note_included(session: SessionIndex, candidate_hash: CandidateHash, included_in: T::BlockNumber) { + if included_in.is_zero() { return } + + let revert_to = included_in - One::one(); + + >::insert(&session, &candidate_hash, revert_to); + + // If we just included a block locally which has a live dispute, decrement spam slots + // for any involved validators, if the dispute is not already confirmed by f + 1. + if let Some(state) = >::get(&session, candidate_hash) { + SpamSlots::::mutate(&session, |spam_slots| { + if let Some(ref mut spam_slots) = *spam_slots { + decrement_spam(spam_slots, &state); + } + }); + + if has_supermajority_against(&state) { + Self::revert_and_freeze(revert_to); + } + } + } + + pub(crate) fn could_be_invalid(session: SessionIndex, candidate_hash: CandidateHash) -> bool { + >::get(&session, &candidate_hash).map_or(false, |dispute| { + // A dispute that is ongoing or has concluded with supermajority-against. 
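+			// `concluded_at` is set on supermajority in either direction or on time-out,
+			// so `is_none()` covers every still-live dispute.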
+ dispute.concluded_at.is_none() || has_supermajority_against(&dispute) + }) + } + + pub(crate) fn is_frozen() -> bool { + Self::last_valid_block().is_some() + } + + pub(crate) fn revert_and_freeze(revert_to: T::BlockNumber) { + if Self::last_valid_block().map_or(true, |last| last > revert_to) { + Frozen::::set(Some(revert_to)); + Self::deposit_event(Event::Revert(revert_to)); + frame_system::Pallet::::deposit_log( + ConsensusLog::Revert(revert_to.saturated_into()).into(), + ); + } + } +} + +fn has_supermajority_against(dispute: &DisputeState) -> bool { + let supermajority_threshold = supermajority_threshold(dispute.validators_against.len()); + dispute.validators_against.count_ones() >= supermajority_threshold +} + +// If the dispute had not enough validators to confirm, decrement spam slots for all the participating +// validators. +// +// Returns the set of participating validators as a bitvec. +fn decrement_spam( + spam_slots: &mut [u32], + dispute: &DisputeState, +) -> bitvec::vec::BitVec { + let byzantine_threshold = byzantine_threshold(spam_slots.len()); + + let participating = dispute.validators_for.clone() | dispute.validators_against.iter().by_val(); + let decrement_spam = participating.count_ones() <= byzantine_threshold; + for validator_index in participating.iter_ones() { + if decrement_spam { + if let Some(occupied) = spam_slots.get_mut(validator_index as usize) { + *occupied = occupied.saturating_sub(1); + } + } + } + + participating +} + +fn check_signature( + validator_public: &ValidatorId, + candidate_hash: CandidateHash, + session: SessionIndex, + statement: &DisputeStatement, + validator_signature: &ValidatorSignature, +) -> Result<(), ()> { + let payload = match *statement { + DisputeStatement::Valid(ValidDisputeStatementKind::Explicit) => { + ExplicitDisputeStatement { + valid: true, + candidate_hash, + session, + }.signing_payload() + }, + DisputeStatement::Valid(ValidDisputeStatementKind::BackingSeconded(inclusion_parent)) => { + CompactStatement::Seconded(candidate_hash).signing_payload(&SigningContext { + session_index: session, + parent_hash: inclusion_parent, + }) + }, + DisputeStatement::Valid(ValidDisputeStatementKind::BackingValid(inclusion_parent)) => { + CompactStatement::Valid(candidate_hash).signing_payload(&SigningContext { + session_index: session, + parent_hash: inclusion_parent, + }) + }, + DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking) => { + ApprovalVote(candidate_hash).signing_payload(session) + }, + DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit) => { + ExplicitDisputeStatement { + valid: false, + candidate_hash, + session, + }.signing_payload() + }, + }; + + if validator_signature.verify(&payload[..] , &validator_public) { + Ok(()) + } else { + Err(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use frame_system::InitKind; + use frame_support::{assert_ok, assert_err, assert_noop, traits::{OnInitialize, OnFinalize}}; + use crate::mock::{ + new_test_ext, Test, System, AllPallets, Initializer, AccountId, MockGenesisConfig, + REWARD_VALIDATORS, PUNISH_VALIDATORS_FOR, PUNISH_VALIDATORS_AGAINST, + PUNISH_VALIDATORS_INCONCLUSIVE, + }; + use sp_core::{Pair, crypto::CryptoType}; + use primitives::v1::BlockNumber; + + // All arguments for `initializer::on_new_session` + type NewSession<'a> = (bool, SessionIndex, Vec<(&'a AccountId, ValidatorId)>, Option>); + + // Run to specific block, while calling disputes pallet hooks manually, because disputes is not + // integrated in initializer yet. 
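+	// Each iteration finalizes the current block, initializes the next, runs the
+	// `AllPallets` hooks and, if the `new_session` closure returns `Some`, triggers a
+	// session change via `Initializer::test_trigger_on_new_session`.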
+ fn run_to_block<'a>( + to: BlockNumber, + new_session: impl Fn(BlockNumber) -> Option>, + ) { + while System::block_number() < to { + let b = System::block_number(); + if b != 0 { + AllPallets::on_finalize(b); + System::finalize(); + } + + System::initialize(&(b + 1), &Default::default(), &Default::default(), InitKind::Full); + AllPallets::on_initialize(b + 1); + + if let Some(new_session) = new_session(b + 1) { + Initializer::test_trigger_on_new_session( + new_session.0, + new_session.1, + new_session.2.into_iter(), + new_session.3.map(|q| q.into_iter()), + ); + } + } + } + + #[test] + fn test_dispute_state_flag_from_state() { + assert_eq!( + DisputeStateFlags::from_state(&DisputeState { + validators_for: bitvec![BitOrderLsb0, u8; 0, 0, 0, 0, 0, 0, 0, 0], + validators_against: bitvec![BitOrderLsb0, u8; 0, 0, 0, 0, 0, 0, 0, 0], + start: 0, + concluded_at: None, + }), + DisputeStateFlags::default(), + ); + + assert_eq!( + DisputeStateFlags::from_state(&DisputeState { + validators_for: bitvec![BitOrderLsb0, u8; 1, 1, 1, 1, 1, 0, 0], + validators_against: bitvec![BitOrderLsb0, u8; 0, 0, 0, 0, 0, 0, 0], + start: 0, + concluded_at: None, + }), + DisputeStateFlags::FOR_SUPERMAJORITY | DisputeStateFlags::CONFIRMED, + ); + + assert_eq!( + DisputeStateFlags::from_state(&DisputeState { + validators_for: bitvec![BitOrderLsb0, u8; 0, 0, 0, 0, 0, 0, 0], + validators_against: bitvec![BitOrderLsb0, u8; 1, 1, 1, 1, 1, 0, 0], + start: 0, + concluded_at: None, + }), + DisputeStateFlags::AGAINST_SUPERMAJORITY | DisputeStateFlags::CONFIRMED, + ); + } + + #[test] + fn test_import_new_participant_spam_inc() { + let mut importer = DisputeStateImporter::new( + DisputeState { + validators_for: bitvec![BitOrderLsb0, u8; 1, 0, 0, 0, 0, 0, 0, 0], + validators_against: bitvec![BitOrderLsb0, u8; 0, 0, 0, 0, 0, 0, 0, 0], + start: 0, + concluded_at: None, + }, + 0, + ); + + assert_err!( + importer.import(ValidatorIndex(9), true), + VoteImportError::ValidatorIndexOutOfBounds, + ); + + assert_err!( + importer.import(ValidatorIndex(0), true), + VoteImportError::DuplicateStatement, + ); + assert_ok!(importer.import(ValidatorIndex(0), false)); + + assert_ok!(importer.import(ValidatorIndex(2), true)); + assert_err!( + importer.import(ValidatorIndex(2), true), + VoteImportError::DuplicateStatement, + ); + + assert_ok!(importer.import(ValidatorIndex(2), false)); + assert_err!( + importer.import(ValidatorIndex(2), false), + VoteImportError::DuplicateStatement, + ); + + let summary = importer.finish(); + assert_eq!(summary.new_flags, DisputeStateFlags::default()); + assert_eq!( + summary.state, + DisputeState { + validators_for: bitvec![BitOrderLsb0, u8; 1, 0, 1, 0, 0, 0, 0, 0], + validators_against: bitvec![BitOrderLsb0, u8; 1, 0, 1, 0, 0, 0, 0, 0], + start: 0, + concluded_at: None, + }, + ); + assert_eq!( + summary.spam_slot_changes, + vec![(ValidatorIndex(2), SpamSlotChange::Inc)], + ); + assert!(summary.slash_for.is_empty()); + assert!(summary.slash_against.is_empty()); + assert_eq!(summary.new_participants, bitvec![BitOrderLsb0, u8; 0, 0, 1, 0, 0, 0, 0, 0]); + } + + #[test] + fn test_import_prev_participant_spam_dec_confirmed() { + let mut importer = DisputeStateImporter::new( + DisputeState { + validators_for: bitvec![BitOrderLsb0, u8; 1, 0, 0, 0, 0, 0, 0, 0], + validators_against: bitvec![BitOrderLsb0, u8; 0, 1, 0, 0, 0, 0, 0, 0], + start: 0, + concluded_at: None, + }, + 0, + ); + + assert_ok!(importer.import(ValidatorIndex(2), true)); + + let summary = importer.finish(); + assert_eq!( + summary.state, + DisputeState { + 
validators_for: bitvec![BitOrderLsb0, u8; 1, 0, 1, 0, 0, 0, 0, 0], + validators_against: bitvec![BitOrderLsb0, u8; 0, 1, 0, 0, 0, 0, 0, 0], + start: 0, + concluded_at: None, + }, + ); + assert_eq!( + summary.spam_slot_changes, + vec![ + (ValidatorIndex(0), SpamSlotChange::Dec), + (ValidatorIndex(1), SpamSlotChange::Dec), + ], + ); + assert!(summary.slash_for.is_empty()); + assert!(summary.slash_against.is_empty()); + assert_eq!(summary.new_participants, bitvec![BitOrderLsb0, u8; 0, 0, 1, 0, 0, 0, 0, 0]); + assert_eq!(summary.new_flags, DisputeStateFlags::CONFIRMED); + } + + #[test] + fn test_import_prev_participant_spam_dec_confirmed_slash_for() { + let mut importer = DisputeStateImporter::new( + DisputeState { + validators_for: bitvec![BitOrderLsb0, u8; 1, 0, 0, 0, 0, 0, 0, 0], + validators_against: bitvec![BitOrderLsb0, u8; 0, 1, 0, 0, 0, 0, 0, 0], + start: 0, + concluded_at: None, + }, + 0, + ); + + assert_ok!(importer.import(ValidatorIndex(2), true)); + assert_ok!(importer.import(ValidatorIndex(2), false)); + assert_ok!(importer.import(ValidatorIndex(3), false)); + assert_ok!(importer.import(ValidatorIndex(4), false)); + assert_ok!(importer.import(ValidatorIndex(5), false)); + assert_ok!(importer.import(ValidatorIndex(6), false)); + + let summary = importer.finish(); + assert_eq!( + summary.state, + DisputeState { + validators_for: bitvec![BitOrderLsb0, u8; 1, 0, 1, 0, 0, 0, 0, 0], + validators_against: bitvec![BitOrderLsb0, u8; 0, 1, 1, 1, 1, 1, 1, 0], + start: 0, + concluded_at: Some(0), + }, + ); + assert_eq!( + summary.spam_slot_changes, + vec![ + (ValidatorIndex(0), SpamSlotChange::Dec), + (ValidatorIndex(1), SpamSlotChange::Dec), + ], + ); + assert_eq!(summary.slash_for, vec![ValidatorIndex(0), ValidatorIndex(2)]); + assert!(summary.slash_against.is_empty()); + assert_eq!(summary.new_participants, bitvec![BitOrderLsb0, u8; 0, 0, 1, 1, 1, 1, 1, 0]); + assert_eq!( + summary.new_flags, + DisputeStateFlags::CONFIRMED | DisputeStateFlags::AGAINST_SUPERMAJORITY, + ); + } + + #[test] + fn test_import_slash_against() { + let mut importer = DisputeStateImporter::new( + DisputeState { + validators_for: bitvec![BitOrderLsb0, u8; 1, 0, 1, 0, 0, 0, 0, 0], + validators_against: bitvec![BitOrderLsb0, u8; 0, 1, 0, 0, 0, 0, 0, 0], + start: 0, + concluded_at: None, + }, + 0, + ); + + assert_ok!(importer.import(ValidatorIndex(3), true)); + assert_ok!(importer.import(ValidatorIndex(4), true)); + assert_ok!(importer.import(ValidatorIndex(5), false)); + assert_ok!(importer.import(ValidatorIndex(6), true)); + assert_ok!(importer.import(ValidatorIndex(7), true)); + + let summary = importer.finish(); + assert_eq!( + summary.state, + DisputeState { + validators_for: bitvec![BitOrderLsb0, u8; 1, 0, 1, 1, 1, 0, 1, 1], + validators_against: bitvec![BitOrderLsb0, u8; 0, 1, 0, 0, 0, 1, 0, 0], + start: 0, + concluded_at: Some(0), + }, + ); + assert!(summary.spam_slot_changes.is_empty()); + assert!(summary.slash_for.is_empty()); + assert_eq!(summary.slash_against, vec![ValidatorIndex(1), ValidatorIndex(5)]); + assert_eq!(summary.new_participants, bitvec![BitOrderLsb0, u8; 0, 0, 0, 1, 1, 1, 1, 1]); + assert_eq!(summary.new_flags, DisputeStateFlags::FOR_SUPERMAJORITY); + } + + // Test that punish_inconclusive is correctly called. 
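+	// A single validator opens a remote dispute that is never confirmed; after
+	// `dispute_conclusion_by_time_out_period` blocks it times out, the spam slot is
+	// released and `punish_inconclusive` is called for that validator.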
+ #[test] + fn test_initializer_initialize() { + let dispute_conclusion_by_time_out_period = 3; + let start = 10; + + let mock_genesis_config = MockGenesisConfig { + configuration: crate::configuration::GenesisConfig { + config: HostConfiguration { + dispute_conclusion_by_time_out_period, + .. Default::default() + }, + .. Default::default() + }, + .. Default::default() + }; + + new_test_ext(mock_genesis_config).execute_with(|| { + let v0 = ::Pair::generate().0; + let v1 = ::Pair::generate().0; + let v2 = ::Pair::generate().0; + let v3 = ::Pair::generate().0; + + // NOTE: v0 index will be 0 + // NOTE: v1 index will be 3 + // NOTE: v2 index will be 2 + // NOTE: v3 index will be 1 + + run_to_block( + start, + |b| { + // a new session at each block + Some(( + true, + b, + vec![(&0, v0.public()), (&1, v1.public()), (&2, v2.public()), (&3, v3.public())], + Some(vec![(&0, v0.public()), (&1, v1.public()), (&2, v2.public()), (&3, v3.public())]), + )) + } + ); + + let candidate_hash = CandidateHash(sp_core::H256::repeat_byte(1)); + + // v0 votes for 3 + let stmts = vec![ + DisputeStatementSet { + candidate_hash: candidate_hash.clone(), + session: start - 1, + statements: vec![ + ( + DisputeStatement::Valid(ValidDisputeStatementKind::Explicit), + ValidatorIndex(0), + v0.sign( + &ExplicitDisputeStatement { + valid: true, + candidate_hash: candidate_hash.clone(), + session: start - 1, + }.signing_payload() + ) + ), + ], + }, + ]; + + assert_ok!( + Pallet::::provide_multi_dispute_data(stmts), + vec![(9, candidate_hash.clone())], + ); + assert_eq!(SpamSlots::::get(start - 1), Some(vec![1, 0, 0, 0])); + + // Run to timeout period + run_to_block(start + dispute_conclusion_by_time_out_period, |_| None); + assert_eq!(SpamSlots::::get(start - 1), Some(vec![1, 0, 0, 0])); + + // Run to timeout + 1 in order to executive on_finalize(timeout) + run_to_block(start + dispute_conclusion_by_time_out_period + 1, |_| None); + assert_eq!(SpamSlots::::get(start - 1), Some(vec![0, 0, 0, 0])); + assert_eq!( + PUNISH_VALIDATORS_INCONCLUSIVE.with(|r| r.borrow()[0].clone()), + (9, vec![ValidatorIndex(0)]), + ); + }); + } + + // Test prunning works + #[test] + fn test_initializer_on_new_session() { + let dispute_period = 3; + + let mock_genesis_config = MockGenesisConfig { + configuration: crate::configuration::GenesisConfig { + config: HostConfiguration { + dispute_period, + .. Default::default() + }, + .. Default::default() + }, + .. Default::default() + }; + + new_test_ext(mock_genesis_config).execute_with(|| { + let v0 = ::Pair::generate().0; + + let candidate_hash = CandidateHash(sp_core::H256::repeat_byte(1)); + Pallet::::note_included(0, candidate_hash.clone(), 0); + Pallet::::note_included(1, candidate_hash.clone(), 1); + Pallet::::note_included(2, candidate_hash.clone(), 2); + Pallet::::note_included(3, candidate_hash.clone(), 3); + Pallet::::note_included(4, candidate_hash.clone(), 4); + Pallet::::note_included(5, candidate_hash.clone(), 5); + Pallet::::note_included(6, candidate_hash.clone(), 5); + + run_to_block( + 7, + |b| { + // a new session at each block + Some(( + true, + b, + vec![(&0, v0.public())], + Some(vec![(&0, v0.public())]), + )) + } + ); + + // current session is 7, + // we keep for dispute_period + 1 session and we remove in on_finalize + // thus we keep info for session 3, 4, 5, 6, 7. 
+ assert_eq!(Included::::iter_prefix(0).count(), 0); + assert_eq!(Included::::iter_prefix(1).count(), 0); + assert_eq!(Included::::iter_prefix(2).count(), 0); + assert_eq!(Included::::iter_prefix(3).count(), 1); + assert_eq!(Included::::iter_prefix(4).count(), 1); + assert_eq!(Included::::iter_prefix(5).count(), 1); + assert_eq!(Included::::iter_prefix(6).count(), 1); + }); + } + + #[test] + fn test_provide_multi_dispute_data_duplicate_error() { + new_test_ext(Default::default()).execute_with(|| { + let candidate_hash_1 = CandidateHash(sp_core::H256::repeat_byte(1)); + let candidate_hash_2 = CandidateHash(sp_core::H256::repeat_byte(2)); + + let stmts = vec![ + DisputeStatementSet { + candidate_hash: candidate_hash_2, + session: 2, + statements: vec![], + }, + DisputeStatementSet { + candidate_hash: candidate_hash_1, + session: 1, + statements: vec![], + }, + DisputeStatementSet { + candidate_hash: candidate_hash_2, + session: 2, + statements: vec![], + }, + ]; + + assert_err!( + Pallet::::provide_multi_dispute_data(stmts), + DispatchError::from(Error::::DuplicateDisputeStatementSets), + ); + }) + } + + // Test: + // * wrong signature fails + // * signature is checked for correct validator + #[test] + fn test_provide_multi_dispute_is_checking_signature_correctly() { + new_test_ext(Default::default()).execute_with(|| { + let v0 = ::Pair::generate().0; + let v1 = ::Pair::generate().0; + + run_to_block( + 3, + |b| { + // a new session at each block + if b == 1 { + Some(( + true, + b, + vec![(&0, v0.public())], + Some(vec![(&0, v0.public())]), + )) + } else { + Some(( + true, + b, + vec![(&1, v1.public())], + Some(vec![(&1, v1.public())]), + )) + } + } + ); + + + let candidate_hash = CandidateHash(sp_core::H256::repeat_byte(1)); + let stmts = vec![ + DisputeStatementSet { + candidate_hash: candidate_hash.clone(), + session: 1, + statements: vec![ + ( + DisputeStatement::Valid(ValidDisputeStatementKind::Explicit), + ValidatorIndex(0), + v0.sign( + &ExplicitDisputeStatement { + valid: true, + candidate_hash: candidate_hash.clone(), + session: 1, + }.signing_payload() + ), + ), + ], + }, + ]; + + assert_ok!( + Pallet::::provide_multi_dispute_data(stmts), + vec![(1, candidate_hash.clone())], + ); + + let candidate_hash = CandidateHash(sp_core::H256::repeat_byte(1)); + let stmts = vec![ + DisputeStatementSet { + candidate_hash: candidate_hash.clone(), + session: 2, + statements: vec![ + ( + DisputeStatement::Valid(ValidDisputeStatementKind::Explicit), + ValidatorIndex(0), + v0.sign( + &ExplicitDisputeStatement { + valid: true, + candidate_hash: candidate_hash.clone(), + session: 2, + }.signing_payload() + ), + ), + ], + }, + ]; + + assert_noop!( + Pallet::::provide_multi_dispute_data(stmts), + DispatchError::from(Error::::InvalidSignature), + ); + }) + } + + #[test] + fn test_freeze_on_note_included() { + new_test_ext(Default::default()).execute_with(|| { + let v0 = ::Pair::generate().0; + + run_to_block( + 6, + |b| { + // a new session at each block + Some(( + true, + b, + vec![(&0, v0.public())], + Some(vec![(&0, v0.public())]), + )) + } + ); + + let candidate_hash = CandidateHash(sp_core::H256::repeat_byte(1)); + + // v0 votes for 3 + let stmts = vec![ + DisputeStatementSet { + candidate_hash: candidate_hash.clone(), + session: 3, + statements: vec![ + ( + DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), + ValidatorIndex(0), + v0.sign( + &ExplicitDisputeStatement { + valid: false, + candidate_hash: candidate_hash.clone(), + session: 3, + }.signing_payload() + ) + ), + ], + }, + 
]; + assert!(Pallet::::provide_multi_dispute_data(stmts).is_ok()); + + Pallet::::note_included(3, candidate_hash.clone(), 3); + assert_eq!(Frozen::::get(), Some(2)); + }); + } + + #[test] + fn test_freeze_provided_against_supermajority_for_included() { + new_test_ext(Default::default()).execute_with(|| { + let v0 = ::Pair::generate().0; + + run_to_block( + 6, + |b| { + // a new session at each block + Some(( + true, + b, + vec![(&0, v0.public())], + Some(vec![(&0, v0.public())]), + )) + } + ); + + let candidate_hash = CandidateHash(sp_core::H256::repeat_byte(1)); + + Pallet::::note_included(3, candidate_hash.clone(), 3); + + // v0 votes for 3 + let stmts = vec![ + DisputeStatementSet { + candidate_hash: candidate_hash.clone(), + session: 3, + statements: vec![ + ( + DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), + ValidatorIndex(0), + v0.sign( + &ExplicitDisputeStatement { + valid: false, + candidate_hash: candidate_hash.clone(), + session: 3, + }.signing_payload() + ) + ), + ], + }, + ]; + assert!(Pallet::::provide_multi_dispute_data(stmts).is_ok()); + + assert_eq!(Frozen::::get(), Some(2)); + }); + } + + // tests for: + // * provide_multi_dispute: with success scenario + // * disputes: correctness of datas + // * could_be_invalid: correctness of datas + // * note_included: decrement spam correctly + // * spam slots: correctly incremented and decremented + // * ensure rewards and punishment are correctly called. + #[test] + fn test_provide_multi_dispute_success_and_other() { + new_test_ext(Default::default()).execute_with(|| { + let v0 = ::Pair::generate().0; + let v1 = ::Pair::generate().0; + let v2 = ::Pair::generate().0; + let v3 = ::Pair::generate().0; + + // NOTE: v0 index will be 0 + // NOTE: v1 index will be 3 + // NOTE: v2 index will be 2 + // NOTE: v3 index will be 1 + + run_to_block( + 6, + |b| { + // a new session at each block + Some(( + true, + b, + vec![(&0, v0.public()), (&1, v1.public()), (&2, v2.public()), (&3, v3.public())], + Some(vec![(&0, v0.public()), (&1, v1.public()), (&2, v2.public()), (&3, v3.public())]), + )) + } + ); + + let candidate_hash = CandidateHash(sp_core::H256::repeat_byte(1)); + + // v0 votes for 3 + let stmts = vec![ + DisputeStatementSet { + candidate_hash: candidate_hash.clone(), + session: 3, + statements: vec![ + ( + DisputeStatement::Valid(ValidDisputeStatementKind::Explicit), + ValidatorIndex(0), + v0.sign( + &ExplicitDisputeStatement { + valid: true, + candidate_hash: candidate_hash.clone(), + session: 3, + }.signing_payload() + ) + ), + ], + }, + ]; + + assert_ok!( + Pallet::::provide_multi_dispute_data(stmts), + vec![(3, candidate_hash.clone())], + ); + assert_eq!(SpamSlots::::get(3), Some(vec![1, 0, 0, 0])); + + // v1 votes for 4 and for 3 + let stmts = vec![ + DisputeStatementSet { + candidate_hash: candidate_hash.clone(), + session: 4, + statements: vec![ + ( + DisputeStatement::Valid(ValidDisputeStatementKind::Explicit), + ValidatorIndex(3), + v1.sign( + &ExplicitDisputeStatement { + valid: true, + candidate_hash: candidate_hash.clone(), + session: 4, + }.signing_payload() + ) + ), + ], + }, + DisputeStatementSet { + candidate_hash: candidate_hash.clone(), + session: 3, + statements: vec![ + ( + DisputeStatement::Valid(ValidDisputeStatementKind::Explicit), + ValidatorIndex(3), + v1.sign( + &ExplicitDisputeStatement { + valid: true, + candidate_hash: candidate_hash.clone(), + session: 3, + }.signing_payload() + ), + ), + ], + }, + ]; + + assert_ok!( + Pallet::::provide_multi_dispute_data(stmts), + vec![(4, 
candidate_hash.clone())], + ); + assert_eq!(SpamSlots::::get(3), Some(vec![0, 0, 0, 0])); // Confirmed as no longer spam + assert_eq!(SpamSlots::::get(4), Some(vec![0, 0, 0, 1])); + + // v3 votes against 3 and for 5 + let stmts = vec![ + DisputeStatementSet { + candidate_hash: candidate_hash.clone(), + session: 3, + statements: vec![ + ( + DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), + ValidatorIndex(1), + v3.sign( + &ExplicitDisputeStatement { + valid: false, + candidate_hash: candidate_hash.clone(), + session: 3, + }.signing_payload() + ), + ), + ], + }, + DisputeStatementSet { + candidate_hash: candidate_hash.clone(), + session: 5, + statements: vec![ + ( + DisputeStatement::Valid(ValidDisputeStatementKind::Explicit), + ValidatorIndex(1), + v3.sign( + &ExplicitDisputeStatement { + valid: true, + candidate_hash: candidate_hash.clone(), + session: 5, + }.signing_payload() + ), + ), + ], + }, + ]; + assert_ok!( + Pallet::::provide_multi_dispute_data(stmts), + vec![(5, candidate_hash.clone())], + ); + assert_eq!(SpamSlots::::get(3), Some(vec![0, 0, 0, 0])); + assert_eq!(SpamSlots::::get(4), Some(vec![0, 0, 0, 1])); + assert_eq!(SpamSlots::::get(5), Some(vec![0, 1, 0, 0])); + + // v2 votes for 3 and againt 5 + let stmts = vec![ + DisputeStatementSet { + candidate_hash: candidate_hash.clone(), + session: 3, + statements: vec![ + ( + DisputeStatement::Valid(ValidDisputeStatementKind::Explicit), + ValidatorIndex(2), + v2.sign( + &ExplicitDisputeStatement { + valid: true, + candidate_hash: candidate_hash.clone(), + session: 3, + }.signing_payload() + ) + ), + ], + }, + DisputeStatementSet { + candidate_hash: candidate_hash.clone(), + session: 5, + statements: vec![ + ( + DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), + ValidatorIndex(2), + v2.sign( + &ExplicitDisputeStatement { + valid: false, + candidate_hash: candidate_hash.clone(), + session: 5, + }.signing_payload() + ), + ), + ], + }, + ]; + assert_ok!(Pallet::::provide_multi_dispute_data(stmts), vec![]); + assert_eq!(SpamSlots::::get(3), Some(vec![0, 0, 0, 0])); + assert_eq!(SpamSlots::::get(4), Some(vec![0, 0, 0, 1])); + assert_eq!(SpamSlots::::get(5), Some(vec![0, 0, 0, 0])); + + // v0 votes for 5 + let stmts = vec![ + DisputeStatementSet { + candidate_hash: candidate_hash.clone(), + session: 5, + statements: vec![ + ( + DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), + ValidatorIndex(0), + v0.sign( + &ExplicitDisputeStatement { + valid: false, + candidate_hash: candidate_hash.clone(), + session: 5, + }.signing_payload() + ), + ), + ], + }, + ]; + + assert_ok!(Pallet::::provide_multi_dispute_data(stmts), vec![]); + assert_eq!(SpamSlots::::get(3), Some(vec![0, 0, 0, 0])); + assert_eq!(SpamSlots::::get(4), Some(vec![0, 0, 0, 1])); + assert_eq!(SpamSlots::::get(5), Some(vec![0, 0, 0, 0])); + + // v1 votes for 5 + let stmts = vec![ + DisputeStatementSet { + candidate_hash: candidate_hash.clone(), + session: 5, + statements: vec![ + ( + DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit), + ValidatorIndex(3), + v1.sign( + &ExplicitDisputeStatement { + valid: false, + candidate_hash: candidate_hash.clone(), + session: 5, + }.signing_payload() + ) + ), + ], + }, + ]; + + assert_ok!( + Pallet::::provide_multi_dispute_data(stmts), + vec![], + ); + assert_eq!(SpamSlots::::get(3), Some(vec![0, 0, 0, 0])); + assert_eq!(SpamSlots::::get(4), Some(vec![0, 0, 0, 1])); + assert_eq!(SpamSlots::::get(5), Some(vec![0, 0, 0, 0])); + + assert_eq!( + Pallet::::disputes(), + vec![ + ( + 5, + 
candidate_hash.clone(), + DisputeState { + validators_for: bitvec![BitOrderLsb0, u8; 0, 1, 0, 0], + validators_against: bitvec![BitOrderLsb0, u8; 1, 0, 1, 1], + start: 6, + concluded_at: Some(6), // 3 vote against + } + ), + ( + 3, + candidate_hash.clone(), + DisputeState { + validators_for: bitvec![BitOrderLsb0, u8; 1, 0, 1, 1], + validators_against: bitvec![BitOrderLsb0, u8; 0, 1, 0, 0], + start: 6, + concluded_at: Some(6), // 3 vote for + } + ), + ( + 4, + candidate_hash.clone(), + DisputeState { + validators_for: bitvec![BitOrderLsb0, u8; 0, 0, 0, 1], + validators_against: bitvec![BitOrderLsb0, u8; 0, 0, 0, 0], + start: 6, + concluded_at: None, + } + ), + ] + ); + + assert_eq!(Pallet::::could_be_invalid(3, candidate_hash.clone()), false); // It has 3 votes for + assert_eq!(Pallet::::could_be_invalid(4, candidate_hash.clone()), true); + assert_eq!(Pallet::::could_be_invalid(5, candidate_hash.clone()), true); + + // Ensure inclusion removes spam slots + assert_eq!(SpamSlots::::get(4), Some(vec![0, 0, 0, 1])); + Pallet::::note_included(4, candidate_hash.clone(), 4); + assert_eq!(SpamSlots::::get(4), Some(vec![0, 0, 0, 0])); + + // Ensure the reward_validator function was correctly called + assert_eq!( + REWARD_VALIDATORS.with(|r| r.borrow().clone()), + vec![ + (3, vec![ValidatorIndex(0)]), + (4, vec![ValidatorIndex(3)]), + (3, vec![ValidatorIndex(3)]), + (3, vec![ValidatorIndex(1)]), + (5, vec![ValidatorIndex(1)]), + (3, vec![ValidatorIndex(2)]), + (5, vec![ValidatorIndex(2)]), + (5, vec![ValidatorIndex(0)]), + (5, vec![ValidatorIndex(3)]), + ], + ); + + // Ensure punishment against is called + assert_eq!( + PUNISH_VALIDATORS_AGAINST.with(|r| r.borrow().clone()), + vec![ + (3, vec![]), + (4, vec![]), + (3, vec![]), + (3, vec![]), + (5, vec![]), + (3, vec![ValidatorIndex(1)]), + (5, vec![]), + (5, vec![]), + (5, vec![]), + ], + ); + + // Ensure punishment for is called + assert_eq!( + PUNISH_VALIDATORS_FOR.with(|r| r.borrow().clone()), + vec![ + (3, vec![]), + (4, vec![]), + (3, vec![]), + (3, vec![]), + (5, vec![]), + (3, vec![]), + (5, vec![]), + (5, vec![]), + (5, vec![ValidatorIndex(1)]), + ], + ); + }) + } + + #[test] + fn test_revert_and_freeze() { + new_test_ext(Default::default()).execute_with(|| { + // events are ignored for genesis block + System::set_block_number(1); + + Frozen::::put(Some(0)); + assert_noop!( + { + Pallet::::revert_and_freeze(0); + Result::<(), ()>::Err(()) // Just a small trick in order to use assert_noop. + }, + (), + ); + + Frozen::::kill(); + Pallet::::revert_and_freeze(0); + + assert_eq!(Frozen::::get(), Some(0)); + assert_eq!(System::digest().logs[0], ConsensusLog::Revert(0).into()); + System::assert_has_event(Event::Revert(0).into()); + }) + } + + #[test] + fn test_revert_and_freeze_merges() { + new_test_ext(Default::default()).execute_with(|| { + Frozen::::put(Some(10)); + assert_noop!( + { + Pallet::::revert_and_freeze(10); + Result::<(), ()>::Err(()) // Just a small trick in order to use assert_noop. 
+ }, + (), + ); + + Pallet::::revert_and_freeze(8); + assert_eq!(Frozen::::get(), Some(8)); + }) + } + + #[test] + fn test_has_supermajority_against() { + assert_eq!( + has_supermajority_against(&DisputeState { + validators_for: bitvec![BitOrderLsb0, u8; 1, 1, 0, 0, 0, 0, 0, 0], + validators_against: bitvec![BitOrderLsb0, u8; 1, 1, 1, 1, 1, 0, 0, 0], + start: 0, + concluded_at: None, + }), + false, + ); + + assert_eq!( + has_supermajority_against(&DisputeState { + validators_for: bitvec![BitOrderLsb0, u8; 1, 1, 0, 0, 0, 0, 0, 0], + validators_against: bitvec![BitOrderLsb0, u8; 1, 1, 1, 1, 1, 1, 0, 0], + start: 0, + concluded_at: None, + }), + true, + ); + } + + #[test] + fn test_decrement_spam() { + let original_spam_slots = vec![0, 1, 2, 3, 4, 5, 6, 7]; + + // Test confirm is no-op + let mut spam_slots = original_spam_slots.clone(); + let dispute_state_confirm = DisputeState { + validators_for: bitvec![BitOrderLsb0, u8; 1, 1, 0, 0, 0, 0, 0, 0], + validators_against: bitvec![BitOrderLsb0, u8; 1, 0, 1, 0, 0, 0, 0, 0], + start: 0, + concluded_at: None, + }; + assert_eq!( + DisputeStateFlags::from_state(&dispute_state_confirm), + DisputeStateFlags::CONFIRMED + ); + assert_eq!( + decrement_spam(spam_slots.as_mut(), &dispute_state_confirm), + bitvec![BitOrderLsb0, u8; 1, 1, 1, 0, 0, 0, 0, 0], + ); + assert_eq!(spam_slots, original_spam_slots); + + // Test not confirm is decreasing spam + let mut spam_slots = original_spam_slots.clone(); + let dispute_state_no_confirm = DisputeState { + validators_for: bitvec![BitOrderLsb0, u8; 1, 0, 0, 0, 0, 0, 0, 0], + validators_against: bitvec![BitOrderLsb0, u8; 1, 0, 1, 0, 0, 0, 0, 0], + start: 0, + concluded_at: None, + }; + assert_eq!( + DisputeStateFlags::from_state(&dispute_state_no_confirm), + DisputeStateFlags::default() + ); + assert_eq!( + decrement_spam(spam_slots.as_mut(), &dispute_state_no_confirm), + bitvec![BitOrderLsb0, u8; 1, 0, 1, 0, 0, 0, 0, 0], + ); + assert_eq!(spam_slots, vec![0, 1, 1, 3, 4, 5, 6, 7]); + } + + #[test] + fn test_check_signature() { + let validator_id = ::Pair::generate().0; + let wrong_validator_id = ::Pair::generate().0; + + let session = 0; + let wrong_session = 1; + let candidate_hash = CandidateHash(sp_core::H256::repeat_byte(1)); + let wrong_candidate_hash = CandidateHash(sp_core::H256::repeat_byte(2)); + let inclusion_parent = sp_core::H256::repeat_byte(3); + let wrong_inclusion_parent = sp_core::H256::repeat_byte(4); + + let statement_1 = DisputeStatement::Valid(ValidDisputeStatementKind::Explicit); + let statement_2 = DisputeStatement::Valid( + ValidDisputeStatementKind::BackingSeconded(inclusion_parent.clone()) + ); + let wrong_statement_2 = DisputeStatement::Valid( + ValidDisputeStatementKind::BackingSeconded(wrong_inclusion_parent.clone()) + ); + let statement_3 = DisputeStatement::Valid( + ValidDisputeStatementKind::BackingValid(inclusion_parent.clone()) + ); + let wrong_statement_3 = DisputeStatement::Valid( + ValidDisputeStatementKind::BackingValid(wrong_inclusion_parent.clone()) + ); + let statement_4 = DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking); + let statement_5 = DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit); + + let signed_1 = validator_id.sign( + &ExplicitDisputeStatement { + valid: true, + candidate_hash: candidate_hash.clone(), + session, + }.signing_payload() + ); + let signed_2 = validator_id.sign( + &CompactStatement::Seconded(candidate_hash.clone()) + .signing_payload(&SigningContext { + session_index: session, + parent_hash: inclusion_parent.clone() + 
}) + ); + let signed_3 = validator_id.sign( + &CompactStatement::Valid(candidate_hash.clone()) + .signing_payload(&SigningContext { + session_index: session, + parent_hash: inclusion_parent.clone() + }) + ); + let signed_4 = validator_id.sign( + &ApprovalVote(candidate_hash.clone()).signing_payload(session) + ); + let signed_5 = validator_id.sign( + &ExplicitDisputeStatement { + valid: false, + candidate_hash: candidate_hash.clone(), + session, + }.signing_payload() + ); + + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_1, &signed_1).is_ok()); + assert!(check_signature(&wrong_validator_id.public(), candidate_hash, session, &statement_1, &signed_1).is_err()); + assert!(check_signature(&validator_id.public(), wrong_candidate_hash, session, &statement_1, &signed_1).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, wrong_session, &statement_1, &signed_1).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_2, &signed_1).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_3, &signed_1).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_4, &signed_1).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_5, &signed_1).is_err()); + + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_2, &signed_2).is_ok()); + assert!(check_signature(&wrong_validator_id.public(), candidate_hash, session, &statement_2, &signed_2).is_err()); + assert!(check_signature(&validator_id.public(), wrong_candidate_hash, session, &statement_2, &signed_2).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, wrong_session, &statement_2, &signed_2).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, session, &wrong_statement_2, &signed_2).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_1, &signed_2).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_3, &signed_2).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_4, &signed_2).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_5, &signed_2).is_err()); + + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_3, &signed_3).is_ok()); + assert!(check_signature(&wrong_validator_id.public(), candidate_hash, session, &statement_3, &signed_3).is_err()); + assert!(check_signature(&validator_id.public(), wrong_candidate_hash, session, &statement_3, &signed_3).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, wrong_session, &statement_3, &signed_3).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, session, &wrong_statement_3, &signed_3).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_1, &signed_3).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_2, &signed_3).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_4, &signed_3).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_5, &signed_3).is_err()); + + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_4, &signed_4).is_ok()); + 
assert!(check_signature(&wrong_validator_id.public(), candidate_hash, session, &statement_4, &signed_4).is_err()); + assert!(check_signature(&validator_id.public(), wrong_candidate_hash, session, &statement_4, &signed_4).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, wrong_session, &statement_4, &signed_4).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_1, &signed_4).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_2, &signed_4).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_3, &signed_4).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_5, &signed_4).is_err()); + + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_5, &signed_5).is_ok()); + assert!(check_signature(&wrong_validator_id.public(), candidate_hash, session, &statement_5, &signed_5).is_err()); + assert!(check_signature(&validator_id.public(), wrong_candidate_hash, session, &statement_5, &signed_5).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, wrong_session, &statement_5, &signed_5).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_1, &signed_5).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_2, &signed_5).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_3, &signed_5).is_err()); + assert!(check_signature(&validator_id.public(), candidate_hash, session, &statement_4, &signed_5).is_err()); + } +} diff --git a/runtime/parachains/src/inclusion.rs b/runtime/parachains/src/inclusion.rs index e6e159b6ee22..e5beab3734d3 100644 --- a/runtime/parachains/src/inclusion.rs +++ b/runtime/parachains/src/inclusion.rs @@ -35,7 +35,7 @@ use parity_scale_codec::{Encode, Decode}; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use sp_runtime::{DispatchError, traits::{One, Saturating}}; -use crate::{configuration, paras, dmp, ump, hrmp, shared, scheduler::CoreAssignment}; +use crate::{configuration, disputes, paras, dmp, ump, hrmp, shared, scheduler::CoreAssignment}; /// A bitfield signed by a validator indicating that it is keeping its piece of the erasure-coding /// for any backed candidates referred to by a `1` bit available. @@ -118,6 +118,7 @@ pub trait Config: + configuration::Config { type Event: From> + Into<::Event>; + type DisputesHandler: disputes::DisputesHandler; type RewardValidators: RewardValidators; } @@ -238,7 +239,7 @@ impl Module { expected_bits: usize, unchecked_bitfields: UncheckedSignedAvailabilityBitfields, core_lookup: impl Fn(CoreIndex) -> Option, - ) -> Result, DispatchError> { + ) -> Result, DispatchError> { let validators = shared::Module::::active_validator_keys(); let session_index = shared::Module::::session_index(); @@ -247,7 +248,6 @@ impl Module { .map(|core_para| core_para.map(|p| (p, PendingAvailability::::get(&p)))) .collect(); - // do sanity checks on the bitfields: // 1. no more than one bitfield per validator // 2. bitfields are ascending by validator index. 
@@ -368,15 +368,12 @@ impl Module { pending_availability.backing_group, ); - freed_cores.push(pending_availability.core); + freed_cores.push((pending_availability.core, pending_availability.hash)); } else { >::insert(¶_id, &pending_availability); } } - // TODO: pass available candidates onwards to validity module once implemented. - // https://github.com/paritytech/polkadot/issues/1251 - Ok(freed_cores) } @@ -754,6 +751,28 @@ impl Module { cleaned_up_cores } + /// Cleans up all paras pending availability that are in the given list of disputed candidates. + /// + /// Returns a vector of cleaned-up core IDs. + pub(crate) fn collect_disputed(disputed: Vec) -> Vec { + let mut cleaned_up_ids = Vec::new(); + let mut cleaned_up_cores = Vec::new(); + + for (para_id, pending_record) in >::iter() { + if disputed.contains(&pending_record.hash) { + cleaned_up_ids.push(para_id); + cleaned_up_cores.push(pending_record.core); + } + } + + for para_id in cleaned_up_ids { + let _ = >::take(¶_id); + let _ = ::take(¶_id); + } + + cleaned_up_cores + } + /// Forcibly enact the candidate with the given ID as though it had been deemed available /// by bitfields. /// @@ -2553,4 +2572,6 @@ mod tests { assert!(::iter().collect::>().is_empty()); }); } + + // TODO [now]: test `collect_disputed` } diff --git a/runtime/parachains/src/initializer.rs b/runtime/parachains/src/initializer.rs index 21e3c2612061..870962e62ebe 100644 --- a/runtime/parachains/src/initializer.rs +++ b/runtime/parachains/src/initializer.rs @@ -25,6 +25,7 @@ use frame_support::traits::{Randomness, OneSessionHandler}; use parity_scale_codec::{Encode, Decode}; use crate::{ configuration::{self, HostConfiguration}, + disputes::DisputesHandler, shared, paras, scheduler, inclusion, session_info, dmp, ump, hrmp, }; @@ -127,7 +128,7 @@ pub mod pallet { // - Scheduler // - Inclusion // - SessionInfo - // - Validity + // - Disputes // - DMP // - UMP // - HRMP @@ -137,6 +138,7 @@ pub mod pallet { scheduler::Module::::initializer_initialize(now) + inclusion::Module::::initializer_initialize(now) + session_info::Module::::initializer_initialize(now) + + T::DisputesHandler::initializer_initialize(now) + dmp::Module::::initializer_initialize(now) + ump::Module::::initializer_initialize(now) + hrmp::Module::::initializer_initialize(now); @@ -151,6 +153,7 @@ pub mod pallet { hrmp::Module::::initializer_finalize(); ump::Module::::initializer_finalize(); dmp::Module::::initializer_finalize(); + T::DisputesHandler::initializer_finalize(); session_info::Module::::initializer_finalize(); inclusion::Module::::initializer_finalize(); scheduler::Module::::initializer_finalize(); @@ -234,6 +237,7 @@ impl Pallet { scheduler::Module::::initializer_on_new_session(¬ification); inclusion::Module::::initializer_on_new_session(¬ification); session_info::Module::::initializer_on_new_session(¬ification); + T::DisputesHandler::initializer_on_new_session(¬ification); dmp::Module::::initializer_on_new_session(¬ification, &outgoing_paras); ump::Module::::initializer_on_new_session(¬ification, &outgoing_paras); hrmp::Module::::initializer_on_new_session(¬ification, &outgoing_paras); @@ -268,6 +272,20 @@ impl Pallet { } } + + // Allow to trigger on_new_session in tests, this is needed as long as pallet_session is not + // implemented in mock. 
+ #[cfg(test)] + pub(crate) fn test_trigger_on_new_session<'a, I: 'a>( + changed: bool, + session_index: SessionIndex, + validators: I, + queued: Option, + ) + where I: Iterator + { + Self::on_new_session(changed, session_index, validators, queued) + } } impl sp_runtime::BoundToRuntimeAppPublic for Pallet { diff --git a/runtime/parachains/src/lib.rs b/runtime/parachains/src/lib.rs index 98014340f21e..e4341405886a 100644 --- a/runtime/parachains/src/lib.rs +++ b/runtime/parachains/src/lib.rs @@ -23,6 +23,7 @@ #![cfg_attr(not(feature = "std"), no_std)] pub mod configuration; +pub mod disputes; pub mod shared; pub mod inclusion; pub mod initializer; diff --git a/runtime/parachains/src/mock.rs b/runtime/parachains/src/mock.rs index f6af2be9711a..1c54ff6c5ae1 100644 --- a/runtime/parachains/src/mock.rs +++ b/runtime/parachains/src/mock.rs @@ -21,7 +21,9 @@ use sp_core::H256; use sp_runtime::traits::{ BlakeTwo256, IdentityLookup, }; -use primitives::v1::{AuthorityDiscoveryId, Balance, BlockNumber, Header, ValidatorIndex}; +use primitives::v1::{ + AuthorityDiscoveryId, Balance, BlockNumber, Header, ValidatorIndex, SessionIndex, +}; use frame_support::parameter_types; use frame_support::traits::GenesisBuild; use frame_support_test::TestRandomness; @@ -29,7 +31,7 @@ use std::cell::RefCell; use std::collections::HashMap; use crate::{ inclusion, scheduler, dmp, ump, hrmp, session_info, paras, configuration, - initializer, shared, + initializer, shared, disputes, }; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -53,6 +55,7 @@ frame_support::construct_runtime!( Ump: ump::{Pallet, Call, Storage, Event}, Hrmp: hrmp::{Pallet, Call, Storage, Event}, SessionInfo: session_info::{Pallet, Call, Storage}, + Disputes: disputes::{Pallet, Storage, Event}, } ); @@ -62,6 +65,8 @@ parameter_types! { frame_system::limits::BlockWeights::simple_max(4 * 1024 * 1024); } +pub type AccountId = u64; + impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::AllowAll; type BlockWeights = BlockWeights; @@ -136,10 +141,59 @@ impl crate::hrmp::Config for Test { type Currency = pallet_balances::Pallet; } +impl crate::disputes::Config for Test { + type Event = Event; + type RewardValidators = Self; + type PunishValidators = Self; +} + +thread_local! 
{ + pub static REWARD_VALIDATORS: RefCell)>> = RefCell::new(Vec::new()); + pub static PUNISH_VALIDATORS_FOR: RefCell)>> = RefCell::new(Vec::new()); + pub static PUNISH_VALIDATORS_AGAINST: RefCell)>> = RefCell::new(Vec::new()); + pub static PUNISH_VALIDATORS_INCONCLUSIVE: RefCell)>> = RefCell::new(Vec::new()); +} + +impl crate::disputes::RewardValidators for Test { + fn reward_dispute_statement( + session: SessionIndex, + validators: impl IntoIterator + ) { + REWARD_VALIDATORS.with(|r| r.borrow_mut().push((session, validators.into_iter().collect()))) + } +} + +impl crate::disputes::PunishValidators for Test { + fn punish_for_invalid( + session: SessionIndex, + validators: impl IntoIterator, + ) { + PUNISH_VALIDATORS_FOR + .with(|r| r.borrow_mut().push((session, validators.into_iter().collect()))) + } + + fn punish_against_valid( + session: SessionIndex, + validators: impl IntoIterator, + ) { + PUNISH_VALIDATORS_AGAINST + .with(|r| r.borrow_mut().push((session, validators.into_iter().collect()))) + } + + fn punish_inconclusive( + session: SessionIndex, + validators: impl IntoIterator, + ) { + PUNISH_VALIDATORS_INCONCLUSIVE + .with(|r| r.borrow_mut().push((session, validators.into_iter().collect()))) + } +} + impl crate::scheduler::Config for Test { } impl crate::inclusion::Config for Test { type Event = Event; + type DisputesHandler = Disputes; type RewardValidators = TestRewardValidators; } diff --git a/runtime/parachains/src/paras_inherent.rs b/runtime/parachains/src/paras_inherent.rs index 551a7a1f55b3..4e26c1916c5b 100644 --- a/runtime/parachains/src/paras_inherent.rs +++ b/runtime/parachains/src/paras_inherent.rs @@ -35,8 +35,10 @@ use frame_support::{ }; use frame_system::ensure_none; use crate::{ + disputes::DisputesHandler, inclusion, scheduler::{self, FreedReason}, + shared, ump, }; @@ -68,6 +70,8 @@ decl_error! { /// The hash of the submitted parent header doesn't correspond to the saved block hash of /// the parent. InvalidParentHeader, + /// Potentially invalid candidate. + CandidateCouldBeInvalid, } } @@ -99,7 +103,7 @@ decl_module! { bitfields: signed_bitfields, backed_candidates, parent_header, - disputes: _, + disputes, } = data; ensure_none(origin)?; @@ -112,6 +116,36 @@ decl_module! { Error::::InvalidParentHeader, ); + // Handle disputes logic. + let current_session = >::session_index(); + let freed_disputed: Vec<(_, FreedReason)> = { + let fresh_disputes = T::DisputesHandler::provide_multi_dispute_data(disputes)?; + if T::DisputesHandler::is_frozen() { + // The relay chain we are currently on is invalid. Proceed no further on parachains. + Included::set(Some(())); + return Ok(Some( + MINIMAL_INCLUSION_INHERENT_WEIGHT + ).into()); + } + + let any_current_session_disputes = fresh_disputes.iter() + .any(|(s, _)| s == ¤t_session); + + if any_current_session_disputes { + let current_session_disputes: Vec<_> = fresh_disputes.iter() + .filter(|(s, _)| s == ¤t_session) + .map(|(_, c)| *c) + .collect(); + + >::collect_disputed(current_session_disputes) + .into_iter() + .map(|core| (core, FreedReason::Concluded)) + .collect() + } else { + Vec::new() + } + }; + // Process new availability bitfields, yielding any availability cores whose // work has now concluded. let expected_bits = >::availability_cores().len(); @@ -121,6 +155,12 @@ decl_module! { >::core_para, )?; + // Inform the disputes module of all included candidates. 
+ let now = >::block_number(); + for (_, candidate_hash) in &freed_concluded { + T::DisputesHandler::note_included(current_session, *candidate_hash, now); + } + // Handle timeouts for any availability core work. let availability_pred = >::availability_timeout_predicate(); let freed_timeout = if let Some(pred) = availability_pred { @@ -130,8 +170,12 @@ decl_module! { }; // Schedule paras again, given freed cores, and reasons for freeing. - let freed = freed_concluded.into_iter().map(|c| (c, FreedReason::Concluded)) - .chain(freed_timeout.into_iter().map(|c| (c, FreedReason::TimedOut))); + let mut freed = freed_disputed.into_iter() + .chain(freed_concluded.into_iter().map(|(c, _hash)| (c, FreedReason::Concluded))) + .chain(freed_timeout.into_iter().map(|c| (c, FreedReason::TimedOut))) + .collect::>(); + + freed.sort_unstable_by_key(|pair| pair.0); // sort by core index >::clear(); >::schedule( @@ -142,6 +186,17 @@ decl_module! { let backed_candidates = limit_backed_candidates::(backed_candidates); let backed_candidates_len = backed_candidates.len() as Weight; + // Refuse to back any candidates that are disputed or invalid. + for candidate in &backed_candidates { + ensure!( + !T::DisputesHandler::could_be_invalid( + current_session, + candidate.candidate.hash(), + ), + Error::::CandidateCouldBeInvalid, + ); + } + // Process backed candidates according to scheduled cores. let parent_storage_root = parent_header.state_root().clone(); let occupied = >::process_candidates( @@ -216,7 +271,7 @@ impl ProvideInherent for Module { const INHERENT_IDENTIFIER: InherentIdentifier = PARACHAINS_INHERENT_IDENTIFIER; fn create_inherent(data: &InherentData) -> Option { - let inherent_data: ParachainsInherentData + let mut inherent_data: ParachainsInherentData = match data.get_data(&Self::INHERENT_IDENTIFIER) { Ok(Some(d)) => d, @@ -231,6 +286,9 @@ impl ProvideInherent for Module { } }; + // filter out any unneeded dispute statements + T::DisputesHandler::filter_multi_dispute_data(&mut inherent_data.disputes); + // Sanity check: session changes can invalidate an inherent, and we _really_ don't want that to happen. 
// See github.com/paritytech/polkadot/issues/1327 let inherent_data = match Self::enter( diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index 06dbd19ed853..04c7adc96f55 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -589,6 +589,7 @@ impl runtime_parachains::inclusion::RewardValidators for RewardValidators { impl parachains_inclusion::Config for Runtime { type Event = Event; + type DisputesHandler = (); type RewardValidators = RewardValidators; } diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs index 70efad71b232..5570dbbac74a 100644 --- a/runtime/test-runtime/src/lib.rs +++ b/runtime/test-runtime/src/lib.rs @@ -36,6 +36,7 @@ use polkadot_runtime_parachains::dmp as parachains_dmp; use polkadot_runtime_parachains::ump as parachains_ump; use polkadot_runtime_parachains::hrmp as parachains_hrmp; use polkadot_runtime_parachains::scheduler as parachains_scheduler; +use polkadot_runtime_parachains::disputes as parachains_disputes; use polkadot_runtime_parachains::runtime_api_impl::v1 as runtime_impl; use primitives::v1::{ @@ -455,9 +456,16 @@ impl parachains_shared::Config for Runtime {} impl parachains_inclusion::Config for Runtime { type Event = Event; + type DisputesHandler = ParasDisputes; type RewardValidators = RewardValidatorsWithEraPoints; } +impl parachains_disputes::Config for Runtime { + type Event = Event; + type RewardValidators = (); + type PunishValidators = (); +} + impl parachains_paras_inherent::Config for Runtime {} impl parachains_initializer::Config for Runtime { @@ -537,6 +545,7 @@ construct_runtime! { SessionInfo: parachains_session_info::{Pallet, Call, Storage}, Hrmp: parachains_hrmp::{Pallet, Call, Storage, Event}, Ump: parachains_ump::{Pallet, Call, Storage, Event}, + ParasDisputes: parachains_disputes::{Pallet, Storage, Event}, Sudo: pallet_sudo::{Pallet, Call, Storage, Config, Event}, } diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index e904f6e6d482..0ee78b5d19c4 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -765,6 +765,7 @@ impl parachains_session_info::Config for Runtime {} impl parachains_inclusion::Config for Runtime { type Event = Event; + type DisputesHandler = (); type RewardValidators = parachains_reward_points::RewardValidatorsWithEraPoints; } From 570efba36d71fd178c2f749b8ca22912b6fe1856 Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Mon, 19 Jul 2021 13:12:54 -0400 Subject: [PATCH 09/14] add rustfmt toml (#3491) * feat: rustfmt.toml Copied from substrate. * avoid normalize It has some odd side effects converting // to /* */ instead of the other way round. See https://github.com/rust-lang/rustfmt/issues/4909 . 
--- rustfmt.toml | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 rustfmt.toml diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 000000000000..15e9bdcdf10f --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,20 @@ +# Basic +hard_tabs = true +max_width = 100 +use_small_heuristics = "Max" +# Imports +imports_granularity = "Crate" +reorder_imports = true +# Consistency +newline_style = "Unix" +# Misc +chain_width = 80 +spaces_around_ranges = false +binop_separator = "Back" +reorder_impl_items = false +match_arm_leading_pipes = "Preserve" +match_arm_blocks = false +match_block_trailing_comma = true +trailing_comma = "Vertical" +trailing_semicolon = false +use_field_init_shorthand = true From 94c1cc78c68c1758edc7121c13ec16a2eaec124b Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Mon, 19 Jul 2021 20:17:51 +0200 Subject: [PATCH 10/14] dependabot: ignore another git dep (#3493) --- .github/dependabot.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index b03aff183046..d9d24a12c9e9 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -10,6 +10,7 @@ updates: - dependency-name: "sp-*" - dependency-name: "frame-*" - dependency-name: "fork-tree" + - dependency-name: "remote-externalities" - dependency-name: "pallet-*" - dependency-name: "beefy-*" - dependency-name: "try-runtime-*" From b58284c36276ca8bdfbb63c7daafaca300dd7afe Mon Sep 17 00:00:00 2001 From: Andreas Doerr Date: Mon, 19 Jul 2021 20:20:29 +0200 Subject: [PATCH 11/14] Gossip rebroadcast rate limiter (#3494) --- Cargo.lock | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9284c66eb3d9..6f68457d56e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -487,7 +487,7 @@ dependencies = [ [[package]] name = "beefy-gadget" version = "0.1.0" -source = "git+https://github.com/paritytech/grandpa-bridge-gadget?branch=master#2e450ac733d55b2f5e42a304afa287be6abcc53b" +source = "git+https://github.com/paritytech/grandpa-bridge-gadget?branch=master#26d617c699afc6c7f8f4207e72aee59449712765" dependencies = [ "beefy-primitives", "fnv", @@ -511,12 +511,13 @@ dependencies = [ "sp-utils", "substrate-prometheus-endpoint", "thiserror", + "wasm-timer", ] [[package]] name = "beefy-gadget-rpc" version = "0.1.0" -source = "git+https://github.com/paritytech/grandpa-bridge-gadget?branch=master#2e450ac733d55b2f5e42a304afa287be6abcc53b" +source = "git+https://github.com/paritytech/grandpa-bridge-gadget?branch=master#26d617c699afc6c7f8f4207e72aee59449712765" dependencies = [ "beefy-gadget", "beefy-primitives", @@ -537,12 +538,12 @@ dependencies = [ [[package]] name = "beefy-merkle-tree" version = "0.1.0" -source = "git+https://github.com/paritytech/grandpa-bridge-gadget?branch=master#2e450ac733d55b2f5e42a304afa287be6abcc53b" +source = "git+https://github.com/paritytech/grandpa-bridge-gadget?branch=master#26d617c699afc6c7f8f4207e72aee59449712765" [[package]] name = "beefy-primitives" version = "0.1.0" -source = "git+https://github.com/paritytech/grandpa-bridge-gadget?branch=master#2e450ac733d55b2f5e42a304afa287be6abcc53b" +source = "git+https://github.com/paritytech/grandpa-bridge-gadget?branch=master#26d617c699afc6c7f8f4207e72aee59449712765" dependencies = [ "parity-scale-codec", "sp-api", @@ -4713,7 +4714,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "0.1.0" -source = "git+https://github.com/paritytech/grandpa-bridge-gadget?branch=master#2e450ac733d55b2f5e42a304afa287be6abcc53b" +source = 
"git+https://github.com/paritytech/grandpa-bridge-gadget?branch=master#26d617c699afc6c7f8f4207e72aee59449712765" dependencies = [ "beefy-primitives", "frame-support", @@ -4728,7 +4729,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "0.1.0" -source = "git+https://github.com/paritytech/grandpa-bridge-gadget?branch=master#2e450ac733d55b2f5e42a304afa287be6abcc53b" +source = "git+https://github.com/paritytech/grandpa-bridge-gadget?branch=master#26d617c699afc6c7f8f4207e72aee59449712765" dependencies = [ "beefy-merkle-tree", "beefy-primitives", From 19a5de137d79409c7a71a4c47e98be70d6a42aef Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Jul 2021 18:54:58 +0000 Subject: [PATCH 12/14] Bump slotmap from 1.0.2 to 1.0.5 (#3495) Bumps [slotmap](https://github.com/orlp/slotmap) from 1.0.2 to 1.0.5. - [Release notes](https://github.com/orlp/slotmap/releases) - [Changelog](https://github.com/orlp/slotmap/blob/master/RELEASES.md) - [Commits](https://github.com/orlp/slotmap/compare/v1.0.2...v1.0.5) --- updated-dependencies: - dependency-name: slotmap dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6f68457d56e8..d98feaa66d9f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9485,9 +9485,9 @@ dependencies = [ [[package]] name = "slotmap" -version = "1.0.2" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab3003725ae562cf995f3dc82bb99e70926e09000396816765bb6d7adbe740b1" +checksum = "a952280edbecfb1d4bd3cf2dbc309dc6ab523e53487c438ae21a6df09fe84bc4" dependencies = [ "version_check", ] From 89744aa1209d293d8f3fe5bb06ae754e2ede78b7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Jul 2021 19:31:55 +0000 Subject: [PATCH 13/14] Bump libc from 0.2.91 to 0.2.98 (#3496) Bumps [libc](https://github.com/rust-lang/libc) from 0.2.91 to 0.2.98. - [Release notes](https://github.com/rust-lang/libc/releases) - [Commits](https://github.com/rust-lang/libc/compare/0.2.91...0.2.98) --- updated-dependencies: - dependency-name: libc dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- node/core/pvf/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d98feaa66d9f..9d48d68a7fff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3499,9 +3499,9 @@ checksum = "b294d6fa9ee409a054354afc4352b0b9ef7ca222c69b8812cbea9e7d2bf3783f" [[package]] name = "libc" -version = "0.2.91" +version = "0.2.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8916b1f6ca17130ec6568feccee27c156ad12037880833a3b842a823236502e7" +checksum = "320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790" [[package]] name = "libloading" diff --git a/node/core/pvf/Cargo.toml b/node/core/pvf/Cargo.toml index cfd60314feae..c7a0afbdc090 100644 --- a/node/core/pvf/Cargo.toml +++ b/node/core/pvf/Cargo.toml @@ -15,7 +15,7 @@ async-process = "1.1.0" assert_matches = "1.4.0" futures = "0.3.15" futures-timer = "3.0.2" -libc = "0.2.81" +libc = "0.2.98" slotmap = "1.0" tracing = "0.1.26" pin-project = "1.0.7" From 963b500042d3a958413e2967cda74a011ba5fec3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 20 Jul 2021 19:21:27 +0200 Subject: [PATCH 14/14] Update secp256k1 and remove unrequired usage (#3502) * Update secp256k1 and remove unrequired usage * Rename missed old crate names * Enable required feature --- Cargo.lock | 78 ++++++++++++++++++++++++++++----- runtime/common/Cargo.toml | 5 ++- runtime/common/src/claims.rs | 48 ++++++++++---------- runtime/kusama/Cargo.toml | 1 - runtime/parachains/Cargo.toml | 3 -- runtime/polkadot/Cargo.toml | 1 - runtime/test-runtime/Cargo.toml | 1 - runtime/westend/Cargo.toml | 1 - 8 files changed, 92 insertions(+), 46 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9d48d68a7fff..7f4e2e9aec42 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2688,6 +2688,17 @@ dependencies = [ "hmac 0.7.1", ] +[[package]] +name = "hmac-drbg" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" +dependencies = [ + "digest 0.9.0", + "generic-array 0.14.4", + "hmac 0.8.1", +] + [[package]] name = "hostname" version = "0.3.1" @@ -3363,7 +3374,6 @@ dependencies = [ "frame-system-rpc-runtime-api", "frame-try-runtime", "hex-literal", - "libsecp256k1", "log", "pallet-authority-discovery", "pallet-authorship", @@ -3582,7 +3592,7 @@ dependencies = [ "futures 0.3.15", "futures-timer 3.0.2", "lazy_static", - "libsecp256k1", + "libsecp256k1 0.3.5", "log", "multihash", "multistream-select", @@ -3984,13 +3994,61 @@ dependencies = [ "arrayref", "crunchy", "digest 0.8.1", - "hmac-drbg", + "hmac-drbg 0.2.0", "rand 0.7.3", "sha2 0.8.2", "subtle 2.2.3", "typenum", ] +[[package]] +name = "libsecp256k1" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9d220bc1feda2ac231cb78c3d26f27676b8cf82c96971f7aeef3d0cf2797c73" +dependencies = [ + "arrayref", + "base64 0.12.3", + "digest 0.9.0", + "hmac-drbg 0.3.0", + "libsecp256k1-core", + "libsecp256k1-gen-ecmult", + "libsecp256k1-gen-genmult", + "rand 0.7.3", + "serde", + "sha2 0.9.2", + "typenum", +] + +[[package]] +name = "libsecp256k1-core" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0f6ab710cec28cef759c5f18671a27dae2a5f952cdaaee1d8e2908cb2478a80" +dependencies = [ + "crunchy", + "digest 0.9.0", + "subtle 2.2.3", 
+] + +[[package]] +name = "libsecp256k1-gen-ecmult" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccab96b584d38fac86a83f07e659f0deafd0253dc096dab5a36d53efe653c5c3" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "libsecp256k1-gen-genmult" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67abfe149395e3aa1c48a2beb32b068e2334402df8181f818d3aee2b304c4f5d" +dependencies = [ + "libsecp256k1-core", +] + [[package]] name = "libz-sys" version = "1.0.25" @@ -4736,7 +4794,7 @@ dependencies = [ "frame-support", "frame-system", "hex", - "libsecp256k1", + "libsecp256k1 0.3.5", "log", "pallet-beefy", "pallet-mmr", @@ -6733,7 +6791,6 @@ dependencies = [ "frame-system-rpc-runtime-api", "frame-try-runtime", "hex-literal", - "libsecp256k1", "log", "pallet-authority-discovery", "pallet-authorship", @@ -6810,7 +6867,7 @@ dependencies = [ "frame-system", "hex-literal", "impl-trait-for-tuples", - "libsecp256k1", + "libsecp256k1 0.6.0", "log", "pallet-authorship", "pallet-babe", @@ -6863,7 +6920,6 @@ dependencies = [ "frame-system", "futures 0.3.15", "hex-literal", - "libsecp256k1", "log", "pallet-authority-discovery", "pallet-authorship", @@ -7143,7 +7199,6 @@ dependencies = [ "frame-system", "frame-system-rpc-runtime-api", "hex-literal", - "libsecp256k1", "log", "pallet-authority-discovery", "pallet-authorship", @@ -8522,7 +8577,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#bfca1a91f760 dependencies = [ "derive_more", "lazy_static", - "libsecp256k1", + "libsecp256k1 0.3.5", "log", "parity-scale-codec", "parity-wasm 0.42.2", @@ -9789,7 +9844,7 @@ dependencies = [ "hex", "impl-serde", "lazy_static", - "libsecp256k1", + "libsecp256k1 0.3.5", "log", "merlin", "num-traits", @@ -9885,7 +9940,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#bfca1a91f760 dependencies = [ "futures 0.3.15", "hash-db", - "libsecp256k1", + "libsecp256k1 0.3.5", "log", "parity-scale-codec", "parking_lot 0.11.1", @@ -11988,7 +12043,6 @@ dependencies = [ "frame-system-rpc-runtime-api", "frame-try-runtime", "hex-literal", - "libsecp256k1", "log", "pallet-authority-discovery", "pallet-authorship", diff --git a/runtime/common/Cargo.toml b/runtime/common/Cargo.toml index cd259361956d..ebd4294ffc20 100644 --- a/runtime/common/Cargo.toml +++ b/runtime/common/Cargo.toml @@ -41,7 +41,7 @@ frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features=false, optional = true } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } -libsecp256k1 = { version = "0.3.5", default-features = false } +libsecp256k1 = { version = "0.6.0", default-features = false } runtime-parachains = { package = "polkadot-runtime-parachains", path = "../parachains", default-features = false } slot-range-helper = { path = "slot_range_helper", default-features = false } @@ -59,7 +59,7 @@ pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "m sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } trie-db = "0.22.3" serde_json = "1.0.61" -libsecp256k1 = "0.3.5" +libsecp256k1 = "0.6.0" [features] default = ["std"] @@ -99,6 +99,7 @@ std = [ ] runtime-benchmarks = [ "libsecp256k1/hmac", + "libsecp256k1/static-context", "frame-benchmarking", "frame-support/runtime-benchmarks", 
"frame-system/runtime-benchmarks", diff --git a/runtime/common/src/claims.rs b/runtime/common/src/claims.rs index c06db1d0d9cc..686006f7646f 100644 --- a/runtime/common/src/claims.rs +++ b/runtime/common/src/claims.rs @@ -618,19 +618,18 @@ impl SignedExtension for PrevalidateAttests where #[cfg(any(test, feature = "runtime-benchmarks"))] mod secp_utils { use super::*; - use secp256k1; - pub fn public(secret: &secp256k1::SecretKey) -> secp256k1::PublicKey { - secp256k1::PublicKey::from_secret_key(secret) + pub fn public(secret: &libsecp256k1::SecretKey) -> libsecp256k1::PublicKey { + libsecp256k1::PublicKey::from_secret_key(secret) } - pub fn eth(secret: &secp256k1::SecretKey) -> EthereumAddress { + pub fn eth(secret: &libsecp256k1::SecretKey) -> EthereumAddress { let mut res = EthereumAddress::default(); res.0.copy_from_slice(&keccak_256(&public(secret).serialize()[1..65])[12..]); res } - pub fn sig(secret: &secp256k1::SecretKey, what: &[u8], extra: &[u8]) -> EcdsaSignature { + pub fn sig(secret: &libsecp256k1::SecretKey, what: &[u8], extra: &[u8]) -> EcdsaSignature { let msg = keccak_256(&>::ethereum_signable_message(&to_ascii_hex(what)[..], extra)); - let (sig, recovery_id) = secp256k1::sign(&secp256k1::Message::parse(&msg), secret); + let (sig, recovery_id) = libsecp256k1::sign(&libsecp256k1::Message::parse(&msg), secret); let mut r = [0u8; 65]; r[0..64].copy_from_slice(&sig.serialize()[..]); r[64] = recovery_id.serialize(); @@ -640,7 +639,6 @@ mod secp_utils { #[cfg(test)] mod tests { - use secp256k1; use hex_literal::hex; use super::*; use secp_utils::*; @@ -751,20 +749,20 @@ mod tests { type WeightInfo = TestWeightInfo; } - fn alice() -> secp256k1::SecretKey { - secp256k1::SecretKey::parse(&keccak_256(b"Alice")).unwrap() + fn alice() -> libsecp256k1::SecretKey { + libsecp256k1::SecretKey::parse(&keccak_256(b"Alice")).unwrap() } - fn bob() -> secp256k1::SecretKey { - secp256k1::SecretKey::parse(&keccak_256(b"Bob")).unwrap() + fn bob() -> libsecp256k1::SecretKey { + libsecp256k1::SecretKey::parse(&keccak_256(b"Bob")).unwrap() } - fn dave() -> secp256k1::SecretKey { - secp256k1::SecretKey::parse(&keccak_256(b"Dave")).unwrap() + fn dave() -> libsecp256k1::SecretKey { + libsecp256k1::SecretKey::parse(&keccak_256(b"Dave")).unwrap() } - fn eve() -> secp256k1::SecretKey { - secp256k1::SecretKey::parse(&keccak_256(b"Eve")).unwrap() + fn eve() -> libsecp256k1::SecretKey { + libsecp256k1::SecretKey::parse(&keccak_256(b"Eve")).unwrap() } - fn frank() -> secp256k1::SecretKey { - secp256k1::SecretKey::parse(&keccak_256(b"Frank")).unwrap() + fn frank() -> libsecp256k1::SecretKey { + libsecp256k1::SecretKey::parse(&keccak_256(b"Frank")).unwrap() } // This function basically just builds a genesis storage key/value store according to @@ -1196,7 +1194,7 @@ mod benchmarking { const VALUE: u32 = 1_000_000; fn create_claim(input: u32) -> DispatchResult { - let secret_key = secp256k1::SecretKey::parse(&keccak_256(&input.encode())).unwrap(); + let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&input.encode())).unwrap(); let eth_address = eth(&secret_key); let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); super::Pallet::::mint_claim(RawOrigin::Root.into(), eth_address, VALUE.into(), vesting, None)?; @@ -1204,7 +1202,7 @@ mod benchmarking { } fn create_claim_attest(input: u32) -> DispatchResult { - let secret_key = secp256k1::SecretKey::parse(&keccak_256(&input.encode())).unwrap(); + let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&input.encode())).unwrap(); let 
eth_address = eth(&secret_key); let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); super::Pallet::::mint_claim( @@ -1227,7 +1225,7 @@ mod benchmarking { create_claim_attest::(u32::MAX - c)?; } - let secret_key = secp256k1::SecretKey::parse(&keccak_256(&c.encode())).unwrap(); + let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&c.encode())).unwrap(); let eth_address = eth(&secret_key); let account: T::AccountId = account("user", c, SEED); let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); @@ -1272,7 +1270,7 @@ mod benchmarking { // Crate signature let attest_c = u32::MAX - c; - let secret_key = secp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); + let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); let eth_address = eth(&secret_key); let account: T::AccountId = account("user", c, SEED); let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); @@ -1300,7 +1298,7 @@ mod benchmarking { } let attest_c = u32::MAX - c; - let secret_key = secp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); + let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); let eth_address = eth(&secret_key); let account: T::AccountId = account("user", c, SEED); let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); @@ -1338,10 +1336,10 @@ mod benchmarking { } let attest_c = u32::MAX - c; - let secret_key = secp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); + let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&attest_c.encode())).unwrap(); let eth_address = eth(&secret_key); - let new_secret_key = secp256k1::SecretKey::parse(&keccak_256(&(u32::MAX/2).encode())).unwrap(); + let new_secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&(u32::MAX/2).encode())).unwrap(); let new_eth_address = eth(&new_secret_key); let account: T::AccountId = account("user", c, SEED); @@ -1371,7 +1369,7 @@ mod benchmarking { eth_recover { let i in 0 .. 
1_000; // Crate signature - let secret_key = secp256k1::SecretKey::parse(&keccak_256(&i.encode())).unwrap(); + let secret_key = libsecp256k1::SecretKey::parse(&keccak_256(&i.encode())).unwrap(); let account: T::AccountId = account("user", i, SEED); let signature = sig::(&secret_key, &account.encode(), &[][..]); let data = account.using_encoded(to_ascii_hex); diff --git a/runtime/kusama/Cargo.toml b/runtime/kusama/Cargo.toml index cf1838bc91b9..d325efca2862 100644 --- a/runtime/kusama/Cargo.toml +++ b/runtime/kusama/Cargo.toml @@ -90,7 +90,6 @@ xcm-builder = { package = "xcm-builder", path = "../../xcm/xcm-builder", default [dev-dependencies] hex-literal = "0.3.1" -libsecp256k1 = "0.3.5" tiny-keccak = "2.0.2" keyring = { package = "sp-keyring", git = "https://github.com/paritytech/substrate", branch = "master" } sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/runtime/parachains/Cargo.toml b/runtime/parachains/Cargo.toml index 0a766af14c66..6b2af9cfe44e 100644 --- a/runtime/parachains/Cargo.toml +++ b/runtime/parachains/Cargo.toml @@ -38,7 +38,6 @@ frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = xcm = { package = "xcm", path = "../../xcm", default-features = false } xcm-executor = { package = "xcm-executor", path = "../../xcm/xcm-executor", default-features = false } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } -libsecp256k1 = { version = "0.3.5", default-features = false, optional = true } rand = { version = "0.8.3", default-features = false } rand_chacha = { version = "0.3.1", default-features = false } @@ -54,7 +53,6 @@ pallet-staking-reward-curve = { git = "https://github.com/paritytech/substrate", pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "master" } frame-support-test = { git = "https://github.com/paritytech/substrate", branch = "master" } serde_json = "1.0.61" -libsecp256k1 = "0.3.5" sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } @@ -90,7 +88,6 @@ std = [ "log/std", ] runtime-benchmarks = [ - "libsecp256k1/hmac", "frame-benchmarking", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", diff --git a/runtime/polkadot/Cargo.toml b/runtime/polkadot/Cargo.toml index 5655a0ae5ee8..3d44d4ab7f37 100644 --- a/runtime/polkadot/Cargo.toml +++ b/runtime/polkadot/Cargo.toml @@ -80,7 +80,6 @@ primitives = { package = "polkadot-primitives", path = "../../primitives", defau [dev-dependencies] hex-literal = "0.3.1" -libsecp256k1 = "0.3.5" tiny-keccak = "2.0.2" keyring = { package = "sp-keyring", git = "https://github.com/paritytech/substrate", branch = "master" } sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/runtime/test-runtime/Cargo.toml b/runtime/test-runtime/Cargo.toml index 1bd2c7882990..f0ecc70caf4c 100644 --- a/runtime/test-runtime/Cargo.toml +++ b/runtime/test-runtime/Cargo.toml @@ -60,7 +60,6 @@ polkadot-runtime-parachains = { path = "../parachains", default-features = false [dev-dependencies] hex-literal = "0.3.1" -libsecp256k1 = "0.3.5" tiny-keccak = "2.0.2" keyring = { package = "sp-keyring", git = "https://github.com/paritytech/substrate", branch = "master" } sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/runtime/westend/Cargo.toml b/runtime/westend/Cargo.toml 
index 1ecfbf43a078..090b45006af3 100644 --- a/runtime/westend/Cargo.toml +++ b/runtime/westend/Cargo.toml @@ -88,7 +88,6 @@ xcm-builder = { package = "xcm-builder", path = "../../xcm/xcm-builder", default [dev-dependencies] hex-literal = "0.3.1" -libsecp256k1 = "0.3.5" tiny-keccak = "2.0.2" keyring = { package = "sp-keyring", git = "https://github.com/paritytech/substrate", branch = "master" } sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" }