Actualize repo

main
six 2025-04-14 10:33:03 +07:00
parent bab5191a82
commit 7a07d73868
123 changed files with 34468 additions and 44989 deletions

16334
Cargo.lock generated 100644

File diff suppressed because it is too large Load Diff

136
Cargo.toml 100644
View File

@ -0,0 +1,136 @@
[workspace]
members = ["node", "pallets/feeless", "runtime", "pallets/parking", "pallets/substrate-validator-set", "pallets/aura"]
resolver = "2"
[workspace.package]
version = "0.1.0"
homepage = "https://g6.network/"
authors = ["The G6 Team<https://www.g6.network/>", "Sergey Gerodes <https://sergeygerodes.xyz>"]
license = "XXX" # TODO: SPDX has no identifier for an "all rights reserved" proprietary license; use a custom `LicenseRef-Proprietary` identifier or the `license-file` field instead.
repository = "https://g.g6.network/g6-chain/solo-chain"
edition = "2021"
rust-version = "1.81.0"
[workspace.lints.rust]
suspicious_double_ref_op = { level = "allow", priority = 2 }
[workspace.lints.clippy]
all = { level = "allow", priority = 0 }
correctness = { level = "warn", priority = 1 }
complexity = { level = "warn", priority = 1 }
if-same-then-else = { level = "allow", priority = 2 }
zero-prefixed-literal = { level = "allow", priority = 2 } # 00_1000_000
type_complexity = { level = "allow", priority = 2 } # raison d'etre
nonminimal-bool = { level = "allow", priority = 2 } # maybe
borrowed-box = { level = "allow", priority = 2 } # Reasonable to fix this one
too-many-arguments = { level = "allow", priority = 2 } # (Turning this on would lead to a large number of warnings in generated and pallet code)
needless-lifetimes = { level = "allow", priority = 2 } # generated code
unnecessary_cast = { level = "allow", priority = 2 } # Types may change
identity-op = { level = "allow", priority = 2 } # One case where we do 0 +
useless_conversion = { level = "allow", priority = 2 } # Types may change
unit_arg = { level = "allow", priority = 2 } # stylistic
option-map-unit-fn = { level = "allow", priority = 2 } # stylistic
bind_instead_of_map = { level = "allow", priority = 2 } # stylistic
erasing_op = { level = "allow", priority = 2 } # E.g. 0 * DOLLARS
eq_op = { level = "allow", priority = 2 } # In tests we test equality.
while_immutable_condition = { level = "allow", priority = 2 } # false positives
needless_option_as_deref = { level = "allow", priority = 2 } # false positives
derivable_impls = { level = "allow", priority = 2 } # false positives
stable_sort_primitive = { level = "allow", priority = 2 } # prefer stable sort
extra-unused-type-parameters = { level = "allow", priority = 2 } # stylistic
default_constructed_unit_structs = { level = "allow", priority = 2 } # stylistic
[workspace.dependencies]
network_constants = { path = "network_constants", default-features = false }
g6-solo-runtime = { path = "./runtime" }
# g6-solo-node = { path = "./node" }
pallet-aura = { path = "./pallets/aura", default-features = false }
pallet-middleware = { path = "./pallets/middleware", default-features = false }
pallet-parking = { path = "./pallets/parking", default-features = false }
pallet-permissions = { path = "./pallets/permissions", default-features = false }
pallet-identity = { path = "./pallets/identity", default-features = false }
substrate-validator-set = { path = "./pallets/substrate-validator-set", default-features = false }
clap = { version = "4.4.18", features = ["derive"] }
codec = { package = "parity-scale-codec", version = "3.6.12", default-features = false, features = ["derive"] }
futures = { version = "0.3.21", features = ["thread-pool"] }
jsonrpsee = { version = "0.23.2" }
primitive-types = { version = "0.12.2", default-features = false }
scale-info = { version = "2.11.3", default-features = false, features = ["derive", "serde"] }
serde_json = { version = "1.0.111", default-features = false, features = ["alloc"] }
polkadot-sdk = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
frame-benchmarking-cli = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
frame-executive = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
frame-metadata-hash-extension = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
frame-support = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
frame-system = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
frame-system-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
frame-try-runtime = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
pallet-authorship = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
pallet-offences = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
pallet-session = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
pallet-skip-feeless-payment = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
pallet-timestamp = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
pallet-transaction-payment = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
sc-basic-authorship = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
sc-cli = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
sc-client-api = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
sc-consensus = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
sc-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
sc-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
sc-executor = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
sc-network = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
sc-offchain = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
sc-rpc-api = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
sc-service = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
sc-telemetry = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
sc-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
sc-transaction-pool-api = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
sp-api = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
sp-application-crypto = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
sp-arithmetic = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
sp-block-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
sp-blockchain = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
sp-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false, features = ["serde"] }
sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false, features = ["serde"] }
sp-core = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false, features = ["serde"] }
sp-genesis-builder = { default-features = false, git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
sp-inherents = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
sp-io = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
sp-keyring = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
sp-keystore = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
sp-offchain = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false, features = ["serde"] }
sp-session = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
sp-staking = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
sp-state-machine = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
sp-std = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
sp-storage = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
sp-timestamp = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
sp-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
sp-version = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false, features = ["serde"] }
sp-weights = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407", default-features = false }
substrate-build-script-utils = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
substrate-frame-rpc-system = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
substrate-wasm-builder = { git = "https://github.com/paritytech/polkadot-sdk.git", branch = "stable2407" }
#substrate-validator-set = { git = "https://github.com/gautamdhameja/substrate-validator-set.git", tag = "polkadot-v1.13.0", default-features = false }
#substrate-validator-set = { git = "https://github.com/Allfeat/substrate-validator-set.git", branch = "stable2407", default-features = false }
#pallet-evm = { git = "https://github.com/moonbeam-foundation/frontier", branch = "moonbeam-polkadot-v1.11.0", default-features = false, features = ["forbid-evm-reentrancy", ] }
#pallet-evm = { git = "https://github.com/polkadot-evm/frontier.git", branch = "master", default-features = false, features = ["forbid-evm-reentrancy" ] }
#pallet-evm = { git = "https://github.com/polkadot-evm/frontier.git", branch = "polkadot-v1.7.0", default-features = false, features = ["forbid-evm-reentrancy" ] }
#pallet-evm = { git = "https://github.com/polkadot-evm/frontier.git", tag = "pallet-evm-v5.0.0", default-features = false, features = ["forbid-evm-reentrancy" ] }
#pallet-evm = "5.0.0"

Binary file not shown.

File diff suppressed because one or more lines are too long

83
LICENSE
View File

@ -1,73 +1,16 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
MIT No Attribution
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
Copyright Parity Technologies (UK) Ltd.
1. Definitions.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
Copyright 2024 G6_Chains
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

142
README.md
View File

@ -1,3 +1,141 @@
# G6 Chains Public
# Gen6 Public Blockchain
Public repository of G6 Chains: docs and brand kit.
A customized blockchain built with Substrate, designed to operate independently of a relay chain while retaining interoperability features.
## Technical Info
**Name:** Gen6 Public Chain
**Internal ID:** g6-solo-chain/107
**G6 SS58 Prefix:** 355
**SS58 Converter to G6 addresses:** [ss58.org](https://ss58.org/)
**Gen6 WSS API:** wss://gen6.app:443/node
**Default CHAIN_PORT:** 9944
**Default RPC_PORT:** 30301
<!-- NOTE(review): Substrate convention is RPC on 9944 and p2p (--port) on 30333 — confirm these two values are not swapped. -->
**Explorer:** [PJS Link](https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fgen6.app%3A443%2Fnode#/explorer)
**Wallets supported:** SubWallet, PolkadotJS
**Whitepaper:** [Gitbook](https://g6-networks.gitbook.io/g6-networks/)
## Getting Started with Building
Ensure all necessary dependencies are installed according to your platform's [Substrate installation guide.](https://docs.substrate.io/install/)
### Build
Use the following command to build the node without launching it:
```sh
cargo build --release
```
### Single-Node Development Chain
The following command starts a single-node development chain that doesn't
persist state:
```sh
./target/release/g6-solo-node --dev
```
To purge the development chain's state, run the following command:
```sh
./target/release/g6-solo-node purge-chain --dev
```
To start the development chain with detailed logging, run the following command:
```sh
RUST_BACKTRACE=1 ./target/release/g6-solo-node -ldebug --dev
```
### Run with docker
You need to have docker installed on your system
#### Build
```sh
docker compose -f ./docker_g6/compose_files/solo_chain/builder/docker-compose.yml up
```
#### Run
```sh
docker compose -f ./docker_g6/compose_files/solo_chain/runner/docker-compose.yml up
```
#### Run the live chain
- build the chain
- create and insert keys with the generate_validator_keys.sh script from the Scripts project
This produces a data folder containing your secrets. Do NOT share the secrets inside it with anybody.
```shell
data
├── aura-sr25519.json
├── chains
│   └── gen6
│   ├── keystore
│   │   ├── 617572617c659be616ee6da553529e8e11081935a09dfd70a2556a972cd32822f916383a
│   │   └── 6772616ed81dd22ce966b9e77060292166f0904a126bd5d8b54db67f7476b0529ce2388b
│   └── network
│   └── secret_ed25519
├── grandpa-ed25519.json
├── node-key-ed25519.json
└── peer-id.txt
```
Run the blockchain
```shell
$SOLO_CHAIN_NODE_EXECUTABLE \
--base-path "$BASE_PATH" \
--chain "$RAW_CHAINSPEC_FILENAME" \
--port "$CHAIN_PORT" \
--rpc-port "$RPC_PORT" \
--validator \
--name "$NODE_NAME"
```
BASE_PATH = the path to the data folder created by the generate script.
SOLO_CHAIN_NODE_EXECUTABLE = the path to the node executable, e.g. <solo_chain_project>/target/release/g6-solo-node.
RAW_CHAINSPEC_FILENAME = the raw chainspec file. The live raw chainspec is stored in this project.
CHAIN_PORT=9944
RPC_PORT=30301
NODE_NAME = any unique name
### Key generation
```sh
./target/release/g6-solo-node key generate --output-type json
```
Example output
```txt
{
"accountId": "0x9e6a8d5406599378f39784cb2f31f8a848d4605177da43d9e1227026a970e51c",
"networkId": "substrate",
"publicKey": "0x9e6a8d5406599378f39784cb2f31f8a848d4605177da43d9e1227026a970e51c",
"secretPhrase": "idle grid torch reflect acid cost whip verify song humble raw lion",
"secretSeed": "0xcd517bc718b16711d41182d98945867ee3eaf65b6dedfaaa1995efc1d5808073",
"ss58Address": "5FeR5LJ8jR6SRXeKXFEn61H2ztWxTw4Vvpz9vRrVdhcpuvKk",
"ss58PublicKey": "5FeR5LJ8jR6SRXeKXFEn61H2ztWxTw4Vvpz9vRrVdhcpuvKk"
}
```
Substrate supports different key types (e.g., sr25519, ed25519, or ecdsa). You can specify the key type using the --scheme flag
```sh
./target/release/g6-solo-node key generate --scheme sr25519 --output-type json
```
Generate Keys for a Specific Network
```sh
./target/release/g6-solo-node key generate --network polkadot
```
Help Command for Details: To explore all available options, use the --help flag:
```sh
./target/release/g6-solo-node key --help
```

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.3 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2.9 MiB

Binary file not shown.

View File

@ -1 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?><svg id="matrica" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 283.46 283.46"><defs><style>.cls-1{fill:#fff;stroke-width:0px;}</style></defs><path class="cls-1" d="M258.86,79c0-.63-.33-1.2-.88-1.52l-14.76-8.52-58.31-33.66-14.76-8.53s-.1-.04-.15-.06l-18.46-10.66s-.08-.07-.13-.1l-12.13-7c-.54-.31-1.21-.31-1.75,0l-14.76,8.52-51.43,29.69-15.64,9.02-15.46,8.93-14.76,8.52c-.54.31-.88.89-.88,1.52v58.64l.03.02v17.4l-.03-.02v53.29c0,.63.33,1.2.88,1.52l14.76,8.52,19.37,11.18h0s35.34,20.4,35.34,20.4l3.6,2.08,14.76,8.53s.08.03.12.05l18.47,10.65c.05.04.1.09.16.12l12.13,7c.54.31,1.21.31,1.75,0l14.76-8.52,51.43-29.69,15.64-9.02,15.46-8.93,14.76-8.52c.54-.31.88-.89.88-1.52v-58.64h-.02c0-5.81.01-11.6.02-17.4v-53.29ZM40.24,84.17c0-.63.33-1.2.88-1.52l14.59-8.42,15.64-9.02,35.66-20.59,15.64-9.04,13.97-8.05c1.07-.62,2.39-.62,3.46,0l11.14,6.43c1.05.61,1.05,2.13,0,2.74l-13.33,7.68-15.64,9.04-50.91,29.39-14.76,8.52c-.54.31-.88.89-.88,1.52v37.8c0,1.35-1.46,2.19-2.63,1.52l-11.96-6.91c-.54-.31-.88-.89-.88-1.52v-39.57ZM242.35,200.81l-14.59,8.42-15.64,9.02-35.66,20.59-15.64,9.04-14.9,8.59c-.51.29-1.14.29-1.65,0l-12.72-7.34c-.76-.44-.76-1.53,0-1.97l14.03-8.09,15.64-9.04,50.91-29.39,14.76-8.52c.54-.31.88-.89.88-1.52v-37.8c.01-.12.12-1.02.91-1.53.68-.43,1.58-.4,2.29.08,3.74,2.25,7.48,4.49,11.22,6.74.17.1.54.36.79.85.27.53.24,1.01.22,1.21.01,13.04.02,26.08.04,39.13,0,.63-.33,1.2-.88,1.52ZM243.22,102.18c0,1.35-1.46,2.19-2.63,1.52l-70.8-40.87c-.54-.31-1.21-.31-1.75,0l-12.13,7c-1.17.67-1.17,2.36,0,3.03l86.42,49.89c.54.31.88.89.88,1.52v13.34c0,1.35-1.46,2.19-2.63,1.52l-12.82-7.4-14.32-8.26s0-.09,0-.14c-20.15-11.65-40.3-23.29-60.46-34.94-.08-.06-.2-.13-.35-.2-.34-.16-.85-.42-1.43-.27-.47.12-.74.44-.82.53-.44.52-.39,1.13-.38,1.26,0,.04-.01.1-.01.16,0,.05,0,.09.01.12,0,36.25,0,72.5,0,108.75-.04.44.17.87.54,1.1.56.36,1.16.06,1.19.05,4.6-2.56,9.24-5.17,13.9-7.83,3.81-2.18,7.59-4.36,11.32-6.54l18.78-10.85c.54-.31.88-.89.88-1.52v-22.78c0-.63-.33-1.2-.88-1.52l-14.76-8.5
2-14.46-8.34c-.54-.31-.88-.89-.88-1.52v-14.13c0-1.35,1.47-2.2,2.64-1.51l43,25.21c.54.31.87.89.87,1.51v40.02c0,.63-.33,1.2-.88,1.52l-26.78,15.46.05.08-67.42,38.93-.05-.08-1.98,1.15c-.5.29-1.11.3-1.63.05l-43.24-24.94-29.08-16.78c-.54-.31-.88-.89-.88-1.52v-14.16c0-1.35,1.46-2.19,2.63-1.52l70.66,40.79c.54.31,1.21.31,1.75,0l12.12-7c1.17-.67,1.17-2.36,0-3.03l-86.25-49.81c-.54-.31-.88-.89-.88-1.52v-13.34c0-1.35,1.46-2.19,2.63-1.52l2.56,1.48-.08.13,78.33,45.22.03-.06,7.81,4.51c1.17.67,2.63-.17,2.63-1.52v-64.92c0-1.35-1.46-2.19-2.63-1.52l-13.01,7.51v.02l-4.38,2.53c-.54.31-.88.89-.88,1.52v14c0,1.35,1.46,2.19,2.63,1.52,1.17-.67,2.63.17,2.63,1.52v6.93l.02-.02v.61l-.02.02v3.3c0,1.35-1.46,2.19-2.63,1.52l-11.84-6.83c-.54-.31-.88-.89-.88-1.52v-40.94c0-.63.33-1.2.88-1.52l11.29-6.52.55-.32c1.17-.67,2.63.17,2.63,1.52v1.58c0,1.35,1.46,2.19,2.63,1.52l12.13-7c.54-.31.88-.89.88-1.52v-21.66c0-1.35-1.46-2.19-2.63-1.52l-12.99,7.5-.02.02-3.17,1.83-12.16,7.02-3.47,2-11.29,6.51c-.54.31-.88.89-.88,1.52v38.91c0,1.35-1.46,2.19-2.63,1.52l-12.73-7.35c-.54-.31-.88-.89-.88-1.52v-39.99c0-.63.33-1.2.87-1.52l14.09-8.14-.13-.2c27.41-15.73,54.82-31.47,82.23-47.2.41-.23.88-.29,1.33-.18l48.32,27.9.03-.05,24.26,14.01c.54.31.88.89.88,1.52v14.16ZM165.62,171.97v-19.41c0-1.35,1.46-2.19,2.63-1.52l11.84,6.83c.54.31.88.89.88,1.52v5.75c0,.63-.33,1.2-.88,1.52l-11.29,6.51-.55.32c-1.17.67-2.63-.17-2.63-1.52Z"/></svg>

Before

Width:  |  Height:  |  Size: 3.3 KiB

View File

@ -1 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?><svg id="matrica" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 283.46 283.46"><defs><style>.cls-1{fill:#1d1d1b;stroke-width:0px;}</style></defs><path class="cls-1" d="M258.86,79c0-.63-.33-1.2-.88-1.52l-14.76-8.52-58.31-33.66-14.76-8.53s-.1-.04-.15-.06l-18.46-10.66s-.08-.07-.13-.1l-12.13-7c-.54-.31-1.21-.31-1.75,0l-14.76,8.52-51.43,29.69-15.64,9.02-15.46,8.93-14.76,8.52c-.54.31-.88.89-.88,1.52v58.64l.03.02v17.4l-.03-.02v53.29c0,.63.33,1.2.88,1.52l14.76,8.52,19.37,11.18h0s35.34,20.4,35.34,20.4l3.6,2.08,14.76,8.53s.08.03.12.05l18.47,10.65c.05.04.1.09.16.12l12.13,7c.54.31,1.21.31,1.75,0l14.76-8.52,51.43-29.69,15.64-9.02,15.46-8.93,14.76-8.52c.54-.31.88-.89.88-1.52v-58.64h-.02c0-5.81.01-11.6.02-17.4v-53.29ZM40.24,84.17c0-.63.33-1.2.88-1.52l14.59-8.42,15.64-9.02,35.66-20.59,15.64-9.04,13.97-8.05c1.07-.62,2.39-.62,3.46,0l11.14,6.43c1.05.61,1.05,2.13,0,2.74l-13.33,7.68-15.64,9.04-50.91,29.39-14.76,8.52c-.54.31-.88.89-.88,1.52v37.8c0,1.35-1.46,2.19-2.63,1.52l-11.96-6.91c-.54-.31-.88-.89-.88-1.52v-39.57ZM242.35,200.81l-14.59,8.42-15.64,9.02-35.66,20.59-15.64,9.04-14.9,8.59c-.51.29-1.14.29-1.65,0l-12.72-7.34c-.76-.44-.76-1.53,0-1.97l14.03-8.09,15.64-9.04,50.91-29.39,14.76-8.52c.54-.31.88-.89.88-1.52v-37.8c.01-.12.12-1.02.91-1.53.68-.43,1.58-.4,2.29.08,3.74,2.25,7.48,4.49,11.22,6.74.17.1.54.36.79.85.27.53.24,1.01.22,1.21.01,13.04.02,26.08.04,39.13,0,.63-.33,1.2-.88,1.52ZM243.22,102.18c0,1.35-1.46,2.19-2.63,1.52l-70.8-40.87c-.54-.31-1.21-.31-1.75,0l-12.13,7c-1.17.67-1.17,2.36,0,3.03l86.42,49.89c.54.31.88.89.88,1.52v13.34c0,1.35-1.46,2.19-2.63,1.52l-12.82-7.4-14.32-8.26s0-.09,0-.14c-20.15-11.65-40.3-23.29-60.46-34.94-.08-.06-.2-.13-.35-.2-.34-.16-.85-.42-1.43-.27-.47.12-.74.44-.82.53-.44.52-.39,1.13-.38,1.26,0,.04-.01.1-.01.16,0,.05,0,.09.01.12,0,36.25,0,72.5,0,108.75-.04.44.17.87.54,1.1.56.36,1.16.06,1.19.05,4.6-2.56,9.24-5.17,13.9-7.83,3.81-2.18,7.59-4.36,11.32-6.54l18.78-10.85c.54-.31.88-.89.88-1.52v-22.78c0-.63-.33-1.2-.88-1.52l-14.76-
8.52-14.46-8.34c-.54-.31-.88-.89-.88-1.52v-14.13c0-1.35,1.47-2.2,2.64-1.51l43,25.21c.54.31.87.89.87,1.51v40.02c0,.63-.33,1.2-.88,1.52l-26.78,15.46.05.08-67.42,38.93-.05-.08-1.98,1.15c-.5.29-1.11.3-1.63.05l-43.24-24.94-29.08-16.78c-.54-.31-.88-.89-.88-1.52v-14.16c0-1.35,1.46-2.19,2.63-1.52l70.66,40.79c.54.31,1.21.31,1.75,0l12.12-7c1.17-.67,1.17-2.36,0-3.03l-86.25-49.81c-.54-.31-.88-.89-.88-1.52v-13.34c0-1.35,1.46-2.19,2.63-1.52l2.56,1.48-.08.13,78.33,45.22.03-.06,7.81,4.51c1.17.67,2.63-.17,2.63-1.52v-64.92c0-1.35-1.46-2.19-2.63-1.52l-13.01,7.51v.02l-4.38,2.53c-.54.31-.88.89-.88,1.52v14c0,1.35,1.46,2.19,2.63,1.52,1.17-.67,2.63.17,2.63,1.52v6.93l.02-.02v.61l-.02.02v3.3c0,1.35-1.46,2.19-2.63,1.52l-11.84-6.83c-.54-.31-.88-.89-.88-1.52v-40.94c0-.63.33-1.2.88-1.52l11.29-6.52.55-.32c1.17-.67,2.63.17,2.63,1.52v1.58c0,1.35,1.46,2.19,2.63,1.52l12.13-7c.54-.31.88-.89.88-1.52v-21.66c0-1.35-1.46-2.19-2.63-1.52l-12.99,7.5-.02.02-3.17,1.83-12.16,7.02-3.47,2-11.29,6.51c-.54.31-.88.89-.88,1.52v38.91c0,1.35-1.46,2.19-2.63,1.52l-12.73-7.35c-.54-.31-.88-.89-.88-1.52v-39.99c0-.63.33-1.2.87-1.52l14.09-8.14-.13-.2c27.41-15.73,54.82-31.47,82.23-47.2.41-.23.88-.29,1.33-.18l48.32,27.9.03-.05,24.26,14.01c.54.31.88.89.88,1.52v14.16ZM165.62,171.97v-19.41c0-1.35,1.46-2.19,2.63-1.52l11.84,6.83c.54.31.88.89.88,1.52v5.75c0,.63-.33,1.2-.88,1.52l-11.29,6.51-.55.32c-1.17.67-2.63-.17-2.63-1.52Z"/></svg>

Before

Width:  |  Height:  |  Size: 3.3 KiB

View File

@ -1 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?><svg id="matrica" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 283.46 283.46"><defs><style>.cls-1{fill:#009fe3;stroke-width:0px;}</style></defs><path class="cls-1" d="M258.86,79c0-.63-.33-1.2-.88-1.52l-14.76-8.52-58.31-33.66-14.76-8.53s-.1-.04-.15-.06l-18.46-10.66s-.08-.07-.13-.1l-12.13-7c-.54-.31-1.21-.31-1.75,0l-14.76,8.52-51.43,29.69-15.64,9.02-15.46,8.93-14.76,8.52c-.54.31-.88.89-.88,1.52v58.64l.03.02v17.4l-.03-.02v53.29c0,.63.33,1.2.88,1.52l14.76,8.52,19.37,11.18h0s35.34,20.4,35.34,20.4l3.6,2.08,14.76,8.53s.08.03.12.05l18.47,10.65c.05.04.1.09.16.12l12.13,7c.54.31,1.21.31,1.75,0l14.76-8.52,51.43-29.69,15.64-9.02,15.46-8.93,14.76-8.52c.54-.31.88-.89.88-1.52v-58.64h-.02c0-5.81.01-11.6.02-17.4v-53.29ZM40.24,84.17c0-.63.33-1.2.88-1.52l14.59-8.42,15.64-9.02,35.66-20.59,15.64-9.04,13.97-8.05c1.07-.62,2.39-.62,3.46,0l11.14,6.43c1.05.61,1.05,2.13,0,2.74l-13.33,7.68-15.64,9.04-50.91,29.39-14.76,8.52c-.54.31-.88.89-.88,1.52v37.8c0,1.35-1.46,2.19-2.63,1.52l-11.96-6.91c-.54-.31-.88-.89-.88-1.52v-39.57ZM242.35,200.81l-14.59,8.42-15.64,9.02-35.66,20.59-15.64,9.04-14.9,8.59c-.51.29-1.14.29-1.65,0l-12.72-7.34c-.76-.44-.76-1.53,0-1.97l14.03-8.09,15.64-9.04,50.91-29.39,14.76-8.52c.54-.31.88-.89.88-1.52v-37.8c.01-.12.12-1.02.91-1.53.68-.43,1.58-.4,2.29.08,3.74,2.25,7.48,4.49,11.22,6.74.17.1.54.36.79.85.27.53.24,1.01.22,1.21.01,13.04.02,26.08.04,39.13,0,.63-.33,1.2-.88,1.52ZM243.22,102.18c0,1.35-1.46,2.19-2.63,1.52l-70.8-40.87c-.54-.31-1.21-.31-1.75,0l-12.13,7c-1.17.67-1.17,2.36,0,3.03l86.42,49.89c.54.31.88.89.88,1.52v13.34c0,1.35-1.46,2.19-2.63,1.52l-12.82-7.4-14.32-8.26s0-.09,0-.14c-20.15-11.65-40.3-23.29-60.46-34.94-.08-.06-.2-.13-.35-.2-.34-.16-.85-.42-1.43-.27-.47.12-.74.44-.82.53-.44.52-.39,1.13-.38,1.26,0,.04-.01.1-.01.16,0,.05,0,.09.01.12,0,36.25,0,72.5,0,108.75-.04.44.17.87.54,1.1.56.36,1.16.06,1.19.05,4.6-2.56,9.24-5.17,13.9-7.83,3.81-2.18,7.59-4.36,11.32-6.54l18.78-10.85c.54-.31.88-.89.88-1.52v-22.78c0-.63-.33-1.2-.88-1.52l-14.76-
8.52-14.46-8.34c-.54-.31-.88-.89-.88-1.52v-14.13c0-1.35,1.47-2.2,2.64-1.51l43,25.21c.54.31.87.89.87,1.51v40.02c0,.63-.33,1.2-.88,1.52l-26.78,15.46.05.08-67.42,38.93-.05-.08-1.98,1.15c-.5.29-1.11.3-1.63.05l-43.24-24.94-29.08-16.78c-.54-.31-.88-.89-.88-1.52v-14.16c0-1.35,1.46-2.19,2.63-1.52l70.66,40.79c.54.31,1.21.31,1.75,0l12.12-7c1.17-.67,1.17-2.36,0-3.03l-86.25-49.81c-.54-.31-.88-.89-.88-1.52v-13.34c0-1.35,1.46-2.19,2.63-1.52l2.56,1.48-.08.13,78.33,45.22.03-.06,7.81,4.51c1.17.67,2.63-.17,2.63-1.52v-64.92c0-1.35-1.46-2.19-2.63-1.52l-13.01,7.51v.02l-4.38,2.53c-.54.31-.88.89-.88,1.52v14c0,1.35,1.46,2.19,2.63,1.52,1.17-.67,2.63.17,2.63,1.52v6.93l.02-.02v.61l-.02.02v3.3c0,1.35-1.46,2.19-2.63,1.52l-11.84-6.83c-.54-.31-.88-.89-.88-1.52v-40.94c0-.63.33-1.2.88-1.52l11.29-6.52.55-.32c1.17-.67,2.63.17,2.63,1.52v1.58c0,1.35,1.46,2.19,2.63,1.52l12.13-7c.54-.31.88-.89.88-1.52v-21.66c0-1.35-1.46-2.19-2.63-1.52l-12.99,7.5-.02.02-3.17,1.83-12.16,7.02-3.47,2-11.29,6.51c-.54.31-.88.89-.88,1.52v38.91c0,1.35-1.46,2.19-2.63,1.52l-12.73-7.35c-.54-.31-.88-.89-.88-1.52v-39.99c0-.63.33-1.2.87-1.52l14.09-8.14-.13-.2c27.41-15.73,54.82-31.47,82.23-47.2.41-.23.88-.29,1.33-.18l48.32,27.9.03-.05,24.26,14.01c.54.31.88.89.88,1.52v14.16ZM165.62,171.97v-19.41c0-1.35,1.46-2.19,2.63-1.52l11.84,6.83c.54.31.88.89.88,1.52v5.75c0,.63-.33,1.2-.88,1.52l-11.29,6.51-.55.32c-1.17.67-2.63-.17-2.63-1.52Z"/></svg>

Before

Width:  |  Height:  |  Size: 3.3 KiB

View File

@ -1,12 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg id="matrica" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 283.46 283.46">
<defs>
<style>
.cls-1 {
fill: #161616;
stroke-width: 0px;
}
</style>
</defs>
<path class="cls-1" d="M258.86,132.29v-53.29c0-.63-.33-1.2-.88-1.52l-14.76-8.52-58.31-33.66-14.76-8.53c-.54-.31-1.21-.31-1.75,0l-14.76,8.53-15.76,9.08-15.64,9.04-50.91,29.39-14.76,8.52c-.54.31-.88.89-.88,1.52v37.8c0,1.35-1.46,2.19-2.63,1.52l-11.96-6.91c-.54-.31-.88-.89-.88-1.52v-39.57c0-.63.33-1.2.88-1.52l14.59-8.42,15.64-9.02,35.66-20.59,15.64-9.04,15.76-9.08,13.01-7.52c1.17-.67,1.17-2.36,0-3.03l-12.13-7c-.54-.31-1.21-.31-1.75,0l-14.76,8.52-51.43,29.69-15.64,9.02-15.46,8.93-14.76,8.52c-.54.31-.88.89-.88,1.52v58.64l.03.02v17.4l-.03-.02v53.29c0,.63.33,1.2.88,1.52l14.76,8.52,58.31,33.66,14.76,8.53c.54.31,1.21.31,1.75,0l14.76-8.53,15.76-9.08,15.64-9.04,50.91-29.39,14.76-8.52c.54-.31.88-.89.88-1.52v-37.8c.01-.12.12-1.02.91-1.53.68-.43,1.58-.4,2.29.08,3.74,2.25,7.48,4.49,11.22,6.74.17.1.54.36.79.85.27.53.24,1.01.22,1.21.01,13.04.02,26.09.04,39.13,0,.63-.33,1.2-.88,1.52l-14.59,8.42-15.64,9.02-35.66,20.59-15.64,9.04-15.76,9.08-13.01,7.52c-1.17.67-1.17,2.36,0,3.03l12.13,7c.54.31,1.21.31,1.75,0l14.76-8.52,51.43-29.69,15.64-9.02,15.46-8.93,14.76-8.52c.54-.31.88-.89.88-1.52v-58.64h-.02c0-5.81.01-11.6.02-17.4ZM227.76,131.73l-14.32-8.26s0-.09,0-.14c-20.15-11.65-40.3-23.29-60.46-34.94-.08-.06-.2-.13-.35-.2-.34-.16-.85-.42-1.43-.27-.47.12-.74.44-.82.53-.44.52-.39,1.13-.38,1.26,0,.04-.01.1-.01.16,0,.05,0,.09.01.12,0,36.25,0,72.5,0,108.75-.04.44.17.87.54,1.1.56.36,1.16.06,1.19.05,4.6-2.56,9.24-5.17,13.9-7.83,3.81-2.18,7.59-4.36,11.32-6.54l18.78-10.85c.54-.31.88-.89.88-1.52v-22.78c0-.63-.33-1.2-.88-1.52l-14.76-8.52-14.46-8.34c-.54-.31-.88-.89-.88-1.52v-14.13c0-1.35,1.47-2.2,2.64-1.51l43,25.21c.54.31.87.89.87,1.51v40.02c0,.63-.33,1.2-.88,1.52l-26.78,15.46.05.08-67.42,38.93-.05-.08-1.98,1.15c-.54.31-1.21.31-1.75,0l-72.19-41.67c-.54-.31-.88-.89-.88-1.52v-14.16c0-1.35,1.46-2.19,2.63-1.52l70.66,40.79c.54.31,1.21.31,1.75,0l12.12-7c1.17-.67,1.17-2.36,0-3.03l-86.25-49.81c-.54-.31-.88-.89-.88-1.52v-13.34c0-1.35,1.46-2.19,2.63-1.52l2.56,1.48-.08.13,78.33,45.22.03-.06,7.81,4.51c1.17.67,2.63-.17
,2.63-1.52v-64.92c0-1.35-1.46-2.19-2.63-1.52l-13.01,7.51v.02l-4.38,2.53c-.54.31-.88.89-.88,1.52v14c0,1.35,1.46,2.19,2.63,1.52,1.17-.67,2.63.17,2.63,1.52v6.93l.02-.02v.61l-.02.02v3.3c0,1.35-1.46,2.19-2.63,1.52l-11.84-6.83c-.54-.31-.88-.89-.88-1.52v-40.94c0-.63.33-1.2.88-1.52l11.29-6.52.55-.32c1.17-.67,2.63.17,2.63,1.52v1.58c0,1.35,1.46,2.19,2.63,1.52l12.13-7c.54-.31.88-.89.88-1.52v-21.66c0-1.35-1.46-2.19-2.63-1.52l-12.99,7.5-.02.02-3.17,1.83-12.16,7.02-3.47,2-11.29,6.51c-.54.31-.88.89-.88,1.52v38.91c0,1.35-1.46,2.19-2.63,1.52l-12.73-7.35c-.54-.31-.88-.89-.88-1.52v-39.99c0-.63.33-1.2.87-1.52l14.09-8.14-.13-.2c27.41-15.73,54.82-31.47,82.23-47.2.54-.31,1.21-.31,1.75,0l72.19,41.67c.54.31.88.89.88,1.52v14.16c0,1.35-1.46,2.19-2.63,1.52l-70.8-40.87c-.54-.31-1.21-.31-1.75,0l-12.13,7c-1.17.67-1.17,2.36,0,3.03l86.42,49.89c.54.31.88.89.88,1.52v13.34c0,1.35-1.46,2.19-2.63,1.52l-12.82-7.4ZM168.25,151.05l11.84,6.83c.54.31.88.89.88,1.52v5.75c0,.63-.33,1.2-.88,1.52l-11.29,6.51-.55.32c-1.17.67-2.63-.17-2.63-1.52v-19.41c0-1.35,1.46-2.19,2.63-1.52Z"/>
</svg>

Before

Width:  |  Height:  |  Size: 3.2 KiB

View File

@ -1,12 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg id="matrica" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 283.46 283.46">
<defs>
<style>
.cls-1 {
fill: #009fe3;
stroke-width: 0px;
}
</style>
</defs>
<path class="cls-1" d="M258.86,132.29v-53.29c0-.63-.33-1.2-.88-1.52l-14.76-8.52-58.31-33.66-14.76-8.53c-.54-.31-1.21-.31-1.75,0l-14.76,8.53-15.76,9.08-15.64,9.04-50.91,29.39-14.76,8.52c-.54.31-.88.89-.88,1.52v37.8c0,1.35-1.46,2.19-2.63,1.52l-11.96-6.91c-.54-.31-.88-.89-.88-1.52v-39.57c0-.63.33-1.2.88-1.52l14.59-8.42,15.64-9.02,35.66-20.59,15.64-9.04,15.76-9.08,13.01-7.52c1.17-.67,1.17-2.36,0-3.03l-12.13-7c-.54-.31-1.21-.31-1.75,0l-14.76,8.52-51.43,29.69-15.64,9.02-15.46,8.93-14.76,8.52c-.54.31-.88.89-.88,1.52v58.64l.03.02v17.4l-.03-.02v53.29c0,.63.33,1.2.88,1.52l14.76,8.52,58.31,33.66,14.76,8.53c.54.31,1.21.31,1.75,0l14.76-8.53,15.76-9.08,15.64-9.04,50.91-29.39,14.76-8.52c.54-.31.88-.89.88-1.52v-37.8c.01-.12.12-1.02.91-1.53.68-.43,1.58-.4,2.29.08,3.74,2.25,7.48,4.49,11.22,6.74.17.1.54.36.79.85.27.53.24,1.01.22,1.21.01,13.04.02,26.09.04,39.13,0,.63-.33,1.2-.88,1.52l-14.59,8.42-15.64,9.02-35.66,20.59-15.64,9.04-15.76,9.08-13.01,7.52c-1.17.67-1.17,2.36,0,3.03l12.13,7c.54.31,1.21.31,1.75,0l14.76-8.52,51.43-29.69,15.64-9.02,15.46-8.93,14.76-8.52c.54-.31.88-.89.88-1.52v-58.64h-.02c0-5.81.01-11.6.02-17.4ZM227.76,131.73l-14.32-8.26s0-.09,0-.14c-20.15-11.65-40.3-23.29-60.46-34.94-.08-.06-.2-.13-.35-.2-.34-.16-.85-.42-1.43-.27-.47.12-.74.44-.82.53-.44.52-.39,1.13-.38,1.26,0,.04-.01.1-.01.16,0,.05,0,.09.01.12,0,36.25,0,72.5,0,108.75-.04.44.17.87.54,1.1.56.36,1.16.06,1.19.05,4.6-2.56,9.24-5.17,13.9-7.83,3.81-2.18,7.59-4.36,11.32-6.54l18.78-10.85c.54-.31.88-.89.88-1.52v-22.78c0-.63-.33-1.2-.88-1.52l-14.76-8.52-14.46-8.34c-.54-.31-.88-.89-.88-1.52v-14.13c0-1.35,1.47-2.2,2.64-1.51l43,25.21c.54.31.87.89.87,1.51v40.02c0,.63-.33,1.2-.88,1.52l-26.78,15.46.05.08-67.42,38.93-.05-.08-1.98,1.15c-.54.31-1.21.31-1.75,0l-72.19-41.67c-.54-.31-.88-.89-.88-1.52v-14.16c0-1.35,1.46-2.19,2.63-1.52l70.66,40.79c.54.31,1.21.31,1.75,0l12.12-7c1.17-.67,1.17-2.36,0-3.03l-86.25-49.81c-.54-.31-.88-.89-.88-1.52v-13.34c0-1.35,1.46-2.19,2.63-1.52l2.56,1.48-.08.13,78.33,45.22.03-.06,7.81,4.51c1.17.67,2.63-.17
,2.63-1.52v-64.92c0-1.35-1.46-2.19-2.63-1.52l-13.01,7.51v.02l-4.38,2.53c-.54.31-.88.89-.88,1.52v14c0,1.35,1.46,2.19,2.63,1.52,1.17-.67,2.63.17,2.63,1.52v6.93l.02-.02v.61l-.02.02v3.3c0,1.35-1.46,2.19-2.63,1.52l-11.84-6.83c-.54-.31-.88-.89-.88-1.52v-40.94c0-.63.33-1.2.88-1.52l11.29-6.52.55-.32c1.17-.67,2.63.17,2.63,1.52v1.58c0,1.35,1.46,2.19,2.63,1.52l12.13-7c.54-.31.88-.89.88-1.52v-21.66c0-1.35-1.46-2.19-2.63-1.52l-12.99,7.5-.02.02-3.17,1.83-12.16,7.02-3.47,2-11.29,6.51c-.54.31-.88.89-.88,1.52v38.91c0,1.35-1.46,2.19-2.63,1.52l-12.73-7.35c-.54-.31-.88-.89-.88-1.52v-39.99c0-.63.33-1.2.87-1.52l14.09-8.14-.13-.2c27.41-15.73,54.82-31.47,82.23-47.2.54-.31,1.21-.31,1.75,0l72.19,41.67c.54.31.88.89.88,1.52v14.16c0,1.35-1.46,2.19-2.63,1.52l-70.8-40.87c-.54-.31-1.21-.31-1.75,0l-12.13,7c-1.17.67-1.17,2.36,0,3.03l86.42,49.89c.54.31.88.89.88,1.52v13.34c0,1.35-1.46,2.19-2.63,1.52l-12.82-7.4ZM168.25,151.05l11.84,6.83c.54.31.88.89.88,1.52v5.75c0,.63-.33,1.2-.88,1.52l-11.29,6.51-.55.32c-1.17.67-2.63-.17-2.63-1.52v-19.41c0-1.35,1.46-2.19,2.63-1.52Z"/>
</svg>

Before

Width:  |  Height:  |  Size: 3.2 KiB

View File

@ -1,12 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg id="matrica" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 283.46 283.46">
<defs>
<style>
.cls-1 {
fill: #fff;
stroke-width: 0px;
}
</style>
</defs>
<path class="cls-1" d="M258.86,132.29v-53.29c0-.63-.33-1.2-.88-1.52l-14.76-8.52-58.31-33.66-14.76-8.53c-.54-.31-1.21-.31-1.75,0l-14.76,8.53-15.76,9.08-15.64,9.04-50.91,29.39-14.76,8.52c-.54.31-.88.89-.88,1.52v37.8c0,1.35-1.46,2.19-2.63,1.52l-11.96-6.91c-.54-.31-.88-.89-.88-1.52v-39.57c0-.63.33-1.2.88-1.52l14.59-8.42,15.64-9.02,35.66-20.59,15.64-9.04,15.76-9.08,13.01-7.52c1.17-.67,1.17-2.36,0-3.03l-12.13-7c-.54-.31-1.21-.31-1.75,0l-14.76,8.52-51.43,29.69-15.64,9.02-15.46,8.93-14.76,8.52c-.54.31-.88.89-.88,1.52v58.64l.03.02v17.4l-.03-.02v53.29c0,.63.33,1.2.88,1.52l14.76,8.52,58.31,33.66,14.76,8.53c.54.31,1.21.31,1.75,0l14.76-8.53,15.76-9.08,15.64-9.04,50.91-29.39,14.76-8.52c.54-.31.88-.89.88-1.52v-37.8c.01-.12.12-1.02.91-1.53.68-.43,1.58-.4,2.29.08,3.74,2.25,7.48,4.49,11.22,6.74.17.1.54.36.79.85.27.53.24,1.01.22,1.21.01,13.04.02,26.09.04,39.13,0,.63-.33,1.2-.88,1.52l-14.59,8.42-15.64,9.02-35.66,20.59-15.64,9.04-15.76,9.08-13.01,7.52c-1.17.67-1.17,2.36,0,3.03l12.13,7c.54.31,1.21.31,1.75,0l14.76-8.52,51.43-29.69,15.64-9.02,15.46-8.93,14.76-8.52c.54-.31.88-.89.88-1.52v-58.64h-.02c0-5.81.01-11.6.02-17.4ZM227.76,131.73l-14.32-8.26s0-.09,0-.14c-20.15-11.65-40.3-23.29-60.46-34.94-.08-.06-.2-.13-.35-.2-.34-.16-.85-.42-1.43-.27-.47.12-.74.44-.82.53-.44.52-.39,1.13-.38,1.26,0,.04-.01.1-.01.16,0,.05,0,.09.01.12,0,36.25,0,72.5,0,108.75-.04.44.17.87.54,1.1.56.36,1.16.06,1.19.05,4.6-2.56,9.24-5.17,13.9-7.83,3.81-2.18,7.59-4.36,11.32-6.54l18.78-10.85c.54-.31.88-.89.88-1.52v-22.78c0-.63-.33-1.2-.88-1.52l-14.76-8.52-14.46-8.34c-.54-.31-.88-.89-.88-1.52v-14.13c0-1.35,1.47-2.2,2.64-1.51l43,25.21c.54.31.87.89.87,1.51v40.02c0,.63-.33,1.2-.88,1.52l-26.78,15.46.05.08-67.42,38.93-.05-.08-1.98,1.15c-.54.31-1.21.31-1.75,0l-72.19-41.67c-.54-.31-.88-.89-.88-1.52v-14.16c0-1.35,1.46-2.19,2.63-1.52l70.66,40.79c.54.31,1.21.31,1.75,0l12.12-7c1.17-.67,1.17-2.36,0-3.03l-86.25-49.81c-.54-.31-.88-.89-.88-1.52v-13.34c0-1.35,1.46-2.19,2.63-1.52l2.56,1.48-.08.13,78.33,45.22.03-.06,7.81,4.51c1.17.67,2.63-.17
,2.63-1.52v-64.92c0-1.35-1.46-2.19-2.63-1.52l-13.01,7.51v.02l-4.38,2.53c-.54.31-.88.89-.88,1.52v14c0,1.35,1.46,2.19,2.63,1.52,1.17-.67,2.63.17,2.63,1.52v6.93l.02-.02v.61l-.02.02v3.3c0,1.35-1.46,2.19-2.63,1.52l-11.84-6.83c-.54-.31-.88-.89-.88-1.52v-40.94c0-.63.33-1.2.88-1.52l11.29-6.52.55-.32c1.17-.67,2.63.17,2.63,1.52v1.58c0,1.35,1.46,2.19,2.63,1.52l12.13-7c.54-.31.88-.89.88-1.52v-21.66c0-1.35-1.46-2.19-2.63-1.52l-12.99,7.5-.02.02-3.17,1.83-12.16,7.02-3.47,2-11.29,6.51c-.54.31-.88.89-.88,1.52v38.91c0,1.35-1.46,2.19-2.63,1.52l-12.73-7.35c-.54-.31-.88-.89-.88-1.52v-39.99c0-.63.33-1.2.87-1.52l14.09-8.14-.13-.2c27.41-15.73,54.82-31.47,82.23-47.2.54-.31,1.21-.31,1.75,0l72.19,41.67c.54.31.88.89.88,1.52v14.16c0,1.35-1.46,2.19-2.63,1.52l-70.8-40.87c-.54-.31-1.21-.31-1.75,0l-12.13,7c-1.17.67-1.17,2.36,0,3.03l86.42,49.89c.54.31.88.89.88,1.52v13.34c0,1.35-1.46,2.19-2.63,1.52l-12.82-7.4ZM168.25,151.05l11.84,6.83c.54.31.88.89.88,1.52v5.75c0,.63-.33,1.2-.88,1.52l-11.29,6.51-.55.32c-1.17.67-2.63-.17-2.63-1.52v-19.41c0-1.35,1.46-2.19,2.63-1.52Z"/>
</svg>

Before

Width:  |  Height:  |  Size: 3.2 KiB

View File

@ -1,12 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg id="matrica" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 283.46 283.46">
<defs>
<style>
.cls-1 {
fill: #1d1d1b;
stroke-width: 0px;
}
</style>
</defs>
<path class="cls-1" d="M258.86,79c0-.63-.33-1.2-.88-1.52l-14.76-8.52-58.31-33.66-14.76-8.53s-.1-.04-.15-.06l-18.46-10.66s-.08-.07-.13-.1l-12.13-7c-.54-.31-1.21-.31-1.75,0l-14.76,8.52-51.43,29.69-15.64,9.02-15.46,8.93-14.76,8.52c-.54.31-.88.89-.88,1.52v58.64l.03.02v17.4l-.03-.02v53.29c0,.63.33,1.2.88,1.52l14.76,8.52,19.37,11.18h0s35.34,20.4,35.34,20.4l3.6,2.08,14.76,8.53s.08.03.12.05l18.47,10.65c.05.04.1.09.16.12l12.13,7c.54.31,1.21.31,1.75,0l14.76-8.52,51.43-29.69,15.64-9.02,15.46-8.93,14.76-8.52c.54-.31.88-.89.88-1.52v-58.64h-.02c0-5.81.01-11.6.02-17.4v-53.29ZM40.24,84.17c0-.63.33-1.2.88-1.52l14.59-8.42,15.64-9.02,35.66-20.59,15.64-9.04,13.97-8.05c1.07-.62,2.39-.62,3.46,0l11.14,6.43c1.05.61,1.05,2.13,0,2.74l-13.33,7.68-15.64,9.04-50.91,29.39-14.76,8.52c-.54.31-.88.89-.88,1.52v37.8c0,1.35-1.46,2.19-2.63,1.52l-11.96-6.91c-.54-.31-.88-.89-.88-1.52v-39.57ZM242.35,200.81l-14.59,8.42-15.64,9.02-35.66,20.59-15.64,9.04-14.9,8.59c-.51.29-1.14.29-1.65,0l-12.72-7.34c-.76-.44-.76-1.53,0-1.97l14.03-8.09,15.64-9.04,50.91-29.39,14.76-8.52c.54-.31.88-.89.88-1.52v-37.8c.01-.12.12-1.02.91-1.53.68-.43,1.58-.4,2.29.08,3.74,2.25,7.48,4.49,11.22,6.74.17.1.54.36.79.85.27.53.24,1.01.22,1.21.01,13.04.02,26.08.04,39.13,0,.63-.33,1.2-.88,1.52ZM243.22,102.18c0,1.35-1.46,2.19-2.63,1.52l-70.8-40.87c-.54-.31-1.21-.31-1.75,0l-12.13,7c-1.17.67-1.17,2.36,0,3.03l86.42,49.89c.54.31.88.89.88,1.52v13.34c0,1.35-1.46,2.19-2.63,1.52l-12.82-7.4-14.32-8.26s0-.09,0-.14c-20.15-11.65-40.3-23.29-60.46-34.94-.08-.06-.2-.13-.35-.2-.34-.16-.85-.42-1.43-.27-.47.12-.74.44-.82.53-.44.52-.39,1.13-.38,1.26,0,.04-.01.1-.01.16,0,.05,0,.09.01.12,0,36.25,0,72.5,0,108.75-.04.44.17.87.54,1.1.56.36,1.16.06,1.19.05,4.6-2.56,9.24-5.17,13.9-7.83,3.81-2.18,7.59-4.36,11.32-6.54l18.78-10.85c.54-.31.88-.89.88-1.52v-22.78c0-.63-.33-1.2-.88-1.52l-14.76-8.52-14.46-8.34c-.54-.31-.88-.89-.88-1.52v-14.13c0-1.35,1.47-2.2,2.64-1.51l43,25.21c.54.31.87.89.87,1.51v40.02c0,.63-.33,1.2-.88,1.52l-26.78,15.46.05.08-67.42,38.93-.05-.08-1.98,1.15c-.
5.29-1.11.3-1.63.05l-43.24-24.94-29.08-16.78c-.54-.31-.88-.89-.88-1.52v-14.16c0-1.35,1.46-2.19,2.63-1.52l70.66,40.79c.54.31,1.21.31,1.75,0l12.12-7c1.17-.67,1.17-2.36,0-3.03l-86.25-49.81c-.54-.31-.88-.89-.88-1.52v-13.34c0-1.35,1.46-2.19,2.63-1.52l2.56,1.48-.08.13,78.33,45.22.03-.06,7.81,4.51c1.17.67,2.63-.17,2.63-1.52v-64.92c0-1.35-1.46-2.19-2.63-1.52l-13.01,7.51v.02l-4.38,2.53c-.54.31-.88.89-.88,1.52v14c0,1.35,1.46,2.19,2.63,1.52,1.17-.67,2.63.17,2.63,1.52v6.93l.02-.02v.61l-.02.02v3.3c0,1.35-1.46,2.19-2.63,1.52l-11.84-6.83c-.54-.31-.88-.89-.88-1.52v-40.94c0-.63.33-1.2.88-1.52l11.29-6.52.55-.32c1.17-.67,2.63.17,2.63,1.52v1.58c0,1.35,1.46,2.19,2.63,1.52l12.13-7c.54-.31.88-.89.88-1.52v-21.66c0-1.35-1.46-2.19-2.63-1.52l-12.99,7.5-.02.02-3.17,1.83-12.16,7.02-3.47,2-11.29,6.51c-.54.31-.88.89-.88,1.52v38.91c0,1.35-1.46,2.19-2.63,1.52l-12.73-7.35c-.54-.31-.88-.89-.88-1.52v-39.99c0-.63.33-1.2.87-1.52l14.09-8.14-.13-.2c27.41-15.73,54.82-31.47,82.23-47.2.41-.23.88-.29,1.33-.18l48.32,27.9.03-.05,24.26,14.01c.54.31.88.89.88,1.52v14.16ZM165.62,171.97v-19.41c0-1.35,1.46-2.19,2.63-1.52l11.84,6.83c.54.31.88.89.88,1.52v5.75c0,.63-.33,1.2-.88,1.52l-11.29,6.51-.55.32c-1.17.67-2.63-.17-2.63-1.52Z"/>
</svg>

Before

Width:  |  Height:  |  Size: 3.4 KiB

View File

@ -1,12 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg id="matrica" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 283.46 283.46">
<defs>
<style>
.cls-1 {
fill: #009fe3;
stroke-width: 0px;
}
</style>
</defs>
<path class="cls-1" d="M258.86,79c0-.63-.33-1.2-.88-1.52l-14.76-8.52-58.31-33.66-14.76-8.53s-.1-.04-.15-.06l-18.46-10.66s-.08-.07-.13-.1l-12.13-7c-.54-.31-1.21-.31-1.75,0l-14.76,8.52-51.43,29.69-15.64,9.02-15.46,8.93-14.76,8.52c-.54.31-.88.89-.88,1.52v58.64l.03.02v17.4l-.03-.02v53.29c0,.63.33,1.2.88,1.52l14.76,8.52,19.37,11.18h0s35.34,20.4,35.34,20.4l3.6,2.08,14.76,8.53s.08.03.12.05l18.47,10.65c.05.04.1.09.16.12l12.13,7c.54.31,1.21.31,1.75,0l14.76-8.52,51.43-29.69,15.64-9.02,15.46-8.93,14.76-8.52c.54-.31.88-.89.88-1.52v-58.64h-.02c0-5.81.01-11.6.02-17.4v-53.29ZM40.24,84.17c0-.63.33-1.2.88-1.52l14.59-8.42,15.64-9.02,35.66-20.59,15.64-9.04,13.97-8.05c1.07-.62,2.39-.62,3.46,0l11.14,6.43c1.05.61,1.05,2.13,0,2.74l-13.33,7.68-15.64,9.04-50.91,29.39-14.76,8.52c-.54.31-.88.89-.88,1.52v37.8c0,1.35-1.46,2.19-2.63,1.52l-11.96-6.91c-.54-.31-.88-.89-.88-1.52v-39.57ZM242.35,200.81l-14.59,8.42-15.64,9.02-35.66,20.59-15.64,9.04-14.9,8.59c-.51.29-1.14.29-1.65,0l-12.72-7.34c-.76-.44-.76-1.53,0-1.97l14.03-8.09,15.64-9.04,50.91-29.39,14.76-8.52c.54-.31.88-.89.88-1.52v-37.8c.01-.12.12-1.02.91-1.53.68-.43,1.58-.4,2.29.08,3.74,2.25,7.48,4.49,11.22,6.74.17.1.54.36.79.85.27.53.24,1.01.22,1.21.01,13.04.02,26.08.04,39.13,0,.63-.33,1.2-.88,1.52ZM243.22,102.18c0,1.35-1.46,2.19-2.63,1.52l-70.8-40.87c-.54-.31-1.21-.31-1.75,0l-12.13,7c-1.17.67-1.17,2.36,0,3.03l86.42,49.89c.54.31.88.89.88,1.52v13.34c0,1.35-1.46,2.19-2.63,1.52l-12.82-7.4-14.32-8.26s0-.09,0-.14c-20.15-11.65-40.3-23.29-60.46-34.94-.08-.06-.2-.13-.35-.2-.34-.16-.85-.42-1.43-.27-.47.12-.74.44-.82.53-.44.52-.39,1.13-.38,1.26,0,.04-.01.1-.01.16,0,.05,0,.09.01.12,0,36.25,0,72.5,0,108.75-.04.44.17.87.54,1.1.56.36,1.16.06,1.19.05,4.6-2.56,9.24-5.17,13.9-7.83,3.81-2.18,7.59-4.36,11.32-6.54l18.78-10.85c.54-.31.88-.89.88-1.52v-22.78c0-.63-.33-1.2-.88-1.52l-14.76-8.52-14.46-8.34c-.54-.31-.88-.89-.88-1.52v-14.13c0-1.35,1.47-2.2,2.64-1.51l43,25.21c.54.31.87.89.87,1.51v40.02c0,.63-.33,1.2-.88,1.52l-26.78,15.46.05.08-67.42,38.93-.05-.08-1.98,1.15c-.
5.29-1.11.3-1.63.05l-43.24-24.94-29.08-16.78c-.54-.31-.88-.89-.88-1.52v-14.16c0-1.35,1.46-2.19,2.63-1.52l70.66,40.79c.54.31,1.21.31,1.75,0l12.12-7c1.17-.67,1.17-2.36,0-3.03l-86.25-49.81c-.54-.31-.88-.89-.88-1.52v-13.34c0-1.35,1.46-2.19,2.63-1.52l2.56,1.48-.08.13,78.33,45.22.03-.06,7.81,4.51c1.17.67,2.63-.17,2.63-1.52v-64.92c0-1.35-1.46-2.19-2.63-1.52l-13.01,7.51v.02l-4.38,2.53c-.54.31-.88.89-.88,1.52v14c0,1.35,1.46,2.19,2.63,1.52,1.17-.67,2.63.17,2.63,1.52v6.93l.02-.02v.61l-.02.02v3.3c0,1.35-1.46,2.19-2.63,1.52l-11.84-6.83c-.54-.31-.88-.89-.88-1.52v-40.94c0-.63.33-1.2.88-1.52l11.29-6.52.55-.32c1.17-.67,2.63.17,2.63,1.52v1.58c0,1.35,1.46,2.19,2.63,1.52l12.13-7c.54-.31.88-.89.88-1.52v-21.66c0-1.35-1.46-2.19-2.63-1.52l-12.99,7.5-.02.02-3.17,1.83-12.16,7.02-3.47,2-11.29,6.51c-.54.31-.88.89-.88,1.52v38.91c0,1.35-1.46,2.19-2.63,1.52l-12.73-7.35c-.54-.31-.88-.89-.88-1.52v-39.99c0-.63.33-1.2.87-1.52l14.09-8.14-.13-.2c27.41-15.73,54.82-31.47,82.23-47.2.41-.23.88-.29,1.33-.18l48.32,27.9.03-.05,24.26,14.01c.54.31.88.89.88,1.52v14.16ZM165.62,171.97v-19.41c0-1.35,1.46-2.19,2.63-1.52l11.84,6.83c.54.31.88.89.88,1.52v5.75c0,.63-.33,1.2-.88,1.52l-11.29,6.51-.55.32c-1.17.67-2.63-.17-2.63-1.52Z"/>
</svg>

Before

Width:  |  Height:  |  Size: 3.4 KiB

View File

@ -1,12 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg id="matrica" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 283.46 283.46">
<defs>
<style>
.cls-1 {
fill: #fff;
stroke-width: 0px;
}
</style>
</defs>
<path class="cls-1" d="M258.86,79c0-.63-.33-1.2-.88-1.52l-14.76-8.52-58.31-33.66-14.76-8.53s-.1-.04-.15-.06l-18.46-10.66s-.08-.07-.13-.1l-12.13-7c-.54-.31-1.21-.31-1.75,0l-14.76,8.52-51.43,29.69-15.64,9.02-15.46,8.93-14.76,8.52c-.54.31-.88.89-.88,1.52v58.64l.03.02v17.4l-.03-.02v53.29c0,.63.33,1.2.88,1.52l14.76,8.52,19.37,11.18h0s35.34,20.4,35.34,20.4l3.6,2.08,14.76,8.53s.08.03.12.05l18.47,10.65c.05.04.1.09.16.12l12.13,7c.54.31,1.21.31,1.75,0l14.76-8.52,51.43-29.69,15.64-9.02,15.46-8.93,14.76-8.52c.54-.31.88-.89.88-1.52v-58.64h-.02c0-5.81.01-11.6.02-17.4v-53.29ZM40.24,84.17c0-.63.33-1.2.88-1.52l14.59-8.42,15.64-9.02,35.66-20.59,15.64-9.04,13.97-8.05c1.07-.62,2.39-.62,3.46,0l11.14,6.43c1.05.61,1.05,2.13,0,2.74l-13.33,7.68-15.64,9.04-50.91,29.39-14.76,8.52c-.54.31-.88.89-.88,1.52v37.8c0,1.35-1.46,2.19-2.63,1.52l-11.96-6.91c-.54-.31-.88-.89-.88-1.52v-39.57ZM242.35,200.81l-14.59,8.42-15.64,9.02-35.66,20.59-15.64,9.04-14.9,8.59c-.51.29-1.14.29-1.65,0l-12.72-7.34c-.76-.44-.76-1.53,0-1.97l14.03-8.09,15.64-9.04,50.91-29.39,14.76-8.52c.54-.31.88-.89.88-1.52v-37.8c.01-.12.12-1.02.91-1.53.68-.43,1.58-.4,2.29.08,3.74,2.25,7.48,4.49,11.22,6.74.17.1.54.36.79.85.27.53.24,1.01.22,1.21.01,13.04.02,26.08.04,39.13,0,.63-.33,1.2-.88,1.52ZM243.22,102.18c0,1.35-1.46,2.19-2.63,1.52l-70.8-40.87c-.54-.31-1.21-.31-1.75,0l-12.13,7c-1.17.67-1.17,2.36,0,3.03l86.42,49.89c.54.31.88.89.88,1.52v13.34c0,1.35-1.46,2.19-2.63,1.52l-12.82-7.4-14.32-8.26s0-.09,0-.14c-20.15-11.65-40.3-23.29-60.46-34.94-.08-.06-.2-.13-.35-.2-.34-.16-.85-.42-1.43-.27-.47.12-.74.44-.82.53-.44.52-.39,1.13-.38,1.26,0,.04-.01.1-.01.16,0,.05,0,.09.01.12,0,36.25,0,72.5,0,108.75-.04.44.17.87.54,1.1.56.36,1.16.06,1.19.05,4.6-2.56,9.24-5.17,13.9-7.83,3.81-2.18,7.59-4.36,11.32-6.54l18.78-10.85c.54-.31.88-.89.88-1.52v-22.78c0-.63-.33-1.2-.88-1.52l-14.76-8.52-14.46-8.34c-.54-.31-.88-.89-.88-1.52v-14.13c0-1.35,1.47-2.2,2.64-1.51l43,25.21c.54.31.87.89.87,1.51v40.02c0,.63-.33,1.2-.88,1.52l-26.78,15.46.05.08-67.42,38.93-.05-.08-1.98,1.15c-.
5.29-1.11.3-1.63.05l-43.24-24.94-29.08-16.78c-.54-.31-.88-.89-.88-1.52v-14.16c0-1.35,1.46-2.19,2.63-1.52l70.66,40.79c.54.31,1.21.31,1.75,0l12.12-7c1.17-.67,1.17-2.36,0-3.03l-86.25-49.81c-.54-.31-.88-.89-.88-1.52v-13.34c0-1.35,1.46-2.19,2.63-1.52l2.56,1.48-.08.13,78.33,45.22.03-.06,7.81,4.51c1.17.67,2.63-.17,2.63-1.52v-64.92c0-1.35-1.46-2.19-2.63-1.52l-13.01,7.51v.02l-4.38,2.53c-.54.31-.88.89-.88,1.52v14c0,1.35,1.46,2.19,2.63,1.52,1.17-.67,2.63.17,2.63,1.52v6.93l.02-.02v.61l-.02.02v3.3c0,1.35-1.46,2.19-2.63,1.52l-11.84-6.83c-.54-.31-.88-.89-.88-1.52v-40.94c0-.63.33-1.2.88-1.52l11.29-6.52.55-.32c1.17-.67,2.63.17,2.63,1.52v1.58c0,1.35,1.46,2.19,2.63,1.52l12.13-7c.54-.31.88-.89.88-1.52v-21.66c0-1.35-1.46-2.19-2.63-1.52l-12.99,7.5-.02.02-3.17,1.83-12.16,7.02-3.47,2-11.29,6.51c-.54.31-.88.89-.88,1.52v38.91c0,1.35-1.46,2.19-2.63,1.52l-12.73-7.35c-.54-.31-.88-.89-.88-1.52v-39.99c0-.63.33-1.2.87-1.52l14.09-8.14-.13-.2c27.41-15.73,54.82-31.47,82.23-47.2.41-.23.88-.29,1.33-.18l48.32,27.9.03-.05,24.26,14.01c.54.31.88.89.88,1.52v14.16ZM165.62,171.97v-19.41c0-1.35,1.46-2.19,2.63-1.52l11.84,6.83c.54.31.88.89.88,1.52v5.75c0,.63-.33,1.2-.88,1.52l-11.29,6.51-.55.32c-1.17.67-2.63-.17-2.63-1.52Z"/>
</svg>

Before

Width:  |  Height:  |  Size: 3.4 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 38 KiB

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,63 @@
# This image is for build purposes only; it is used to build the G6 node binary.
# Build it with docker buildx, forwarding the host user/group ids as build args:
#   docker buildx build --build-arg USER_ID=$(id -u) --build-arg GROUP_ID=$(id -g) -t g6_node_builder:1.0 .
# Run it like this:
#   docker run -it --name g6_node_builder --hostname g6 --rm -v $(pwd):/home/g6/workspace -v $HOME/runner/cargo_cache/:/home/g6/.cargo g6_node_builder:1.0
FROM debian:bookworm
# Install the system dependencies needed to compile a Substrate-based node.
# The apt lists are removed in the same layer so they do not bloat the image.
RUN apt-get update && \
    apt-get upgrade -y && \
    apt-get install -y \
        build-essential \
        clang \
        curl \
        git \
        make \
        libssl-dev \
        protobuf-compiler \
        llvm \
        libudev-dev && \
    rm -rf /var/lib/apt/lists/*
# Create a non-root user and group to run as, for security reasons.
# Their ids match the host user's (passed as build args) so files written to
# the mounted workspace keep the host user's ownership.
ARG USER_ID
ARG GROUP_ID
# Create the group and user in a single RUN so the home directory is set up correctly.
RUN groupadd -g $GROUP_ID node && \
    useradd -m -s /bin/bash -u $USER_ID -g $GROUP_ID g6
USER g6:node
# Set the working directory (the host workspace is mounted here at run time).
WORKDIR /home/g6/workspace
# Convenience aliases for interactive shells; one layer instead of three.
RUN echo "alias ll='ls -la'" >> /home/g6/.bashrc && \
    echo "alias ..='cd ..'" >> /home/g6/.bashrc && \
    echo "alias ...='cd ../..'" >> /home/g6/.bashrc
# Set the environment
ENV USER=g6
ENV GROUP=node
ENV HOME=/home/g6
ENV PATH=$HOME/.cargo/bin:$PATH
# Install Rust via rustup and source the cargo env in interactive shells.
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | bash -s -- -y
RUN echo 'source $HOME/.cargo/env' >> $HOME/.bashrc
# Stable toolchain with the wasm target (required to build the runtime).
RUN rustup default stable && \
    rustup update && \
    rustup target add wasm32-unknown-unknown
# Nightly toolchain with the wasm target as well.
RUN rustup update nightly && \
    rustup target add wasm32-unknown-unknown --toolchain nightly
# Extra components and the audit tool used by CI.
RUN rustup component add rust-src && \
    rustup component add clippy && \
    cargo install cargo-audit

View File

@ -0,0 +1,20 @@
# Compose definition for the interactive build container that compiles the node binary.
services:
  ##################################################
  # Node builder
  # Image name/version come from the private Nexus registry and are
  # overridable via environment variables (sensible defaults provided).
  node_builder:
    image: nexus.g6.network:10000/${NODE_BUILDER_IMAGE_NAME:-g6_node_builder}:${NODE_BUILDER_IMAGE_VERSION:-latest}
    build:
      context: ${NODE_BUILDER_IMAGE_CONTEXT:-./}
      dockerfile: Dockerfile
      # Host user/group ids are forwarded so files created in the mounted
      # workspace keep the host user's ownership (defaults to 1000:1000).
      args:
        USER_ID: ${USER_ID:-1000}
        GROUP_ID: ${GROUP_ID:-1000}
      platforms:
        - "linux/amd64"
    container_name: ${NODE_BUILDER_CONTAINER_NAME:-g6_node_builder}
    user: g6
    hostname: node
    # Keep stdin open and allocate a TTY: this container is used interactively.
    stdin_open: true
    tty: true
    volumes:
      # Mount the repository (two levels up by default) as the build workspace.
      - "${SHARED_DIR_SOLO_CHAIN_BUILDER:-../../}:/home/g6/workspace"

View File

@ -0,0 +1,20 @@
# Compose definition for the CI-pipeline variant of the node build container.
services:
  ##################################################
  # Node builder
  node_builder_pipeline:
    image: nexus.g6.network:10000/${NODE_BUILDER_IMAGE_NAME:-g6_node_builder_pipeline}:${NODE_BUILDER_IMAGE_VERSION:-latest}
    build:
      context: ${NODE_BUILDER_IMAGE_CONTEXT:-./}
      dockerfile: Dockerfile
      # NOTE(review): unlike the local builder compose file, USER_ID/GROUP_ID
      # have no :-1000 fallbacks here — presumably the CI runner always sets
      # them. Confirm, or the build args expand to empty strings.
      args:
        USER_ID: ${USER_ID}
        GROUP_ID: ${GROUP_ID}
      platforms:
        - "linux/amd64"
    container_name: ${NODE_BUILDER_CONTAINER_NAME:-g6_node_builder_pipeline}
    user: g6
    hostname: node
    # Keep stdin open and allocate a TTY for interactive debugging of CI builds.
    stdin_open: true
    tty: true
    volumes:
      # Mount the repository (two levels up by default) as the build workspace.
      - "${SHARED_DIR_SOLO_CHAIN_BUILDER:-../../}:/home/g6/workspace"

View File

@ -0,0 +1,39 @@
# This image is for run purposes only; it is used to run the node binary.
# Build it with docker buildx, forwarding the host user/group ids as build args:
#   docker buildx build --build-arg USER_ID=$(id -u) --build-arg GROUP_ID=$(id -g) -t g6_node_runner:1.0 .
# Run it like this:
#   docker run -it --name g6_node_runner --hostname g6 --rm -v $(pwd):/home/g6/workspace -v $HOME/runner/cargo_cache/:/home/g6/.cargo g6_node_runner:1.0
FROM debian:bookworm
# Install only the runtime system dependencies (no compilers needed here).
# The apt lists are removed in the same layer so they do not bloat the image.
RUN apt-get update && \
    apt-get upgrade -y && \
    apt-get install -y \
        libssl-dev \
        protobuf-compiler \
        llvm \
        libudev-dev && \
    rm -rf /var/lib/apt/lists/*
# Create a non-root user and group to run as, for security reasons.
# Their ids match the host user's (passed as build args) so files written to
# the mounted workspace keep the host user's ownership.
ARG USER_ID
ARG GROUP_ID
RUN groupadd -g $GROUP_ID node && \
    useradd -m -s /bin/bash -u $USER_ID -g $GROUP_ID g6
USER g6:node
# Set the working directory (the node binary and chain data are mounted here).
WORKDIR /home/g6/workspace
# Convenience aliases for interactive shells; one layer instead of three.
RUN echo "alias ll='ls -la'" >> /home/g6/.bashrc && \
    echo "alias ..='cd ..'" >> /home/g6/.bashrc && \
    echo "alias ...='cd ../..'" >> /home/g6/.bashrc
# Set the environment
ENV USER=g6
ENV GROUP=node
ENV HOME=/home/g6

View File

@ -0,0 +1,32 @@
# Compose file for running the node binary built by the builder image.
services:
  ##################################################
  # Node runner
  node_runner:
    image: nexus.g6.network:10000/g6_node_runner:${NODE_RUNNER_IMAGE_VERSION:-latest}
    build:
      context: ${NODE_RUNNER_IMAGE_CONTEXT:-./}
      dockerfile: Dockerfile
      args:
        USER_ID: ${USER_ID:-1000}
        GROUP_ID: ${GROUP_ID:-1000}
      platforms:
        - "linux/amd64"
    # FIX: this service previously read NODE_BUILDER_CONTAINER_NAME (copy/paste from the
    # builder compose file); use the runner-specific variable. The default is unchanged.
    container_name: ${NODE_RUNNER_CONTAINER_NAME:-g6_node_runner}
    user: g6
    stdin_open: true
    tty: true
    volumes:
      - "${SHARED_DIR_NODE_RUNNER:-../../}:/home/g6/workspace"
    ports:
      - "9615:9615"   # Prometheus metrics
      - "9933:9933"
      - "9944:9944"   # RPC
      - "30333:30333" # p2p
    command: /home/g6/workspace/${NODE_RUNNER_BINARY_NAME:-g6-solo-node} --base-path ./chain-data/${CHAIN_DATA_DIR_NAME:-alice} --name "${NODE_NAME_VALIDATOR:-alice-validator}" --port ${NODE_PORT:-30300} --rpc-port ${NODE_RPC_PORT:-9944} --node-key "${NODE_KEY:-0000000000000000000000000000000000000000000000000000000000000001}" --validator #commented rest of command to discuss with serge --insecure-validator-i-know-what-i-do --unsafe-rpc-external --rpc-cors all
    networks:
      - "g6_blockchain_nodes_network"
    restart: always
# Nets
networks:
  g6_blockchain_nodes_network:
    external: true

View File

@ -0,0 +1,44 @@
# Two-validator local demo (Alice + Bob) sharing one docker network.
services:
  ##################################################
  # Node Runner
  node_demo_alice:
    image: nexus.g6.network:10000/g6_node_runner:${NODE_RUNNER_DEMO_IMAGE_VERSION:-latest}
    container_name: ${NODE_RUNNER_DEMO_CONTAINER_NAME:-node_demo_alice}
    user: g6
    stdin_open: true
    tty: true
    volumes:
      - "${SHARED_DIR_NODE_A:-../../../}:/home/g6/workspace"
    ports:
      - "9615:9615"
      - "9933:9933"
      - "9944:9944"
      - "30333:30333"
    command: /home/g6/workspace/${NODE_RUNNER_BINARY_NAME:-g6-solo-node} --alice --chain local --base-path ./chain-data/alice --name "alice-validator" --port ${NODE_PORT:-30300} --rpc-port ${NODE_RPC_PORT:-9944} --node-key "${NODE_KEY:-0000000000000000000000000000000000000000000000000000000000000001}" --validator --insecure-validator-i-know-what-i-do --unsafe-rpc-external --rpc-cors all
    networks:
      - "g6_blockchain_nodes_network"
    restart: always
  ##################################################
  # Node B
  node_demo_bob:
    image: nexus.g6.network:10000/g6_node_runner:${NODE_RUNNER_DEMO_IMAGE_VERSION:-latest}
    # FIX: bob previously reused NODE_RUNNER_DEMO_CONTAINER_NAME — setting that variable
    # gave both services the same container name and the second one failed to start.
    container_name: ${NODE_RUNNER_DEMO_CONTAINER_NAME_BOB:-node_demo_bob}
    user: g6
    stdin_open: true
    tty: true
    volumes:
      # NOTE(review): bob mounts SHARED_DIR_NODE_A like alice — presumably intentional
      # (base paths differ: chain-data/alice vs chain-data/bob), but confirm a
      # SHARED_DIR_NODE_B was not intended here.
      - "${SHARED_DIR_NODE_A:-../../../}:/home/g6/workspace"
    ports:
      - "9616:9615"
      - "9934:9933"
      - "9945:9944"
      - "30334:30333"
    command: /home/g6/workspace/${NODE_RUNNER_BINARY_NAME:-g6-solo-node} --bob --chain local --base-path ./chain-data/bob --name "bob-validator" --port ${NODE_PORT:-30300} --rpc-port ${NODE_RPC_PORT:-9944} --node-key "${NODE_KEY:-0000000000000000000000000000000000000000000000000000000000000002}" --validator --insecure-validator-i-know-what-i-do --unsafe-rpc-external --rpc-cors all
    networks:
      - "g6_blockchain_nodes_network"
    restart: always
##################################################
# Nets
networks:
  g6_blockchain_nodes_network:
    external: true

View File

@ -0,0 +1,48 @@
# A guide how to add validators to the network
## Key generation
The validator should generate and insert aura, grandpa and node keys with the [generate_validator_keys.sh script](https://g.g6.network/g6-chain/Scripts/-/tree/main/keys)
Keep all the private keys safe. They should remain secret and only the owner should have access to them.
You should see a generated folder and file structure similar to this:
```shell
data % tree
.
├── aura-sr25519.json
├── chains
│   └── gen6
│   ├── keystore
│   │   ├── 6175726108fd5c07440953c6b8d920e196d4a02fbbfdd2fa2668b6c16aabb8a2280b7f24
│   │   └── 6772616e134dad80895e7ea54c99c1d673a177450674de2a95ad35d1f833734bfcc97bbb
│   └── network
│   └── secret_ed25519
├── grandpa-ed25519.json
├── node-key-ed25519.json
└── peer-id.txt
5 directories, 7 files
```
## Extrinsic calls
### Sudo calls
This step can only be done by the owner of the sudo keys of the blockchain.
Sudo should add the aura ss58Address to the validator set through the extrinsic
`sudo.sudo( substrateValidatorSet.addValidator( <new_validator_aura_ss58Address> ) )`
### Validator key Calls
This step is done by the owner of the validator keys.
The owner of the validator keys should add his aura keys to a wallet extension of his choice in order to sign the message later.
The owner of the validator keys should call the extrinsic substrateValidatorSet.start.
Aura "publicKey" and grandpa "publicKey" should be submitted as parameters to the extrinsic and the transaction should be signed with the previously added **aura** keys
Validator aura key calls `substrateValidatorSet.start(auraPublicKey, grandpaPublicKey)`
![validator.substrateValidatorSet.start.png](pictures/validator.substrateValidatorSet.start.png)
![validator.substrateValidatorSet.start_3.png](pictures/validator.substrateValidatorSet.start_3.png)
![validator.substrateValidatorSet.start_2.png](pictures/validator.substrateValidatorSet.start_2.png)

View File

@ -0,0 +1,12 @@
# A guide how to chill and restore a validator
In order to stop the validator you need to submit and sign the transaction of the chillMe extrinsic.
The validator cannot stop itself immediately and needs to wait a specified amount of time before going down.
The "ChillTerm" period is specified in the runtime configuration. [View runtime lib.rs](../runtime/src/lib.rs)
![validator.substrateValidatorSet.chillMe.png](pictures/validator.substrateValidatorSet.chillMe.png)
For going back online there is the restore() extrinsic
![validator.substrateValidatorSet.restore.png](pictures/validator.substrateValidatorSet.restore.png)

View File

@ -0,0 +1,27 @@
# How to do a runtime upgrade
- You need to possess the sudo keys for the chain
- Make sure the runtime spec_version is incremented
- You need to compile the newest chain. A `cargo clean` before that would be advisable.
```shell
cargo clean
```
```shell
cargo build --release
```
- You need to call the sudo.sudo(call) on system.setCode(code) extrinsic with the sudo keys
`sudo.sudo(system.setCode(your_chain.compact.compressed.wasm)) `
- As the "code" argument you need to upload the `g6_solo_runtime.compact.compressed.wasm` from the target folder `target/release/wbuild/g6-solo-runtime/`
![sudo.sudo.system.setCode.png](pictures/sudo.sudo.system.setCode.png)
- To confirm the upgrade was successful you should see the new spec_version in the PJS UI top left
Eg. before: `g6-solo-chain/102`. After `g6-solo-chain/103`
![runtime.upgrade.successful.png](pictures/runtime.upgrade.successful.png)
## More info
https://docs.polkadot.com/develop/parachains/maintenance/runtime-upgrades/

Binary file not shown.

After

Width:  |  Height:  |  Size: 39 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 815 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 442 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 251 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 244 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 413 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 177 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 232 KiB

218
docs/rust-setup.md 100644
View File

@ -0,0 +1,218 @@
# Installation
This guide is for reference only, please check the latest information on getting started with Substrate [here](https://docs.substrate.io/main-docs/install/).
This page will guide you through the **2 steps** needed to prepare a computer for **Substrate** development. Since
Substrate is built with [the Rust programming language](https://www.rust-lang.org/), the first thing you will need to do
is prepare the computer for Rust development - these steps will vary based on the computer's operating system. Once Rust
is configured, you will use its toolchains to interact with Rust projects; the commands for Rust's toolchains will be
the same for all supported, Unix-based operating systems.
## Build dependencies
Substrate development is easiest on Unix-based operating systems like macOS or Linux. The examples in the [Substrate
Docs](https://docs.substrate.io) use Unix-style terminals to demonstrate how to interact with Substrate from the command
line.
### Ubuntu/Debian
Use a terminal shell to execute the following commands:
```bash
sudo apt update
# May prompt for location information
sudo apt install -y git clang curl libssl-dev llvm libudev-dev
```
### Arch Linux
Run these commands from a terminal:
```bash
pacman -Syu --needed --noconfirm curl git clang
```
### Fedora
Run these commands from a terminal:
```bash
sudo dnf update
sudo dnf install clang curl git openssl-devel
```
### OpenSUSE
Run these commands from a terminal:
```bash
sudo zypper install clang curl git openssl-devel llvm-devel libudev-devel
```
### macOS
> **Apple M1 ARM** If you have an Apple M1 ARM system on a chip, make sure that you have Apple Rosetta 2 installed
> through `softwareupdate --install-rosetta`. This is only needed to run the `protoc` tool during the build. The build
> itself and the target binaries would remain native.
Open the Terminal application and execute the following commands:
```bash
# Install Homebrew if necessary https://brew.sh/
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
# Make sure Homebrew is up-to-date, install openssl
brew update
brew install openssl
```
### Windows
**_PLEASE NOTE:_** Native Windows development of Substrate is _not_ very well supported! It is _highly_
recommended to use [Windows Subsystem Linux](https://docs.microsoft.com/en-us/windows/wsl/install-win10)
(WSL) and follow the instructions for [Ubuntu/Debian](#ubuntudebian).
Please refer to the separate
[guide for native Windows development](https://docs.substrate.io/main-docs/install/windows/).
## Rust developer environment
This guide uses <https://rustup.rs> installer and the `rustup` tool to manage the Rust toolchain. First install and
configure `rustup`:
```bash
# Install
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
# Configure
source ~/.cargo/env
```
Configure the Rust toolchain to default to the latest stable version, add nightly and the nightly wasm target:
```bash
rustup default stable
rustup update
rustup update nightly
rustup target add wasm32-unknown-unknown --toolchain nightly
```
## Test your set-up
Now the best way to ensure that you have successfully prepared a computer for Substrate development is to follow the
steps in [our first Substrate tutorial](https://docs.substrate.io/tutorials/v3/create-your-first-substrate-chain/).
## Troubleshooting Substrate builds
Sometimes you can't get the Substrate node template to compile out of the box. Here are some tips to help you work
through that.
### Rust configuration check
To see what Rust toolchain you are presently using, run:
```bash
rustup show
```
This will show something like this (Ubuntu example) output:
```text
Default host: x86_64-unknown-linux-gnu
rustup home: /home/user/.rustup
installed toolchains
--------------------
stable-x86_64-unknown-linux-gnu (default)
nightly-2020-10-06-x86_64-unknown-linux-gnu
nightly-x86_64-unknown-linux-gnu
installed targets for active toolchain
--------------------------------------
wasm32-unknown-unknown
x86_64-unknown-linux-gnu
active toolchain
----------------
stable-x86_64-unknown-linux-gnu (default)
rustc 1.50.0 (cb75ad5db 2021-02-10)
```
As you can see above, the default toolchain is stable, and the `nightly-x86_64-unknown-linux-gnu` toolchain as well as
its `wasm32-unknown-unknown` target is installed. You also see that `nightly-2020-10-06-x86_64-unknown-linux-gnu` is
installed, but is not used unless explicitly defined as illustrated in the [specify your nightly
version](#specifying-nightly-version) section.
### WebAssembly compilation
Substrate uses [WebAssembly](https://webassembly.org) (Wasm) to produce portable blockchain runtimes. You will need to
configure your Rust compiler to use [`nightly` builds](https://doc.rust-lang.org/book/appendix-07-nightly-rust.html) to
allow you to compile Substrate runtime code to the Wasm target.
> There are upstream issues in Rust that need to be resolved before all of Substrate can use the stable Rust toolchain.
> [This is our tracking issue](https://github.com/paritytech/substrate/issues/1252) if you're curious as to why and how
> this will be resolved.
#### Latest nightly for Substrate `master`
Developers who are building Substrate _itself_ should always use the latest bug-free versions of Rust stable and
nightly. This is because the Substrate codebase follows the tip of Rust nightly, which means that changes in Substrate
often depend on upstream changes in the Rust nightly compiler. To ensure your Rust compiler is always up to date, you
should run:
```bash
rustup update
rustup update nightly
rustup target add wasm32-unknown-unknown --toolchain nightly
```
> NOTE: It may be necessary to occasionally rerun `rustup update` if a change in the upstream Substrate codebase depends
> on a new feature of the Rust compiler. When you do this, both your nightly and stable toolchains will be pulled to the
> most recent release, and for nightly, it is generally _not_ expected to compile WASM without error (although it very
> often does). Be sure to [specify your nightly version](#specifying-nightly-version) if you get WASM build errors from
> `rustup` and [downgrade nightly as needed](#downgrading-rust-nightly).
#### Rust nightly toolchain
If you want to guarantee that your build works on your computer as you update Rust and other dependencies, you should
use a specific Rust nightly version that is known to be compatible with the version of Substrate they are using; this
version will vary from project to project and different projects may use different mechanisms to communicate this
version to developers. For instance, the Polkadot client specifies this information in its [release
notes](https://github.com/paritytech/polkadot-sdk/releases).
```bash
# Specify the specific nightly toolchain in the date below:
rustup install nightly-<yyyy-MM-dd>
```
#### Wasm toolchain
Now, configure the nightly version to work with the Wasm compilation target:
```bash
rustup target add wasm32-unknown-unknown --toolchain nightly-<yyyy-MM-dd>
```
### Specifying nightly version
Use the `WASM_BUILD_TOOLCHAIN` environment variable to specify the Rust nightly version a Substrate project should use
for Wasm compilation:
```bash
WASM_BUILD_TOOLCHAIN=nightly-<yyyy-MM-dd> cargo build --release
```
> Note that this only builds _the runtime_ with the specified nightly. The rest of project will be compiled with **your
> default toolchain**, i.e. the latest installed stable toolchain.
### Downgrading Rust nightly
If your computer is configured to use the latest Rust nightly and you would like to downgrade to a specific nightly
version, follow these steps:
```bash
rustup uninstall nightly
rustup install nightly-<yyyy-MM-dd>
rustup target add wasm32-unknown-unknown --toolchain nightly-<yyyy-MM-dd>
```

43
flake.lock 100644
View File

@ -0,0 +1,43 @@
{
"nodes": {
"flake-utils": {
"locked": {
"lastModified": 1678901627,
"narHash": "sha256-U02riOqrKKzwjsxc/400XnElV+UtPUQWpANPlyazjH0=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "93a2b84fc4b70d9e089d029deacc3583435c2ed6",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1679262748,
"narHash": "sha256-DQCrrAFrkxijC6haUzOC5ZoFqpcv/tg2WxnyW3np1Cc=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "60c1d71f2ba4c80178ec84523c2ca0801522e0a6",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

22
flake.nix 100644
View File

@ -0,0 +1,22 @@
# Nix flake providing a development shell with the toolchain needed to build the node.
{
  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
    flake-utils.url = "github:numtide/flake-utils";
  };
  outputs = { self, nixpkgs, flake-utils, ... }: flake-utils.lib.eachDefaultSystem (system:
    let
      pkgs = import nixpkgs { inherit system; };
    in
    {
      # `nix develop` enters a shell with these packages on PATH.
      devShells.default = pkgs.mkShell {
        packages = with pkgs; [
          rustup
          clang
          protobuf
        ];
        # NOTE(review): presumably consumed by bindgen-style build scripts to
        # locate libclang — confirm which crate requires it.
        LIBCLANG_PATH = "${pkgs.libclang.lib}/lib";
      };
    });
}

View File

@ -0,0 +1,7 @@
Package: g6-solo-chain
Version: VERSION_PLACEHOLDER
Maintainer: Professor Chaos and General Disarray <johan@g6.network>
Architecture: all
Description: G6 solo chain.
Homepage: https://g.g6.network/g6-chain/solo-chain
Section: g6

View File

@ -0,0 +1,4 @@
# Debian maintainer script (post-install). Service enablement is still TBD —
# the systemctl call is intentionally commented out until the docker-vs-host
# question is resolved.
echo "TBA if running from docker or host"
# echo "Enabling g6-solo-chain service"
# systemctl enable g6-solo-chain

View File

@ -0,0 +1,20 @@
# systemd unit running the G6 solo-chain validator node.
[Unit]
Description=G6 Solo Blockchain
After=network.target

[Service]
# NOTE(review): assumes the g6user account and the /g6/gsvid environment file
# (providing NODE_NAME) are created by provisioning — confirm.
User=g6user
EnvironmentFile=/g6/gsvid
ExecStart=/usr/local/sbin/g6-solo-node \
    --base-path "/g6/public-chain/" \
    --chain "/g6/public-chain/live-raw-solochain-chainspec.json" \
    --port 30300 \
    --rpc-port 9944 \
    --validator \
    --name "$NODE_NAME"
# Restart automatically after a crash, waiting five seconds between attempts.
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target

191
g6_swiss_army.sh 100755
View File

@ -0,0 +1,191 @@
#!/bin/bash
# g6_swiss_army.sh — build, deploy, undeploy and package helper for the G6 solo chain.
set -e
# Resolve the directory this script lives in so relative paths work from any CWD.
real_path=$(realpath $0)
base_path=$(dirname ${real_path})
# Name of the binary produced by `cargo build --release`.
rust_output_binary_name="g6-solo-node"
temporal_dir=${base_path}/tmp/
# Debian packaging layout: binary under usr/local/sbin, chainspecs under etc/g6.
deb_package_name=g6-solo-chain
deb_dir_tree_sbin=${base_path}/${deb_package_name}/usr/local/sbin
chainspec_dir=${base_path}/chainspecs/live
deb_dir_chainspec=${base_path}/${deb_package_name}/etc/g6/public-chain/chainspec
# Filled in by get_nexus_credentials() before the upload step.
nexus_user=""
nexus_pass=""
NEXUS_URL="https://nexus.g6.network/repository/g6_os_apt_packages/"
CREDENTIALS_FILE="nexus_credentials.env"
# CI sets a suffix to select pipeline-specific compose files/services.
PIPELINE_SUFFIX=${PIPELINE_SUFFIX:-""}
# Print the help text and terminate with a non-zero status.
function usage() {
    cat <<USAGE
Usage: $0 [OPTIONS]
Options:
 -b, --build Build the g6-solo-chain binary
 -c, --clean-build Clean build the g6-solo-chain binary (runs 'cargo clean' first)
 -d, --deploy Deploy the Docker services
 -u, --undeploy Stop and remove Docker Compose services
 -p, --package VERSION Build and upload the deb package to Nexus with specified VERSION
 -h, --help Show this help message
USAGE
    exit 1
}
# Validate that a version string matches the major.minor.bugfix scheme;
# abort the whole script otherwise.
function validate_version() {
    local candidate=$1
    echo "Validating version: $candidate"
    if [[ $candidate =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
        return 0
    fi
    echo "Error: Invalid version format. Use major.minor.bugfix (e.g., 1.0.1)."
    exit 1
}
# Read Nexus credentials
# Populates the global nexus_user/nexus_pass, preferring the credentials file
# and falling back to the NEXUS_USER/NEXUS_PASSWORD environment variables.
function get_nexus_credentials() {
    if [[ -f "$CREDENTIALS_FILE" ]]; then
        # Read the credentials file, skipping comment lines
        local lines=()
        while IFS= read -r line; do
            [[ "$line" =~ ^#.*$ || -z "$line" ]] && continue # Skip comments and empty lines
            lines+=("$line")
        done <"$CREDENTIALS_FILE"
        # Assign user and pass from the first two non-comment lines
        nexus_user="${lines[0]}"
        nexus_pass="${lines[1]}"
    fi
    echo "Passed first stage"
    # Check environment variables if credentials are not found in the file
    if [[ -z "$nexus_user" || -z "$nexus_pass" ]]; then
        nexus_user="${NEXUS_USER:-}"
        nexus_pass="${NEXUS_PASSWORD:-}"
    fi
    echo "Passed second stage"
}
# Build function
# Starts the builder container (idempotent) and compiles the release binary inside it.
function build() {
    echo "Building g6-solo-chain binary..."
    echo "Using the following Docker Compose file: docker/builder/docker-compose${PIPELINE_SUFFIX}.yml"
    docker compose -f docker/builder/docker-compose${PIPELINE_SUFFIX}.yml up -d
    docker compose -f docker/builder/docker-compose${PIPELINE_SUFFIX}.yml exec node_builder${PIPELINE_SUFFIX} bash -c "cargo build --release"
    echo "Build completed."
}
# Clean build function
# Same as build(), but runs `cargo clean` first for a from-scratch compilation.
function clean_build() {
    echo "Performing a clean build for g6-solo-chain binary..."
    echo "Using the following Docker Compose file: docker/builder/docker-compose${PIPELINE_SUFFIX}.yml"
    docker compose -f docker/builder/docker-compose${PIPELINE_SUFFIX}.yml up -d
    docker compose -f docker/builder/docker-compose${PIPELINE_SUFFIX}.yml exec node_builder${PIPELINE_SUFFIX} bash -c "cargo clean && cargo build --release"
    echo "Clean build completed."
}
# Deploy function
# Brings up the builder compose services in the background.
function deploy() {
    echo "Deploying Docker services..."
    echo "Using the following Docker Compose file: docker/builder/docker-compose${PIPELINE_SUFFIX}.yml"
    docker compose -f docker/builder/docker-compose${PIPELINE_SUFFIX}.yml up -d
    echo "Deploy completed."
}
# Undeploy function
# Stops and then removes the builder compose services and their resources.
function undeploy() {
    echo "Stopping and removing Docker services..."
    echo "Using the following Docker Compose file: docker/builder/docker-compose${PIPELINE_SUFFIX}.yml"
    docker compose -f docker/builder/docker-compose${PIPELINE_SUFFIX}.yml stop
    docker compose -f docker/builder/docker-compose${PIPELINE_SUFFIX}.yml down
    echo "Undeploy completed."
}
# Package function
# Builds the release binary, assembles the Debian package tree, uploads the .deb
# to Nexus and cleans up. Requires VERSION_DEB_PACKAGE (set by -p/--package).
function package() {
    # Fail fast with a clear message when no version was supplied.
    if [[ -z "${VERSION_DEB_PACKAGE:-}" ]]; then
        echo "Error: VERSION_DEB_PACKAGE is not set. Pass it via -p/--package VERSION."
        exit 1
    fi
    # Parse version from control file
    local control_file="${base_path}/${deb_package_name}/DEBIAN/control"
    if [[ ! -f "$control_file" ]]; then
        echo "Error: Control file not found at $control_file."
        exit 1
    fi
    validate_version "$VERSION_DEB_PACKAGE" # Ensure extracted version is valid
    echo "Packaging g6-solo-chain version $VERSION_DEB_PACKAGE..."
    mkdir -p ${deb_dir_tree_sbin}
    build # Ensure the binary is built before packaging
    # Copy binary
    cp ${base_path}/target/release/${rust_output_binary_name} ${deb_dir_tree_sbin}/
    # Copy chainspecs/
    mkdir -p ${deb_dir_chainspec}
    cp $chainspec_dir/* ${deb_dir_chainspec}/
    # Include version in package name
    echo "VERSION_DEB_PACKAGE: $VERSION_DEB_PACKAGE"
    # Set version value, build the package, then restore the placeholder.
    sed -i s/VERSION_PLACEHOLDER/$VERSION_DEB_PACKAGE/ $base_path/$deb_package_name/DEBIAN/control
    dpkg-deb --build ${deb_package_name}
    sed -i s/$VERSION_DEB_PACKAGE/VERSION_PLACEHOLDER/ $base_path/$deb_package_name/DEBIAN/control
    # BUG FIX: the original referenced the undefined lowercase ${version} in the
    # mv/curl/rm lines, producing a file literally named "g6-solo-chain_.deb".
    mv "${deb_package_name}.deb" "${deb_package_name}_${VERSION_DEB_PACKAGE}.deb"
    # Get Nexus credentials
    get_nexus_credentials
    echo "Obtained credentials: ${nexus_user}"
    echo "Uploading to Nexus..."
    # NOTE(review): without `--fail`, curl exits 0 on HTTP errors, so `set -e`
    # will not catch a rejected upload — consider adding it.
    curl -u "${nexus_user}:${nexus_pass}" -H "Content-Type: multipart/form-data" --data-binary "@./${deb_package_name}_${VERSION_DEB_PACKAGE}.deb" "${NEXUS_URL}"
    echo "Cleaning up..."
    rm -f "${deb_package_name}_${VERSION_DEB_PACKAGE}.deb"
    rm -rf ${temporal_dir}
    rm -rf ${deb_dir_chainspec}/*
    echo "Package version $VERSION_DEB_PACKAGE uploaded successfully."
}
# Parse options
# Each option runs its action and exits; no option at all prints usage.
if [[ "$#" -eq 0 ]]; then
    usage
fi
while [[ "$#" -gt 0 ]]; do
    case "$1" in
    -b | --build)
        build
        exit 0
        ;;
    -c | --clean-build)
        clean_build
        exit 0
        ;;
    -d | --deploy)
        deploy
        exit 0
        ;;
    -u | --undeploy)
        undeploy
        exit 0
        ;;
    -p | --package)
        # BUG FIX: the VERSION argument was shifted away but never stored, so
        # package() always saw an empty VERSION_DEB_PACKAGE. Capture it here
        # and reject a missing value explicitly.
        shift
        if [[ -z "${1:-}" ]]; then
            echo "Error: -p/--package requires a VERSION argument."
            usage
        fi
        VERSION_DEB_PACKAGE="$1"
        package
        exit 0
        ;;
    -h | --help)
        usage
        ;;
    *)
        echo "Unknown option: $1"
        usage
        ;;
    esac
    shift
done

View File

@ -0,0 +1,25 @@
# Manifest of the chain-wide constants crate shared by the node and the runtime.
[package]
name = "network_constants"
# TODO(review): placeholder description — fill in before publishing.
description = "description ..."
version.workspace = true
homepage.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
edition.workspace = true
rust-version.workspace = true

[lints]
workspace = true

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

[dependencies]

[features]
default = ["std"]
# The crate is no_std-capable; `std` currently enables nothing (empty list) but
# keeps the feature name available to dependents.
std = [
]

View File

@ -0,0 +1,19 @@
#![cfg_attr(not(feature = "std"), no_std)]
//! Chain-wide constants shared between the node and the runtime.

/// On-chain ticker symbol of the native token.
pub const TOKEN_SYMBOL: &str = "GSX";
/// Number of decimal places of the token; one token equals 10^18 atomic units.
pub const TOKEN_DECIMALS: u8 = 18;
/// SS58 address-format prefix used by this network.
pub const SS58FORMAT: u16 = 355;
pub const GENESIS_SUPPLY: u128 = 80_000_000; // in whole tokens, NOT atoms
/// 37.5% (375/1000) of the genesis supply, reserved for validator rewards.
pub const VALIDATOR_REWARD_SUPPLY: u128 = GENESIS_SUPPLY * 375 / 1000; // as described in
// tokenomics documentation
/// Remainder of the genesis supply that is distributed at genesis.
pub const GENESIS_SUPPLY_DISTRIBUTED: u128 = GENESIS_SUPPLY - VALIDATOR_REWARD_SUPPLY;
pub const CHAIN_NAME: &str = "Gen6 Public Chain";
pub const CHAIN_ID: &str = "gen6";
pub const EXISTENTIAL_DEPOSIT: u128 = 10u128.pow(TOKEN_DECIMALS as u32 - 1); // set to 0.1 GSX // The EXISTENTIAL_DEPOSIT is in atomic units, not full tokens.
/// This determines the average expected block time that we are targeting.
/// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`.
/// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked
/// up by `pallet_aura` to implement `fn slot_duration()`.
///
/// Change this to adjust the block time.
pub const MILLISECS_PER_BLOCK: u64 = 5000;

92
node/Cargo.toml 100644
View File

@ -0,0 +1,92 @@
[package]
name = "g6-solo-node"
description = "A FRAME-based Substrate G6 Solo Chain node."
publish = false
build = "build.rs"
version.workspace = true
homepage.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
edition.workspace = true
rust-version.workspace = true

[lints]
workspace = true

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

[[bin]]
name = "g6-solo-node"

[dependencies]
# Local Dependencies
g6-solo-runtime = { workspace = true }
network_constants = { workspace = true }
clap = { workspace = true }
futures = { workspace = true }
serde_json = { workspace = true }
sc-cli = { workspace = true }
sp-core = { workspace = true }
sc-executor = { workspace = true }
sc-network = { workspace = true }
sc-service = { workspace = true }
sc-telemetry = { workspace = true }
sc-transaction-pool = { workspace = true }
sc-transaction-pool-api = { workspace = true }
sc-offchain = { workspace = true }
sc-consensus-aura = { workspace = true }
sp-consensus-aura = { workspace = true }
sc-consensus = { workspace = true }
sc-consensus-grandpa = { workspace = true }
sp-consensus-grandpa = { workspace = true }
sc-client-api = { workspace = true }
sp-runtime = { workspace = true }
sp-io = { workspace = true }
sp-timestamp = { workspace = true }
sp-inherents = { workspace = true }
sp-keyring = { workspace = true }
frame-system = { workspace = true }
pallet-transaction-payment = { workspace = true, default-features = false }

# These dependencies are used for the node template's RPCs
jsonrpsee = { features = ["server"], workspace = true }
sp-api = { workspace = true }
sc-rpc-api = { workspace = true }
sp-blockchain = { workspace = true }
sp-block-builder = { workspace = true }
sc-basic-authorship = { workspace = true }
substrate-frame-rpc-system = { workspace = true }
pallet-skip-feeless-payment = { workspace = true }
pallet-transaction-payment-rpc = { workspace = true }

# These dependencies are used for runtime benchmarking
frame-benchmarking = { workspace = true }
frame-benchmarking-cli = { workspace = true }
# FIX: was `default_features` (underscore) — the canonical key is
# `default-features`; newer cargo versions warn on/reject the underscore form.
frame-metadata-hash-extension = { workspace = true, default-features = false }

[build-dependencies]
substrate-build-script-utils = { workspace = true }

[features]
default = []
# Dependencies that are only required if runtime benchmarking should be build.
runtime-benchmarks = [
    "frame-benchmarking-cli/runtime-benchmarks",
    "frame-benchmarking/runtime-benchmarks",
    "frame-system/runtime-benchmarks",
    "g6-solo-runtime/runtime-benchmarks",
    "sc-service/runtime-benchmarks",
    "sp-runtime/runtime-benchmarks",
]
# Enable features that allow the runtime to be tried and debugged. Name might be subject to change
# in the near future.
try-runtime = [
    "frame-system/try-runtime",
    "g6-solo-runtime/try-runtime",
    "pallet-transaction-payment/try-runtime",
    "sp-runtime/try-runtime",
]

7
node/build.rs 100644
View File

@ -0,0 +1,7 @@
//! Build script for the node binary.
use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed};

fn main() {
    // Emits cargo/git metadata env vars — presumably consumed by the CLI's
    // version string; confirm against `sc-cli` usage.
    generate_cargo_keys();
    // Re-run this build script whenever git HEAD moves.
    rerun_if_git_head_changed();
}

View File

@ -0,0 +1,176 @@
//! Setup code for [`super::command`] which would otherwise bloat that module.
//!
//! Should only be used for benchmarking as it may break in other contexts.
use crate::service::FullClient;
use g6_solo_runtime as runtime;
use runtime::{AccountId, Balance, BalancesCall, SystemCall};
use sc_cli::Result;
use sc_client_api::BlockBackend;
use sp_core::{Encode, Pair};
use sp_inherents::{InherentData, InherentDataProvider};
use sp_keyring::Sr25519Keyring;
use sp_runtime::{OpaqueExtrinsic, SaturatedConversion};
use std::{sync::Arc, time::Duration};
/// Generates `System::remark` extrinsics for the `benchmark overhead` command.
///
/// Note: Should only be used for benchmarking.
pub struct RemarkBuilder {
    client: Arc<FullClient>,
}

impl RemarkBuilder {
    /// Wraps the given client in a new builder.
    pub fn new(client: Arc<FullClient>) -> Self {
        Self { client }
    }
}

impl frame_benchmarking_cli::ExtrinsicBuilder for RemarkBuilder {
    fn pallet(&self) -> &str {
        "system"
    }

    fn extrinsic(&self) -> &str {
        "remark"
    }

    fn build(&self, nonce: u32) -> std::result::Result<OpaqueExtrinsic, &'static str> {
        // Sign every benchmark remark with the well-known Bob dev key.
        let signer = Sr25519Keyring::Bob.pair();
        let call = SystemCall::remark { remark: vec![] }.into();
        let xt = create_benchmark_extrinsic(self.client.as_ref(), signer, call, nonce);
        Ok(xt.into())
    }
}
/// Generates `Balances::TransferKeepAlive` extrinsics for the benchmarks.
///
/// Note: Should only be used for benchmarking.
pub struct TransferKeepAliveBuilder {
    client: Arc<FullClient>,
    dest: AccountId,
    value: Balance,
}

impl TransferKeepAliveBuilder {
    /// Wraps the given client, destination account and transfer amount.
    pub fn new(client: Arc<FullClient>, dest: AccountId, value: Balance) -> Self {
        Self { client, dest, value }
    }
}

impl frame_benchmarking_cli::ExtrinsicBuilder for TransferKeepAliveBuilder {
    fn pallet(&self) -> &str {
        "balances"
    }

    fn extrinsic(&self) -> &str {
        "transfer_keep_alive"
    }

    fn build(&self, nonce: u32) -> std::result::Result<OpaqueExtrinsic, &'static str> {
        // Sign every benchmark transfer with the well-known Bob dev key.
        let signer = Sr25519Keyring::Bob.pair();
        let call = BalancesCall::transfer_keep_alive {
            dest: self.dest.clone().into(),
            value: self.value,
        }
        .into();
        let xt = create_benchmark_extrinsic(self.client.as_ref(), signer, call, nonce);
        Ok(xt.into())
    }
}
/// Create a transaction using the given `call`.
///
/// Builds a signed `UncheckedExtrinsic` whose signed-extension tuple must
/// mirror the runtime's `SignedExtra` definition exactly (same extensions,
/// same order), or signature verification will fail on submission.
///
/// Note: Should only be used for benchmarking.
pub fn create_benchmark_extrinsic(
    client: &FullClient,
    sender: sp_core::sr25519::Pair,
    call: runtime::RuntimeCall,
    nonce: u32,
) -> runtime::UncheckedExtrinsic {
    // Genesis hash anchors `CheckGenesis`; it exists on any initialized chain.
    let genesis_hash = client
        .block_hash(0)
        .ok()
        .flatten()
        .expect("Genesis block exists; qed");
    let best_hash = client.chain_info().best_hash;
    let best_block = client.chain_info().best_number;
    // Derive a power-of-two mortality period from `BlockHashCount`,
    // falling back to 2 if `checked_next_power_of_two` overflows.
    let period = runtime::BlockHashCount::get()
        .checked_next_power_of_two()
        .map(|c| c / 2)
        .unwrap_or(2) as u64;
    // Signed extensions, in the order declared by the runtime's `SignedExtra`.
    let extra: runtime::SignedExtra = (
        frame_system::CheckNonZeroSender::<runtime::Runtime>::new(),
        frame_system::CheckSpecVersion::<runtime::Runtime>::new(),
        frame_system::CheckTxVersion::<runtime::Runtime>::new(),
        frame_system::CheckGenesis::<runtime::Runtime>::new(),
        frame_system::CheckEra::<runtime::Runtime>::from(sp_runtime::generic::Era::mortal(
            period,
            best_block.saturated_into(),
        )),
        frame_system::CheckNonce::<runtime::Runtime>::from(nonce),
        frame_system::CheckWeight::<runtime::Runtime>::new(),
        // Zero tip, wrapped so feeless calls skip payment entirely.
        pallet_skip_feeless_payment::SkipCheckIfFeeless::from(
            pallet_transaction_payment::ChargeTransactionPayment::<runtime::Runtime>::from(0),
        ),
        // `false`: do not require the metadata hash in the signed payload.
        frame_metadata_hash_extension::CheckMetadataHash::<runtime::Runtime>::new(false),
    );
    // The "additional signed" payload: one entry per extension above, 1:1.
    let raw_payload = runtime::SignedPayload::from_raw(
        call.clone(),
        extra.clone(),
        (
            (),
            runtime::VERSION.spec_version,
            runtime::VERSION.transaction_version,
            genesis_hash,
            best_hash,
            (),
            (),
            (),
            None,
        ),
    );
    let signature = raw_payload.using_encoded(|e| sender.sign(e));
    runtime::UncheckedExtrinsic::new_signed(
        call,
        sp_runtime::AccountId32::from(sender.public()).into(),
        runtime::Signature::Sr25519(signature),
        extra,
    )
}
/// Generates inherent data for the `benchmark overhead` command.
///
/// Note: Should only be used for benchmarking.
pub fn inherent_benchmark_data() -> Result<InherentData> {
    let mut data = InherentData::new();
    // A fixed zero timestamp keeps the generated inherents deterministic.
    let timestamp_provider = sp_timestamp::InherentDataProvider::new(Duration::from_millis(0).into());
    futures::executor::block_on(timestamp_provider.provide_inherent_data(&mut data))
        .map_err(|e| format!("creating inherent data: {:?}", e))?;
    Ok(data)
}

View File

@ -0,0 +1,236 @@
use g6_solo_runtime::{opaque::SessionKeys, AccountId, Signature, WASM_BINARY};
use sc_service::ChainType;
use sc_service::Properties;
use sp_consensus_aura::sr25519::AuthorityId as AuraId;
use sp_consensus_grandpa::AuthorityId as GrandpaId;
use sp_core::{sr25519, Pair, Public};
use sp_runtime::traits::{IdentifyAccount, Verify};
// The URL for the telemetry server.
// const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/";
/// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type.
// pub type ChainSpec = sc_service::GenericChainSpec<RuntimeGenesisConfig>;
pub type ChainSpec = sc_service::GenericChainSpec;
/// Generate a crypto pair from seed.
pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public {
    let pair = TPublic::Pair::from_string(&format!("//{}", seed), None)
        .expect("static values are valid; qed");
    pair.public()
}
/// Public-key type that `Signature` verification resolves to.
type AccountPublic = <Signature as Verify>::Signer;
/// Generate an account ID from seed.
pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId
where
    AccountPublic: From<<TPublic::Pair as Pair>::Public>,
{
    let public = get_from_seed::<TPublic>(seed);
    AccountPublic::from(public).into_account()
}
/// Generate an Aura authority key.
pub fn authority_keys_from_seed(s: &str) -> (AuraId, GrandpaId, AccountId) {
    let aura = get_from_seed::<AuraId>(s);
    let grandpa = get_from_seed::<GrandpaId>(s);
    let account = get_account_id_from_seed::<sr25519::Public>(s);
    (aura, grandpa, account)
}
/// Assemble the per-validator session-keys struct from the individual consensus keys.
fn session_keys(aura: AuraId, grandpa: GrandpaId) -> SessionKeys {
    SessionKeys { grandpa, aura }
}
/// Chain properties (token symbol, decimals and SS58 prefix) shared by every
/// chain spec of this node.
fn build_default_chain_properties() -> Properties {
    let mut props = Properties::new();
    props.insert("tokenSymbol".into(), network_constants::TOKEN_SYMBOL.into());
    props.insert(
        "tokenDecimals".into(),
        network_constants::TOKEN_DECIMALS.into(),
    );
    props.insert("ss58Format".into(), network_constants::SS58FORMAT.into());
    props
}
/// Chain spec for single-node development: Alice is the sole PoA authority
/// and the sudo key.
pub fn development_config() -> Result<ChainSpec, String> {
    let properties = build_default_chain_properties();
    let wasm = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?;
    // Initial PoA authorities.
    let authorities = vec![authority_keys_from_seed("Alice")];
    // Sudo account.
    let root = Some(get_account_id_from_seed::<sr25519::Public>("Alice"));
    // Pre-funded accounts.
    let endowed = vec![
        get_account_id_from_seed::<sr25519::Public>("Alice"),
        get_account_id_from_seed::<sr25519::Public>("Bob"),
        get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
        get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
    ];
    // Initial identity registrars.
    let registrars = vec![
        get_account_id_from_seed::<sr25519::Public>("Alice"),
        get_account_id_from_seed::<sr25519::Public>("Charlie"),
    ];
    Ok(ChainSpec::builder(wasm, None)
        .with_name("Development")
        .with_id("dev")
        .with_chain_type(ChainType::Development)
        .with_genesis_config_patch(testnet_genesis(authorities, root, endowed, registrars, true))
        .with_properties(properties)
        .build())
}
pub fn local_testnet_config() -> Result<ChainSpec, String> {
let properties = build_default_chain_properties();
Ok(ChainSpec::builder(
WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?,
None,
)
.with_name("G6 Local Solo Chain")
.with_id("g6_local_solo_chain")
.with_chain_type(ChainType::Local)
.with_genesis_config_patch(testnet_genesis(
// Initial PoA authorities
vec![
authority_keys_from_seed("Alice"),
authority_keys_from_seed("Bob"),
],
// Sudo account
Some(get_account_id_from_seed::<sr25519::Public>("Alice")),
// Pre-funded accounts
vec![
get_account_id_from_seed::<sr25519::Public>("Alice"),
get_account_id_from_seed::<sr25519::Public>("Bob"),
get_account_id_from_seed::<sr25519::Public>("Charlie"),
get_account_id_from_seed::<sr25519::Public>("Dave"),
get_account_id_from_seed::<sr25519::Public>("Eve"),
get_account_id_from_seed::<sr25519::Public>("Ferdie"),
get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
get_account_id_from_seed::<sr25519::Public>("Charlie//stash"),
get_account_id_from_seed::<sr25519::Public>("Dave//stash"),
get_account_id_from_seed::<sr25519::Public>("Eve//stash"),
get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"),
],
// Initial identity registrars
vec![
get_account_id_from_seed::<sr25519::Public>("Alice"),
get_account_id_from_seed::<sr25519::Public>("Charlie"),
],
true,
))
.with_properties(properties)
.build())
}
/// Chain spec for a live deployment: no authorities, sudo key or endowed
/// accounts are baked in — they are expected to be provisioned after genesis.
pub fn empty_config() -> Result<ChainSpec, String> {
    let properties = build_default_chain_properties();
    Ok(ChainSpec::builder(
        // Fixed error text: the previous message claimed "Development wasm",
        // which is misleading for a `ChainType::Live` spec.
        WASM_BINARY.ok_or_else(|| "Runtime wasm not available".to_string())?,
        None,
    )
    .with_name(network_constants::CHAIN_NAME.into())
    .with_id(network_constants::CHAIN_ID.into())
    .with_chain_type(ChainType::Live)
    // Empty authority/sudo/balance sets: everything is injected later.
    .with_genesis_config_patch(testnet_genesis(vec![], None, vec![], vec![], true))
    .with_properties(properties)
    .build())
}
/// Builds the genesis-config JSON patch shared by all chain specs.
///
/// * `initial_authorities` – (Aura, GRANDPA, account) key triples; seeds the
///   validator set and the session keys.
/// * `root_key` – optional sudo account.
/// * `endowed_accounts` – accounts that share the genesis supply evenly.
/// * `initial_identity_registrars` – accounts registered as identity registrars.
/// * `_enable_println` – currently unused; kept for signature compatibility.
fn testnet_genesis(
    initial_authorities: Vec<(AuraId, GrandpaId, AccountId)>,
    root_key: Option<AccountId>,
    endowed_accounts: Vec<AccountId>,
    initial_identity_registrars: Vec<AccountId>,
    _enable_println: bool,
) -> serde_json::Value {
    // Total distributed supply in base units: configured amount scaled by the
    // token decimals.
    let total_genesis_supply: u128 = network_constants::GENESIS_SUPPLY_DISTRIBUTED
        * 10u128.pow(network_constants::TOKEN_DECIMALS.into());
    let balances: Vec<(AccountId, u128)> =
        distribute_genesis_supply(total_genesis_supply, &endowed_accounts);
    // let initial_permissions_admins = vec![get_account_id_from_seed::<sr25519::Public>("Bob")];
    // let initial_roles = vec![
    //     // (b"admin".to_vec(), Some(vec![
    //     //     b"monitoring".to_vec(),
    //     //     b"permissions".to_vec(),
    //     // ])),
    //     // (b"maintainer".to_vec(), Some(vec![
    //     //     b"monitoring".to_vec(),
    //     //     b"permissions".to_vec(),
    //     // ])),
    // ];
    serde_json::json!({
        "balances": {
            "balances": balances,
        },
        "substrateValidatorSet": {
            "initialValidators": initial_authorities.iter().map(|x| x.0.clone()).collect::<Vec<_>>(),
            "validatorRewardFund": network_constants::VALIDATOR_REWARD_SUPPLY * 10u128.pow(network_constants::TOKEN_DECIMALS.into()),
        },
        // Session keys map each validator account to its (Aura, GRANDPA) keys.
        "session": {
            "keys": initial_authorities.iter().map(|x| {
                (x.2.clone(), x.2.clone(), session_keys(x.0.clone(), x.1.clone()))
            }).collect::<Vec<_>>(),
        },
        // NOTE(review): Aura/GRANDPA authority lists are intentionally empty
        // here — the commented lines show the direct alternative; authorities
        // appear to be sourced via the session keys above. Confirm this is the
        // intended wiring.
        "aura": {
            // "authorities": initial_authorities.iter().map(|x| (x.0.clone())).collect::<Vec<_>>(),
            "authorities": Vec::<AuraId>::new(),
        },
        "grandpa": {
            // "authorities": initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect::<Vec<_>>(),
            "authorities": Vec::<(GrandpaId, u64)>::new(),
        },
        "identity": {
            "registrars": initial_identity_registrars,
        },
        "sudo": {
            "key": root_key,
        },
        // "permissions": {
        //     "initialAdmins": initial_permissions_admins,
        //     "initialRoles": initial_roles,
        // },
    })
}
/// Splits `total_supply` evenly across `endowed_accounts`.
///
/// Integer division can leave a remainder of up to `accounts - 1` units; the
/// remainder is credited to the first account so the distributed sum always
/// equals `total_supply` exactly. An empty account list yields an empty
/// balance vector (no division by zero).
///
/// Generic over the account type so the arithmetic can be tested in
/// isolation; callers passing `&Vec<AccountId>` continue to work via deref
/// coercion to `&[AccountId]`.
fn distribute_genesis_supply<A: Clone>(
    total_supply: u128,
    endowed_accounts: &[A],
) -> Vec<(A, u128)> {
    let num_accounts = endowed_accounts.len() as u128;
    if num_accounts == 0 {
        return Vec::new();
    }
    let per_account = total_supply / num_accounts;
    // `%` gives the leftover directly instead of multiplying back and
    // subtracting as before.
    let remainder = total_supply % num_accounts;
    let mut balances: Vec<(A, u128)> = endowed_accounts
        .iter()
        .cloned()
        .map(|account| (account, per_account))
        .collect();
    // Credit the rounding remainder to the first account so nothing is lost.
    if remainder > 0 {
        balances[0].1 += remainder;
    }
    balances
}

46
node/src/cli.rs 100644
View File

@ -0,0 +1,46 @@
use sc_cli::RunCmd;
// Top-level command-line interface of the node binary.
//
// NOTE(review): `///` doc comments are deliberately avoided on this struct and
// its fields because clap derives help/about text from them, which would
// change the CLI output.
#[derive(Debug, clap::Parser)]
pub struct Cli {
    // Optional subcommand; when absent the node itself is started
    // (see the `None` arm in `command::run`).
    #[command(subcommand)]
    pub subcommand: Option<Subcommand>,
    // Standard Substrate run-command flags, flattened into the top level.
    #[clap(flatten)]
    pub run: RunCmd,
}
// All node subcommands. The `///` comments on the variants double as clap
// help text, so their wording is part of the CLI surface.
#[derive(Debug, clap::Subcommand)]
#[allow(clippy::large_enum_variant)]
pub enum Subcommand {
    /// Key management cli utilities
    #[command(subcommand)]
    Key(sc_cli::KeySubcommand),
    /// Build a chain specification.
    BuildSpec(sc_cli::BuildSpecCmd),
    /// Validate blocks.
    CheckBlock(sc_cli::CheckBlockCmd),
    /// Export blocks.
    ExportBlocks(sc_cli::ExportBlocksCmd),
    /// Export the state of a given block into a chain spec.
    ExportState(sc_cli::ExportStateCmd),
    /// Import blocks.
    ImportBlocks(sc_cli::ImportBlocksCmd),
    /// Remove the whole chain.
    PurgeChain(sc_cli::PurgeChainCmd),
    /// Revert the chain to a previous state.
    Revert(sc_cli::RevertCmd),
    /// Sub-commands concerned with benchmarking.
    #[command(subcommand)]
    Benchmark(frame_benchmarking_cli::BenchmarkCmd),
    /// Db meta columns information.
    ChainInfo(sc_cli::ChainInfoCmd),
}

221
node/src/command.rs 100644
View File

@ -0,0 +1,221 @@
use crate::{
benchmarking::{inherent_benchmark_data, RemarkBuilder, TransferKeepAliveBuilder},
chain_spec,
cli::{Cli, Subcommand},
service,
};
use frame_benchmarking_cli::{BenchmarkCmd, ExtrinsicFactory, SUBSTRATE_REFERENCE_HARDWARE};
use g6_solo_runtime::Block;
use sc_cli::SubstrateCli;
use sc_service::PartialComponents;
use sp_keyring::Sr25519Keyring;
// Node identity and chain-spec loading for the sc-cli machinery.
//
// NOTE(review): several values below are still the node-template defaults
// ("Substrate Node", "support.anonymous.an", copyright year 2017) — consider
// branding them for G6.
impl SubstrateCli for Cli {
    fn impl_name() -> String {
        "Substrate Node".into()
    }
    fn impl_version() -> String {
        // Injected at build time by substrate-build-script-utils.
        env!("SUBSTRATE_CLI_IMPL_VERSION").into()
    }
    fn description() -> String {
        env!("CARGO_PKG_DESCRIPTION").into()
    }
    fn author() -> String {
        env!("CARGO_PKG_AUTHORS").into()
    }
    fn support_url() -> String {
        "support.anonymous.an".into()
    }
    fn copyright_start_year() -> i32 {
        2017
    }
    /// Maps a `--chain` identifier to a chain spec; anything unrecognized is
    /// treated as a path to a JSON chain-spec file.
    fn load_spec(&self, id: &str) -> Result<Box<dyn sc_service::ChainSpec>, String> {
        Ok(match id {
            "dev" => Box::new(chain_spec::development_config()?),
            // The empty id (default) selects the local testnet.
            "" | "local" => Box::new(chain_spec::local_testnet_config()?),
            // "live" and "empty" are aliases for the same unseeded spec.
            "live" => Box::new(chain_spec::empty_config()?),
            "empty" => Box::new(chain_spec::empty_config()?),
            path => Box::new(chain_spec::ChainSpec::from_json_file(
                std::path::PathBuf::from(path),
            )?),
        })
    }
}
/// Parse and run command line arguments.
///
/// Dispatches each subcommand through the appropriate sc-cli runner
/// (`sync_run` for commands that don't need the async runtime, `async_run`
/// for those that do) and, with no subcommand, starts the full node.
pub fn run() -> sc_cli::Result<()> {
    let cli = Cli::from_args();
    match &cli.subcommand {
        // Key utilities need no node components at all.
        Some(Subcommand::Key(cmd)) => cmd.run(&cli),
        Some(Subcommand::BuildSpec(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.sync_run(|config| cmd.run(config.chain_spec, config.network))
        }
        Some(Subcommand::CheckBlock(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.async_run(|config| {
                // Only the pieces each command needs are pulled out of the
                // partial components; the rest is dropped.
                let PartialComponents {
                    client,
                    task_manager,
                    import_queue,
                    ..
                } = service::new_partial(&config)?;
                Ok((cmd.run(client, import_queue), task_manager))
            })
        }
        Some(Subcommand::ExportBlocks(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.async_run(|config| {
                let PartialComponents {
                    client,
                    task_manager,
                    ..
                } = service::new_partial(&config)?;
                Ok((cmd.run(client, config.database), task_manager))
            })
        }
        Some(Subcommand::ExportState(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.async_run(|config| {
                let PartialComponents {
                    client,
                    task_manager,
                    ..
                } = service::new_partial(&config)?;
                Ok((cmd.run(client, config.chain_spec), task_manager))
            })
        }
        Some(Subcommand::ImportBlocks(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.async_run(|config| {
                let PartialComponents {
                    client,
                    task_manager,
                    import_queue,
                    ..
                } = service::new_partial(&config)?;
                Ok((cmd.run(client, import_queue), task_manager))
            })
        }
        Some(Subcommand::PurgeChain(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.sync_run(|config| cmd.run(config.database))
        }
        Some(Subcommand::Revert(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.async_run(|config| {
                let PartialComponents {
                    client,
                    task_manager,
                    backend,
                    ..
                } = service::new_partial(&config)?;
                // Besides reverting blocks, GRANDPA's auxiliary data must be
                // reverted too, or the voter would resume from stale state.
                let aux_revert = Box::new(|client, _, blocks| {
                    sc_consensus_grandpa::revert(client, blocks)?;
                    Ok(())
                });
                Ok((cmd.run(client, backend, Some(aux_revert)), task_manager))
            })
        }
        Some(Subcommand::Benchmark(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.sync_run(|config| {
                // This switch needs to be in the client, since the client decides
                // which sub-commands it wants to support.
                match cmd {
                    BenchmarkCmd::Pallet(cmd) => {
                        // Pallet benchmarks require the runtime to be compiled
                        // with the `runtime-benchmarks` feature.
                        if !cfg!(feature = "runtime-benchmarks") {
                            return Err(
                                "Runtime benchmarking wasn't enabled when building the node. \
                            You can enable it with `--features runtime-benchmarks`."
                                    .into(),
                            );
                        }
                        cmd.run_with_spec::<sp_runtime::traits::HashingFor<Block>, ()>(Some(
                            config.chain_spec,
                        ))
                    }
                    BenchmarkCmd::Block(cmd) => {
                        let PartialComponents { client, .. } = service::new_partial(&config)?;
                        cmd.run(client)
                    }
                    #[cfg(not(feature = "runtime-benchmarks"))]
                    BenchmarkCmd::Storage(_) => Err(
                        "Storage benchmarking can be enabled with `--features runtime-benchmarks`."
                            .into(),
                    ),
                    #[cfg(feature = "runtime-benchmarks")]
                    BenchmarkCmd::Storage(cmd) => {
                        let PartialComponents {
                            client, backend, ..
                        } = service::new_partial(&config)?;
                        let db = backend.expose_db();
                        let storage = backend.expose_storage();
                        cmd.run(config, client, db, storage)
                    }
                    BenchmarkCmd::Overhead(cmd) => {
                        let PartialComponents { client, .. } = service::new_partial(&config)?;
                        let ext_builder = RemarkBuilder::new(client.clone());
                        cmd.run(
                            config,
                            client,
                            inherent_benchmark_data()?,
                            Vec::new(),
                            &ext_builder,
                        )
                    }
                    BenchmarkCmd::Extrinsic(cmd) => {
                        let PartialComponents { client, .. } = service::new_partial(&config)?;
                        // Register the *Remark* and *TKA* builders.
                        let ext_factory = ExtrinsicFactory(vec![
                            Box::new(RemarkBuilder::new(client.clone())),
                            Box::new(TransferKeepAliveBuilder::new(
                                client.clone(),
                                Sr25519Keyring::Alice.to_account_id(),
                                network_constants::EXISTENTIAL_DEPOSIT,
                            )),
                        ]);
                        cmd.run(client, inherent_benchmark_data()?, Vec::new(), &ext_factory)
                    }
                    BenchmarkCmd::Machine(cmd) => {
                        cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone())
                    }
                }
            })
        }
        Some(Subcommand::ChainInfo(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.sync_run(|config| cmd.run::<Block>(&config))
        }
        // No subcommand: run the node itself, selecting the network backend
        // (libp2p or litep2p) from the configuration.
        None => {
            let runner = cli.create_runner(&cli.run)?;
            runner.run_node_until_exit(|config| async move {
                match config.network.network_backend {
                    sc_network::config::NetworkBackendType::Libp2p => service::new_full::<
                        sc_network::NetworkWorker<
                            g6_solo_runtime::opaque::Block,
                            <g6_solo_runtime::opaque::Block as sp_runtime::traits::Block>::Hash,
                        >,
                    >(config)
                    .map_err(sc_cli::Error::Service),
                    sc_network::config::NetworkBackendType::Litep2p => {
                        service::new_full::<sc_network::Litep2pNetworkBackend>(config)
                            .map_err(sc_cli::Error::Service)
                    }
                }
            })
        }
    }
}

13
node/src/main.rs 100644
View File

@ -0,0 +1,13 @@
//! Substrate Node Template CLI library.
#![warn(missing_docs)]
/// Extrinsic builders and inherent data for the benchmarking subcommands.
mod benchmarking;
/// Chain specifications (dev, local, live).
mod chain_spec;
/// clap-derived command-line interface definitions.
mod cli;
/// Subcommand dispatch.
mod command;
/// Node-specific RPC extensions.
mod rpc;
/// Full-node service assembly.
mod service;
/// Binary entry point: all parsing and dispatch happens in `command::run`.
fn main() -> sc_cli::Result<()> {
    command::run()
}

68
node/src/rpc.rs 100644
View File

@ -0,0 +1,68 @@
//! A collection of node-specific RPC methods.
//! Substrate provides the `sc-rpc` crate, which defines the core RPC layer
//! used by Substrate nodes. This file extends those RPC definitions with
//! capabilities that are specific to this project's runtime configuration.
#![warn(missing_docs)]
use std::sync::Arc;
use g6_solo_runtime::{opaque::Block, AccountId, Balance, Nonce};
use jsonrpsee::RpcModule;
use sc_transaction_pool_api::TransactionPool;
use sp_api::ProvideRuntimeApi;
use sp_block_builder::BlockBuilder;
use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata};
pub use sc_rpc_api::DenyUnsafe;
/// Full client dependencies.
///
/// Everything `create_full` needs to wire up the node's RPC handlers.
pub struct FullDeps<C, P> {
    /// The client instance to use.
    pub client: Arc<C>,
    /// Transaction pool instance.
    pub pool: Arc<P>,
    /// Whether to deny unsafe calls
    pub deny_unsafe: DenyUnsafe,
}
/// Instantiate all full RPC extensions.
pub fn create_full<C, P>(
    deps: FullDeps<C, P>,
) -> Result<RpcModule<()>, Box<dyn std::error::Error + Send + Sync>>
where
    C: ProvideRuntimeApi<Block>,
    C: HeaderBackend<Block> + HeaderMetadata<Block, Error = BlockChainError> + 'static,
    C: Send + Sync + 'static,
    C::Api: substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
    C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>,
    C::Api: BlockBuilder<Block>,
    P: TransactionPool + 'static,
{
    use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer};
    use substrate_frame_rpc_system::{System, SystemApiServer};
    let FullDeps {
        client,
        pool,
        deny_unsafe,
    } = deps;
    let mut io = RpcModule::new(());
    // System RPC (account nonces etc.) needs the client, the pool and the
    // unsafe-call policy.
    io.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?;
    // Transaction-payment RPC consumes the remaining client handle.
    io.merge(TransactionPayment::new(client).into_rpc())?;
    // Extend this RPC with a custom API by using the following syntax.
    // `YourRpcStruct` should have a reference to a client, which is needed
    // to call into the runtime.
    // `io.merge(YourRpcTrait::into_rpc(YourRpcStruct::new(ReferenceToClient, ...)))?;`
    // You probably want to enable the `rpc v2 chainSpec` API as well
    //
    // let chain_name = chain_spec.name().to_string();
    // let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed");
    // let properties = chain_spec.properties();
    // io.merge(ChainSpec::new(chain_name, genesis_hash, properties).into_rpc())?;
    Ok(io)
}

346
node/src/service.rs 100644
View File

@ -0,0 +1,346 @@
//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
use futures::FutureExt;
use g6_solo_runtime::{self, opaque::Block, RuntimeApi};
use sc_client_api::{Backend, BlockBackend};
use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams};
use sc_consensus_grandpa::SharedVoterState;
use sc_service::{error::Error as ServiceError, Configuration, TaskManager, WarpSyncParams};
use sc_telemetry::{Telemetry, TelemetryWorker};
use sc_transaction_pool_api::OffchainTransactionPoolFactory;
use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
use std::{sync::Arc, time::Duration};
/// Full client used by this node: wasm executor with the standard Substrate
/// host functions.
pub(crate) type FullClient = sc_service::TFullClient<
    Block,
    RuntimeApi,
    sc_executor::WasmExecutor<sp_io::SubstrateHostFunctions>,
>;
/// Full (non-light) database backend.
type FullBackend = sc_service::TFullBackend<Block>;
/// Fork-choice rule: always follow the longest chain.
type FullSelectChain = sc_consensus::LongestChain<FullBackend, Block>;
/// The minimum period of blocks on which justifications will be
/// imported and generated.
const GRANDPA_JUSTIFICATION_PERIOD: u32 = 512;
/// Everything `new_partial` assembles. The `other` tuple carries the GRANDPA
/// block import, the GRANDPA link half and the optional telemetry instance.
pub type Service = sc_service::PartialComponents<
    FullClient,
    FullBackend,
    FullSelectChain,
    sc_consensus::DefaultImportQueue<Block>,
    sc_transaction_pool::FullPool<Block, FullClient>,
    (
        sc_consensus_grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, FullSelectChain>,
        sc_consensus_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
        Option<Telemetry>,
    ),
>;
/// Builds the partial node components (client, backend, import queue,
/// transaction pool, GRANDPA import wiring and optional telemetry) shared by
/// the full node and all database-touching subcommands.
pub fn new_partial(config: &Configuration) -> Result<Service, ServiceError> {
    // Telemetry is optional: only spin up a worker when endpoints are
    // configured and non-empty.
    let telemetry = config
        .telemetry_endpoints
        .clone()
        .filter(|x| !x.is_empty())
        .map(|endpoints| -> Result<_, sc_telemetry::Error> {
            let worker = TelemetryWorker::new(16)?;
            let telemetry = worker.handle().new_telemetry(endpoints);
            Ok((worker, telemetry))
        })
        .transpose()?;
    let executor = sc_service::new_wasm_executor::<sp_io::SubstrateHostFunctions>(config);
    let (client, backend, keystore_container, task_manager) =
        sc_service::new_full_parts::<Block, RuntimeApi, _>(
            config,
            telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
            executor,
        )?;
    let client = Arc::new(client);
    // Hand the telemetry worker to the task manager; keep only the handle.
    let telemetry = telemetry.map(|(worker, telemetry)| {
        task_manager
            .spawn_handle()
            .spawn("telemetry", None, worker.run());
        telemetry
    });
    let select_chain = sc_consensus::LongestChain::new(backend.clone());
    let transaction_pool = sc_transaction_pool::BasicPool::new_full(
        config.transaction_pool.clone(),
        config.role.is_authority().into(),
        config.prometheus_registry(),
        task_manager.spawn_essential_handle(),
        client.clone(),
    );
    // GRANDPA wraps the client as a block import so finality data is tracked
    // on every imported block.
    let (grandpa_block_import, grandpa_link) = sc_consensus_grandpa::block_import(
        client.clone(),
        GRANDPA_JUSTIFICATION_PERIOD,
        &client,
        select_chain.clone(),
        telemetry.as_ref().map(|x| x.handle()),
    )?;
    let cidp_client = client.clone();
    // Aura import queue: verifies slot claims; inherents get a timestamp and
    // the slot derived from it (slot duration read from the runtime at the
    // parent block).
    let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _>(
        ImportQueueParams {
            block_import: grandpa_block_import.clone(),
            justification_import: Some(Box::new(grandpa_block_import.clone())),
            client: client.clone(),
            create_inherent_data_providers: move |parent_hash, _| {
                let cidp_client = cidp_client.clone();
                async move {
                    let slot_duration = sc_consensus_aura::standalone::slot_duration_at(
                        &*cidp_client,
                        parent_hash,
                    )?;
                    let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
                    let slot =
                        sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
                            *timestamp,
                            slot_duration,
                        );
                    Ok((slot, timestamp))
                }
            },
            spawner: &task_manager.spawn_essential_handle(),
            registry: config.prometheus_registry(),
            check_for_equivocation: Default::default(),
            telemetry: telemetry.as_ref().map(|x| x.handle()),
            compatibility_mode: Default::default(),
        },
    )?;
    Ok(sc_service::PartialComponents {
        client,
        backend,
        task_manager,
        import_queue,
        keystore_container,
        select_chain,
        transaction_pool,
        other: (grandpa_block_import, grandpa_link, telemetry),
    })
}
/// Builds a new service for a full client.
///
/// Completes the partial components with networking, RPC, offchain workers,
/// and — depending on the role/flags — the Aura authoring task and the
/// GRANDPA voter. Generic over the network backend (libp2p or litep2p).
pub fn new_full<
    N: sc_network::NetworkBackend<Block, <Block as sp_runtime::traits::Block>::Hash>,
>(
    config: Configuration,
) -> Result<TaskManager, ServiceError> {
    let sc_service::PartialComponents {
        client,
        backend,
        mut task_manager,
        import_queue,
        keystore_container,
        select_chain,
        transaction_pool,
        other: (block_import, grandpa_link, mut telemetry),
    } = new_partial(&config)?;
    let mut net_config = sc_network::config::FullNetworkConfiguration::<
        Block,
        <Block as sp_runtime::traits::Block>::Hash,
        N,
    >::new(&config.network);
    let metrics = N::register_notification_metrics(config.prometheus_registry());
    let peer_store_handle = net_config.peer_store_handle();
    // The GRANDPA protocol name is derived from the genesis hash so nodes of
    // different chains never gossip with each other.
    let grandpa_protocol_name = sc_consensus_grandpa::protocol_standard_name(
        &client
            .block_hash(0)
            .ok()
            .flatten()
            .expect("Genesis block exists; qed"),
        &config.chain_spec,
    );
    let (grandpa_protocol_config, grandpa_notification_service) =
        sc_consensus_grandpa::grandpa_peers_set_config::<_, N>(
            grandpa_protocol_name.clone(),
            metrics.clone(),
            peer_store_handle,
        );
    net_config.add_notification_protocol(grandpa_protocol_config);
    // Warp sync serves GRANDPA finality proofs so fresh nodes can skip ahead.
    let warp_sync = Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new(
        backend.clone(),
        grandpa_link.shared_authority_set().clone(),
        Vec::default(),
    ));
    let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) =
        sc_service::build_network(sc_service::BuildNetworkParams {
            config: &config,
            net_config,
            client: client.clone(),
            transaction_pool: transaction_pool.clone(),
            spawn_handle: task_manager.spawn_handle(),
            import_queue,
            block_announce_validator_builder: None,
            warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)),
            block_relay: None,
            metrics,
        })?;
    if config.offchain_worker.enabled {
        task_manager.spawn_handle().spawn(
            "offchain-workers-runner",
            "offchain-worker",
            sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions {
                runtime_api_provider: client.clone(),
                is_validator: config.role.is_authority(),
                keystore: Some(keystore_container.keystore()),
                offchain_db: backend.offchain_storage(),
                transaction_pool: Some(OffchainTransactionPoolFactory::new(
                    transaction_pool.clone(),
                )),
                network_provider: Arc::new(network.clone()),
                enable_http_requests: true,
                custom_extensions: |_| vec![],
            })
            .run(client.clone(), task_manager.spawn_handle())
            .boxed(),
        );
    }
    // Capture configuration values before `config` is moved into spawn_tasks.
    let role = config.role.clone();
    let force_authoring = config.force_authoring;
    let backoff_authoring_blocks: Option<()> = None;
    let name = config.network.node_name.clone();
    let enable_grandpa = !config.disable_grandpa;
    let prometheus_registry = config.prometheus_registry().cloned();
    let rpc_extensions_builder = {
        let client = client.clone();
        let pool = transaction_pool.clone();
        Box::new(move |deny_unsafe, _| {
            let deps = crate::rpc::FullDeps {
                client: client.clone(),
                pool: pool.clone(),
                deny_unsafe,
            };
            crate::rpc::create_full(deps).map_err(Into::into)
        })
    };
    let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
        network: Arc::new(network.clone()),
        client: client.clone(),
        keystore: keystore_container.keystore(),
        task_manager: &mut task_manager,
        transaction_pool: transaction_pool.clone(),
        rpc_builder: rpc_extensions_builder,
        backend,
        system_rpc_tx,
        tx_handler_controller,
        sync_service: sync_service.clone(),
        config,
        telemetry: telemetry.as_mut(),
    })?;
    // Only authorities author blocks.
    if role.is_authority() {
        let proposer_factory = sc_basic_authorship::ProposerFactory::new(
            task_manager.spawn_handle(),
            client.clone(),
            transaction_pool.clone(),
            prometheus_registry.as_ref(),
            telemetry.as_ref().map(|x| x.handle()),
        );
        let slot_duration = sc_consensus_aura::slot_duration(&*client)?;
        let aura = sc_consensus_aura::start_aura::<AuraPair, _, _, _, _, _, _, _, _, _, _>(
            StartAuraParams {
                slot_duration,
                client,
                select_chain,
                block_import,
                proposer_factory,
                create_inherent_data_providers: move |_, ()| async move {
                    let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
                    let slot =
                        sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
                            *timestamp,
                            slot_duration,
                        );
                    Ok((slot, timestamp))
                },
                force_authoring,
                backoff_authoring_blocks,
                keystore: keystore_container.keystore(),
                sync_oracle: sync_service.clone(),
                justification_sync_link: sync_service.clone(),
                // Author within the first 2/3 of the slot, leaving time for
                // propagation before the slot ends.
                block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32),
                max_block_proposal_slot_portion: None,
                telemetry: telemetry.as_ref().map(|x| x.handle()),
                compatibility_mode: Default::default(),
            },
        )?;
        // the AURA authoring task is considered essential, i.e. if it
        // fails we take down the service with it.
        task_manager
            .spawn_essential_handle()
            .spawn_blocking("aura", Some("block-authoring"), aura);
    }
    if enable_grandpa {
        // if the node isn't actively participating in consensus then it doesn't
        // need a keystore, regardless of which protocol we use below.
        let keystore = if role.is_authority() {
            Some(keystore_container.keystore())
        } else {
            None
        };
        let grandpa_config = sc_consensus_grandpa::Config {
            // FIXME #1578 make this available through chainspec
            gossip_duration: Duration::from_millis(333),
            justification_generation_period: GRANDPA_JUSTIFICATION_PERIOD,
            name: Some(name),
            observer_enabled: false,
            keystore,
            local_role: role,
            telemetry: telemetry.as_ref().map(|x| x.handle()),
            protocol_name: grandpa_protocol_name,
        };
        // start the full GRANDPA voter
        // NOTE: non-authorities could run the GRANDPA observer protocol, but at
        // this point the full voter should provide better guarantees of block
        // and vote data availability than the observer. The observer has not
        // been tested extensively yet and having most nodes in a network run it
        // could lead to finality stalls.
        let grandpa_config = sc_consensus_grandpa::GrandpaParams {
            config: grandpa_config,
            link: grandpa_link,
            network,
            sync: Arc::new(sync_service),
            notification_service: grandpa_notification_service,
            voting_rule: sc_consensus_grandpa::VotingRulesBuilder::default().build(),
            prometheus_registry,
            shared_voter_state: SharedVoterState::empty(),
            telemetry: telemetry.as_ref().map(|x| x.handle()),
            offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool),
        };
        // the GRANDPA voter task is considered infallible, i.e.
        // if it fails we take down the service with it.
        task_manager.spawn_essential_handle().spawn_blocking(
            "grandpa-voter",
            None,
            sc_consensus_grandpa::run_grandpa_voter(grandpa_config)?,
        );
    }
    network_starter.start_network();
    Ok(task_manager)
}

View File

@ -0,0 +1,56 @@
[package]
name = "pallet-aura"
version = "27.0.0"
authors.workspace = true
edition.workspace = true
license = "Apache-2.0"
homepage.workspace = true
repository.workspace = true
description = "FRAME AURA consensus pallet"
readme = "README.md"

[lints]
workspace = true

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

[dependencies]
codec = { features = ["derive", "max-encoded-len"], workspace = true }
log = { version = "0.4.22", default-features = false }
scale-info = { features = ["derive"], workspace = true }
frame-support = { workspace = true }
frame-system = { workspace = true }
pallet-session = { workspace = true }
pallet-timestamp = { workspace = true }
sp-application-crypto = { workspace = true }
sp-consensus-aura = { workspace = true }
sp-staking = { workspace = true }
sp-runtime = { workspace = true }
primitive-types = { workspace = true, default-features = false }

[dev-dependencies]
sp-core = { workspace = true }
sp-io = { workspace = true, default-features = true }

[features]
default = ["std"]
# `std` must re-enable the std feature of *every* no_std dependency, otherwise
# std builds of this crate link no_std variants of its dependencies.
# Previously missing: pallet-session, sp-staking and primitive-types.
std = [
	"codec/std",
	"frame-support/std",
	"frame-system/std",
	"log/std",
	"pallet-session/std",
	"pallet-timestamp/std",
	"primitive-types/std",
	"scale-info/std",
	"sp-application-crypto/std",
	"sp-consensus-aura/std",
	"sp-core/std",
	"sp-io/std",
	"sp-runtime/std",
	"sp-staking/std",
]
try-runtime = [
	"frame-support/try-runtime",
	"frame-system/try-runtime",
	"pallet-session/try-runtime",
	"pallet-timestamp/try-runtime",
	"sp-runtime/try-runtime",
]

View File

@ -0,0 +1,29 @@
# Aura Module
- [`aura::Config`](https://docs.rs/pallet-aura/latest/pallet_aura/pallet/trait.Config.html)
- [`Pallet`](https://docs.rs/pallet-aura/latest/pallet_aura/pallet/struct.Pallet.html)
## Overview
The Aura module extends Aura consensus by managing offline reporting.
## Interface
### Public Functions
- `slot_duration` - Determine the Aura slot-duration based on the Timestamp module configuration.
## Related Modules
- [Timestamp](https://docs.rs/pallet-timestamp/latest/pallet_timestamp/): The Timestamp module is used in Aura to track
consensus rounds (via `slots`).
## References
If you're interested in hacking on this module, it is useful to understand the interaction with
`substrate/primitives/inherents/src/lib.rs` and, specifically, the required implementation of
[`ProvideInherent`](https://docs.rs/sp-inherents/latest/sp_inherents/trait.ProvideInherent.html) and
[`ProvideInherentData`](https://docs.rs/sp-inherents/latest/sp_inherents/trait.ProvideInherentData.html) to create and
check inherents.
License: Apache-2.0

View File

@ -0,0 +1,553 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # Aura Module
//!
//! - [`Config`]
//! - [`Pallet`]
//!
//! ## Overview
//!
//! The Aura module extends Aura consensus by managing offline reporting.
//!
//! ## Interface
//!
//! ### Public Functions
//!
//! - `slot_duration` - Determine the Aura slot-duration based on the Timestamp module
//! configuration.
//!
//! ## Related Modules
//!
//! - [Timestamp](../pallet_timestamp/index.html): The Timestamp module is used in Aura to track
//! consensus rounds (via `slots`).
#![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
use alloc::vec::Vec;
use codec::{Decode, Encode, MaxEncodedLen};
use frame_support::{
traits::{DisabledValidators, FindAuthor, Get, OnTimestampSet, OneSessionHandler},
BoundedSlice, BoundedVec, ConsensusEngineId, Parameter,
};
use log;
use primitive_types::U256;
use sp_consensus_aura::{AuthorityIndex, ConsensusLog, Slot, AURA_ENGINE_ID};
use sp_runtime::{
generic::DigestItem,
traits::{IsMember, Member, SaturatedConversion, Saturating, Zero},
Perbill, RuntimeAppPublic,
};
use sp_staking::{
offence::{Kind, ReportOffence},
SessionIndex,
};
pub mod migrations;
// Test-only modules: gate them behind `cfg(test)` so the mock runtime and
// tests are not compiled into production (and in particular `no_std`) builds.
#[cfg(test)]
mod mock;
#[cfg(test)]
mod tests;
pub use pallet::*;
/// Log target used by this pallet.
const LOG_TARGET: &str = "runtime::aura";
/// A slot duration provider which infers the slot duration from the
/// [`pallet_timestamp::Config::MinimumPeriod`] by multiplying it by two, to ensure
/// that authors have the majority of their slot to author within.
///
/// This was the default behavior of the Aura pallet and may be used for
/// backwards compatibility.
pub struct MinimumPeriodTimesTwo<T>(core::marker::PhantomData<T>);
impl<T: pallet_timestamp::Config> Get<T::Moment> for MinimumPeriodTimesTwo<T> {
    fn get() -> T::Moment {
        // Saturating multiply: a pathological MinimumPeriod near Moment::MAX
        // clamps instead of overflowing.
        <T as pallet_timestamp::Config>::MinimumPeriod::get().saturating_mul(2u32.into())
    }
}
/// Block producing authority missed its slot. This saves the block when this was noticed (if
/// things go well, it's the first non-skipped valid block after the offence) and amount of active
/// validators at that moment to measure impact of this offence on network.
pub struct AuraOffence<Offender> {
    /// Authorities that skipped their slot(s).
    pub offenders: Vec<Offender>,
    /// Block number at which the skipped slot was noticed.
    pub block: u32,
    /// Number of active validators at that moment.
    pub validator_set_count: u32,
}
impl<Offender: Clone> sp_staking::offence::Offence<Offender> for AuraOffence<Offender> {
    // Kind must be exactly 16 bytes.
    const ID: Kind = *b"aura:skip_a_slot";
    /// This is just block number here
    type TimeSlot = u32;
    /// List of offenders (consequently skipped blocks)
    fn offenders(&self) -> Vec<Offender> {
        self.offenders.clone()
    }
    /// A placeholder to satisfy other modules; uses block number as timestamp
    fn session_index(&self) -> SessionIndex {
        self.block
    }
    /// Total number of validators; used to estimate severity of violation and to compute whether
    /// disabling the violators would be better for the network than letting them try again
    fn validator_set_count(&self) -> u32 {
        self.validator_set_count
    }
    /// Block number is used as timestamp
    fn time_slot(&self) -> Self::TimeSlot {
        self.block
    }
    /// TODO calculate how much the validators should be punished
    fn slash_fraction(&self, _offenders_count: u32) -> Perbill {
        // Currently a zero slash: the offence is recorded but not punished.
        Perbill::from_rational(0u32, 1u32) //TODO calculate punishment
    }
}
#[frame_support::pallet]
pub mod pallet {
    use super::*;
    use frame_support::pallet_prelude::*;
    use frame_system::pallet_prelude::*;

    #[pallet::config]
    pub trait Config:
        pallet_timestamp::Config + frame_system::Config + pallet_session::Config
    {
        /// The identifier type for an authority.
        type AuthorityId: Member
            + Parameter
            + RuntimeAppPublic
            + MaybeSerializeDeserialize
            + MaxEncodedLen;

        /// The maximum number of authorities that the pallet can hold.
        type MaxAuthorities: Get<u32>;

        /// A way to check whether a given validator is disabled and should not be authoring blocks.
        /// Blocks authored by a disabled validator will lead to a panic as part of this module's
        /// initialization.
        type DisabledValidators: DisabledValidators;

        /// Whether to allow block authors to create multiple blocks per slot.
        ///
        /// If this is `true`, the pallet will allow slots to stay the same across sequential
        /// blocks. If this is `false`, the pallet will require that subsequent blocks always have
        /// higher slots than previous ones.
        ///
        /// Regardless of the setting of this storage value, the pallet will always enforce the
        /// invariant that slots don't move backwards as the chain progresses.
        ///
        /// The typical value for this should be 'false' unless this pallet is being augmented by
        /// another pallet which enforces some limitation on the number of blocks authors can create
        /// using the same slot.
        type AllowMultipleBlocksPerSlot: Get<bool>;

        /// The slot duration Aura should run with, expressed in milliseconds.
        /// The effective value of this type should not change while the chain is running.
        ///
        /// For backwards compatibility either use [`MinimumPeriodTimesTwo`] or a const.
        #[pallet::constant]
        type SlotDuration: Get<<Self as pallet_timestamp::Config>::Moment>;

        /// The system to report authorities that missed their block slot. This was
        /// designed specially for G6 network.
        type OffenceReporter: ReportOffence<
            Self::ValidatorId,
            Self::ValidatorId,
            AuraOffence<Self::ValidatorId>,
        >;
    }

    #[pallet::pallet]
    pub struct Pallet<T>(core::marker::PhantomData<T>);

    #[pallet::hooks]
    impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
        /// Reads the slot from the Aura pre-runtime digest, enforces slot monotonicity,
        /// panics when the slot's author is disabled, and reports authorities that
        /// skipped their slots since the previous block (unless the gap is genesis or
        /// looks like a whole-chain stall).
        fn on_initialize(block: BlockNumberFor<T>) -> Weight {
            if let Some(new_slot) = Self::current_slot_from_digests() {
                let current_slot = CurrentSlot::<T>::get();

                if T::AllowMultipleBlocksPerSlot::get() {
                    assert!(current_slot <= new_slot, "Slot must not decrease");
                } else {
                    assert!(current_slot < new_slot, "Slot must increase");
                }

                CurrentSlot::<T>::put(new_slot);

                let reads = if let Some(n_authorities) = <Authorities<T>>::decode_len() {
                    // Relies on the pallet invariant that `Authorities` is non-empty;
                    // an empty set would make this modulo panic (see `do_try_state`).
                    let authority_index = *new_slot % n_authorities as u64;
                    if T::DisabledValidators::is_disabled(authority_index as u32) {
                        panic!(
                            "Validator with index {:?} is disabled and should not be attempting to author blocks.",
                            authority_index,
                        );
                    }

                    // Do not trigger violations on genesis and if chain was obviously just stuck
                    // for too long (either there is no active supermajority, or it was technical
                    // downtime). Punishing violators does not make sense if it will just get
                    // grandpa stuck!
                    if (current_slot + 1 != new_slot)
                        && (current_slot != 0u64)
                        && (new_slot - current_slot < n_authorities as u64 / 3)
                    {
                        let validators = <CurrentValidators<T>>::get();
                        let previous_validators = <PreviousValidators<T>>::get();
                        let previous_n_authorities = previous_validators.len();
                        let mut violators = Vec::new();
                        let change_slot = <LastChangeSlot<T>>::get();
                        for missed_slot in (u64::from(current_slot) + 1)..u64::from(new_slot) {
                            if missed_slot < *change_slot {
                                // Guard against modulo-by-zero: `PreviousValidators` can be
                                // empty (e.g. around the very first authority change), in
                                // which case nobody can be blamed for this slot.
                                if previous_n_authorities > 0 {
                                    let violator_index =
                                        missed_slot % previous_n_authorities as u64;
                                    if let Some(violator) =
                                        previous_validators.get(violator_index as usize)
                                    {
                                        violators.push(violator.clone());
                                    }
                                }
                            } else {
                                let violator_index = missed_slot % n_authorities as u64;
                                if let Some(violator) = validators.get(violator_index as usize) {
                                    violators.push(violator.clone());
                                }
                            }
                        }
                        // couple of brutal type conversions for greatly designed generics
                        let block_number: U256 = block.into();
                        // If we can't report missing blocks - let's be kind and ignore it.
                        let _ = T::OffenceReporter::report_offence(
                            Vec::new(),
                            AuraOffence {
                                offenders: violators,
                                // This is a hack: save the reporting block index so it is
                                // known how long the authority should stay offline.
                                // TODO: consider maybe actually doing intended things.
                                block: block_number.as_u32(),
                                validator_set_count: n_authorities as u32,
                            },
                        );
                        3
                    } else {
                        0
                    }
                } else {
                    0
                };
                // There was a TODO mark of following content:
                // [#3398] Generate offence report for all authorities that skipped their
                // slots.
                T::DbWeight::get().reads_writes(2 + reads, 1)
            } else {
                T::DbWeight::get().reads(1)
            }
        }

        #[cfg(feature = "try-runtime")]
        fn try_state(_: BlockNumberFor<T>) -> Result<(), sp_runtime::TryRuntimeError> {
            Self::do_try_state()
        }
    }

    /// The current authority set.
    #[pallet::storage]
    pub type Authorities<T: Config> =
        StorageValue<_, BoundedVec<T::AuthorityId, T::MaxAuthorities>, ValueQuery>;

    /// The current slot of this block.
    ///
    /// This will be set in `on_initialize`.
    #[pallet::storage]
    pub type CurrentSlot<T: Config> = StorageValue<_, Slot, ValueQuery>;

    /// The slot of last authority change.
    ///
    /// This will be set in `change_session`.
    #[pallet::storage]
    pub type LastChangeSlot<T: Config> = StorageValue<_, Slot, ValueQuery>;

    /// The current validators set.
    ///
    /// Should be stored here to be moved into `PreviousValidators`. Decoupled from session pallet
    /// for generalization of accountability
    #[pallet::storage]
    pub type CurrentValidators<T: Config> =
        StorageValue<_, BoundedVec<T::ValidatorId, T::MaxAuthorities>, ValueQuery>;

    /// The previous validators set. Used for accountability.
    #[pallet::storage]
    pub type PreviousValidators<T: Config> =
        StorageValue<_, BoundedVec<T::ValidatorId, T::MaxAuthorities>, ValueQuery>;

    #[pallet::genesis_config]
    #[derive(frame_support::DefaultNoBound)]
    pub struct GenesisConfig<T: Config> {
        /// The initial authority set.
        pub authorities: Vec<T::AuthorityId>,
    }

    #[pallet::genesis_build]
    impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
        fn build(&self) {
            Pallet::<T>::initialize_authorities(&self.authorities);
        }
    }
}
impl<T: Config> Pallet<T> {
    /// Change authorities.
    ///
    /// The storage will be applied immediately.
    /// And aura consensus log will be appended to block's log.
    ///
    /// This is a no-op if `new` is empty.
    pub fn change_authorities(new: BoundedVec<T::AuthorityId, T::MaxAuthorities>) {
        if new.is_empty() {
            log::warn!(target: LOG_TARGET, "Ignoring empty authority change.");
            return;
        }
        // Record when the change happened and rotate the validator bookkeeping so that
        // missed-slot attribution in `on_initialize` can distinguish old from new set.
        <LastChangeSlot<T>>::put(<CurrentSlot<T>>::get());
        <PreviousValidators<T>>::put(&<CurrentValidators<T>>::get());
        let new_validators = pallet_session::Validators::<T>::get();
        // NOTE(review): if the session validator set exceeds `MaxAuthorities`, the
        // excess entries are silently dropped here — confirm this is intended.
        let new_validators = <BoundedVec<_, T::MaxAuthorities>>::truncate_from(new_validators);
        <CurrentValidators<T>>::put(&new_validators);
        <Authorities<T>>::put(&new);
        // Announce the authority change to the consensus engine via a digest item.
        let log = DigestItem::Consensus(
            AURA_ENGINE_ID,
            ConsensusLog::AuthoritiesChange(new.into_inner()).encode(),
        );
        <frame_system::Pallet<T>>::deposit_log(log);
    }
    /// Initial authorities.
    ///
    /// The storage will be applied immediately.
    ///
    /// The authorities length must be equal or less than T::MaxAuthorities.
    ///
    /// Panics if authorities were already initialized or if the set exceeds the bound.
    pub fn initialize_authorities(authorities: &[T::AuthorityId]) {
        if !authorities.is_empty() {
            assert!(
                <Authorities<T>>::get().is_empty(),
                "Authorities are already initialized!"
            );
            let bounded = <BoundedSlice<'_, _, T::MaxAuthorities>>::try_from(authorities)
                .expect("Initial authority set must be less than T::MaxAuthorities");
            <Authorities<T>>::put(bounded);
        }
    }
    /// Return current authorities length.
    pub fn authorities_len() -> usize {
        Authorities::<T>::decode_len().unwrap_or(0)
    }
    /// Get the current slot from the pre-runtime digests.
    ///
    /// Returns `None` when no Aura pre-runtime digest is present, or when the first
    /// matching digest fails to decode as a `Slot`.
    fn current_slot_from_digests() -> Option<Slot> {
        let digest = frame_system::Pallet::<T>::digest();
        let pre_runtime_digests = digest.logs.iter().filter_map(|d| d.as_pre_runtime());
        for (id, mut data) in pre_runtime_digests {
            if id == AURA_ENGINE_ID {
                return Slot::decode(&mut data).ok();
            }
        }
        None
    }
    /// Determine the Aura slot-duration based on the Timestamp module configuration.
    pub fn slot_duration() -> T::Moment {
        T::SlotDuration::get()
    }
    /// Ensure the correctness of the state of this pallet.
    ///
    /// This should be valid before or after each state transition of this pallet.
    ///
    /// # Invariants
    ///
    /// ## `CurrentSlot`
    ///
    /// If we don't allow for multiple blocks per slot, then the current slot must be less than the
    /// maximal slot number. Otherwise, it can be arbitrary.
    ///
    /// ## `Authorities`
    ///
    /// * The authorities must be non-empty.
    /// * The current authority cannot be disabled.
    /// * The number of authorities must be less than or equal to `T::MaxAuthorities`. This however,
    ///   is guarded by the type system.
    #[cfg(any(test, feature = "try-runtime"))]
    pub fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> {
        // We don't have any guarantee that we are already after `on_initialize` and thus we have to
        // check the current slot from the digest or take the last known slot.
        let current_slot =
            Self::current_slot_from_digests().unwrap_or_else(|| CurrentSlot::<T>::get());
        // Check that the current slot is less than the maximal slot number, unless we allow for
        // multiple blocks per slot.
        if !T::AllowMultipleBlocksPerSlot::get() {
            frame_support::ensure!(
                current_slot < u64::MAX,
                "Current slot has reached maximum value and cannot be incremented further.",
            );
        }
        let authorities_len =
            <Authorities<T>>::decode_len().ok_or("Failed to decode authorities length")?;
        // Check that the authorities are non-empty.
        frame_support::ensure!(!authorities_len.is_zero(), "Authorities must be non-empty.");
        // Check that the current authority is not disabled.
        let authority_index = *current_slot % authorities_len as u64;
        frame_support::ensure!(
            !T::DisabledValidators::is_disabled(authority_index as u32),
            "Current validator is disabled and should not be attempting to author blocks.",
        );
        Ok(())
    }
}
impl<T: Config> sp_runtime::BoundToRuntimeAppPublic for Pallet<T> {
    /// The session key type this pallet is bound to: the Aura authority key.
    type Public = T::AuthorityId;
}
impl<T: Config> OneSessionHandler<T::AccountId> for Pallet<T> {
    type Key = T::AuthorityId;
    /// Seed the Aura authority set from the genesis validator keys.
    fn on_genesis_session<'a, I: 'a>(validators: I)
    where
        I: Iterator<Item = (&'a T::AccountId, T::AuthorityId)>,
    {
        let authorities = validators.map(|(_, k)| k).collect::<Vec<_>>();
        Self::initialize_authorities(&authorities);
    }
    /// Apply the new session's validator keys as the Aura authorities, truncating to
    /// `MaxAuthorities` (with a warning) when the list is too long.
    fn on_new_session<'a, I: 'a>(changed: bool, validators: I, _queued_validators: I)
    where
        I: Iterator<Item = (&'a T::AccountId, T::AuthorityId)>,
    {
        // instant changes
        if changed {
            let next_authorities = validators.map(|(_, k)| k).collect::<Vec<_>>();
            let last_authorities = Authorities::<T>::get();
            // Only apply (and emit a digest for) an actual change.
            if last_authorities != next_authorities {
                if next_authorities.len() as u32 > T::MaxAuthorities::get() {
                    log::warn!(
                        target: LOG_TARGET,
                        "next authorities list larger than {}, truncating",
                        T::MaxAuthorities::get(),
                    );
                }
                let bounded = <BoundedVec<_, T::MaxAuthorities>>::truncate_from(next_authorities);
                Self::change_authorities(bounded);
            }
        }
    }
    /// Emit a consensus digest informing the engine that validator `i` was disabled.
    fn on_disabled(i: u32) {
        let log = DigestItem::Consensus(
            AURA_ENGINE_ID,
            ConsensusLog::<T::AuthorityId>::OnDisabled(i as AuthorityIndex).encode(),
        );
        <frame_system::Pallet<T>>::deposit_log(log);
    }
}
impl<T: Config> FindAuthor<u32> for Pallet<T> {
    /// Locate the Aura pre-runtime digest among `digests` and map its slot to the
    /// index of the authority scheduled for that slot.
    ///
    /// Returns `None` if no Aura digest is present or the first one fails to decode.
    fn find_author<'a, I>(digests: I) -> Option<u32>
    where
        I: 'a + IntoIterator<Item = (ConsensusEngineId, &'a [u8])>,
    {
        let (_, mut payload) = digests
            .into_iter()
            .find(|(engine, _)| *engine == AURA_ENGINE_ID)?;
        let slot = Slot::decode(&mut payload).ok()?;
        Some((*slot % Self::authorities_len() as u64) as u32)
    }
}
/// We can not implement `FindAuthor` twice, because the compiler does not know if
/// `u32 == T::AuthorityId` and thus, prevents us to implement the trait twice.
#[doc(hidden)]
pub struct FindAccountFromAuthorIndex<T, Inner>(core::marker::PhantomData<(T, Inner)>);

impl<T: Config, Inner: FindAuthor<u32>> FindAuthor<T::AuthorityId>
    for FindAccountFromAuthorIndex<T, Inner>
{
    /// Resolve the author index produced by `Inner` into the corresponding
    /// authority ID from the current `Authorities` set.
    fn find_author<'a, I>(digests: I) -> Option<T::AuthorityId>
    where
        I: 'a + IntoIterator<Item = (ConsensusEngineId, &'a [u8])>,
    {
        let index = Inner::find_author(digests)? as usize;
        Authorities::<T>::get().get(index).cloned()
    }
}
/// Find the authority ID of the Aura authority who authored the current block.
///
/// Convenience alias that resolves the author index from the Aura pre-runtime digest
/// into the matching entry of `Authorities`.
pub type AuraAuthorId<T> = FindAccountFromAuthorIndex<T, Pallet<T>>;
impl<T: Config> IsMember<T::AuthorityId> for Pallet<T> {
    /// `true` iff `authority_id` is in the current `Authorities` set.
    fn is_member(authority_id: &T::AuthorityId) -> bool {
        Authorities::<T>::get().contains(authority_id)
    }
}
impl<T: Config> OnTimestampSet<T::Moment> for Pallet<T> {
    /// Cross-check the timestamp-derived slot against the slot recorded in
    /// `CurrentSlot` during `on_initialize`; panic on any mismatch.
    fn on_timestamp_set(moment: T::Moment) {
        let duration = Self::slot_duration();
        assert!(!duration.is_zero(), "Aura slot duration cannot be zero.");
        // The slot implied by the timestamp is simply the moment divided by the duration.
        let expected_slot = Slot::from((moment / duration).saturated_into::<u64>());
        assert_eq!(
            CurrentSlot::<T>::get(),
            expected_slot,
            "Timestamp slot must match `CurrentSlot`. This likely means that the configured block \
             time in the node and/or rest of the runtime is not compatible with Aura's \
             `SlotDuration`",
        );
    }
}

View File

@ -0,0 +1,45 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Migrations for the AURA pallet.
use frame_support::{pallet_prelude::*, traits::Get, weights::Weight};
// Storage instance describing the legacy `LastTimestamp` value, located under the
// prefix of the pallet named by `T::PalletPrefix`.
struct __LastTimestamp<T>(core::marker::PhantomData<T>);
impl<T: RemoveLastTimestamp> frame_support::traits::StorageInstance for __LastTimestamp<T> {
    fn pallet_prefix() -> &'static str {
        T::PalletPrefix::get()
    }
    const STORAGE_PREFIX: &'static str = "LastTimestamp";
}
// Typed view over the obsolete storage value; only used so it can be removed.
type LastTimestamp<T> = StorageValue<__LastTimestamp<T>, (), ValueQuery>;
/// Configuration trait for the [`remove_last_timestamp`] migration.
pub trait RemoveLastTimestamp: super::Config {
    /// The storage prefix (pallet name) under which `LastTimestamp` was stored.
    type PalletPrefix: Get<&'static str>;
}
/// Remove the `LastTimestamp` storage value.
///
/// This storage value was removed and replaced by `CurrentSlot`. As we only remove this storage
/// value, it is safe to call this method multiple times.
///
/// This migration requires a type `T` that implements [`RemoveLastTimestamp`].
pub fn remove_last_timestamp<T: RemoveLastTimestamp>() -> Weight {
    // `kill` is idempotent, which is what makes re-running this migration safe.
    LastTimestamp::<T>::kill();
    T::DbWeight::get().writes(1)
}

View File

@ -0,0 +1,113 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Test utilities
#![cfg(test)]
use crate as pallet_aura;
use frame_support::{
derive_impl, parameter_types,
traits::{ConstU32, ConstU64, DisabledValidators},
};
use sp_consensus_aura::{ed25519::AuthorityId, AuthorityIndex};
use sp_runtime::{testing::UintAuthorityId, BuildStorage};
// Mock block type for the test runtime.
type Block = frame_system::mocking::MockBlock<Test>;
// Slot duration used by the mock, in timestamp units.
const SLOT_DURATION: u64 = 2;
// Minimal test runtime wiring together the system, timestamp and aura pallets.
frame_support::construct_runtime!(
    pub enum Test
    {
        System: frame_system,
        Timestamp: pallet_timestamp,
        Aura: pallet_aura,
    }
);
#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
impl frame_system::Config for Test {
    // Everything except the block type comes from the test default config.
    type Block = Block;
}
impl pallet_timestamp::Config for Test {
    type Moment = u64;
    // Forward timestamp updates to Aura so it can cross-check the slot.
    type OnTimestampSet = Aura;
    // Half the slot duration, matching `MinimumPeriodTimesTwo`'s convention.
    type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>;
    type WeightInfo = ();
}
parameter_types! {
    // Sorted list of validator indices the mock considers disabled.
    static DisabledValidatorTestValue: Vec<AuthorityIndex> = Default::default();
    pub static AllowMultipleBlocksPerSlot: bool = false;
}
pub struct MockDisabledValidators;

impl MockDisabledValidators {
    /// Mark the validator at `index` as disabled, keeping the backing list
    /// sorted and free of duplicates.
    pub fn disable_validator(index: AuthorityIndex) {
        DisabledValidatorTestValue::mutate(|disabled| {
            match disabled.binary_search(&index) {
                // Already recorded: nothing to do.
                Ok(_) => {}
                // Insert at the position that keeps the list sorted.
                Err(pos) => disabled.insert(pos, index),
            }
        })
    }
}
impl DisabledValidators for MockDisabledValidators {
    /// A validator is disabled iff its index appears in the (sorted) test list.
    fn is_disabled(index: AuthorityIndex) -> bool {
        let disabled = DisabledValidatorTestValue::get();
        disabled.binary_search(&index).is_ok()
    }

    /// All indices currently marked as disabled.
    fn disabled_validators() -> Vec<u32> {
        DisabledValidatorTestValue::get()
    }
}
impl pallet_aura::Config for Test {
    type AuthorityId = AuthorityId;
    type DisabledValidators = MockDisabledValidators;
    type MaxAuthorities = ConstU32<10>;
    type AllowMultipleBlocksPerSlot = AllowMultipleBlocksPerSlot;
    type SlotDuration = ConstU64<SLOT_DURATION>;
    // NOTE(review): the pallet's `Config` in this fork requires `pallet_session::Config`
    // as a supertrait and an `OffenceReporter` associated type, but this mock provides
    // neither — confirm the mock still compiles against the modified pallet.
}
/// Build test externalities with the given authority seeds in Aura's genesis config.
fn build_ext(authorities: Vec<u64>) -> sp_io::TestExternalities {
    let mut storage = frame_system::GenesisConfig::<Test>::default()
        .build_storage()
        .unwrap();
    // Convert the plain seeds into authority public keys.
    let authorities = authorities
        .into_iter()
        .map(|a| UintAuthorityId(a).to_public_key())
        .collect();
    pallet_aura::GenesisConfig::<Test> { authorities }
        .assimilate_storage(&mut storage)
        .unwrap();
    storage.into()
}
/// Run `test` inside externalities seeded with `authorities`, then verify that the
/// pallet's storage invariants still hold.
pub fn build_ext_and_execute_test(authorities: Vec<u64>, test: impl FnOnce() -> ()) {
    build_ext(authorities).execute_with(|| {
        test();
        Aura::do_try_state().expect("Storage invariants should hold")
    });
}

View File

@ -0,0 +1,130 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Tests for the module.
#![cfg(test)]
use super::pallet;
use crate::mock::{build_ext_and_execute_test, Aura, MockDisabledValidators, System, Test};
use codec::Encode;
use frame_support::traits::OnInitialize;
use sp_consensus_aura::{Slot, AURA_ENGINE_ID};
use sp_runtime::{Digest, DigestItem};
#[test]
fn initial_values() {
    build_ext_and_execute_test(vec![0, 1, 2, 3], || {
        // Before any block is initialized the slot still carries its default value.
        assert_eq!(pallet::CurrentSlot::<Test>::get(), 0u64);
        // `authorities_len` must agree with the stored authority set.
        assert_eq!(
            pallet::Authorities::<Test>::get().len(),
            Aura::authorities_len()
        );
        assert_eq!(Aura::authorities_len(), 4);
    });
}
// `on_initialize` must panic when the slot's scheduled author has been disabled.
#[test]
#[should_panic(
    expected = "Validator with index 1 is disabled and should not be attempting to author blocks."
)]
fn disabled_validators_cannot_author_blocks() {
    build_ext_and_execute_test(vec![0, 1, 2, 3], || {
        // slot 1 should be authored by validator at index 1
        let slot = Slot::from(1);
        let pre_digest = Digest {
            logs: vec![DigestItem::PreRuntime(AURA_ENGINE_ID, slot.encode())],
        };
        System::reset_events();
        System::initialize(&42, &System::parent_hash(), &pre_digest);
        // let's disable the validator
        MockDisabledValidators::disable_validator(1);
        // and we should not be able to initialize the block
        Aura::on_initialize(42);
    });
}
// With `AllowMultipleBlocksPerSlot = false`, re-using a slot must panic.
#[test]
#[should_panic(expected = "Slot must increase")]
fn pallet_requires_slot_to_increase_unless_allowed() {
    build_ext_and_execute_test(vec![0, 1, 2, 3], || {
        crate::mock::AllowMultipleBlocksPerSlot::set(false);
        let slot = Slot::from(1);
        let pre_digest = Digest {
            logs: vec![DigestItem::PreRuntime(AURA_ENGINE_ID, slot.encode())],
        };
        System::reset_events();
        System::initialize(&42, &System::parent_hash(), &pre_digest);
        // and we should not be able to initialize the block with the same slot a second time.
        Aura::on_initialize(42);
        Aura::on_initialize(42);
    });
}
// With `AllowMultipleBlocksPerSlot = true`, re-using the same slot is permitted.
#[test]
fn pallet_can_allow_unchanged_slot() {
    build_ext_and_execute_test(vec![0, 1, 2, 3], || {
        let slot = Slot::from(1);
        let pre_digest = Digest {
            logs: vec![DigestItem::PreRuntime(AURA_ENGINE_ID, slot.encode())],
        };
        System::reset_events();
        System::initialize(&42, &System::parent_hash(), &pre_digest);
        crate::mock::AllowMultipleBlocksPerSlot::set(true);
        // and we should be able to initialize the block with the same slot a second time.
        Aura::on_initialize(42);
        Aura::on_initialize(42);
    });
}
// Even when multiple blocks per slot are allowed, the slot may never go backwards.
#[test]
#[should_panic(expected = "Slot must not decrease")]
fn pallet_always_rejects_decreasing_slot() {
    build_ext_and_execute_test(vec![0, 1, 2, 3], || {
        let slot = Slot::from(2);
        let pre_digest = Digest {
            logs: vec![DigestItem::PreRuntime(AURA_ENGINE_ID, slot.encode())],
        };
        System::reset_events();
        System::initialize(&42, &System::parent_hash(), &pre_digest);
        crate::mock::AllowMultipleBlocksPerSlot::set(true);
        Aura::on_initialize(42);
        System::finalize();
        // A follow-up block carrying an earlier slot must be rejected.
        let earlier_slot = Slot::from(1);
        let pre_digest = Digest {
            logs: vec![DigestItem::PreRuntime(
                AURA_ENGINE_ID,
                earlier_slot.encode(),
            )],
        };
        System::initialize(&43, &System::parent_hash(), &pre_digest);
        Aura::on_initialize(43);
    });
}

View File

@ -0,0 +1,31 @@
[package]
name = "pallet-feeless"
description = "G6 Chain Feeless transactions FRAME pallet"
version.workspace = true
homepage.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
edition.workspace = true
rust-version.workspace = true
[dependencies]
frame-benchmarking = { workspace = true, default-features = false }
frame-support = { workspace = true, default-features = false }
frame-system = { workspace = true, default-features = false }
codec = { workspace = true, default-features = false, features = ["derive",] }
scale-info = { workspace = true, default-features = false }
[lints]
workspace = true
[features]
default = ["std"]
std = [
	"codec/std",
	"frame-benchmarking/std",
	"frame-support/std",
	"frame-system/std",
	"scale-info/std",
]
runtime-benchmarks = [
	"frame-benchmarking/runtime-benchmarks",
	"frame-support/runtime-benchmarks",
	"frame-system/runtime-benchmarks",
]
try-runtime = [
	"frame-support/try-runtime",
	"frame-system/try-runtime",
]

View File

@ -0,0 +1,2 @@
# G6 pallet for feeless transactions

View File

@ -0,0 +1,24 @@
//! Benchmarking setup for pallet-template
#![cfg(feature = "runtime-benchmarks")]
use super::*;
#[allow(unused)]
use crate::Pallet as Template;
use frame_benchmarking::v2::*;
use frame_system::RawOrigin;
#[benchmarks]
mod benchmarks {
    use super::*;
    // Benchmark the `do_something` no-op extrinsic with a signed origin.
    #[benchmark]
    fn do_something() {
        let caller: T::AccountId = whitelisted_caller();
        #[extrinsic_call]
        do_something(RawOrigin::Signed(caller));
        // NOTE(review): no post-condition is asserted; consider verifying an observable
        // effect once the pallet does real work.
        //assert_eq!(Something::<T>::get(), Some(value));
    }
    impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test);
}

View File

@ -0,0 +1,45 @@
#![cfg_attr(not(feature = "std"), no_std)]
pub use pallet::*;
pub use weights::*;
#[cfg(feature = "runtime-benchmarks")]
mod benchmarking;
pub mod weights;
// NOTE(review): `dev_mode` relaxes weight, call-index and storage-hasher checks; it
// should be removed before this pallet ships in a production runtime.
#[frame_support::pallet(dev_mode)]
pub mod pallet {
    use super::*;
    use frame_support::pallet_prelude::*;
    use frame_system::pallet_prelude::*;
    // Skeleton pallet for feeless transactions; currently a single no-op call.
    #[pallet::pallet]
    pub struct Pallet<T>(_);
    #[pallet::config]
    pub trait Config: frame_system::Config {
        /// The overarching runtime event type.
        type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
        /// Weight information for this pallet's extrinsics.
        type WeightInfo: WeightInfo;
    }
    #[pallet::event]
    #[pallet::generate_deposit(pub(super) fn deposit_event)]
    pub enum Event<T: Config> {
        /// Placeholder event emitted by `do_something`.
        DummyEvent, // TODO change this
    }
    #[pallet::error]
    pub enum Error<T> {
        /// Placeholder error; currently never returned.
        DummyError, // TODO change this
    }
    #[pallet::call]
    impl<T: Config> Pallet<T> {
        /// Signed no-op that only emits `Event::DummyEvent`.
        #[pallet::call_index(0)]
        #[pallet::weight(T::WeightInfo::do_something())]
        pub fn do_something(origin: OriginFor<T>) -> DispatchResult {
            let _who = ensure_signed(origin)?;
            Self::deposit_event(Event::DummyEvent);
            Ok(())
        }
    }
}

View File

@ -0,0 +1,90 @@
//! Autogenerated weights for pallet_template
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
//! HOSTNAME: `Alexs-MacBook-Pro-2.local`, CPU: `<UNKNOWN>`
//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024
// Executed Command:
// ../../target/release/node-template
// benchmark
// pallet
// --chain
// dev
// --pallet
// pallet_template
// --extrinsic
// *
// --steps=50
// --repeat=20
// --wasm-execution=compiled
// --output
// pallets/template/src/weights.rs
// --template
// ../../.maintain/frame-weight-template.hbs
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
#![allow(unused_imports)]
use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
use core::marker::PhantomData;
/// Weight functions needed for pallet_template.
///
/// NOTE(review): `cause_error` has no corresponding call in the feeless pallet's `lib.rs`
/// — confirm whether it is still needed.
pub trait WeightInfo {
    fn do_something() -> Weight;
    fn cause_error() -> Weight;
}
/// Weights for pallet_template using the Substrate node and recommended hardware.
pub struct SubstrateWeight<T>(PhantomData<T>);
// Autogenerated by the Substrate benchmark CLI; do not hand-tune these numbers.
impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
    /// Storage: TemplateModule Something (r:0 w:1)
    /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
    fn do_something() -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `0`
        //  Estimated: `0`
        // Minimum execution time: 8_000_000 picoseconds.
        Weight::from_parts(9_000_000, 0)
            .saturating_add(T::DbWeight::get().writes(1_u64))
    }
    /// Storage: TemplateModule Something (r:1 w:1)
    /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
    fn cause_error() -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `32`
        //  Estimated: `1489`
        // Minimum execution time: 6_000_000 picoseconds.
        Weight::from_parts(6_000_000, 1489)
            .saturating_add(T::DbWeight::get().reads(1_u64))
            .saturating_add(T::DbWeight::get().writes(1_u64))
    }
}
// For backwards compatibility and tests: same figures as `SubstrateWeight`, but using
// `RocksDbWeight` so the unit impl needs no runtime config.
impl WeightInfo for () {
    /// Storage: TemplateModule Something (r:0 w:1)
    /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
    fn do_something() -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `0`
        //  Estimated: `0`
        // Minimum execution time: 8_000_000 picoseconds.
        Weight::from_parts(9_000_000, 0)
            .saturating_add(RocksDbWeight::get().writes(1_u64))
    }
    /// Storage: TemplateModule Something (r:1 w:1)
    /// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
    fn cause_error() -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `32`
        //  Estimated: `1489`
        // Minimum execution time: 6_000_000 picoseconds.
        Weight::from_parts(6_000_000, 1489)
            .saturating_add(RocksDbWeight::get().reads(1_u64))
            .saturating_add(RocksDbWeight::get().writes(1_u64))
    }
}

View File

@ -0,0 +1,65 @@
[package]
name = "pallet-identity"
version = "28.0.0"
authors.workspace = true
edition.workspace = true
license = "Apache-2.0"
homepage = "https://substrate.io"
repository.workspace = true
description = "FRAME identity management pallet"
readme = "README.md"
[lints]
workspace = true
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] }
enumflags2 = { version = "0.7.7" }
log = { version = "0.4.17", default-features = false }
scale-info = { version = "2.10.0", default-features = false, features = ["derive"] }
frame-benchmarking = { workspace = true, default-features = false, optional = true }
frame-support = { workspace = true, default-features = false }
frame-system = { workspace = true, default-features = false }
sp-runtime = { workspace = true, default-features = false }
sp-std = { workspace = true, default-features = false }
sp-io = { workspace = true }
[dev-dependencies]
pallet-balances = { workspace = true }
sp-core = { workspace = true }
sp-keystore = { workspace = true }
[features]
default = ["std"]
std = [
"codec/std",
"enumflags2/std",
"frame-benchmarking?/std",
"frame-support/std",
"frame-system/std",
"log/std",
"pallet-balances/std",
"scale-info/std",
"sp-core/std",
"sp-io/std",
"sp-keystore/std",
"sp-runtime/std",
"sp-std/std",
]
runtime-benchmarks = [
"frame-benchmarking/runtime-benchmarks",
"frame-support/runtime-benchmarks",
"frame-system/runtime-benchmarks",
"pallet-balances/runtime-benchmarks",
"sp-runtime/runtime-benchmarks",
]
try-runtime = [
"frame-support/try-runtime",
"frame-system/try-runtime",
"pallet-balances/try-runtime",
"sp-runtime/try-runtime",
]

View File

@ -0,0 +1,56 @@
# Identity Module
- [`identity::Config`](https://docs.rs/pallet-identity/latest/pallet_identity/trait.Config.html)
- [`Call`](https://docs.rs/pallet-identity/latest/pallet_identity/enum.Call.html)
## Overview
A federated naming system, allowing for multiple registrars to be added from a specified origin.
Registrars can set a fee to provide identity-verification service. Anyone can put forth a
proposed identity for a fixed deposit and ask for review by any number of registrars (paying
each of their fees). Registrar judgements are given as an `enum`, allowing for sophisticated,
multi-tier opinions.
Some judgements are identified as *sticky*, which means they cannot be removed except by
complete removal of the identity, or by the registrar. Judgements are allowed to represent a
portion of funds that have been reserved for the registrar.
A super-user can remove accounts and in doing so, slash the deposit.
All accounts may also have a limited number of sub-accounts which may be specified by the owner;
by definition, these have equivalent ownership and each has an individual name.
The number of registrars should be limited, and the deposit made sufficiently large, to ensure
no state-bloat attack is viable.
## Interface
### Dispatchable Functions
#### For general users
- `set_identity` - Set the associated identity of an account; a small deposit is reserved if not
already taken.
- `clear_identity` - Remove an account's associated identity; the deposit is returned.
- `request_judgement` - Request a judgement from a registrar, paying a fee.
- `cancel_request` - Cancel the previous request for a judgement.
#### For general users with sub-identities
- `set_subs` - Set the sub-accounts of an identity.
- `add_sub` - Add a sub-identity to an identity.
- `remove_sub` - Remove a sub-identity of an identity.
- `rename_sub` - Rename a sub-identity of an identity.
- `quit_sub` - Remove a sub-identity of an identity (called by the sub-identity).
#### For registrars
- `set_fee` - Set the fee required to be paid for a judgement to be given by the registrar.
- `set_fields` - Set the fields that a registrar cares about in their judgements.
- `provide_judgement` - Provide a judgement to an identity.
#### For super-users
- `add_registrar` - Add a new registrar to the system.
- `kill_identity` - Forcibly remove the associated identity; the deposit is lost.
[`Call`]: ./enum.Call.html
[`Config`]: ./trait.Config.html
License: Apache-2.0

View File

@ -0,0 +1,89 @@
use crate::{Data, IdentityInformationProvider};
use codec::{Decode, Encode, MaxEncodedLen};
#[cfg(feature = "runtime-benchmarks")]
use enumflags2::BitFlag;
use enumflags2::{bitflags, BitFlags};
use frame_support::{traits::Get, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound};
use scale_info::TypeInfo;
use sp_runtime::{BoundedVec, RuntimeDebug};
use sp_std::prelude::*;
use crate::{
generate_identity_field_enum, generate_identity_field_enum_and_type_info,
generate_identity_info_default_impl, generate_identity_info_impl, generate_identity_info_impls,
generate_identity_information_provider_impl, generate_type_info,
};
// Maximum raw byte length used when generating benchmark data for most identity fields.
// Only referenced from benchmark code paths, hence the `dead_code` allowance when the
// `runtime-benchmarks` feature is off.
#[cfg_attr(not(feature = "runtime-benchmarks"), allow(dead_code))]
const DEFAULT_DATA_MAX_LENGTH: usize = 32;
// Larger cap for the free-form `bio` field.
#[cfg_attr(not(feature = "runtime-benchmarks"), allow(dead_code))]
const BIO_DATA_MAX_LENGTH: usize = 256;
// Generate the `IdentityField` bitflag enum together with its `TypeInfo` implementation,
// one flag per explicit field of the `IdentityInfo` struct declared below.
generate_identity_field_enum_and_type_info!(
    Display,
    FirstName,
    LastName,
    Email,
    Address,
    TelephoneNumber,
    Bio,
);
/// Information concerning the identity of the controller of an account.
///
/// NOTE: This should be stored at the end of the storage item to facilitate the addition of extra
/// fields in a backwards compatible way through a specialized `Decode` impl.
#[derive(
    CloneNoBound,
    Encode,
    Decode,
    EqNoBound,
    MaxEncodedLen,
    PartialEqNoBound,
    RuntimeDebugNoBound,
    TypeInfo,
)]
#[codec(mel_bound())]
#[scale_info(skip_type_params(FieldLimit))]
pub struct IdentityInfo<FieldLimit: Get<u32>> {
    /// Additional fields of the identity that are not catered for with the struct's explicit
    /// fields.
    pub additional: BoundedVec<(Data, Data), FieldLimit>,
    /// A reasonable display name for the controller of the account. This should be whatever it is
    /// that it is typically known as and should not be confusable with other entities, given
    /// reasonable context.
    ///
    /// Stored as UTF-8.
    pub display: Data,
    /// First (given) name of the controller of the account.
    ///
    /// Stored as UTF-8.
    pub first_name: Data,
    /// Last (family) name of the controller of the account. Stored as UTF-8.
    pub last_name: Data,
    /// The email address of the controller of the account.
    ///
    /// Stored as UTF-8.
    pub email: Data,
    /// Address of the controller of the account — presumably postal; confirm with the
    /// consuming UI. Stored as UTF-8.
    pub address: Data,
    /// Telephone number of the controller of the account. Stored as UTF-8.
    pub telephone_number: Data,
    /// Free-form biography text; benchmarked with the larger `BIO_DATA_MAX_LENGTH` cap.
    /// Stored as UTF-8.
    pub bio: Data,
}
// Generate the `IdentityInformationProvider`, `Default` and field-flag impls for
// `IdentityInfo`, mapping each `IdentityField` flag to its struct field and the maximum
// benchmark data length for that field.
generate_identity_info_impls!(
    IdentityInfo,
    FieldLimit,
    (Display, display, DEFAULT_DATA_MAX_LENGTH),
    (FirstName, first_name, DEFAULT_DATA_MAX_LENGTH),
    (LastName, last_name, DEFAULT_DATA_MAX_LENGTH),
    (Email, email, DEFAULT_DATA_MAX_LENGTH),
    (Address, address, DEFAULT_DATA_MAX_LENGTH),
    (TelephoneNumber, telephone_number, DEFAULT_DATA_MAX_LENGTH),
    (Bio, bio, BIO_DATA_MAX_LENGTH),
);

View File

@ -0,0 +1,194 @@
use codec::{Decode, Encode, MaxEncodedLen};
#[cfg(feature = "runtime-benchmarks")]
use enumflags2::BitFlag;
use enumflags2::{bitflags, BitFlags};
use frame_support::{traits::Get, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound};
use crate::{Data, IdentityInformationProvider};
use scale_info::TypeInfo;
use sp_runtime::{BoundedVec, RuntimeDebug};
use sp_std::prelude::*;
// mod identity_macros;
use crate::identity_macros::{generate_type_info, generate_identity_info_default_impl, generate_identity_info_impl};
// Maximum raw byte length used for most benchmark identity data.
#[cfg_attr(not(feature = "runtime-benchmarks"), allow(dead_code))]
const DEFAULT_DATA_MAX_LENGTH: usize = 32;
// Larger cap for the free-form `bio` field.
#[cfg_attr(not(feature = "runtime-benchmarks"), allow(dead_code))]
const BIO_DATA_MAX_LENGTH: usize = 256;
// NOTE(review): the two constants below are not referenced anywhere in this file's visible
// code — confirm whether they belong to the link/background fields of another IdentityInfo
// variant or can be removed.
#[cfg_attr(not(feature = "runtime-benchmarks"), allow(dead_code))]
const LINK_DATA_MAX_LENGTH: usize = 128;
#[cfg_attr(not(feature = "runtime-benchmarks"), allow(dead_code))]
const BACKGROUND_MAX_LENGTH: usize = 32;
/// The fields that we use to identify the owner of an account with. Each corresponds to a field
/// in the `IdentityInfo` struct.
///
/// NOTE(review): only the first seven variants (through `Bio`) have matching fields in the
/// `IdentityInfo` struct below — `Website` through `Mastodon` can never be reported by
/// `IdentityInfo::fields()`; confirm whether they are reserved for a future struct extension.
/// With `#[bitflags]` + `#[repr(u64)]`, each variant occupies one bit, so variant order is
/// part of the on-chain encoding and must not be changed.
#[bitflags]
#[repr(u64)]
#[derive(Clone, Copy, PartialEq, Eq, RuntimeDebug)]
pub enum IdentityField {
    Display,
    FirstName,
    LastName,
    Email,
    Address,
    TelephoneNumber,
    Bio,
    Website,
    X,
    Telegram,
    Discord,
    Linkedin,
    Instagram,
    Medium,
    YouTube,
    Git,
    Mastodon
}
use scale_info::{build::Variants, Path, Type, TypeInfo};
impl TypeInfo for IdentityField {
type Identity = Self;
fn type_info() -> scale_info::Type {
Type::builder()
.path(Path::new("IdentityField", module_path!()))
.variant(
Variants::new()
.variant("Display", |v| v.index(0))
.variant("FirstName", |v| v.index(1))
.variant("LastName", |v| v.index(2))
.variant("Email", |v| v.index(3))
.variant("Address", |v| v.index(4))
.variant("TelephoneNumber", |v| v.index(5))
.variant("Bio", |v| v.index(6)),
)
}
}
/// Information concerning the identity of the controller of an account.
///
/// NOTE: This should be stored at the end of the storage item to facilitate the addition of extra
/// fields in a backwards compatible way through a specialized `Decode` impl.
#[derive(
    CloneNoBound,
    Encode,
    Decode,
    EqNoBound,
    MaxEncodedLen,
    PartialEqNoBound,
    RuntimeDebugNoBound,
    TypeInfo,
)]
#[codec(mel_bound())]
#[scale_info(skip_type_params(FieldLimit))]
pub struct IdentityInfo<FieldLimit: Get<u32>> {
    /// Additional fields of the identity that are not catered for with the struct's explicit
    /// fields.
    pub additional: BoundedVec<(Data, Data), FieldLimit>,
    /// A reasonable display name for the controller of the account. This should be whatever it is
    /// that it is typically known as and should not be confusable with other entities, given
    /// reasonable context.
    ///
    /// Stored as UTF-8.
    pub display: Data,
    /// First (given) name of the controller of the account; together with `last_name` this
    /// replaces the upstream single `legal` field (kept below for reference).
    ///
    /// Stored as UTF-8.
    // pub legal: Data,
    pub first_name: Data,
    /// Last (family) name of the controller of the account. Stored as UTF-8.
    pub last_name: Data,
    /// The email address of the controller of the account.
    ///
    /// Stored as UTF-8.
    pub email: Data,
    /// Address of the controller of the account — presumably postal; confirm with the
    /// consuming UI. Stored as UTF-8.
    pub address: Data,
    /// Telephone number of the controller of the account. Stored as UTF-8.
    pub telephone_number: Data,
    /// Free-form biography text; benchmarked with the larger `BIO_DATA_MAX_LENGTH` cap.
    /// Stored as UTF-8.
    pub bio: Data,
}
impl<FieldLimit: Get<u32> + 'static> IdentityInformationProvider for IdentityInfo<FieldLimit> {
    /// Flags are packed as a `u64` bit mask, one bit per `IdentityField` variant.
    type FieldsIdentifier = u64;
    /// Returns `true` iff every flag in `fields` is set on this identity (subset check via
    /// bit masking against the populated-field flags from `fields()`).
    fn has_identity(&self, fields: Self::FieldsIdentifier) -> bool {
        self.fields().bits() & fields == fields
    }
    /// Benchmark-only: build a fully-populated identity with maximum-length raw data in
    /// every field and a full `additional` vector.
    #[cfg(feature = "runtime-benchmarks")]
    fn create_identity_info() -> Self {
        let data = Data::Raw(vec![0; DEFAULT_DATA_MAX_LENGTH].try_into().unwrap());
        let bio_data = Data::Raw(vec![0; BIO_DATA_MAX_LENGTH].try_into().unwrap());
        IdentityInfo {
            additional: vec![(data.clone(), data.clone()); FieldLimit::get().try_into().unwrap()]
                .try_into()
                .unwrap(),
            display: data.clone(),
            first_name: data.clone(),
            last_name: data.clone(),
            email: data.clone(),
            address: data.clone(),
            telephone_number: data.clone(),
            bio: bio_data.clone(),
        }
    }
    /// Benchmark-only: a mask with every `IdentityField` flag set.
    ///
    /// NOTE(review): `IdentityField::all()` includes `Website`..`Mastodon`, which `fields()`
    /// below can never set, so `has_identity(all_fields())` cannot succeed for this struct —
    /// confirm this is intended for the benchmarks that use it.
    #[cfg(feature = "runtime-benchmarks")]
    fn all_fields() -> Self::FieldsIdentifier {
        IdentityField::all().bits()
    }
}
impl<FieldLimit: Get<u32>> Default for IdentityInfo<FieldLimit> {
    /// An empty identity: no additional entries and every explicit field unset.
    fn default() -> Self {
        Self {
            additional: Default::default(),
            display: Data::None,
            first_name: Data::None,
            last_name: Data::None,
            email: Data::None,
            address: Data::None,
            telephone_number: Data::None,
            bio: Data::None,
        }
    }
}
impl<FieldLimit: Get<u32>> IdentityInfo<FieldLimit> {
    /// Compute the set of flags for the explicit fields that are populated (non-`None`).
    pub(crate) fn fields(&self) -> BitFlags<IdentityField> {
        // Pair every explicit field's presence with its flag, then fold the present ones
        // into an initially-empty flag set.
        let presence = [
            (!self.display.is_none(), IdentityField::Display),
            (!self.first_name.is_none(), IdentityField::FirstName),
            (!self.last_name.is_none(), IdentityField::LastName),
            (!self.email.is_none(), IdentityField::Email),
            (!self.address.is_none(), IdentityField::Address),
            (!self.telephone_number.is_none(), IdentityField::TelephoneNumber),
            (!self.bio.is_none(), IdentityField::Bio),
        ];
        let mut flags = <BitFlags<IdentityField>>::empty();
        for (present, flag) in presence {
            if present {
                flags.insert(flag);
            }
        }
        flags
    }
}

View File

@ -0,0 +1,823 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Identity pallet benchmarking.
#![cfg(feature = "runtime-benchmarks")]
use super::*;
use crate::Pallet as Identity;
use codec::Encode;
use frame_benchmarking::{
account, impl_benchmark_test_suite, v2::*, whitelisted_caller, BenchmarkError,
};
use frame_support::{
assert_ok, ensure,
traits::{EnsureOrigin, Get, OnFinalize, OnInitialize},
};
use frame_system::RawOrigin;
use sp_io::crypto::{sr25519_generate, sr25519_sign};
use sp_runtime::{
traits::{Bounded, IdentifyAccount, One},
MultiSignature, MultiSigner,
};
const SEED: u32 = 0;
/// Assert that `generic_event` appears anywhere in the current block's event list.
fn assert_has_event<T: Config>(generic_event: <T as Config>::RuntimeEvent) {
    frame_system::Pallet::<T>::assert_has_event(generic_event.into());
}
/// Assert that `generic_event` is the most recently deposited event.
fn assert_last_event<T: Config>(generic_event: <T as Config>::RuntimeEvent) {
    frame_system::Pallet::<T>::assert_last_event(generic_event.into());
}
/// Advance the chain to block `n`, driving this pallet's and System's lifecycle hooks
/// for every intervening block. No-op if `n` is not ahead of the current block.
fn run_to_block<T: Config>(n: frame_system::pallet_prelude::BlockNumberFor<T>) {
    while frame_system::Pallet::<T>::block_number() < n {
        // Finalize the current block: pallet hook first, then System.
        crate::Pallet::<T>::on_finalize(frame_system::Pallet::<T>::block_number());
        frame_system::Pallet::<T>::on_finalize(frame_system::Pallet::<T>::block_number());
        frame_system::Pallet::<T>::set_block_number(
            frame_system::Pallet::<T>::block_number() + One::one(),
        );
        // Initialize the next block in the mirrored order: System first, then pallet.
        frame_system::Pallet::<T>::on_initialize(frame_system::Pallet::<T>::block_number());
        crate::Pallet::<T>::on_initialize(frame_system::Pallet::<T>::block_number());
    }
}
// Adds `r` registrars to the Identity Pallet. These registrars will have set fees and fields.
// Errors if any of the underlying dispatchables fail.
fn add_registrars<T: Config>(r: u32) -> Result<(), &'static str> {
    for i in 0..r {
        // Derive a deterministic account, fund it, and register it via the privileged origin.
        let registrar: T::AccountId = account("registrar", i, SEED);
        let registrar_lookup = T::Lookup::unlookup(registrar.clone());
        let _ = T::Currency::make_free_balance_be(&registrar, BalanceOf::<T>::max_value());
        let registrar_origin = T::RegistrarOrigin::try_successful_origin()
            .expect("RegistrarOrigin has no successful origin required for the benchmark");
        Identity::<T>::add_registrar(registrar_origin, registrar_lookup)?;
        // Give each registrar a non-zero fee and interest in every identity field.
        Identity::<T>::set_fee(RawOrigin::Signed(registrar.clone()).into(), i, 10u32.into())?;
        let fields = T::IdentityInformation::all_fields();
        Identity::<T>::set_fields(RawOrigin::Signed(registrar.clone()).into(), i, fields)?;
    }
    assert_eq!(Registrars::<T>::get().len(), r as usize);
    Ok(())
}
// Create `s` sub-accounts for the identity of `who` and return them.
// Each will have 32 bytes of raw data added to it. Does NOT register the subs on-chain;
// see `add_sub_accounts` for that.
fn create_sub_accounts<T: Config>(
    who: &T::AccountId,
    s: u32,
) -> Result<Vec<(T::AccountId, Data)>, &'static str> {
    let mut subs = Vec::new();
    let who_origin = RawOrigin::Signed(who.clone());
    let data = Data::Raw(vec![0; 32].try_into().unwrap());
    for i in 0..s {
        let sub_account = account("sub", i, SEED);
        subs.push((sub_account, data.clone()));
    }
    // Set identity so `set_subs` does not fail.
    if IdentityOf::<T>::get(who).is_none() {
        // Half the max balance, leaving headroom for deposits.
        let _ = T::Currency::make_free_balance_be(who, BalanceOf::<T>::max_value() / 2u32.into());
        let info = T::IdentityInformation::create_identity_info();
        Identity::<T>::set_identity(who_origin.into(), Box::new(info))?;
    }
    Ok(subs)
}
// Adds `s` sub-accounts to the identity of `who`. Each will have 32 bytes of raw data added to it.
// This additionally returns the vector of sub-accounts so it can be modified if needed.
fn add_sub_accounts<T: Config>(
    who: &T::AccountId,
    s: u32,
) -> Result<Vec<(T::AccountId, Data)>, &'static str> {
    let who_origin = RawOrigin::Signed(who.clone());
    // Build the sub list (ensuring `who` has an identity), then register it on-chain.
    let subs = create_sub_accounts::<T>(who, s)?;
    Identity::<T>::set_subs(who_origin.into(), subs.clone())?;
    Ok(subs)
}
/// The username suffix used throughout these benchmarks.
fn bench_suffix() -> Vec<u8> {
    "bench".as_bytes().to_vec()
}
/// A 24-byte benchmark username (sized to sit near the username length bound).
fn bench_username() -> Vec<u8> {
    "veryfastbenchmarkmachine".bytes().collect()
}
/// Join `username` and `suffix` as `<username>.<suffix>` and bound-check the result.
///
/// Panics if the joined name exceeds the `Username` bound — acceptable in benchmark code.
fn bounded_username<T: Config>(username: Vec<u8>, suffix: Vec<u8>) -> Username<T> {
    let mut joined = username;
    joined.push(b'.');
    joined.extend(suffix);
    Username::<T>::try_from(joined).expect("test usernames should fit within bounds")
}
#[benchmarks(
where
<T as frame_system::Config>::AccountId: From<sp_runtime::AccountId32>,
T::OffchainSignature: From<MultiSignature>,
)]
mod benchmarks {
use super::*;
#[benchmark]
fn add_registrar(r: Linear<1, { T::MaxRegistrars::get() - 1 }>) -> Result<(), BenchmarkError> {
add_registrars::<T>(r)?;
ensure!(
Registrars::<T>::get().len() as u32 == r,
"Registrars not set up correctly."
);
let origin =
T::RegistrarOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
let account = T::Lookup::unlookup(account("registrar", r + 1, SEED));
#[extrinsic_call]
_(origin as T::RuntimeOrigin, account);
ensure!(
Registrars::<T>::get().len() as u32 == r + 1,
"Registrars not added."
);
Ok(())
}
#[benchmark]
fn set_identity(r: Linear<1, { T::MaxRegistrars::get() }>) -> Result<(), BenchmarkError> {
add_registrars::<T>(r)?;
let caller: T::AccountId = whitelisted_caller();
let caller_lookup = T::Lookup::unlookup(caller.clone());
let caller_origin: <T as frame_system::Config>::RuntimeOrigin =
RawOrigin::Signed(caller.clone()).into();
let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
// Add an initial identity
let initial_info = T::IdentityInformation::create_identity_info();
Identity::<T>::set_identity(caller_origin.clone(), Box::new(initial_info.clone()))?;
// User requests judgement from all the registrars, and they approve
for i in 0..r {
let registrar: T::AccountId = account("registrar", i, SEED);
let _ = T::Lookup::unlookup(registrar.clone());
let balance_to_use = T::Currency::minimum_balance() * 10u32.into();
let _ = T::Currency::make_free_balance_be(&registrar, balance_to_use);
Identity::<T>::request_judgement(caller_origin.clone(), i, 10u32.into())?;
Identity::<T>::provide_judgement(
RawOrigin::Signed(registrar).into(),
i,
caller_lookup.clone(),
Judgement::Reasonable,
//T::Hashing::hash_of(&initial_info),
)?;
}
#[extrinsic_call]
_(
RawOrigin::Signed(caller.clone()),
Box::new(T::IdentityInformation::create_identity_info()),
);
assert_last_event::<T>(Event::<T>::IdentitySet { who: caller }.into());
Ok(())
}
// We need to split `set_subs` into two benchmarks to accurately isolate the potential
// writes caused by new or old sub accounts. The actual weight should simply be
// the sum of these two weights.
#[benchmark]
fn set_subs_new(s: Linear<0, { T::MaxSubAccounts::get() }>) -> Result<(), BenchmarkError> {
let caller: T::AccountId = whitelisted_caller();
// Create a new subs vec with sub accounts
let subs = create_sub_accounts::<T>(&caller, s)?;
ensure!(
SubsOf::<T>::get(&caller).1.len() == 0,
"Caller already has subs"
);
#[extrinsic_call]
set_subs(RawOrigin::Signed(caller.clone()), subs);
ensure!(
SubsOf::<T>::get(&caller).1.len() as u32 == s,
"Subs not added"
);
Ok(())
}
#[benchmark]
fn set_subs_old(p: Linear<0, { T::MaxSubAccounts::get() }>) -> Result<(), BenchmarkError> {
let caller: T::AccountId = whitelisted_caller();
// Give them p many previous sub accounts.
let _ = add_sub_accounts::<T>(&caller, p)?;
// Remove all subs.
let subs = create_sub_accounts::<T>(&caller, 0)?;
ensure!(
SubsOf::<T>::get(&caller).1.len() as u32 == p,
"Caller does have subs",
);
#[extrinsic_call]
set_subs(RawOrigin::Signed(caller.clone()), subs);
ensure!(SubsOf::<T>::get(&caller).1.len() == 0, "Subs not removed");
Ok(())
}
#[benchmark]
fn clear_identity(
r: Linear<1, { T::MaxRegistrars::get() }>,
s: Linear<0, { T::MaxSubAccounts::get() }>,
) -> Result<(), BenchmarkError> {
let caller: T::AccountId = whitelisted_caller();
let caller_origin =
<T as frame_system::Config>::RuntimeOrigin::from(RawOrigin::Signed(caller.clone()));
let caller_lookup = <T::Lookup as StaticLookup>::unlookup(caller.clone());
let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
// Register the registrars
add_registrars::<T>(r)?;
// Add sub accounts
let _ = add_sub_accounts::<T>(&caller, s)?;
// Create their main identity with x additional fields
let info = T::IdentityInformation::create_identity_info();
Identity::<T>::set_identity(caller_origin.clone(), Box::new(info.clone()))?;
// User requests judgement from all the registrars, and they approve
for i in 0..r {
let registrar: T::AccountId = account("registrar", i, SEED);
let balance_to_use = T::Currency::minimum_balance() * 10u32.into();
let _ = T::Currency::make_free_balance_be(&registrar, balance_to_use);
Identity::<T>::request_judgement(caller_origin.clone(), i, 10u32.into())?;
Identity::<T>::provide_judgement(
RawOrigin::Signed(registrar).into(),
i,
caller_lookup.clone(),
Judgement::Reasonable,
//T::Hashing::hash_of(&info),
)?;
}
ensure!(
IdentityOf::<T>::contains_key(&caller),
"Identity does not exist."
);
#[extrinsic_call]
_(RawOrigin::Signed(caller.clone()));
ensure!(
!IdentityOf::<T>::contains_key(&caller),
"Identity not cleared."
);
Ok(())
}
#[benchmark]
fn request_judgement(r: Linear<1, { T::MaxRegistrars::get() }>) -> Result<(), BenchmarkError> {
let caller: T::AccountId = whitelisted_caller();
let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
// Register the registrars
add_registrars::<T>(r)?;
// Create their main identity with x additional fields
let info = T::IdentityInformation::create_identity_info();
let caller_origin =
<T as frame_system::Config>::RuntimeOrigin::from(RawOrigin::Signed(caller.clone()));
Identity::<T>::set_identity(caller_origin.clone(), Box::new(info))?;
#[extrinsic_call]
_(RawOrigin::Signed(caller.clone()), r - 1, 10u32.into());
assert_last_event::<T>(
Event::<T>::JudgementRequested {
who: caller,
registrar_index: r - 1,
}
.into(),
);
Ok(())
}
#[benchmark]
fn cancel_request(r: Linear<1, { T::MaxRegistrars::get() }>) -> Result<(), BenchmarkError> {
let caller: T::AccountId = whitelisted_caller();
let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
// Register the registrars
add_registrars::<T>(r)?;
// Create their main identity with x additional fields
let info = T::IdentityInformation::create_identity_info();
let caller_origin =
<T as frame_system::Config>::RuntimeOrigin::from(RawOrigin::Signed(caller.clone()));
Identity::<T>::set_identity(caller_origin.clone(), Box::new(info))?;
Identity::<T>::request_judgement(caller_origin.clone(), r - 1, 10u32.into())?;
#[extrinsic_call]
_(RawOrigin::Signed(caller.clone()), r - 1);
assert_last_event::<T>(
Event::<T>::JudgementUnrequested {
who: caller,
registrar_index: r - 1,
}
.into(),
);
Ok(())
}
#[benchmark]
fn set_fee(r: Linear<1, { T::MaxRegistrars::get() - 1 }>) -> Result<(), BenchmarkError> {
let caller: T::AccountId = whitelisted_caller();
let caller_lookup = T::Lookup::unlookup(caller.clone());
add_registrars::<T>(r)?;
let registrar_origin = T::RegistrarOrigin::try_successful_origin()
.expect("RegistrarOrigin has no successful origin required for the benchmark");
Identity::<T>::add_registrar(registrar_origin, caller_lookup)?;
let registrars = Registrars::<T>::get();
ensure!(
registrars[r as usize].as_ref().unwrap().fee == 0u32.into(),
"Fee already set."
);
#[extrinsic_call]
_(RawOrigin::Signed(caller), r, 100u32.into());
let updated_registrars = Registrars::<T>::get();
ensure!(
updated_registrars[r as usize].as_ref().unwrap().fee == 100u32.into(),
"Fee not changed."
);
Ok(())
}
#[benchmark]
fn set_account_id(r: Linear<1, { T::MaxRegistrars::get() - 1 }>) -> Result<(), BenchmarkError> {
let caller: T::AccountId = whitelisted_caller();
let caller_lookup = T::Lookup::unlookup(caller.clone());
let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
add_registrars::<T>(r)?;
let registrar_origin = T::RegistrarOrigin::try_successful_origin()
.expect("RegistrarOrigin has no successful origin required for the benchmark");
Identity::<T>::add_registrar(registrar_origin, caller_lookup)?;
let registrars = Registrars::<T>::get();
ensure!(
registrars[r as usize].as_ref().unwrap().account == caller,
"id not set."
);
let new_account = T::Lookup::unlookup(account("new", 0, SEED));
#[extrinsic_call]
_(RawOrigin::Signed(caller), r, new_account);
let updated_registrars = Registrars::<T>::get();
ensure!(
updated_registrars[r as usize].as_ref().unwrap().account == account("new", 0, SEED),
"id not changed."
);
Ok(())
}
#[benchmark]
fn set_fields(r: Linear<1, { T::MaxRegistrars::get() - 1 }>) -> Result<(), BenchmarkError> {
let caller: T::AccountId = whitelisted_caller();
let caller_lookup = T::Lookup::unlookup(caller.clone());
let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
add_registrars::<T>(r)?;
let registrar_origin = T::RegistrarOrigin::try_successful_origin()
.expect("RegistrarOrigin has no successful origin required for the benchmark");
Identity::<T>::add_registrar(registrar_origin, caller_lookup)?;
let registrars = Registrars::<T>::get();
ensure!(
registrars[r as usize].as_ref().unwrap().fields == Default::default(),
"fields already set."
);
let fields = T::IdentityInformation::all_fields();
#[extrinsic_call]
_(RawOrigin::Signed(caller), r, fields);
let updated_registrars = Registrars::<T>::get();
ensure!(
updated_registrars[r as usize].as_ref().unwrap().fields != Default::default(),
"fields not set."
);
Ok(())
}
#[benchmark]
fn provide_judgement(
r: Linear<1, { T::MaxRegistrars::get() - 1 }>,
) -> Result<(), BenchmarkError> {
// The user
let user: T::AccountId = account("user", r, SEED);
let user_origin =
<T as frame_system::Config>::RuntimeOrigin::from(RawOrigin::Signed(user.clone()));
let user_lookup = <T::Lookup as StaticLookup>::unlookup(user.clone());
let _ = T::Currency::make_free_balance_be(&user, BalanceOf::<T>::max_value());
let caller: T::AccountId = whitelisted_caller();
let caller_lookup = T::Lookup::unlookup(caller.clone());
let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
add_registrars::<T>(r)?;
let info = T::IdentityInformation::create_identity_info();
let info_hash = T::Hashing::hash_of(&info);
Identity::<T>::set_identity(user_origin.clone(), Box::new(info))?;
let registrar_origin = T::RegistrarOrigin::try_successful_origin()
.expect("RegistrarOrigin has no successful origin required for the benchmark");
Identity::<T>::add_registrar(registrar_origin, caller_lookup)?;
Identity::<T>::request_judgement(user_origin, r, 10u32.into())?;
#[extrinsic_call]
_(
RawOrigin::Signed(caller),
r,
user_lookup,
Judgement::Reasonable,
//info_hash,
);
assert_last_event::<T>(
Event::<T>::JudgementGiven {
target: user,
registrar_index: r,
}
.into(),
);
Ok(())
}
#[benchmark]
fn kill_identity(
r: Linear<1, { T::MaxRegistrars::get() }>,
s: Linear<0, { T::MaxSubAccounts::get() }>,
) -> Result<(), BenchmarkError> {
add_registrars::<T>(r)?;
let target: T::AccountId = account("target", 0, SEED);
let target_origin: <T as frame_system::Config>::RuntimeOrigin =
RawOrigin::Signed(target.clone()).into();
let target_lookup = T::Lookup::unlookup(target.clone());
let _ = T::Currency::make_free_balance_be(&target, BalanceOf::<T>::max_value());
let info = T::IdentityInformation::create_identity_info();
Identity::<T>::set_identity(target_origin.clone(), Box::new(info.clone()))?;
let _ = add_sub_accounts::<T>(&target, s)?;
// User requests judgement from all the registrars, and they approve
for i in 0..r {
let registrar: T::AccountId = account("registrar", i, SEED);
let balance_to_use = T::Currency::minimum_balance() * 10u32.into();
let _ = T::Currency::make_free_balance_be(&registrar, balance_to_use);
Identity::<T>::request_judgement(target_origin.clone(), i, 10u32.into())?;
Identity::<T>::provide_judgement(
RawOrigin::Signed(registrar).into(),
i,
target_lookup.clone(),
Judgement::Reasonable,
//T::Hashing::hash_of(&info),
)?;
}
ensure!(IdentityOf::<T>::contains_key(&target), "Identity not set");
let origin =
T::ForceOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
#[extrinsic_call]
_(origin as T::RuntimeOrigin, target_lookup);
ensure!(
!IdentityOf::<T>::contains_key(&target),
"Identity not removed"
);
Ok(())
}
#[benchmark]
fn add_sub(s: Linear<0, { T::MaxSubAccounts::get() - 1 }>) -> Result<(), BenchmarkError> {
let caller: T::AccountId = whitelisted_caller();
let _ = add_sub_accounts::<T>(&caller, s)?;
let sub = account("new_sub", 0, SEED);
let data = Data::Raw(vec![0; 32].try_into().unwrap());
ensure!(
SubsOf::<T>::get(&caller).1.len() as u32 == s,
"Subs not set."
);
#[extrinsic_call]
_(
RawOrigin::Signed(caller.clone()),
T::Lookup::unlookup(sub),
data,
);
ensure!(
SubsOf::<T>::get(&caller).1.len() as u32 == s + 1,
"Subs not added."
);
Ok(())
}
#[benchmark]
fn rename_sub(s: Linear<1, { T::MaxSubAccounts::get() }>) -> Result<(), BenchmarkError> {
let caller: T::AccountId = whitelisted_caller();
let (sub, _) = add_sub_accounts::<T>(&caller, s)?.remove(0);
let data = Data::Raw(vec![1; 32].try_into().unwrap());
ensure!(
SuperOf::<T>::get(&sub).unwrap().1 != data,
"data already set"
);
#[extrinsic_call]
_(
RawOrigin::Signed(caller),
T::Lookup::unlookup(sub.clone()),
data.clone(),
);
ensure!(SuperOf::<T>::get(&sub).unwrap().1 == data, "data not set");
Ok(())
}
#[benchmark]
fn remove_sub(s: Linear<1, { T::MaxSubAccounts::get() }>) -> Result<(), BenchmarkError> {
let caller: T::AccountId = whitelisted_caller();
let (sub, _) = add_sub_accounts::<T>(&caller, s)?.remove(0);
ensure!(SuperOf::<T>::contains_key(&sub), "Sub doesn't exists");
#[extrinsic_call]
_(RawOrigin::Signed(caller), T::Lookup::unlookup(sub.clone()));
ensure!(!SuperOf::<T>::contains_key(&sub), "Sub not removed");
Ok(())
}
#[benchmark]
fn quit_sub(s: Linear<0, { T::MaxSubAccounts::get() - 1 }>) -> Result<(), BenchmarkError> {
let caller: T::AccountId = whitelisted_caller();
let sup = account("super", 0, SEED);
let _ = add_sub_accounts::<T>(&sup, s)?;
let sup_origin = RawOrigin::Signed(sup).into();
Identity::<T>::add_sub(
sup_origin,
T::Lookup::unlookup(caller.clone()),
Data::Raw(vec![0; 32].try_into().unwrap()),
)?;
ensure!(SuperOf::<T>::contains_key(&caller), "Sub doesn't exists");
#[extrinsic_call]
_(RawOrigin::Signed(caller.clone()));
ensure!(!SuperOf::<T>::contains_key(&caller), "Sub not removed");
Ok(())
}
#[benchmark]
fn add_username_authority() -> Result<(), BenchmarkError> {
let origin =
T::UsernameAuthorityOrigin::try_successful_origin().expect("can generate origin");
let authority: T::AccountId = account("authority", 0, SEED);
let authority_lookup = T::Lookup::unlookup(authority.clone());
let suffix = bench_suffix();
let allocation = 10;
#[extrinsic_call]
_(
origin as T::RuntimeOrigin,
authority_lookup,
suffix,
allocation,
);
assert_last_event::<T>(Event::<T>::AuthorityAdded { authority }.into());
Ok(())
}
#[benchmark]
fn remove_username_authority() -> Result<(), BenchmarkError> {
let origin =
T::UsernameAuthorityOrigin::try_successful_origin().expect("can generate origin");
let authority: T::AccountId = account("authority", 0, SEED);
let authority_lookup = T::Lookup::unlookup(authority.clone());
let suffix = bench_suffix();
let allocation = 10;
assert_ok!(Identity::<T>::add_username_authority(
origin.clone(),
authority_lookup.clone(),
suffix,
allocation
));
#[extrinsic_call]
_(origin as T::RuntimeOrigin, authority_lookup);
assert_last_event::<T>(Event::<T>::AuthorityRemoved { authority }.into());
Ok(())
}
#[benchmark]
fn set_username_for() -> Result<(), BenchmarkError> {
// Set up a username authority.
let auth_origin =
T::UsernameAuthorityOrigin::try_successful_origin().expect("can generate origin");
let authority: T::AccountId = account("authority", 0, SEED);
let authority_lookup = T::Lookup::unlookup(authority.clone());
let suffix = bench_suffix();
let allocation = 10;
Identity::<T>::add_username_authority(
auth_origin,
authority_lookup,
suffix.clone(),
allocation,
)?;
let username = bench_username();
let bounded_username = bounded_username::<T>(username.clone(), suffix.clone());
let encoded_username = Encode::encode(&bounded_username.to_vec());
let public = sr25519_generate(0.into(), None);
let who_account: T::AccountId = MultiSigner::Sr25519(public).into_account().into();
let who_lookup = T::Lookup::unlookup(who_account.clone());
let signature =
MultiSignature::Sr25519(sr25519_sign(0.into(), &public, &encoded_username).unwrap());
// Verify signature here to avoid surprise errors at runtime
assert!(signature.verify(&encoded_username[..], &public.into()));
#[extrinsic_call]
_(
RawOrigin::Signed(authority.clone()),
who_lookup,
username,
Some(signature.into()),
);
assert_has_event::<T>(
Event::<T>::UsernameSet {
who: who_account.clone(),
username: bounded_username.clone(),
}
.into(),
);
assert_has_event::<T>(
Event::<T>::PrimaryUsernameSet {
who: who_account,
username: bounded_username,
}
.into(),
);
Ok(())
}
#[benchmark]
fn accept_username() -> Result<(), BenchmarkError> {
let caller: T::AccountId = whitelisted_caller();
let username = bounded_username::<T>(bench_username(), bench_suffix());
Identity::<T>::queue_acceptance(&caller, username.clone());
#[extrinsic_call]
_(RawOrigin::Signed(caller.clone()), username.clone());
assert_last_event::<T>(
Event::<T>::UsernameSet {
who: caller,
username,
}
.into(),
);
Ok(())
}
#[benchmark]
fn remove_expired_approval() -> Result<(), BenchmarkError> {
let caller: T::AccountId = whitelisted_caller();
let username = bounded_username::<T>(bench_username(), bench_suffix());
Identity::<T>::queue_acceptance(&caller, username.clone());
let expected_exiration =
frame_system::Pallet::<T>::block_number() + T::PendingUsernameExpiration::get();
run_to_block::<T>(expected_exiration + One::one());
#[extrinsic_call]
_(RawOrigin::Signed(caller.clone()), username);
assert_last_event::<T>(Event::<T>::PreapprovalExpired { whose: caller }.into());
Ok(())
}
#[benchmark]
fn set_primary_username() -> Result<(), BenchmarkError> {
let caller: T::AccountId = whitelisted_caller();
let first_username = bounded_username::<T>(bench_username(), bench_suffix());
let second_username = bounded_username::<T>(b"slowbenchmark".to_vec(), bench_suffix());
// First one will be set as primary. Second will not be.
Identity::<T>::insert_username(&caller, first_username);
Identity::<T>::insert_username(&caller, second_username.clone());
#[extrinsic_call]
_(RawOrigin::Signed(caller.clone()), second_username.clone());
assert_last_event::<T>(
Event::<T>::PrimaryUsernameSet {
who: caller,
username: second_username,
}
.into(),
);
Ok(())
}
#[benchmark]
fn remove_dangling_username() -> Result<(), BenchmarkError> {
let caller: T::AccountId = whitelisted_caller();
let first_username = bounded_username::<T>(bench_username(), bench_suffix());
let second_username = bounded_username::<T>(b"slowbenchmark".to_vec(), bench_suffix());
// First one will be set as primary. Second will not be.
Identity::<T>::insert_username(&caller, first_username);
Identity::<T>::insert_username(&caller, second_username.clone());
// User calls `clear_identity`, leaving their second username as "dangling"
Identity::<T>::clear_identity(RawOrigin::Signed(caller.clone()).into())?;
#[extrinsic_call]
_(RawOrigin::Signed(caller.clone()), second_username.clone());
assert_last_event::<T>(
Event::<T>::DanglingUsernameRemoved {
who: caller,
username: second_username,
}
.into(),
);
Ok(())
}
impl_benchmark_test_suite!(Identity, crate::tests::new_test_ext(), crate::tests::Test);
}

View File

@ -0,0 +1,218 @@
use crate::{
generate_identity_field_enum, generate_identity_field_enum_and_type_info,
generate_identity_info_default_impl, generate_identity_info_impl, generate_type_info,
};
use crate::{Data, IdentityInformationProvider};
use codec::{Decode, Encode, MaxEncodedLen};
#[cfg(feature = "runtime-benchmarks")]
use enumflags2::BitFlag;
use enumflags2::{bitflags, BitFlags};
use frame_support::{traits::Get, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound};
use scale_info::TypeInfo;
use sp_runtime::{BoundedVec, RuntimeDebug};
use sp_std::prelude::*;
// Maximum raw-byte lengths used when building worst-case benchmark data for
// each identity field. These are only read by `create_identity_info` under the
// `runtime-benchmarks` feature, hence the `allow(dead_code)` otherwise.
#[cfg_attr(not(feature = "runtime-benchmarks"), allow(dead_code))]
const DEFAULT_DATA_MAX_LENGTH: usize = 32;
#[cfg_attr(not(feature = "runtime-benchmarks"), allow(dead_code))]
const DISPLAY_DATA_MAX_LENGTH: usize = 64;
#[cfg_attr(not(feature = "runtime-benchmarks"), allow(dead_code))]
const EMAIL_DATA_MAX_LENGTH: usize = 64;
#[cfg_attr(not(feature = "runtime-benchmarks"), allow(dead_code))]
const BIO_DATA_MAX_LENGTH: usize = 256;
#[cfg_attr(not(feature = "runtime-benchmarks"), allow(dead_code))]
const ADDRESS_DATA_MAX_LENGTH: usize = 256;
#[cfg_attr(not(feature = "runtime-benchmarks"), allow(dead_code))]
const LINK_DATA_MAX_LENGTH: usize = 128;
#[cfg_attr(not(feature = "runtime-benchmarks"), allow(dead_code))]
const ADDITIONAL_LINK_MAX_LENGTH: usize = 64;
#[cfg_attr(not(feature = "runtime-benchmarks"), allow(dead_code))]
const BACKGROUND_MAX_LENGTH: usize = 32;
#[cfg_attr(not(feature = "runtime-benchmarks"), allow(dead_code))]
const CUSTOM_LINKS_MAX_AMOUNT: usize = 8;
// NOTE(review): if `Data` here is the crate's standard type, `Data::Raw` is
// bounded to 32 bytes — the values above 32 would make the benchmark
// `try_into().unwrap()` calls panic. Confirm which `Data` bound is in effect.
// Generates the `IdentityField` bitflags enum plus its manual `TypeInfo` impl.
// Variant order fixes both the bit position and the metadata index of each
// field, so this list must stay in sync with the `IdentityInfo` struct below.
generate_identity_field_enum_and_type_info!(
    Display,
    FirstName,
    LastName,
    Email,
    Address,
    TelephoneNumber,
    Bio,
    Background,
    Website,
    X,
    Telegram,
    Discord,
    Linkedin,
    Instagram,
    Medium,
    YouTube,
    Git,
    Mastodon
);
/// Information concerning the identity of the controller of an account.
///
/// NOTE: This should be stored at the end of the storage item to facilitate the addition of extra
/// fields in a backwards compatible way through a specialized `Decode` impl.
///
/// NOTE(review): field declaration order is part of the SCALE encoding —
/// reordering or un-commenting fields is a breaking storage-format change.
#[derive(
    CloneNoBound,
    Encode,
    Decode,
    EqNoBound,
    MaxEncodedLen,
    PartialEqNoBound,
    RuntimeDebugNoBound,
    TypeInfo,
)]
#[codec(mel_bound())]
#[scale_info(skip_type_params(FieldLimit))]
pub struct IdentityInfo<FieldLimit: Get<u32>> {
    /// Additional fields of the identity that are not catered for with the struct's explicit
    /// fields.
    pub additional: BoundedVec<(Data, Data), FieldLimit>,
    /// A reasonable display name for the controller of the account. This should be whatever it is
    /// that it is typically known as and should not be confusable with other entities, given
    /// reasonable context.
    ///
    /// Stored as UTF-8.
    pub display: Data,
    // Doc for the removed `legal` field, demoted from `///` so it no longer
    // attaches to `email`:
    // The full legal name in the local jurisdiction of the entity. This might
    // be a bit long-winded. Stored as UTF-8.
    // pub legal: Data,
    // pub first_name: Data,
    // pub last_name: Data,
    /// The email address of the controller of the account.
    ///
    /// Stored as UTF-8.
    pub email: Data,
    /// The address of the controller of the account. Stored as UTF-8.
    pub address: Data,
    /// The telephone number of the controller of the account. Stored as UTF-8.
    pub telephone_number: Data,
    /// Free-form biography text. Stored as UTF-8.
    pub bio: Data,
    // pub background: Data,
    /// A representative website. Stored as UTF-8.
    pub website: Data,
    /// Per-platform handle or link fields. Stored as UTF-8.
    pub x: Data,
    pub telegram: Data,
    pub discord: Data,
    pub linkedin: Data,
    pub instagram: Data,
    pub medium: Data,
    pub youtube: Data,
    pub git: Data,
    pub mastodon: Data,
    /// Additional links
    pub additional_links: BoundedVec<(Data, Data), FieldLimit>,
}
impl<FieldLimit: Get<u32> + 'static> IdentityInformationProvider for IdentityInfo<FieldLimit> {
    type FieldsIdentifier = u64;

    /// `true` iff every bit set in `fields` corresponds to a populated field.
    fn has_identity(&self, fields: Self::FieldsIdentifier) -> bool {
        self.fields().bits() & fields == fields
    }

    /// Builds a worst-case identity for benchmarking: every field filled to
    /// its maximum configured length and `additional`/`additional_links`
    /// filled to `FieldLimit`.
    #[cfg(feature = "runtime-benchmarks")]
    fn create_identity_info() -> Self {
        // NOTE(review): if `Data::Raw` is bounded below the lengths used here,
        // these `try_into().unwrap()` calls panic at benchmark time — confirm
        // the bound matches the `*_MAX_LENGTH` constants.
        let data = Data::Raw(vec![0; DEFAULT_DATA_MAX_LENGTH].try_into().unwrap());
        let address_data = Data::Raw(vec![0; ADDRESS_DATA_MAX_LENGTH].try_into().unwrap());
        let email_data = Data::Raw(vec![0; EMAIL_DATA_MAX_LENGTH].try_into().unwrap());
        // Fix: size the display field with its own limit. Previously this used
        // `ADDRESS_DATA_MAX_LENGTH`, leaving `DISPLAY_DATA_MAX_LENGTH` unused.
        let display_data = Data::Raw(vec![0; DISPLAY_DATA_MAX_LENGTH].try_into().unwrap());
        let bio_data = Data::Raw(vec![0; BIO_DATA_MAX_LENGTH].try_into().unwrap());
        let link_data = Data::Raw(vec![0; LINK_DATA_MAX_LENGTH].try_into().unwrap());
        let additional_link_name =
            Data::Raw(vec![0; ADDITIONAL_LINK_MAX_LENGTH].try_into().unwrap());
        // Underscored to silence the unused-variable warning while the
        // commented-out `background` field is pending removal or restoration.
        let _background_data = Data::Raw(vec![0; BACKGROUND_MAX_LENGTH].try_into().unwrap());

        IdentityInfo {
            additional: vec![(data.clone(), data.clone()); FieldLimit::get().try_into().unwrap()]
                .try_into()
                .unwrap(),
            display: display_data,
            // `first_name`/`last_name`/`background` are commented out in the
            // struct and intentionally absent here.
            email: email_data,
            address: address_data,
            telephone_number: data,
            bio: bio_data,
            // links
            website: link_data.clone(),
            x: link_data.clone(),
            telegram: link_data.clone(),
            discord: link_data.clone(),
            linkedin: link_data.clone(),
            instagram: link_data.clone(),
            medium: link_data.clone(),
            youtube: link_data.clone(),
            git: link_data.clone(),
            mastodon: link_data.clone(),
            additional_links: vec![
                (additional_link_name, link_data);
                FieldLimit::get().try_into().unwrap()
            ]
            .try_into()
            .unwrap(),
        }
    }

    /// All identity-field bits set.
    #[cfg(feature = "runtime-benchmarks")]
    fn all_fields() -> Self::FieldsIdentifier {
        IdentityField::all().bits()
    }
}
// Generates `impl Default for IdentityInfo<FieldLimit>`: every field maps to
// its empty value. This list must cover exactly the struct's fields above.
generate_identity_info_default_impl!(
    (additional, BoundedVec::default()),
    (display, Data::None),
    // (first_name, Data::None),
    // (last_name, Data::None),
    (email, Data::None),
    (address, Data::None),
    (telephone_number, Data::None),
    (bio, Data::None),
    // (background, Data::None),
    // links
    (website, Data::None),
    (x, Data::None),
    (telegram, Data::None),
    (discord, Data::None),
    (linkedin, Data::None),
    (instagram, Data::None),
    (medium, Data::None),
    (youtube, Data::None),
    (git, Data::None),
    (mastodon, Data::None),
    (additional_links, BoundedVec::default())
);
// Generates `IdentityInfo::fields()`: maps each populated (non-`None`) field
// to its `IdentityField` flag. Pairs are `(struct_field, enum_variant)`.
generate_identity_info_impl!(
    (display, Display),
    // (first_name, FirstName),
    // (last_name, LastName),
    (email, Email),
    (address, Address),
    (telephone_number, TelephoneNumber),
    (bio, Bio),
    // (background, Background),
    // links
    (website, Website),
    (x, X),
    (telegram, Telegram),
    (discord, Discord),
    (linkedin, Linkedin),
    (instagram, Instagram),
    (medium, Medium),
    (youtube, YouTube),
    (git, Git),
    (mastodon, Mastodon)
);

View File

@ -0,0 +1,293 @@
/// Generates the `IdentityField` bitflags enum.
///
/// Each listed variant becomes one flag; declaration order determines the bit
/// position (first variant = bit 0). Expands to, e.g.:
///
/// ```rust,ignore
/// #[bitflags]
/// #[repr(u64)]
/// #[derive(Clone, Copy, PartialEq, Eq, RuntimeDebug)]
/// pub enum IdentityField {
///     Display,
///     Email,
///     // ...
/// }
/// ```
///
/// NOTE(review): the expansion references `bitflags` and `RuntimeDebug`
/// unqualified, so both must be in scope at the call site.
#[macro_export]
macro_rules! generate_identity_field_enum {
    ($($variant:ident),* $(,)?) => {
        #[bitflags]
        #[repr(u64)]
        #[derive(Clone, Copy, PartialEq, Eq, RuntimeDebug)]
        pub enum IdentityField {
            $(
                $variant,
            )*
        }
    }
}
/// Generates a manual `TypeInfo` impl for a data-less enum.
///
/// `$variants` is an expression yielding an iterable of variant-name strings;
/// each name is registered with its position as the metadata index. Expands to
/// the equivalent of:
///
/// ```rust,ignore
/// impl TypeInfo for IdentityField {
///     type Identity = Self;
///     fn type_info() -> scale_info::Type {
///         Type::builder()
///             .path(Path::new("IdentityField", module_path!()))
///             .variant(
///                 Variants::new()
///                     .variant("Display", |v| v.index(0))
///                     .variant("Email", |v| v.index(1)), // ...
///             )
///     }
/// }
/// ```
#[macro_export]
macro_rules! generate_type_info {
    ($name:ident, $variants:expr) => {
        impl TypeInfo for $name {
            type Identity = Self;

            fn type_info() -> scale_info::Type {
                scale_info::Type::builder()
                    .path(scale_info::Path::new(stringify!($name), module_path!()))
                    .type_params(vec![])
                    // Fold each name into the builder; index = list position.
                    .variant($variants.iter().enumerate().fold(
                        scale_info::build::Variants::new(),
                        |variants, (index, variant)| {
                            variants.variant(*variant, |v| v.index(index as u8))
                        },
                    ))
            }
        }
    };
}
/// Macro to generate the `impl` block for `IdentityInformationProvider` for `IdentityInfo`.
///
/// Parameters:
/// - `$type_name`: The name of the struct (e.g., `IdentityInfo`).
/// - `$field_limit`: The generic parameter for field limit (e.g., `FieldLimit`).
/// - `$fields`: A list of `(field_name, max_data_length)` tuples.
///
/// Usage (fence is `ignore`: the macro is not resolvable inside a doctest):
/// ```rust,ignore
/// generate_identity_information_provider_impl!(
///     IdentityInfo,
///     FieldLimit,
///     (display, DEFAULT_DATA_MAX_LENGTH),
///     (email, DEFAULT_DATA_MAX_LENGTH),
///     (bio, BIO_DATA_MAX_LENGTH),
/// );
/// ```
///
/// The expansion provides `has_identity` plus, under `runtime-benchmarks`,
/// `create_identity_info` (each field filled with `$data_length` zero bytes)
/// and `all_fields`.
///
/// NOTE(review): the expansion references `Data`, `IdentityField` and
/// `DEFAULT_DATA_MAX_LENGTH` unqualified, so all must be in scope at the call
/// site.
#[macro_export]
macro_rules! generate_identity_information_provider_impl {
    ($type_name:ident, $field_limit:ident, $(($field_name:ident, $data_length:expr)),* $(,)?) => {
        impl<$field_limit: Get<u32> + 'static> IdentityInformationProvider for $type_name<$field_limit> {
            type FieldsIdentifier = u64;

            fn has_identity(&self, fields: Self::FieldsIdentifier) -> bool {
                self.fields().bits() & fields == fields
            }

            #[cfg(feature = "runtime-benchmarks")]
            fn create_identity_info() -> Self {
                // One local per field, filled with `$data_length` zero bytes.
                $(
                    let $field_name = {
                        let data_length = $data_length;
                        Data::Raw(vec![0; data_length].try_into().unwrap())
                    };
                )*

                $type_name {
                    additional: vec![
                        (
                            Data::Raw(vec![0; DEFAULT_DATA_MAX_LENGTH].try_into().unwrap()),
                            Data::Raw(vec![0; DEFAULT_DATA_MAX_LENGTH].try_into().unwrap())
                        );
                        $field_limit::get() as usize
                    ]
                    .try_into()
                    .unwrap(),
                    $(
                        $field_name: $field_name.clone(),
                    )*
                }
            }

            #[cfg(feature = "runtime-benchmarks")]
            fn all_fields() -> Self::FieldsIdentifier {
                IdentityField::all().bits()
            }
        }
    };
}
/// Generates `impl Default for IdentityInfo<FieldLimit>`.
///
/// Each `($field_name, $default_value)` pair becomes one field initializer:
///
/// ```rust,ignore
/// impl<FieldLimit: Get<u32>> Default for IdentityInfo<FieldLimit> {
///     fn default() -> Self {
///         IdentityInfo {
///             additional: BoundedVec::default(),
///             display: Data::None,
///             // ...
///         }
///     }
/// }
/// ```
///
/// NOTE(review): the pair list must cover every field of `IdentityInfo`, or
/// the generated struct literal will not compile.
#[macro_export]
macro_rules! generate_identity_info_default_impl {
    ($(($field_name:ident, $default_value:expr)),*) => {
        impl<FieldLimit: Get<u32>> Default for IdentityInfo<FieldLimit> {
            fn default() -> Self {
                IdentityInfo {
                    $(
                        $field_name: $default_value,
                    )*
                }
            }
        }
    };
}
/// Generates `impl IdentityInfo<FieldLimit>` with a `fields()` accessor.
///
/// Each `($field_name, $variant_name)` pair adds one check: if the field is
/// populated (not `Data::None`), the corresponding flag is inserted.
///
/// ```rust,ignore
/// impl<FieldLimit: Get<u32>> IdentityInfo<FieldLimit> {
///     pub(crate) fn fields(&self) -> BitFlags<IdentityField> {
///         let mut res = <BitFlags<IdentityField>>::empty();
///         if !self.display.is_none() {
///             res.insert(IdentityField::Display);
///         }
///         // ... one block per pair ...
///         res
///     }
/// }
/// ```
#[macro_export]
macro_rules! generate_identity_info_impl {
    ($(($field_name:ident, $variant_name:ident)),*) => {
        impl<FieldLimit: Get<u32>> IdentityInfo<FieldLimit> {
            pub(crate) fn fields(&self) -> BitFlags<IdentityField> {
                let mut res = <BitFlags<IdentityField>>::empty();
                $(
                    if !self.$field_name.is_none() {
                        res.insert(IdentityField::$variant_name);
                    }
                )*
                res
            }
        }
    };
}
/// Combined helper: generates the `IdentityField` enum and its `TypeInfo` impl
/// from a single variant list, keeping bit positions and metadata indices in
/// sync by construction.
#[macro_export]
macro_rules! generate_identity_field_enum_and_type_info {
    ($($variant:ident),+ $(,)?) => {
        // Fix: route through `$crate::` so the wrapper also resolves the
        // helper macros when invoked from a downstream crate that did not
        // separately import them.
        $crate::generate_identity_field_enum!(
            $($variant),+
        );
        $crate::generate_type_info!(
            IdentityField,
            vec![
                $(stringify!($variant)),+
            ]
        );
    }
}
/// Combined helper: generates the `IdentityInformationProvider` impl, the
/// `Default` impl, and the `fields()` accessor for `$struct_name` from one
/// `(variant, field, max_length)` list, so the three stay in sync.
#[macro_export]
macro_rules! generate_identity_info_impls {
    (
        $struct_name:ident,
        $field_limit:ident,
        $(($variant_name:ident, $field_name:ident, $max_length:expr)),+ $(,)?
    ) => {
        // Fix: route through `$crate::` so the wrapper also resolves the
        // helper macros when invoked from a downstream crate that did not
        // separately import them.
        $crate::generate_identity_information_provider_impl!(
            $struct_name,
            $field_limit,
            $(($field_name, $max_length)),+
        );
        $crate::generate_identity_info_default_impl!(
            (additional, BoundedVec::default()),
            $(($field_name, Data::None)),+
        );
        $crate::generate_identity_info_impl!(
            $(($field_name, $variant_name)),+
        );
    }
}

View File

@ -0,0 +1,204 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use codec::{Decode, Encode, MaxEncodedLen};
#[cfg(feature = "runtime-benchmarks")]
use enumflags2::BitFlag;
use enumflags2::{bitflags, BitFlags};
use frame_support::{traits::Get, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound};
use scale_info::{build::Variants, Path, Type, TypeInfo};
use sp_runtime::{BoundedVec, RuntimeDebug};
use sp_std::prelude::*;
use crate::types::{Data, IdentityInformationProvider};
/// The fields that we use to identify the owner of an account with. Each corresponds to a field
/// in the `IdentityInfo` struct.
///
/// Declaration order fixes each flag's bit position and must match the indices
/// registered in the manual `TypeInfo` impl below.
#[bitflags]
#[repr(u64)]
#[derive(Clone, Copy, PartialEq, Eq, RuntimeDebug)]
pub enum IdentityField {
    Display,
    Legal,
    Web,
    Riot,
    Email,
    PgpFingerprint,
    Image,
    Twitter,
}
impl TypeInfo for IdentityField {
    type Identity = Self;

    /// Hand-rolled metadata: each variant is registered with the index
    /// matching its declaration order in the enum above.
    fn type_info() -> scale_info::Type {
        const NAMES: [&str; 8] = [
            "Display",
            "Legal",
            "Web",
            "Riot",
            "Email",
            "PgpFingerprint",
            "Image",
            "Twitter",
        ];
        let variants = NAMES
            .iter()
            .enumerate()
            .fold(Variants::new(), |acc, (i, name)| {
                acc.variant(*name, |v| v.index(i as u8))
            });
        Type::builder()
            .path(Path::new("IdentityField", module_path!()))
            .variant(variants)
    }
}
/// Information concerning the identity of the controller of an account.
///
/// NOTE: This should be stored at the end of the storage item to facilitate the addition of extra
/// fields in a backwards compatible way through a specialized `Decode` impl.
/// Field declaration order is part of the SCALE encoding.
#[derive(
    CloneNoBound,
    Encode,
    Decode,
    EqNoBound,
    MaxEncodedLen,
    PartialEqNoBound,
    RuntimeDebugNoBound,
    TypeInfo,
)]
#[codec(mel_bound())]
#[scale_info(skip_type_params(FieldLimit))]
pub struct IdentityInfo<FieldLimit: Get<u32>> {
    /// Additional fields of the identity that are not catered for with the struct's explicit
    /// fields.
    pub additional: BoundedVec<(Data, Data), FieldLimit>,
    /// A reasonable display name for the controller of the account. This should be whatever it is
    /// that it is typically known as and should not be confusable with other entities, given
    /// reasonable context.
    ///
    /// Stored as UTF-8.
    pub display: Data,
    /// The full legal name in the local jurisdiction of the entity. This might be a bit
    /// long-winded.
    ///
    /// Stored as UTF-8.
    pub legal: Data,
    /// A representative website held by the controller of the account.
    ///
    /// NOTE: `https://` is automatically prepended.
    ///
    /// Stored as UTF-8.
    pub web: Data,
    /// The Riot/Matrix handle held by the controller of the account.
    ///
    /// Stored as UTF-8.
    pub riot: Data,
    /// The email address of the controller of the account.
    ///
    /// Stored as UTF-8.
    pub email: Data,
    /// The PGP/GPG public key of the controller of the account.
    pub pgp_fingerprint: Option<[u8; 20]>,
    /// A graphic image representing the controller of the account. Should be a company,
    /// organization or project logo or a headshot in the case of a human.
    pub image: Data,
    /// The Twitter identity. The leading `@` character may be elided.
    pub twitter: Data,
}
impl<FieldLimit: Get<u32> + 'static> IdentityInformationProvider for IdentityInfo<FieldLimit> {
    type FieldsIdentifier = u64;

    /// `true` iff every bit set in `fields` corresponds to a populated field.
    fn has_identity(&self, fields: Self::FieldsIdentifier) -> bool {
        self.fields().bits() & fields == fields
    }

    /// Worst-case identity for benchmarking: every `Data` field holds 32 raw
    /// bytes, the fingerprint is set, and `additional` is filled to
    /// `FieldLimit`.
    #[cfg(feature = "runtime-benchmarks")]
    fn create_identity_info() -> Self {
        let data = Data::Raw(vec![0; 32].try_into().unwrap());

        IdentityInfo {
            additional: vec![(data.clone(), data.clone()); FieldLimit::get().try_into().unwrap()]
                .try_into()
                .unwrap(),
            display: data.clone(),
            legal: data.clone(),
            web: data.clone(),
            riot: data.clone(),
            email: data.clone(),
            pgp_fingerprint: Some([0; 20]),
            image: data.clone(),
            twitter: data,
        }
    }

    /// All identity-field bits set.
    #[cfg(feature = "runtime-benchmarks")]
    fn all_fields() -> Self::FieldsIdentifier {
        IdentityField::all().bits()
    }
}
impl<FieldLimit: Get<u32>> Default for IdentityInfo<FieldLimit> {
    /// An identity with no data: every field empty, no PGP fingerprint.
    fn default() -> Self {
        Self {
            additional: BoundedVec::default(),
            pgp_fingerprint: None,
            display: Data::None,
            legal: Data::None,
            web: Data::None,
            riot: Data::None,
            email: Data::None,
            image: Data::None,
            twitter: Data::None,
        }
    }
}
impl<FieldLimit: Get<u32>> IdentityInfo<FieldLimit> {
    /// Computes the set of flags for the fields that currently hold data.
    ///
    /// A `Data` field counts as present when it is not `Data::None`; the PGP
    /// fingerprint counts as present when it is `Some`.
    pub(crate) fn fields(&self) -> BitFlags<IdentityField> {
        let mut flags = <BitFlags<IdentityField>>::empty();
        {
            let mut mark = |present: bool, field: IdentityField| {
                if present {
                    flags.insert(field);
                }
            };
            mark(!self.display.is_none(), IdentityField::Display);
            mark(!self.legal.is_none(), IdentityField::Legal);
            mark(!self.web.is_none(), IdentityField::Web);
            mark(!self.riot.is_none(), IdentityField::Riot);
            mark(!self.email.is_none(), IdentityField::Email);
            mark(self.pgp_fingerprint.is_some(), IdentityField::PgpFingerprint);
            mark(!self.image.is_none(), IdentityField::Image);
            mark(!self.twitter.is_none(), IdentityField::Twitter);
        }
        flags
    }
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,130 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Storage migrations for the Identity pallet.
use super::*;
use frame_support::{migrations::VersionedMigration, pallet_prelude::*, traits::OnRuntimeUpgrade};
#[cfg(feature = "try-runtime")]
use codec::{Decode, Encode};
#[cfg(feature = "try-runtime")]
use sp_runtime::TryRuntimeError;
/// Version-checked wrappers around the raw migrations below.
pub mod versioned {
    use super::*;

    /// V0 -> V1: wraps [`v1::VersionUncheckedMigrateV0ToV1`] in a
    /// [`VersionedMigration`] gated on storage version 0, bumping it to 1.
    pub type V0ToV1<T, const KL: u64> = VersionedMigration<
        0,
        1,
        v1::VersionUncheckedMigrateV0ToV1<T, KL>,
        crate::pallet::Pallet<T>,
        <T as frame_system::Config>::DbWeight,
    >;
}
pub mod v1 {
    use super::*;

    /// The log target.
    const TARGET: &'static str = "runtime::identity::migration::v1";

    /// The old identity type, useful in pre-upgrade.
    mod v0 {
        use super::*;
        use frame_support::storage_alias;

        // Storage alias over the pre-migration `IdentityOf` map, so the old
        // value layout can still be read during the upgrade.
        #[storage_alias]
        pub type IdentityOf<T: Config> = StorageMap<
            Pallet<T>,
            Twox64Concat,
            <T as frame_system::Config>::AccountId,
            Registration<
                BalanceOf<T>,
                <T as pallet::Config>::MaxRegistrars,
                <T as pallet::Config>::IdentityInformation,
            >,
            OptionQuery,
        >;
    }

    /// Migration to add usernames to Identity info.
    ///
    /// `T` is the runtime and `KL` is the key limit to migrate. This is just a safety guard to
    /// prevent stalling a parachain by accumulating too much weight in the migration. To have an
    /// unlimited migration (e.g. in a chain without PoV limits), set this to `u64::MAX`.
    pub struct VersionUncheckedMigrateV0ToV1<T, const KL: u64>(PhantomData<T>);
    impl<T: Config, const KL: u64> OnRuntimeUpgrade for VersionUncheckedMigrateV0ToV1<T, KL> {
        #[cfg(feature = "try-runtime")]
        fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
            let identities = v0::IdentityOf::<T>::iter().count();
            log::info!(
                target: TARGET,
                "pre-upgrade state contains '{}' identities.",
                identities
            );
            // NOTE(review): this requires strictly fewer than `KL` identities,
            // while the loop below migrates up to and including the `KL`-th
            // item before warning — confirm the intended boundary.
            ensure!((identities as u64) < KL, "too many identities to migrate");
            Ok((identities as u64).encode())
        }

        fn on_runtime_upgrade() -> Weight {
            log::info!(
                target: TARGET,
                "running storage migration from version 0 to version 1."
            );
            let mut weight = T::DbWeight::get().reads(1);
            let mut translated: u64 = 0;
            let mut interrupted = false;
            for (account, registration) in v0::IdentityOf::<T>::iter() {
                // New value layout: the old registration paired with an
                // (initially unset) primary username.
                IdentityOf::<T>::insert(account, (registration, None::<Username<T>>));
                translated.saturating_inc();
                // Stop once the key limit is reached; anything beyond `KL`
                // remains unmigrated and is flagged by the warning below.
                if translated >= KL {
                    log::warn!(
                        "Incomplete! Migration limit reached. Only {} identities migrated.",
                        translated
                    );
                    interrupted = true;
                    break;
                }
            }
            if !interrupted {
                log::info!("all {} identities migrated", translated);
            }
            weight.saturating_accrue(T::DbWeight::get().reads_writes(translated, translated));
            weight.saturating_accrue(T::DbWeight::get().writes(1));
            weight
        }

        #[cfg(feature = "try-runtime")]
        fn post_upgrade(state: Vec<u8>) -> Result<(), TryRuntimeError> {
            let identities_to_migrate: u64 = Decode::decode(&mut &state[..])
                .expect("failed to decode the state from pre-upgrade.");
            let identities = IdentityOf::<T>::iter().count() as u64;
            // NOTE(review): unlike the other log lines, this omits
            // `target: TARGET` — confirm whether that is intentional.
            log::info!(
                "post-upgrade expects '{}' identities to have been migrated.",
                identities
            );
            ensure!(
                identities_to_migrate == identities,
                "must migrate all identities."
            );
            log::info!(target: TARGET, "migrated all identities.");
            Ok(())
        }
    }
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,421 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::*;
use codec::{Decode, Encode, MaxEncodedLen};
use frame_support::{traits::ConstU32, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound};
use scale_info::{
build::{Fields, Variants},
Path, Type, TypeInfo,
};
use sp_runtime::{traits::Member, RuntimeDebug};
// use sp_std::{fmt::Debug, iter::once, ops::Add, prelude::*};
use sp_std::{fmt::Debug, iter::once, ops::Add};
/// An identifier for a single name registrar/identity verification service.
pub type RegistrarIndex = u32;

/// Either underlying data blob if it is at most 32 bytes, or a hash of it. If the data is greater
/// than 32-bytes then it will be truncated when encoding.
///
/// Can also be `None`.
///
/// `Encode`, `Decode` and `TypeInfo` are implemented manually below, using a
/// one-byte tag that doubles as the length marker for the `Raw` variant.
#[derive(Clone, Eq, PartialEq, RuntimeDebug, MaxEncodedLen)]
pub enum Data {
    /// No data here.
    None,
    /// The data is stored directly.
    Raw(BoundedVec<u8, ConstU32<32>>),
    /// Only the Blake2 hash of the data is stored. The preimage of the hash may be retrieved
    /// through some hash-lookup service.
    BlakeTwo256([u8; 32]),
    /// Only the SHA2-256 hash of the data is stored. The preimage of the hash may be retrieved
    /// through some hash-lookup service.
    Sha256([u8; 32]),
    /// Only the Keccak-256 hash of the data is stored. The preimage of the hash may be retrieved
    /// through some hash-lookup service.
    Keccak256([u8; 32]),
    /// Only the SHA3-256 hash of the data is stored. The preimage of the hash may be retrieved
    /// through some hash-lookup service.
    ShaThree256([u8; 32]),
}
impl Data {
    /// `true` iff no data is stored (`Data::None`).
    pub fn is_none(&self) -> bool {
        matches!(self, Data::None)
    }
}
impl Decode for Data {
    // Tag-byte scheme (mirrors `Encode` below): 0 = None, 1..=33 = Raw with
    // `tag - 1` bytes following, 34..=37 = the four 32-byte hash variants.
    fn decode<I: codec::Input>(input: &mut I) -> sp_std::result::Result<Self, codec::Error> {
        let b = input.read_byte()?;
        Ok(match b {
            0 => Data::None,
            n @ 1..=33 => {
                // `n - 1 <= 32`, so the length always fits the 32-byte bound.
                let mut r: BoundedVec<_, _> = vec![0u8; n as usize - 1]
                    .try_into()
                    .expect("bound checked in match arm condition; qed");
                input.read(&mut r[..])?;
                Data::Raw(r)
            }
            34 => Data::BlakeTwo256(<[u8; 32]>::decode(input)?),
            35 => Data::Sha256(<[u8; 32]>::decode(input)?),
            36 => Data::Keccak256(<[u8; 32]>::decode(input)?),
            37 => Data::ShaThree256(<[u8; 32]>::decode(input)?),
            _ => return Err(codec::Error::from("invalid leading byte")),
        })
    }
}
impl Encode for Data {
    /// Encodes with a single leading tag byte: `0` for `None`, `len + 1` for
    /// `Raw` (lengths 0..=32 map to tags 1..=33), and `34..=37` for the hash
    /// variants — mirroring the manual `Decode` impl above.
    fn encode(&self) -> Vec<u8> {
        match self {
            Data::None => vec![0u8],
            Data::Raw(x) => {
                // Truncate to 32 bytes; only tags 1..=33 are representable.
                let len = x.len().min(32);
                let mut out = Vec::with_capacity(len + 1);
                out.push(len as u8 + 1);
                out.extend_from_slice(&x[..len]);
                out
            }
            Data::BlakeTwo256(h) => once(34u8).chain(h.iter().copied()).collect(),
            Data::Sha256(h) => once(35u8).chain(h.iter().copied()).collect(),
            Data::Keccak256(h) => once(36u8).chain(h.iter().copied()).collect(),
            Data::ShaThree256(h) => once(37u8).chain(h.iter().copied()).collect(),
        }
    }
}

impl codec::EncodeLike for Data {}
/// Add a Raw variant with the given index and a fixed sized byte array.
///
/// Helper for the manual `TypeInfo` impl below: each `($index, $size)` pair
/// appends a `Raw<size>` variant wrapping `[u8; size]` at metadata index
/// `$index`.
macro_rules! data_raw_variants {
    ($variants:ident, $(($index:literal, $size:literal)),* ) => {
        $variants
        $(
            .variant(concat!("Raw", stringify!($size)), |v| v
                .index($index)
                .fields(Fields::unnamed().field(|f| f.ty::<[u8; $size]>()))
            )
        )*
    }
}
impl TypeInfo for Data {
    type Identity = Self;

    // Manual metadata: one `Raw<N>` variant per possible byte length, so each
    // variant's index equals the encoding's tag byte (length + 1 for `Raw`),
    // matching the manual `Encode`/`Decode` impls above.
    fn type_info() -> Type {
        let variants = Variants::new().variant("None", |v| v.index(0));

        // create a variant for all sizes of Raw data from 0-32
        let variants = data_raw_variants!(
            variants,
            (1, 0),
            (2, 1),
            (3, 2),
            (4, 3),
            (5, 4),
            (6, 5),
            (7, 6),
            (8, 7),
            (9, 8),
            (10, 9),
            (11, 10),
            (12, 11),
            (13, 12),
            (14, 13),
            (15, 14),
            (16, 15),
            (17, 16),
            (18, 17),
            (19, 18),
            (20, 19),
            (21, 20),
            (22, 21),
            (23, 22),
            (24, 23),
            (25, 24),
            (26, 25),
            (27, 26),
            (28, 27),
            (29, 28),
            (30, 29),
            (31, 30),
            (32, 31),
            (33, 32)
        );

        // Hash variants at 34..=37, again matching the encoding tags.
        let variants = variants
            .variant("BlakeTwo256", |v| {
                v.index(34)
                    .fields(Fields::unnamed().field(|f| f.ty::<[u8; 32]>()))
            })
            .variant("Sha256", |v| {
                v.index(35)
                    .fields(Fields::unnamed().field(|f| f.ty::<[u8; 32]>()))
            })
            .variant("Keccak256", |v| {
                v.index(36)
                    .fields(Fields::unnamed().field(|f| f.ty::<[u8; 32]>()))
            })
            .variant("ShaThree256", |v| {
                v.index(37)
                    .fields(Fields::unnamed().field(|f| f.ty::<[u8; 32]>()))
            });

        Type::builder()
            .path(Path::new("Data", module_path!()))
            .variant(variants)
    }
}
impl Default for Data {
fn default() -> Self {
Self::None
}
}
/// An attestation of a registrar over how accurate some `IdentityInfo` is in describing an account.
///
/// NOTE: Registrars may pay little attention to some fields. Registrars may want to make clear
/// which fields their attestation is relevant for by off-chain means.
#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, MaxEncodedLen, TypeInfo)]
pub enum Judgement<Balance: Encode + Decode + MaxEncodedLen + Copy + Clone + Debug + Eq + PartialEq>
{
    /// The default value; no opinion is held.
    Unknown,
    /// No judgement is yet in place, but a deposit is reserved as payment for providing one.
    FeePaid(Balance),
    /// The data appears to be reasonably acceptable in terms of its accuracy, however no in depth
    /// checks (such as in-person meetings or formal KYC) have been conducted.
    Reasonable,
    /// The target is known directly by the registrar and the registrar can fully attest to
    /// the data's accuracy.
    KnownGood,
    /// The data was once good but is currently out of date. There is no malicious intent in the
    /// inaccuracy. This judgement can be removed through updating the data.
    OutOfDate,
    /// The data is imprecise or of sufficiently low-quality to be problematic. It is not
    /// indicative of malicious intent. This judgement can be removed through updating the data.
    LowQuality,
    /// The data is erroneous. This may be indicative of malicious intent. This cannot be removed
    /// except by the registrar.
    Erroneous,
}
impl<Balance: Encode + Decode + MaxEncodedLen + Copy + Clone + Debug + Eq + PartialEq>
    Judgement<Balance>
{
    /// Returns `true` if this judgement is indicative of a deposit being currently held. This means
    /// it should not be cleared or replaced except by an operation which utilizes the deposit.
    pub(crate) fn has_deposit(&self) -> bool {
        match self {
            Judgement::FeePaid(_) => true,
            _ => false,
        }
    }

    /// Returns `true` if this judgement is one that should not be generally be replaced outside
    /// of specialized handlers. Examples include "malicious" judgements and deposit-holding
    /// judgements.
    pub(crate) fn is_sticky(&self) -> bool {
        match self {
            Judgement::FeePaid(_) | Judgement::Erroneous => true,
            _ => false,
        }
    }
}
/// Information concerning the identity of the controller of an account.
///
/// Implementors define the concrete identity layout; the pallet interacts with
/// it only through this interface.
pub trait IdentityInformationProvider:
    Encode + Decode + MaxEncodedLen + Clone + Debug + Eq + PartialEq + TypeInfo + Default
{
    /// Type capable of holding information on which identity fields are set.
    type FieldsIdentifier: Member + Encode + Decode + MaxEncodedLen + TypeInfo + Default;

    /// Check if an identity registered information for some given `fields`.
    fn has_identity(&self, fields: Self::FieldsIdentifier) -> bool;

    /// Create a basic instance of the identity information.
    #[cfg(feature = "runtime-benchmarks")]
    fn create_identity_info() -> Self;

    /// The identity information representation for all identity fields enabled.
    #[cfg(feature = "runtime-benchmarks")]
    fn all_fields() -> Self::FieldsIdentifier;
}
/// Information on an identity along with judgements from registrars.
///
/// NOTE: This is stored separately primarily to facilitate the addition of extra fields in a
/// backwards compatible way through a specialized `Decode` impl (provided manually below —
/// `Decode` is intentionally not derived).
#[derive(
    CloneNoBound, Encode, Eq, MaxEncodedLen, PartialEqNoBound, RuntimeDebugNoBound, TypeInfo,
)]
#[codec(mel_bound())]
#[scale_info(skip_type_params(MaxJudgements))]
pub struct Registration<
    Balance: Encode + Decode + MaxEncodedLen + Copy + Clone + Debug + Eq + PartialEq,
    MaxJudgements: Get<u32>,
    IdentityInfo: IdentityInformationProvider,
> {
    /// Judgements from the registrars on this identity. Stored ordered by `RegistrarIndex`. There
    /// may be only a single judgement from each registrar.
    pub judgements: BoundedVec<(RegistrarIndex, Judgement<Balance>), MaxJudgements>,
    /// Amount held on deposit for this information.
    pub deposit: Balance,
    /// Information on the identity.
    pub info: IdentityInfo,
}
impl<
        Balance: Encode + Decode + MaxEncodedLen + Copy + Clone + Debug + Eq + PartialEq + Zero + Add,
        MaxJudgements: Get<u32>,
        IdentityInfo: IdentityInformationProvider,
    > Registration<Balance, MaxJudgements, IdentityInfo>
{
    /// The base deposit plus every `FeePaid` amount currently held in
    /// `judgements`.
    pub(crate) fn total_deposit(&self) -> Balance {
        self.judgements
            .iter()
            .filter_map(|(_, j)| match j {
                Judgement::FeePaid(fee) => Some(*fee),
                _ => None,
            })
            .fold(self.deposit, |acc, fee| acc + fee)
    }
}
impl<
        Balance: Encode + Decode + MaxEncodedLen + Copy + Clone + Debug + Eq + PartialEq,
        MaxJudgements: Get<u32>,
        IdentityInfo: IdentityInformationProvider,
    > Decode for Registration<Balance, MaxJudgements, IdentityInfo>
{
    // Decodes through `AppendZerosInput`, which pads the input with trailing
    // zeros — this is what lets older (shorter) encodings still decode after
    // new trailing fields are added (see the NOTE on the struct).
    fn decode<I: codec::Input>(input: &mut I) -> sp_std::result::Result<Self, codec::Error> {
        let (judgements, deposit, info) = Decode::decode(&mut AppendZerosInput::new(input))?;
        Ok(Self {
            judgements,
            deposit,
            info,
        })
    }
}
/// Information concerning a registrar.
///
/// Generic over the balance, account-id and field-set types supplied by the
/// runtime configuration.
#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, MaxEncodedLen, TypeInfo)]
pub struct RegistrarInfo<
    Balance: Encode + Decode + Clone + Debug + Eq + PartialEq,
    AccountId: Encode + Decode + Clone + Debug + Eq + PartialEq,
    IdField: Encode + Decode + Clone + Debug + Default + Eq + PartialEq + TypeInfo + MaxEncodedLen,
> {
    /// The account of the registrar.
    pub account: AccountId,
    /// Amount required to be given to the registrar for them to provide judgement.
    pub fee: Balance,
    /// Relevant fields for this registrar. Registrar judgements are limited to attestations on
    /// these fields.
    pub fields: IdField,
}
/// Authority properties for a given pallet configuration.
pub type AuthorityPropertiesOf<T> = AuthorityProperties<Suffix<T>>;
/// The number of usernames that an authority may allocate.
type Allocation = u32;
/// A byte vec used to represent a username *suffix* (not a full username),
/// bounded by the runtime's `MaxSuffixLength`. See `AuthorityProperties::suffix`.
pub(crate) type Suffix<T> = BoundedVec<u8, <T as Config>::MaxSuffixLength>;
/// Properties of a username authority.
///
/// Generic over the suffix representation so the pallet can instantiate it
/// with the bounded `Suffix<T>` type (see `AuthorityPropertiesOf<T>`).
#[derive(Clone, Encode, Decode, MaxEncodedLen, TypeInfo, PartialEq, Debug)]
pub struct AuthorityProperties<Suffix> {
    /// The suffix added to usernames granted by this authority. Will be appended to usernames; for
    /// example, a suffix of `wallet` will result in `.wallet` being appended to a user's selected
    /// name.
    pub suffix: Suffix,
    /// The number of usernames remaining that this authority can grant.
    pub allocation: Allocation,
}
/// A byte vec used to represent a username, bounded by the runtime's
/// `MaxUsernameLength`.
pub(crate) type Username<T> = BoundedVec<u8, <T as Config>::MaxUsernameLength>;
#[cfg(test)]
mod tests {
    use super::*;

    /// Checks that the hand-written `TypeInfo` implementation for `Data` stays
    /// in sync with its hand-written `Encode` implementation: for every
    /// variant, the registered variant name, variant index, and payload array
    /// length must match what `encode()` actually produces.
    #[test]
    fn manual_data_type_info() {
        let mut registry = scale_info::Registry::new();
        let type_id = registry.register_type(&scale_info::meta_type::<Data>());
        let registry: scale_info::PortableRegistry = registry.into();
        let type_info = registry.resolve(type_id.id).unwrap();
        let check_type_info = |data: &Data| {
            let variant_name = match data {
                Data::None => "None".to_string(),
                Data::BlakeTwo256(_) => "BlakeTwo256".to_string(),
                Data::Sha256(_) => "Sha256".to_string(),
                Data::Keccak256(_) => "Keccak256".to_string(),
                Data::ShaThree256(_) => "ShaThree256".to_string(),
                Data::Raw(bytes) => format!("Raw{}", bytes.len()),
            };
            if let scale_info::TypeDef::Variant(variant) = &type_info.type_def {
                let variant = variant
                    .variants
                    .iter()
                    .find(|v| v.name == variant_name)
                    // `expect(&format!(..))` would build the message even on
                    // success (clippy::expect_fun_call); build it lazily.
                    .unwrap_or_else(|| panic!("Expected to find variant {}", variant_name));
                // Payload length as registered in the type info; variants with
                // no payload (i.e. `None`) count as length 0.
                let field_arr_len = variant
                    .fields
                    .first()
                    .and_then(|f| registry.resolve(f.ty.id))
                    .map(|ty| {
                        if let scale_info::TypeDef::Array(arr) = &ty.type_def {
                            arr.len
                        } else {
                            panic!("Should be an array type")
                        }
                    })
                    .unwrap_or(0);
                let encoded = data.encode();
                assert_eq!(encoded[0], variant.index);
                assert_eq!(encoded.len() as u32 - 1, field_arr_len);
            } else {
                panic!("Should be a variant type")
            };
        };
        let mut data = vec![
            Data::None,
            Data::BlakeTwo256(Default::default()),
            Data::Sha256(Default::default()),
            Data::Keccak256(Default::default()),
            Data::ShaThree256(Default::default()),
        ];
        // A `Raw` instance for each payload size in `0..32` (exclusive upper
        // bound, matching the loop below).
        for n in 0..32 {
            data.push(Data::Raw(vec![0u8; n as usize].try_into().unwrap()))
        }
        for d in data.iter() {
            check_type_info(d);
        }
    }
}

View File

@ -0,0 +1,840 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Autogenerated weights for pallet_identity
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024
// Executed Command:
// ./target/production/substrate
// benchmark
// pallet
// --chain=dev
// --steps=50
// --repeat=20
// --pallet=pallet_identity
// --no-storage-info
// --no-median-slopes
// --no-min-squares
// --extrinsic=*
// --execution=wasm
// --wasm-execution=compiled
// --heap-pages=4096
// --output=./frame/identity/src/weights.rs
// --header=./HEADER-APACHE2
// --template=./.maintain/frame-weight-template.hbs
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
#![allow(unused_imports)]
#![allow(missing_docs)]
use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
use core::marker::PhantomData;
/// Weight functions needed for pallet_identity.
///
/// Implemented below by `SubstrateWeight<T>` (benchmarked values against the
/// runtime's `T::DbWeight`) and by `()` (same values against `RocksDbWeight`,
/// kept for backwards compatibility and tests).
pub trait WeightInfo {
    // Component meanings (per the benchmark comments on the impls):
    // `r` = number of registrars, `s` = number of sub-accounts,
    // `p` = number of pre-existing sub-identities being replaced.
    fn add_registrar(r: u32, ) -> Weight;
    fn set_identity(r: u32, ) -> Weight;
    fn set_subs_new(s: u32, ) -> Weight;
    fn set_subs_old(p: u32, ) -> Weight;
    fn clear_identity(r: u32, s: u32, ) -> Weight;
    fn request_judgement(r: u32, ) -> Weight;
    fn cancel_request(r: u32, ) -> Weight;
    fn set_fee(r: u32, ) -> Weight;
    fn set_account_id(r: u32, ) -> Weight;
    fn set_fields(r: u32, ) -> Weight;
    fn provide_judgement(r: u32, ) -> Weight;
    fn kill_identity(r: u32, s: u32, ) -> Weight;
    fn add_sub(s: u32, ) -> Weight;
    fn rename_sub(s: u32, ) -> Weight;
    fn remove_sub(s: u32, ) -> Weight;
    fn quit_sub(s: u32, ) -> Weight;
    fn add_username_authority() -> Weight;
    fn remove_username_authority() -> Weight;
    fn set_username_for() -> Weight;
    fn accept_username() -> Weight;
    fn remove_expired_approval() -> Weight;
    fn set_primary_username() -> Weight;
    fn remove_dangling_username() -> Weight;
    // NOTE(review): not part of upstream pallet_identity weights — presumably
    // a fork-local extrinsic; confirm it is wired up in the pallet.
    fn emit_identity_hash() -> Weight;
}
/// Weights for pallet_identity using the Substrate node and recommended hardware.
// `PhantomData<T>` only ties the struct to the runtime type so the impl can
// read `T::DbWeight`; no data is stored.
pub struct SubstrateWeight<T>(PhantomData<T>);
// NOTE(review): the constants below were produced by the benchmark run
// described in the file header; re-generate rather than hand-edit.
impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
    /// Storage: Identity Registrars (r:1 w:1)
    /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen)
    /// The range of component `r` is `[1, 19]`.
    fn add_registrar(r: u32, ) -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `32 + r * (57 ±0)`
        //  Estimated: `2626`
        // Minimum execution time: 11_683_000 picoseconds.
        Weight::from_parts(12_515_830, 2626)
            // Standard Error: 2_154
            .saturating_add(Weight::from_parts(147_919, 0).saturating_mul(r.into()))
            .saturating_add(T::DbWeight::get().reads(1_u64))
            .saturating_add(T::DbWeight::get().writes(1_u64))
    }
    /// Storage: Identity IdentityOf (r:1 w:1)
    /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
    /// The range of component `r` is `[1, 20]`.
    fn set_identity(r: u32, ) -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `442 + r * (5 ±0)`
        //  Estimated: `11003`
        // Minimum execution time: 32_949_000 picoseconds.
        Weight::from_parts(31_329_634, 11003)
            // Standard Error: 4_496
            .saturating_add(Weight::from_parts(203_570, 0).saturating_mul(r.into()))
            .saturating_add(T::DbWeight::get().reads(1_u64))
            .saturating_add(T::DbWeight::get().writes(1_u64))
    }
    /// Storage: Identity IdentityOf (r:1 w:0)
    /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
    /// Storage: Identity SubsOf (r:1 w:1)
    /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen)
    /// Storage: Identity SuperOf (r:100 w:100)
    /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen)
    /// The range of component `s` is `[0, 100]`.
    fn set_subs_new(s: u32, ) -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `101`
        //  Estimated: `11003 + s * (2589 ±0)`
        // Minimum execution time: 9_157_000 picoseconds.
        Weight::from_parts(24_917_444, 11003)
            // Standard Error: 4_554
            .saturating_add(Weight::from_parts(3_279_868, 0).saturating_mul(s.into()))
            .saturating_add(T::DbWeight::get().reads(2_u64))
            .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(s.into())))
            .saturating_add(T::DbWeight::get().writes(1_u64))
            .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into())))
            .saturating_add(Weight::from_parts(0, 2589).saturating_mul(s.into()))
    }
    /// Storage: Identity IdentityOf (r:1 w:0)
    /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
    /// Storage: Identity SubsOf (r:1 w:1)
    /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen)
    /// Storage: Identity SuperOf (r:0 w:100)
    /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen)
    /// The range of component `p` is `[0, 100]`.
    fn set_subs_old(p: u32, ) -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `194 + p * (32 ±0)`
        //  Estimated: `11003`
        // Minimum execution time: 9_240_000 picoseconds.
        Weight::from_parts(23_326_035, 11003)
            // Standard Error: 3_664
            .saturating_add(Weight::from_parts(1_439_873, 0).saturating_mul(p.into()))
            .saturating_add(T::DbWeight::get().reads(2_u64))
            .saturating_add(T::DbWeight::get().writes(1_u64))
            .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into())))
    }
    /// Storage: Identity SubsOf (r:1 w:1)
    /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen)
    /// Storage: Identity IdentityOf (r:1 w:1)
    /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
    /// Storage: Identity SuperOf (r:0 w:100)
    /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen)
    /// The range of component `r` is `[1, 20]`.
    /// The range of component `s` is `[0, 100]`.
    fn clear_identity(r: u32, s: u32, ) -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `469 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)`
        //  Estimated: `11003`
        // Minimum execution time: 55_687_000 picoseconds.
        Weight::from_parts(30_695_182, 11003)
            // Standard Error: 9_921
            .saturating_add(Weight::from_parts(162_357, 0).saturating_mul(r.into()))
            // Standard Error: 1_937
            .saturating_add(Weight::from_parts(1_427_998, 0).saturating_mul(s.into()))
            .saturating_add(T::DbWeight::get().reads(2_u64))
            .saturating_add(T::DbWeight::get().writes(2_u64))
            .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into())))
    }
    /// Storage: Identity Registrars (r:1 w:0)
    /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen)
    /// Storage: Identity IdentityOf (r:1 w:1)
    /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
    /// The range of component `r` is `[1, 20]`.
    fn request_judgement(r: u32, ) -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `367 + r * (57 ±0) + x * (66 ±0)`
        //  Estimated: `11003`
        // Minimum execution time: 34_876_000 picoseconds.
        Weight::from_parts(32_207_018, 11003)
            // Standard Error: 5_247
            .saturating_add(Weight::from_parts(249_156, 0).saturating_mul(r.into()))
            .saturating_add(T::DbWeight::get().reads(2_u64))
            .saturating_add(T::DbWeight::get().writes(1_u64))
    }
    /// Storage: Identity IdentityOf (r:1 w:1)
    /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
    /// The range of component `r` is `[1, 20]`.
    fn cancel_request(r: u32, ) -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `398 + x * (66 ±0)`
        //  Estimated: `11003`
        // Minimum execution time: 30_689_000 picoseconds.
        Weight::from_parts(31_967_170, 11003)
            // Standard Error: 5_387
            .saturating_add(Weight::from_parts(42_676, 0).saturating_mul(r.into()))
            .saturating_add(T::DbWeight::get().reads(1_u64))
            .saturating_add(T::DbWeight::get().writes(1_u64))
    }
    /// Storage: Identity Registrars (r:1 w:1)
    /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen)
    /// The range of component `r` is `[1, 19]`.
    fn set_fee(r: u32, ) -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `89 + r * (57 ±0)`
        //  Estimated: `2626`
        // Minimum execution time: 7_357_000 picoseconds.
        Weight::from_parts(7_932_950, 2626)
            // Standard Error: 1_804
            .saturating_add(Weight::from_parts(132_653, 0).saturating_mul(r.into()))
            .saturating_add(T::DbWeight::get().reads(1_u64))
            .saturating_add(T::DbWeight::get().writes(1_u64))
    }
    /// Storage: Identity Registrars (r:1 w:1)
    /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen)
    /// The range of component `r` is `[1, 19]`.
    fn set_account_id(r: u32, ) -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `89 + r * (57 ±0)`
        //  Estimated: `2626`
        // Minimum execution time: 7_437_000 picoseconds.
        Weight::from_parts(8_051_889, 2626)
            // Standard Error: 1_997
            .saturating_add(Weight::from_parts(129_592, 0).saturating_mul(r.into()))
            .saturating_add(T::DbWeight::get().reads(1_u64))
            .saturating_add(T::DbWeight::get().writes(1_u64))
    }
    /// Storage: Identity Registrars (r:1 w:1)
    /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen)
    /// The range of component `r` is `[1, 19]`.
    fn set_fields(r: u32, ) -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `89 + r * (57 ±0)`
        //  Estimated: `2626`
        // Minimum execution time: 7_385_000 picoseconds.
        Weight::from_parts(7_911_589, 2626)
            // Standard Error: 1_791
            .saturating_add(Weight::from_parts(125_788, 0).saturating_mul(r.into()))
            .saturating_add(T::DbWeight::get().reads(1_u64))
            .saturating_add(T::DbWeight::get().writes(1_u64))
    }
    /// Storage: Identity Registrars (r:1 w:0)
    /// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen)
    /// Storage: Identity IdentityOf (r:1 w:1)
    /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
    /// The range of component `r` is `[1, 19]`.
    fn provide_judgement(r: u32, ) -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `445 + r * (57 ±0) + x * (66 ±0)`
        //  Estimated: `11003`
        // Minimum execution time: 24_073_000 picoseconds.
        Weight::from_parts(17_817_684, 11003)
            // Standard Error: 8_612
            .saturating_add(Weight::from_parts(406_251, 0).saturating_mul(r.into()))
            .saturating_add(T::DbWeight::get().reads(2_u64))
            .saturating_add(T::DbWeight::get().writes(1_u64))
    }
    /// Storage: Identity SubsOf (r:1 w:1)
    /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen)
    /// Storage: Identity IdentityOf (r:1 w:1)
    /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
    /// Storage: System Account (r:1 w:1)
    /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
    /// Storage: Identity SuperOf (r:0 w:100)
    /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen)
    /// The range of component `r` is `[1, 20]`.
    /// The range of component `s` is `[0, 100]`.
    fn kill_identity(r: u32, s: u32, ) -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `676 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)`
        //  Estimated: `11003`
        // Minimum execution time: 73_981_000 picoseconds.
        Weight::from_parts(51_684_057, 11003)
            // Standard Error: 12_662
            .saturating_add(Weight::from_parts(145_285, 0).saturating_mul(r.into()))
            // Standard Error: 2_472
            .saturating_add(Weight::from_parts(1_421_039, 0).saturating_mul(s.into()))
            .saturating_add(T::DbWeight::get().reads(3_u64))
            .saturating_add(T::DbWeight::get().writes(3_u64))
            .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into())))
    }
    /// Storage: Identity IdentityOf (r:1 w:0)
    /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
    /// Storage: Identity SuperOf (r:1 w:1)
    /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen)
    /// Storage: Identity SubsOf (r:1 w:1)
    /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen)
    /// The range of component `s` is `[0, 99]`.
    fn add_sub(s: u32, ) -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `475 + s * (36 ±0)`
        //  Estimated: `11003`
        // Minimum execution time: 29_367_000 picoseconds.
        Weight::from_parts(34_214_998, 11003)
            // Standard Error: 1_522
            .saturating_add(Weight::from_parts(114_551, 0).saturating_mul(s.into()))
            .saturating_add(T::DbWeight::get().reads(3_u64))
            .saturating_add(T::DbWeight::get().writes(2_u64))
    }
    /// Storage: Identity IdentityOf (r:1 w:0)
    /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
    /// Storage: Identity SuperOf (r:1 w:1)
    /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen)
    /// The range of component `s` is `[1, 100]`.
    fn rename_sub(s: u32, ) -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `591 + s * (3 ±0)`
        //  Estimated: `11003`
        // Minimum execution time: 12_384_000 picoseconds.
        Weight::from_parts(14_417_903, 11003)
            // Standard Error: 539
            .saturating_add(Weight::from_parts(38_371, 0).saturating_mul(s.into()))
            .saturating_add(T::DbWeight::get().reads(2_u64))
            .saturating_add(T::DbWeight::get().writes(1_u64))
    }
    /// Storage: Identity IdentityOf (r:1 w:0)
    /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
    /// Storage: Identity SuperOf (r:1 w:1)
    /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen)
    /// Storage: Identity SubsOf (r:1 w:1)
    /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen)
    /// The range of component `s` is `[1, 100]`.
    fn remove_sub(s: u32, ) -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `638 + s * (35 ±0)`
        //  Estimated: `11003`
        // Minimum execution time: 33_327_000 picoseconds.
        Weight::from_parts(36_208_941, 11003)
            // Standard Error: 1_240
            .saturating_add(Weight::from_parts(105_805, 0).saturating_mul(s.into()))
            .saturating_add(T::DbWeight::get().reads(3_u64))
            .saturating_add(T::DbWeight::get().writes(2_u64))
    }
    /// Storage: Identity SuperOf (r:1 w:1)
    /// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen)
    /// Storage: Identity SubsOf (r:1 w:1)
    /// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen)
    /// Storage: System Account (r:1 w:0)
    /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
    /// The range of component `s` is `[0, 99]`.
    fn quit_sub(s: u32, ) -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `704 + s * (37 ±0)`
        //  Estimated: `6723`
        // Minimum execution time: 23_764_000 picoseconds.
        Weight::from_parts(26_407_731, 6723)
            // Standard Error: 1_025
            .saturating_add(Weight::from_parts(101_112, 0).saturating_mul(s.into()))
            .saturating_add(T::DbWeight::get().reads(3_u64))
            .saturating_add(T::DbWeight::get().writes(2_u64))
    }
    /// Storage: `Identity::UsernameAuthorities` (r:0 w:1)
    /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`)
    fn add_username_authority() -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `0`
        //  Estimated: `0`
        // Minimum execution time: 13_873_000 picoseconds.
        Weight::from_parts(13_873_000, 0)
            .saturating_add(Weight::from_parts(0, 0))
            .saturating_add(T::DbWeight::get().writes(1))
    }
    /// Storage: `Identity::UsernameAuthorities` (r:0 w:1)
    /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`)
    fn remove_username_authority() -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `0`
        //  Estimated: `0`
        // Minimum execution time: 10_653_000 picoseconds.
        Weight::from_parts(10_653_000, 0)
            .saturating_add(Weight::from_parts(0, 0))
            .saturating_add(T::DbWeight::get().writes(1))
    }
    /// Storage: `Identity::UsernameAuthorities` (r:1 w:1)
    /// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`)
    /// Storage: `Identity::AccountOfUsername` (r:1 w:1)
    /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
    /// Storage: `Identity::IdentityOf` (r:1 w:1)
    /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`)
    fn set_username_for() -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `80`
        //  Estimated: `11037`
        // Minimum execution time: 75_928_000 picoseconds.
        Weight::from_parts(75_928_000, 0)
            .saturating_add(Weight::from_parts(0, 11037))
            .saturating_add(T::DbWeight::get().reads(3))
            .saturating_add(T::DbWeight::get().writes(3))
    }
    /// Storage: `Identity::PendingUsernames` (r:1 w:1)
    /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`)
    /// Storage: `Identity::IdentityOf` (r:1 w:1)
    /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`)
    /// Storage: `Identity::AccountOfUsername` (r:0 w:1)
    /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
    fn accept_username() -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `106`
        //  Estimated: `11037`
        // Minimum execution time: 38_157_000 picoseconds.
        Weight::from_parts(38_157_000, 0)
            .saturating_add(Weight::from_parts(0, 11037))
            .saturating_add(T::DbWeight::get().reads(2))
            .saturating_add(T::DbWeight::get().writes(3))
    }
    /// Storage: `Identity::PendingUsernames` (r:1 w:1)
    /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`)
    fn remove_expired_approval() -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `106`
        //  Estimated: `3542`
        // Minimum execution time: 46_821_000 picoseconds.
        Weight::from_parts(46_821_000, 0)
            .saturating_add(Weight::from_parts(0, 3542))
            .saturating_add(T::DbWeight::get().reads(1))
            .saturating_add(T::DbWeight::get().writes(1))
    }
    /// Storage: `Identity::AccountOfUsername` (r:1 w:0)
    /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
    /// Storage: `Identity::IdentityOf` (r:1 w:1)
    /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`)
    fn set_primary_username() -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `247`
        //  Estimated: `11037`
        // Minimum execution time: 22_515_000 picoseconds.
        Weight::from_parts(22_515_000, 0)
            .saturating_add(Weight::from_parts(0, 11037))
            .saturating_add(T::DbWeight::get().reads(2))
            .saturating_add(T::DbWeight::get().writes(1))
    }
    /// Storage: `Identity::AccountOfUsername` (r:1 w:1)
    /// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
    /// Storage: `Identity::IdentityOf` (r:1 w:0)
    /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`)
    fn remove_dangling_username() -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `126`
        //  Estimated: `11037`
        // Minimum execution time: 15_997_000 picoseconds.
        Weight::from_parts(15_997_000, 0)
            .saturating_add(Weight::from_parts(0, 11037))
            .saturating_add(T::DbWeight::get().reads(2))
            .saturating_add(T::DbWeight::get().writes(1))
    }
    /// Storage: Identity IdentityOf (r:1 w:0)
    /// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
    // NOTE(review): flat weight with no Standard Error lines — presumably
    // hand-added rather than benchmark-generated; re-run the benchmarks to
    // get a measured value.
    fn emit_identity_hash() -> Weight {
        // Proof Size summary in bytes:
        //  Measured:  `100`
        //  Estimated: `11003`
        // Minimum execution time: 10_000_000 picoseconds.
        Weight::from_parts(10_000_000, 11003)
            .saturating_add(T::DbWeight::get().reads(1_u64))
    }
}
// For backwards compatibility and tests
impl WeightInfo for () {
/// Storage: Identity Registrars (r:1 w:1)
/// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen)
/// The range of component `r` is `[1, 19]`.
fn add_registrar(r: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `32 + r * (57 ±0)`
// Estimated: `2626`
// Minimum execution time: 11_683_000 picoseconds.
Weight::from_parts(12_515_830, 2626)
// Standard Error: 2_154
.saturating_add(Weight::from_parts(147_919, 0).saturating_mul(r.into()))
.saturating_add(RocksDbWeight::get().reads(1_u64))
.saturating_add(RocksDbWeight::get().writes(1_u64))
}
/// Storage: Identity IdentityOf (r:1 w:1)
/// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
/// The range of component `r` is `[1, 20]`.
fn set_identity(r: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `442 + r * (5 ±0)`
// Estimated: `11003`
// Minimum execution time: 32_949_000 picoseconds.
Weight::from_parts(31_329_634, 11003)
// Standard Error: 4_496
.saturating_add(Weight::from_parts(203_570, 0).saturating_mul(r.into()))
.saturating_add(RocksDbWeight::get().reads(1_u64))
.saturating_add(RocksDbWeight::get().writes(1_u64))
}
/// Storage: Identity IdentityOf (r:1 w:0)
/// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
/// Storage: Identity SubsOf (r:1 w:1)
/// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen)
/// Storage: Identity SuperOf (r:100 w:100)
/// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen)
/// The range of component `s` is `[0, 100]`.
fn set_subs_new(s: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `101`
// Estimated: `11003 + s * (2589 ±0)`
// Minimum execution time: 9_157_000 picoseconds.
Weight::from_parts(24_917_444, 11003)
// Standard Error: 4_554
.saturating_add(Weight::from_parts(3_279_868, 0).saturating_mul(s.into()))
.saturating_add(RocksDbWeight::get().reads(2_u64))
.saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(s.into())))
.saturating_add(RocksDbWeight::get().writes(1_u64))
.saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into())))
.saturating_add(Weight::from_parts(0, 2589).saturating_mul(s.into()))
}
/// Storage: Identity IdentityOf (r:1 w:0)
/// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
/// Storage: Identity SubsOf (r:1 w:1)
/// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen)
/// Storage: Identity SuperOf (r:0 w:100)
/// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen)
/// The range of component `p` is `[0, 100]`.
fn set_subs_old(p: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `194 + p * (32 ±0)`
// Estimated: `11003`
// Minimum execution time: 9_240_000 picoseconds.
Weight::from_parts(23_326_035, 11003)
// Standard Error: 3_664
.saturating_add(Weight::from_parts(1_439_873, 0).saturating_mul(p.into()))
.saturating_add(RocksDbWeight::get().reads(2_u64))
.saturating_add(RocksDbWeight::get().writes(1_u64))
.saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(p.into())))
}
/// Storage: Identity SubsOf (r:1 w:1)
/// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen)
/// Storage: Identity IdentityOf (r:1 w:1)
/// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
/// Storage: Identity SuperOf (r:0 w:100)
/// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen)
/// The range of component `r` is `[1, 20]`.
/// The range of component `s` is `[0, 100]`.
fn clear_identity(r: u32, s: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `469 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)`
// Estimated: `11003`
// Minimum execution time: 55_687_000 picoseconds.
Weight::from_parts(30_695_182, 11003)
// Standard Error: 9_921
.saturating_add(Weight::from_parts(162_357, 0).saturating_mul(r.into()))
// Standard Error: 1_937
.saturating_add(Weight::from_parts(1_427_998, 0).saturating_mul(s.into()))
.saturating_add(RocksDbWeight::get().reads(2_u64))
.saturating_add(RocksDbWeight::get().writes(2_u64))
.saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into())))
}
/// Storage: Identity Registrars (r:1 w:0)
/// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen)
/// Storage: Identity IdentityOf (r:1 w:1)
/// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
/// The range of component `r` is `[1, 20]`.
fn request_judgement(r: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `367 + r * (57 ±0) + x * (66 ±0)`
// Estimated: `11003`
// Minimum execution time: 34_876_000 picoseconds.
Weight::from_parts(32_207_018, 11003)
// Standard Error: 5_247
.saturating_add(Weight::from_parts(249_156, 0).saturating_mul(r.into()))
.saturating_add(RocksDbWeight::get().reads(2_u64))
.saturating_add(RocksDbWeight::get().writes(1_u64))
}
/// Storage: Identity IdentityOf (r:1 w:1)
/// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
/// The range of component `r` is `[1, 20]`.
fn cancel_request(r: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `398 + x * (66 ±0)`
// Estimated: `11003`
// Minimum execution time: 30_689_000 picoseconds.
Weight::from_parts(31_967_170, 11003)
// Standard Error: 5_387
.saturating_add(Weight::from_parts(42_676, 0).saturating_mul(r.into()))
.saturating_add(RocksDbWeight::get().reads(1_u64))
.saturating_add(RocksDbWeight::get().writes(1_u64))
}
/// Storage: Identity Registrars (r:1 w:1)
/// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen)
/// The range of component `r` is `[1, 19]`.
fn set_fee(r: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `89 + r * (57 ±0)`
// Estimated: `2626`
// Minimum execution time: 7_357_000 picoseconds.
Weight::from_parts(7_932_950, 2626)
// Standard Error: 1_804
.saturating_add(Weight::from_parts(132_653, 0).saturating_mul(r.into()))
.saturating_add(RocksDbWeight::get().reads(1_u64))
.saturating_add(RocksDbWeight::get().writes(1_u64))
}
/// Storage: Identity Registrars (r:1 w:1)
/// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen)
/// The range of component `r` is `[1, 19]`.
fn set_account_id(r: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `89 + r * (57 ±0)`
// Estimated: `2626`
// Minimum execution time: 7_437_000 picoseconds.
Weight::from_parts(8_051_889, 2626)
// Standard Error: 1_997
.saturating_add(Weight::from_parts(129_592, 0).saturating_mul(r.into()))
.saturating_add(RocksDbWeight::get().reads(1_u64))
.saturating_add(RocksDbWeight::get().writes(1_u64))
}
/// Storage: Identity Registrars (r:1 w:1)
/// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen)
/// The range of component `r` is `[1, 19]`.
fn set_fields(r: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `89 + r * (57 ±0)`
// Estimated: `2626`
// Minimum execution time: 7_385_000 picoseconds.
Weight::from_parts(7_911_589, 2626)
// Standard Error: 1_791
.saturating_add(Weight::from_parts(125_788, 0).saturating_mul(r.into()))
.saturating_add(RocksDbWeight::get().reads(1_u64))
.saturating_add(RocksDbWeight::get().writes(1_u64))
}
/// Storage: Identity Registrars (r:1 w:0)
/// Proof: Identity Registrars (max_values: Some(1), max_size: Some(1141), added: 1636, mode: MaxEncodedLen)
/// Storage: Identity IdentityOf (r:1 w:1)
/// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
/// The range of component `r` is `[1, 19]`.
// Benchmark-generated fit: base plus per-registrar (`r`) slope; two DB reads, one DB write.
// NOTE(review): the Measured formula below mentions an `x` component that is not a parameter
// of this function — likely residue from an older benchmark run; confirm on regeneration.
fn provide_judgement(r: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `445 + r * (57 ±0) + x * (66 ±0)`
// Estimated: `11003`
// Minimum execution time: 24_073_000 picoseconds.
Weight::from_parts(17_817_684, 11003)
// Standard Error: 8_612
.saturating_add(Weight::from_parts(406_251, 0).saturating_mul(r.into()))
.saturating_add(RocksDbWeight::get().reads(2_u64))
.saturating_add(RocksDbWeight::get().writes(1_u64))
}
/// Storage: Identity SubsOf (r:1 w:1)
/// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen)
/// Storage: Identity IdentityOf (r:1 w:1)
/// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
/// Storage: System Account (r:1 w:1)
/// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
/// Storage: Identity SuperOf (r:0 w:100)
/// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen)
/// The range of component `r` is `[1, 20]`.
/// The range of component `s` is `[0, 100]`.
// Benchmark-generated fit: base plus slopes for registrars (`r`) and sub-accounts (`s`);
// three fixed reads/writes plus one extra write per removed sub-identity.
// NOTE(review): the Measured formula mentions an `x` component with no matching parameter —
// likely residue from an older benchmark; confirm on regeneration.
fn kill_identity(r: u32, s: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `676 + r * (5 ±0) + s * (32 ±0) + x * (66 ±0)`
// Estimated: `11003`
// Minimum execution time: 73_981_000 picoseconds.
Weight::from_parts(51_684_057, 11003)
// Standard Error: 12_662
.saturating_add(Weight::from_parts(145_285, 0).saturating_mul(r.into()))
// Standard Error: 2_472
.saturating_add(Weight::from_parts(1_421_039, 0).saturating_mul(s.into()))
.saturating_add(RocksDbWeight::get().reads(3_u64))
.saturating_add(RocksDbWeight::get().writes(3_u64))
.saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into())))
}
/// Storage: Identity IdentityOf (r:1 w:0)
/// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
/// Storage: Identity SuperOf (r:1 w:1)
/// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen)
/// Storage: Identity SubsOf (r:1 w:1)
/// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen)
/// The range of component `s` is `[0, 99]`.
// Benchmark-generated fit: base plus a per-existing-sub (`s`) slope; three DB reads, two DB writes.
fn add_sub(s: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `475 + s * (36 ±0)`
// Estimated: `11003`
// Minimum execution time: 29_367_000 picoseconds.
Weight::from_parts(34_214_998, 11003)
// Standard Error: 1_522
.saturating_add(Weight::from_parts(114_551, 0).saturating_mul(s.into()))
.saturating_add(RocksDbWeight::get().reads(3_u64))
.saturating_add(RocksDbWeight::get().writes(2_u64))
}
/// Storage: Identity IdentityOf (r:1 w:0)
/// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
/// Storage: Identity SuperOf (r:1 w:1)
/// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen)
/// The range of component `s` is `[1, 100]`.
// Benchmark-generated fit: base plus a per-sub (`s`) slope; two DB reads, one DB write.
fn rename_sub(s: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `591 + s * (3 ±0)`
// Estimated: `11003`
// Minimum execution time: 12_384_000 picoseconds.
Weight::from_parts(14_417_903, 11003)
// Standard Error: 539
.saturating_add(Weight::from_parts(38_371, 0).saturating_mul(s.into()))
.saturating_add(RocksDbWeight::get().reads(2_u64))
.saturating_add(RocksDbWeight::get().writes(1_u64))
}
/// Storage: Identity IdentityOf (r:1 w:0)
/// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
/// Storage: Identity SuperOf (r:1 w:1)
/// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen)
/// Storage: Identity SubsOf (r:1 w:1)
/// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen)
/// The range of component `s` is `[1, 100]`.
// Benchmark-generated fit: base plus a per-sub (`s`) slope; three DB reads, two DB writes.
fn remove_sub(s: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `638 + s * (35 ±0)`
// Estimated: `11003`
// Minimum execution time: 33_327_000 picoseconds.
Weight::from_parts(36_208_941, 11003)
// Standard Error: 1_240
.saturating_add(Weight::from_parts(105_805, 0).saturating_mul(s.into()))
.saturating_add(RocksDbWeight::get().reads(3_u64))
.saturating_add(RocksDbWeight::get().writes(2_u64))
}
/// Storage: Identity SuperOf (r:1 w:1)
/// Proof: Identity SuperOf (max_values: None, max_size: Some(114), added: 2589, mode: MaxEncodedLen)
/// Storage: Identity SubsOf (r:1 w:1)
/// Proof: Identity SubsOf (max_values: None, max_size: Some(3258), added: 5733, mode: MaxEncodedLen)
/// Storage: System Account (r:1 w:0)
/// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
/// The range of component `s` is `[0, 99]`.
// Benchmark-generated fit: base plus a per-sub (`s`) slope; three DB reads, two DB writes.
fn quit_sub(s: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `704 + s * (37 ±0)`
// Estimated: `6723`
// Minimum execution time: 23_764_000 picoseconds.
Weight::from_parts(26_407_731, 6723)
// Standard Error: 1_025
.saturating_add(Weight::from_parts(101_112, 0).saturating_mul(s.into()))
.saturating_add(RocksDbWeight::get().reads(3_u64))
.saturating_add(RocksDbWeight::get().writes(2_u64))
}
/// Storage: `Identity::UsernameAuthorities` (r:0 w:1)
/// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`)
// Constant weight: a single write, no component slopes.
fn add_username_authority() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
// Minimum execution time: 13_873_000 picoseconds.
Weight::from_parts(13_873_000, 0)
.saturating_add(Weight::from_parts(0, 0))
.saturating_add(RocksDbWeight::get().writes(1))
}
/// Storage: `Identity::UsernameAuthorities` (r:0 w:1)
/// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`)
// Constant weight: a single write, no component slopes.
fn remove_username_authority() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
// Minimum execution time: 10_653_000 picoseconds.
Weight::from_parts(10_653_000, 0)
.saturating_add(Weight::from_parts(0, 0))
.saturating_add(RocksDbWeight::get().writes(1))
}
/// Storage: `Identity::UsernameAuthorities` (r:1 w:1)
/// Proof: `Identity::UsernameAuthorities` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`)
/// Storage: `Identity::AccountOfUsername` (r:1 w:1)
/// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
/// Storage: `Identity::IdentityOf` (r:1 w:1)
/// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`)
// Constant weight: three reads and three writes, no component slopes.
fn set_username_for() -> Weight {
// Proof Size summary in bytes:
// Measured: `80`
// Estimated: `11037`
// Minimum execution time: 75_928_000 picoseconds.
Weight::from_parts(75_928_000, 0)
.saturating_add(Weight::from_parts(0, 11037))
.saturating_add(RocksDbWeight::get().reads(3))
.saturating_add(RocksDbWeight::get().writes(3))
}
/// Storage: `Identity::PendingUsernames` (r:1 w:1)
/// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`)
/// Storage: `Identity::IdentityOf` (r:1 w:1)
/// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`)
/// Storage: `Identity::AccountOfUsername` (r:0 w:1)
/// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
// Constant weight: two reads and three writes, no component slopes.
fn accept_username() -> Weight {
// Proof Size summary in bytes:
// Measured: `106`
// Estimated: `11037`
// Minimum execution time: 38_157_000 picoseconds.
Weight::from_parts(38_157_000, 0)
.saturating_add(Weight::from_parts(0, 11037))
.saturating_add(RocksDbWeight::get().reads(2))
.saturating_add(RocksDbWeight::get().writes(3))
}
/// Storage: `Identity::PendingUsernames` (r:1 w:1)
/// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`)
// Constant weight: one read and one write, no component slopes.
fn remove_expired_approval() -> Weight {
// Proof Size summary in bytes:
// Measured: `106`
// Estimated: `3542`
// Minimum execution time: 46_821_000 picoseconds.
Weight::from_parts(46_821_000, 0)
.saturating_add(Weight::from_parts(0, 3542))
.saturating_add(RocksDbWeight::get().reads(1))
.saturating_add(RocksDbWeight::get().writes(1))
}
/// Storage: `Identity::AccountOfUsername` (r:1 w:0)
/// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
/// Storage: `Identity::IdentityOf` (r:1 w:1)
/// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`)
// Constant weight: two reads and one write, no component slopes.
fn set_primary_username() -> Weight {
// Proof Size summary in bytes:
// Measured: `247`
// Estimated: `11037`
// Minimum execution time: 22_515_000 picoseconds.
Weight::from_parts(22_515_000, 0)
.saturating_add(Weight::from_parts(0, 11037))
.saturating_add(RocksDbWeight::get().reads(2))
.saturating_add(RocksDbWeight::get().writes(1))
}
/// Storage: `Identity::AccountOfUsername` (r:1 w:1)
/// Proof: `Identity::AccountOfUsername` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
/// Storage: `Identity::IdentityOf` (r:1 w:0)
/// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7572), added: 10047, mode: `MaxEncodedLen`)
// Constant weight: two reads and one write, no component slopes.
fn remove_dangling_username() -> Weight {
// Proof Size summary in bytes:
// Measured: `126`
// Estimated: `11037`
// Minimum execution time: 15_997_000 picoseconds.
Weight::from_parts(15_997_000, 0)
.saturating_add(Weight::from_parts(0, 11037))
.saturating_add(RocksDbWeight::get().reads(2))
.saturating_add(RocksDbWeight::get().writes(1))
}
/// Storage: Identity IdentityOf (r:1 w:0)
/// Proof: Identity IdentityOf (max_values: None, max_size: Some(7538), added: 10013, mode: MaxEncodedLen)
// NOTE(review): the perfectly round 10_000_000 constant (unlike the fitted values above)
// suggests a hand-written placeholder rather than benchmark CLI output — re-run the
// benchmarks for this extrinsic before relying on it in production.
fn emit_identity_hash() -> Weight {
// Proof Size summary in bytes:
// Measured: `100`
// Estimated: `11003`
// Minimum execution time: 10_000_000 picoseconds.
Weight::from_parts(10_000_000, 11003)
.saturating_add(RocksDbWeight::get().reads(1_u64))
}
}

View File

@ -0,0 +1,35 @@
[package]
name = "pallet-middleware"
description = "G6 Chain Middleware pallet"
version.workspace = true
homepage.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
edition.workspace = true
rust-version.workspace = true

[dependencies]
log = { version = "0.4.17", default-features = false }
frame-benchmarking = { workspace = true, default-features = false }
frame-support = { workspace = true, default-features = false }
frame-system = { workspace = true, default-features = false }
codec = { workspace = true, default-features = false, features = ["derive"] }
scale-info = { workspace = true, default-features = false }
sp-runtime = { workspace = true, default-features = false }

[lints]
workspace = true

[features]
default = ["std"]
# Every non-optional `default-features = false` dependency must re-enable its
# `std` feature here, otherwise native (`std`) builds link no_std variants.
std = [
	"codec/std",
	"frame-benchmarking/std",
	"frame-support/std",
	"frame-system/std",
	"log/std",
	"scale-info/std",
	"sp-runtime/std",
]
# Propagate benchmarking to the frame crates so their benchmark helpers compile.
runtime-benchmarks = [
	"frame-benchmarking/runtime-benchmarks",
	"frame-support/runtime-benchmarks",
	"frame-system/runtime-benchmarks",
]
try-runtime = ["frame-support/try-runtime", "frame-system/try-runtime"]

View File

@ -0,0 +1,3 @@
# G6 pallet for feeless transactions (crate name: `pallet-middleware`)
cargo build --package pallet-middleware

View File

@ -0,0 +1,214 @@
#![cfg_attr(not(feature = "std"), no_std)]
pub use pallet::*;
#[cfg(feature = "runtime-benchmarks")]
mod benchmarking;
pub mod weights;
pub use weights::*;
// use sp_runtime::offchain::storage::StorageValueRef;
// use frame_system::pallet_prelude::*;
// use core::convert::TryInto;
// use sp_core::offchain::StorageKind;
#[frame_support::pallet(dev_mode)]
pub mod pallet {
	use super::*;
	use frame_support::pallet_prelude::*;
	use frame_system::pallet_prelude::*;

	#[pallet::pallet]
	pub struct Pallet<T>(_);

	/// Configuration trait for the middleware pallet.
	#[pallet::config]
	pub trait Config: frame_system::Config {
		/// The overarching runtime event type.
		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
		// type WeightInfo: WeightInfo;
	}

	/// Events emitted by this pallet.
	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config> {
		/// Placeholder variant; nothing is emitted yet.
		Dummy,
	}

	/// Errors returned by this pallet.
	#[pallet::error]
	pub enum Error<T> {
		/// Placeholder variant; no dispatchable fails yet.
		Dummy,
	}

	#[pallet::call]
	impl<T: Config> Pallet<T> {
		/// Placeholder extrinsic: verifies the origin is signed and currently
		/// discards `user_input` (storage/event wiring was removed from the
		/// template and is kept out until the pallet has real behaviour).
		///
		/// NOTE(review): the `10_000` weight is a hard-coded placeholder —
		/// replace with a benchmarked `WeightInfo` value before production.
		#[pallet::call_index(0)]
		#[pallet::weight(10_000)]
		pub fn set(origin: OriginFor<T>, user_input: u8) -> DispatchResult {
			// Underscore bindings silence unused-variable warnings until the
			// dispatchable gains real logic.
			let _who = ensure_signed(origin)?;
			let _ = user_input;
			Ok(())
		}
	}
}

View File

@ -0,0 +1,25 @@
//! Benchmarking setup for pallet-template
#![cfg(feature = "runtime-benchmarks")]
use super::*;
#[allow(unused)]
use crate::Pallet as Template;
use frame_benchmarking::v2::*;
use frame_system::RawOrigin;
#[benchmarks]
mod benchmarks {
use super::*;
// Benchmarks `do_something` dispatched by a whitelisted (DB-tracking-exempt) caller.
#[benchmark]
fn do_something() {
let value = 100u32.into();
let caller: T::AccountId = whitelisted_caller();
#[extrinsic_call]
do_something(RawOrigin::Signed(caller), value);
//assert_eq!(Something::<T>::get(), Some(value));
}
// NOTE(review): neither visible lib.rs in this change declares a `mod mock` —
// this macro will fail to compile under `runtime-benchmarks` test builds
// unless a `crate::mock` module exists elsewhere; confirm or remove.
impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test);
}

View File

@ -0,0 +1,78 @@
#![cfg_attr(not(feature = "std"), no_std)]
pub use pallet::*;
#[cfg(feature = "runtime-benchmarks")]
mod benchmarking;
pub mod weights;
pub use weights::*;
#[frame_support::pallet(dev_mode)]
pub mod pallet {
	use super::*;
	use frame_support::pallet_prelude::*;
	use frame_system::pallet_prelude::*;

	#[pallet::pallet]
	pub struct Pallet<T>(_);

	/// Configuration trait for this pallet.
	#[pallet::config]
	pub trait Config: frame_system::Config {
		/// The overarching runtime event type.
		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
	}

	/// Events emitted by this pallet.
	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config> {
		/// Emitted by `do_something`; carries no data.
		DummyEvent,
	}

	/// Errors returned by this pallet (none are raised at present).
	#[pallet::error]
	pub enum Error<T> {
		DummyError,
	}

	#[pallet::call]
	impl<T: Config> Pallet<T> {
		/// Template extrinsic: verifies the caller is signed, ignores
		/// `_something`, and emits [`Event::DummyEvent`].
		///
		/// NOTE(review): the `10000` weight is a hard-coded placeholder;
		/// wire up the `WeightInfo` from `weights.rs` before production.
		#[pallet::call_index(0)]
		#[pallet::weight(10000)]
		pub fn do_something(origin: OriginFor<T>, _something: u32) -> DispatchResult {
			let _who = ensure_signed(origin)?;
			Self::deposit_event(Event::DummyEvent);
			Ok(())
		}
	}
}

View File

@ -0,0 +1,25 @@
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
#![allow(unused_imports)]
use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
use core::marker::PhantomData;
/// Weight functions required by this pallet's dispatchables.
pub trait WeightInfo {
/// Weight of the `do_something` extrinsic.
fn do_something() -> Weight;
}
/// Weights using the runtime's configured `DbWeight` (for production runtimes).
pub struct SubstrateWeight<T>(PhantomData<T>);
impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
// Fixed ref-time plus one DB write, priced via the runtime's `DbWeight`.
fn do_something() -> Weight {
Weight::from_parts(9_000_000, 0)
.saturating_add(T::DbWeight::get().writes(1_u64))
}
}
// Fallback implementation for tests/defaults, priced with RocksDB constants.
impl WeightInfo for () {
fn do_something() -> Weight {
Weight::from_parts(9_000_000, 0)
.saturating_add(RocksDbWeight::get().writes(1_u64))
}
}

34
pallets/parking/.gitignore vendored 100644
View File

@ -0,0 +1,34 @@
.cargo-remote.toml
.direnv/
.DS_Store
.env*
.idea
.local
.lycheecache
.vscode
.wasm-binaries
*.adoc
*.bin
*.iml
*.orig
*.rej
*.swp
**/._*
**/.criterion/
**/*.rs.bk
**/hfuzz_target/
**/hfuzz_workspace/
**/node_modules
**/target/
**/wip/*.stderr
/.cargo/config
/.envrc
artifacts
nohup.out
polkadot_argument_parsing
release-artifacts
release.json
rls*.log
target/
Cargo.lock
runtime/wasm/target/

View File

@ -0,0 +1,50 @@
[package]
name = 'pallet-parking'
version = '0.1.0'
authors = ['Gautam Dhameja <quasijatt@outlook.com>']
edition = '2021'
license = 'Apache-2.0'

[dependencies]
pallet-balances = { default-features = false, workspace = true }
pallet-authorship = { workspace = true, default-features = false }
pallet-timestamp = { workspace = true, default-features = false }
sp-arithmetic = { workspace = true, default-features = false }
sp-core = { workspace = true, default-features = false }
sp-io = { workspace = true, default-features = false }
sp-runtime = { workspace = true, default-features = false }
sp-std = { workspace = true, default-features = false }
sp-staking = { workspace = true, default-features = false }
frame-benchmarking = { workspace = true, default-features = false, optional = true }
frame-support = { workspace = true, default-features = false }
frame-system = { workspace = true, default-features = false }
pallet-session = { workspace = true, default-features = false, features = ['historical'] }
sp-weights = { workspace = true, default-features = false }
scale-info = { workspace = true, default-features = false, features = ['derive', 'serde'] }
#lazy_static = { version = "1.5.0", default-features = false, features = ['spin_no_std'] }
log = { default-features = false, version = '0.4.22' }
codec = { workspace = true, default-features = false, features = ['derive'] }
time = { version = "0.3.37", default-features = false, features = ["alloc"] }

[dev-dependencies]
sp-state-machine = { workspace = true, default-features = false }
serde = { features = ['derive'], version = '1.0.205' }

[features]
default = ['std']
# Propagate benchmarking so the frame crates' benchmark helpers compile too.
runtime-benchmarks = [
	'frame-benchmarking/runtime-benchmarks',
	'frame-support/runtime-benchmarks',
	'frame-system/runtime-benchmarks',
]
# Every non-optional `default-features = false` dependency must re-enable its
# `std` feature here. `frame-benchmarking?/std` (weak feature) keeps the
# dependency optional instead of force-enabling it in plain std builds.
std = [
	'codec/std',
	'frame-benchmarking?/std',
	'frame-support/std',
	'frame-system/std',
	'log/std',
	'pallet-authorship/std',
	'pallet-balances/std',
	'pallet-session/std',
	'pallet-timestamp/std',
	'scale-info/std',
	'sp-arithmetic/std',
	'sp-core/std',
	'sp-io/std',
	'sp-runtime/std',
	'sp-staking/std',
	'sp-std/std',
	'sp-weights/std',
	'time/std',
]
try-runtime = ['frame-support/try-runtime', 'frame-system/try-runtime']

View File

@ -0,0 +1,74 @@
# Pallet Parking
This is a pallet to incentivize network users to hold their tokens on-chain. The mechanism for this is named "Parking".
## Parking metaphor
Account holders can "park" their tokens in a single parking lot. Tokens take some time to get their parking slot. Then, for each week when tokens are parked, their amount grows according to total amount of parked tokens. Users can request unparking anytime, but they will only receive their tokens next Wednesday, when they are successfully unparked.
## Parking mechanics
### Parking
To park tokens, account should call "park" call and indicate amount to be parked. It is possible to reap account by doing so, but not recommended in general (there is small potential for replay attack if immortal transactions were used). These tokens go into parking lot pool and stay there, while parked amount is stored in separate parking ledger. There is minimal parked amount, it is highly recommended that it is set above ED.
### Rewards computation
On first block after Wednesday midnight, following procedure happens:
1. Total parked amount (and thus all individual parking amount) is increased with accordance to following annual Parking Reward Percentage (PRP) assuming 365.25 days in a year:
in case of 1 - 50k tokens in the parking pool 15% is the PRP
in case of 50k - 100k tokens in the parking pool 14.5% is the PRP
in case of 100k -150k tokens in the parking pool 14% is the PRP
in case of 150k - 200k tokens in the parking pool 13.5% is the PRP
in case of 200k - 250k tokens in the parking pool 13% is the PRP
in case of 250k - 300k tokens in the parking pool 12.5% is the PRP
in case of 300k - 400k tokens in the parking pool 12% is the PRP
in case of 400k - 500k tokens in the parking pool 11% is the PRP
in case of 500k - 600k tokens in the parking pool 10% is the PRP
in case of 600k - 700k tokens in the parking pool 9% is the PRP
in case of 700k - 800k tokens in the parking pool 8% is the PRP
in case of 800k - 900k tokens in the parking pool 7% is the PRP
in case of 900k - 1M tokens in the parking pool 6% is the PRP
There is no mechanics defined outside of these ranges.
2. All newly parked tokens are added to pool and from this moment are considered as parked.
3. Reward coefficient is stored for lazy reward computation.
### Lazy rewards computation
To avoid getting extremely large Wednesday block with many parked accounts, rewards are computed on demand. Using associativity of multiplication (`a(b+c) = a*b + a*c`), we can calculate only total pool size on reward event, but spread individual accounts computation in time (and even get the logic for that off-chain). Now rewards are computed in either of the following:
1. Parking events
2. Unparking events
3. 'apply_rewards' calls
Upon recalculation, the updated balance of the account is recorded. This could be used to estimate accumulated rewards on demand. This call can be sent from any account for any other account, and is harmless and beneficial for the network.
Downside of this approach is that full log of past reward coefficients is stored. Although not particularly large, it is limited from above to keep things clean. Fortunately, automatic trimming is implemented for coefficients that are not needed anymore (no accounts with not calculated rewards remain before the unnecessary coefficient). Thus, occasionally some check should be performed to prevent the list of stored coefficients overgrowing.
These are possible solutions for this:
- Incentivize users to recalculate rewards when those are sufficiently old (silly solution but increases involvement) or make an off-chain robots do it on community funding;
- Implement automated "on idle" recalculation (requires some extra caching logic and thus should be implemented carefully);
- Keep growing cache size with upgrades, as needed
### Unparking
Account can request unparking (possibly partial) anytime by calling `unfreeze`. After this request, rewards are calculated and unparked tokens are delivered to the user in frozen form if they were properly parked in the pool, or unfrozen if they were not (unfrozen unparking taking precedence). Funds can be unfrozen after the Wednesday, or are automatically unfrozen on the next parking or unparking by the same account.
### Source of funds and management
Funds are coming from a "benefactor" account that should be set by the parking manager account (or Root) before the pallet can be used. Funds are drawn from this account every Wednesday to grow the parking pool. There is no plan on what should happen if this account does not have enough free balance. The maximum amount that can be withdrawn in a week is approximately 1000 tokens. Please make sure the benefactor does not get drained.
The benefactor account can be changed anytime, but cannot be un-set.
## Usage
1. Parking manager or root sets the benefactor account by calling `set_benefactor` at least once.
2. Users park, unpark, collect rewards, enjoy.
3. Remember to trim old coefficients when needed (or patch this pallet to do it automatically).

View File

@ -0,0 +1,269 @@
use super::*;
use crate::Pallet as Parking;
use frame_benchmarking::v2::*;
use frame_support::traits::{EnsureOrigin, OriginTrait};
use frame_system::RawOrigin;
use pallet_timestamp::Pallet as Timestamp;
const SEED: u32 = 0;
// Benchmark helper: funds a deterministic "benefactor" account with 20M x MinPark
// and registers it via `set_benefactor` using the ParkingManager origin.
// Panics (assert) if registration fails — acceptable inside benchmarks.
fn prepare_benefactor<T: Config>() {
let origin = T::ParkingManager::try_successful_origin().unwrap();
let balance = T::MinPark::get() * 20_000_000u32.into();
let benefactor: T::AccountId = account("benefactor", 0, SEED);
let _ = T::Currency::set_balance(&benefactor, balance);
assert!(Parking::<T>::set_benefactor(origin, benefactor).is_ok());
}
#[benchmarks (
where T: pallet_timestamp::Config<Moment = u64>,
)]
mod benchmarks {
use super::*;
// Park all tokens and kill parking account; make it first parking for heaviness.
#[benchmark]
fn park() {
// Non-zero timestamp so date-based logic has something to work with.
Timestamp::<T>::set_timestamp(1_000);
let caller = whitelisted_caller();
// Fund the caller with 100x MinPark and park the entire balance.
let balance = T::MinPark::get() * 100u32.into();
T::Currency::set_balance(&caller, balance);
prepare_benefactor::<T>();
#[extrinsic_call]
_(RawOrigin::Signed(caller), balance);
//assert_eq!(Balances::<T>::free_balance(&caller), Zero::zero());
}
// Park portion of tokens; first parking.
// `extra` benchmark: not used for weight generation, kept for comparison runs.
#[benchmark(extra)]
fn park_partial() {
Timestamp::<T>::set_timestamp(1_000);
let caller = whitelisted_caller();
// Caller holds 100x MinPark but parks only 10x, leaving the account alive.
let balance = T::MinPark::get() * 100u32.into();
let balance_to_park = T::MinPark::get() * 10u32.into();
T::Currency::set_balance(&caller, balance);
prepare_benefactor::<T>();
#[extrinsic_call]
park(RawOrigin::Signed(caller), balance_to_park);
}
// Park all tokens, then unpark all immediately
#[benchmark(extra)]
fn unpark_immediately() {
Timestamp::<T>::set_timestamp(1_000);
let caller = whitelisted_caller();
prepare_benefactor::<T>();
let balance = T::MinPark::get() * 100u32.into();
T::Currency::set_balance(&caller, balance);
let origin = T::RuntimeOrigin::signed(caller.clone());
assert!(Parking::<T>::park(origin, balance).is_ok());
#[extrinsic_call]
unpark(RawOrigin::Signed(caller), None);
}
// Park all tokens, unpark all after some time
#[benchmark(extra)]
fn unpark_after_rewarding() {
Timestamp::<T>::set_timestamp(1_000);
let caller = whitelisted_caller();
prepare_benefactor::<T>();
let balance = T::MinPark::get() * 100u32.into();
T::Currency::set_balance(&caller, balance);
T::Currency::set_balance(&Parking::<T>::account_id(), balance * 100u32.into());
Count::<T>::set(10);
<ParkingLot<T>>::insert(
caller.clone(),
Parked {
amount: 0u32.into(),
amount_previous: balance,
date: Wednesday {
year: 2024,
week: 11,
},
},
);
<DudeNext<T>>::set(Some(Wednesday {
year: 2024,
week: 13,
}));
let mut wprp = WPRP::<T>::get();
let _ = wprp.try_push(Weekly {
wprp: 2.into(),
prev_week_dependents: 50,
});
let _ = wprp.try_push(Weekly {
wprp: 2.into(),
prev_week_dependents: 51,
});
let _ = wprp.try_push(Weekly {
wprp: 2.into(),
prev_week_dependents: 50,
});
WPRP::<T>::set(wprp);
T::Currency::set_frozen(
&FreezeReason::AccountedParked.into(),
&Parking::<T>::account_id(),
balance * 100u32.into(),
Fortitude::Polite,
)
.unwrap();
#[extrinsic_call]
unpark(RawOrigin::Signed(caller), None);
}
// Park all tokens, unpark all after some time, account is not set up
#[benchmark]
fn unpark() {
let caller: T::AccountId = whitelisted_caller();
prepare_benefactor::<T>();
let balance = T::MinPark::get() * 100u32.into();
T::Currency::set_balance(&Parking::<T>::account_id(), balance * 100u32.into());
Count::<T>::set(10);
<ParkingLot<T>>::insert(
caller.clone(),
Parked {
amount: 0u32.into(),
amount_previous: balance,
date: Wednesday {
year: 2024,
week: 11,
},
},
);
<DudeNext<T>>::set(Some(Wednesday {
year: 2024,
week: 13,
}));
let mut wprp = WPRP::<T>::get();
let _ = wprp.try_push(Weekly {
wprp: 2.into(),
prev_week_dependents: 50,
});
let _ = wprp.try_push(Weekly {
wprp: 2.into(),
prev_week_dependents: 51,
});
let _ = wprp.try_push(Weekly {
wprp: 2.into(),
prev_week_dependents: 50,
});
WPRP::<T>::set(wprp);
T::Currency::set_frozen(
&FreezeReason::AccountedParked.into(),
&Parking::<T>::account_id(),
balance * 100u32.into(),
Fortitude::Polite,
)
.unwrap();
#[extrinsic_call]
_(RawOrigin::Signed(caller), None);
}
// Trivial initialization call
#[benchmark]
fn set_benefactor() {
let caller = T::ParkingManager::try_successful_origin().unwrap();
let benefactor: T::AccountId = account("benefactor", 0, SEED);
#[extrinsic_call]
_(caller as T::RuntimeOrigin, benefactor);
}
// Apply rewards
#[benchmark]
fn apply_rewards() {
let caller: T::AccountId = whitelisted_caller();
prepare_benefactor::<T>();
<ParkingLot<T>>::insert(
caller.clone(),
Parked {
amount: 0u32.into(),
amount_previous: T::MinPark::get() * 100u32.into(),
date: Wednesday {
year: 2024,
week: 11,
},
},
);
<DudeNext<T>>::set(Some(Wednesday {
year: 2024,
week: 13,
}));
let mut wprp = WPRP::<T>::get();
let _ = wprp.try_push(Weekly {
wprp: 2.into(),
prev_week_dependents: 50,
});
let _ = wprp.try_push(Weekly {
wprp: 2.into(),
prev_week_dependents: 51,
});
let _ = wprp.try_push(Weekly {
wprp: 2.into(),
prev_week_dependents: 50,
});
WPRP::<T>::set(wprp);
#[extrinsic_call]
_(RawOrigin::Signed(caller), None);
}
// Unfreeze account
#[benchmark]
fn unfreeze() {
prepare_benefactor::<T>();
let caller = whitelisted_caller();
let balance = T::MinPark::get() * 100u32.into();
<DudeNext<T>>::set(Some(Wednesday {
year: 2024,
week: 18,
}));
T::Currency::set_balance(&caller, balance);
T::Currency::set_frozen(
&FreezeReason::Unparking.into(),
&caller,
balance,
Fortitude::Polite,
)
.unwrap();
<Freezer<T>>::insert(
caller.clone(),
Wednesday {
year: 2024,
week: 13,
},
);
#[extrinsic_call]
_(RawOrigin::Signed(caller), None);
}
impl_benchmark_test_suite!(Parking, crate::mock::new_test_ext(), crate::mock::Test,);
}

View File

@ -0,0 +1,829 @@
//! Pallet to imitate earning model of "staking", but unbound from validation
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(feature = "runtime-benchmarks")]
mod benchmarking;
#[cfg(test)]
#[cfg(feature = "std")]
mod mock;
#[cfg(test)]
#[cfg(feature = "std")]
mod tests;
pub mod weights;
mod reward_curve;
use reward_curve::weekly_parking_reward_percentage;
use frame_support::{
ensure,
pallet_prelude::*,
traits::{
fungible::{Inspect, InspectFreeze, Mutate, MutateFreeze},
tokens::{Fortitude, Preservation},
Get, UnixTime,
},
PalletId,
};
use frame_system::pallet_prelude::*;
//use log;
pub use pallet::*;
use scale_info::TypeInfo;
use sp_runtime::{
traits::{AccountIdConversion, UniqueSaturatedInto},
FixedPointNumber, FixedU128, Saturating,
};
use sp_std::prelude::*;
use time::{Date, Duration, OffsetDateTime, Weekday};
pub use weights::*;
pub const LOG_TARGET: &str = "runtime::parking";
/// Wednesday descriptor
///
/// Identifies a particular Wednesday by its ISO week date (year + week
/// number) rather than a full calendar date.
#[derive(Clone, Debug, Decode, Encode, Eq, MaxEncodedLen, PartialEq, TypeInfo)]
pub struct Wednesday {
    /// ISO week-date year (can differ from the calendar year near year boundaries).
    pub year: i32,
    /// ISO week number within that year.
    pub week: u8,
}
impl From<Date> for Wednesday {
    fn from(d: Date) -> Self {
        // Project any date onto its ISO (year, week); the weekday component is
        // discarded, so all dates within one ISO week map to the same value.
        let (year, week, _) = d.to_iso_week_date();
        Self { year, week }
    }
}
impl From<Wednesday> for Date {
    fn from(val: Wednesday) -> Self {
        // NOTE(review): `from_iso_week_date` is fallible (not every ISO year has
        // a week 53); the unwrap assumes stored values always originated from a
        // real date via `From<Date>` -- TODO confirm.
        Self::from_iso_week_date(val.year, val.week, Weekday::Wednesday).unwrap()
    }
}
/// Ledger record for parked tokens
#[derive(Decode, Encode, Debug, Clone, MaxEncodedLen, TypeInfo)]
#[scale_info(skip_type_params(T))]
pub struct Parked<T: Config> {
    /// Amount that was parked
    /// (added since the last week turn; not yet accruing rewards).
    amount: <T::Currency as Inspect<T::AccountId>>::Balance,
    /// Amount that was parked already for some time
    /// (accrues weekly multipliers from `date` onwards).
    amount_previous: <T::Currency as Inspect<T::AccountId>>::Balance,
    /// Date of first accounted day of parking (next Wednesday)
    date: Wednesday,
}
/// One week's record in the `WPRP` history.
#[derive(Decode, Encode, Debug, Clone, MaxEncodedLen, TypeInfo)]
pub struct Weekly {
    /// Growth multiplier for that week: 1 + weekly reward fraction
    /// (see `on_initialize`, which pushes `reward_percentage + 1`).
    wprp: FixedU128,
    /// Dependents counted for the preceding week (see `WPRP` storage docs).
    prev_week_dependents: u32,
}
#[frame_support::pallet()]
pub mod pallet {
    use super::*;

    /// Configure the pallet by specifying the parameters and types on which it
    /// depends.
    #[pallet::config]
    pub trait Config: frame_system::Config {
        /// The Event type.
        type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
        /// Validator set pallet ID to derive rewards pool ID.
        #[pallet::constant]
        type PalletId: Get<PalletId>;
        /// How do we reward the validators
        type Currency: Mutate<Self::AccountId>
            + MutateFreeze<Self::AccountId, Id = Self::RuntimeFreezeReason>;
        /// Smallest amount that may be parked; also the reap threshold when a
        /// partial unpark would leave less than this behind.
        type MinPark: Get<<Self::Currency as Inspect<Self::AccountId>>::Balance>;
        /// Wall-clock time source (millisecond precision is assumed).
        type Time: UnixTime;
        /// Upper bound, in weeks, on the `WPRP` history and week-counting loops.
        type MaxOperationTime: Get<u32>;
        /// Origin allowed to set the benefactor account.
        type ParkingManager: EnsureOrigin<Self::RuntimeOrigin>;
        /// Implement freezing of existential deposit for validators
        type RuntimeFreezeReason: From<FreezeReason>;
        /// Information on runtime weights.
        type WeightInfo: WeightInfo;
    }

    const STORAGE_VERSION: StorageVersion = StorageVersion::new(0);

    #[pallet::pallet]
    #[pallet::storage_version(STORAGE_VERSION)]
    pub struct Pallet<T>(_);

    /// Account from which rewards are paid out
    #[pallet::storage]
    #[pallet::getter(fn benefactor)]
    pub type Benefactor<T: Config> = StorageValue<_, T::AccountId, OptionQuery>;

    /// The next Wednesday current parking pool will be
    #[pallet::storage]
    #[pallet::getter(fn dude_next)]
    pub type DudeNext<T: Config> = StorageValue<_, Wednesday, OptionQuery>;

    /// Weekly multipliers and weekly dependents set
    ///
    /// The pallet would have to be upgraded within `MaxOperationTime` weeks from the first
    /// parking event to make cache dynamic or extend weekly multipliers cache
    ///
    /// We have considered logarithms here, but they seem unnecessary at given scope, while
    /// regular fast implementation just introduce non-reproducibility.
    ///
    /// Weekly dependents represent the number of accounts that performed any of
    /// the following actions during the week:
    ///
    /// - parked or added to existing parking
    /// - applied rewards
    #[pallet::storage]
    #[pallet::getter(fn wprp)]
    pub type WPRP<T: Config> = StorageValue<_, BoundedVec<Weekly, T::MaxOperationTime>, ValueQuery>;

    /// Dependents entered in current week.
    /// Value empties into `prev_week_dependents` of `Weekly` at the turn of the
    /// week.
    #[pallet::storage]
    #[pallet::getter(fn dependents)]
    pub type Dependents<T: Config> = StorageValue<_, u32, ValueQuery>;

    /// Ledger for parked tokens
    #[pallet::storage]
    #[pallet::getter(fn parked)]
    pub type ParkingLot<T: Config> =
        CountedStorageMap<_, Twox64Concat, T::AccountId, Parked<T>, OptionQuery>;

    /// Counter of parked funds (cache for len(parked))
    #[pallet::storage]
    #[pallet::getter(fn count)]
    pub type Count<T: Config> = StorageValue<_, u32, ValueQuery>;

    /// Funds frozen until stated
    #[pallet::storage]
    #[pallet::getter(fn freezer)]
    pub type Freezer<T: Config> =
        CountedStorageMap<_, Twox64Concat, T::AccountId, Wednesday, OptionQuery>;

    #[pallet::composite_enum]
    pub enum FreezeReason {
        /// Rewards are computed on frozen funds
        #[codec(index = 0)]
        AccountedParked,
        /// Funds in process of unparking
        #[codec(index = 1)]
        Unparking,
    }

    #[pallet::event]
    #[pallet::generate_deposit(pub(super) fn deposit_event)]
    pub enum Event<T: Config> {
        /// Applied rewards and updated deposit timestamp for account
        AppliedRewards(T::AccountId),
        /// Account that would be paying for parking was set.
        ParkingFunded,
        /// Parking reward was declared; all parked accounts increased by `reward_percentage`
        /// and overall pool grew to `total`.
        ParkingRewarded {
            reward_percentage: FixedU128,
            total: <T::Currency as Inspect<T::AccountId>>::Balance,
        },
        /// Tokens were parked
        TokensParked {
            who: T::AccountId,
            amount: <T::Currency as Inspect<T::AccountId>>::Balance,
        },
        /// Tokens were unparked
        TokensUnparked {
            who: T::AccountId,
            free: <T::Currency as Inspect<T::AccountId>>::Balance,
            frozen: <T::Currency as Inspect<T::AccountId>>::Balance,
        },
        /// Funds were unfrozen
        Unfrozen(T::AccountId),
    }

    // Errors inform users that something went wrong.
    #[pallet::error]
    pub enum Error<T> {
        /// Attempt to park too small number of tokens.
        ParkedAmountTooLow,
        /// Pallet is unusable unless it is known what entity pays for it.
        BenefactorNotSet,
        /// Attempt to unpark tokens from account that did not park anything.
        NotParked,
        /// Attempted to unfreeze funds that were not unparked
        NotUnparking,
        /// Reward multiplication would overflow the balance type.
        OverflowRisk,
        /// Errors that would happen if the pool is somehow empty but ledgers are not.
        ///
        /// Should be unreachable.
        PoolNotInitialized,
        /// Errors that would happen if the log is somehow shallower than some ledgers.
        ///
        /// Should be unreachable.
        PeriodsRecordInvalid,
        /// Optionally provided amount to unpark exceeds available balance
        RequestedUnparkTooLarge,
        /// Attempted to unfreeze before the end of freeze period
        StillUnparking,
        /// Tried to rebase funds not yet parked
        WaitingToPark,
    }

    // it is important not to touch ParkingLot here for that one might be large
    #[pallet::hooks]
    impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
        // Once per block: if one or more Wednesdays have passed since the last
        // processed one, pull one week's reward from the benefactor for each
        // elapsed week, grow the pool's `AccountedParked` freeze, and append
        // that week's multiplier to the `WPRP` history.
        fn on_initialize(_: BlockNumberFor<T>) -> Weight {
            if let Some(benefactor) = Pallet::<T>::benefactor() {
                if let Some(dude_next) = Pallet::<T>::dude_next() {
                    let now = T::Time::now().as_millis() as u64;
                    if let Some(wednesday) = Self::my_dudes(dude_next, now) {
                        let account_id = &Pallet::<T>::account_id();
                        // Account for unfortunate chance that chain was down more than a week
                        for _ in 0..wednesday.weeks_passed {
                            let total_parked = T::Currency::balance_frozen(
                                &FreezeReason::AccountedParked.into(),
                                account_id,
                            );
                            let reward_percentage = weekly_parking_reward_percentage(
                                total_parked.unique_saturated_into(),
                            );
                            let reward = reward_percentage.saturating_mul_int(total_parked);
                            // NOTE(review): a failed transfer (e.g. underfunded
                            // benefactor) is silently ignored, yet the week still
                            // records a full multiplier below -- confirm intended.
                            let _ = T::Currency::transfer(
                                &benefactor,
                                account_id,
                                reward,
                                Preservation::Expendable,
                            ); // TODO
                            let total = T::Currency::balance_freezable(account_id);
                            let _ = T::Currency::set_frozen(
                                &FreezeReason::AccountedParked.into(),
                                account_id,
                                total,
                                Fortitude::Polite,
                            ); // TODO this should be impossible
                            let mut wprp = Pallet::<T>::wprp();
                            let prev_week_dependents = Pallet::<T>::dependents();
                            // Stored value is the growth multiplier: 1 + reward fraction.
                            let _ = wprp.try_push(Weekly {
                                wprp: reward_percentage + 1.into(),
                                prev_week_dependents,
                            }); // TODO
                            <WPRP<T>>::set(wprp);
                            <Dependents<T>>::set(0);
                            <DudeNext<T>>::set(Some(wednesday.new_wednesday.clone()));
                            Self::deposit_event(Event::ParkingRewarded {
                                reward_percentage,
                                total,
                            });
                        }
                        // Weight scales with the number of weeks processed.
                        T::DbWeight::get().reads_writes(
                            4 + wednesday.weeks_passed as u64 * 3,
                            wednesday.weeks_passed as u64 * 5,
                        )
                    } else {
                        T::DbWeight::get().reads(3)
                    }
                } else {
                    T::DbWeight::get().reads(2)
                }
            } else {
                T::DbWeight::get().reads(1)
            }
        }
    }

    #[pallet::call]
    impl<T: Config> Pallet<T> {
        /// Park tokens
        #[pallet::call_index(0)]
        #[pallet::weight(<T as pallet::Config>::WeightInfo::park())]
        pub fn park(
            origin: OriginFor<T>,
            amount: <T::Currency as Inspect<T::AccountId>>::Balance,
        ) -> DispatchResult {
            let who = ensure_signed(origin)?;
            Self::park_account_checked(&who, amount)?;
            Self::deposit_event(Event::TokensParked { who, amount });
            Ok(())
        }

        /// Withdraw parked tokens (to the same account that parked them)
        ///
        /// `optional_amount = None` withdraws everything and reaps the ledger entry.
        #[pallet::call_index(1)]
        #[pallet::weight(<T as pallet::Config>::WeightInfo::unpark())]
        pub fn unpark(
            origin: OriginFor<T>,
            optional_amount: Option<<T::Currency as Inspect<T::AccountId>>::Balance>,
        ) -> DispatchResult {
            let who = ensure_signed(origin)?;
            let amount_unparked = Self::unpark_account_checked(&who, optional_amount)?;
            Self::deposit_event(Event::TokensUnparked {
                who,
                free: amount_unparked.free,
                frozen: amount_unparked.frozen,
            });
            Ok(())
        }

        /// Set benefactor account
        #[pallet::call_index(2)]
        #[pallet::weight(<T as pallet::Config>::WeightInfo::set_benefactor())]
        pub fn set_benefactor(origin: OriginFor<T>, benefactor: T::AccountId) -> DispatchResult {
            T::ParkingManager::ensure_origin(origin)?;
            Self::set_benefactor_checked(benefactor);
            Self::deposit_event(Event::ParkingFunded);
            Ok(())
        }

        /// Apply rewards and shift starting Wednesday if possible. If optional
        /// account is not provided, applies rewards to `origin` funds.
        #[pallet::call_index(3)]
        #[pallet::weight(<T as pallet::Config>::WeightInfo::apply_rewards())]
        pub fn apply_rewards(
            origin: OriginFor<T>,
            maybe_account: Option<T::AccountId>,
        ) -> DispatchResult {
            let who = ensure_signed(origin)?;
            let for_account = maybe_account.unwrap_or(who);
            Self::rebase_parking(&for_account)?;
            Self::deposit_event(Event::AppliedRewards(for_account));
            Ok(())
        }

        /// Unfreeze account's funds. If optional account is not provided,
        /// unfreezes `origin` funds.
        #[pallet::call_index(4)]
        #[pallet::weight(<T as pallet::Config>::WeightInfo::unfreeze())]
        pub fn unfreeze(
            origin: OriginFor<T>,
            maybe_account: Option<T::AccountId>,
        ) -> DispatchResult {
            let who = ensure_signed(origin)?;
            let for_account = maybe_account.unwrap_or(who);
            Self::unfreeze_account(&for_account)?;
            Self::deposit_event(Event::Unfrozen(for_account));
            Ok(())
        }
    }
}
impl<T: Config> Pallet<T> {
    /// Pool account derived from the pallet ID; holds all parked funds.
    fn account_id() -> T::AccountId {
        T::PalletId::get().into_account_truncating()
    }

    /// Transfer `amount` from `who` into the pool and create/extend their
    /// ledger entry. Requires the benefactor to be set.
    fn park_account_checked(
        who: &T::AccountId,
        amount: <T::Currency as Inspect<T::AccountId>>::Balance,
    ) -> DispatchResult {
        ensure!(
            Pallet::<T>::benefactor().is_some(),
            Error::<T>::BenefactorNotSet
        );
        let parked = match Pallet::<T>::parked(who) {
            Some(checked) => {
                // Existing parking: fold accrued rewards into the frozen part first.
                let amount_parked_checked = Self::amount_parked_checked(checked)?;
                // Move tokens
                T::Currency::transfer(
                    who,
                    &Pallet::<T>::account_id(),
                    amount,
                    Preservation::Expendable,
                )?; // TODO reconsider preservation
                if amount_parked_checked.free == 0u32.into() {
                    // account was last time accessed NOT this week - add a dependent
                    let dependents = Pallet::<T>::dependents();
                    <Dependents<T>>::set(dependents + 1);
                }
                if let Some(periods) = amount_parked_checked.periods {
                    // The entry no longer depends on its old start week.
                    // NOTE(review): this indexing panics if `periods + 1 > len`;
                    // the `get` path in `amount_parked_checked` maps the same
                    // condition to `PeriodsRecordInvalid` instead -- confirm.
                    let mut wprp = Pallet::<T>::wprp();
                    let len = wprp.len();
                    wprp[len - periods as usize - 1].prev_week_dependents -= 1;
                    Self::trim_and_set_wprp(wprp);
                }
                Parked {
                    amount: amount_parked_checked.free + amount,
                    amount_previous: amount_parked_checked.frozen,
                    date: Self::dude_next().expect("account already existed, dudes initiated"),
                }
            }
            None => {
                // Ensure amount is above ed
                ensure!(amount >= T::MinPark::get(), Error::<T>::ParkedAmountTooLow);
                // TODO: consider transfer_all in vicinity of ED
                // Move tokens
                T::Currency::transfer(
                    who,
                    &Pallet::<T>::account_id(),
                    amount,
                    Preservation::Expendable,
                )?; // TODO reconsider preservation
                let count = Pallet::<T>::count();
                let date = if count == 0 {
                    // Special logic for first parking
                    let now = T::Time::now().as_millis() as u64;
                    <Count<T>>::set(1);
                    let dude_next = next_dude(now);
                    <DudeNext<T>>::set(Some(dude_next.clone()));
                    dude_next
                } else {
                    <Count<T>>::set(count + 1);
                    Pallet::<T>::dude_next().expect("Dude_next is never unset")
                };
                // Add a dependent for this week
                let dependents = Pallet::<T>::dependents();
                <Dependents<T>>::set(dependents + 1);
                Parked {
                    amount,
                    amount_previous: 0u32.into(),
                    date,
                }
            }
        };
        // Add funds
        <ParkingLot<T>>::insert(who.clone(), parked);
        Ok(())
    }

    /// Withdraw from `who`'s parking. `None` means "all"; a partial request
    /// that would leave less than `MinPark` also reaps the whole entry.
    /// Returns the free/frozen split that was paid out.
    fn unpark_account_checked(
        who: &T::AccountId,
        optional_amount: Option<<T::Currency as Inspect<T::AccountId>>::Balance>,
    ) -> Result<Amount<T>, sp_runtime::DispatchError> {
        // Best-effort thaw of any expired `Unparking` freeze; failure is fine
        // (account may simply not be unfreezing).
        let _ = Self::unfreeze_account(who);
        let amount_parked_at = Self::amount_parked_at(who)?;
        match optional_amount {
            Some(requested_amount) => {
                if requested_amount <= amount_parked_at.free + amount_parked_at.frozen {
                    if amount_parked_at.free + amount_parked_at.frozen - requested_amount
                        < T::MinPark::get()
                    {
                        // Remainder would be dust: close the position entirely.
                        Self::reap_account(who, amount_parked_at)
                    } else {
                        // Withdraw the free (unrewarded) part first, then dip
                        // into the frozen (rewarded) part.
                        let unpark_frozen = requested_amount.saturating_sub(amount_parked_at.free);
                        let unpark_free = requested_amount - unpark_frozen;
                        if unpark_frozen > 0u32.into() {
                            T::Currency::decrease_frozen(
                                &FreezeReason::AccountedParked.into(),
                                &Pallet::<T>::account_id(),
                                unpark_frozen,
                            )?;
                        }
                        // Move tokens
                        T::Currency::transfer(
                            &Pallet::<T>::account_id(),
                            who,
                            unpark_free + unpark_frozen,
                            Preservation::Expendable,
                        )?;
                        if unpark_frozen > 0u32.into() {
                            // Rewarded funds stay frozen on the receiver until
                            // the next week turn (see `Freezer`).
                            T::Currency::increase_frozen(
                                &FreezeReason::Unparking.into(),
                                who,
                                unpark_frozen,
                            )?;
                            <Freezer<T>>::insert(
                                who.clone(),
                                Pallet::<T>::dude_next().expect("account is not even the last one"),
                            );
                        }
                        <ParkingLot<T>>::insert(
                            who.clone(),
                            Parked {
                                amount: amount_parked_at.free - unpark_free,
                                amount_previous: amount_parked_at.frozen - unpark_frozen,
                                date: Pallet::<T>::dude_next()
                                    .expect("checked to be Some in amount_parked_at"),
                            },
                        );
                        Ok(Amount {
                            free: unpark_free,
                            frozen: unpark_frozen,
                            periods: amount_parked_at.periods,
                        })
                    }
                } else {
                    Err(Error::<T>::RequestedUnparkTooLarge.into())
                }
            }
            None => Self::reap_account(who, amount_parked_at),
        }
    }

    /// Current free/frozen breakdown for `who`, or `NotParked`.
    fn amount_parked_at(who: &T::AccountId) -> Result<Amount<T>, sp_runtime::DispatchError> {
        let parked_optional = Pallet::<T>::parked(who);
        if let Some(parked) = parked_optional {
            Self::amount_parked_checked(parked)
        } else {
            Err(Error::<T>::NotParked.into())
        }
    }

    /// Compound a ledger entry against the `WPRP` history.
    ///
    /// If no full week has passed (`periods == None`), amounts are returned as
    /// stored. Otherwise: `amount_previous` earns the multiplier of the week
    /// preceding the last `periods` weeks, the fresh `amount` is added, and the
    /// sum then earns the last `periods` weekly multipliers (popped newest-first
    /// from a local copy; storage is not modified here).
    fn amount_parked_checked(parked: Parked<T>) -> Result<Amount<T>, sp_runtime::DispatchError> {
        if let Some(next) = Pallet::<T>::dude_next() {
            let periods = Self::periods(parked.date, next);
            // Calculate reward
            let (free, frozen) = if let Some(periods) = periods {
                let mut amount = parked.amount_previous;
                let mut wprp = Pallet::<T>::wprp();
                if amount > 0u32.into() {
                    // NOTE(review): `wprp.len() - periods - 1` underflows when
                    // the history is shorter than `periods + 1` -- TODO confirm
                    // this cannot occur after `trim_and_set_wprp`.
                    if let Some(weekly) = wprp.get(wprp.len() - periods as usize - 1) {
                        amount = weekly
                            .wprp
                            .checked_mul_int(amount)
                            .ok_or(Error::<T>::OverflowRisk)?;
                    } else {
                        return Err(Error::<T>::PeriodsRecordInvalid.into());
                    }
                }
                amount += parked.amount;
                if periods > 0 {
                    for _ in 0..periods {
                        if let Some(weekly) = wprp.pop() {
                            amount = weekly
                                .wprp
                                .checked_mul_int(amount)
                                .ok_or(Error::<T>::OverflowRisk)?;
                        } else {
                            return Err(Error::<T>::PeriodsRecordInvalid.into());
                        }
                    }
                }
                (0u32.into(), amount)
            } else {
                (parked.amount, parked.amount_previous)
            };
            Ok(Amount {
                free,
                frozen,
                periods,
            })
        } else {
            Err(Error::<T>::PoolNotInitialized.into())
        }
    }

    /// Close `who`'s position completely, paying everything out. Rewarded
    /// (frozen) funds remain frozen on `who` under `Unparking` until the next
    /// week turn. Special-cases the last remaining parker, which drains the
    /// pool account and resets the `WPRP` history.
    fn reap_account(
        who: &T::AccountId,
        amount_parked_at: Amount<T>,
    ) -> Result<Amount<T>, sp_runtime::DispatchError> {
        let count = Pallet::<T>::count() - 1;
        if count == 0 {
            // Special logic for last parked cleaned up
            T::Currency::thaw(
                &FreezeReason::AccountedParked.into(),
                &Pallet::<T>::account_id(),
            )?;
            // Pay out whatever the pool actually holds, rounding dust included.
            let amount = T::Currency::reducible_balance(
                &Pallet::<T>::account_id(),
                Preservation::Expendable,
                Fortitude::Force,
            );
            T::Currency::transfer(
                &Pallet::<T>::account_id(),
                who,
                amount,
                Preservation::Expendable,
            )?;
            if amount_parked_at.frozen > 0u32.into() {
                T::Currency::set_frozen(
                    &FreezeReason::Unparking.into(),
                    who,
                    amount_parked_at.frozen,
                    Fortitude::Polite,
                )?;
                <Freezer<T>>::insert(
                    who.clone(),
                    Pallet::<T>::dude_next().expect("account still was there"),
                );
            }
            // <DudeNext<T>>::set(None); // keep DudeNext because someone may be still unparking
            <WPRP<T>>::set(BoundedVec::new());
            <ParkingLot<T>>::remove(who);
            <Count<T>>::set(count);
            <Dependents<T>>::set(0);
            Ok(Amount {
                free: amount - amount_parked_at.frozen,
                frozen: amount_parked_at.frozen,
                periods: amount_parked_at.periods,
            })
        } else {
            if amount_parked_at.frozen > 0u32.into() {
                T::Currency::decrease_frozen(
                    &FreezeReason::AccountedParked.into(),
                    &Pallet::<T>::account_id(),
                    amount_parked_at.frozen,
                )?;
            }
            // Move tokens
            T::Currency::transfer(
                &Pallet::<T>::account_id(),
                who,
                amount_parked_at.free + amount_parked_at.frozen,
                Preservation::Expendable,
            )?;
            if amount_parked_at.frozen > 0u32.into() {
                T::Currency::set_frozen(
                    &FreezeReason::Unparking.into(),
                    who,
                    amount_parked_at.frozen,
                    Fortitude::Polite,
                )?;
                <Freezer<T>>::insert(
                    who.clone(),
                    Pallet::<T>::dude_next().expect("account is not even the last one"),
                );
            }
            <ParkingLot<T>>::remove(who);
            <Count<T>>::set(count);
            if let Some(periods) = amount_parked_at.periods {
                // Entry survived at least one week turn: release its claim on
                // the old start week.
                // NOTE(review): same unchecked indexing as in
                // `park_account_checked` -- panics if `periods + 1 > len`.
                let mut wprp = Pallet::<T>::wprp();
                let len = wprp.len();
                wprp[len - periods as usize - 1].prev_week_dependents -= 1;
                Self::trim_and_set_wprp(wprp);
            } else {
                // Entry was created this week: undo its current-week dependent.
                let dependents = Pallet::<T>::dependents();
                <Dependents<T>>::set(dependents - 1);
            }
            Ok(amount_parked_at)
        }
    }

    /// Thaw `who`'s `Unparking` freeze once the recorded Wednesday has passed.
    fn unfreeze_account(who: &T::AccountId) -> DispatchResult {
        if let Some(wed) = Pallet::<T>::freezer(who) {
            if wed == Pallet::<T>::dude_next().expect("Dude_next is never unset.") {
                Err(Error::<T>::StillUnparking.into())
            } else {
                T::Currency::thaw(&FreezeReason::Unparking.into(), who)
            }
        } else {
            Err(Error::<T>::NotUnparking.into())
        }
    }

    /// Unconditionally record the benefactor (origin checks happen in the call).
    fn set_benefactor_checked(benefactor: T::AccountId) {
        <Benefactor<T>>::set(Some(benefactor))
    }

    /// Should we be informed on matter of Wednesdays happening since the last observation of the same?
    ///
    /// Returns `None` if no new Wednesday has been reached, otherwise the count
    /// of weeks passed (capped by the `MaxOperationTime` search bound) and the
    /// Wednesday to store as the new `DudeNext`.
    fn my_dudes(new_wednesday: Wednesday, timestamp: u64) -> Option<NewWednesday> {
        let next_dude: Date = next_dude(timestamp).into();
        let mut new_wednesday: Date = new_wednesday.into();
        // If the time was flipped somehow, assume future is now. Was guaranteed by timestamp pallet not to
        // happen.
        if next_dude <= new_wednesday {
            return None;
        }
        for weeks_passed in 1..T::MaxOperationTime::get() {
            new_wednesday = new_wednesday.next_occurrence(Weekday::Wednesday);
            if next_dude <= new_wednesday {
                return Some(NewWednesday {
                    weeks_passed,
                    new_wednesday: next_dude.into(),
                });
            };
        }
        None
    }

    /// How many weeks were tokens parked?
    ///
    /// `None` if `next_wednesday` has not passed `first_wednesday` yet.
    fn periods(first_wednesday: Wednesday, next_wednesday: Wednesday) -> Option<u32> {
        let mut first_wednesday: Date = first_wednesday.into();
        let next_wednesday: Date = next_wednesday.into();
        if next_wednesday <= first_wednesday {
            return None;
        }
        for weeks_passed in 1..T::MaxOperationTime::get() {
            first_wednesday = first_wednesday.next_occurrence(Weekday::Wednesday);
            if next_wednesday <= first_wednesday {
                return Some(weeks_passed - 1);
            };
        }
        // NOTE(review): a gap wider than `MaxOperationTime` weeks falls back to
        // zero full periods -- confirm this is the intended behaviour for very
        // old parkings.
        Some(0)
    }

    /// Fold accrued rewards into the frozen part of `who`'s ledger entry and
    /// restart its week counter at the current `DudeNext`.
    fn rebase_parking(who: &T::AccountId) -> DispatchResult {
        let amount_parked_at = Self::amount_parked_at(who)?;
        if amount_parked_at.free > 0u32.into() {
            // account has non-frozen part, too early for rebase
            Err(Error::<T>::WaitingToPark.into())
        } else {
            <ParkingLot<T>>::insert(
                who.clone(),
                Parked {
                    amount: 0u32.into(),
                    amount_previous: amount_parked_at.frozen,
                    date: Pallet::<T>::dude_next().expect("checked to be Some in amount_parked_at"),
                },
            );
            if let Some(periods) = amount_parked_at.periods {
                let mut wprp = Pallet::<T>::wprp();
                let len = wprp.len();
                wprp[len - periods as usize - 1].prev_week_dependents -= 1;
                Self::trim_and_set_wprp(wprp);
            } else {
                // should not get here currently, as free amount is zero
                let dependents = Pallet::<T>::dependents();
                <Dependents<T>>::set(dependents - 1);
            }
            // Add a dependent for this week
            let dependents = Pallet::<T>::dependents();
            <Dependents<T>>::set(dependents + 1);
            Ok(())
        }
    }

    /// Drop leading `WPRP` records that no longer have any dependents, then
    /// persist the (possibly shortened) history.
    fn trim_and_set_wprp(mut wprp: BoundedVec<Weekly, T::MaxOperationTime>) {
        while let Some(Weekly {
            wprp: _,
            prev_week_dependents: 0,
        }) = wprp.first()
        {
            wprp.remove(0);
        }
        <WPRP<T>>::set(wprp)
    }
}
/// Breakdown of an account's parked funds at a point in time
/// (result of `amount_parked_checked`).
#[derive(Debug)]
pub struct Amount<T: Config> {
    /// Portion parked since the last week turn, not yet accruing rewards.
    pub free: <T::Currency as Inspect<T::AccountId>>::Balance,
    /// Portion already accounted (frozen in the pool), including accrued rewards.
    pub frozen: <T::Currency as Inspect<T::AccountId>>::Balance,
    /// Full weeks the entry has been parked; `None` before the first week turn.
    pub periods: Option<u32>,
}
/// Observation of Wednesday that is different from the last processed
#[derive(Debug)]
struct NewWednesday {
    /// Number of whole weeks elapsed since the last processed Wednesday.
    pub weeks_passed: u32,
    /// The Wednesday to record as the next one to process.
    pub new_wednesday: Wednesday,
}
/// Anticipated occurence of Wednesday
fn next_dude(timestamp: u64) -> Wednesday {
let unix_epoch = OffsetDateTime::UNIX_EPOCH;
let duration = Duration::milliseconds(timestamp as i64);
(unix_epoch + duration)
.date()
.next_occurrence(Weekday::Wednesday)
.into()
}

View File

@ -0,0 +1,103 @@
//! Mock helpers for Parking pallet.
use super::*;
use crate::{self as pallet_parking, reward_curve::UNIT};
use frame_support::{
derive_impl, parameter_types,
traits::{ConstU64, VariantCountOf},
};
use frame_system::EnsureRoot;
use sp_runtime::BuildStorage;
use time::Date;
// Mock block type consumed by `construct_runtime!` below.
type Block = frame_system::mocking::MockBlock<Test>;
// Minimal test runtime: system, balances, the parking pallet under test,
// and timestamp (the pallet's time source).
frame_support::construct_runtime!(
    pub enum Test {
        System: frame_system,
        Balances: pallet_balances,
        Parking: pallet_parking,
        Timestamp: pallet_timestamp,
    }
);
// System config: test-prelude defaults with u64 account ids and
// balances-backed account data.
#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
impl frame_system::Config for Test {
    type Block = Block;
    type AccountId = u64;
    type AccountData = pallet_balances::AccountData<u128>;
}
// Balances config: u128 balances; freezes identified by the runtime-wide
// freeze reason so the parking pallet's freezes are representable.
#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)]
impl pallet_balances::Config for Test {
    type Balance = u128;
    type ExistentialDeposit = ExistentialDeposit;
    type AccountStore = System;
    type FreezeIdentifier = RuntimeFreezeReason;
    type MaxFreezes = VariantCountOf<RuntimeFreezeReason>;
    type RuntimeFreezeReason = RuntimeFreezeReason;
}
// Timestamp config: u64 millisecond moments, set manually in tests.
#[derive_impl(pallet_timestamp::config_preludes::TestDefaultConfig)]
impl pallet_timestamp::Config for Test {
    type Moment = u64;
    type OnTimestampSet = ();
    type MinimumPeriod = ConstU64<5>;
    type WeightInfo = ();
}
// Parking pallet config under test: Root acts as the parking manager,
// constants come from `parameter_types!` below.
impl pallet_parking::Config for Test {
    type RuntimeEvent = RuntimeEvent;
    type PalletId = PalletParkingId;
    type Currency = Balances;
    type MinPark = MinParkMock;
    type Time = Timestamp;
    type MaxOperationTime = MaxOperationTimeMock;
    type ParkingManager = EnsureRoot<u64>;
    type RuntimeFreezeReason = RuntimeFreezeReason;
    type WeightInfo = ();
}
parameter_types! {
    // Pool account derivation id ("pparking").
    pub const PalletParkingId: PalletId = PalletId(*b"pparking");
    // Minimum parkable amount: one whole token.
    pub const MinParkMock: u128 = UNIT;
    // WPRP history bound (weeks) used in tests.
    pub const MaxOperationTimeMock: u32 = 100;
    // Existential deposit: a tenth of a token.
    pub const ExistentialDeposit: u128 = UNIT/10;
}
/// Advance mock blocks until the timestamp pallet reports a time past `target_time`.
///
/// Each simulated block bumps the block number, derives its timestamp from the
/// fixed block schedule and runs the parking pallet's `on_initialize` hook.
pub fn run_to_timestamp(target_time: u64) {
    loop {
        let now = Timestamp::now().as_millis() as u64;
        if now > target_time {
            break;
        }
        let block = System::block_number() + 1;
        System::set_block_number(block);
        Timestamp::set_timestamp(INIT_TIMESTAMP + block * BLOCK_TIME);
        <pallet::Pallet<Test> as Hooks<u64>>::on_initialize(block);
    }
}
/// Run the mock chain forward to midnight (UTC) of the upcoming Wednesday.
pub fn to_next_wed() {
    let now_ms = Timestamp::now().as_millis() as u64;
    let wednesday: Date = next_dude(now_ms).into();
    let since_epoch = wednesday.midnight().assume_utc() - OffsetDateTime::UNIX_EPOCH;
    run_to_timestamp(since_epoch.whole_milliseconds() as u64)
}
/// Timestamp (ms since Unix epoch) assigned to the first simulated block.
pub const INIT_TIMESTAMP: u64 = 30_000_000;
/// Milliseconds between consecutive mock blocks.
pub const BLOCK_TIME: u64 = 6_000;
/// Seed a well-funded benefactor account (id 365) and register it via the
/// pallet's internal setter, bypassing origin checks.
pub fn prepare_benefactor() {
    let benefactor: <Test as frame_system::Config>::AccountId = 365u64;
    let funding: u128 = <Test as pallet::Config>::MinPark::get() * 20_000_000u128;
    let _ = <Test as pallet::Config>::Currency::set_balance(&benefactor, funding);
    Parking::set_benefactor_checked(benefactor);
}
/// Build fresh test externalities from the default system genesis.
pub fn new_test_ext() -> sp_io::TestExternalities {
    sp_io::TestExternalities::new(
        frame_system::GenesisConfig::<Test>::default()
            .build_storage()
            .unwrap(),
    )
}

View File

@ -0,0 +1,138 @@
//! Hardcoded values for parking rewards
//use lazy_static::lazy_static;
use crate::FixedU128;
use core::ops::Range;
// One whole token in base units (18 decimals).
pub const UNIT: u128 = 1_000_000_000_000_000_000;
// Bracket borders (in base units) of the pool size the reward rate depends on.
pub const BORDER_00: u128 = UNIT;
pub const BORDER_01: u128 = 50_000 * UNIT;
pub const BORDER_02: u128 = 100_000 * UNIT;
pub const BORDER_03: u128 = 150_000 * UNIT;
pub const BORDER_04: u128 = 200_000 * UNIT;
pub const BORDER_05: u128 = 250_000 * UNIT;
pub const BORDER_06: u128 = 300_000 * UNIT;
pub const BORDER_07: u128 = 400_000 * UNIT;
pub const BORDER_08: u128 = 500_000 * UNIT;
pub const BORDER_09: u128 = 600_000 * UNIT;
pub const BORDER_10: u128 = 700_000 * UNIT;
pub const BORDER_11: u128 = 800_000 * UNIT;
pub const BORDER_12: u128 = 900_000 * UNIT;
pub const BORDER_13: u128 = 1_000_000 * UNIT;
// Half-open pool-size brackets formed from consecutive borders.
pub const RANGE_00: Range<u128> = BORDER_00..BORDER_01;
pub const RANGE_01: Range<u128> = BORDER_01..BORDER_02;
pub const RANGE_02: Range<u128> = BORDER_02..BORDER_03;
pub const RANGE_03: Range<u128> = BORDER_03..BORDER_04;
pub const RANGE_04: Range<u128> = BORDER_04..BORDER_05;
pub const RANGE_05: Range<u128> = BORDER_05..BORDER_06;
pub const RANGE_06: Range<u128> = BORDER_06..BORDER_07;
pub const RANGE_07: Range<u128> = BORDER_07..BORDER_08;
pub const RANGE_08: Range<u128> = BORDER_08..BORDER_09;
pub const RANGE_09: Range<u128> = BORDER_09..BORDER_10;
pub const RANGE_10: Range<u128> = BORDER_10..BORDER_11;
pub const RANGE_11: Range<u128> = BORDER_11..BORDER_12;
pub const RANGE_12: Range<u128> = BORDER_12..BORDER_13;
// Numerators (over 1000 * UNIT) of each bracket's weekly rate; values are
// checked against the yearly percentages in this file's tests.
const RAT_00: u128 = 2682121877864762880;
const RAT_01: u128 = 2598393832873524736;
const RAT_02: u128 = 2514306399607147008;
const RAT_03: u128 = 2429856448999734272;
const RAT_04: u128 = 2345040810807885824;
const RAT_05: u128 = 2259856272882609920;
const RAT_06: u128 = 2174299580426808320;
const RAT_07: u128 = 2002056494917470208;
const RAT_08: u128 = 1828284633643040256;
const RAT_09: u128 = 1652956343169531648;
const RAT_10: u128 = 1476043210382638080;
const RAT_11: u128 = 1297516034356460800;
const RAT_12: u128 = 1117344796908170624;
// This was statically computed once; numerical errors are noticable, but are dwarfed by
// transaction commissions anyway
const PRP_00_WEEKLY: FixedU128 = FixedU128::from_rational(RAT_00, 1000 * UNIT);
const PRP_01_WEEKLY: FixedU128 = FixedU128::from_rational(RAT_01, 1000 * UNIT);
const PRP_02_WEEKLY: FixedU128 = FixedU128::from_rational(RAT_02, 1000 * UNIT);
const PRP_03_WEEKLY: FixedU128 = FixedU128::from_rational(RAT_03, 1000 * UNIT);
const PRP_04_WEEKLY: FixedU128 = FixedU128::from_rational(RAT_04, 1000 * UNIT);
const PRP_05_WEEKLY: FixedU128 = FixedU128::from_rational(RAT_05, 1000 * UNIT);
const PRP_06_WEEKLY: FixedU128 = FixedU128::from_rational(RAT_06, 1000 * UNIT);
const PRP_07_WEEKLY: FixedU128 = FixedU128::from_rational(RAT_07, 1000 * UNIT);
const PRP_08_WEEKLY: FixedU128 = FixedU128::from_rational(RAT_08, 1000 * UNIT);
const PRP_09_WEEKLY: FixedU128 = FixedU128::from_rational(RAT_09, 1000 * UNIT);
const PRP_10_WEEKLY: FixedU128 = FixedU128::from_rational(RAT_10, 1000 * UNIT);
const PRP_11_WEEKLY: FixedU128 = FixedU128::from_rational(RAT_11, 1000 * UNIT);
const PRP_12_WEEKLY: FixedU128 = FixedU128::from_rational(RAT_12, 1000 * UNIT);
/// Weekly parking reward, expressed as a fraction (not a percentage).
///
/// The first bracket containing `tokens_in_pool` decides the tier; amounts
/// outside every bracket earn a zero reward.
pub fn weekly_parking_reward_percentage(tokens_in_pool: u128) -> FixedU128 {
    // Tier table: (token-amount bracket, weekly reward fraction).
    let tiers = [
        (&RANGE_00, PRP_00_WEEKLY),
        (&RANGE_01, PRP_01_WEEKLY),
        (&RANGE_02, PRP_02_WEEKLY),
        (&RANGE_03, PRP_03_WEEKLY),
        (&RANGE_04, PRP_04_WEEKLY),
        (&RANGE_05, PRP_05_WEEKLY),
        (&RANGE_06, PRP_06_WEEKLY),
        (&RANGE_07, PRP_07_WEEKLY),
        (&RANGE_08, PRP_08_WEEKLY),
        (&RANGE_09, PRP_09_WEEKLY),
        (&RANGE_10, PRP_10_WEEKLY),
        (&RANGE_11, PRP_11_WEEKLY),
        (&RANGE_12, PRP_12_WEEKLY),
    ];
    tiers
        .iter()
        .find(|(bracket, _)| bracket.contains(&tokens_in_pool))
        .map(|&(_, rate)| rate)
        // Not expected in practice: pool size is assumed to stay within the
        // configured brackets, so anything else yields no reward.
        .unwrap_or_else(|| 0.into())
}
#[cfg(test)]
#[cfg(feature = "std")]
mod tests {
    use super::*;
    /// Yearly reward fractions each tier was derived from (15% down to 6%).
    const PRP_00: f64 = 15.0 / 100.;
    const PRP_01: f64 = 14.5 / 100.;
    const PRP_02: f64 = 14.0 / 100.;
    const PRP_03: f64 = 13.5 / 100.;
    const PRP_04: f64 = 13.0 / 100.;
    const PRP_05: f64 = 12.5 / 100.;
    const PRP_06: f64 = 12.0 / 100.;
    const PRP_07: f64 = 11.0 / 100.;
    const PRP_08: f64 = 10.0 / 100.;
    const PRP_09: f64 = 9.0 / 100.;
    const PRP_10: f64 = 8.0 / 100.;
    const PRP_11: f64 = 7.0 / 100.;
    const PRP_12: f64 = 6.0 / 100.;
    /// Inverse number of weeks in a year
    const PRP_POWER_YEARLY_TO_WEEKLY: f64 = 7.0 / 365.25;
    /// Converts a yearly reward fraction into the fixed-point weekly
    /// numerator that the RAT_xx constants are expected to hold.
    fn weekly_numerator(yearly: f64) -> u128 {
        (((yearly + 1.).powf(PRP_POWER_YEARLY_TO_WEEKLY) - 1.)
            * (1000 as f64)
            * (UNIT as f64))
            .round() as u128
    }
    #[test]
    fn numbers_correct() {
        assert_eq!(weekly_numerator(PRP_00), RAT_00);
        assert_eq!(weekly_numerator(PRP_01), RAT_01);
        assert_eq!(weekly_numerator(PRP_02), RAT_02);
        assert_eq!(weekly_numerator(PRP_03), RAT_03);
        assert_eq!(weekly_numerator(PRP_04), RAT_04);
        assert_eq!(weekly_numerator(PRP_05), RAT_05);
        assert_eq!(weekly_numerator(PRP_06), RAT_06);
        assert_eq!(weekly_numerator(PRP_07), RAT_07);
        assert_eq!(weekly_numerator(PRP_08), RAT_08);
        assert_eq!(weekly_numerator(PRP_09), RAT_09);
        assert_eq!(weekly_numerator(PRP_10), RAT_10);
        assert_eq!(weekly_numerator(PRP_11), RAT_11);
        assert_eq!(weekly_numerator(PRP_12), RAT_12);
    }
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,247 @@
//! Autogenerated weights for `pallet_parking`
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 42.0.1
//! DATE: 2025-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
//! HOSTNAME: `Karningul`, CPU: `AMD Ryzen 7 8700G w/ Radeon 780M Graphics`
//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("local")`, DB CACHE: 1024
// Executed Command:
// ../../../target/release/g6-solo-node
// benchmark
// pallet
// --pallet
// pallet-parking
// --extrinsic
// *
// --chain
// local
// --steps
// 50
// --repeat
// 20
// --output
// weights.rs
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
#![allow(unused_imports)]
#![allow(missing_docs)]
use core::marker::PhantomData;
use frame_support::{
    traits::Get,
    weights::{constants::RocksDbWeight, Weight},
};
/// Weight functions needed for `pallet_parking` dispatchables.
pub trait WeightInfo {
    /// Weight of the `park` call.
    fn park() -> Weight;
    /// Weight of the `unpark` call.
    fn unpark() -> Weight;
    /// Weight of the `set_benefactor` call.
    fn set_benefactor() -> Weight;
    /// Weight of the `apply_rewards` call.
    fn apply_rewards() -> Weight;
    /// Weight of the `unfreeze` call.
    fn unfreeze() -> Weight;
}
/// Weight functions for `pallet_parking`, priced with the runtime's
/// configured database weights (`T::DbWeight`).
pub struct SubstrateWeight<T>(PhantomData<T>);
impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
    /// Reads: `Parking::Benefactor`, `Parking::ParkingLot`, `System::Account`,
    /// `Parking::Count`, `Timestamp::Now`, `Parking::Dependents`,
    /// `Parking::CounterForParkingLot`. Writes: `ParkingLot`, `System::Account`,
    /// `Count`, `Dependents`, `CounterForParkingLot`, `DudeNext`.
    fn park() -> Weight {
        // Measured: 278 bytes, estimated PoV: 3593 bytes.
        // Benchmarked base time 56_517_000 ps (minimum was 55_455_000 ps).
        Weight::from_parts(56_517_000, 3593)
            .saturating_add(T::DbWeight::get().reads(7))
            .saturating_add(T::DbWeight::get().writes(6))
    }
    /// Reads: `Parking::Freezer`, `Parking::ParkingLot`, `Parking::DudeNext`,
    /// `Parking::WPRP`, `Parking::Count`, `Balances::Freezes` (x2),
    /// `System::Account`, `Balances::Locks` (x2), `Parking::CounterForFreezer`,
    /// `Parking::CounterForParkingLot`. Writes 9 of those entries.
    fn unpark() -> Weight {
        // Measured: 643 bytes, estimated PoV: 8538 bytes.
        // Benchmarked base time 94_878_000 ps (minimum was 92_504_000 ps).
        Weight::from_parts(94_878_000, 8538)
            .saturating_add(T::DbWeight::get().reads(12))
            .saturating_add(T::DbWeight::get().writes(9))
    }
    /// Writes: `Parking::Benefactor`. No reads.
    fn set_benefactor() -> Weight {
        // No proof size; benchmarked base time 4_639_000 ps
        // (minimum was 4_358_000 ps).
        Weight::from_parts(4_639_000, 0)
            .saturating_add(T::DbWeight::get().writes(1))
    }
    /// Reads: `Parking::ParkingLot`, `Parking::DudeNext`, `Parking::WPRP`,
    /// `Parking::Dependents`. Writes: `ParkingLot`, `WPRP`, `Dependents`.
    fn apply_rewards() -> Weight {
        // Measured: 287 bytes, estimated PoV: 4047 bytes.
        // Benchmarked base time 18_364_000 ps (minimum was 18_124_000 ps).
        Weight::from_parts(18_364_000, 4047)
            .saturating_add(T::DbWeight::get().reads(4))
            .saturating_add(T::DbWeight::get().writes(3))
    }
    /// Reads: `Parking::Freezer`, `Parking::DudeNext`, `Balances::Freezes`,
    /// `Balances::Locks`. Writes: `Balances::Freezes`.
    fn unfreeze() -> Weight {
        // Measured: 324 bytes, estimated PoV: 4764 bytes.
        // Benchmarked base time 27_221_000 ps (minimum was 26_600_000 ps).
        Weight::from_parts(27_221_000, 4764)
            .saturating_add(T::DbWeight::get().reads(4))
            .saturating_add(T::DbWeight::get().writes(1))
    }
}
impl WeightInfo for () {
/// Storage: `Parking::Benefactor` (r:1 w:0)
/// Proof: `Parking::Benefactor` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
/// Storage: `Parking::ParkingLot` (r:1 w:1)
/// Proof: `Parking::ParkingLot` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`)
/// Storage: `System::Account` (r:1 w:1)
/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
/// Storage: `Parking::Count` (r:1 w:1)
/// Proof: `Parking::Count` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
/// Storage: `Timestamp::Now` (r:1 w:0)
/// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
/// Storage: `Parking::Dependents` (r:1 w:1)
/// Proof: `Parking::Dependents` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
/// Storage: `Parking::CounterForParkingLot` (r:1 w:1)
/// Proof: `Parking::CounterForParkingLot` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
/// Storage: `Parking::DudeNext` (r:0 w:1)
/// Proof: `Parking::DudeNext` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
fn park() -> Weight {
// Proof Size summary in bytes:
// Measured: `278`
// Estimated: `3593`
// Minimum execution time: 55_455_000 picoseconds.
Weight::from_parts(56_517_000, 0)
.saturating_add(Weight::from_parts(0, 3593))
}
/// Storage: `Parking::Freezer` (r:1 w:1)
/// Proof: `Parking::Freezer` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
/// Storage: `Parking::ParkingLot` (r:1 w:1)
/// Proof: `Parking::ParkingLot` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`)
/// Storage: `Parking::DudeNext` (r:1 w:0)
/// Proof: `Parking::DudeNext` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
/// Storage: `Parking::WPRP` (r:1 w:1)
/// Proof: `Parking::WPRP` (`max_values`: Some(1), `max_size`: Some(2562), added: 3057, mode: `MaxEncodedLen`)
/// Storage: `Parking::Count` (r:1 w:1)
/// Proof: `Parking::Count` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
/// Storage: `Balances::Freezes` (r:2 w:2)
/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`)
/// Storage: `System::Account` (r:1 w:1)
/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
/// Storage: `Balances::Locks` (r:2 w:0)
/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
/// Storage: `Parking::CounterForFreezer` (r:1 w:1)
/// Proof: `Parking::CounterForFreezer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
/// Storage: `Parking::CounterForParkingLot` (r:1 w:1)
/// Proof: `Parking::CounterForParkingLot` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
fn unpark() -> Weight {
// Proof Size summary in bytes:
// Measured: `643`
// Estimated: `8538`
// Minimum execution time: 92_504_000 picoseconds.
Weight::from_parts(94_878_000, 0)
.saturating_add(Weight::from_parts(0, 8538))
}
/// Storage: `Parking::Benefactor` (r:0 w:1)
/// Proof: `Parking::Benefactor` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
fn set_benefactor() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
// Minimum execution time: 4_358_000 picoseconds.
Weight::from_parts(4_639_000, 0)
.saturating_add(Weight::from_parts(0, 0))
}
/// Storage: `Parking::ParkingLot` (r:1 w:1)
/// Proof: `Parking::ParkingLot` (`max_values`: None, `max_size`: Some(77), added: 2552, mode: `MaxEncodedLen`)
/// Storage: `Parking::DudeNext` (r:1 w:0)
/// Proof: `Parking::DudeNext` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
/// Storage: `Parking::WPRP` (r:1 w:1)
/// Proof: `Parking::WPRP` (`max_values`: Some(1), `max_size`: Some(2562), added: 3057, mode: `MaxEncodedLen`)
/// Storage: `Parking::Dependents` (r:1 w:1)
/// Proof: `Parking::Dependents` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
fn apply_rewards() -> Weight {
// Proof Size summary in bytes:
// Measured: `287`
// Estimated: `4047`
// Minimum execution time: 18_124_000 picoseconds.
Weight::from_parts(18_364_000, 0)
.saturating_add(Weight::from_parts(0, 4047))
}
/// Storage: `Parking::Freezer` (r:1 w:0)
/// Proof: `Parking::Freezer` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
/// Storage: `Parking::DudeNext` (r:1 w:0)
/// Proof: `Parking::DudeNext` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`)
/// Storage: `Balances::Freezes` (r:1 w:1)
/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`)
/// Storage: `Balances::Locks` (r:1 w:0)
/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
fn unfreeze() -> Weight {
// Proof Size summary in bytes:
// Measured: `324`
// Estimated: `4764`
// Minimum execution time: 26_600_000 picoseconds.
Weight::from_parts(27_221_000, 0)
.saturating_add(Weight::from_parts(0, 4764))
}
}

View File

@ -0,0 +1,35 @@
[package]
name = "pallet-permissions"
description = "G6 Chain Pallet that stores permissions"
version.workspace = true
homepage.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
edition.workspace = true
rust-version.workspace = true
[dependencies]
# Every dependency is pulled in with `default-features = false` so the crate
# builds for no_std (Wasm runtime); std support is re-enabled through the
# crate's `std` feature.
log = { version = "0.4.17", default-features = false }
frame-benchmarking = { workspace = true, default-features = false }
frame-support = { workspace = true, default-features = false }
frame-system = { workspace = true, default-features = false }
codec = { workspace = true, default-features = false, features = ["derive",] }
scale-info = { workspace = true, default-features = false }
sp-runtime = { workspace = true, default-features = false, features = ["serde"] }
sp-std = { workspace = true, default-features = false }
# Inherit the lint configuration declared in the workspace root.
[lints]
workspace = true
[features]
default = ["std"]
# `std` must re-enable the std feature of every (non-optional) dependency;
# frame-benchmarking, log and sp-std were previously missing, which breaks
# native (std) builds of the crate.
std = [
	"codec/std",
	"frame-benchmarking/std",
	"frame-support/std",
	"frame-system/std",
	"log/std",
	"scale-info/std",
	"sp-runtime/std",
	"sp-std/std",
]
# Propagate benchmarking support to the frame crates as well, per convention.
runtime-benchmarks = [
	"frame-benchmarking/runtime-benchmarks",
	"frame-support/runtime-benchmarks",
	"frame-system/runtime-benchmarks",
]
try-runtime = ["frame-support/try-runtime", "frame-system/try-runtime"]

View File

@ -0,0 +1,3 @@
# G6 pallet for permissions transactions

Build this pallet on its own with:

```sh
cargo build --package pallet-permissions
```

View File

@ -0,0 +1,25 @@
//! Benchmarking setup for pallet-permissions
#![cfg(feature = "runtime-benchmarks")]
use super::*;
#[allow(unused)]
use crate::Pallet as Template;
use frame_benchmarking::v2::*;
use frame_system::RawOrigin;
/*
#[benchmarks]
mod benchmarks {
use super::*;
/*
#[benchmark]
fn do_something() {
let value = 100u32.into();
let caller: T::AccountId = whitelisted_caller();
#[extrinsic_call]
do_something(RawOrigin::Signed(caller), value);
assert_eq!(Something::<T>::get(), Some(value));
}
*/
impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test);
}*/

View File

@ -0,0 +1,491 @@
#![cfg_attr(not(feature = "std"), no_std)]
pub use pallet::*;
#[cfg(feature = "runtime-benchmarks")]
mod benchmarking;
pub mod weights;
pub use weights::*;
// #[frame_support::pallet]
#[frame_support::pallet(dev_mode)]
pub mod pallet {
extern crate alloc;
use alloc::collections::{BTreeMap, BTreeSet};
use frame_support::traits::BuildGenesisConfig;
use frame_support::{dispatch::DispatchResult, pallet_prelude::*};
use frame_system::pallet_prelude::*;
use sp_runtime::traits::BadOrigin;
use sp_std::vec;
use sp_std::vec::Vec;
// use sp_runtime::serde::{Deserialize, Serialize};
/// Name of the reserved administrator role created during genesis.
pub const ADMIN_ROLE: &'static [u8] = b"admin";
/// Role id the admin role must occupy (asserted in `genesis_build`).
pub const ADMIN_ID: u32 = 0;
/// Maximum accepted byte length of a permission name.
pub const MAX_PERMISSION_NAME_LENGTH: usize = 32;
/// Maximum accepted byte length of a role name.
pub const MAX_ROLE_NAME_LENGTH: usize = 32;
// Raw byte-vector names; lengths are enforced at creation time, not by type.
pub type PermissionId = u32;
pub type PermissionName = Vec<u8>;
pub type RoleId = u32;
pub type RoleName = Vec<u8>;
pub type RolePermissionId = u32;
#[pallet::pallet]
pub struct Pallet<T>(_);
/// Pallet configuration trait.
#[pallet::config]
pub trait Config: frame_system::Config {
    /// The overarching runtime event type.
    type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
}
#[pallet::genesis_config]
pub struct GenesisConfig<T: Config> {
    /// Accounts granted the admin role at genesis.
    pub initial_admins: Option<Vec<T::AccountId>>,
    /// Permissions created at genesis (by name).
    pub initial_permissions: Option<Vec<PermissionName>>,
    /// Roles created at genesis, each with an optional list of permission
    /// names; referenced permissions are created first if missing.
    pub initial_roles: Option<Vec<(RoleName, Option<Vec<PermissionName>>)>>,
    // NOTE(review): not consumed anywhere in this file's `build` — confirm
    // whether role assignments at genesis are intentionally unimplemented.
    pub initial_role_assignments: Option<Vec<(Vec<T::AccountId>, Vec<RoleName>)>>,
}
impl<T: Config> Default for GenesisConfig<T> {
    /// An empty genesis: no admins, permissions, roles or assignments.
    fn default() -> Self {
        Self {
            initial_admins: None,
            initial_permissions: None,
            initial_roles: None,
            initial_role_assignments: None,
        }
    }
}
impl<T: Config> GenesisConfig<T> {
    /// Creates the reserved "admin" role and asserts it received id 0.
    ///
    /// Panics if creation fails or the allocated id is not `ADMIN_ID`;
    /// either would indicate a corrupted genesis state.
    fn initialize_admin_role() {
        let role_id = Pallet::<T>::_create_role(ADMIN_ROLE.to_vec(), Default::default())
            .expect("Failed to create admin role during genesis initialization");
        if role_id != ADMIN_ID {
            panic!("ADMIN_ID must be 0 but was: {}", role_id);
        }
    }
    /// Creates each `(name, permission ids)` role in order.
    // NOTE(review): creation errors (e.g. duplicate role names) are
    // discarded via `let _` — presumably to make genesis tolerant of
    // repeats; confirm this is intentional.
    fn initialize_roles(initial_roles: &[(RoleName, Option<Vec<PermissionId>>)]) {
        for (name, permission_ids) in initial_roles {
            let _ = Pallet::<T>::_create_role(name.clone(), permission_ids.clone());
        }
    }
    /// Creates every listed permission that does not already exist
    /// (matched by name); panics if a creation fails.
    fn initialize_permissions(initialize_permissions: &[PermissionName]) {
        // Snapshot of existing permission names, used for de-duplication.
        let mut all_permissions: BTreeSet<PermissionName> = Pallet::<T>::get_all_permissions()
            .into_iter()
            .map(|permission| permission.name)
            .collect();
        for permission_name in initialize_permissions {
            if !all_permissions.contains(permission_name) {
                // Create the permission if it doesn't exist
                Pallet::<T>::_create_permission(permission_name.clone())
                    .expect("Failed to create permission during genesis build");
                all_permissions.insert(permission_name.clone());
            }
        }
    }
    /// Appends `ADMIN_ID` to the account's role list; no-op if already held.
    fn add_admin_role_to_account(account: T::AccountId) {
        let mut roles = AccountRoles::<T>::get(&account);
        if !roles.contains(&ADMIN_ID) {
            roles.push(ADMIN_ID);
            AccountRoles::<T>::insert(account, roles);
        }
    }
}
#[pallet::genesis_build]
impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
    /// Builds the genesis state: creates the reserved admin role, grants it
    /// to `initial_admins`, creates `initial_permissions`, then creates
    /// `initial_roles` — creating any permissions those roles reference first.
    fn build(&self) {
        Self::initialize_admin_role();
        if let Some(admins) = &self.initial_admins {
            for admin in admins {
                Self::add_admin_role_to_account(admin.clone());
            }
        }
        if let Some(initial_permissions) = &self.initial_permissions {
            Self::initialize_permissions(initial_permissions);
        }
        if let Some(initial_roles) = &self.initial_roles {
            // create all permissions that the roles have
            let mut all_roles_permissions: Vec<PermissionName> = initial_roles
                .iter()
                .filter_map(|(_, maybe_permission_names)| maybe_permission_names.clone())
                .flat_map(|permission_names| permission_names.into_iter())
                .collect();
            all_roles_permissions.sort();
            all_roles_permissions.dedup();
            Self::initialize_permissions(&all_roles_permissions);
            // fetch all current permissions on the blockchain
            let all_permissions = Pallet::<T>::get_all_permissions();
            let permission_name_to_id: BTreeMap<PermissionName, PermissionId> = all_permissions
                .into_iter()
                .map(|permission| (permission.name, permission.id))
                .collect();
            // Translate role definitions from permission *names* to ids;
            // names that still cannot be resolved are silently skipped.
            let roles_with_ids: Vec<(RoleName, Option<Vec<PermissionId>>)> = initial_roles
                .iter()
                .map(|(role_name, maybe_permission_names)| {
                    let permission_ids =
                        maybe_permission_names.as_ref().map(|permission_names| {
                            permission_names
                                .iter()
                                .filter_map(|name| permission_name_to_id.get(name).cloned()) // Map names to IDs
                                .collect::<Vec<_>>()
                        });
                    (role_name.clone(), permission_ids)
                })
                .collect();
            Self::initialize_roles(&roles_with_ids);
        }
    }
}
// #[derive(Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug, TypeInfo, Serialize, Deserialize)]
/// A permission as stored and emitted: numeric id plus raw-byte name.
#[derive(Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug, TypeInfo, Default)]
pub struct Permission {
    pub id: PermissionId,
    pub name: PermissionName,
}
/// The storage representation of a role: name plus permission *ids*
/// (resolved lazily; the id is the storage-map key, not a field).
#[derive(Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug, TypeInfo, Default)]
pub struct StorageRole {
    pub name: RoleName,
    pub permissions: Vec<PermissionId>,
}
/// A fully materialized role: id, name and resolved `Permission` values.
/// Used for the `RolesWithPermissionsRetrieved` event.
#[derive(Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug, TypeInfo)]
pub struct Role {
    pub id: RoleId,
    pub name: RoleName,
    pub permissions: Vec<Permission>,
}
/// Permission id -> permission name.
#[pallet::storage]
#[pallet::getter(fn permissions)]
pub type Permissions<T: Config> =
    StorageMap<_, Blake2_128Concat, PermissionId, PermissionName, ValueQuery>;
/// Monotonic counter used to allocate the next permission id.
#[pallet::storage]
#[pallet::getter(fn next_permission_id)]
pub type NextPermissionId<T> = StorageValue<_, PermissionId, ValueQuery>;
/// Role id -> stored role (name + permission ids).
#[pallet::storage]
#[pallet::getter(fn roles)]
pub type Roles<T: Config> = StorageMap<_, Blake2_128Concat, RoleId, StorageRole, ValueQuery>;
/// Monotonic counter used to allocate the next role id.
#[pallet::storage]
#[pallet::getter(fn next_role_id)]
pub type NextRoleId<T> = StorageValue<_, RoleId, ValueQuery>;
/// Account -> role ids held by that account.
#[pallet::storage]
#[pallet::getter(fn account_roles)]
pub type AccountRoles<T: Config> =
    StorageMap<_, Blake2_128Concat, T::AccountId, Vec<RoleId>, ValueQuery>;
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event<T: Config> {
    /// A permission was created with the given id and name.
    PermissionCreated((PermissionId, PermissionName)),
    /// A role was created with the given id and stored definition.
    RoleCreated((RoleId, StorageRole)),
    /// All stored permissions, emitted by `emit_all_permissions`.
    PermissionsRetrieved(Vec<Permission>),
    /// A role was removed by id.
    RoleRemoved(RoleId),
    /// A role was assigned to an account.
    RoleAssigned {
        account: T::AccountId,
        role_id: RoleId,
    },
    /// A role was revoked from an account.
    RoleRevoked {
        account: T::AccountId,
        role_id: RoleId,
    },
    /// All admin accounts, emitted by `emit_all_admins`.
    AdminsRetrieved(Vec<T::AccountId>),
    /// All roles with resolved permissions, emitted by
    /// `emit_all_roles_with_permissions`.
    RolesWithPermissionsRetrieved(Vec<Role>),
}
#[pallet::error]
pub enum Error<T> {
    /// The admin role (`ADMIN_ID`) can never be removed or revoked.
    CannotRemoveAdminRole,
    /// No role exists under the given id.
    RoleNotFound,
    /// Permission name exceeds `MAX_PERMISSION_NAME_LENGTH` bytes.
    PermissionNameTooLong,
    /// Role name exceeds `MAX_ROLE_NAME_LENGTH` bytes.
    RoleNameTooLong,
    /// The account already holds this role.
    RoleAlreadyAssigned,
    /// A role with this name already exists.
    RoleAlreadyExists,
    /// The account does not hold this role.
    RoleNotAssigned,
    /// The admin role must be granted via `assign_admin`, not
    /// `assign_role_to_account`.
    CannotAssignAdminRole,
    /// A referenced permission id does not exist.
    PermissionNotFound,
    /// A permission with this name already exists.
    PermissionAlreadyExists,
}
#[pallet::call]
impl<T: Config> Pallet<T> {
    /// Creates a new permission. Caller must be root or an admin account.
    // NOTE(review): `10_000` is a placeholder weight (pallet runs in
    // dev_mode); replace with benchmarked weights before production.
    #[pallet::call_index(0)]
    #[pallet::weight(10_000)]
    pub fn create_permission(origin: OriginFor<T>, name: Vec<u8>) -> DispatchResult {
        Self::ensure_root_or_admin(origin)?;
        Self::_create_permission(name)?;
        Ok(())
    }
    /// Emits `PermissionsRetrieved` with every stored permission.
    /// The origin is not inspected — open to any caller.
    #[pallet::call_index(1)]
    #[pallet::weight(10_000)]
    pub fn emit_all_permissions(_origin: OriginFor<T>) -> DispatchResultWithPostInfo {
        Self::deposit_event(Event::PermissionsRetrieved(Self::get_all_permissions()));
        Ok(().into())
    }
    /// Creates a new role, optionally pre-populated with existing
    /// permission ids. Caller must be root or an admin account.
    #[pallet::call_index(2)]
    #[pallet::weight(10_000)]
    pub fn create_role(
        origin: OriginFor<T>,
        name: Vec<u8>,
        permissions: Option<Vec<PermissionId>>,
    ) -> DispatchResult {
        Self::ensure_root_or_admin(origin)?;
        Self::_create_role(name, permissions)?;
        Ok(())
    }
    /// Removes a role by id. The admin role (`ADMIN_ID`) cannot be removed.
    // NOTE(review): accounts keep the removed id in `AccountRoles`; readers
    // are presumably expected to tolerate dangling role ids — confirm.
    #[pallet::call_index(3)]
    #[pallet::weight(10_000)]
    pub fn remove_role(origin: OriginFor<T>, role_id: RoleId) -> DispatchResult {
        Self::ensure_root_or_admin(origin)?;
        if role_id == ADMIN_ID {
            return Err(Error::<T>::CannotRemoveAdminRole.into());
        }
        ensure!(Roles::<T>::contains_key(role_id), Error::<T>::RoleNotFound);
        Roles::<T>::remove(role_id);
        Self::deposit_event(Event::RoleRemoved(role_id));
        Ok(())
    }
    /// Assigns an existing non-admin role to `account`.
    /// The admin role must be granted via `assign_admin` instead.
    #[pallet::call_index(4)]
    #[pallet::weight(10_000)]
    pub fn assign_role_to_account(
        origin: OriginFor<T>,
        account: T::AccountId,
        role_id: RoleId,
    ) -> DispatchResult {
        Self::ensure_root_or_admin(origin)?;
        ensure!(role_id != ADMIN_ID, Error::<T>::CannotAssignAdminRole);
        ensure!(Roles::<T>::contains_key(role_id), Error::<T>::RoleNotFound);
        let mut roles = AccountRoles::<T>::get(&account);
        ensure!(!roles.contains(&role_id), Error::<T>::RoleAlreadyAssigned);
        roles.push(role_id);
        AccountRoles::<T>::insert(&account, roles);
        Self::deposit_event(Event::RoleAssigned { account, role_id });
        Ok(())
    }
    /// Grants the admin role to `account`.
    /// Caller must itself be root or an admin.
    #[pallet::call_index(5)]
    #[pallet::weight(10_000)]
    pub fn assign_admin(origin: OriginFor<T>, account: T::AccountId) -> DispatchResult {
        Self::ensure_root_or_admin(origin)?;
        let mut roles = AccountRoles::<T>::get(&account);
        ensure!(!roles.contains(&ADMIN_ID), Error::<T>::RoleAlreadyAssigned);
        roles.push(ADMIN_ID);
        AccountRoles::<T>::insert(&account, roles);
        Self::deposit_event(Event::RoleAssigned {
            account,
            role_id: ADMIN_ID,
        });
        Ok(())
    }
    /// Revokes a non-admin role from `account`.
    /// Revoking `ADMIN_ID` is rejected with `CannotRemoveAdminRole`.
    #[pallet::call_index(6)]
    #[pallet::weight(10_000)]
    pub fn revoke_role_from_account(
        origin: OriginFor<T>,
        account: T::AccountId,
        role_id: RoleId,
    ) -> DispatchResult {
        Self::ensure_root_or_admin(origin)?;
        ensure!(role_id != ADMIN_ID, Error::<T>::CannotRemoveAdminRole);
        let mut roles = AccountRoles::<T>::get(&account);
        ensure!(roles.contains(&role_id), Error::<T>::RoleNotAssigned);
        // Revoke the role from the account
        roles.retain(|&r| r != role_id);
        AccountRoles::<T>::insert(&account, roles);
        Self::deposit_event(Event::RoleRevoked { account, role_id });
        Ok(())
    }
    /// Emits `AdminsRetrieved` with every account holding `ADMIN_ID`.
    #[pallet::call_index(7)]
    #[pallet::weight(10_000)]
    pub fn emit_all_admins(_origin: OriginFor<T>) -> DispatchResult {
        Self::deposit_event(Event::AdminsRetrieved(Self::get_all_admins()));
        Ok(())
    }
    /// Emits `RolesWithPermissionsRetrieved` with every role and its
    /// fully-resolved permissions.
    #[pallet::call_index(8)]
    #[pallet::weight(10_000)]
    pub fn emit_all_roles_with_permissions(_origin: OriginFor<T>) -> DispatchResult {
        let roles_with_permissions = Self::collect_all_roles_with_permissions();
        Self::deposit_event(Event::RolesWithPermissionsRetrieved(roles_with_permissions));
        Ok(())
    }
    // TODO
    // - add permission to role
    // - remove permission from role
    // - remove admin from
}
impl<T: Config> Pallet<T> {
    /// Creates a new permission under a fresh auto-incremented id.
    ///
    /// Errors with `PermissionNameTooLong` when `name` exceeds
    /// `MAX_PERMISSION_NAME_LENGTH` bytes, and `PermissionAlreadyExists`
    /// when a permission with the same name is stored. Emits
    /// `Event::PermissionCreated` and returns the new id.
    pub fn _create_permission(name: Vec<u8>) -> Result<u32, Error<T>> {
        ensure!(
            name.len() <= MAX_PERMISSION_NAME_LENGTH,
            Error::<T>::PermissionNameTooLong
        );
        // Names are not indexed, so uniqueness needs a full O(n) scan.
        let is_duplicate =
            Permissions::<T>::iter().any(|(_, existing_name)| existing_name == name);
        ensure!(!is_duplicate, Error::<T>::PermissionAlreadyExists);
        let id = Self::get_next_permissions_id_incrementing();
        Permissions::<T>::insert(id, name.clone());
        Self::deposit_event(Event::PermissionCreated((id, name)));
        Ok(id)
    }
    /// Creates a new role under a fresh auto-incremented id.
    ///
    /// `permissions` defaults to an empty list; every referenced permission
    /// id must already exist. Errors with `RoleNameTooLong`,
    /// `RoleAlreadyExists` or `PermissionNotFound`. Emits
    /// `Event::RoleCreated` and returns the new id.
    pub fn _create_role(
        name: Vec<u8>,
        permissions: Option<Vec<PermissionId>>,
    ) -> Result<u32, Error<T>> {
        ensure!(
            name.len() <= MAX_ROLE_NAME_LENGTH,
            Error::<T>::RoleNameTooLong
        );
        // Names are not indexed, so uniqueness needs a full O(n) scan.
        let is_duplicate =
            Roles::<T>::iter().any(|(_, existing_role)| existing_role.name == name);
        ensure!(!is_duplicate, Error::<T>::RoleAlreadyExists);
        let permissions_vec = permissions.unwrap_or_default();
        // Reject roles referencing permission ids not present in storage.
        for &permission_id in &permissions_vec {
            ensure!(
                Permissions::<T>::contains_key(permission_id),
                Error::<T>::PermissionNotFound
            );
        }
        let id = Self::get_next_role_id_incrementing();
        let role = StorageRole {
            name,
            permissions: permissions_vec,
        };
        Roles::<T>::insert(id, role.clone());
        Self::deposit_event(Event::RoleCreated((id, role)));
        Ok(id)
    }
    /// Authorizes `origin` as either root or a signed account that holds the
    /// admin role.
    ///
    /// Returns `Ok(None)` for root, `Ok(Some(account))` for an admin
    /// account, and `Err(BadOrigin)` for anything else.
    pub fn ensure_root_or_admin(
        origin: OriginFor<T>,
    ) -> Result<Option<T::AccountId>, BadOrigin> {
        // `origin` is consumed exactly once; the previous `.clone()` was
        // unnecessary.
        match ensure_signed_or_root(origin) {
            Ok(Some(account_id)) => {
                // A signed origin is authorized only if it holds ADMIN_ID.
                let roles = AccountRoles::<T>::get(&account_id);
                if roles.contains(&ADMIN_ID) {
                    Ok(Some(account_id))
                } else {
                    Err(BadOrigin)
                }
            }
            // Root passes unconditionally.
            Ok(None) => Ok(None),
            Err(e) => Err(e),
        }
    }
    /// Returns the next free permission id and advances the counter.
    // NOTE(review): `id + 1` overflows after u32::MAX allocations —
    // practically unreachable, but a checked add would be safer if
    // permission creation ever becomes unprivileged.
    pub fn get_next_permissions_id_incrementing() -> u32 {
        let id = NextPermissionId::<T>::get();
        NextPermissionId::<T>::put(id + 1);
        id
    }
    /// Returns the next free role id and advances the counter.
    pub fn get_next_role_id_incrementing() -> u32 {
        let id = NextRoleId::<T>::get();
        NextRoleId::<T>::put(id + 1);
        id
    }
    /// Returns every stored permission as a `Permission` value.
    pub fn get_all_permissions() -> Vec<Permission> {
        Permissions::<T>::iter()
            .map(|(id, name)| Permission { id, name })
            .collect()
    }
    /// Returns every stored role definition (without its id).
    pub fn get_all_roles() -> Vec<StorageRole> {
        Roles::<T>::iter().map(|(_, role)| role).collect()
    }
    /// Returns every account that holds the admin role.
    pub fn get_all_admins() -> Vec<T::AccountId> {
        AccountRoles::<T>::iter()
            .filter_map(|(account, roles)| roles.contains(&ADMIN_ID).then_some(account))
            .collect()
    }
    /// Resolves every stored role into a `Role` whose permission ids are
    /// materialized into full `Permission` values.
    pub fn collect_all_roles_with_permissions() -> Vec<Role> {
        Roles::<T>::iter()
            .map(|(role_id, storage_role)| {
                let permissions: Vec<Permission> = storage_role
                    .permissions
                    .iter()
                    // `try_get` distinguishes a truly missing entry from a
                    // stored permission whose name happens to be empty; the
                    // previous `is_empty` check silently dropped the latter.
                    .filter_map(|&perm_id| {
                        Permissions::<T>::try_get(perm_id)
                            .ok()
                            .map(|name| Permission { id: perm_id, name })
                    })
                    .collect();
                Role {
                    id: role_id,
                    name: storage_role.name,
                    permissions,
                }
            })
            .collect()
    }
}
}

Some files were not shown because too many files have changed in this diff Show More