Added repuchain node

node
wolfy 2023-12-05 07:49:08 +01:00
parent b50f85950d
commit 136ea60670
41 changed files with 13137 additions and 0 deletions

@@ -1,2 +1,7 @@
# Team Walto
## Main track
DAO - economy
Project name: RepuChain
Solution to manage reputations in the Polkadot ecosystem.
Its purpose is to build a system chain that stores the reputation of an address based on its behavior and provides a simplified way to set and get blocks via XCM.

@@ -0,0 +1,25 @@
{
"name": "Substrate Node template",
"context": "..",
"settings": {
"terminal.integrated.shell.linux": "/bin/bash",
"lldb.executable": "/usr/bin/lldb"
},
"extensions": [
"rust-lang.rust",
"bungcip.better-toml",
"vadimcn.vscode-lldb"
],
"forwardPorts": [
3000,
9944
],
"onCreateCommand": ["cargo build", "cargo check"],
"postStartCommand": "./target/debug/node-template --dev --ws-external",
"menuActions": [
{"id": "polkadotjs",
"label": "Open PolkadotJS Apps",
"type": "external-preview",
"args": ["https://polkadot.js.org/apps/?rpc=wss%3A%2F%2F/$HOST/wss"]}
]
}

@@ -0,0 +1,16 @@
root = true
[*]
indent_style=space
indent_size=2
tab_width=2
end_of_line=lf
charset=utf-8
trim_trailing_whitespace=true
insert_final_newline = true
[*.{rs,toml}]
indent_style=tab
indent_size=tab
tab_width=4
max_line_length=100

@@ -0,0 +1,12 @@
blank_issues_enabled: false
contact_links:
- name: Support & Troubleshooting with the Substrate Stack Exchange Community
url: https://substrate.stackexchange.com
about: |
For general problems with Substrate or related technologies, please search here first
for solutions, by keyword and tags. If you discover no solution, please then ask your question in our community! We highly encourage everyone to also share their understanding by answering questions for others.
- name: Feature Requests and PRs to be submitted upstream
url: https://github.com/paritytech/substrate/tree/master/bin/node-template
about: |
This template is generated from tagged releases upstream; it is not independently updated and maintained.
Please direct all suggestions for improvements and PRs upstream.

@@ -0,0 +1,11 @@
version: 2
updates:
- package-ecosystem: "cargo"
directory: "/"
schedule:
interval: "daily"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"

@@ -0,0 +1,43 @@
name: Check Set-Up & Build
# Controls when the action will run.
on:
# Triggers the workflow on push or pull request events but only for the main branch
push:
branches: [main]
pull_request:
branches: [main]
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
check:
# The type of runner that the job will run on
runs-on: ubuntu-22.04
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@v3
- name: Install linux dependencies
run: sudo apt-get install -y clang libssl-dev llvm libudev-dev protobuf-compiler
- name: Install Rust
run: |
rustup update stable --no-self-update
rustup target add wasm32-unknown-unknown
# Rust cache
- uses: Swatinem/rust-cache@v2
- name: Check Build
run: |
SKIP_WASM_BUILD=1 cargo check --release
- name: Check Build for Benchmarking
run: >
pushd node &&
cargo check --features=runtime-benchmarks --release

@@ -0,0 +1,155 @@
# This is an example GitHub action that will build and publish the binaries and a Docker image
# You need to add the following secrets to your GitHub Repository or Organization to make this work
# - DOCKERHUB_USERNAME: The username of the DockerHub account. E.g. parity
# - DOCKERHUB_TOKEN: Access token for DockerHub, see https://docs.docker.com/docker-hub/access-tokens/.
# The following is set up as an environment variable below
# - DOCKER_REPO: The unique name of the DockerHub repository. E.g. parity/polkadot
name: Release
permissions:
contents: read
# Controls when the action will run.
on:
push:
# Triggers the workflow on tag push events
tags:
- v[0-9]+.*
env:
RUST_BACKTRACE: 1
CARGO_INCREMENTAL: 0
CARGO_NET_GIT_FETCH_WITH_CLI: true
CARGO_NET_RETRY: 10
RUSTFLAGS: -D warnings
RUSTUP_MAX_RETRIES: 10
CARGO_TERM_COLOR: always
# Set an environment variable (that can be overridden) for the Docker Repo
DOCKER_REPO: tripleight/node-template
defaults:
run:
shell: bash
jobs:
create-release:
# The type of runner that the job will run on
runs-on: ubuntu-22.04
timeout-minutes: 60
permissions:
contents: write
steps:
- uses: actions/checkout@v3
with:
persist-credentials: false
- uses: taiki-e/create-gh-release-action@v1
with:
title: $version
branch: main
token: ${{ secrets.GITHUB_TOKEN }}
upload-assets:
name: ${{ matrix.target }}
needs:
- create-release
strategy:
matrix:
# The list of architectures and OSes to build for
# You can add or remove targets here if you want
#
# When updating this list, remember to update the target list in tests too
include:
# - target: aarch64-unknown-linux-gnu
- target: x86_64-unknown-linux-gnu
- target: aarch64-apple-darwin
os: macos-11
- target: x86_64-apple-darwin
os: macos-11
# - target: universal-apple-darwin
# os: macos-11
# The type of runner that the job will run on
# Runs on Ubuntu if no other OS is specified above
runs-on: ${{ matrix.os || 'ubuntu-22.04' }}
timeout-minutes: 90
permissions:
contents: write
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@v3
with:
persist-credentials: false
- name: Install Rust
run: |
rustup update stable --no-self-update
rustup target add wasm32-unknown-unknown
- name: Install linux dependencies
if: (matrix.os == '' || startsWith(matrix.os, 'ubuntu'))
run: |
sudo apt-get -qq update
sudo apt-get install -y protobuf-compiler
- name: Install mac dependencies
if: startsWith(matrix.os, 'macos')
run: brew install protobuf
- uses: taiki-e/setup-cross-toolchain-action@v1
if: (matrix.os == '' || startsWith(matrix.os, 'ubuntu'))
with:
target: ${{ matrix.target }}
# Build and upload the binary to the new release
- uses: taiki-e/upload-rust-binary-action@v1
with:
bin: node-template
target: ${{ matrix.target }}
tar: all
token: ${{ secrets.GITHUB_TOKEN }}
- name: Upload x86_64 linux binary to workflow
if: (matrix.target == 'x86_64-unknown-linux-gnu')
uses: actions/upload-artifact@v3
with:
name: node-template
path: ${{ github.workspace }}/target/x86_64-unknown-linux-gnu/release/node-template
build-image:
# The type of runner that the job will run on
needs:
- upload-assets
runs-on: ubuntu-22.04
# Steps represent a sequence of tasks that will be executed as part of the job
steps:
# Login to Docker hub using the credentials stored in the repository secrets
- name: Log in to Docker Hub
uses: docker/login-action@v2.1.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- name: Check out the repo
uses: actions/checkout@v3
# Download the binary from the previous job
- name: Download x86_64 linux binary
uses: actions/download-artifact@v3
with:
name: node-template
path: ${{ github.workspace }}
# Build and push two images: one with the version tag and the other with the latest tag
- name: Build and push Docker images
uses: docker/build-push-action@v4
with:
context: .
file: ./Containerfile
push: true
build-args: |
DOCKER_REPO=${{ env.DOCKER_REPO }}
tags: |
${{ env.DOCKER_REPO }}:${{ github.ref_name }}
${{ env.DOCKER_REPO }}:latest

repuchain-node/.gitignore vendored 100644
@@ -0,0 +1,12 @@
# Generated by Cargo
# will have compiled files and executables
**/target/
# These are backup files generated by rustfmt
**/*.rs.bk
.DS_Store
# direnv files
.envrc
.direnv
.vscode

@@ -0,0 +1 @@
* @sacha-l @lisa-parity

repuchain-node/Cargo.lock generated 100644

File diff suppressed because it is too large

@@ -0,0 +1,10 @@
[workspace]
members = [
"node",
"pallets/reputation",
"runtime",
]
resolver = "2"
[profile.release]
panic = "unwind"

@@ -0,0 +1,31 @@
FROM docker.io/library/ubuntu:22.04
# show backtraces
ENV RUST_BACKTRACE 1
# install tools and dependencies
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
ca-certificates && \
# apt cleanup
apt-get autoremove -y && \
apt-get clean && \
find /var/lib/apt/lists/ -type f -not -name lock -delete; \
# add user and link ~/.local/share/polkadot to /data
useradd -m -u 1000 -U -s /bin/sh -d /polkadot polkadot && \
mkdir -p /data /polkadot/.local/share && \
chown -R polkadot:polkadot /data && \
ln -s /data /polkadot/.local/share/node-template
USER polkadot
# copy the compiled binary to the container
COPY --chown=polkadot:polkadot --chmod=774 node-template /usr/bin/node-template
# check if executable works in this container
RUN /usr/bin/node-template --version
# ws_port
EXPOSE 9930 9333 9944 30333 30334
CMD ["/usr/bin/node-template"]

@@ -0,0 +1,16 @@
MIT No Attribution
Copyright Parity Technologies (UK) Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

@@ -0,0 +1,159 @@
# RepuChain
RepuChain is a solution to manage reputations in the Polkadot ecosystem.
Its purpose is to build a system chain that stores the reputation of an address based on its behavior and provides a simplified way to set and get blocks via XCM.
## Getting Started
Depending on your operating system and Rust version, there might be additional packages required to compile this template.
Check the [Install](https://docs.substrate.io/install/) instructions for your platform for the most common dependencies.
Alternatively, you can use one of the [alternative installation](#alternative-installations) options.
### Build
Use the following command to build the node without launching it:
```sh
cargo build --release
```
### Embedded Docs
After you build the project, you can use the following command to explore its parameters and subcommands:
```sh
./target/release/node-template -h
```
You can generate and view the [Rust Docs](https://doc.rust-lang.org/cargo/commands/cargo-doc.html) for this template with this command:
```sh
cargo +nightly doc --open
```
### Single-Node Development Chain
The following command starts a single-node development chain that doesn't persist state:
```sh
./target/release/node-template --dev
```
To purge the development chain's state, run the following command:
```sh
./target/release/node-template purge-chain --dev
```
To start the development chain with detailed logging, run the following command:
```sh
RUST_BACKTRACE=1 ./target/release/node-template -ldebug --dev
```
Development chains:
- Maintain state in a `tmp` folder while the node is running.
- Use the **Alice** and **Bob** accounts as default validator authorities.
- Use the **Alice** account as the default `sudo` account.
- Are preconfigured with a genesis state (`/node/src/chain_spec.rs`) that includes several prefunded development accounts.
To persist chain state between runs, specify a base path by running a command similar to the following:
```sh
# Create a folder to use as the db base path
$ mkdir my-chain-state
# Use that folder to store the chain state
$ ./target/release/node-template --dev --base-path ./my-chain-state/
# Check the folder structure created inside the base path after running the chain
$ ls ./my-chain-state
chains
$ ls ./my-chain-state/chains/
dev
$ ls ./my-chain-state/chains/dev
db keystore network
```
### Connect with Polkadot-JS Apps Front-End
After you start the node template locally, you can interact with it using the hosted version of the [Polkadot/Substrate Portal](https://polkadot.js.org/apps/#/explorer?rpc=ws://localhost:9944) front-end by connecting to the local node endpoint.
A hosted version is also available on [IPFS (redirect) here](https://dotapps.io/) or [IPNS (direct) here](ipns://dotapps.io/?rpc=ws%3A%2F%2F127.0.0.1%3A9944#/explorer).
You can also find the source code and instructions for hosting your own instance on the [polkadot-js/apps](https://github.com/polkadot-js/apps) repository.
### Multi-Node Local Testnet
If you want to see the multi-node consensus algorithm in action, see [Simulate a network](https://docs.substrate.io/tutorials/build-a-blockchain/simulate-network/).
## Template Structure
A Substrate project such as this consists of a number of components that are spread across a few directories.
### Node
A blockchain node is an application that allows users to participate in a blockchain network.
Substrate-based blockchain nodes expose a number of capabilities:
- Networking: Substrate nodes use the [`libp2p`](https://libp2p.io/) networking stack to allow the
nodes in the network to communicate with one another.
- Consensus: Blockchains must have a way to come to [consensus](https://docs.substrate.io/fundamentals/consensus/) on the state of the network.
Substrate makes it possible to supply custom consensus engines and also ships with several consensus mechanisms that have been built on top of [Web3 Foundation research](https://research.web3.foundation/en/latest/polkadot/NPoS/index.html).
- RPC Server: A remote procedure call (RPC) server is used to interact with Substrate nodes.
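As a quick illustration of that last point (not taken from the template's docs), a running node can be queried over JSON-RPC from the command line; the example below assumes a local node listening on the default RPC port 9944 and uses the standard `state_getRuntimeVersion` method:
```sh
# Minimal sketch: ask a locally running node for its runtime version over JSON-RPC.
# Assumes the node is up (e.g. `./target/release/node-template --dev`) and the
# default RPC port 9944.
curl -H "Content-Type: application/json" \
  -d '{"id":1, "jsonrpc":"2.0", "method":"state_getRuntimeVersion", "params":[]}' \
  http://localhost:9944
```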
There are several files in the `node` directory.
Take special note of the following:
- [`chain_spec.rs`](./node/src/chain_spec.rs): A [chain specification](https://docs.substrate.io/build/chain-spec/) is a source code file that defines a Substrate chain's initial (genesis) state.
Chain specifications are useful for development and testing, and critical when architecting the launch of a production chain.
Take note of the `development_config` and `testnet_genesis` functions.
These functions are used to define the genesis state for the local development chain configuration.
These functions identify some [well-known accounts](https://docs.substrate.io/reference/command-line-tools/subkey/) and use them to configure the blockchain's initial state.
- [`service.rs`](./node/src/service.rs): This file defines the node implementation.
Take note of the libraries that this file imports and the names of the functions it invokes.
In particular, there are references to consensus-related topics, such as the [block finalization and forks](https://docs.substrate.io/fundamentals/consensus/#finalization-and-forks) and other [consensus mechanisms](https://docs.substrate.io/fundamentals/consensus/#default-consensus-models) such as Aura for block authoring and GRANDPA for finality.
### Runtime
In Substrate, the terms "runtime" and "state transition function" are analogous.
Both terms refer to the core logic of the blockchain that is responsible for validating blocks and executing the state changes they define.
The Substrate project in this repository uses [FRAME](https://docs.substrate.io/learn/runtime-development/#frame) to construct a blockchain runtime.
FRAME allows runtime developers to declare domain-specific logic in modules called "pallets".
At the heart of FRAME is a helpful [macro language](https://docs.substrate.io/reference/frame-macros/) that makes it easy to create pallets and flexibly compose them to create blockchains that can address [a variety of needs](https://substrate.io/ecosystem/projects/).
Review the [FRAME runtime implementation](./runtime/src/lib.rs) included in this template and note the following:
- This file configures several pallets to include in the runtime.
Each pallet configuration is defined by a code block that begins with `impl $PALLET_NAME::Config for Runtime`.
- The pallets are composed into a single runtime by way of the [`construct_runtime!`](https://paritytech.github.io/substrate/master/frame_support/macro.construct_runtime.html) macro, which is part of the [core FRAME pallet library](https://docs.substrate.io/reference/frame-pallets/#system-pallets).
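As an illustrative sketch of those two points (the pallet and type names below are placeholders, not the exact contents of this runtime), the pattern looks roughly like this:
```rust
// Sketch only: a hypothetical reputation pallet is configured for the runtime...
impl pallet_reputation::Config for Runtime {
	type RuntimeEvent = RuntimeEvent;
}

// ...and the configured pallets are composed into a single runtime.
construct_runtime!(
	pub struct Runtime {
		System: frame_system,
		Balances: pallet_balances,
		TransactionPayment: pallet_transaction_payment,
		Reputation: pallet_reputation,
	}
);
```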
### Pallets
The runtime in this project is constructed using many FRAME pallets that ship with [the Substrate repository](https://github.com/paritytech/substrate/tree/master/frame) and a reputation pallet that is [defined in the `pallets`](./pallets/reputation/src/lib.rs) directory.
A FRAME pallet is composed of a number of blockchain primitives, including:
- Storage: FRAME defines a rich set of powerful [storage abstractions](https://docs.substrate.io/build/runtime-storage/) that makes it easy to use Substrate's efficient key-value database to manage the evolving state of a blockchain.
- Dispatchables: FRAME pallets define special types of functions that can be invoked (dispatched) from outside of the runtime in order to update its state.
- Events: Substrate uses [events](https://docs.substrate.io/build/events-and-errors/) to notify users of significant state changes.
- Errors: When a dispatchable fails, it returns an error.
Each pallet has its own `Config` trait which serves as a configuration interface to generically define the types and parameters it depends on.
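To make these primitives concrete, here is a minimal pallet sketch in the FRAME style used by this template; the storage item, dispatchable, event, and error are illustrative and are not taken from the reputation pallet in this repository:
```rust
#![cfg_attr(not(feature = "std"), no_std)]

// Illustrative sketch of a tiny FRAME pallet, not the pallet shipped in `pallets/reputation`.
#[frame_support::pallet]
pub mod pallet {
	use frame_support::pallet_prelude::*;
	use frame_system::pallet_prelude::*;

	#[pallet::pallet]
	pub struct Pallet<T>(_);

	/// Configuration interface: the types and parameters the pallet depends on.
	#[pallet::config]
	pub trait Config: frame_system::Config {
		/// The overarching runtime event type.
		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
	}

	/// Storage: a simple reputation score per account.
	#[pallet::storage]
	pub type Reputation<T: Config> =
		StorageMap<_, Blake2_128Concat, T::AccountId, u32, ValueQuery>;

	/// Events: notify users of significant state changes.
	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config> {
		ReputationSet { who: T::AccountId, score: u32 },
	}

	/// Errors: returned when a dispatchable fails.
	#[pallet::error]
	pub enum Error<T> {
		ScoreTooLarge,
	}

	/// Dispatchables: callable from outside the runtime to update its state.
	#[pallet::call]
	impl<T: Config> Pallet<T> {
		#[pallet::call_index(0)]
		#[pallet::weight(Weight::from_parts(10_000, 0))]
		pub fn set_reputation(origin: OriginFor<T>, who: T::AccountId, score: u32) -> DispatchResult {
			// Only the root origin may set a score in this sketch.
			ensure_root(origin)?;
			// Arbitrary cap chosen for the example.
			frame_support::ensure!(score <= 100, Error::<T>::ScoreTooLarge);
			Reputation::<T>::insert(&who, score);
			Self::deposit_event(Event::ReputationSet { who, score });
			Ok(())
		}
	}
}
```
A pallet written this way is then configured and added to the runtime as described in the previous section.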
## Alternative Installations
Instead of installing dependencies and building this source directly, consider the following alternatives.
### Nix
Install [nix](https://nixos.org/) and
[nix-direnv](https://github.com/nix-community/nix-direnv) for a fully plug-and-play
experience for setting up the development environment.
To get all the correct dependencies, activate direnv with `direnv allow`.
### Docker
Please follow the [Substrate Docker instructions here](https://github.com/paritytech/substrate/blob/master/docker/README.md) to build the Docker container with the Substrate Node Template binary.

@@ -0,0 +1,225 @@
---
title: Installation
---
This guide is for reference only; please check the latest information on getting started with Substrate
[here](https://docs.substrate.io/main-docs/install/).
This page will guide you through the **2 steps** needed to prepare a computer for **Substrate** development.
Since Substrate is built with [the Rust programming language](https://www.rust-lang.org/), the first
thing you will need to do is prepare the computer for Rust development - these steps will vary based
on the computer's operating system. Once Rust is configured, you will use its toolchains to interact
with Rust projects; the commands for Rust's toolchains will be the same for all supported,
Unix-based operating systems.
## Build dependencies
Substrate development is easiest on Unix-based operating systems like macOS or Linux. The examples
in the [Substrate Docs](https://docs.substrate.io) use Unix-style terminals to demonstrate how to
interact with Substrate from the command line.
### Ubuntu/Debian
Use a terminal shell to execute the following commands:
```bash
sudo apt update
# May prompt for location information
sudo apt install -y git clang curl libssl-dev llvm libudev-dev
```
### Arch Linux
Run these commands from a terminal:
```bash
pacman -Syu --needed --noconfirm curl git clang
```
### Fedora
Run these commands from a terminal:
```bash
sudo dnf update
sudo dnf install clang curl git openssl-devel
```
### OpenSUSE
Run these commands from a terminal:
```bash
sudo zypper install clang curl git openssl-devel llvm-devel libudev-devel
```
### macOS
> **Apple M1 ARM**
> If you have an Apple M1 ARM system on a chip, make sure that you have Apple Rosetta 2
> installed through `softwareupdate --install-rosetta`. This is only needed to run the
> `protoc` tool during the build. The build itself and the target binaries would remain native.
Open the Terminal application and execute the following commands:
```bash
# Install Homebrew if necessary https://brew.sh/
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
# Make sure Homebrew is up-to-date, install openssl
brew update
brew install openssl
```
### Windows
**_PLEASE NOTE:_** Native Windows development of Substrate is _not_ very well supported! It is _highly_
recommended to use [Windows Subsystem for Linux](https://docs.microsoft.com/en-us/windows/wsl/install-win10)
(WSL) and follow the instructions for [Ubuntu/Debian](#ubuntudebian).
Please refer to the separate
[guide for native Windows development](https://docs.substrate.io/main-docs/install/windows/).
## Rust developer environment
This guide uses the <https://rustup.rs> installer and the `rustup` tool to manage the Rust toolchain.
First install and configure `rustup`:
```bash
# Install
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
# Configure
source ~/.cargo/env
```
Configure the Rust toolchain to default to the latest stable version, add nightly and the nightly wasm target:
```bash
rustup default stable
rustup update
rustup update nightly
rustup target add wasm32-unknown-unknown --toolchain nightly
```
## Test your set-up
Now the best way to ensure that you have successfully prepared a computer for Substrate
development is to follow the steps in [our first Substrate tutorial](https://docs.substrate.io/tutorials/v3/create-your-first-substrate-chain/).
## Troubleshooting Substrate builds
Sometimes you can't get the Substrate node template
to compile out of the box. Here are some tips to help you work through that.
### Rust configuration check
To see what Rust toolchain you are presently using, run:
```bash
rustup show
```
This will show output similar to the following (Ubuntu example):
```text
Default host: x86_64-unknown-linux-gnu
rustup home: /home/user/.rustup
installed toolchains
--------------------
stable-x86_64-unknown-linux-gnu (default)
nightly-2020-10-06-x86_64-unknown-linux-gnu
nightly-x86_64-unknown-linux-gnu
installed targets for active toolchain
--------------------------------------
wasm32-unknown-unknown
x86_64-unknown-linux-gnu
active toolchain
----------------
stable-x86_64-unknown-linux-gnu (default)
rustc 1.50.0 (cb75ad5db 2021-02-10)
```
As you can see above, the default toolchain is stable, and the
`nightly-x86_64-unknown-linux-gnu` toolchain as well as its `wasm32-unknown-unknown` target is installed.
You also see that `nightly-2020-10-06-x86_64-unknown-linux-gnu` is installed, but is not used unless explicitly defined as illustrated in the [specify your nightly version](#specifying-nightly-version)
section.
### WebAssembly compilation
Substrate uses [WebAssembly](https://webassembly.org) (Wasm) to produce portable blockchain
runtimes. You will need to configure your Rust compiler to use
[`nightly` builds](https://doc.rust-lang.org/book/appendix-07-nightly-rust.html) to allow you to
compile Substrate runtime code to the Wasm target.
> There are upstream issues in Rust that need to be resolved before all of Substrate can use the stable Rust toolchain.
> [This is our tracking issue](https://github.com/paritytech/substrate/issues/1252) if you're curious as to why and how this will be resolved.
#### Latest nightly for Substrate `master`
Developers who are building Substrate _itself_ should always use the latest bug-free versions of
Rust stable and nightly. This is because the Substrate codebase follows the tip of Rust nightly,
which means that changes in Substrate often depend on upstream changes in the Rust nightly compiler.
To ensure your Rust compiler is always up to date, you should run:
```bash
rustup update
rustup update nightly
rustup target add wasm32-unknown-unknown --toolchain nightly
```
> NOTE: It may be necessary to occasionally rerun `rustup update` if a change in the upstream Substrate
> codebase depends on a new feature of the Rust compiler. When you do this, both your nightly
> and stable toolchains will be pulled to the most recent release, and for nightly, it is
> generally _not_ expected to compile WASM without error (although it very often does).
> Be sure to [specify your nightly version](#specifying-nightly-version) if you get WASM build errors
> from `rustup` and [downgrade nightly as needed](#downgrading-rust-nightly).
#### Rust nightly toolchain
If you want to guarantee that your build works on your computer as you update Rust and other
dependencies, you should use a specific Rust nightly version that is known to be
compatible with the version of Substrate you are using; this version will vary from project to
project and different projects may use different mechanisms to communicate this version to
developers. For instance, the Polkadot client specifies this information in its
[release notes](https://github.com/paritytech/polkadot/releases).
```bash
# Specify the specific nightly toolchain in the date below:
rustup install nightly-<yyyy-MM-dd>
```
#### Wasm toolchain
Now, configure the nightly version to work with the Wasm compilation target:
```bash
rustup target add wasm32-unknown-unknown --toolchain nightly-<yyyy-MM-dd>
```
### Specifying nightly version
Use the `WASM_BUILD_TOOLCHAIN` environment variable to specify the Rust nightly version a Substrate
project should use for Wasm compilation:
```bash
WASM_BUILD_TOOLCHAIN=nightly-<yyyy-MM-dd> cargo build --release
```
> Note that this only builds _the runtime_ with the specified nightly. The rest of the project will be
> compiled with **your default toolchain**, i.e. the latest installed stable toolchain.
### Downgrading Rust nightly
If your computer is configured to use the latest Rust nightly and you would like to downgrade to a
specific nightly version, follow these steps:
```bash
rustup uninstall nightly
rustup install nightly-<yyyy-MM-dd>
rustup target add wasm32-unknown-unknown --toolchain nightly-<yyyy-MM-dd>
```

@@ -0,0 +1,43 @@
{
"nodes": {
"flake-utils": {
"locked": {
"lastModified": 1678901627,
"narHash": "sha256-U02riOqrKKzwjsxc/400XnElV+UtPUQWpANPlyazjH0=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "93a2b84fc4b70d9e089d029deacc3583435c2ed6",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1679262748,
"narHash": "sha256-DQCrrAFrkxijC6haUzOC5ZoFqpcv/tg2WxnyW3np1Cc=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "60c1d71f2ba4c80178ec84523c2ca0801522e0a6",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

@@ -0,0 +1,22 @@
{
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
flake-utils.url = "github:numtide/flake-utils";
};
outputs = { self, nixpkgs, flake-utils, ... }: flake-utils.lib.eachDefaultSystem (system:
let
pkgs = import nixpkgs { inherit system; };
in
{
devShells.default = pkgs.mkShell {
packages = with pkgs; [
rustup
clang
protobuf
];
LIBCLANG_PATH = "${pkgs.libclang.lib}/lib";
};
});
}

@@ -0,0 +1,80 @@
[package]
name = "node-template"
version = "4.0.0-dev"
description = "A fresh FRAME-based Substrate node, ready for hacking."
authors = ["Substrate DevHub <https://github.com/substrate-developer-hub>"]
homepage = "https://substrate.io/"
edition = "2021"
license = "MIT-0"
publish = false
repository = "https://github.com/substrate-developer-hub/substrate-node-template/"
build = "build.rs"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
[[bin]]
name = "node-template"
[dependencies]
clap = { version = "4.4.2", features = ["derive"] }
futures = { version = "0.3.21", features = ["thread-pool"]}
sc-cli = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-core = { version = "21.0.0", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sc-executor = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sc-network = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sc-service = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sc-telemetry = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sc-transaction-pool = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sc-transaction-pool-api = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sc-offchain = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sc-statement-store = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sc-consensus-aura = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-consensus-aura = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sc-consensus = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sc-consensus-grandpa = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-consensus-grandpa = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sc-client-api = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-runtime = { version = "24.0.0", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-io = { version = "23.0.0", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-timestamp = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-inherents = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-keyring = { version = "24.0.0", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
frame-system = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
# These dependencies are used for the node template's RPCs
jsonrpsee = { version = "0.16.2", features = ["server"] }
sp-api = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sc-rpc-api = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-blockchain = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-block-builder = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sc-basic-authorship = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
substrate-frame-rpc-system = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
pallet-transaction-payment-rpc = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
# These dependencies are used for runtime benchmarking
frame-benchmarking = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
frame-benchmarking-cli = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
# Local Dependencies
node-template-runtime = { version = "4.0.0-dev", path = "../runtime" }
# CLI-specific dependencies
try-runtime-cli = { version = "0.10.0-dev", optional = true, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
[build-dependencies]
substrate-build-script-utils = { version = "3.0.0", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
[features]
default = []
# Dependencies that are only required if runtime benchmarking should be build.
runtime-benchmarks = [
"node-template-runtime/runtime-benchmarks",
"frame-benchmarking/runtime-benchmarks",
"frame-benchmarking-cli/runtime-benchmarks",
]
# Enable features that allow the runtime to be tried and debugged. Name might be subject to change
# in the near future.
try-runtime = ["node-template-runtime/try-runtime", "try-runtime-cli/try-runtime"]

@@ -0,0 +1,7 @@
use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed};
fn main() {
generate_cargo_keys();
rerun_if_git_head_changed();
}

@@ -0,0 +1,161 @@
//! Setup code for [`super::command`] which would otherwise bloat that module.
//!
//! Should only be used for benchmarking as it may break in other contexts.
use crate::service::FullClient;
use node_template_runtime as runtime;
use runtime::{AccountId, Balance, BalancesCall, SystemCall};
use sc_cli::Result;
use sc_client_api::BlockBackend;
use sp_core::{Encode, Pair};
use sp_inherents::{InherentData, InherentDataProvider};
use sp_keyring::Sr25519Keyring;
use sp_runtime::{OpaqueExtrinsic, SaturatedConversion};
use std::{sync::Arc, time::Duration};
/// Generates extrinsics for the `benchmark overhead` command.
///
/// Note: Should only be used for benchmarking.
pub struct RemarkBuilder {
client: Arc<FullClient>,
}
impl RemarkBuilder {
/// Creates a new [`Self`] from the given client.
pub fn new(client: Arc<FullClient>) -> Self {
Self { client }
}
}
impl frame_benchmarking_cli::ExtrinsicBuilder for RemarkBuilder {
fn pallet(&self) -> &str {
"system"
}
fn extrinsic(&self) -> &str {
"remark"
}
fn build(&self, nonce: u32) -> std::result::Result<OpaqueExtrinsic, &'static str> {
let acc = Sr25519Keyring::Bob.pair();
let extrinsic: OpaqueExtrinsic = create_benchmark_extrinsic(
self.client.as_ref(),
acc,
SystemCall::remark { remark: vec![] }.into(),
nonce,
)
.into();
Ok(extrinsic)
}
}
/// Generates `Balances::TransferKeepAlive` extrinsics for the benchmarks.
///
/// Note: Should only be used for benchmarking.
pub struct TransferKeepAliveBuilder {
client: Arc<FullClient>,
dest: AccountId,
value: Balance,
}
impl TransferKeepAliveBuilder {
/// Creates a new [`Self`] from the given client.
pub fn new(client: Arc<FullClient>, dest: AccountId, value: Balance) -> Self {
Self { client, dest, value }
}
}
impl frame_benchmarking_cli::ExtrinsicBuilder for TransferKeepAliveBuilder {
fn pallet(&self) -> &str {
"balances"
}
fn extrinsic(&self) -> &str {
"transfer_keep_alive"
}
fn build(&self, nonce: u32) -> std::result::Result<OpaqueExtrinsic, &'static str> {
let acc = Sr25519Keyring::Bob.pair();
let extrinsic: OpaqueExtrinsic = create_benchmark_extrinsic(
self.client.as_ref(),
acc,
BalancesCall::transfer_keep_alive { dest: self.dest.clone().into(), value: self.value }
.into(),
nonce,
)
.into();
Ok(extrinsic)
}
}
/// Create a transaction using the given `call`.
///
/// Note: Should only be used for benchmarking.
pub fn create_benchmark_extrinsic(
client: &FullClient,
sender: sp_core::sr25519::Pair,
call: runtime::RuntimeCall,
nonce: u32,
) -> runtime::UncheckedExtrinsic {
let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed");
let best_hash = client.chain_info().best_hash;
let best_block = client.chain_info().best_number;
let period = runtime::BlockHashCount::get()
.checked_next_power_of_two()
.map(|c| c / 2)
.unwrap_or(2) as u64;
let extra: runtime::SignedExtra = (
frame_system::CheckNonZeroSender::<runtime::Runtime>::new(),
frame_system::CheckSpecVersion::<runtime::Runtime>::new(),
frame_system::CheckTxVersion::<runtime::Runtime>::new(),
frame_system::CheckGenesis::<runtime::Runtime>::new(),
frame_system::CheckEra::<runtime::Runtime>::from(sp_runtime::generic::Era::mortal(
period,
best_block.saturated_into(),
)),
frame_system::CheckNonce::<runtime::Runtime>::from(nonce),
frame_system::CheckWeight::<runtime::Runtime>::new(),
pallet_transaction_payment::ChargeTransactionPayment::<runtime::Runtime>::from(0),
);
let raw_payload = runtime::SignedPayload::from_raw(
call.clone(),
extra.clone(),
(
(),
runtime::VERSION.spec_version,
runtime::VERSION.transaction_version,
genesis_hash,
best_hash,
(),
(),
(),
),
);
let signature = raw_payload.using_encoded(|e| sender.sign(e));
runtime::UncheckedExtrinsic::new_signed(
call,
sp_runtime::AccountId32::from(sender.public()).into(),
runtime::Signature::Sr25519(signature),
extra,
)
}
/// Generates inherent data for the `benchmark overhead` command.
///
/// Note: Should only be used for benchmarking.
pub fn inherent_benchmark_data() -> Result<InherentData> {
let mut inherent_data = InherentData::new();
let d = Duration::from_millis(0);
let timestamp = sp_timestamp::InherentDataProvider::new(d.into());
futures::executor::block_on(timestamp.provide_inherent_data(&mut inherent_data))
.map_err(|e| format!("creating inherent data: {:?}", e))?;
Ok(inherent_data)
}

@@ -0,0 +1,158 @@
use node_template_runtime::{
AccountId, AuraConfig, BalancesConfig, GrandpaConfig, RuntimeGenesisConfig, Signature,
SudoConfig, SystemConfig, WASM_BINARY,
};
use sc_service::ChainType;
use sp_consensus_aura::sr25519::AuthorityId as AuraId;
use sp_consensus_grandpa::AuthorityId as GrandpaId;
use sp_core::{sr25519, Pair, Public};
use sp_runtime::traits::{IdentifyAccount, Verify};
// The URL for the telemetry server.
// const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/";
/// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type.
pub type ChainSpec = sc_service::GenericChainSpec<RuntimeGenesisConfig>;
/// Generate a crypto pair from seed.
pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public {
TPublic::Pair::from_string(&format!("//{}", seed), None)
.expect("static values are valid; qed")
.public()
}
type AccountPublic = <Signature as Verify>::Signer;
/// Generate an account ID from seed.
pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId
where
AccountPublic: From<<TPublic::Pair as Pair>::Public>,
{
AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account()
}
/// Generate an Aura authority key.
pub fn authority_keys_from_seed(s: &str) -> (AuraId, GrandpaId) {
(get_from_seed::<AuraId>(s), get_from_seed::<GrandpaId>(s))
}
pub fn development_config() -> Result<ChainSpec, String> {
let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?;
Ok(ChainSpec::from_genesis(
// Name
"Development",
// ID
"dev",
ChainType::Development,
move || {
testnet_genesis(
wasm_binary,
// Initial PoA authorities
vec![authority_keys_from_seed("Alice")],
// Sudo account
get_account_id_from_seed::<sr25519::Public>("Alice"),
// Pre-funded accounts
vec![
get_account_id_from_seed::<sr25519::Public>("Alice"),
get_account_id_from_seed::<sr25519::Public>("Bob"),
get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
],
true,
)
},
// Bootnodes
vec![],
// Telemetry
None,
// Protocol ID
None,
None,
// Properties
None,
// Extensions
None,
))
}
pub fn local_testnet_config() -> Result<ChainSpec, String> {
let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?;
Ok(ChainSpec::from_genesis(
// Name
"Local Testnet",
// ID
"local_testnet",
ChainType::Local,
move || {
testnet_genesis(
wasm_binary,
// Initial PoA authorities
vec![authority_keys_from_seed("Alice"), authority_keys_from_seed("Bob")],
// Sudo account
get_account_id_from_seed::<sr25519::Public>("Alice"),
// Pre-funded accounts
vec![
get_account_id_from_seed::<sr25519::Public>("Alice"),
get_account_id_from_seed::<sr25519::Public>("Bob"),
get_account_id_from_seed::<sr25519::Public>("Charlie"),
get_account_id_from_seed::<sr25519::Public>("Dave"),
get_account_id_from_seed::<sr25519::Public>("Eve"),
get_account_id_from_seed::<sr25519::Public>("Ferdie"),
get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
get_account_id_from_seed::<sr25519::Public>("Charlie//stash"),
get_account_id_from_seed::<sr25519::Public>("Dave//stash"),
get_account_id_from_seed::<sr25519::Public>("Eve//stash"),
get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"),
],
true,
)
},
// Bootnodes
vec![],
// Telemetry
None,
// Protocol ID
None,
// Properties
None,
None,
// Extensions
None,
))
}
/// Configure initial storage state for FRAME modules.
fn testnet_genesis(
wasm_binary: &[u8],
initial_authorities: Vec<(AuraId, GrandpaId)>,
root_key: AccountId,
endowed_accounts: Vec<AccountId>,
_enable_println: bool,
) -> RuntimeGenesisConfig {
RuntimeGenesisConfig {
system: SystemConfig {
// Add Wasm runtime to storage.
code: wasm_binary.to_vec(),
..Default::default()
},
balances: BalancesConfig {
// Configure endowed accounts with initial balance of 1 << 60.
balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(),
},
aura: AuraConfig {
authorities: initial_authorities.iter().map(|x| (x.0.clone())).collect(),
},
grandpa: GrandpaConfig {
authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect(),
..Default::default()
},
sudo: SudoConfig {
// Assign network admin rights.
key: Some(root_key),
},
transaction_payment: Default::default(),
}
}

@@ -0,0 +1,54 @@
use sc_cli::RunCmd;
#[derive(Debug, clap::Parser)]
pub struct Cli {
#[command(subcommand)]
pub subcommand: Option<Subcommand>,
#[clap(flatten)]
pub run: RunCmd,
}
#[derive(Debug, clap::Subcommand)]
#[allow(clippy::large_enum_variant)]
pub enum Subcommand {
/// Key management cli utilities
#[command(subcommand)]
Key(sc_cli::KeySubcommand),
/// Build a chain specification.
BuildSpec(sc_cli::BuildSpecCmd),
/// Validate blocks.
CheckBlock(sc_cli::CheckBlockCmd),
/// Export blocks.
ExportBlocks(sc_cli::ExportBlocksCmd),
/// Export the state of a given block into a chain spec.
ExportState(sc_cli::ExportStateCmd),
/// Import blocks.
ImportBlocks(sc_cli::ImportBlocksCmd),
/// Remove the whole chain.
PurgeChain(sc_cli::PurgeChainCmd),
/// Revert the chain to a previous state.
Revert(sc_cli::RevertCmd),
/// Sub-commands concerned with benchmarking.
#[command(subcommand)]
Benchmark(frame_benchmarking_cli::BenchmarkCmd),
/// Try some command against runtime state.
#[cfg(feature = "try-runtime")]
TryRuntime(try_runtime_cli::TryRuntimeCmd),
/// Try some command against runtime state. Note: `try-runtime` feature must be enabled.
#[cfg(not(feature = "try-runtime"))]
TryRuntime,
/// Db meta columns information.
ChainInfo(sc_cli::ChainInfoCmd),
}

@@ -0,0 +1,212 @@
use crate::{
benchmarking::{inherent_benchmark_data, RemarkBuilder, TransferKeepAliveBuilder},
chain_spec,
cli::{Cli, Subcommand},
service,
};
use frame_benchmarking_cli::{BenchmarkCmd, ExtrinsicFactory, SUBSTRATE_REFERENCE_HARDWARE};
use node_template_runtime::{Block, EXISTENTIAL_DEPOSIT};
use sc_cli::SubstrateCli;
use sc_service::PartialComponents;
use sp_keyring::Sr25519Keyring;
#[cfg(feature = "try-runtime")]
use try_runtime_cli::block_building_info::timestamp_with_aura_info;
impl SubstrateCli for Cli {
fn impl_name() -> String {
"Substrate Node".into()
}
fn impl_version() -> String {
env!("SUBSTRATE_CLI_IMPL_VERSION").into()
}
fn description() -> String {
env!("CARGO_PKG_DESCRIPTION").into()
}
fn author() -> String {
env!("CARGO_PKG_AUTHORS").into()
}
fn support_url() -> String {
"support.anonymous.an".into()
}
fn copyright_start_year() -> i32 {
2017
}
fn load_spec(&self, id: &str) -> Result<Box<dyn sc_service::ChainSpec>, String> {
Ok(match id {
"dev" => Box::new(chain_spec::development_config()?),
"" | "local" => Box::new(chain_spec::local_testnet_config()?),
path =>
Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?),
})
}
}
/// Parse and run command line arguments
pub fn run() -> sc_cli::Result<()> {
let cli = Cli::from_args();
match &cli.subcommand {
Some(Subcommand::Key(cmd)) => cmd.run(&cli),
Some(Subcommand::BuildSpec(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.sync_run(|config| cmd.run(config.chain_spec, config.network))
},
Some(Subcommand::CheckBlock(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.async_run(|config| {
let PartialComponents { client, task_manager, import_queue, .. } =
service::new_partial(&config)?;
Ok((cmd.run(client, import_queue), task_manager))
})
},
Some(Subcommand::ExportBlocks(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.async_run(|config| {
let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?;
Ok((cmd.run(client, config.database), task_manager))
})
},
Some(Subcommand::ExportState(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.async_run(|config| {
let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?;
Ok((cmd.run(client, config.chain_spec), task_manager))
})
},
Some(Subcommand::ImportBlocks(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.async_run(|config| {
let PartialComponents { client, task_manager, import_queue, .. } =
service::new_partial(&config)?;
Ok((cmd.run(client, import_queue), task_manager))
})
},
Some(Subcommand::PurgeChain(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.sync_run(|config| cmd.run(config.database))
},
Some(Subcommand::Revert(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.async_run(|config| {
let PartialComponents { client, task_manager, backend, .. } =
service::new_partial(&config)?;
let aux_revert = Box::new(|client, _, blocks| {
sc_consensus_grandpa::revert(client, blocks)?;
Ok(())
});
Ok((cmd.run(client, backend, Some(aux_revert)), task_manager))
})
},
Some(Subcommand::Benchmark(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.sync_run(|config| {
// This switch needs to be in the client, since the client decides
// which sub-commands it wants to support.
match cmd {
BenchmarkCmd::Pallet(cmd) => {
if !cfg!(feature = "runtime-benchmarks") {
return Err(
"Runtime benchmarking wasn't enabled when building the node. \
You can enable it with `--features runtime-benchmarks`."
.into(),
)
}
cmd.run::<Block, ()>(config)
},
BenchmarkCmd::Block(cmd) => {
let PartialComponents { client, .. } = service::new_partial(&config)?;
cmd.run(client)
},
#[cfg(not(feature = "runtime-benchmarks"))]
BenchmarkCmd::Storage(_) => Err(
"Storage benchmarking can be enabled with `--features runtime-benchmarks`."
.into(),
),
#[cfg(feature = "runtime-benchmarks")]
BenchmarkCmd::Storage(cmd) => {
let PartialComponents { client, backend, .. } =
service::new_partial(&config)?;
let db = backend.expose_db();
let storage = backend.expose_storage();
cmd.run(config, client, db, storage)
},
BenchmarkCmd::Overhead(cmd) => {
let PartialComponents { client, .. } = service::new_partial(&config)?;
let ext_builder = RemarkBuilder::new(client.clone());
cmd.run(
config,
client,
inherent_benchmark_data()?,
Vec::new(),
&ext_builder,
)
},
BenchmarkCmd::Extrinsic(cmd) => {
let PartialComponents { client, .. } = service::new_partial(&config)?;
// Register the *Remark* and *TKA* builders.
let ext_factory = ExtrinsicFactory(vec![
Box::new(RemarkBuilder::new(client.clone())),
Box::new(TransferKeepAliveBuilder::new(
client.clone(),
Sr25519Keyring::Alice.to_account_id(),
EXISTENTIAL_DEPOSIT,
)),
]);
cmd.run(client, inherent_benchmark_data()?, Vec::new(), &ext_factory)
},
BenchmarkCmd::Machine(cmd) =>
cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()),
}
})
},
#[cfg(feature = "try-runtime")]
Some(Subcommand::TryRuntime(cmd)) => {
use crate::service::ExecutorDispatch;
use sc_executor::{sp_wasm_interface::ExtendedHostFunctions, NativeExecutionDispatch};
let runner = cli.create_runner(cmd)?;
runner.async_run(|config| {
// we don't need any of the components of new_partial, just a runtime, or a task
// manager to do `async_run`.
let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry);
let task_manager =
sc_service::TaskManager::new(config.tokio_handle.clone(), registry)
.map_err(|e| sc_cli::Error::Service(sc_service::Error::Prometheus(e)))?;
let info_provider = timestamp_with_aura_info(6000);
Ok((
cmd.run::<Block, ExtendedHostFunctions<
sp_io::SubstrateHostFunctions,
<ExecutorDispatch as NativeExecutionDispatch>::ExtendHostFunctions,
>, _>(Some(info_provider)),
task_manager,
))
})
},
#[cfg(not(feature = "try-runtime"))]
Some(Subcommand::TryRuntime) => Err("TryRuntime wasn't enabled when building the node. \
You can enable it with `--features try-runtime`."
.into()),
Some(Subcommand::ChainInfo(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.sync_run(|config| cmd.run::<Block>(&config))
},
None => {
let runner = cli.create_runner(&cli.run)?;
runner.run_node_until_exit(|config| async move {
service::new_full(config).map_err(sc_cli::Error::Service)
})
},
}
}

@@ -0,0 +1,3 @@
pub mod chain_spec;
pub mod rpc;
pub mod service;

@@ -0,0 +1,14 @@
//! Substrate Node Template CLI library.
#![warn(missing_docs)]
mod chain_spec;
#[macro_use]
mod service;
mod benchmarking;
mod cli;
mod command;
mod rpc;
fn main() -> sc_cli::Result<()> {
command::run()
}

@@ -0,0 +1,57 @@
//! A collection of node-specific RPC methods.
//! Substrate provides the `sc-rpc` crate, which defines the core RPC layer
//! used by Substrate nodes. This file extends those RPC definitions with
//! capabilities that are specific to this project's runtime configuration.
#![warn(missing_docs)]
use std::sync::Arc;
use jsonrpsee::RpcModule;
use node_template_runtime::{opaque::Block, AccountId, Balance, Nonce};
use sc_transaction_pool_api::TransactionPool;
use sp_api::ProvideRuntimeApi;
use sp_block_builder::BlockBuilder;
use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata};
pub use sc_rpc_api::DenyUnsafe;
/// Full client dependencies.
pub struct FullDeps<C, P> {
/// The client instance to use.
pub client: Arc<C>,
/// Transaction pool instance.
pub pool: Arc<P>,
/// Whether to deny unsafe calls
pub deny_unsafe: DenyUnsafe,
}
/// Instantiate all full RPC extensions.
pub fn create_full<C, P>(
deps: FullDeps<C, P>,
) -> Result<RpcModule<()>, Box<dyn std::error::Error + Send + Sync>>
where
C: ProvideRuntimeApi<Block>,
C: HeaderBackend<Block> + HeaderMetadata<Block, Error = BlockChainError> + 'static,
C: Send + Sync + 'static,
C::Api: substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>,
C::Api: BlockBuilder<Block>,
P: TransactionPool + 'static,
{
use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer};
use substrate_frame_rpc_system::{System, SystemApiServer};
let mut module = RpcModule::new(());
let FullDeps { client, pool, deny_unsafe } = deps;
module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?;
module.merge(TransactionPayment::new(client).into_rpc())?;
// Extend this RPC with a custom API by using the following syntax.
// `YourRpcStruct` should have a reference to a client, which is needed
// to call into the runtime.
// `module.merge(YourRpcTrait::into_rpc(YourRpcStruct::new(ReferenceToClient, ...)))?;`
Ok(module)
}

@@ -0,0 +1,331 @@
//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
use futures::FutureExt;
use node_template_runtime::{self, opaque::Block, RuntimeApi};
use sc_client_api::{Backend, BlockBackend};
use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams};
use sc_consensus_grandpa::SharedVoterState;
pub use sc_executor::NativeElseWasmExecutor;
use sc_service::{error::Error as ServiceError, Configuration, TaskManager, WarpSyncParams};
use sc_telemetry::{Telemetry, TelemetryWorker};
use sc_transaction_pool_api::OffchainTransactionPoolFactory;
use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
use std::{sync::Arc, time::Duration};
// Our native executor instance.
pub struct ExecutorDispatch;
impl sc_executor::NativeExecutionDispatch for ExecutorDispatch {
/// Only enable the benchmarking host functions when we actually want to benchmark.
#[cfg(feature = "runtime-benchmarks")]
type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions;
/// Otherwise we only use the default Substrate host functions.
#[cfg(not(feature = "runtime-benchmarks"))]
type ExtendHostFunctions = ();
fn dispatch(method: &str, data: &[u8]) -> Option<Vec<u8>> {
node_template_runtime::api::dispatch(method, data)
}
fn native_version() -> sc_executor::NativeVersion {
node_template_runtime::native_version()
}
}
pub(crate) type FullClient =
sc_service::TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<ExecutorDispatch>>;
type FullBackend = sc_service::TFullBackend<Block>;
type FullSelectChain = sc_consensus::LongestChain<FullBackend, Block>;
#[allow(clippy::type_complexity)]
pub fn new_partial(
config: &Configuration,
) -> Result<
sc_service::PartialComponents<
FullClient,
FullBackend,
FullSelectChain,
sc_consensus::DefaultImportQueue<Block, FullClient>,
sc_transaction_pool::FullPool<Block, FullClient>,
(
sc_consensus_grandpa::GrandpaBlockImport<
FullBackend,
Block,
FullClient,
FullSelectChain,
>,
sc_consensus_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
Option<Telemetry>,
),
>,
ServiceError,
> {
let telemetry = config
.telemetry_endpoints
.clone()
.filter(|x| !x.is_empty())
.map(|endpoints| -> Result<_, sc_telemetry::Error> {
let worker = TelemetryWorker::new(16)?;
let telemetry = worker.handle().new_telemetry(endpoints);
Ok((worker, telemetry))
})
.transpose()?;
let executor = sc_service::new_native_or_wasm_executor(config);
let (client, backend, keystore_container, task_manager) =
sc_service::new_full_parts::<Block, RuntimeApi, _>(
config,
telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
executor,
)?;
let client = Arc::new(client);
let telemetry = telemetry.map(|(worker, telemetry)| {
task_manager.spawn_handle().spawn("telemetry", None, worker.run());
telemetry
});
let select_chain = sc_consensus::LongestChain::new(backend.clone());
let transaction_pool = sc_transaction_pool::BasicPool::new_full(
config.transaction_pool.clone(),
config.role.is_authority().into(),
config.prometheus_registry(),
task_manager.spawn_essential_handle(),
client.clone(),
);
let (grandpa_block_import, grandpa_link) = sc_consensus_grandpa::block_import(
client.clone(),
&client,
select_chain.clone(),
telemetry.as_ref().map(|x| x.handle()),
)?;
let slot_duration = sc_consensus_aura::slot_duration(&*client)?;
let import_queue =
sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _>(ImportQueueParams {
block_import: grandpa_block_import.clone(),
justification_import: Some(Box::new(grandpa_block_import.clone())),
client: client.clone(),
create_inherent_data_providers: move |_, ()| async move {
let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
let slot =
sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
*timestamp,
slot_duration,
);
Ok((slot, timestamp))
},
spawner: &task_manager.spawn_essential_handle(),
registry: config.prometheus_registry(),
check_for_equivocation: Default::default(),
telemetry: telemetry.as_ref().map(|x| x.handle()),
compatibility_mode: Default::default(),
})?;
Ok(sc_service::PartialComponents {
client,
backend,
task_manager,
import_queue,
keystore_container,
select_chain,
transaction_pool,
other: (grandpa_block_import, grandpa_link, telemetry),
})
}
/// Builds a new service for a full client.
pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
let sc_service::PartialComponents {
client,
backend,
mut task_manager,
import_queue,
keystore_container,
select_chain,
transaction_pool,
other: (block_import, grandpa_link, mut telemetry),
} = new_partial(&config)?;
let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network);
let grandpa_protocol_name = sc_consensus_grandpa::protocol_standard_name(
&client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"),
&config.chain_spec,
);
net_config.add_notification_protocol(sc_consensus_grandpa::grandpa_peers_set_config(
grandpa_protocol_name.clone(),
));
let warp_sync = Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new(
backend.clone(),
grandpa_link.shared_authority_set().clone(),
Vec::default(),
));
let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) =
sc_service::build_network(sc_service::BuildNetworkParams {
config: &config,
net_config,
client: client.clone(),
transaction_pool: transaction_pool.clone(),
spawn_handle: task_manager.spawn_handle(),
import_queue,
block_announce_validator_builder: None,
warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)),
})?;
if config.offchain_worker.enabled {
task_manager.spawn_handle().spawn(
"offchain-workers-runner",
"offchain-worker",
sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions {
runtime_api_provider: client.clone(),
is_validator: config.role.is_authority(),
keystore: Some(keystore_container.keystore()),
offchain_db: backend.offchain_storage(),
transaction_pool: Some(OffchainTransactionPoolFactory::new(
transaction_pool.clone(),
)),
network_provider: network.clone(),
enable_http_requests: true,
custom_extensions: |_| vec![],
})
.run(client.clone(), task_manager.spawn_handle())
.boxed(),
);
}
let role = config.role.clone();
let force_authoring = config.force_authoring;
let backoff_authoring_blocks: Option<()> = None;
let name = config.network.node_name.clone();
let enable_grandpa = !config.disable_grandpa;
let prometheus_registry = config.prometheus_registry().cloned();
let rpc_extensions_builder = {
let client = client.clone();
let pool = transaction_pool.clone();
Box::new(move |deny_unsafe, _| {
let deps =
crate::rpc::FullDeps { client: client.clone(), pool: pool.clone(), deny_unsafe };
crate::rpc::create_full(deps).map_err(Into::into)
})
};
let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
network: network.clone(),
client: client.clone(),
keystore: keystore_container.keystore(),
task_manager: &mut task_manager,
transaction_pool: transaction_pool.clone(),
rpc_builder: rpc_extensions_builder,
backend,
system_rpc_tx,
tx_handler_controller,
sync_service: sync_service.clone(),
config,
telemetry: telemetry.as_mut(),
})?;
if role.is_authority() {
let proposer_factory = sc_basic_authorship::ProposerFactory::new(
task_manager.spawn_handle(),
client.clone(),
transaction_pool.clone(),
prometheus_registry.as_ref(),
telemetry.as_ref().map(|x| x.handle()),
);
let slot_duration = sc_consensus_aura::slot_duration(&*client)?;
let aura = sc_consensus_aura::start_aura::<AuraPair, _, _, _, _, _, _, _, _, _, _>(
StartAuraParams {
slot_duration,
client,
select_chain,
block_import,
proposer_factory,
create_inherent_data_providers: move |_, ()| async move {
let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
let slot =
sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
*timestamp,
slot_duration,
);
Ok((slot, timestamp))
},
force_authoring,
backoff_authoring_blocks,
keystore: keystore_container.keystore(),
sync_oracle: sync_service.clone(),
justification_sync_link: sync_service.clone(),
block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32),
max_block_proposal_slot_portion: None,
telemetry: telemetry.as_ref().map(|x| x.handle()),
compatibility_mode: Default::default(),
},
)?;
// the AURA authoring task is considered essential, i.e. if it
// fails we take down the service with it.
task_manager
.spawn_essential_handle()
.spawn_blocking("aura", Some("block-authoring"), aura);
}
if enable_grandpa {
// if the node isn't actively participating in consensus then it doesn't
// need a keystore, regardless of which protocol we use below.
let keystore = if role.is_authority() { Some(keystore_container.keystore()) } else { None };
let grandpa_config = sc_consensus_grandpa::Config {
// FIXME #1578 make this available through chainspec
gossip_duration: Duration::from_millis(333),
justification_period: 512,
name: Some(name),
observer_enabled: false,
keystore,
local_role: role,
telemetry: telemetry.as_ref().map(|x| x.handle()),
protocol_name: grandpa_protocol_name,
};
// start the full GRANDPA voter
// NOTE: non-authorities could run the GRANDPA observer protocol, but at
// this point the full voter should provide better guarantees of block
// and vote data availability than the observer. The observer has not
// been tested extensively yet and having most nodes in a network run it
// could lead to finality stalls.
let grandpa_config = sc_consensus_grandpa::GrandpaParams {
config: grandpa_config,
link: grandpa_link,
network,
sync: Arc::new(sync_service),
voting_rule: sc_consensus_grandpa::VotingRulesBuilder::default().build(),
prometheus_registry,
shared_voter_state: SharedVoterState::empty(),
telemetry: telemetry.as_ref().map(|x| x.handle()),
offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool),
};
// the GRANDPA voter task is considered infallible, i.e.
// if it fails we take down the service with it.
task_manager.spawn_essential_handle().spawn_blocking(
"grandpa-voter",
None,
sc_consensus_grandpa::run_grandpa_voter(grandpa_config)?,
);
}
network_starter.start_network();
Ok(task_manager)
}

View File

@ -0,0 +1,33 @@
[package]
name = "pallet-reputation"
version = "4.0.0-dev"
edition = "2021"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [
"derive",
] }
scale-info = { version = "2.5.0", default-features = false, features = ["derive"] }
frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
frame-support = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
frame-system = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
[dev-dependencies]
sp-core = { version = "21.0.0", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-io = { version = "23.0.0", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-runtime = { version = "24.0.0", git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
[features]
default = ["std"]
std = [
"codec/std",
"frame-benchmarking?/std",
"frame-support/std",
"frame-system/std",
"scale-info/std",
]
runtime-benchmarks = [
	"frame-benchmarking/runtime-benchmarks",
	"frame-support/runtime-benchmarks",
	"frame-system/runtime-benchmarks",
]
try-runtime = ["frame-support/try-runtime", "frame-system/try-runtime"]

View File

@ -0,0 +1 @@
License: MIT-0

View File

@ -0,0 +1,35 @@
//! Benchmarking setup for pallet-reputation
#![cfg(feature = "runtime-benchmarks")]
use super::*;
#[allow(unused)]
use crate::Pallet as Template;
use frame_benchmarking::v2::*;
use frame_system::RawOrigin;
#[benchmarks]
mod benchmarks {
use super::*;
	#[benchmark]
	fn set_rep() {
		let rep = 100u32;
		let caller: T::AccountId = whitelisted_caller();
		let target: T::AccountId = whitelisted_caller();
		#[extrinsic_call]
		set_rep(RawOrigin::Signed(caller), target.clone(), rep);
		// The score must now be stored for the target account.
		assert_eq!(Reputation::<T>::get(&target), rep);
	}
impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test);
}

View File

@ -0,0 +1,67 @@
#![cfg_attr(not(feature = "std"), no_std)]
pub use pallet::*;
#[cfg(test)]
mod mock;
#[cfg(test)]
mod tests;
#[cfg(feature = "runtime-benchmarks")]
mod benchmarking;
pub mod weights;
pub use weights::*;
#[frame_support::pallet(dev_mode)]
pub mod pallet {
use super::*;
use frame_support::pallet_prelude::*;
use frame_system::pallet_prelude::*;
#[pallet::pallet]
pub struct Pallet<T>(_);
#[pallet::config]
pub trait Config: frame_system::Config {
type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
type WeightInfo: WeightInfo;
}
#[pallet::storage]
#[pallet::getter(fn reputation)]
pub type Reputation<T: Config> = StorageMap<_, Blake2_128Concat, T::AccountId, u32, ValueQuery>;
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event<T: Config> {
//SomethingStored { something: u32, who: T::AccountId },
}
// Errors inform users that something went wrong.
#[pallet::error]
pub enum Error<T> {
NoneValue,
StorageOverflow,
}
#[pallet::call]
impl<T: Config> Pallet<T> {
		/// Set the reputation score of `account` to `rep`.
		#[pallet::call_index(0)]
		pub fn set_rep(origin: OriginFor<T>, account: T::AccountId, rep: u32) -> DispatchResult {
			// Any signed origin may set a score for now; the signer is not used further yet.
			let _who = ensure_signed(origin)?;
			Reputation::<T>::set(account, rep);
			Ok(())
		}
		// A follow-up extrinsic could derive the score from observed behaviour data
		// instead of taking it directly; a minimal sketch follows this file.
		//#[pallet::call_index(1)]
		//#[pallet::weight(T::WeightInfo::do_something())]
		//pub fn update_rep(origin: OriginFor<T>, account: T::AccountId, data: Vec<u8>) -> DispatchResult {
		//	let _who = ensure_signed(origin)?;
		//	let score = todo!("derive a score from `data`");
		//	Reputation::<T>::set(account, score);
		//	Ok(())
		//}
}
}
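The commented-out call above gestures at deriving the score from observed behaviour data instead of accepting it directly. A minimal sketch of how such a call could sit in the same `#[pallet::call]` block, assuming a fixed 32-byte behaviour report and a placeholder byte-sum scoring rule (the `update_rep` name, the payload shape and the scoring are assumptions, not part of this commit):

	/// Update the reputation of `account` from a raw behaviour report.
	#[pallet::call_index(1)]
	pub fn update_rep(origin: OriginFor<T>, account: T::AccountId, data: [u8; 32]) -> DispatchResult {
		let _who = ensure_signed(origin)?;
		// Placeholder scoring: sum the report bytes; a real implementation would weigh
		// the reported behaviour properly before touching storage.
		let score: u32 = data.iter().map(|b| u32::from(*b)).sum();
		Reputation::<T>::mutate(&account, |rep| *rep = rep.saturating_add(score));
		Ok(())
	}

In the mock runtime this would be exercised the same way as `set_rep`, e.g. `TemplateModule::update_rep(RuntimeOrigin::signed(1), 2, [1u8; 32])`, which would add 32 to account 2's score.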

View File

@ -0,0 +1,54 @@
use crate as pallet_template;
use frame_support::traits::{ConstU16, ConstU64};
use sp_core::H256;
use sp_runtime::{
traits::{BlakeTwo256, IdentityLookup},
BuildStorage,
};
type Block = frame_system::mocking::MockBlock<Test>;
// Configure a mock runtime to test the pallet.
frame_support::construct_runtime!(
pub enum Test
{
System: frame_system,
TemplateModule: pallet_template,
}
);
impl frame_system::Config for Test {
type BaseCallFilter = frame_support::traits::Everything;
type BlockWeights = ();
type BlockLength = ();
type DbWeight = ();
type RuntimeOrigin = RuntimeOrigin;
type RuntimeCall = RuntimeCall;
type Nonce = u64;
type Hash = H256;
type Hashing = BlakeTwo256;
type AccountId = u64;
type Lookup = IdentityLookup<Self::AccountId>;
type Block = Block;
type RuntimeEvent = RuntimeEvent;
type BlockHashCount = ConstU64<250>;
type Version = ();
type PalletInfo = PalletInfo;
type AccountData = ();
type OnNewAccount = ();
type OnKilledAccount = ();
type SystemWeightInfo = ();
type SS58Prefix = ConstU16<42>;
type OnSetCode = ();
type MaxConsumers = frame_support::traits::ConstU32<16>;
}
impl pallet_template::Config for Test {
type RuntimeEvent = RuntimeEvent;
type WeightInfo = ();
}
// Build genesis storage according to the mock runtime.
pub fn new_test_ext() -> sp_io::TestExternalities {
frame_system::GenesisConfig::<Test>::default().build_storage().unwrap().into()
}

View File

@ -0,0 +1,27 @@
use crate::mock::*;
use frame_support::assert_ok;
#[test]
fn set_rep_stores_the_reputation() {
	new_test_ext().execute_with(|| {
		// Dispatch a signed extrinsic that assigns account 2 a reputation of 42.
		assert_ok!(TemplateModule::set_rep(RuntimeOrigin::signed(1), 2, 42));
		// Read pallet storage and assert the expected result.
		assert_eq!(TemplateModule::reputation(2), 42);
	});
}
#[test]
fn reputation_defaults_to_zero() {
	new_test_ext().execute_with(|| {
		// `ValueQuery` storage returns the default score for accounts that were never rated.
		assert_eq!(TemplateModule::reputation(7), 0);
	});
}

View File

@ -0,0 +1,90 @@
//! Autogenerated weights for pallet_template
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
//! DATE: 2023-04-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
//! HOSTNAME: `Alexs-MacBook-Pro-2.local`, CPU: `<UNKNOWN>`
//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024
// Executed Command:
// ../../target/release/node-template
// benchmark
// pallet
// --chain
// dev
// --pallet
// pallet_template
// --extrinsic
// *
// --steps=50
// --repeat=20
// --wasm-execution=compiled
// --output
// pallets/template/src/weights.rs
// --template
// ../../.maintain/frame-weight-template.hbs
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
#![allow(unused_imports)]
use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
use core::marker::PhantomData;
/// Weight functions needed for pallet_template.
pub trait WeightInfo {
fn do_something() -> Weight;
fn cause_error() -> Weight;
}
/// Weights for pallet_template using the Substrate node and recommended hardware.
pub struct SubstrateWeight<T>(PhantomData<T>);
impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
/// Storage: TemplateModule Something (r:0 w:1)
/// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
fn do_something() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
// Minimum execution time: 8_000_000 picoseconds.
Weight::from_parts(9_000_000, 0)
.saturating_add(T::DbWeight::get().writes(1_u64))
}
/// Storage: TemplateModule Something (r:1 w:1)
/// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
fn cause_error() -> Weight {
// Proof Size summary in bytes:
// Measured: `32`
// Estimated: `1489`
// Minimum execution time: 6_000_000 picoseconds.
Weight::from_parts(6_000_000, 1489)
.saturating_add(T::DbWeight::get().reads(1_u64))
.saturating_add(T::DbWeight::get().writes(1_u64))
}
}
// For backwards compatibility and tests
impl WeightInfo for () {
/// Storage: TemplateModule Something (r:0 w:1)
/// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
fn do_something() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
// Minimum execution time: 8_000_000 picoseconds.
Weight::from_parts(9_000_000, 0)
.saturating_add(RocksDbWeight::get().writes(1_u64))
}
/// Storage: TemplateModule Something (r:1 w:1)
/// Proof: TemplateModule Something (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
fn cause_error() -> Weight {
// Proof Size summary in bytes:
// Measured: `32`
// Estimated: `1489`
// Minimum execution time: 6_000_000 picoseconds.
Weight::from_parts(6_000_000, 1489)
.saturating_add(RocksDbWeight::get().reads(1_u64))
.saturating_add(RocksDbWeight::get().writes(1_u64))
}
}
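The `WeightInfo` trait above still carries the template's `do_something`/`cause_error` entries, and the pallet itself currently relies on `dev_mode` default weights. A minimal sketch, assuming the `set_rep` benchmark has been re-run so that this file gains a `fn set_rep() -> Weight` entry, of how the call would then be annotated once `dev_mode` is dropped:

	#[pallet::call_index(0)]
	#[pallet::weight(T::WeightInfo::set_rep())]
	pub fn set_rep(origin: OriginFor<T>, account: T::AccountId, rep: u32) -> DispatchResult {
		let _who = ensure_signed(origin)?;
		Reputation::<T>::set(account, rep);
		Ok(())
	}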

View File

@ -0,0 +1,112 @@
[package]
name = "node-template-runtime"
version = "4.0.0-dev"
edition = "2021"
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] }
scale-info = { version = "2.5.0", default-features = false, features = ["derive"] }
pallet-aura = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
pallet-balances = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
frame-support = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
pallet-grandpa = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
pallet-sudo = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
frame-system = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
frame-try-runtime = { version = "0.10.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", optional = true , branch = "polkadot-v1.0.0" }
pallet-timestamp = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
frame-executive = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-api = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-block-builder = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-consensus-aura = { version = "0.10.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-consensus-grandpa = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-core = { version = "21.0.0", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-inherents = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-offchain = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-runtime = { version = "24.0.0", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-session = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-std = { version = "8.0.0", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-transaction-pool = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
sp-version = { version = "22.0.0", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
pallet-identity = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
# Used for the node template's RPCs
frame-system-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
pallet-transaction-payment-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", branch = "polkadot-v1.0.0" }
# Used for runtime benchmarking
frame-benchmarking = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", optional = true , branch = "polkadot-v1.0.0" }
frame-system-benchmarking = { version = "4.0.0-dev", default-features = false, git = "https://github.com/paritytech/substrate.git", optional = true , branch = "polkadot-v1.0.0" }
# Local Dependencies
pallet-reputation = { version = "4.0.0-dev", default-features = false, path = "../pallets/reputation" }
[build-dependencies]
substrate-wasm-builder = { version = "5.0.0-dev", git = "https://github.com/paritytech/substrate.git", optional = true , branch = "polkadot-v1.0.0" }
[features]
default = ["std"]
std = [
"frame-try-runtime?/std",
"frame-system-benchmarking?/std",
"frame-benchmarking?/std",
"codec/std",
"scale-info/std",
"frame-executive/std",
"frame-support/std",
"frame-system-rpc-runtime-api/std",
"frame-system/std",
"frame-try-runtime/std",
"pallet-aura/std",
"pallet-balances/std",
"pallet-grandpa/std",
"pallet-sudo/std",
"pallet-reputation/std",
"pallet-timestamp/std",
"pallet-transaction-payment-rpc-runtime-api/std",
"pallet-transaction-payment/std",
"pallet-identity/std",
"sp-api/std",
"sp-block-builder/std",
"sp-consensus-aura/std",
"sp-consensus-grandpa/std",
"sp-core/std",
"sp-inherents/std",
"sp-offchain/std",
"sp-runtime/std",
"sp-session/std",
"sp-std/std",
"sp-transaction-pool/std",
"sp-version/std",
"substrate-wasm-builder",
]
runtime-benchmarks = [
"frame-benchmarking/runtime-benchmarks",
"frame-support/runtime-benchmarks",
"frame-system-benchmarking/runtime-benchmarks",
"frame-system/runtime-benchmarks",
"pallet-balances/runtime-benchmarks",
"pallet-grandpa/runtime-benchmarks",
"pallet-sudo/runtime-benchmarks",
"pallet-reputation/runtime-benchmarks",
"pallet-timestamp/runtime-benchmarks",
"sp-runtime/runtime-benchmarks",
]
try-runtime = [
"frame-try-runtime/try-runtime",
"frame-executive/try-runtime",
"frame-system/try-runtime",
"frame-support/try-runtime",
"pallet-aura/try-runtime",
"pallet-balances/try-runtime",
"pallet-grandpa/try-runtime",
"pallet-sudo/try-runtime",
"pallet-reputation/try-runtime",
"pallet-timestamp/try-runtime",
"pallet-transaction-payment/try-runtime",
]

View File

@ -0,0 +1,10 @@
fn main() {
#[cfg(feature = "std")]
{
substrate_wasm_builder::WasmBuilder::new()
.with_current_project()
.export_heap_base()
.import_memory()
.build();
}
}

View File

@ -0,0 +1,570 @@
#![cfg_attr(not(feature = "std"), no_std)]
// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256.
#![recursion_limit = "256"]
// Make the WASM binary available.
#[cfg(feature = "std")]
include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));
use pallet_grandpa::AuthorityId as GrandpaId;
use sp_api::impl_runtime_apis;
use sp_consensus_aura::sr25519::AuthorityId as AuraId;
use sp_core::{crypto::KeyTypeId, OpaqueMetadata};
use sp_runtime::{
create_runtime_str, generic, impl_opaque_keys,
traits::{
AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, One, Verify,
},
transaction_validity::{TransactionSource, TransactionValidity},
ApplyExtrinsicResult, MultiSignature,
};
use sp_std::prelude::*;
#[cfg(feature = "std")]
use sp_version::NativeVersion;
use sp_version::RuntimeVersion;
// A few exports that help ease life for downstream crates.
pub use frame_support::{
construct_runtime, parameter_types,
traits::{
ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, KeyOwnerProofSystem, Randomness,
StorageInfo,
},
weights::{
constants::{
BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND,
},
IdentityFee, Weight,
},
StorageValue,
};
pub use frame_system::Call as SystemCall;
pub use pallet_balances::Call as BalancesCall;
pub use pallet_timestamp::Call as TimestampCall;
use pallet_transaction_payment::{ConstFeeMultiplier, CurrencyAdapter, Multiplier};
#[cfg(any(feature = "std", test))]
pub use sp_runtime::BuildStorage;
pub use sp_runtime::{Perbill, Permill};
/// Import the reputation pallet.
pub use pallet_reputation;
/// An index to a block.
pub type BlockNumber = u32;
/// Alias to 512-bit hash when used in the context of a transaction signature on the chain.
pub type Signature = MultiSignature;
/// Some way of identifying an account on the chain. We intentionally make it equivalent
/// to the public key of our transaction signing scheme.
pub type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId;
/// Balance of an account.
pub type Balance = u128;
/// Index of a transaction in the chain.
pub type Nonce = u32;
/// A hash of some data used by the chain.
pub type Hash = sp_core::H256;
/// Opaque types. These are used by the CLI to instantiate machinery that don't need to know
/// the specifics of the runtime. They can then be made to be agnostic over specific formats
/// of data like extrinsics, allowing for them to continue syncing the network through upgrades
/// to even the core data structures.
pub mod opaque {
use super::*;
pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic;
/// Opaque block header type.
pub type Header = generic::Header<BlockNumber, BlakeTwo256>;
/// Opaque block type.
pub type Block = generic::Block<Header, UncheckedExtrinsic>;
/// Opaque block identifier type.
pub type BlockId = generic::BlockId<Block>;
impl_opaque_keys! {
pub struct SessionKeys {
pub aura: Aura,
pub grandpa: Grandpa,
}
}
}
// To learn more about runtime versioning, see:
// https://docs.substrate.io/main-docs/build/upgrade#runtime-versioning
#[sp_version::runtime_version]
pub const VERSION: RuntimeVersion = RuntimeVersion {
spec_name: create_runtime_str!("node-template"),
impl_name: create_runtime_str!("node-template"),
authoring_version: 1,
// The version of the runtime specification. A full node will not attempt to use its native
// runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`,
// `spec_version`, and `authoring_version` are the same between Wasm and native.
// This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use
// the compatible custom types.
spec_version: 100,
impl_version: 1,
apis: RUNTIME_API_VERSIONS,
transaction_version: 1,
state_version: 1,
};
/// This determines the average expected block time that we are targeting.
/// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`.
/// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked
/// up by `pallet_aura` to implement `fn slot_duration()`.
///
/// Change this to adjust the block time.
pub const MILLISECS_PER_BLOCK: u64 = 6000;
// NOTE: Currently it is not possible to change the slot duration after the chain has started.
// Attempting to do so will brick block production.
pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK;
// Time is measured by number of blocks.
pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber);
pub const HOURS: BlockNumber = MINUTES * 60;
pub const DAYS: BlockNumber = HOURS * 24;
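// Worked example for orientation: with MILLISECS_PER_BLOCK = 6000 (a 6 s target block
// time), MINUTES = 60_000 / 6_000 = 10 blocks, HOURS = 10 * 60 = 600 blocks and
// DAYS = 600 * 24 = 14_400 blocks.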
/// The version information used to identify this runtime when compiled natively.
#[cfg(feature = "std")]
pub fn native_version() -> NativeVersion {
NativeVersion { runtime_version: VERSION, can_author_with: Default::default() }
}
const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75);
parameter_types! {
pub const BlockHashCount: BlockNumber = 2400;
pub const Version: RuntimeVersion = VERSION;
/// We allow for 2 seconds of compute with a 6 second average block time.
pub BlockWeights: frame_system::limits::BlockWeights =
frame_system::limits::BlockWeights::with_sensible_defaults(
Weight::from_parts(2u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX),
NORMAL_DISPATCH_RATIO,
);
pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength
::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO);
pub const SS58Prefix: u8 = 42;
}
// Configure FRAME pallets to include in runtime.
impl frame_system::Config for Runtime {
/// The basic call filter to use in dispatchable.
type BaseCallFilter = frame_support::traits::Everything;
/// The block type for the runtime.
type Block = Block;
/// Block & extrinsics weights: base values and limits.
type BlockWeights = BlockWeights;
/// The maximum length of a block (in bytes).
type BlockLength = BlockLength;
/// The identifier used to distinguish between accounts.
type AccountId = AccountId;
/// The aggregated dispatch type that is available for extrinsics.
type RuntimeCall = RuntimeCall;
/// The lookup mechanism to get account ID from whatever is passed in dispatchers.
type Lookup = AccountIdLookup<AccountId, ()>;
/// The type for storing how many extrinsics an account has signed.
type Nonce = Nonce;
/// The type for hashing blocks and tries.
type Hash = Hash;
/// The hashing algorithm used.
type Hashing = BlakeTwo256;
/// The ubiquitous event type.
type RuntimeEvent = RuntimeEvent;
/// The ubiquitous origin type.
type RuntimeOrigin = RuntimeOrigin;
/// Maximum number of block number to block hash mappings to keep (oldest pruned first).
type BlockHashCount = BlockHashCount;
/// The weight of database operations that the runtime can invoke.
type DbWeight = RocksDbWeight;
/// Version of the runtime.
type Version = Version;
/// Converts a module to the index of the module in `construct_runtime!`.
///
/// This type is being generated by `construct_runtime!`.
type PalletInfo = PalletInfo;
/// What to do if a new account is created.
type OnNewAccount = ();
/// What to do if an account is fully reaped from the system.
type OnKilledAccount = ();
/// The data to be stored in an account.
type AccountData = pallet_balances::AccountData<Balance>;
/// Weight information for the extrinsics of this pallet.
type SystemWeightInfo = ();
/// This is used as an identifier of the chain. 42 is the generic substrate prefix.
type SS58Prefix = SS58Prefix;
/// The set code logic, just the default since we're not a parachain.
type OnSetCode = ();
type MaxConsumers = frame_support::traits::ConstU32<16>;
}
impl pallet_aura::Config for Runtime {
type AuthorityId = AuraId;
type DisabledValidators = ();
type MaxAuthorities = ConstU32<32>;
type AllowMultipleBlocksPerSlot = ConstBool<false>;
}
impl pallet_grandpa::Config for Runtime {
type RuntimeEvent = RuntimeEvent;
type WeightInfo = ();
type MaxAuthorities = ConstU32<32>;
type MaxSetIdSessionEntries = ConstU64<0>;
type KeyOwnerProof = sp_core::Void;
type EquivocationReportSystem = ();
}
impl pallet_timestamp::Config for Runtime {
/// A timestamp: milliseconds since the unix epoch.
type Moment = u64;
type OnTimestampSet = Aura;
type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>;
type WeightInfo = ();
}
/// Existential deposit.
pub const EXISTENTIAL_DEPOSIT: u128 = 500;
impl pallet_balances::Config for Runtime {
type MaxLocks = ConstU32<50>;
type MaxReserves = ();
type ReserveIdentifier = [u8; 8];
/// The type for recording an account's balance.
type Balance = Balance;
/// The ubiquitous event type.
type RuntimeEvent = RuntimeEvent;
type DustRemoval = ();
type ExistentialDeposit = ConstU128<EXISTENTIAL_DEPOSIT>;
type AccountStore = System;
type WeightInfo = pallet_balances::weights::SubstrateWeight<Runtime>;
type FreezeIdentifier = ();
type MaxFreezes = ();
type RuntimeHoldReason = ();
type MaxHolds = ();
}
parameter_types! {
pub FeeMultiplier: Multiplier = Multiplier::one();
}
impl pallet_transaction_payment::Config for Runtime {
type RuntimeEvent = RuntimeEvent;
type OnChargeTransaction = CurrencyAdapter<Balances, ()>;
type OperationalFeeMultiplier = ConstU8<5>;
type WeightToFee = IdentityFee<Balance>;
type LengthToFee = IdentityFee<Balance>;
type FeeMultiplierUpdate = ConstFeeMultiplier<FeeMultiplier>;
}
impl pallet_sudo::Config for Runtime {
type RuntimeEvent = RuntimeEvent;
type RuntimeCall = RuntimeCall;
type WeightInfo = pallet_sudo::weights::SubstrateWeight<Runtime>;
}
/// Configure pallet-reputation (located in pallets/reputation).
impl pallet_reputation::Config for Runtime {
type RuntimeEvent = RuntimeEvent;
type WeightInfo = pallet_reputation::weights::SubstrateWeight<Runtime>;
}
// Create the runtime by composing the FRAME pallets that were previously configured.
construct_runtime!(
pub struct Runtime {
System: frame_system,
Timestamp: pallet_timestamp,
Aura: pallet_aura,
Grandpa: pallet_grandpa,
Balances: pallet_balances,
TransactionPayment: pallet_transaction_payment,
Sudo: pallet_sudo,
// Include the custom logic from pallet-reputation in the runtime (instance name kept from the template).
TemplateModule: pallet_reputation,
}
);
/// The address format for describing accounts.
pub type Address = sp_runtime::MultiAddress<AccountId, ()>;
/// Block header type as expected by this runtime.
pub type Header = generic::Header<BlockNumber, BlakeTwo256>;
/// Block type as expected by this runtime.
pub type Block = generic::Block<Header, UncheckedExtrinsic>;
/// The SignedExtension to the basic transaction logic.
pub type SignedExtra = (
frame_system::CheckNonZeroSender<Runtime>,
frame_system::CheckSpecVersion<Runtime>,
frame_system::CheckTxVersion<Runtime>,
frame_system::CheckGenesis<Runtime>,
frame_system::CheckEra<Runtime>,
frame_system::CheckNonce<Runtime>,
frame_system::CheckWeight<Runtime>,
pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
);
/// Unchecked extrinsic type as expected by this runtime.
pub type UncheckedExtrinsic =
generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, SignedExtra>;
/// The payload being signed in transactions.
pub type SignedPayload = generic::SignedPayload<RuntimeCall, SignedExtra>;
/// Executive: handles dispatch to the various modules.
pub type Executive = frame_executive::Executive<
Runtime,
Block,
frame_system::ChainContext<Runtime>,
Runtime,
AllPalletsWithSystem,
>;
#[cfg(feature = "runtime-benchmarks")]
#[macro_use]
extern crate frame_benchmarking;
#[cfg(feature = "runtime-benchmarks")]
mod benches {
define_benchmarks!(
[frame_benchmarking, BaselineBench::<Runtime>]
[frame_system, SystemBench::<Runtime>]
[pallet_balances, Balances]
[pallet_timestamp, Timestamp]
[pallet_sudo, Sudo]
[pallet_reputation, TemplateModule]
);
}
impl_runtime_apis! {
impl sp_api::Core<Block> for Runtime {
fn version() -> RuntimeVersion {
VERSION
}
fn execute_block(block: Block) {
Executive::execute_block(block);
}
fn initialize_block(header: &<Block as BlockT>::Header) {
Executive::initialize_block(header)
}
}
impl sp_api::Metadata<Block> for Runtime {
fn metadata() -> OpaqueMetadata {
OpaqueMetadata::new(Runtime::metadata().into())
}
fn metadata_at_version(version: u32) -> Option<OpaqueMetadata> {
Runtime::metadata_at_version(version)
}
fn metadata_versions() -> sp_std::vec::Vec<u32> {
Runtime::metadata_versions()
}
}
impl sp_block_builder::BlockBuilder<Block> for Runtime {
fn apply_extrinsic(extrinsic: <Block as BlockT>::Extrinsic) -> ApplyExtrinsicResult {
Executive::apply_extrinsic(extrinsic)
}
fn finalize_block() -> <Block as BlockT>::Header {
Executive::finalize_block()
}
fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<<Block as BlockT>::Extrinsic> {
data.create_extrinsics()
}
fn check_inherents(
block: Block,
data: sp_inherents::InherentData,
) -> sp_inherents::CheckInherentsResult {
data.check_extrinsics(&block)
}
}
impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block> for Runtime {
fn validate_transaction(
source: TransactionSource,
tx: <Block as BlockT>::Extrinsic,
block_hash: <Block as BlockT>::Hash,
) -> TransactionValidity {
Executive::validate_transaction(source, tx, block_hash)
}
}
impl sp_offchain::OffchainWorkerApi<Block> for Runtime {
fn offchain_worker(header: &<Block as BlockT>::Header) {
Executive::offchain_worker(header)
}
}
impl sp_consensus_aura::AuraApi<Block, AuraId> for Runtime {
fn slot_duration() -> sp_consensus_aura::SlotDuration {
sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration())
}
fn authorities() -> Vec<AuraId> {
Aura::authorities().into_inner()
}
}
impl sp_session::SessionKeys<Block> for Runtime {
fn generate_session_keys(seed: Option<Vec<u8>>) -> Vec<u8> {
opaque::SessionKeys::generate(seed)
}
fn decode_session_keys(
encoded: Vec<u8>,
) -> Option<Vec<(Vec<u8>, KeyTypeId)>> {
opaque::SessionKeys::decode_into_raw_public_keys(&encoded)
}
}
impl sp_consensus_grandpa::GrandpaApi<Block> for Runtime {
fn grandpa_authorities() -> sp_consensus_grandpa::AuthorityList {
Grandpa::grandpa_authorities()
}
fn current_set_id() -> sp_consensus_grandpa::SetId {
Grandpa::current_set_id()
}
fn submit_report_equivocation_unsigned_extrinsic(
_equivocation_proof: sp_consensus_grandpa::EquivocationProof<
<Block as BlockT>::Hash,
NumberFor<Block>,
>,
_key_owner_proof: sp_consensus_grandpa::OpaqueKeyOwnershipProof,
) -> Option<()> {
None
}
fn generate_key_ownership_proof(
_set_id: sp_consensus_grandpa::SetId,
_authority_id: GrandpaId,
) -> Option<sp_consensus_grandpa::OpaqueKeyOwnershipProof> {
// NOTE: this is the only implementation possible since we've
// defined our key owner proof type as a bottom type (i.e. a type
// with no values).
None
}
}
impl frame_system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Nonce> for Runtime {
fn account_nonce(account: AccountId) -> Nonce {
System::account_nonce(account)
}
}
impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, Balance> for Runtime {
fn query_info(
uxt: <Block as BlockT>::Extrinsic,
len: u32,
) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo<Balance> {
TransactionPayment::query_info(uxt, len)
}
fn query_fee_details(
uxt: <Block as BlockT>::Extrinsic,
len: u32,
) -> pallet_transaction_payment::FeeDetails<Balance> {
TransactionPayment::query_fee_details(uxt, len)
}
fn query_weight_to_fee(weight: Weight) -> Balance {
TransactionPayment::weight_to_fee(weight)
}
fn query_length_to_fee(length: u32) -> Balance {
TransactionPayment::length_to_fee(length)
}
}
impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi<Block, Balance, RuntimeCall>
for Runtime
{
fn query_call_info(
call: RuntimeCall,
len: u32,
) -> pallet_transaction_payment::RuntimeDispatchInfo<Balance> {
TransactionPayment::query_call_info(call, len)
}
fn query_call_fee_details(
call: RuntimeCall,
len: u32,
) -> pallet_transaction_payment::FeeDetails<Balance> {
TransactionPayment::query_call_fee_details(call, len)
}
fn query_weight_to_fee(weight: Weight) -> Balance {
TransactionPayment::weight_to_fee(weight)
}
fn query_length_to_fee(length: u32) -> Balance {
TransactionPayment::length_to_fee(length)
}
}
#[cfg(feature = "runtime-benchmarks")]
impl frame_benchmarking::Benchmark<Block> for Runtime {
fn benchmark_metadata(extra: bool) -> (
Vec<frame_benchmarking::BenchmarkList>,
Vec<frame_support::traits::StorageInfo>,
) {
use frame_benchmarking::{baseline, Benchmarking, BenchmarkList};
use frame_support::traits::StorageInfoTrait;
use frame_system_benchmarking::Pallet as SystemBench;
use baseline::Pallet as BaselineBench;
let mut list = Vec::<BenchmarkList>::new();
list_benchmarks!(list, extra);
let storage_info = AllPalletsWithSystem::storage_info();
(list, storage_info)
}
fn dispatch_benchmark(
config: frame_benchmarking::BenchmarkConfig
) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, sp_runtime::RuntimeString> {
use frame_benchmarking::{baseline, Benchmarking, BenchmarkBatch, TrackedStorageKey};
use frame_system_benchmarking::Pallet as SystemBench;
use baseline::Pallet as BaselineBench;
impl frame_system_benchmarking::Config for Runtime {}
impl baseline::Config for Runtime {}
use frame_support::traits::WhitelistedStorageKeys;
let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
let mut batches = Vec::<BenchmarkBatch>::new();
let params = (&config, &whitelist);
add_benchmarks!(params, batches);
Ok(batches)
}
}
#[cfg(feature = "try-runtime")]
impl frame_try_runtime::TryRuntime<Block> for Runtime {
fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) {
// NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to
// have a backtrace here. If any of the pre/post migration checks fail, we shall stop
// right here and right now.
let weight = Executive::try_runtime_upgrade(checks).unwrap();
(weight, BlockWeights::get().max_block)
}
fn execute_block(
block: Block,
state_root_check: bool,
signature_check: bool,
select: frame_try_runtime::TryStateSelect
) -> Weight {
// NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to
// have a backtrace here.
Executive::try_execute_block(block, state_root_check, signature_check, select).expect("execute-block failed")
}
}
}

View File

@ -0,0 +1,14 @@
[toolchain]
#channel = "nightly"
components = [
"cargo",
"clippy",
"rust-analyzer",
"rust-src",
"rust-std",
"rustc-dev",
"rustc",
"rustfmt",
]
targets = [ "wasm32-unknown-unknown" ]
profile = "minimal"

View File

@ -0,0 +1,23 @@
# Basic
hard_tabs = true
max_width = 100
use_small_heuristics = "Max"
# Imports
imports_granularity = "Crate"
reorder_imports = true
# Consistency
newline_style = "Unix"
# Format comments
comment_width = 100
wrap_comments = true
# Misc
chain_width = 80
spaces_around_ranges = false
binop_separator = "Back"
reorder_impl_items = false
match_arm_leading_pipes = "Preserve"
match_arm_blocks = false
match_block_trailing_comma = true
trailing_comma = "Vertical"
trailing_semicolon = false
use_field_init_shorthand = true

View File

@ -0,0 +1,12 @@
#!/usr/bin/env bash
# This script is meant to be run on Unix/Linux based systems
set -e
echo "*** Initializing WASM build environment"
if [ -z "$CI_PROJECT_NAME" ]; then
rustup update nightly
rustup update stable
fi
rustup target add wasm32-unknown-unknown --toolchain nightly

View File

@ -0,0 +1,35 @@
let
mozillaOverlay =
import (builtins.fetchGit {
url = "https://github.com/mozilla/nixpkgs-mozilla.git";
rev = "57c8084c7ef41366993909c20491e359bbb90f54";
});
pinned = builtins.fetchGit {
# Descriptive name to make the store path easier to identify
url = "https://github.com/nixos/nixpkgs/";
# Commit hash for nixos-unstable as of 2020-04-26
# `git ls-remote https://github.com/nixos/nixpkgs nixos-unstable`
ref = "refs/heads/nixos-unstable";
rev = "1fe6ed37fd9beb92afe90671c0c2a662a03463dd";
};
nixpkgs = import pinned { overlays = [ mozillaOverlay ]; };
toolchain = with nixpkgs; (rustChannelOf { date = "2021-09-14"; channel = "nightly"; });
rust-wasm = toolchain.rust.override {
targets = [ "wasm32-unknown-unknown" ];
};
in
with nixpkgs; pkgs.mkShell {
buildInputs = [
clang
pkg-config
rust-wasm
] ++ stdenv.lib.optionals stdenv.isDarwin [
darwin.apple_sdk.frameworks.Security
];
LIBCLANG_PATH = "${llvmPackages.libclang}/lib";
PROTOC = "${protobuf}/bin/protoc";
RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library/";
ROCKSDB_LIB_DIR = "${rocksdb}/lib";
}