Compare commits


2 Commits

Author       SHA1        Message                                         Date
Steve Myers  747866579d  Add description to file_store cargo metadata   2023-03-20 12:45:33 -05:00
Steve Myers  e6387f8f27  Remove keyword from file_store cargo metadata  2023-03-20 12:37:22 -05:00
159 changed files with 17609 additions and 27488 deletions

View File

@@ -1,8 +0,0 @@
# Set update schedule for GitHub Actions
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
# Check for updates to GitHub Actions every week
interval: "weekly"

View File

@@ -9,7 +9,7 @@ jobs:
env:
RUSTFLAGS: "-Cinstrument-coverage"
RUSTDOCFLAGS: "-Cinstrument-coverage"
LLVM_PROFILE_FILE: "./target/coverage/%p-%m.profraw"
LLVM_PROFILE_FILE: "report-%p-%m.profraw"
steps:
- name: Checkout
@@ -19,7 +19,7 @@ jobs:
- name: Install Rust toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: stable
toolchain: "1.65.0"
override: true
profile: minimal
components: llvm-tools-preview
@@ -27,7 +27,6 @@ jobs:
uses: Swatinem/rust-cache@v2.2.1
- name: Install grcov
run: if [[ ! -e ~/.cargo/bin/grcov ]]; then cargo install grcov; fi
# TODO: re-enable the hwi tests
- name: Build simulator image
run: docker build -t hwi/ledger_emulator ./ci -f ci/Dockerfile.ledger
- name: Run simulator image
@@ -40,16 +39,14 @@ jobs:
run: pip install hwi==2.1.1 protobuf==3.20.1
- name: Test
run: cargo test --all-features
- name: Make coverage directory
run: mkdir coverage
- name: Run grcov
run: grcov . --binary-path ./target/debug/ -s . -t lcov --branch --ignore-not-existing --keep-only '**/crates/**' --ignore '**/tests/**' --ignore '**/examples/**' -o ./coverage/lcov.info
run: mkdir coverage; grcov . --binary-path ./target/debug/ -s . -t lcov --branch --ignore-not-existing --ignore '/*' -o ./coverage/lcov.info
- name: Generate HTML coverage report
run: genhtml -o coverage-report.html --ignore-errors source ./coverage/lcov.info
- name: Coveralls upload
uses: coverallsapp/github-action@master
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
run: genhtml -o coverage-report.html ./coverage/lcov.info
# - name: Coveralls upload
# uses: coverallsapp/github-action@master
# with:
# github-token: ${{ secrets.GITHUB_TOKEN }}
- name: Upload artifact
uses: actions/upload-artifact@v2
with:

View File

@@ -12,7 +12,7 @@ jobs:
rust:
- version: stable
clippy: true
- version: 1.63.0 # MSRV
- version: 1.57.0 # MSRV
features:
- --no-default-features
- --all-features
@@ -27,49 +27,11 @@ jobs:
profile: minimal
- name: Rust Cache
uses: Swatinem/rust-cache@v2.2.1
- name: Pin dependencies for MSRV
if: matrix.rust.version == '1.63.0'
run: |
cargo update -p zstd-sys --precise "2.0.8+zstd.1.5.5"
cargo update -p time --precise "0.3.20"
cargo update -p home --precise "0.5.5"
cargo update -p proptest --precise "1.2.0"
cargo update -p url --precise "2.5.0"
cargo update -p cc --precise "1.0.105"
cargo update -p tokio --precise "1.38.1"
- name: Build
run: cargo build ${{ matrix.features }}
- name: Test
run: cargo test ${{ matrix.features }}
check-no-std:
name: Check no_std
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Install Rust toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
profile: minimal
# target: "thumbv6m-none-eabi"
- name: Rust Cache
uses: Swatinem/rust-cache@v2.2.1
- name: Check bdk_chain
working-directory: ./crates/chain
# TODO "--target thumbv6m-none-eabi" should work but currently does not
run: cargo check --no-default-features --features miniscript/no-std,hashbrown
- name: Check bdk wallet
working-directory: ./crates/wallet
# TODO "--target thumbv6m-none-eabi" should work but currently does not
run: cargo check --no-default-features --features miniscript/no-std,bdk_chain/hashbrown
- name: Check esplora
working-directory: ./crates/esplora
# TODO "--target thumbv6m-none-eabi" should work but currently does not
run: cargo check --no-default-features --features miniscript/no-std,bdk_chain/hashbrown
check-wasm:
name: Check WASM
runs-on: ubuntu-20.04
@@ -81,6 +43,7 @@ jobs:
uses: actions/checkout@v2
# Install a recent version of clang that supports wasm32
- run: wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add - || exit 1
- run: sudo apt-add-repository "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-10 main" || exit 1
- run: sudo apt-get update || exit 1
- run: sudo apt-get install -y libclang-common-10-dev clang-10 libc6-dev-i386 || exit 1
- name: Install Rust toolchain
@@ -92,12 +55,12 @@ jobs:
target: "wasm32-unknown-unknown"
- name: Rust Cache
uses: Swatinem/rust-cache@v2.2.1
- name: Check bdk wallet
working-directory: ./crates/wallet
run: cargo check --target wasm32-unknown-unknown --no-default-features --features miniscript/no-std,bdk_chain/hashbrown
- name: Check bdk
working-directory: ./crates/bdk
run: cargo check --target wasm32-unknown-unknown --features dev-getrandom-wasm
- name: Check esplora
working-directory: ./crates/esplora
run: cargo check --target wasm32-unknown-unknown --no-default-features --features miniscript/no-std,bdk_chain/hashbrown,async
run: cargo check --target wasm32-unknown-unknown --features async --no-default-features
fmt:
name: Rust fmt
@@ -121,7 +84,9 @@ jobs:
- uses: actions/checkout@v1
- uses: actions-rs/toolchain@v1
with:
toolchain: 1.78.0
# we pin clippy instead of using "stable" so that our CI doesn't break
# at each new cargo release
toolchain: "1.67.0"
components: clippy
override: true
- name: Rust Cache

View File

@@ -10,7 +10,7 @@ jobs:
- name: Checkout sources
uses: actions/checkout@v2
- name: Set default toolchain
run: rustup default nightly-2024-05-12
run: rustup default nightly-2022-12-14
- name: Set profile
run: rustup set profile minimal
- name: Update toolchain

.gitignore vendored (4 lines changed)
View File

@@ -4,7 +4,3 @@ Cargo.lock
*.swp
.idea
# Example persisted files.
*.db
*.sqlite*

View File

@@ -158,7 +158,7 @@ BDK and LDK together.
- Add the ability to specify which leaves to sign in a taproot transaction through `TapLeavesOptions` in `SignOptions`
- Add the ability to specify whether a taproot transaction should be signed using the internal key or not, using `sign_with_tap_internal_key` in `SignOptions`
- Consolidate params `fee_amount` and `amount_needed` in `target_amount` in `CoinSelectionAlgorithm::coin_select` signature.
- Change the meaning of the `fee_amount` field inside `CoinSelectionResult`: from now on the `fee_amount` will represent only the fees associated with the utxos in the `selected` field of `CoinSelectionResult`.
- Change the meaning of the `fee_amount` field inside `CoinSelectionResult`: from now on the `fee_amount` will represent only the fees asociated with the utxos in the `selected` field of `CoinSelectionResult`.
- New `RpcBlockchain` implementation with various fixes.
- Return balance in separate categories, namely `confirmed`, `trusted_pending`, `untrusted_pending` & `immature`.
@@ -449,7 +449,7 @@ final transaction is created by calling `finish` on the builder.
#### Changed
- Simplify the architecture of blockchain traits
- Improve sync
- Remove unused variant `HeaderParseFail`
- Remove unused varaint HeaderParseFail
### CLI
#### Added
@@ -517,7 +517,7 @@ final transaction is created by calling `finish` on the builder.
- Default to SIGHASH_ALL if not specified
- Replace ChangeSpendPolicy::filter_utxos with a predicate
- Make 'unspendable' into a HashSet
- Stop implicitly enforcing manual selection by .add_utxo
- Stop implicitly enforcing manaul selection by .add_utxo
- Rename DumbCS to LargestFirstCoinSelection
- Rename must_use_utxos to required_utxos
- Rename may_use_utxos to optional_uxtos
@@ -529,7 +529,7 @@ final transaction is created by calling `finish` on the builder.
- Use TXIN_DEFAULT_WEIGHT constant in coin selection
- Replace `must_use` with `required` in coin selection
- Take both spending policies into account in create_tx
- Check last derivation in cache to avoid recomputing
- Check last derivation in cache to avoid recomputation
- Use the branch-and-bound cs by default
- Make coin_select return UTXOs instead of TxIns
- Build output lookup inside complete transaction
@@ -550,7 +550,7 @@ final transaction is created by calling `finish` on the builder.
- Require esplora feature for repl example
#### Security
- Use dirs-next instead of dirs since the latter is unmaintained
- Use dirs-next instead of dirs since the latter is unmantained
## [0.1.0-beta.1] - 2020-09-08

View File

@@ -28,7 +28,7 @@ The codebase is maintained using the "contributor workflow" where everyone
without exception contributes patch proposals using "pull requests". This
facilitates social contribution, easy testing and peer review.
To contribute a patch, the workflow is as follows:
To contribute a patch, the worflow is a as follows:
1. Fork Repository
2. Create topic branch
@@ -46,15 +46,15 @@ Every new feature should be covered by functional tests where possible.
When refactoring, structure your PR to make it easy to review and don't
hesitate to split it into multiple small, focused PRs.
The Minimal Supported Rust Version is **1.57.0** (enforced by our CI).
The Minimal Supported Rust Version is 1.46 (enforced by our CI).
Commits should cover both the issue fixed and the solution's rationale.
These [guidelines](https://chris.beams.io/posts/git-commit/) should be kept in mind. Commit messages should follow the ["Conventional Commits 1.0.0"](https://www.conventionalcommits.org/en/v1.0.0/) to make commit histories easier to read by humans and automated tools.
These [guidelines](https://chris.beams.io/posts/git-commit/) should be kept in mind.
To facilitate communication with other contributors, the project is making use
of GitHub's "assignee" field. First check that no one is assigned and then
comment suggesting that you're working on it. If someone is already assigned,
don't hesitate to ask if the assigned party or previous commenter are still
don't hesitate to ask if the assigned party or previous commenters are still
working on it if it has been awhile.
Deprecation policy
@@ -91,7 +91,7 @@ This is also enforced by the CI.
Security
--------
Security is a high priority of BDK; disclosure of security vulnerabilities helps
Security is a high priority of BDK; disclosure of security vulnerabilites helps
prevent user loss of funds.
Note that BDK is currently considered "pre-production" during this time, there

View File

@@ -1,22 +1,15 @@
[workspace]
resolver = "2"
members = [
"crates/wallet",
"crates/bdk",
"crates/chain",
"crates/file_store",
"crates/electrum",
"crates/esplora",
"crates/bitcoind_rpc",
"crates/hwi",
"crates/testenv",
"example-crates/example_cli",
"example-crates/example_electrum",
"example-crates/example_esplora",
"example-crates/example_bitcoind_rpc_polling",
"example-crates/keychain_tracker_electrum",
"example-crates/keychain_tracker_esplora",
"example-crates/keychain_tracker_example_cli",
"example-crates/wallet_electrum",
"example-crates/wallet_esplora_blocking",
"example-crates/wallet_esplora",
"example-crates/wallet_esplora_async",
"example-crates/wallet_rpc",
"nursery/tmp_plan",
"nursery/coin_select"
]

View File

@@ -10,19 +10,19 @@
</p>
<p>
<a href="https://crates.io/crates/bdk_wallet"><img alt="Crate Info" src="https://img.shields.io/crates/v/bdk_wallet.svg"/></a>
<a href="https://crates.io/crates/bdk"><img alt="Crate Info" src="https://img.shields.io/crates/v/bdk.svg"/></a>
<a href="https://github.com/bitcoindevkit/bdk/blob/master/LICENSE"><img alt="MIT or Apache-2.0 Licensed" src="https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg"/></a>
<a href="https://github.com/bitcoindevkit/bdk/actions?query=workflow%3ACI"><img alt="CI Status" src="https://github.com/bitcoindevkit/bdk/workflows/CI/badge.svg"></a>
<a href="https://coveralls.io/github/bitcoindevkit/bdk?branch=master"><img src="https://coveralls.io/repos/github/bitcoindevkit/bdk/badge.svg?branch=master"/></a>
<a href="https://docs.rs/bdk_wallet"><img alt="Wallet API Docs" src="https://img.shields.io/badge/docs.rs-bdk_wallet-green"/></a>
<a href="https://blog.rust-lang.org/2022/08/11/Rust-1.63.0.html"><img alt="Rustc Version 1.63.0+" src="https://img.shields.io/badge/rustc-1.63.0%2B-lightgrey.svg"/></a>
<a href="https://docs.rs/bdk"><img alt="API Docs" src="https://img.shields.io/badge/docs.rs-bdk-green"/></a>
<a href="https://blog.rust-lang.org/2021/12/02/Rust-1.57.0.html"><img alt="Rustc Version 1.57.0+" src="https://img.shields.io/badge/rustc-1.57.0%2B-lightgrey.svg"/></a>
<a href="https://discord.gg/d7NkDKm"><img alt="Chat on Discord" src="https://img.shields.io/discord/753336465005608961?logo=discord"></a>
</p>
<h4>
<a href="https://bitcoindevkit.org">Project Homepage</a>
<span> | </span>
<a href="https://docs.rs/bdk_wallet">Documentation</a>
<a href="https://docs.rs/bdk">Documentation</a>
</h4>
</div>
@@ -33,62 +33,22 @@ It is built upon the excellent [`rust-bitcoin`] and [`rust-miniscript`] crates.
> ⚠ The Bitcoin Dev Kit developers are in the process of releasing a `v1.0` which is a fundamental re-write of how the library works.
> See for some background on this project: https://bitcoindevkit.org/blog/road-to-bdk-1/ (ignore the timeline 😁)
> For a release timeline see the [`BDK 1.0 project page`].
> For a release timeline see the [`bdk_core_staging`] repo where a lot of the component work is being done. The plan is that everything in the `bdk_core_staging` repo will be moved into the `crates` directory here.
## Architecture
The project is split up into several crates in the `/crates` directory:
- [`wallet`](./crates/wallet): Contains the central high level `Wallet` type that is built from the low-level mechanisms provided by the other components
- [`bdk`](./crates/bdk): Contains the central high level `Wallet` type that is built from the low-level mechanisms provided by the other components
- [`chain`](./crates/chain): Tools for storing and indexing chain data
- [`persist`](./crates/persist): Types that define data persistence of a BDK wallet
- [`file_store`](./crates/file_store): A (experimental) persistence backend for storing chain data in a single file.
- [`esplora`](./crates/esplora): Extends the [`esplora-client`] crate with methods to fetch chain data from an esplora HTTP server in the form that [`bdk_chain`] and `Wallet` can consume.
- [`electrum`](./crates/electrum): Extends the [`electrum-client`] crate with methods to fetch chain data from an electrum server in the form that [`bdk_chain`] and `Wallet` can consume.
Fully working examples of how to use these components are in `/example-crates`:
- [`example_cli`](./example-crates/example_cli): Library used by the `example_*` crates. Provides utilities for syncing, showing the balance, generating addresses and creating transactions without using the bdk_wallet `Wallet`.
- [`example_electrum`](./example-crates/example_electrum): A command line Bitcoin wallet application built on top of `example_cli` and the `electrum` crate. It shows the power of the bdk tools (`chain` + `file_store` + `electrum`), without depending on the main `bdk_wallet` library.
- [`example_esplora`](./example-crates/example_esplora): A command line Bitcoin wallet application built on top of `example_cli` and the `esplora` crate. It shows the power of the bdk tools (`chain` + `file_store` + `esplora`), without depending on the main `bdk_wallet` library.
- [`example_bitcoind_rpc_polling`](./example-crates/example_bitcoind_rpc_polling): A command line Bitcoin wallet application built on top of `example_cli` and the `bitcoind_rpc` crate. It shows the power of the bdk tools (`chain` + `file_store` + `bitcoind_rpc`), without depending on the main `bdk_wallet` library.
- [`wallet_esplora_blocking`](./example-crates/wallet_esplora_blocking): Uses the `Wallet` to sync and spend using the Esplora blocking interface.
- [`wallet_esplora_async`](./example-crates/wallet_esplora_async): Uses the `Wallet` to sync and spend using the Esplora asynchronous interface.
- [`wallet_electrum`](./example-crates/wallet_electrum): Uses the `Wallet` to sync and spend using Electrum.
Fully working examples of how to use these components are in `/example-crates`
[`BDK 1.0 project page`]: https://github.com/orgs/bitcoindevkit/projects/14
[`bdk_core_staging`]: https://github.com/LLFourn/bdk_core_staging
[`rust-miniscript`]: https://github.com/rust-bitcoin/rust-miniscript
[`rust-bitcoin`]: https://github.com/rust-bitcoin/rust-bitcoin
[`esplora-client`]: https://docs.rs/esplora-client/
[`electrum-client`]: https://docs.rs/electrum-client/
[`bdk_chain`]: https://docs.rs/bdk-chain/
## Minimum Supported Rust Version (MSRV)
This library should compile with any combination of features with Rust 1.63.0.
To build with the MSRV you will need to pin dependencies as follows:
```shell
cargo update -p zstd-sys --precise "2.0.8+zstd.1.5.5"
cargo update -p time --precise "0.3.20"
cargo update -p home --precise "0.5.5"
cargo update -p proptest --precise "1.2.0"
cargo update -p url --precise "2.5.0"
cargo update -p cc --precise "1.0.105"
cargo update -p tokio --precise "1.38.1"
```
## License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or <https://www.apache.org/licenses/LICENSE-2.0>)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or <https://opensource.org/licenses/MIT>)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.
[`esplora-client`]: https://docs.rs/esplora-client/0.3.0/esplora_client/
[`electrum-client`]: https://docs.rs/electrum-client/0.13.0/electrum_client/

View File

@@ -1 +1 @@
msrv="1.63.0"
msrv="1.57.0"

crates/bdk/Cargo.toml (new file, 69 lines)
View File

@@ -0,0 +1,69 @@
[package]
name = "bdk"
homepage = "https://bitcoindevkit.org"
version = "1.0.0-alpha.0"
repository = "https://github.com/bitcoindevkit/bdk"
documentation = "https://docs.rs/bdk"
description = "A modern, lightweight, descriptor-based wallet library"
keywords = ["bitcoin", "wallet", "descriptor", "psbt"]
readme = "README.md"
license = "MIT OR Apache-2.0"
authors = ["Bitcoin Dev Kit Developers"]
edition = "2021"
rust-version = "1.57"
[dependencies]
log = "^0.4"
rand = "^0.8"
miniscript = { version = "9", features = ["serde"] }
bitcoin = { version = "0.29", features = ["serde", "base64", "rand"] }
serde = { version = "^1.0", features = ["derive"] }
serde_json = { version = "^1.0" }
bdk_chain = { path = "../chain", version = "0.4.0", features = ["miniscript", "serde"] }
# Optional dependencies
hwi = { version = "0.5", optional = true, features = [ "use-miniscript"] }
bip39 = { version = "1.0.1", optional = true }
[target.'cfg(target_arch = "wasm32")'.dependencies]
getrandom = "0.2"
js-sys = "0.3"
[features]
default = ["std"]
std = []
compiler = ["miniscript/compiler"]
all-keys = ["keys-bip39"]
keys-bip39 = ["bip39"]
hardware-signer = ["hwi"]
test-hardware-signer = ["hardware-signer"]
# This feature is used to run `cargo check` in our CI targeting wasm. It's not recommended
# for libraries to explicitly include the "getrandom/js" feature, so we only do it when
# necessary for running our CI. See: https://docs.rs/getrandom/0.2.8/getrandom/#webassembly-support
dev-getrandom-wasm = ["getrandom/js"]
[dev-dependencies]
lazy_static = "1.4"
env_logger = "0.7"
# Move back to importing from rust-bitcoin once https://github.com/rust-bitcoin/rust-bitcoin/pull/1342 is released
base64 = "^0.13"
assert_matches = "1.5.0"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[[example]]
name = "mnemonic_to_descriptors"
path = "examples/mnemonic_to_descriptors.rs"
required-features = ["all-keys"]
[[example]]
name = "miniscriptc"
path = "examples/compiler.rs"
required-features = ["compiler"]

View File

@@ -8,25 +8,25 @@
</p>
<p>
<a href="https://crates.io/crates/bdk_wallet"><img alt="Crate Info" src="https://img.shields.io/crates/v/bdk_wallet.svg"/></a>
<a href="https://crates.io/crates/bdk"><img alt="Crate Info" src="https://img.shields.io/crates/v/bdk.svg"/></a>
<a href="https://github.com/bitcoindevkit/bdk/blob/master/LICENSE"><img alt="MIT or Apache-2.0 Licensed" src="https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg"/></a>
<a href="https://github.com/bitcoindevkit/bdk/actions?query=workflow%3ACI"><img alt="CI Status" src="https://github.com/bitcoindevkit/bdk/workflows/CI/badge.svg"></a>
<a href="https://coveralls.io/github/bitcoindevkit/bdk?branch=master"><img src="https://coveralls.io/repos/github/bitcoindevkit/bdk/badge.svg?branch=master"/></a>
<a href="https://docs.rs/bdk_wallet"><img alt="API Docs" src="https://img.shields.io/badge/docs.rs-bdk_wallet-green"/></a>
<a href="https://blog.rust-lang.org/2022/08/11/Rust-1.63.0.html"><img alt="Rustc Version 1.63.0+" src="https://img.shields.io/badge/rustc-1.63.0%2B-lightgrey.svg"/></a>
<a href="https://docs.rs/bdk"><img alt="API Docs" src="https://img.shields.io/badge/docs.rs-bdk-green"/></a>
<a href="https://blog.rust-lang.org/2021/12/02/Rust-1.57.0.html"><img alt="Rustc Version 1.57.0+" src="https://img.shields.io/badge/rustc-1.57.0%2B-lightgrey.svg"/></a>
<a href="https://discord.gg/d7NkDKm"><img alt="Chat on Discord" src="https://img.shields.io/discord/753336465005608961?logo=discord"></a>
</p>
<h4>
<a href="https://bitcoindevkit.org">Project Homepage</a>
<span> | </span>
<a href="https://docs.rs/bdk_wallet">Documentation</a>
<a href="https://docs.rs/bdk">Documentation</a>
</h4>
</div>
# BDK Wallet
## `bdk`
The `bdk_wallet` crate provides the [`Wallet`] type which is a simple, high-level
The `bdk` crate provides the [`Wallet`](`crate::Wallet`) type which is a simple, high-level
interface built from the low-level components of [`bdk_chain`]. `Wallet` is a good starting point
for many simple applications as well as a good demonstration of how to use the other mechanisms to
construct a wallet. It has two keychains (external and internal) which are defined by
@@ -34,80 +34,64 @@ construct a wallet. It has two keychains (external and internal) which are defin
chain data it also uses the descriptors to find transaction outputs owned by them. From there, you
can create and sign transactions.
For details about the API of `Wallet` see the [module-level documentation][`Wallet`].
For more information, see the [`Wallet`'s documentation](https://docs.rs/bdk/latest/bdk/wallet/struct.Wallet.html).
## Blockchain data
### Blockchain data
In order to get blockchain data for `Wallet` to consume, you should configure a client from
an available chain source. Typically you make a request to the chain source and get a response
that the `Wallet` can use to update its view of the chain.
In order to get blockchain data for `Wallet` to consume, you have to put it into particular form.
Right now this is [`KeychainScan`] which is defined in [`bdk_chain`].
This can be created manually or from blockchain-scanning crates.
**Blockchain Data Sources**
* [`bdk_esplora`]: Grabs blockchain data from Esplora for updating BDK structures.
* [`bdk_electrum`]: Grabs blockchain data from Electrum for updating BDK structures.
* [`bdk_bitcoind_rpc`]: Grabs blockchain data from Bitcoin Core for updating BDK structures.
**Examples**
* [`example-crates/wallet_esplora_async`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora_async)
* [`example-crates/wallet_esplora_blocking`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora_blocking)
* [`example-crates/wallet_esplora`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora)
* [`example-crates/wallet_electrum`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_electrum)
* [`example-crates/wallet_rpc`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_rpc)
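
The request/response flow described above can be sketched roughly as follows. This is a hedged illustration only: the `start_full_scan`/`full_scan`/`apply_update` names, the stop-gap and parallelism arguments, the endpoint URL, and `create_wallet_no_persist` are assumptions based on the `bdk_wallet`/`bdk_esplora` APIs around the 1.0 line, not something this diff establishes.

```rust
use bdk_esplora::{esplora_client, EsploraExt};
use bdk_wallet::{bitcoin::Network, Wallet};

fn sync_with_esplora() -> anyhow::Result<()> {
    // Same test descriptors as the persistence example below.
    let descriptor = "wpkh(tprv8ZgxMBicQKsPdcAqYBpzAFwU5yxBUo88ggoBqu1qPcHUfSbKK1sKMLmC7EAk438btHQrSdu3jGGQa6PA71nvH5nkDexhLteJqkM4dQmWF9g/84'/1'/0'/0/*)";
    let change_descriptor = "wpkh(tprv8ZgxMBicQKsPdcAqYBpzAFwU5yxBUo88ggoBqu1qPcHUfSbKK1sKMLmC7EAk438btHQrSdu3jGGQa6PA71nvH5nkDexhLteJqkM4dQmWF9g/84'/1'/0'/1/*)";
    let mut wallet = Wallet::create(descriptor, change_descriptor)
        .network(Network::Testnet)
        .create_wallet_no_persist()?;

    // Build a blocking Esplora client (the endpoint URL is illustrative).
    let client =
        esplora_client::Builder::new("https://blockstream.info/testnet/api").build_blocking();

    // Ask the wallet for a full-scan request covering both keychains, send it
    // to the chain source (stop gap 5, one request at a time), and apply the
    // response so the wallet's view of the chain is updated.
    let request = wallet.start_full_scan().build();
    let update = client.full_scan(request, 5, 1)?;
    wallet.apply_update(update)?;

    println!("balance: {}", wallet.balance().total());
    Ok(())
}
```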
## Persistence
### Persistence
To persist `Wallet` state data use a data store crate that reads and writes [`ChangeSet`].
To persist the `Wallet` on disk, `Wallet` needs to be constructed with a
[`Persist`](https://docs.rs/bdk_chain/latest/bdk_chain/keychain/struct.KeychainPersist.html) implementation.
**Implementations**
* [`bdk_file_store`]: Stores wallet changes in a simple flat file.
* [`bdk_file_store`]: a simple flat-file implementation of `Persist`.
**Example**
<!-- compile_fail because outpoint and txout are fake variables -->
```rust,no_run
use bdk_wallet::{bitcoin::Network, KeychainKind, ChangeSet, Wallet};
```rust
use bdk::{bitcoin::Network, wallet::{AddressIndex, Wallet}};
// Open or create a new file store for wallet data.
let mut db =
bdk_file_store::Store::<ChangeSet>::open_or_create_new(b"magic_bytes", "/tmp/my_wallet.db")
.expect("create store");
fn main() {
// a type that implements `Persist`
let db = ();
// Create a wallet with initial wallet data read from the file store.
let network = Network::Testnet;
let descriptor = "wpkh(tprv8ZgxMBicQKsPdcAqYBpzAFwU5yxBUo88ggoBqu1qPcHUfSbKK1sKMLmC7EAk438btHQrSdu3jGGQa6PA71nvH5nkDexhLteJqkM4dQmWF9g/84'/1'/0'/0/*)";
let change_descriptor = "wpkh(tprv8ZgxMBicQKsPdcAqYBpzAFwU5yxBUo88ggoBqu1qPcHUfSbKK1sKMLmC7EAk438btHQrSdu3jGGQa6PA71nvH5nkDexhLteJqkM4dQmWF9g/84'/1'/0'/1/*)";
let wallet_opt = Wallet::load()
.descriptors(descriptor, change_descriptor)
.network(network)
.load_wallet(&mut db)
.expect("wallet");
let mut wallet = match wallet_opt {
Some(wallet) => wallet,
None => Wallet::create(descriptor, change_descriptor)
.network(network)
.create_wallet(&mut db)
.expect("wallet"),
};
let descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/0'/0'/0/*)";
let mut wallet = Wallet::new(descriptor, None, db, Network::Testnet).expect("should create");
// Get a new address to receive bitcoin.
let receive_address = wallet.reveal_next_address(KeychainKind::External);
// Persist staged wallet data changes to the file store.
wallet.persist(&mut db).expect("persist");
println!("Your new receive address is: {}", receive_address.address);
// get a new address (this increments revealed derivation index)
println!("revealed address: {}", wallet.get_address(AddressIndex::New));
println!("staged changes: {:?}", wallet.staged());
// persist changes
wallet.commit().expect("must save");
}
```
<!-- ### Sync the balance of a descriptor -->
<!-- ```rust,no_run -->
<!-- use bdk_wallet::Wallet; -->
<!-- use bdk_wallet::blockchain::ElectrumBlockchain; -->
<!-- use bdk_wallet::SyncOptions; -->
<!-- use bdk_wallet::electrum_client::Client; -->
<!-- use bdk_wallet::bitcoin::Network; -->
<!-- use bdk::Wallet; -->
<!-- use bdk::blockchain::ElectrumBlockchain; -->
<!-- use bdk::SyncOptions; -->
<!-- use bdk::electrum_client::Client; -->
<!-- use bdk::bitcoin::Network; -->
<!-- fn main() -> Result<(), bdk_wallet::Error> { -->
<!-- fn main() -> Result<(), bdk::Error> { -->
<!-- let blockchain = ElectrumBlockchain::from(Client::new("ssl://electrum.blockstream.info:60002")?); -->
<!-- let wallet = Wallet::new( -->
<!-- "wpkh([c258d2e4/84h/1h/0h]tpubDDYkZojQFQjht8Tm4jsS3iuEmKjTiEGjG6KnuFNKKJb5A6ZUCUZKdvLdSDWofKi4ToRCwb9poe1XdqfUnP4jaJjCB2Zwv11ZLgSbnZSNecE/0/*)", -->
@@ -117,7 +101,7 @@ println!("Your new receive address is: {}", receive_address.address);
<!-- wallet.sync(&blockchain, SyncOptions::default())?; -->
<!-- println!("Descriptor balance: {} SAT", wallet.balance()?); -->
<!-- println!("Descriptor balance: {} SAT", wallet.get_balance()?); -->
<!-- Ok(()) -->
<!-- } -->
@@ -125,12 +109,12 @@ println!("Your new receive address is: {}", receive_address.address);
<!-- ### Generate a few addresses -->
<!-- ```rust -->
<!-- use bdk_wallet::Wallet; -->
<!-- use bdk_wallet::AddressIndex::New; -->
<!-- use bdk_wallet::bitcoin::Network; -->
<!-- use bdk::Wallet; -->
<!-- use bdk::wallet::AddressIndex::New; -->
<!-- use bdk::bitcoin::Network; -->
<!-- fn main() -> Result<(), bdk_wallet::Error> { -->
<!-- let wallet = Wallet::new( -->
<!-- fn main() -> Result<(), bdk::Error> { -->
<!-- let wallet = Wallet::new_no_persist( -->
<!-- "wpkh([c258d2e4/84h/1h/0h]tpubDDYkZojQFQjht8Tm4jsS3iuEmKjTiEGjG6KnuFNKKJb5A6ZUCUZKdvLdSDWofKi4ToRCwb9poe1XdqfUnP4jaJjCB2Zwv11ZLgSbnZSNecE/0/*)", -->
<!-- Some("wpkh([c258d2e4/84h/1h/0h]tpubDDYkZojQFQjht8Tm4jsS3iuEmKjTiEGjG6KnuFNKKJb5A6ZUCUZKdvLdSDWofKi4ToRCwb9poe1XdqfUnP4jaJjCB2Zwv11ZLgSbnZSNecE/1/*)"), -->
<!-- Network::Testnet, -->
@@ -147,19 +131,19 @@ println!("Your new receive address is: {}", receive_address.address);
<!-- ### Create a transaction -->
<!-- ```rust,no_run -->
<!-- use bdk_wallet::{FeeRate, Wallet, SyncOptions}; -->
<!-- use bdk_wallet::blockchain::ElectrumBlockchain; -->
<!-- use bdk::{FeeRate, Wallet, SyncOptions}; -->
<!-- use bdk::blockchain::ElectrumBlockchain; -->
<!-- use bdk_wallet::electrum_client::Client; -->
<!-- use bdk_wallet::AddressIndex::New; -->
<!-- use bdk::electrum_client::Client; -->
<!-- use bdk::wallet::AddressIndex::New; -->
<!-- use bitcoin::base64; -->
<!-- use bdk_wallet::bitcoin::consensus::serialize; -->
<!-- use bdk_wallet::bitcoin::Network; -->
<!-- use base64; -->
<!-- use bdk::bitcoin::consensus::serialize; -->
<!-- use bdk::bitcoin::Network; -->
<!-- fn main() -> Result<(), bdk_wallet::Error> { -->
<!-- fn main() -> Result<(), bdk::Error> { -->
<!-- let blockchain = ElectrumBlockchain::from(Client::new("ssl://electrum.blockstream.info:60002")?); -->
<!-- let wallet = Wallet::new( -->
<!-- let wallet = Wallet::new_no_persist( -->
<!-- "wpkh([c258d2e4/84h/1h/0h]tpubDDYkZojQFQjht8Tm4jsS3iuEmKjTiEGjG6KnuFNKKJb5A6ZUCUZKdvLdSDWofKi4ToRCwb9poe1XdqfUnP4jaJjCB2Zwv11ZLgSbnZSNecE/0/*)", -->
<!-- Some("wpkh([c258d2e4/84h/1h/0h]tpubDDYkZojQFQjht8Tm4jsS3iuEmKjTiEGjG6KnuFNKKJb5A6ZUCUZKdvLdSDWofKi4ToRCwb9poe1XdqfUnP4jaJjCB2Zwv11ZLgSbnZSNecE/1/*)"), -->
<!-- Network::Testnet, -->
@@ -188,14 +172,14 @@ println!("Your new receive address is: {}", receive_address.address);
<!-- ### Sign a transaction -->
<!-- ```rust,no_run -->
<!-- use bdk_wallet::{Wallet, SignOptions}; -->
<!-- use bdk::{Wallet, SignOptions}; -->
<!-- use bitcoin::base64; -->
<!-- use bdk_wallet::bitcoin::consensus::deserialize; -->
<!-- use bdk_wallet::bitcoin::Network; -->
<!-- use base64; -->
<!-- use bdk::bitcoin::consensus::deserialize; -->
<!-- use bdk::bitcoin::Network; -->
<!-- fn main() -> Result<(), bdk_wallet::Error> { -->
<!-- let wallet = Wallet::new( -->
<!-- fn main() -> Result<(), bdk::Error> { -->
<!-- let wallet = Wallet::new_no_persist( -->
<!-- "wpkh([c258d2e4/84h/1h/0h]tprv8griRPhA7342zfRyB6CqeKF8CJDXYu5pgnj1cjL1u2ngKcJha5jjTRimG82ABzJQ4MQe71CV54xfn25BbhCNfEGGJZnxvCDQCd6JkbvxW6h/0/*)", -->
<!-- Some("wpkh([c258d2e4/84h/1h/0h]tprv8griRPhA7342zfRyB6CqeKF8CJDXYu5pgnj1cjL1u2ngKcJha5jjTRimG82ABzJQ4MQe71CV54xfn25BbhCNfEGGJZnxvCDQCd6JkbvxW6h/1/*)"), -->
<!-- Network::Testnet, -->
@@ -218,26 +202,26 @@ println!("Your new receive address is: {}", receive_address.address);
cargo test
```
# License
## License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](../../LICENSE-APACHE) or <https://www.apache.org/licenses/LICENSE-2.0>)
* MIT license ([LICENSE-MIT](../../LICENSE-MIT) or <https://opensource.org/licenses/MIT>)
* Apache License, Version 2.0
([LICENSE-APACHE](LICENSE-APACHE) or <https://www.apache.org/licenses/LICENSE-2.0>)
* MIT license
([LICENSE-MIT](LICENSE-MIT) or <https://opensource.org/licenses/MIT>)
at your option.
# Contribution
## Contribution
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or
conditions.
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.
[`Wallet`]: https://docs.rs/bdk_wallet/latest/bdk_wallet/wallet/struct.Wallet.html
[`bdk_chain`]: https://docs.rs/bdk_chain/latest
[`bdk_file_store`]: https://docs.rs/bdk_file_store/latest
[`bdk_electrum`]: https://docs.rs/bdk_electrum/latest
[`bdk_esplora`]: https://docs.rs/bdk_esplora/latest
[`bdk_bitcoind_rpc`]: https://docs.rs/bdk_bitcoind_rpc/latest
[`KeychainScan`]: https://docs.rs/bdk_chain/latest/bdk_chain/keychain/struct.KeychainScan.html
[`rust-miniscript`]: https://docs.rs/miniscript/latest/miniscript/index.html

View File

@@ -0,0 +1,73 @@
// Bitcoin Dev Kit
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
//
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
extern crate bdk;
extern crate bitcoin;
extern crate log;
extern crate miniscript;
extern crate serde_json;
use std::error::Error;
use std::str::FromStr;
use log::info;
use bitcoin::Network;
use miniscript::policy::Concrete;
use miniscript::Descriptor;
use bdk::wallet::AddressIndex::New;
use bdk::{KeychainKind, Wallet};
/// Miniscript policy is a high-level abstraction of spending conditions, defined in the
/// rust-miniscript library: https://docs.rs/miniscript/7.0.0/miniscript/policy/index.html
/// rust-miniscript provides a `compile()` function that can be used to compile any miniscript policy
/// into a descriptor. This descriptor can in turn be used in bdk, so that a fully functioning wallet
/// can be derived from the policy.
///
/// This example demonstrates the interaction between a bdk wallet and miniscript policy.
fn main() -> Result<(), Box<dyn Error>> {
env_logger::init_from_env(
env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"),
);
// We start with a generic miniscript policy string
let policy_str = "or(10@thresh(4,pk(029ffbe722b147f3035c87cb1c60b9a5947dd49c774cc31e94773478711a929ac0),pk(025f05815e3a1a8a83bfbb03ce016c9a2ee31066b98f567f6227df1d76ec4bd143),pk(025625f41e4a065efc06d5019cbbd56fe8c07595af1231e7cbc03fafb87ebb71ec),pk(02a27c8b850a00f67da3499b60562673dcf5fdfb82b7e17652a7ac54416812aefd),pk(03e618ec5f384d6e19ca9ebdb8e2119e5bef978285076828ce054e55c4daf473e2)),1@and(older(4209713),thresh(2,pk(03deae92101c790b12653231439f27b8897264125ecb2f46f48278603102573165),pk(033841045a531e1adf9910a6ec279589a90b3b8a904ee64ffd692bd08a8996c1aa),pk(02aebf2d10b040eb936a6f02f44ee82f8b34f5c1ccb20ff3949c2b28206b7c1068))))";
info!("Compiling policy: \n{}", policy_str);
// Parse the string as a [`Concrete`] type miniscript policy.
let policy = Concrete::<String>::from_str(policy_str)?;
// Create a `wsh` type descriptor from the policy.
// `policy.compile()` returns the resulting miniscript from the policy.
let descriptor = Descriptor::new_wsh(policy.compile()?)?;
info!("Compiled into following Descriptor: \n{}", descriptor);
// Create a new wallet from this descriptor
let mut wallet = Wallet::new_no_persist(&format!("{}", descriptor), None, Network::Regtest)?;
info!(
"First derived address from the descriptor: \n{}",
wallet.get_address(New)
);
// BDK also has its own `Policy` structure to represent the spending condition in a more
// human-readable JSON format.
let spending_policy = wallet.policies(KeychainKind::External)?;
info!(
"The BDK spending policy: \n{}",
serde_json::to_string_pretty(&spending_policy)?
);
Ok(())
}

View File

@@ -6,20 +6,21 @@
// You may not use this file except in accordance with one or both of these
// licenses.
use anyhow::anyhow;
use bdk_wallet::bitcoin::bip32::DerivationPath;
use bdk_wallet::bitcoin::secp256k1::Secp256k1;
use bdk_wallet::bitcoin::Network;
use bdk_wallet::descriptor;
use bdk_wallet::descriptor::IntoWalletDescriptor;
use bdk_wallet::keys::bip39::{Language, Mnemonic, WordCount};
use bdk_wallet::keys::{GeneratableKey, GeneratedKey};
use bdk_wallet::miniscript::Tap;
use bdk::bitcoin::secp256k1::Secp256k1;
use bdk::bitcoin::util::bip32::DerivationPath;
use bdk::bitcoin::Network;
use bdk::descriptor;
use bdk::descriptor::IntoWalletDescriptor;
use bdk::keys::bip39::{Language, Mnemonic, WordCount};
use bdk::keys::{GeneratableKey, GeneratedKey};
use bdk::miniscript::Tap;
use bdk::Error as BDK_Error;
use std::error::Error;
use std::str::FromStr;
/// This example demonstrates how to generate a mnemonic phrase
/// using BDK and use that to generate a descriptor string.
fn main() -> Result<(), anyhow::Error> {
fn main() -> Result<(), Box<dyn Error>> {
let secp = Secp256k1::new();
// In this example we are generating a 12 words mnemonic phrase
@@ -27,14 +28,14 @@ fn main() -> Result<(), anyhow::Error> {
// using their respective `WordCount` variant.
let mnemonic: GeneratedKey<_, Tap> =
Mnemonic::generate((WordCount::Words12, Language::English))
.map_err(|_| anyhow!("Mnemonic generation error"))?;
.map_err(|_| BDK_Error::Generic("Mnemonic generation error".to_string()))?;
println!("Mnemonic phrase: {}", *mnemonic);
let mnemonic_with_passphrase = (mnemonic, None);
// define external and internal derivation key path
let external_path = DerivationPath::from_str("m/86h/1h/0h/0").unwrap();
let internal_path = DerivationPath::from_str("m/86h/1h/0h/1").unwrap();
let external_path = DerivationPath::from_str("m/86h/0h/0h/0").unwrap();
let internal_path = DerivationPath::from_str("m/86h/0h/0h/1").unwrap();
// generate external and internal descriptor from mnemonic
let (external_descriptor, ext_keymap) =

View File

@@ -9,14 +9,16 @@
// You may not use this file except in accordance with one or both of these
// licenses.
extern crate bdk_wallet;
extern crate bdk;
extern crate env_logger;
extern crate log;
use std::error::Error;
use bdk_wallet::bitcoin::Network;
use bdk_wallet::descriptor::{policy::BuildSatisfaction, ExtractPolicy, IntoWalletDescriptor};
use bdk_wallet::signer::SignersContainer;
use bdk::bitcoin::Network;
use bdk::descriptor::{policy::BuildSatisfaction, ExtractPolicy, IntoWalletDescriptor};
use bdk::wallet::signer::SignersContainer;
/// This example describes the use of the BDK's [`bdk_wallet::descriptor::policy`] module.
/// This example describes the use of the BDK's [`bdk::descriptor::policy`] module.
///
/// Policy is a higher-level, abstract representation of the wallet descriptor's spending conditions.
/// It is useful for expressing complex miniscript spending conditions in a more human-readable form.
@@ -27,6 +29,10 @@ use bdk_wallet::signer::SignersContainer;
/// one of the extended private keys.
fn main() -> Result<(), Box<dyn Error>> {
env_logger::init_from_env(
env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"),
);
let secp = bitcoin::secp256k1::Secp256k1::new();
// The descriptor used in the example
@@ -34,15 +40,15 @@ fn main() -> Result<(), Box<dyn Error>> {
let desc = "wsh(multi(2,tprv8ZgxMBicQKsPdpkqS7Eair4YxjcuuvDPNYmKX3sCniCf16tHEVrjjiSXEkFRnUH77yXc6ZcwHHcLNfjdi5qUvw3VDfgYiH5mNsj5izuiu2N/1/*,tpubD6NzVbkrYhZ4XHndKkuB8FifXm8r5FQHwrN6oZuWCz13qb93rtgKvD4PQsqC4HP4yhV3tA2fqr2RbY5mNXfM7RxXUoeABoDtsFUq2zJq6YK/1/*))";
// Use the descriptor string to derive the full descriptor and a keymap.
// The wallet descriptor can be used to create a new bdk_wallet::wallet.
// The wallet descriptor can be used to create a new bdk::wallet.
// While the `keymap` can be used to create a `SignerContainer`.
//
// The `SignerContainer` can sign for `PSBT`s.
// a `bdk_wallet::Wallet` internally uses these to handle transaction signing.
// a bdk::wallet internally uses these to handle transaction signing.
// But they can be used as independent tools also.
let (wallet_desc, keymap) = desc.into_wallet_descriptor(&secp, Network::Testnet)?;
println!("Example Descriptor for policy analysis : {}", wallet_desc);
log::info!("Example Descriptor for policy analysis : {}", wallet_desc);
// Create the signer with the keymap and descriptor.
let signers_container = SignersContainer::build(keymap, &wallet_desc, &secp);
@@ -54,7 +60,7 @@ fn main() -> Result<(), Box<dyn Error>> {
.extract_policy(&signers_container, BuildSatisfaction::None, &secp)?
.expect("We expect a policy");
println!("Derived Policy for the descriptor {:#?}", policy);
log::info!("Derived Policy for the descriptor {:#?}", policy);
Ok(())
}

View File

@@ -42,16 +42,22 @@ fn poly_mod(mut c: u64, val: u64) -> u64 {
c
}
/// Compute the checksum bytes of a descriptor, excludes any existing checksum in the descriptor string from the calculation
pub fn calc_checksum_bytes(mut desc: &str) -> Result<[u8; 8], DescriptorError> {
/// Computes the checksum bytes of a descriptor.
/// `exclude_hash = true` ignores all data after the first '#' (inclusive).
pub(crate) fn calc_checksum_bytes_internal(
mut desc: &str,
exclude_hash: bool,
) -> Result<[u8; 8], DescriptorError> {
let mut c = 1;
let mut cls = 0;
let mut clscount = 0;
let mut original_checksum = None;
if let Some(split) = desc.split_once('#') {
desc = split.0;
original_checksum = Some(split.1);
if exclude_hash {
if let Some(split) = desc.split_once('#') {
desc = split.0;
original_checksum = Some(split.1);
}
}
for ch in desc.as_bytes() {
@@ -89,10 +95,39 @@ pub fn calc_checksum_bytes(mut desc: &str) -> Result<[u8; 8], DescriptorError> {
Ok(checksum)
}
/// Compute the checksum bytes of a descriptor, excludes any existing checksum in the descriptor string from the calculation
pub fn calc_checksum_bytes(desc: &str) -> Result<[u8; 8], DescriptorError> {
calc_checksum_bytes_internal(desc, true)
}
/// Compute the checksum of a descriptor, excludes any existing checksum in the descriptor string from the calculation
pub fn calc_checksum(desc: &str) -> Result<String, DescriptorError> {
// unsafe is okay here as the checksum only uses bytes in `CHECKSUM_CHARSET`
calc_checksum_bytes(desc).map(|b| unsafe { String::from_utf8_unchecked(b.to_vec()) })
calc_checksum_bytes_internal(desc, true)
.map(|b| unsafe { String::from_utf8_unchecked(b.to_vec()) })
}
// TODO in release 0.25.0, remove get_checksum_bytes and get_checksum
// TODO in release 0.25.0, consolidate calc_checksum_bytes_internal into calc_checksum_bytes
/// Compute the checksum bytes of a descriptor
#[deprecated(
since = "0.24.0",
note = "Use new `calc_checksum_bytes` function which excludes any existing checksum in the descriptor string before calculating the checksum hash bytes. See https://github.com/bitcoindevkit/bdk/pull/765."
)]
pub fn get_checksum_bytes(desc: &str) -> Result<[u8; 8], DescriptorError> {
calc_checksum_bytes_internal(desc, false)
}
/// Compute the checksum of a descriptor
#[deprecated(
since = "0.24.0",
note = "Use new `calc_checksum` function which excludes any existing checksum in the descriptor string before calculating the checksum hash. See https://github.com/bitcoindevkit/bdk/pull/765."
)]
pub fn get_checksum(desc: &str) -> Result<String, DescriptorError> {
// unsafe is okay here as the checksum only uses bytes in `CHECKSUM_CHARSET`
calc_checksum_bytes_internal(desc, false)
.map(|b| unsafe { String::from_utf8_unchecked(b.to_vec()) })
}
#[cfg(test)]
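
For context, a brief sketch of how the surviving public functions above are meant to be called. This assumes the module is reachable at `bdk::descriptor::checksum`, as in released bdk 0.x; the descriptor string is an arbitrary test key reused from elsewhere in this diff.

```rust
use bdk::descriptor::checksum::calc_checksum;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let desc = "wpkh(tprv8ZgxMBicQKsPcx5nBGsR63Pe8KnRUqmbJNENAfGftF3yuXoMMoVJJcYeUw5eVkm9WBPjWYt6HMWYJNesB5HaNVBaFc1M6dRjWSYnmewUMYy/0/*)";

    // Compute the 8-character checksum and append it as "<desc>#<checksum>".
    let checksum = calc_checksum(desc)?;
    println!("{}#{}", desc, checksum);

    // `calc_checksum` excludes everything after the first '#' before hashing,
    // so recomputing over the suffixed string returns the same checksum.
    assert_eq!(calc_checksum(&format!("{}#{}", desc, checksum))?, checksum);
    Ok(())
}
```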

View File

@@ -274,13 +274,14 @@ macro_rules! impl_sortedmulti {
#[macro_export]
macro_rules! parse_tap_tree {
( @merge $tree_a:expr, $tree_b:expr) => {{
use $crate::alloc::sync::Arc;
use $crate::miniscript::descriptor::TapTree;
$tree_a
.and_then(|tree_a| Ok((tree_a, $tree_b?)))
.and_then(|((a_tree, mut a_keymap, a_networks), (b_tree, b_keymap, b_networks))| {
a_keymap.extend(b_keymap.into_iter());
Ok((TapTree::combine(a_tree, b_tree), a_keymap, $crate::keys::merge_networks(&a_networks, &b_networks)))
Ok((TapTree::Tree(Arc::new(a_tree), Arc::new(b_tree)), a_keymap, $crate::keys::merge_networks(&a_networks, &b_networks)))
})
}};
@@ -423,7 +424,7 @@ macro_rules! apply_modifier {
///
/// ```
/// # use std::str::FromStr;
/// let (my_descriptor, my_keys_map, networks) = bdk_wallet::descriptor!(sh(wsh(and_v(v:pk("cVt4o7BGAig1UXywgGSmARhxMdzP5qvQsxKkSsc1XEkw3tDTQFpy"),older(50)))))?;
/// let (my_descriptor, my_keys_map, networks) = bdk::descriptor!(sh(wsh(and_v(v:pk("cVt4o7BGAig1UXywgGSmARhxMdzP5qvQsxKkSsc1XEkw3tDTQFpy"),older(50)))))?;
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
///
@@ -444,7 +445,7 @@ macro_rules! apply_modifier {
/// bitcoin::PrivateKey::from_wif("cVt4o7BGAig1UXywgGSmARhxMdzP5qvQsxKkSsc1XEkw3tDTQFpy")?;
/// let my_timelock = 50;
///
/// let (descriptor_a, key_map_a, networks) = bdk_wallet::descriptor! {
/// let (descriptor_a, key_map_a, networks) = bdk::descriptor! {
/// wsh (
/// thresh(2, pk(my_key_1), s:pk(my_key_2), s:n:d:v:older(my_timelock))
/// )
@@ -452,12 +453,11 @@ macro_rules! apply_modifier {
///
/// #[rustfmt::skip]
/// let b_items = vec![
/// bdk_wallet::fragment!(pk(my_key_1))?,
/// bdk_wallet::fragment!(s:pk(my_key_2))?,
/// bdk_wallet::fragment!(s:n:d:v:older(my_timelock))?,
/// bdk::fragment!(pk(my_key_1))?,
/// bdk::fragment!(s:pk(my_key_2))?,
/// bdk::fragment!(s:n:d:v:older(my_timelock))?,
/// ];
/// let (descriptor_b, mut key_map_b, networks) =
/// bdk_wallet::descriptor!(wsh(thresh_vec(2, b_items)))?;
/// let (descriptor_b, mut key_map_b, networks) = bdk::descriptor!(wsh(thresh_vec(2, b_items)))?;
///
/// assert_eq!(descriptor_a, descriptor_b);
/// assert_eq!(key_map_a.len(), key_map_b.len());
@@ -476,7 +476,7 @@ macro_rules! apply_modifier {
/// let my_key_2 =
/// bitcoin::PrivateKey::from_wif("cVt4o7BGAig1UXywgGSmARhxMdzP5qvQsxKkSsc1XEkw3tDTQFpy")?;
///
/// let (descriptor, key_map, networks) = bdk_wallet::descriptor! {
/// let (descriptor, key_map, networks) = bdk::descriptor! {
/// wsh (
/// multi(2, my_key_1, my_key_2)
/// )
@@ -492,7 +492,7 @@ macro_rules! apply_modifier {
/// let my_key =
/// bitcoin::PrivateKey::from_wif("cVt4o7BGAig1UXywgGSmARhxMdzP5qvQsxKkSsc1XEkw3tDTQFpy")?;
///
/// let (descriptor, key_map, networks) = bdk_wallet::descriptor!(wpkh(my_key))?;
/// let (descriptor, key_map, networks) = bdk::descriptor!(wpkh(my_key))?;
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
///
@@ -516,14 +516,13 @@ macro_rules! descriptor {
use $crate::miniscript::descriptor::{Descriptor, DescriptorPublicKey};
$crate::impl_top_level_pk!(Pkh, $crate::miniscript::Legacy, $key)
.and_then(|(a, b, c)| Ok((a.map_err(|e| miniscript::Error::from(e))?, b, c)))
.map(|(a, b, c)| (Descriptor::<DescriptorPublicKey>::Pkh(a), b, c))
});
( wpkh ( $key:expr ) ) => ({
use $crate::miniscript::descriptor::{Descriptor, DescriptorPublicKey};
$crate::impl_top_level_pk!(Wpkh, $crate::miniscript::Segwitv0, $key)
.and_then(|(a, b, c)| Ok((a.map_err(|e| miniscript::Error::from(e))?, b, c)))
.and_then(|(a, b, c)| Ok((a?, b, c)))
.map(|(a, b, c)| (Descriptor::<DescriptorPublicKey>::Wpkh(a), b, c))
});
( sh ( wpkh ( $key:expr ) ) ) => ({
@@ -533,7 +532,7 @@ macro_rules! descriptor {
use $crate::miniscript::descriptor::{Descriptor, DescriptorPublicKey, Sh};
$crate::impl_top_level_pk!(Wpkh, $crate::miniscript::Segwitv0, $key)
.and_then(|(a, b, c)| Ok((a.map_err(|e| miniscript::Error::from(e))?, b, c)))
.and_then(|(a, b, c)| Ok((a?, b, c)))
.and_then(|(a, b, c)| Ok((Descriptor::<DescriptorPublicKey>::Sh(Sh::new_wpkh(a.into_inner())?), b, c)))
});
( sh ( $( $minisc:tt )* ) ) => ({
@@ -703,10 +702,10 @@ macro_rules! fragment {
$crate::keys::make_pkh($key, &secp)
});
( after ( $value:expr ) ) => ({
$crate::impl_leaf_opcode_value!(After, $crate::miniscript::AbsLockTime::from_consensus($value).expect("valid `AbsLockTime`"))
$crate::impl_leaf_opcode_value!(After, $crate::bitcoin::PackedLockTime($value)) // TODO!! https://github.com/rust-bitcoin/rust-bitcoin/issues/1302
});
( older ( $value:expr ) ) => ({
$crate::impl_leaf_opcode_value!(Older, $crate::miniscript::RelLockTime::from_consensus($value).expect("valid `RelLockTime`")) // TODO!!
$crate::impl_leaf_opcode_value!(Older, $crate::bitcoin::Sequence($value)) // TODO!!
});
( sha256 ( $hash:expr ) ) => ({
$crate::impl_leaf_opcode_value!(Sha256, $hash)
@@ -757,8 +756,7 @@ macro_rules! fragment {
(keys_acc, net_acc)
});
let thresh = $crate::miniscript::Threshold::new($thresh, items).expect("valid threshold and pks collection");
$crate::impl_leaf_opcode_value!(Thresh, thresh)
$crate::impl_leaf_opcode_value_two!(Thresh, $thresh, items)
.map(|(minisc, _, _)| (minisc, key_maps, valid_networks))
});
( thresh ( $thresh:expr, $( $inner:tt )* ) ) => ({
@@ -770,12 +768,7 @@ macro_rules! fragment {
( multi_vec ( $thresh:expr, $keys:expr ) ) => ({
let secp = $crate::bitcoin::secp256k1::Secp256k1::new();
let fun = |k, pks| {
let thresh = $crate::miniscript::Threshold::new(k, pks).expect("valid threshold and pks collection");
$crate::miniscript::Terminal::Multi(thresh)
};
$crate::keys::make_multi($thresh, fun, $keys, &secp)
$crate::keys::make_multi($thresh, $crate::miniscript::Terminal::Multi, $keys, &secp)
});
( multi ( $thresh:expr $(, $key:expr )+ ) ) => ({
$crate::group_multi_keys!( $( $key ),* )
@@ -784,12 +777,7 @@ macro_rules! fragment {
( multi_a_vec ( $thresh:expr, $keys:expr ) ) => ({
let secp = $crate::bitcoin::secp256k1::Secp256k1::new();
let fun = |k, pks| {
let thresh = $crate::miniscript::Threshold::new(k, pks).expect("valid threshold and pks collection");
$crate::miniscript::Terminal::MultiA(thresh)
};
$crate::keys::make_multi($thresh, fun, $keys, &secp)
$crate::keys::make_multi($thresh, $crate::miniscript::Terminal::MultiA, $keys, &secp)
});
( multi_a ( $thresh:expr $(, $key:expr )+ ) ) => ({
$crate::group_multi_keys!( $( $key ),* )
@@ -808,6 +796,7 @@ macro_rules! fragment {
#[cfg(test)]
mod test {
use alloc::string::ToString;
use bitcoin::hashes::hex::ToHex;
use bitcoin::secp256k1::Secp256k1;
use miniscript::descriptor::{DescriptorPublicKey, KeyMap};
use miniscript::{Descriptor, Legacy, Segwitv0};
@@ -816,8 +805,8 @@ mod test {
use crate::descriptor::{DescriptorError, DescriptorMeta};
use crate::keys::{DescriptorKey, IntoDescriptorKey, ValidNetworks};
use bitcoin::bip32;
use bitcoin::Network::{Bitcoin, Regtest, Signet, Testnet};
use bitcoin::network::constants::Network::{Bitcoin, Regtest, Signet, Testnet};
use bitcoin::util::bip32;
use bitcoin::PrivateKey;
// test the descriptor!() macro
@@ -833,15 +822,18 @@ mod test {
assert_eq!(desc.is_witness(), is_witness);
assert_eq!(!desc.has_wildcard(), is_fixed);
for i in 0..expected.len() {
let child_desc = desc
.at_derivation_index(i as u32)
.expect("i is not hardened");
let index = i as u32;
let child_desc = if !desc.has_wildcard() {
desc.at_derivation_index(0)
} else {
desc.at_derivation_index(index)
};
let address = child_desc.address(Regtest);
if let Ok(address) = address {
assert_eq!(address.to_string(), *expected.get(i).unwrap());
} else {
let script = child_desc.script_pubkey();
assert_eq!(script.to_hex_string(), *expected.get(i).unwrap());
assert_eq!(script.to_hex().as_str(), *expected.get(i).unwrap());
}
}
}
@@ -947,7 +939,7 @@ mod test {
#[test]
fn test_bip32_legacy_descriptors() {
let xprv = bip32::Xpriv::from_str("tprv8ZgxMBicQKsPcx5nBGsR63Pe8KnRUqmbJNENAfGftF3yuXoMMoVJJcYeUw5eVkm9WBPjWYt6HMWYJNesB5HaNVBaFc1M6dRjWSYnmewUMYy").unwrap();
let xprv = bip32::ExtendedPrivKey::from_str("tprv8ZgxMBicQKsPcx5nBGsR63Pe8KnRUqmbJNENAfGftF3yuXoMMoVJJcYeUw5eVkm9WBPjWYt6HMWYJNesB5HaNVBaFc1M6dRjWSYnmewUMYy").unwrap();
let path = bip32::DerivationPath::from_str("m/0").unwrap();
let desc_key = (xprv, path.clone()).into_descriptor_key().unwrap();
@@ -992,7 +984,7 @@ mod test {
#[test]
fn test_bip32_segwitv0_descriptors() {
let xprv = bip32::Xpriv::from_str("tprv8ZgxMBicQKsPcx5nBGsR63Pe8KnRUqmbJNENAfGftF3yuXoMMoVJJcYeUw5eVkm9WBPjWYt6HMWYJNesB5HaNVBaFc1M6dRjWSYnmewUMYy").unwrap();
let xprv = bip32::ExtendedPrivKey::from_str("tprv8ZgxMBicQKsPcx5nBGsR63Pe8KnRUqmbJNENAfGftF3yuXoMMoVJJcYeUw5eVkm9WBPjWYt6HMWYJNesB5HaNVBaFc1M6dRjWSYnmewUMYy").unwrap();
let path = bip32::DerivationPath::from_str("m/0").unwrap();
let desc_key = (xprv, path.clone()).into_descriptor_key().unwrap();
@@ -1049,10 +1041,10 @@ mod test {
#[test]
fn test_dsl_sortedmulti() {
let key_1 = bip32::Xpriv::from_str("tprv8ZgxMBicQKsPcx5nBGsR63Pe8KnRUqmbJNENAfGftF3yuXoMMoVJJcYeUw5eVkm9WBPjWYt6HMWYJNesB5HaNVBaFc1M6dRjWSYnmewUMYy").unwrap();
let key_1 = bip32::ExtendedPrivKey::from_str("tprv8ZgxMBicQKsPcx5nBGsR63Pe8KnRUqmbJNENAfGftF3yuXoMMoVJJcYeUw5eVkm9WBPjWYt6HMWYJNesB5HaNVBaFc1M6dRjWSYnmewUMYy").unwrap();
let path_1 = bip32::DerivationPath::from_str("m/0").unwrap();
let key_2 = bip32::Xpriv::from_str("tprv8ZgxMBicQKsPegBHHnq7YEgM815dG24M2Jk5RVqipgDxF1HJ1tsnT815X5Fd5FRfMVUs8NZs9XCb6y9an8hRPThnhfwfXJ36intaekySHGF").unwrap();
let key_2 = bip32::ExtendedPrivKey::from_str("tprv8ZgxMBicQKsPegBHHnq7YEgM815dG24M2Jk5RVqipgDxF1HJ1tsnT815X5Fd5FRfMVUs8NZs9XCb6y9an8hRPThnhfwfXJ36intaekySHGF").unwrap();
let path_2 = bip32::DerivationPath::from_str("m/1").unwrap();
let desc_key1 = (key_1, path_1);
@@ -1108,7 +1100,7 @@ mod test {
// - verify the valid_networks returned is correctly computed based on the keys present in the descriptor
#[test]
fn test_valid_networks() {
let xprv = bip32::Xpriv::from_str("tprv8ZgxMBicQKsPcx5nBGsR63Pe8KnRUqmbJNENAfGftF3yuXoMMoVJJcYeUw5eVkm9WBPjWYt6HMWYJNesB5HaNVBaFc1M6dRjWSYnmewUMYy").unwrap();
let xprv = bip32::ExtendedPrivKey::from_str("tprv8ZgxMBicQKsPcx5nBGsR63Pe8KnRUqmbJNENAfGftF3yuXoMMoVJJcYeUw5eVkm9WBPjWYt6HMWYJNesB5HaNVBaFc1M6dRjWSYnmewUMYy").unwrap();
let path = bip32::DerivationPath::from_str("m/0").unwrap();
let desc_key = (xprv, path).into_descriptor_key().unwrap();
@@ -1118,7 +1110,7 @@ mod test {
[Testnet, Regtest, Signet].iter().cloned().collect()
);
let xprv = bip32::Xpriv::from_str("xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi").unwrap();
let xprv = bip32::ExtendedPrivKey::from_str("xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi").unwrap();
let path = bip32::DerivationPath::from_str("m/10/20/30/40").unwrap();
let desc_key = (xprv, path).into_descriptor_key().unwrap();
@@ -1131,15 +1123,15 @@ mod test {
fn test_key_maps_merged() {
let secp = Secp256k1::new();
let xprv1 = bip32::Xpriv::from_str("tprv8ZgxMBicQKsPcx5nBGsR63Pe8KnRUqmbJNENAfGftF3yuXoMMoVJJcYeUw5eVkm9WBPjWYt6HMWYJNesB5HaNVBaFc1M6dRjWSYnmewUMYy").unwrap();
let xprv1 = bip32::ExtendedPrivKey::from_str("tprv8ZgxMBicQKsPcx5nBGsR63Pe8KnRUqmbJNENAfGftF3yuXoMMoVJJcYeUw5eVkm9WBPjWYt6HMWYJNesB5HaNVBaFc1M6dRjWSYnmewUMYy").unwrap();
let path1 = bip32::DerivationPath::from_str("m/0").unwrap();
let desc_key1 = (xprv1, path1.clone()).into_descriptor_key().unwrap();
let xprv2 = bip32::Xpriv::from_str("tprv8ZgxMBicQKsPegBHHnq7YEgM815dG24M2Jk5RVqipgDxF1HJ1tsnT815X5Fd5FRfMVUs8NZs9XCb6y9an8hRPThnhfwfXJ36intaekySHGF").unwrap();
let xprv2 = bip32::ExtendedPrivKey::from_str("tprv8ZgxMBicQKsPegBHHnq7YEgM815dG24M2Jk5RVqipgDxF1HJ1tsnT815X5Fd5FRfMVUs8NZs9XCb6y9an8hRPThnhfwfXJ36intaekySHGF").unwrap();
let path2 = bip32::DerivationPath::from_str("m/2147483647'/0").unwrap();
let desc_key2 = (xprv2, path2.clone()).into_descriptor_key().unwrap();
let xprv3 = bip32::Xpriv::from_str("tprv8ZgxMBicQKsPdZXrcHNLf5JAJWFAoJ2TrstMRdSKtEggz6PddbuSkvHKM9oKJyFgZV1B7rw8oChspxyYbtmEXYyg1AjfWbL3ho3XHDpHRZf").unwrap();
let xprv3 = bip32::ExtendedPrivKey::from_str("tprv8ZgxMBicQKsPdZXrcHNLf5JAJWFAoJ2TrstMRdSKtEggz6PddbuSkvHKM9oKJyFgZV1B7rw8oChspxyYbtmEXYyg1AjfWbL3ho3XHDpHRZf").unwrap();
let path3 = bip32::DerivationPath::from_str("m/10/20/30/40").unwrap();
let desc_key3 = (xprv3, path3.clone()).into_descriptor_key().unwrap();
@@ -1163,7 +1155,7 @@ mod test {
#[test]
fn test_script_context_validation() {
// this compiles
let xprv = bip32::Xpriv::from_str("tprv8ZgxMBicQKsPcx5nBGsR63Pe8KnRUqmbJNENAfGftF3yuXoMMoVJJcYeUw5eVkm9WBPjWYt6HMWYJNesB5HaNVBaFc1M6dRjWSYnmewUMYy").unwrap();
let xprv = bip32::ExtendedPrivKey::from_str("tprv8ZgxMBicQKsPcx5nBGsR63Pe8KnRUqmbJNENAfGftF3yuXoMMoVJJcYeUw5eVkm9WBPjWYt6HMWYJNesB5HaNVBaFc1M6dRjWSYnmewUMYy").unwrap();
let path = bip32::DerivationPath::from_str("m/0").unwrap();
let desc_key: DescriptorKey<Legacy> = (xprv, path).into_descriptor_key().unwrap();
@@ -1186,7 +1178,9 @@ mod test {
}
#[test]
#[should_panic(expected = "Miniscript(ContextError(UncompressedKeysNotAllowed))")]
#[should_panic(
expected = "Miniscript(ContextError(CompressedOnly(\"04b4632d08485ff1df2db55b9dafd23347d1c47a457072a1e87be26896549a87378ec38ff91d43e8c2092ebda601780485263da089465619e0358a5c1be7ac91f4\")))"
)]
fn test_dsl_miniscript_checks() {
let mut uncompressed_pk =
PrivateKey::from_wif("L5EZftvrYaSudiozVRzTqLcHLNDoVn7H5HSfM9BAN6tMJX8oTWz6").unwrap();

View File

@@ -10,10 +10,9 @@
// licenses.
//! Descriptor errors
use core::fmt;
/// Errors related to the parsing and usage of descriptors
#[derive(Debug, PartialEq)]
#[derive(Debug)]
pub enum Error {
/// Invalid HD Key path, such as having a wildcard but a length != 1
InvalidHdKeyPath,
@@ -21,8 +20,7 @@ pub enum Error {
InvalidDescriptorChecksum,
/// The descriptor contains hardened derivation steps on public extended keys
HardenedDerivationXpub,
/// The descriptor contains multipath keys
MultiPath,
/// Error thrown while working with [`keys`](crate::keys)
Key(crate::keys::KeyError),
/// Error while extracting and manipulating policies
@@ -32,17 +30,15 @@ pub enum Error {
InvalidDescriptorCharacter(u8),
/// BIP32 error
Bip32(bitcoin::bip32::Error),
Bip32(bitcoin::util::bip32::Error),
/// Error during base58 decoding
Base58(bitcoin::base58::Error),
Base58(bitcoin::util::base58::Error),
/// Key-related error
Pk(bitcoin::key::ParsePublicKeyError),
Pk(bitcoin::util::key::Error),
/// Miniscript error
Miniscript(miniscript::Error),
/// Hex decoding error
Hex(bitcoin::hex::HexToBytesError),
/// The provided wallet descriptors are identical
ExternalAndInternalAreTheSame,
Hex(bitcoin::hashes::hex::Error),
}
impl From<crate::keys::KeyError> for Error {
@@ -55,8 +51,8 @@ impl From<crate::keys::KeyError> for Error {
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::InvalidHdKeyPath => write!(f, "Invalid HD key path"),
Self::InvalidDescriptorChecksum => {
@@ -66,10 +62,6 @@ impl fmt::Display for Error {
f,
"The descriptor contains hardened derivation steps on public extended keys"
),
Self::MultiPath => write!(
f,
"The descriptor contains multipath keys, which are not supported yet"
),
Self::Key(err) => write!(f, "Key error: {}", err),
Self::Policy(err) => write!(f, "Policy error: {}", err),
Self::InvalidDescriptorCharacter(char) => {
@@ -80,9 +72,6 @@ impl fmt::Display for Error {
Self::Pk(err) => write!(f, "Key-related error: {}", err),
Self::Miniscript(err) => write!(f, "Miniscript error: {}", err),
Self::Hex(err) => write!(f, "Hex decoding error: {}", err),
Self::ExternalAndInternalAreTheSame => {
write!(f, "External and internal descriptors are the same")
}
}
}
}
@@ -90,38 +79,9 @@ impl fmt::Display for Error {
#[cfg(feature = "std")]
impl std::error::Error for Error {}
impl From<bitcoin::bip32::Error> for Error {
fn from(err: bitcoin::bip32::Error) -> Self {
Error::Bip32(err)
}
}
impl From<bitcoin::base58::Error> for Error {
fn from(err: bitcoin::base58::Error) -> Self {
Error::Base58(err)
}
}
impl From<bitcoin::key::ParsePublicKeyError> for Error {
fn from(err: bitcoin::key::ParsePublicKeyError) -> Self {
Error::Pk(err)
}
}
impl From<miniscript::Error> for Error {
fn from(err: miniscript::Error) -> Self {
Error::Miniscript(err)
}
}
impl From<bitcoin::hex::HexToBytesError> for Error {
fn from(err: bitcoin::hex::HexToBytesError) -> Self {
Error::Hex(err)
}
}
impl From<crate::descriptor::policy::PolicyError> for Error {
fn from(err: crate::descriptor::policy::PolicyError) -> Self {
Error::Policy(err)
}
}
impl_error!(bitcoin::util::bip32::Error, Bip32);
impl_error!(bitcoin::util::base58::Error, Base58);
impl_error!(bitcoin::util::key::Error, Pk);
impl_error!(miniscript::Error, Miniscript);
impl_error!(bitcoin::hashes::hex::Error, Hex);
impl_error!(crate::descriptor::policy::PolicyError, Policy);
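A hedged usage sketch for the conversions above (whether spelled as explicit `From` impls or via the `impl_error!` macro on the older side): they exist so callers can bubble lower-level failures up with `?`. Written against the newer `bitcoin::bip32` paths; `parse_xpub` is a hypothetical helper, not part of the crate:

use core::str::FromStr;

fn parse_xpub(s: &str) -> Result<bitcoin::bip32::Xpub, Error> {
    // bitcoin::bip32::Error converts into Error::Bip32 automatically.
    Ok(bitcoin::bip32::Xpub::from_str(s)?)
}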


@@ -18,17 +18,17 @@ use crate::collections::BTreeMap;
use alloc::string::String;
use alloc::vec::Vec;
use bitcoin::bip32::{ChildNumber, DerivationPath, Fingerprint, KeySource, Xpub};
use bitcoin::{key::XOnlyPublicKey, secp256k1, PublicKey};
use bitcoin::{psbt, taproot};
use bitcoin::util::bip32::{ChildNumber, DerivationPath, ExtendedPubKey, Fingerprint, KeySource};
use bitcoin::util::{psbt, taproot};
use bitcoin::{secp256k1, PublicKey, XOnlyPublicKey};
use bitcoin::{Network, TxOut};
use miniscript::descriptor::{
DefiniteDescriptorKey, DescriptorMultiXKey, DescriptorSecretKey, DescriptorType,
DescriptorXKey, InnerXKey, KeyMap, SinglePubKey, Wildcard,
DefiniteDescriptorKey, DescriptorSecretKey, DescriptorType, InnerXKey, SinglePubKey,
};
pub use miniscript::{
Descriptor, DescriptorPublicKey, Legacy, Miniscript, ScriptContext, Segwitv0,
descriptor::DescriptorXKey, descriptor::KeyMap, descriptor::Wildcard, Descriptor,
DescriptorPublicKey, Legacy, Miniscript, ScriptContext, Segwitv0,
};
use miniscript::{ForEachKey, MiniscriptKey, TranslatePk};
@@ -59,16 +59,16 @@ pub type DerivedDescriptor = Descriptor<DefiniteDescriptorKey>;
/// Alias for the type of maps that represent derivation paths in a [`psbt::Input`] or
/// [`psbt::Output`]
///
/// [`psbt::Input`]: bitcoin::psbt::Input
/// [`psbt::Output`]: bitcoin::psbt::Output
/// [`psbt::Input`]: bitcoin::util::psbt::Input
/// [`psbt::Output`]: bitcoin::util::psbt::Output
pub type HdKeyPaths = BTreeMap<secp256k1::PublicKey, KeySource>;
/// Alias for the type of maps that represent taproot key origins in a [`psbt::Input`] or
/// [`psbt::Output`]
///
/// [`psbt::Input`]: bitcoin::psbt::Input
/// [`psbt::Output`]: bitcoin::psbt::Output
pub type TapKeyOrigins = BTreeMap<XOnlyPublicKey, (Vec<taproot::TapLeafHash>, KeySource)>;
/// [`psbt::Input`]: bitcoin::util::psbt::Input
/// [`psbt::Output`]: bitcoin::util::psbt::Output
pub type TapKeyOrigins = BTreeMap<bitcoin::XOnlyPublicKey, (Vec<taproot::TapLeafHash>, KeySource)>;
/// Trait for types which can be converted into an [`ExtendedDescriptor`] and a [`KeyMap`] usable by a wallet in a specific [`Network`]
pub trait IntoWalletDescriptor {
@@ -112,16 +112,6 @@ impl IntoWalletDescriptor for &String {
}
}
impl IntoWalletDescriptor for String {
fn into_wallet_descriptor(
self,
secp: &SecpCtx,
network: Network,
) -> Result<(ExtendedDescriptor, KeyMap), DescriptorError> {
self.as_str().into_wallet_descriptor(secp, network)
}
}
impl IntoWalletDescriptor for ExtendedDescriptor {
fn into_wallet_descriptor(
self,
@@ -146,10 +136,14 @@ impl IntoWalletDescriptor for (ExtendedDescriptor, KeyMap) {
network: Network,
}
impl<'s, 'd> miniscript::Translator<DescriptorPublicKey, String, DescriptorError>
impl<'s, 'd>
miniscript::Translator<DescriptorPublicKey, miniscript::DummyKey, DescriptorError>
for Translator<'s, 'd>
{
fn pk(&mut self, pk: &DescriptorPublicKey) -> Result<String, DescriptorError> {
fn pk(
&mut self,
pk: &DescriptorPublicKey,
) -> Result<miniscript::DummyKey, DescriptorError> {
let secp = &self.secp;
let (_, _, networks) = if self.descriptor.is_taproot() {
@@ -167,7 +161,7 @@ impl IntoWalletDescriptor for (ExtendedDescriptor, KeyMap) {
};
if networks.contains(&self.network) {
Ok(Default::default())
Ok(miniscript::DummyKey)
} else {
Err(DescriptorError::Key(KeyError::InvalidNetwork))
}
@@ -175,40 +169,35 @@ impl IntoWalletDescriptor for (ExtendedDescriptor, KeyMap) {
fn sha256(
&mut self,
_sha256: &<DescriptorPublicKey as MiniscriptKey>::Sha256,
) -> Result<String, DescriptorError> {
) -> Result<miniscript::DummySha256Hash, DescriptorError> {
Ok(Default::default())
}
fn hash256(
&mut self,
_hash256: &<DescriptorPublicKey as MiniscriptKey>::Hash256,
) -> Result<String, DescriptorError> {
) -> Result<miniscript::DummyHash256Hash, DescriptorError> {
Ok(Default::default())
}
fn ripemd160(
&mut self,
_ripemd160: &<DescriptorPublicKey as MiniscriptKey>::Ripemd160,
) -> Result<String, DescriptorError> {
) -> Result<miniscript::DummyRipemd160Hash, DescriptorError> {
Ok(Default::default())
}
fn hash160(
&mut self,
_hash160: &<DescriptorPublicKey as MiniscriptKey>::Hash160,
) -> Result<String, DescriptorError> {
) -> Result<miniscript::DummyHash160Hash, DescriptorError> {
Ok(Default::default())
}
}
// check the network for the keys
use miniscript::TranslateErr;
match self.0.translate_pk(&mut Translator {
self.0.translate_pk(&mut Translator {
secp,
network,
descriptor: &self.0,
}) {
Ok(_) => {}
Err(TranslateErr::TranslatorErr(e)) => return Err(e),
Err(TranslateErr::OuterError(e)) => return Err(e.into()),
}
})?;
Ok(self)
}
@@ -239,7 +228,7 @@ impl IntoWalletDescriptor for DescriptorTemplateOut {
let pk = match pk {
DescriptorPublicKey::XPub(ref xpub) => {
let mut xpub = xpub.clone();
xpub.xkey.network = self.network.into();
xpub.xkey.network = self.network;
DescriptorPublicKey::XPub(xpub)
}
@@ -262,23 +251,18 @@ impl IntoWalletDescriptor for DescriptorTemplateOut {
}
// fixup the network for keys that need it in the descriptor
use miniscript::TranslateErr;
let translated = match desc.translate_pk(&mut Translator { network }) {
Ok(descriptor) => descriptor,
Err(TranslateErr::TranslatorErr(e)) => return Err(e),
Err(TranslateErr::OuterError(e)) => return Err(e.into()),
};
let translated = desc.translate_pk(&mut Translator { network })?;
// ...and in the key map
let fixed_keymap = keymap
.into_iter()
.map(|(mut k, mut v)| {
match (&mut k, &mut v) {
(DescriptorPublicKey::XPub(xpub), DescriptorSecretKey::XPrv(xprv)) => {
xpub.xkey.network = network.into();
xprv.xkey.network = network.into();
xpub.xkey.network = network;
xprv.xkey.network = network;
}
(_, DescriptorSecretKey::Single(key)) => {
key.key.network = network.into();
key.key.network = network;
}
_ => {}
}
@@ -291,10 +275,15 @@ impl IntoWalletDescriptor for DescriptorTemplateOut {
}
}
/// Extra checks for [`ExtendedDescriptor`].
pub(crate) fn check_wallet_descriptor(
descriptor: &Descriptor<DescriptorPublicKey>,
) -> Result<(), DescriptorError> {
/// Wrapper for `IntoWalletDescriptor` that performs additional checks on the keys contained in the
/// descriptor
pub(crate) fn into_wallet_descriptor_checked<T: IntoWalletDescriptor>(
inner: T,
secp: &SecpCtx,
network: Network,
) -> Result<(ExtendedDescriptor, KeyMap), DescriptorError> {
let (descriptor, keymap) = inner.into_wallet_descriptor(secp, network)?;
// Ensure the keys don't contain any hardened derivation steps or hardened wildcards
let descriptor_contains_hardened_steps = descriptor.for_any_key(|k| {
if let DescriptorPublicKey::XPub(DescriptorXKey {
@@ -313,15 +302,11 @@ pub(crate) fn check_wallet_descriptor(
return Err(DescriptorError::HardenedDerivationXpub);
}
if descriptor.is_multipath() {
return Err(DescriptorError::MultiPath);
}
// Run miniscript's sanity check, which will look for duplicated keys and other potential
// issues
descriptor.sanity_check()?;
Ok(())
Ok((descriptor, keymap))
}
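A usage sketch mirroring the `test_check_wallet_descriptor` test further down, written against the `check_wallet_descriptor` side of the diff (a crate-internal API, so shown test-style):

let secp = bitcoin::secp256k1::Secp256k1::new();
let (descriptor, _keymap) = "wpkh(tpubD6NzVbkrYhZ4XHndKkuB8FifXm8r5FQHwrN6oZuWCz13qb93rtgKvD4PQsqC4HP4yhV3tA2fqr2RbY5mNXfM7RxXUoeABoDtsFUq2zJq6YK/0/*)"
    .into_wallet_descriptor(&secp, Network::Testnet)
    .unwrap();
// Parsing already succeeded, so what remains are the wallet-level rules:
// hardened steps on public xkeys, multipath keys, and miniscript's
// sanity check.
check_wallet_descriptor(&descriptor).unwrap();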
#[doc(hidden)]
@@ -355,18 +340,6 @@ pub(crate) trait XKeyUtils {
fn root_fingerprint(&self, secp: &SecpCtx) -> Fingerprint;
}
impl<T> XKeyUtils for DescriptorMultiXKey<T>
where
T: InnerXKey,
{
fn root_fingerprint(&self, secp: &SecpCtx) -> Fingerprint {
match self.origin {
Some((fingerprint, _)) => fingerprint,
None => self.xkey.xkey_fingerprint(secp),
}
}
}
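// Illustrative note (inferred from the impls here): a key annotated with an
// origin such as ([c55b303f/44'/1'/0'], xpub...) reports c55b303f as its
// root fingerprint, while a key without origin metadata falls back to the
// fingerprint computed from the extended key itself.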
impl<T> XKeyUtils for DescriptorXKey<T>
where
T: InnerXKey,
@@ -382,7 +355,7 @@ where
pub(crate) trait DescriptorMeta {
fn is_witness(&self) -> bool;
fn is_taproot(&self) -> bool;
fn get_extended_keys(&self) -> Vec<DescriptorXKey<Xpub>>;
fn get_extended_keys(&self) -> Vec<DescriptorXKey<ExtendedPubKey>>;
fn derive_from_hd_keypaths(
&self,
hd_keypaths: &HdKeyPaths,
@@ -423,7 +396,7 @@ impl DescriptorMeta for ExtendedDescriptor {
self.desc_type() == DescriptorType::Tr
}
fn get_extended_keys(&self) -> Vec<DescriptorXKey<Xpub>> {
fn get_extended_keys(&self) -> Vec<DescriptorXKey<ExtendedPubKey>> {
let mut answer = Vec::new();
self.for_each_key(|pk| {
@@ -443,20 +416,21 @@ impl DescriptorMeta for ExtendedDescriptor {
secp: &SecpCtx,
) -> Option<DerivedDescriptor> {
// Ensure that deriving `xpub` with `path` yields `expected`
let verify_key =
|xpub: &DescriptorXKey<Xpub>, path: &DerivationPath, expected: &SinglePubKey| {
let derived = xpub
.xkey
.derive_pub(secp, path)
.expect("The path should never contain hardened derivation steps")
.public_key;
let verify_key = |xpub: &DescriptorXKey<ExtendedPubKey>,
path: &DerivationPath,
expected: &SinglePubKey| {
let derived = xpub
.xkey
.derive_pub(secp, path)
.expect("The path should never contain hardened derivation steps")
.public_key;
match expected {
SinglePubKey::FullKey(pk) if &PublicKey::new(derived) == pk => true,
SinglePubKey::XOnly(pk) if &XOnlyPublicKey::from(derived) == pk => true,
_ => false,
}
};
match expected {
SinglePubKey::FullKey(pk) if &PublicKey::new(derived) == pk => true,
SinglePubKey::XOnly(pk) if &XOnlyPublicKey::from(derived) == pk => true,
_ => false,
}
};
let mut path_found = None;
@@ -492,6 +466,11 @@ impl DescriptorMeta for ExtendedDescriptor {
) {
Some(derive_path)
} else {
log::debug!(
"Key `{}` derived with {} yields an unexpected key",
root_fingerprint,
derive_path
);
None
}
});
@@ -515,10 +494,7 @@ impl DescriptorMeta for ExtendedDescriptor {
false
});
path_found.map(|path| {
self.at_derivation_index(path)
.expect("We ignore hardened wildcards")
})
path_found.map(|path| self.at_derivation_index(path))
}
fn derive_from_hd_keypaths(
@@ -569,7 +545,7 @@ impl DescriptorMeta for ExtendedDescriptor {
return None;
}
let descriptor = self.at_derivation_index(0).expect("0 is not hardened");
let descriptor = self.at_derivation_index(0);
match descriptor.desc_type() {
// TODO: add pk() here
DescriptorType::Pkh
@@ -609,10 +585,11 @@ mod test {
use core::str::FromStr;
use assert_matches::assert_matches;
use bitcoin::hex::FromHex;
use bitcoin::consensus::encode::deserialize;
use bitcoin::hashes::hex::FromHex;
use bitcoin::secp256k1::Secp256k1;
use bitcoin::{bip32, Psbt};
use bitcoin::{NetworkKind, ScriptBuf};
use bitcoin::util::{bip32, psbt};
use bitcoin::Script;
use super::*;
use crate::psbt::PsbtUtils;
@@ -623,7 +600,7 @@ mod test {
"wpkh(02b4632d08485ff1df2db55b9dafd23347d1c47a457072a1e87be26896549a8737)",
)
.unwrap();
let psbt = Psbt::deserialize(
let psbt: psbt::PartiallySignedTransaction = deserialize(
&Vec::<u8>::from_hex(
"70736274ff010052010000000162307be8e431fbaff807cdf9cdc3fde44d7402\
11bc8342c31ffd6ec11fe35bcc0100000000ffffffff01328601000000000016\
@@ -646,7 +623,7 @@ mod test {
"pkh([0f056943/44h/0h/0h]tpubDDpWvmUrPZrhSPmUzCMBHffvC3HyMAPnWDSAQNBTnj1iZeJa7BZQEttFiP4DS4GCcXQHezdXhn86Hj6LHX5EDstXPWrMaSneRWM8yUf6NFd/10/*)",
)
.unwrap();
let psbt = Psbt::deserialize(
let psbt: psbt::PartiallySignedTransaction = deserialize(
&Vec::<u8>::from_hex(
"70736274ff010053010000000145843b86be54a3cd8c9e38444e1162676c00df\
e7964122a70df491ea12fd67090100000000ffffffff01c19598000000000017\
@@ -677,7 +654,7 @@ mod test {
"wsh(and_v(v:pk(03b6633fef2397a0a9de9d7b6f23aef8368a6e362b0581f0f0af70d5ecfd254b14),older(6)))",
)
.unwrap();
let psbt = Psbt::deserialize(
let psbt: psbt::PartiallySignedTransaction = deserialize(
&Vec::<u8>::from_hex(
"70736274ff01005302000000011c8116eea34408ab6529223c9a176606742207\
67a1ff1d46a6e3c4a88243ea6e01000000000600000001109698000000000017\
@@ -701,7 +678,7 @@ mod test {
"sh(and_v(v:pk(021403881a5587297818fcaf17d239cefca22fce84a45b3b1d23e836c4af671dbb),after(630000)))",
)
.unwrap();
let psbt = Psbt::deserialize(
let psbt: psbt::PartiallySignedTransaction = deserialize(
&Vec::<u8>::from_hex(
"70736274ff0100530100000001bc8c13df445dfadcc42afa6dc841f85d22b01d\
a6270ebf981740f4b7b1d800390000000000feffffff01ba9598000000000017\
@@ -731,7 +708,7 @@ mod test {
let secp = Secp256k1::new();
let xprv = bip32::Xpriv::from_str("xprv9s21ZrQH143K3c3gF1DUWpWNr2SG2XrG8oYPpqYh7hoWsJy9NjabErnzriJPpnGHyKz5NgdXmq1KVbqS1r4NXdCoKitWg5e86zqXHa8kxyB").unwrap();
let xprv = bip32::ExtendedPrivKey::from_str("xprv9s21ZrQH143K3c3gF1DUWpWNr2SG2XrG8oYPpqYh7hoWsJy9NjabErnzriJPpnGHyKz5NgdXmq1KVbqS1r4NXdCoKitWg5e86zqXHa8kxyB").unwrap();
let path = bip32::DerivationPath::from_str("m/0").unwrap();
// here `to_descriptor_key` will set the valid networks for the key to only mainnet, since
@@ -748,9 +725,9 @@ mod test {
.unwrap();
let mut xprv_testnet = xprv;
xprv_testnet.network = NetworkKind::Test;
xprv_testnet.network = Network::Testnet;
let xpub_testnet = bip32::Xpub::from_priv(&secp, &xprv_testnet);
let xpub_testnet = bip32::ExtendedPubKey::from_priv(&secp, &xprv_testnet);
let desc_pubkey = DescriptorPublicKey::XPub(DescriptorXKey {
xkey: xpub_testnet,
origin: None,
@@ -840,7 +817,7 @@ mod test {
fn test_descriptor_from_str_from_output_of_macro() {
let secp = Secp256k1::new();
let tpub = bip32::Xpub::from_str("tpubD6NzVbkrYhZ4XHndKkuB8FifXm8r5FQHwrN6oZuWCz13qb93rtgKvD4PQsqC4HP4yhV3tA2fqr2RbY5mNXfM7RxXUoeABoDtsFUq2zJq6YK").unwrap();
let tpub = bip32::ExtendedPubKey::from_str("tpubD6NzVbkrYhZ4XHndKkuB8FifXm8r5FQHwrN6oZuWCz13qb93rtgKvD4PQsqC4HP4yhV3tA2fqr2RbY5mNXfM7RxXUoeABoDtsFUq2zJq6YK").unwrap();
let path = bip32::DerivationPath::from_str("m/1/2").unwrap();
let key = (tpub, path).into_descriptor_key().unwrap();
@@ -860,31 +837,16 @@ mod test {
}
#[test]
fn test_check_wallet_descriptor() {
fn test_into_wallet_descriptor_checked() {
let secp = Secp256k1::new();
let descriptor = "wpkh(tpubD6NzVbkrYhZ4XHndKkuB8FifXm8r5FQHwrN6oZuWCz13qb93rtgKvD4PQsqC4HP4yhV3tA2fqr2RbY5mNXfM7RxXUoeABoDtsFUq2zJq6YK/0'/1/2/*)";
let (descriptor, _) = descriptor
.into_wallet_descriptor(&secp, Network::Testnet)
.expect("must parse");
let result = check_wallet_descriptor(&descriptor);
let result = into_wallet_descriptor_checked(descriptor, &secp, Network::Testnet);
assert_matches!(result, Err(DescriptorError::HardenedDerivationXpub));
let descriptor = "wpkh(tpubD6NzVbkrYhZ4XHndKkuB8FifXm8r5FQHwrN6oZuWCz13qb93rtgKvD4PQsqC4HP4yhV3tA2fqr2RbY5mNXfM7RxXUoeABoDtsFUq2zJq6YK/<0;1>/*)";
let (descriptor, _) = descriptor
.into_wallet_descriptor(&secp, Network::Testnet)
.expect("must parse");
let result = check_wallet_descriptor(&descriptor);
assert_matches!(result, Err(DescriptorError::MultiPath));
// repeated pubkeys
let descriptor = "wsh(multi(2,tpubD6NzVbkrYhZ4XHndKkuB8FifXm8r5FQHwrN6oZuWCz13qb93rtgKvD4PQsqC4HP4yhV3tA2fqr2RbY5mNXfM7RxXUoeABoDtsFUq2zJq6YK/0/*,tpubD6NzVbkrYhZ4XHndKkuB8FifXm8r5FQHwrN6oZuWCz13qb93rtgKvD4PQsqC4HP4yhV3tA2fqr2RbY5mNXfM7RxXUoeABoDtsFUq2zJq6YK/0/*))";
let (descriptor, _) = descriptor
.into_wallet_descriptor(&secp, Network::Testnet)
.expect("must parse");
let result = check_wallet_descriptor(&descriptor);
let result = into_wallet_descriptor_checked(descriptor, &secp, Network::Testnet);
assert!(result.is_err());
}
@@ -896,21 +858,19 @@ mod test {
let secp = Secp256k1::new();
let descriptor = "sh(wsh(sortedmulti(3,tpubDEsqS36T4DVsKJd9UH8pAKzrkGBYPLEt9jZMwpKtzh1G6mgYehfHt9WCgk7MJG5QGSFWf176KaBNoXbcuFcuadAFKxDpUdMDKGBha7bY3QM/0/*,tpubDF3cpwfs7fMvXXuoQbohXtLjNM6ehwYT287LWtmLsd4r77YLg6MZg4vTETx5MSJ2zkfigbYWu31VA2Z2Vc1cZugCYXgS7FQu6pE8V6TriEH/0/*,tpubDE1SKfcW76Tb2AASv5bQWMuScYNAdoqLHoexw13sNDXwmUhQDBbCD3QAedKGLhxMrWQdMDKENzYtnXPDRvexQPNuDrLj52wAjHhNEm8sJ4p/0/*,tpubDFLc6oXwJmhm3FGGzXkfJNTh2KitoY3WhmmQvuAjMhD8YbyWn5mAqckbxXfm2etM3p5J6JoTpSrMqRSTfMLtNW46poDaEZJ1kjd3csRSjwH/0/*,tpubDEWD9NBeWP59xXmdqSNt4VYdtTGwbpyP8WS962BuqpQeMZmX9Pur14dhXdZT5a7wR1pK6dPtZ9fP5WR493hPzemnBvkfLLYxnUjAKj1JCQV/0/*,tpubDEHyZkkwd7gZWCTgQuYQ9C4myF2hMEmyHsBCCmLssGqoqUxeT3gzohF5uEVURkf9TtmeepJgkSUmteac38FwZqirjApzNX59XSHLcwaTZCH/0/*,tpubDEqLouCekwnMUWN486kxGzD44qVgeyuqHyxUypNEiQt5RnUZNJe386TKPK99fqRV1vRkZjYAjtXGTECz98MCsdLcnkM67U6KdYRzVubeCgZ/0/*)))";
let (descriptor, _) = descriptor
.into_wallet_descriptor(&secp, Network::Testnet)
.unwrap();
check_wallet_descriptor(&descriptor).expect("descriptor");
let (descriptor, _) =
into_wallet_descriptor_checked(descriptor, &secp, Network::Testnet).unwrap();
let descriptor = descriptor.at_derivation_index(0).unwrap();
let descriptor = descriptor.at_derivation_index(0);
let script = ScriptBuf::from_hex("5321022f533b667e2ea3b36e21961c9fe9dca340fbe0af5210173a83ae0337ab20a57621026bb53a98e810bd0ee61a0ed1164ba6c024786d76554e793e202dc6ce9c78c4ea2102d5b8a7d66a41ffdb6f4c53d61994022e886b4f45001fb158b95c9164d45f8ca3210324b75eead2c1f9c60e8adeb5e7009fec7a29afcdb30d829d82d09562fe8bae8521032d34f8932200833487bd294aa219dcbe000b9f9b3d824799541430009f0fa55121037468f8ea99b6c64788398b5ad25480cad08f4b0d65be54ce3a55fd206b5ae4722103f72d3d96663b0ea99b0aeb0d7f273cab11a8de37885f1dddc8d9112adb87169357ae").unwrap();
let script = Script::from_str("5321022f533b667e2ea3b36e21961c9fe9dca340fbe0af5210173a83ae0337ab20a57621026bb53a98e810bd0ee61a0ed1164ba6c024786d76554e793e202dc6ce9c78c4ea2102d5b8a7d66a41ffdb6f4c53d61994022e886b4f45001fb158b95c9164d45f8ca3210324b75eead2c1f9c60e8adeb5e7009fec7a29afcdb30d829d82d09562fe8bae8521032d34f8932200833487bd294aa219dcbe000b9f9b3d824799541430009f0fa55121037468f8ea99b6c64788398b5ad25480cad08f4b0d65be54ce3a55fd206b5ae4722103f72d3d96663b0ea99b0aeb0d7f273cab11a8de37885f1dddc8d9112adb87169357ae").unwrap();
let mut psbt_input = psbt::Input::default();
psbt_input
.update_with_descriptor_unchecked(&descriptor)
.unwrap();
assert_eq!(psbt_input.redeem_script, Some(script.to_p2wsh()));
assert_eq!(psbt_input.redeem_script, Some(script.to_v0_p2wsh()));
assert_eq!(psbt_input.witness_script, Some(script));
}
}


@@ -20,10 +20,10 @@
//!
//! ```
//! # use std::sync::Arc;
//! # use bdk_wallet::descriptor::*;
//! # use bdk_wallet::signer::*;
//! # use bdk_wallet::bitcoin::secp256k1::Secp256k1;
//! use bdk_wallet::descriptor::policy::BuildSatisfaction;
//! # use bdk::descriptor::*;
//! # use bdk::wallet::signer::*;
//! # use bdk::bitcoin::secp256k1::Secp256k1;
//! use bdk::descriptor::policy::BuildSatisfaction;
//! let secp = Secp256k1::new();
//! let desc = "wsh(and_v(v:pk(cV3oCth6zxZ1UVsHLnGothsWNsaoxRhC6aeNi5VbSdFpwUkgkEci),or_d(pk(cVMTy7uebJgvFaSBwcgvwk8qn8xSLc97dKow4MBetjrrahZoimm2),older(12960))))";
//!
@@ -33,32 +33,33 @@
//! let signers = Arc::new(SignersContainer::build(key_map, &extended_desc, &secp));
//! let policy = extended_desc.extract_policy(&signers, BuildSatisfaction::None, &secp)?;
//! println!("policy: {}", serde_json::to_string(&policy).unwrap());
//! # Ok::<(), anyhow::Error>(())
//! # Ok::<(), bdk::Error>(())
//! ```
use crate::collections::{BTreeMap, HashSet, VecDeque};
use alloc::string::String;
use alloc::vec::Vec;
use core::cmp::max;
use miniscript::miniscript::limits::{MAX_PUBKEYS_IN_CHECKSIGADD, MAX_PUBKEYS_PER_MULTISIG};
use core::fmt;
use serde::ser::SerializeMap;
use serde::{Serialize, Serializer};
use bitcoin::bip32::Fingerprint;
use bitcoin::hashes::{hash160, ripemd160, sha256};
use bitcoin::{absolute, key::XOnlyPublicKey, relative, PublicKey, Sequence};
use bitcoin::util::bip32::Fingerprint;
use bitcoin::{LockTime, PublicKey, Sequence, XOnlyPublicKey};
use miniscript::descriptor::{
DescriptorPublicKey, ShInner, SinglePub, SinglePubKey, SortedMultiVec, WshInner,
};
use miniscript::{hash256, Threshold};
use miniscript::hash256;
use miniscript::{
Descriptor, Miniscript, Satisfier, ScriptContext, SigType, Terminal, ToPublicKey,
};
#[allow(unused_imports)]
use log::{debug, error, info, trace};
use crate::descriptor::ExtractPolicy;
use crate::keys::ExtScriptContext;
use crate::wallet::signer::{SignerId, SignersContainer};
@@ -67,7 +68,7 @@ use crate::wallet::utils::{After, Older, SecpCtx};
use super::checksum::calc_checksum;
use super::error::Error;
use super::XKeyUtils;
use bitcoin::psbt::{self, Psbt};
use bitcoin::util::psbt::{Input as PsbtInput, PartiallySignedTransaction as Psbt};
use miniscript::psbt::PsbtInputSatisfier;
/// A unique identifier for a key
@@ -94,9 +95,6 @@ impl PkOrF {
..
}) => PkOrF::XOnlyPubkey(*pk),
DescriptorPublicKey::XPub(xpub) => PkOrF::Fingerprint(xpub.root_fingerprint(secp)),
DescriptorPublicKey::MultiXPub(multi) => {
PkOrF::Fingerprint(multi.root_fingerprint(secp))
}
}
}
}
@@ -133,12 +131,12 @@ pub enum SatisfiableItem {
/// Absolute timeclock timestamp
AbsoluteTimelock {
/// The timelock value
value: absolute::LockTime,
value: LockTime,
},
/// Relative timelock locktime
RelativeTimelock {
/// The timelock value
value: relative::LockTime,
value: Sequence,
},
/// Multi-signature public keys with threshold count
Multisig {
@@ -453,14 +451,11 @@ pub struct Condition {
pub csv: Option<Sequence>,
/// Optional timelock condition
#[serde(skip_serializing_if = "Option::is_none")]
pub timelock: Option<absolute::LockTime>,
pub timelock: Option<LockTime>,
}
impl Condition {
fn merge_nlocktime(
a: absolute::LockTime,
b: absolute::LockTime,
) -> Result<absolute::LockTime, PolicyError> {
fn merge_nlocktime(a: LockTime, b: LockTime) -> Result<LockTime, PolicyError> {
if !a.is_same_unit(b) {
Err(PolicyError::MixedTimelockUnits)
} else if a > b {
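A worked sketch of the merge rule, written against the `absolute::LockTime` side of the diff (the values are illustrative):

use bitcoin::absolute::LockTime;

let a = LockTime::from_height(630_000).unwrap();
let b = LockTime::from_height(700_000).unwrap();
// Same unit: the later lock wins, so merging a with b yields b.
let t = LockTime::from_time(1_700_000_000).unwrap();
// Blocks vs. seconds: merging a with t is rejected with
// PolicyError::MixedTimelockUnits.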
@@ -520,7 +515,7 @@ pub enum PolicyError {
impl fmt::Display for PolicyError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::NotEnoughItemsSelected(err) => write!(f, "Not enough items selected: {}", err),
Self::NotEnoughItemsSelected(err) => write!(f, "Not enought items selected: {}", err),
Self::IndexOutOfRange(index) => write!(f, "Index out of range: {}", index),
Self::AddOnLeaf => write!(f, "Add on leaf"),
Self::AddOnPartialComplete => write!(f, "Add on partial complete"),
@@ -587,25 +582,30 @@ impl Policy {
Ok(Some(policy))
}
fn make_multi<Ctx: ScriptContext + 'static, const MAX: usize>(
threshold: &Threshold<DescriptorPublicKey, MAX>,
fn make_multisig<Ctx: ScriptContext + 'static>(
keys: &[DescriptorPublicKey],
signers: &SignersContainer,
build_sat: BuildSatisfaction,
threshold: usize,
sorted: bool,
secp: &SecpCtx,
) -> Result<Option<Policy>, PolicyError> {
let parsed_keys = threshold.iter().map(|k| PkOrF::from_key(k, secp)).collect();
if threshold == 0 {
return Ok(None);
}
let parsed_keys = keys.iter().map(|k| PkOrF::from_key(k, secp)).collect();
let mut contribution = Satisfaction::Partial {
n: threshold.n(),
m: threshold.k(),
n: keys.len(),
m: threshold,
items: vec![],
conditions: Default::default(),
sorted: Some(sorted),
};
let mut satisfaction = contribution.clone();
for (index, key) in threshold.iter().enumerate() {
for (index, key) in keys.iter().enumerate() {
if signers.find(signer_id(key, secp)).is_some() {
contribution.add(
&Satisfaction::Complete {
@@ -614,6 +614,7 @@ impl Policy {
index,
)?;
}
if let Some(psbt) = build_sat.psbt() {
if Ctx::find_signature(psbt, key, secp) {
satisfaction.add(
@@ -630,11 +631,12 @@ impl Policy {
let mut policy: Policy = SatisfiableItem::Multisig {
keys: parsed_keys,
threshold: threshold.k(),
threshold,
}
.into();
policy.contribution = contribution;
policy.satisfaction = satisfaction;
Ok(Some(policy))
}
@@ -660,11 +662,11 @@ impl Policy {
(0..*threshold).collect()
}
SatisfiableItem::Multisig { keys, .. } => (0..keys.len()).collect(),
_ => HashSet::new(),
_ => vec![],
};
let selected: HashSet<_> = match path.get(&self.id) {
Some(arr) => arr.iter().copied().collect(),
_ => default,
let selected = match path.get(&self.id) {
Some(arr) => arr,
_ => &default,
};
match &self.item {
@@ -672,24 +674,14 @@ impl Policy {
let mapped_req = items
.iter()
.map(|i| i.get_condition(path))
.collect::<Vec<_>>();
.collect::<Result<Vec<_>, _>>()?;
// if all the requirements are null we don't care about `selected` because there
// are no requirements
if mapped_req
.iter()
.all(|cond| matches!(cond, Ok(c) if c.is_null()))
{
if mapped_req.iter().all(Condition::is_null) {
return Ok(Condition::default());
}
// make sure all the indexes in the `selected` list are within range
for index in &selected {
if *index >= items.len() {
return Err(PolicyError::IndexOutOfRange(*index));
}
}
// if we have something, make sure we have enough items. note that the user can set
// an empty value for this step in case of n-of-n, because `selected` is set to all
// the elements above
@@ -698,18 +690,23 @@ impl Policy {
}
// check the selected items, see if there are conflicting requirements
mapped_req
.into_iter()
.enumerate()
.filter(|(index, _)| selected.contains(index))
.try_fold(Condition::default(), |acc, (_, cond)| acc.merge(&cond?))
let mut requirements = Condition::default();
for item_index in selected {
requirements = requirements.merge(
mapped_req
.get(*item_index)
.ok_or(PolicyError::IndexOutOfRange(*item_index))?,
)?;
}
Ok(requirements)
}
SatisfiableItem::Multisig { keys, threshold } => {
if selected.len() < *threshold {
return Err(PolicyError::NotEnoughItemsSelected(self.id.clone()));
}
if let Some(item) = selected.into_iter().find(|&i| i >= keys.len()) {
return Err(PolicyError::IndexOutOfRange(item));
if let Some(item) = selected.iter().find(|i| **i >= keys.len()) {
return Err(PolicyError::IndexOutOfRange(*item));
}
Ok(Condition::default())
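A hypothetical call into this selection logic; the `policy` value would come from `extract_policy` as in the module docs, and the id and index values here are made up:

use crate::collections::BTreeMap;

let mut path = BTreeMap::new();
// Select the first two sub-items of the thresh/multisig node with this id.
path.insert(policy.id.clone(), vec![0, 1]);
let condition = policy.get_condition(&path)?;
// `condition` now carries the merged timelock/CSV requirements, or an error
// if the selection was too small or referenced an out-of-range index.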
@@ -719,7 +716,7 @@ impl Policy {
timelock: Some(*value),
}),
SatisfiableItem::RelativeTimelock { value } => Ok(Condition {
csv: Some((*value).into()),
csv: Some(*value),
timelock: None,
}),
_ => Ok(Condition::default()),
@@ -747,7 +744,6 @@ fn signer_id(key: &DescriptorPublicKey, secp: &SecpCtx) -> SignerId {
..
}) => pk.to_pubkeyhash(SigType::Ecdsa).into(),
DescriptorPublicKey::XPub(xpub) => xpub.root_fingerprint(secp).into(),
DescriptorPublicKey::MultiXPub(xpub) => xpub.root_fingerprint(secp).into(),
}
}
@@ -785,9 +781,9 @@ fn make_generic_signature<M: Fn() -> SatisfiableItem, F: Fn(&Psbt) -> bool>(
fn generic_sig_in_psbt<
// C is for "check", it's a closure we use to *check* if a psbt input contains the signature
// for a specific key
C: Fn(&psbt::Input, &SinglePubKey) -> bool,
C: Fn(&PsbtInput, &SinglePubKey) -> bool,
// E is for "extract", it extracts a key from the bip32 derivations found in the psbt input
E: Fn(&psbt::Input, Fingerprint) -> Option<SinglePubKey>,
E: Fn(&PsbtInput, Fingerprint) -> Option<SinglePubKey>,
>(
psbt: &Psbt,
key: &DescriptorPublicKey,
@@ -805,13 +801,6 @@ fn generic_sig_in_psbt<
None => false,
}
}
DescriptorPublicKey::MultiXPub(xpub) => {
//TODO check actual derivation matches
match extract(input, xpub.root_fingerprint(secp)) {
Some(pubkey) => check(input, &pubkey),
None => false,
}
}
})
}
@@ -917,12 +906,12 @@ impl<Ctx: ScriptContext + 'static> ExtractPolicy for Miniscript<DescriptorPublic
}
Terminal::After(value) => {
let mut policy: Policy = SatisfiableItem::AbsoluteTimelock {
value: (*value).into(),
value: value.into(),
}
.into();
policy.contribution = Satisfaction::Complete {
condition: Condition {
timelock: Some((*value).into()),
timelock: Some(value.into()),
csv: None,
},
};
@@ -934,9 +923,9 @@ impl<Ctx: ScriptContext + 'static> ExtractPolicy for Miniscript<DescriptorPublic
{
let after = After::new(Some(current_height), false);
let after_sat =
Satisfier::<bitcoin::PublicKey>::check_after(&after, (*value).into());
Satisfier::<bitcoin::PublicKey>::check_after(&after, value.into());
let inputs_sat = psbt_inputs_sat(psbt).all(|sat| {
Satisfier::<bitcoin::PublicKey>::check_after(&sat, (*value).into())
Satisfier::<bitcoin::PublicKey>::check_after(&sat, value.into())
});
if after_sat && inputs_sat {
policy.satisfaction = policy.contribution.clone();
@@ -946,14 +935,11 @@ impl<Ctx: ScriptContext + 'static> ExtractPolicy for Miniscript<DescriptorPublic
Some(policy)
}
Terminal::Older(value) => {
let mut policy: Policy = SatisfiableItem::RelativeTimelock {
value: (*value).into(),
}
.into();
let mut policy: Policy = SatisfiableItem::RelativeTimelock { value: *value }.into();
policy.contribution = Satisfaction::Complete {
condition: Condition {
timelock: None,
csv: Some((*value).into()),
csv: Some(*value),
},
};
if let BuildSatisfaction::PsbtTimelocks {
@@ -963,11 +949,9 @@ impl<Ctx: ScriptContext + 'static> ExtractPolicy for Miniscript<DescriptorPublic
} = build_sat
{
let older = Older::new(Some(current_height), Some(input_max_height), false);
let older_sat =
Satisfier::<bitcoin::PublicKey>::check_older(&older, (*value).into());
let inputs_sat = psbt_inputs_sat(psbt).all(|sat| {
Satisfier::<bitcoin::PublicKey>::check_older(&sat, (*value).into())
});
let older_sat = Satisfier::<bitcoin::PublicKey>::check_older(&older, *value);
let inputs_sat = psbt_inputs_sat(psbt)
.all(|sat| Satisfier::<bitcoin::PublicKey>::check_older(&sat, *value));
if older_sat && inputs_sat {
policy.satisfaction = policy.contribution.clone();
}
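Roughly how the relative-lock check above plays out (assumed semantics, illustrative numbers):

// With current_height = 1_000 and the newest spent input confirmed around
// input_max_height = 990, about 10 blocks have elapsed, so:
//   older(5)  -> satisfiable: at least 5 blocks sit on top of every input;
//   older(50) -> not yet satisfiable: fewer than 50 blocks have elapsed.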
@@ -985,12 +969,9 @@ impl<Ctx: ScriptContext + 'static> ExtractPolicy for Miniscript<DescriptorPublic
Terminal::Hash160(hash) => {
Some(SatisfiableItem::Hash160Preimage { hash: *hash }.into())
}
Terminal::Multi(threshold) => Policy::make_multi::<Ctx, MAX_PUBKEYS_PER_MULTISIG>(
threshold, signers, build_sat, false, secp,
)?,
Terminal::MultiA(threshold) => Policy::make_multi::<Ctx, MAX_PUBKEYS_IN_CHECKSIGADD>(
threshold, signers, build_sat, false, secp,
)?,
Terminal::Multi(k, pks) | Terminal::MultiA(k, pks) => {
Policy::make_multisig::<Ctx>(pks, signers, build_sat, *k, false, secp)?
}
// Identities
Terminal::Alt(inner)
| Terminal::Swap(inner)
@@ -1018,9 +999,8 @@ impl<Ctx: ScriptContext + 'static> ExtractPolicy for Miniscript<DescriptorPublic
a.extract_policy(signers, build_sat, secp)?,
b.extract_policy(signers, build_sat, secp)?,
)?,
Terminal::Thresh(threshold) => {
let mut k = threshold.k();
let nodes = threshold.data();
Terminal::Thresh(k, nodes) => {
let mut threshold = *k;
let mapped: Vec<_> = nodes
.iter()
.map(|n| n.extract_policy(signers, build_sat, secp))
@@ -1030,13 +1010,13 @@ impl<Ctx: ScriptContext + 'static> ExtractPolicy for Miniscript<DescriptorPublic
.collect();
if mapped.len() < nodes.len() {
k = match k.checked_sub(nodes.len() - mapped.len()) {
threshold = match threshold.checked_sub(nodes.len() - mapped.len()) {
None => return Ok(None),
Some(x) => x,
};
}
Policy::make_thresh(mapped, k)?
Policy::make_thresh(mapped, threshold)?
}
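// Worked example of the adjustment above: for a thresh(2, A, B, C) where C
// yields no extractable policy, the threshold becomes 2 - (3 - 2) = 1 over
// the remaining two nodes; if the subtraction underflows, the whole node
// contributes no policy at all (Ok(None)).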
// Unsupported
@@ -1090,10 +1070,13 @@ impl ExtractPolicy for Descriptor<DescriptorPublicKey> {
build_sat: BuildSatisfaction,
secp: &SecpCtx,
) -> Result<Option<Policy>, Error> {
let threshold = Threshold::new(keys.k(), keys.pks().to_vec())
.expect("valid threshold and pks collection");
Ok(Policy::make_multi::<Ctx, MAX_PUBKEYS_PER_MULTISIG>(
&threshold, signers, build_sat, true, secp,
Ok(Policy::make_multisig::<Ctx>(
keys.pks.as_ref(),
signers,
build_sat,
keys.k,
true,
secp,
)?)
}
@@ -1137,7 +1120,7 @@ impl ExtractPolicy for Descriptor<DescriptorPublicKey> {
let key_spend_sig =
miniscript::Tap::make_signature(tr.internal_key(), signers, build_sat, secp);
if tr.tap_tree().is_none() {
if tr.taptree().is_none() {
Ok(Some(key_spend_sig))
} else {
let mut items = vec![key_spend_sig];
@@ -1168,8 +1151,8 @@ mod test {
use crate::wallet::signer::SignersContainer;
use alloc::{string::ToString, sync::Arc};
use assert_matches::assert_matches;
use bitcoin::bip32;
use bitcoin::secp256k1::Secp256k1;
use bitcoin::util::bip32;
use bitcoin::Network;
use core::str::FromStr;
@@ -1184,8 +1167,8 @@ mod test {
secp: &SecpCtx,
) -> (DescriptorKey<Ctx>, DescriptorKey<Ctx>, Fingerprint) {
let path = bip32::DerivationPath::from_str(path).unwrap();
let tprv = bip32::Xpriv::from_str(tprv).unwrap();
let tpub = bip32::Xpub::from_priv(secp, &tprv);
let tprv = bip32::ExtendedPrivKey::from_str(tprv).unwrap();
let tpub = bip32::ExtendedPubKey::from_priv(secp, &tprv);
let fingerprint = tprv.fingerprint(secp);
let prvkey = (tprv, path.clone()).into_descriptor_key().unwrap();
let pubkey = (tpub, path).into_descriptor_key().unwrap();
@@ -1587,7 +1570,6 @@ mod test {
let addr = wallet_desc
.at_derivation_index(0)
.unwrap()
.address(Network::Testnet)
.unwrap();
assert_eq!(
@@ -1654,7 +1636,6 @@ mod test {
let addr = wallet_desc
.at_derivation_index(0)
.unwrap()
.address(Network::Testnet)
.unwrap();
assert_eq!(


@@ -14,10 +14,10 @@
//! This module contains the definition of various common script templates that are ready to be
//! used. See the documentation of each template for an example.
use bitcoin::bip32;
use bitcoin::util::bip32;
use bitcoin::Network;
use miniscript::{Legacy, Segwitv0, Tap};
use miniscript::{Legacy, Segwitv0};
use super::{ExtendedDescriptor, IntoWalletDescriptor, KeyMap};
use crate::descriptor::DescriptorError;
@@ -36,17 +36,17 @@ pub type DescriptorTemplateOut = (ExtendedDescriptor, KeyMap, ValidNetworks);
/// ## Example
///
/// ```
/// use bdk_wallet::descriptor::error::Error as DescriptorError;
/// use bdk_wallet::keys::{IntoDescriptorKey, KeyError};
/// use bdk_wallet::miniscript::Legacy;
/// use bdk_wallet::template::{DescriptorTemplate, DescriptorTemplateOut};
/// use bdk::descriptor::error::Error as DescriptorError;
/// use bdk::keys::{IntoDescriptorKey, KeyError};
/// use bdk::miniscript::Legacy;
/// use bdk::template::{DescriptorTemplate, DescriptorTemplateOut};
/// use bitcoin::Network;
///
/// struct MyP2PKH<K: IntoDescriptorKey<Legacy>>(K);
///
/// impl<K: IntoDescriptorKey<Legacy>> DescriptorTemplate for MyP2PKH<K> {
/// fn build(self, network: Network) -> Result<DescriptorTemplateOut, DescriptorError> {
/// Ok(bdk_wallet::descriptor!(pkh(self.0))?)
/// Ok(bdk::descriptor!(pkh(self.0))?)
/// }
/// }
/// ```
@@ -72,28 +72,21 @@ impl<T: DescriptorTemplate> IntoWalletDescriptor for T {
/// ## Example
///
/// ```
/// # use bdk_wallet::bitcoin::{PrivateKey, Network};
/// # use bdk_wallet::Wallet;
/// # use bdk_wallet::KeychainKind;
/// use bdk_wallet::template::P2Pkh;
/// # use bdk::bitcoin::{PrivateKey, Network};
/// # use bdk::Wallet;
/// # use bdk::wallet::AddressIndex::New;
/// use bdk::template::P2Pkh;
///
/// let key_external =
/// let key =
/// bitcoin::PrivateKey::from_wif("cTc4vURSzdx6QE6KVynWGomDbLaA75dNALMNyfjh3p8DRRar84Um")?;
/// let key_internal =
/// bitcoin::PrivateKey::from_wif("cVpPVruEDdmutPzisEsYvtST1usBR3ntr8pXSyt6D2YYqXRyPcFW")?;
/// let mut wallet = Wallet::create(P2Pkh(key_external), P2Pkh(key_internal))
/// .network(Network::Testnet)
/// .create_wallet_no_persist()?;
/// let mut wallet = Wallet::new_no_persist(P2Pkh(key), None, Network::Testnet)?;
///
/// assert_eq!(
/// wallet
/// .next_unused_address(KeychainKind::External)
/// .to_string(),
/// wallet.get_address(New).to_string(),
/// "mwJ8hxFYW19JLuc65RCTaP4v1rzVU8cVMT"
/// );
/// # Ok::<_, Box<dyn std::error::Error>>(())
/// ```
#[derive(Debug, Clone)]
pub struct P2Pkh<K: IntoDescriptorKey<Legacy>>(pub K);
impl<K: IntoDescriptorKey<Legacy>> DescriptorTemplate for P2Pkh<K> {
@@ -107,29 +100,22 @@ impl<K: IntoDescriptorKey<Legacy>> DescriptorTemplate for P2Pkh<K> {
/// ## Example
///
/// ```
/// # use bdk_wallet::bitcoin::{PrivateKey, Network};
/// # use bdk_wallet::Wallet;
/// # use bdk_wallet::KeychainKind;
/// use bdk_wallet::template::P2Wpkh_P2Sh;
/// # use bdk::bitcoin::{PrivateKey, Network};
/// # use bdk::Wallet;
/// use bdk::template::P2Wpkh_P2Sh;
/// use bdk::wallet::AddressIndex;
///
/// let key_external =
/// let key =
/// bitcoin::PrivateKey::from_wif("cTc4vURSzdx6QE6KVynWGomDbLaA75dNALMNyfjh3p8DRRar84Um")?;
/// let key_internal =
/// bitcoin::PrivateKey::from_wif("cVpPVruEDdmutPzisEsYvtST1usBR3ntr8pXSyt6D2YYqXRyPcFW")?;
/// let mut wallet = Wallet::create(P2Wpkh_P2Sh(key_external), P2Wpkh_P2Sh(key_internal))
/// .network(Network::Testnet)
/// .create_wallet_no_persist()?;
/// let mut wallet = Wallet::new_no_persist(P2Wpkh_P2Sh(key), None, Network::Testnet)?;
///
/// assert_eq!(
/// wallet
/// .next_unused_address(KeychainKind::External)
/// .to_string(),
/// wallet.get_address(AddressIndex::New).to_string(),
/// "2NB4ox5VDRw1ecUv6SnT3VQHPXveYztRqk5"
/// );
/// # Ok::<_, Box<dyn std::error::Error>>(())
/// ```
#[allow(non_camel_case_types)]
#[derive(Debug, Clone)]
pub struct P2Wpkh_P2Sh<K: IntoDescriptorKey<Segwitv0>>(pub K);
impl<K: IntoDescriptorKey<Segwitv0>> DescriptorTemplate for P2Wpkh_P2Sh<K> {
@@ -143,28 +129,21 @@ impl<K: IntoDescriptorKey<Segwitv0>> DescriptorTemplate for P2Wpkh_P2Sh<K> {
/// ## Example
///
/// ```
/// # use bdk_wallet::bitcoin::{PrivateKey, Network};
/// # use bdk_wallet::Wallet;
/// # use bdk_wallet::KeychainKind;
/// use bdk_wallet::template::P2Wpkh;
/// # use bdk::bitcoin::{PrivateKey, Network};
/// # use bdk::{Wallet};
/// use bdk::template::P2Wpkh;
/// use bdk::wallet::AddressIndex::New;
///
/// let key_external =
/// let key =
/// bitcoin::PrivateKey::from_wif("cTc4vURSzdx6QE6KVynWGomDbLaA75dNALMNyfjh3p8DRRar84Um")?;
/// let key_internal =
/// bitcoin::PrivateKey::from_wif("cVpPVruEDdmutPzisEsYvtST1usBR3ntr8pXSyt6D2YYqXRyPcFW")?;
/// let mut wallet = Wallet::create(P2Wpkh(key_external), P2Wpkh(key_internal))
/// .network(Network::Testnet)
/// .create_wallet_no_persist()?;
/// let mut wallet = Wallet::new_no_persist(P2Wpkh(key), None, Network::Testnet)?;
///
/// assert_eq!(
/// wallet
/// .next_unused_address(KeychainKind::External)
/// .to_string(),
/// wallet.get_address(New).to_string(),
/// "tb1q4525hmgw265tl3drrl8jjta7ayffu6jf68ltjd"
/// );
/// # Ok::<_, Box<dyn std::error::Error>>(())
/// ```
#[derive(Debug, Clone)]
pub struct P2Wpkh<K: IntoDescriptorKey<Segwitv0>>(pub K);
impl<K: IntoDescriptorKey<Segwitv0>> DescriptorTemplate for P2Wpkh<K> {
@@ -173,41 +152,6 @@ impl<K: IntoDescriptorKey<Segwitv0>> DescriptorTemplate for P2Wpkh<K> {
}
}
/// P2TR template. Expands to a descriptor `tr(key)`
///
/// ## Example
///
/// ```
/// # use bdk_wallet::bitcoin::{PrivateKey, Network};
/// # use bdk_wallet::Wallet;
/// # use bdk_wallet::KeychainKind;
/// use bdk_wallet::template::P2TR;
///
/// let key_external =
/// bitcoin::PrivateKey::from_wif("cTc4vURSzdx6QE6KVynWGomDbLaA75dNALMNyfjh3p8DRRar84Um")?;
/// let key_internal =
/// bitcoin::PrivateKey::from_wif("cVpPVruEDdmutPzisEsYvtST1usBR3ntr8pXSyt6D2YYqXRyPcFW")?;
/// let mut wallet = Wallet::create(P2TR(key_external), P2TR(key_internal))
/// .network(Network::Testnet)
/// .create_wallet_no_persist()?;
///
/// assert_eq!(
/// wallet
/// .next_unused_address(KeychainKind::External)
/// .to_string(),
/// "tb1pvjf9t34fznr53u5tqhejz4nr69luzkhlvsdsdfq9pglutrpve2xq7hps46"
/// );
/// # Ok::<_, Box<dyn std::error::Error>>(())
/// ```
#[derive(Debug, Clone)]
pub struct P2TR<K: IntoDescriptorKey<Tap>>(pub K);
impl<K: IntoDescriptorKey<Tap>> DescriptorTemplate for P2TR<K> {
fn build(self, _network: Network) -> Result<DescriptorTemplateOut, DescriptorError> {
descriptor!(tr(self.0))
}
}
/// BIP44 template. Expands to `pkh(key/44'/{0,1}'/0'/{0,1}/*)`
///
/// Since there are hardened derivation steps, this template requires a private derivable key (generally a `xprv`/`tprv`).
@@ -216,22 +160,24 @@ impl<K: IntoDescriptorKey<Tap>> DescriptorTemplate for P2TR<K> {
///
/// ## Example
///
/// ```rust
/// ```
/// # use std::str::FromStr;
/// # use bdk_wallet::bitcoin::{PrivateKey, Network};
/// # use bdk_wallet::{Wallet, KeychainKind};
/// use bdk_wallet::template::Bip44;
/// # use bdk::bitcoin::{PrivateKey, Network};
/// # use bdk::{Wallet, KeychainKind};
/// # use bdk::wallet::AddressIndex::New;
/// use bdk::template::Bip44;
///
/// let key = bitcoin::bip32::Xpriv::from_str("tprv8ZgxMBicQKsPeZRHk4rTG6orPS2CRNFX3njhUXx5vj9qGog5ZMH4uGReDWN5kCkY3jmWEtWause41CDvBRXD1shKknAMKxT99o9qUTRVC6m")?;
/// let mut wallet = Wallet::create(Bip44(key.clone(), KeychainKind::External), Bip44(key, KeychainKind::Internal))
/// .network(Network::Testnet)
/// .create_wallet_no_persist()?;
/// let key = bitcoin::util::bip32::ExtendedPrivKey::from_str("tprv8ZgxMBicQKsPeZRHk4rTG6orPS2CRNFX3njhUXx5vj9qGog5ZMH4uGReDWN5kCkY3jmWEtWause41CDvBRXD1shKknAMKxT99o9qUTRVC6m")?;
/// let mut wallet = Wallet::new_no_persist(
/// Bip44(key.clone(), KeychainKind::External),
/// Some(Bip44(key, KeychainKind::Internal)),
/// Network::Testnet,
/// )?;
///
/// assert_eq!(wallet.next_unused_address(KeychainKind::External).to_string(), "mmogjc7HJEZkrLqyQYqJmxUqFaC7i4uf89");
/// assert_eq!(wallet.public_descriptor(KeychainKind::External).to_string(), "pkh([c55b303f/44'/1'/0']tpubDCuorCpzvYS2LCD75BR46KHE8GdDeg1wsAgNZeNr6DaB5gQK1o14uErKwKLuFmeemkQ6N2m3rNgvctdJLyr7nwu2yia7413Hhg8WWE44cgT/0/*)#5wrnv0xt");
/// assert_eq!(wallet.get_address(New).to_string(), "mmogjc7HJEZkrLqyQYqJmxUqFaC7i4uf89");
/// assert_eq!(wallet.public_descriptor(KeychainKind::External).unwrap().to_string(), "pkh([c55b303f/44'/1'/0']tpubDCuorCpzvYS2LCD75BR46KHE8GdDeg1wsAgNZeNr6DaB5gQK1o14uErKwKLuFmeemkQ6N2m3rNgvctdJLyr7nwu2yia7413Hhg8WWE44cgT/0/*)#5wrnv0xt");
/// # Ok::<_, Box<dyn std::error::Error>>(())
/// ```
#[derive(Debug, Clone)]
pub struct Bip44<K: DerivableKey<Legacy>>(pub K, pub KeychainKind);
impl<K: DerivableKey<Legacy>> DescriptorTemplate for Bip44<K> {
@@ -253,24 +199,23 @@ impl<K: DerivableKey<Legacy>> DescriptorTemplate for Bip44<K> {
///
/// ```
/// # use std::str::FromStr;
/// # use bdk_wallet::bitcoin::{PrivateKey, Network};
/// # use bdk_wallet::{KeychainKind, Wallet};
/// use bdk_wallet::template::Bip44Public;
/// # use bdk::bitcoin::{PrivateKey, Network};
/// # use bdk::{Wallet, KeychainKind};
/// # use bdk::wallet::AddressIndex::New;
/// use bdk::template::Bip44Public;
///
/// let key = bitcoin::bip32::Xpub::from_str("tpubDDDzQ31JkZB7VxUr9bjvBivDdqoFLrDPyLWtLapArAi51ftfmCb2DPxwLQzX65iNcXz1DGaVvyvo6JQ6rTU73r2gqdEo8uov9QKRb7nKCSU")?;
/// let fingerprint = bitcoin::bip32::Fingerprint::from_str("c55b303f")?;
/// let mut wallet = Wallet::create(
/// let key = bitcoin::util::bip32::ExtendedPubKey::from_str("tpubDDDzQ31JkZB7VxUr9bjvBivDdqoFLrDPyLWtLapArAi51ftfmCb2DPxwLQzX65iNcXz1DGaVvyvo6JQ6rTU73r2gqdEo8uov9QKRb7nKCSU")?;
/// let fingerprint = bitcoin::util::bip32::Fingerprint::from_str("c55b303f")?;
/// let mut wallet = Wallet::new_no_persist(
/// Bip44Public(key.clone(), fingerprint, KeychainKind::External),
/// Bip44Public(key, fingerprint, KeychainKind::Internal),
/// )
/// .network(Network::Testnet)
/// .create_wallet_no_persist()?;
/// Some(Bip44Public(key, fingerprint, KeychainKind::Internal)),
/// Network::Testnet,
/// )?;
///
/// assert_eq!(wallet.next_unused_address(KeychainKind::External).to_string(), "miNG7dJTzJqNbFS19svRdTCisC65dsubtR");
/// assert_eq!(wallet.public_descriptor(KeychainKind::External).to_string(), "pkh([c55b303f/44'/1'/0']tpubDDDzQ31JkZB7VxUr9bjvBivDdqoFLrDPyLWtLapArAi51ftfmCb2DPxwLQzX65iNcXz1DGaVvyvo6JQ6rTU73r2gqdEo8uov9QKRb7nKCSU/0/*)#cfhumdqz");
/// assert_eq!(wallet.get_address(New).to_string(), "miNG7dJTzJqNbFS19svRdTCisC65dsubtR");
/// assert_eq!(wallet.public_descriptor(KeychainKind::External).unwrap().to_string(), "pkh([c55b303f/44'/1'/0']tpubDDDzQ31JkZB7VxUr9bjvBivDdqoFLrDPyLWtLapArAi51ftfmCb2DPxwLQzX65iNcXz1DGaVvyvo6JQ6rTU73r2gqdEo8uov9QKRb7nKCSU/0/*)#cfhumdqz");
/// # Ok::<_, Box<dyn std::error::Error>>(())
/// ```
#[derive(Debug, Clone)]
pub struct Bip44Public<K: DerivableKey<Legacy>>(pub K, pub bip32::Fingerprint, pub KeychainKind);
impl<K: DerivableKey<Legacy>> DescriptorTemplate for Bip44Public<K> {
@@ -292,23 +237,22 @@ impl<K: DerivableKey<Legacy>> DescriptorTemplate for Bip44Public<K> {
///
/// ```
/// # use std::str::FromStr;
/// # use bdk_wallet::bitcoin::{PrivateKey, Network};
/// # use bdk_wallet::{Wallet, KeychainKind};
/// use bdk_wallet::template::Bip49;
/// # use bdk::bitcoin::{PrivateKey, Network};
/// # use bdk::{Wallet, KeychainKind};
/// # use bdk::wallet::AddressIndex::New;
/// use bdk::template::Bip49;
///
/// let key = bitcoin::bip32::Xpriv::from_str("tprv8ZgxMBicQKsPeZRHk4rTG6orPS2CRNFX3njhUXx5vj9qGog5ZMH4uGReDWN5kCkY3jmWEtWause41CDvBRXD1shKknAMKxT99o9qUTRVC6m")?;
/// let mut wallet = Wallet::create(
/// let key = bitcoin::util::bip32::ExtendedPrivKey::from_str("tprv8ZgxMBicQKsPeZRHk4rTG6orPS2CRNFX3njhUXx5vj9qGog5ZMH4uGReDWN5kCkY3jmWEtWause41CDvBRXD1shKknAMKxT99o9qUTRVC6m")?;
/// let mut wallet = Wallet::new_no_persist(
/// Bip49(key.clone(), KeychainKind::External),
/// Bip49(key, KeychainKind::Internal),
/// )
/// .network(Network::Testnet)
/// .create_wallet_no_persist()?;
/// Some(Bip49(key, KeychainKind::Internal)),
/// Network::Testnet,
/// )?;
///
/// assert_eq!(wallet.next_unused_address(KeychainKind::External).to_string(), "2N4zkWAoGdUv4NXhSsU8DvS5MB36T8nKHEB");
/// assert_eq!(wallet.public_descriptor(KeychainKind::External).to_string(), "sh(wpkh([c55b303f/49'/1'/0']tpubDDYr4kdnZgjjShzYNjZUZXUUtpXaofdkMaipyS8ThEh45qFmhT4hKYways7UXmg6V7het1QiFo9kf4kYUXyDvV4rHEyvSpys9pjCB3pukxi/0/*))#s9vxlc8e");
/// assert_eq!(wallet.get_address(New).to_string(), "2N4zkWAoGdUv4NXhSsU8DvS5MB36T8nKHEB");
/// assert_eq!(wallet.public_descriptor(KeychainKind::External).unwrap().to_string(), "sh(wpkh([c55b303f/49'/1'/0']tpubDDYr4kdnZgjjShzYNjZUZXUUtpXaofdkMaipyS8ThEh45qFmhT4hKYways7UXmg6V7het1QiFo9kf4kYUXyDvV4rHEyvSpys9pjCB3pukxi/0/*))#s9vxlc8e");
/// # Ok::<_, Box<dyn std::error::Error>>(())
/// ```
#[derive(Debug, Clone)]
pub struct Bip49<K: DerivableKey<Segwitv0>>(pub K, pub KeychainKind);
impl<K: DerivableKey<Segwitv0>> DescriptorTemplate for Bip49<K> {
@@ -330,24 +274,23 @@ impl<K: DerivableKey<Segwitv0>> DescriptorTemplate for Bip49<K> {
///
/// ```
/// # use std::str::FromStr;
/// # use bdk_wallet::bitcoin::{PrivateKey, Network};
/// # use bdk_wallet::{Wallet, KeychainKind};
/// use bdk_wallet::template::Bip49Public;
/// # use bdk::bitcoin::{PrivateKey, Network};
/// # use bdk::{Wallet, KeychainKind};
/// # use bdk::wallet::AddressIndex::New;
/// use bdk::template::Bip49Public;
///
/// let key = bitcoin::bip32::Xpub::from_str("tpubDC49r947KGK52X5rBWS4BLs5m9SRY3pYHnvRrm7HcybZ3BfdEsGFyzCMzayi1u58eT82ZeyFZwH7DD6Q83E3fM9CpfMtmnTygnLfP59jL9L")?;
/// let fingerprint = bitcoin::bip32::Fingerprint::from_str("c55b303f")?;
/// let mut wallet = Wallet::create(
/// let key = bitcoin::util::bip32::ExtendedPubKey::from_str("tpubDC49r947KGK52X5rBWS4BLs5m9SRY3pYHnvRrm7HcybZ3BfdEsGFyzCMzayi1u58eT82ZeyFZwH7DD6Q83E3fM9CpfMtmnTygnLfP59jL9L")?;
/// let fingerprint = bitcoin::util::bip32::Fingerprint::from_str("c55b303f")?;
/// let mut wallet = Wallet::new_no_persist(
/// Bip49Public(key.clone(), fingerprint, KeychainKind::External),
/// Bip49Public(key, fingerprint, KeychainKind::Internal),
/// )
/// .network(Network::Testnet)
/// .create_wallet_no_persist()?;
/// Some(Bip49Public(key, fingerprint, KeychainKind::Internal)),
/// Network::Testnet,
/// )?;
///
/// assert_eq!(wallet.next_unused_address(KeychainKind::External).to_string(), "2N3K4xbVAHoiTQSwxkZjWDfKoNC27pLkYnt");
/// assert_eq!(wallet.public_descriptor(KeychainKind::External).to_string(), "sh(wpkh([c55b303f/49'/1'/0']tpubDC49r947KGK52X5rBWS4BLs5m9SRY3pYHnvRrm7HcybZ3BfdEsGFyzCMzayi1u58eT82ZeyFZwH7DD6Q83E3fM9CpfMtmnTygnLfP59jL9L/0/*))#3tka9g0q");
/// assert_eq!(wallet.get_address(New).to_string(), "2N3K4xbVAHoiTQSwxkZjWDfKoNC27pLkYnt");
/// assert_eq!(wallet.public_descriptor(KeychainKind::External).unwrap().to_string(), "sh(wpkh([c55b303f/49'/1'/0']tpubDC49r947KGK52X5rBWS4BLs5m9SRY3pYHnvRrm7HcybZ3BfdEsGFyzCMzayi1u58eT82ZeyFZwH7DD6Q83E3fM9CpfMtmnTygnLfP59jL9L/0/*))#3tka9g0q");
/// # Ok::<_, Box<dyn std::error::Error>>(())
/// ```
#[derive(Debug, Clone)]
pub struct Bip49Public<K: DerivableKey<Segwitv0>>(pub K, pub bip32::Fingerprint, pub KeychainKind);
impl<K: DerivableKey<Segwitv0>> DescriptorTemplate for Bip49Public<K> {
@@ -369,23 +312,22 @@ impl<K: DerivableKey<Segwitv0>> DescriptorTemplate for Bip49Public<K> {
///
/// ```
/// # use std::str::FromStr;
/// # use bdk_wallet::bitcoin::{PrivateKey, Network};
/// # use bdk_wallet::{Wallet, KeychainKind};
/// use bdk_wallet::template::Bip84;
/// # use bdk::bitcoin::{PrivateKey, Network};
/// # use bdk::{Wallet, KeychainKind};
/// # use bdk::wallet::AddressIndex::New;
/// use bdk::template::Bip84;
///
/// let key = bitcoin::bip32::Xpriv::from_str("tprv8ZgxMBicQKsPeZRHk4rTG6orPS2CRNFX3njhUXx5vj9qGog5ZMH4uGReDWN5kCkY3jmWEtWause41CDvBRXD1shKknAMKxT99o9qUTRVC6m")?;
/// let mut wallet = Wallet::create(
/// let key = bitcoin::util::bip32::ExtendedPrivKey::from_str("tprv8ZgxMBicQKsPeZRHk4rTG6orPS2CRNFX3njhUXx5vj9qGog5ZMH4uGReDWN5kCkY3jmWEtWause41CDvBRXD1shKknAMKxT99o9qUTRVC6m")?;
/// let mut wallet = Wallet::new_no_persist(
/// Bip84(key.clone(), KeychainKind::External),
/// Bip84(key, KeychainKind::Internal),
/// )
/// .network(Network::Testnet)
/// .create_wallet_no_persist()?;
/// Some(Bip84(key, KeychainKind::Internal)),
/// Network::Testnet,
/// )?;
///
/// assert_eq!(wallet.next_unused_address(KeychainKind::External).to_string(), "tb1qhl85z42h7r4su5u37rvvw0gk8j2t3n9y7zsg4n");
/// assert_eq!(wallet.public_descriptor(KeychainKind::External).to_string(), "wpkh([c55b303f/84'/1'/0']tpubDDc5mum24DekpNw92t6fHGp8Gr2JjF9J7i4TZBtN6Vp8xpAULG5CFaKsfugWa5imhrQQUZKXe261asP5koDHo5bs3qNTmf3U3o4v9SaB8gg/0/*)#6kfecsmr");
/// assert_eq!(wallet.get_address(New).to_string(), "tb1qhl85z42h7r4su5u37rvvw0gk8j2t3n9y7zsg4n");
/// assert_eq!(wallet.public_descriptor(KeychainKind::External).unwrap().to_string(), "wpkh([c55b303f/84'/1'/0']tpubDDc5mum24DekpNw92t6fHGp8Gr2JjF9J7i4TZBtN6Vp8xpAULG5CFaKsfugWa5imhrQQUZKXe261asP5koDHo5bs3qNTmf3U3o4v9SaB8gg/0/*)#6kfecsmr");
/// # Ok::<_, Box<dyn std::error::Error>>(())
/// ```
#[derive(Debug, Clone)]
pub struct Bip84<K: DerivableKey<Segwitv0>>(pub K, pub KeychainKind);
impl<K: DerivableKey<Segwitv0>> DescriptorTemplate for Bip84<K> {
@@ -407,24 +349,23 @@ impl<K: DerivableKey<Segwitv0>> DescriptorTemplate for Bip84<K> {
///
/// ```
/// # use std::str::FromStr;
/// # use bdk_wallet::bitcoin::{PrivateKey, Network};
/// # use bdk_wallet::{Wallet, KeychainKind};
/// use bdk_wallet::template::Bip84Public;
/// # use bdk::bitcoin::{PrivateKey, Network};
/// # use bdk::{Wallet, KeychainKind};
/// # use bdk::wallet::AddressIndex::New;
/// use bdk::template::Bip84Public;
///
/// let key = bitcoin::bip32::Xpub::from_str("tpubDC2Qwo2TFsaNC4ju8nrUJ9mqVT3eSgdmy1yPqhgkjwmke3PRXutNGRYAUo6RCHTcVQaDR3ohNU9we59brGHuEKPvH1ags2nevW5opEE9Z5Q")?;
/// let fingerprint = bitcoin::bip32::Fingerprint::from_str("c55b303f")?;
/// let mut wallet = Wallet::create(
/// let key = bitcoin::util::bip32::ExtendedPubKey::from_str("tpubDC2Qwo2TFsaNC4ju8nrUJ9mqVT3eSgdmy1yPqhgkjwmke3PRXutNGRYAUo6RCHTcVQaDR3ohNU9we59brGHuEKPvH1ags2nevW5opEE9Z5Q")?;
/// let fingerprint = bitcoin::util::bip32::Fingerprint::from_str("c55b303f")?;
/// let mut wallet = Wallet::new_no_persist(
/// Bip84Public(key.clone(), fingerprint, KeychainKind::External),
/// Bip84Public(key, fingerprint, KeychainKind::Internal),
/// )
/// .network(Network::Testnet)
/// .create_wallet_no_persist()?;
/// Some(Bip84Public(key, fingerprint, KeychainKind::Internal)),
/// Network::Testnet,
/// )?;
///
/// assert_eq!(wallet.next_unused_address(KeychainKind::External).to_string(), "tb1qedg9fdlf8cnnqfd5mks6uz5w4kgpk2pr6y4qc7");
/// assert_eq!(wallet.public_descriptor(KeychainKind::External).to_string(), "wpkh([c55b303f/84'/1'/0']tpubDC2Qwo2TFsaNC4ju8nrUJ9mqVT3eSgdmy1yPqhgkjwmke3PRXutNGRYAUo6RCHTcVQaDR3ohNU9we59brGHuEKPvH1ags2nevW5opEE9Z5Q/0/*)#dhu402yv");
/// assert_eq!(wallet.get_address(New).to_string(), "tb1qedg9fdlf8cnnqfd5mks6uz5w4kgpk2pr6y4qc7");
/// assert_eq!(wallet.public_descriptor(KeychainKind::External).unwrap().to_string(), "wpkh([c55b303f/84'/1'/0']tpubDC2Qwo2TFsaNC4ju8nrUJ9mqVT3eSgdmy1yPqhgkjwmke3PRXutNGRYAUo6RCHTcVQaDR3ohNU9we59brGHuEKPvH1ags2nevW5opEE9Z5Q/0/*)#dhu402yv");
/// # Ok::<_, Box<dyn std::error::Error>>(())
/// ```
#[derive(Debug, Clone)]
pub struct Bip84Public<K: DerivableKey<Segwitv0>>(pub K, pub bip32::Fingerprint, pub KeychainKind);
impl<K: DerivableKey<Segwitv0>> DescriptorTemplate for Bip84Public<K> {
@@ -436,83 +377,6 @@ impl<K: DerivableKey<Segwitv0>> DescriptorTemplate for Bip84Public<K> {
}
}
/// BIP86 template. Expands to `tr(key/86'/{0,1}'/0'/{0,1}/*)`
///
/// Since there are hardened derivation steps, this template requires a private derivable key (generally a `xprv`/`tprv`).
///
/// See [`Bip86Public`] for a template that can work with a `xpub`/`tpub`.
///
/// ## Example
///
/// ```
/// # use std::str::FromStr;
/// # use bdk_wallet::bitcoin::{PrivateKey, Network};
/// # use bdk_wallet::{Wallet, KeychainKind};
/// use bdk_wallet::template::Bip86;
///
/// let key = bitcoin::bip32::Xpriv::from_str("tprv8ZgxMBicQKsPeZRHk4rTG6orPS2CRNFX3njhUXx5vj9qGog5ZMH4uGReDWN5kCkY3jmWEtWause41CDvBRXD1shKknAMKxT99o9qUTRVC6m")?;
/// let mut wallet = Wallet::create(
/// Bip86(key.clone(), KeychainKind::External),
/// Bip86(key, KeychainKind::Internal),
/// )
/// .network(Network::Testnet)
/// .create_wallet_no_persist()?;
///
/// assert_eq!(wallet.next_unused_address(KeychainKind::External).to_string(), "tb1p5unlj09djx8xsjwe97269kqtxqpwpu2epeskgqjfk4lnf69v4tnqpp35qu");
/// assert_eq!(wallet.public_descriptor(KeychainKind::External).to_string(), "tr([c55b303f/86'/1'/0']tpubDCiHofpEs47kx358bPdJmTZHmCDqQ8qw32upCSxHrSEdeeBs2T5Mq6QMB2ukeMqhNBiyhosBvJErteVhfURPGXPv3qLJPw5MVpHUewsbP2m/0/*)#dkgvr5hm");
/// # Ok::<_, Box<dyn std::error::Error>>(())
/// ```
#[derive(Debug, Clone)]
pub struct Bip86<K: DerivableKey<Tap>>(pub K, pub KeychainKind);
impl<K: DerivableKey<Tap>> DescriptorTemplate for Bip86<K> {
fn build(self, network: Network) -> Result<DescriptorTemplateOut, DescriptorError> {
P2TR(segwit_v1::make_bipxx_private(86, self.0, self.1, network)?).build(network)
}
}
/// BIP86 public template. Expands to `tr(key/{0,1}/*)`
///
/// This assumes that the key used has already been derived with `m/86'/0'/0'` for Mainnet or `m/86'/1'/0'` for Testnet.
///
/// This template requires the parent fingerprint to populate correctly the metadata of PSBTs.
///
/// See [`Bip86`] for a template that does the full derivation, but requires private data
/// for the key.
///
/// ## Example
///
/// ```
/// # use std::str::FromStr;
/// # use bdk_wallet::bitcoin::{PrivateKey, Network};
/// # use bdk_wallet::{Wallet, KeychainKind};
/// use bdk_wallet::template::Bip86Public;
///
/// let key = bitcoin::bip32::Xpub::from_str("tpubDC2Qwo2TFsaNC4ju8nrUJ9mqVT3eSgdmy1yPqhgkjwmke3PRXutNGRYAUo6RCHTcVQaDR3ohNU9we59brGHuEKPvH1ags2nevW5opEE9Z5Q")?;
/// let fingerprint = bitcoin::bip32::Fingerprint::from_str("c55b303f")?;
/// let mut wallet = Wallet::create(
/// Bip86Public(key.clone(), fingerprint, KeychainKind::External),
/// Bip86Public(key, fingerprint, KeychainKind::Internal),
/// )
/// .network(Network::Testnet)
/// .create_wallet_no_persist()?;
///
/// assert_eq!(wallet.next_unused_address(KeychainKind::External).to_string(), "tb1pwjp9f2k5n0xq73ecuu0c5njvgqr3vkh7yaylmpqvsuuaafymh0msvcmh37");
/// assert_eq!(wallet.public_descriptor(KeychainKind::External).to_string(), "tr([c55b303f/86'/1'/0']tpubDC2Qwo2TFsaNC4ju8nrUJ9mqVT3eSgdmy1yPqhgkjwmke3PRXutNGRYAUo6RCHTcVQaDR3ohNU9we59brGHuEKPvH1ags2nevW5opEE9Z5Q/0/*)#2p65srku");
/// # Ok::<_, Box<dyn std::error::Error>>(())
/// ```
#[derive(Debug, Clone)]
pub struct Bip86Public<K: DerivableKey<Tap>>(pub K, pub bip32::Fingerprint, pub KeychainKind);
impl<K: DerivableKey<Tap>> DescriptorTemplate for Bip86Public<K> {
fn build(self, network: Network) -> Result<DescriptorTemplateOut, DescriptorError> {
P2TR(segwit_v1::make_bipxx_public(
86, self.0, self.1, self.2, network,
)?)
.build(network)
}
}
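Aside: the same `DescriptorTemplate` trait is open for downstream types. An illustrative sketch (the `MyWpkh` name is hypothetical; `P2Wpkh` is the built-in template it delegates to):
// Hypothetical custom template that wraps any key in a plain wpkh() descriptor.
pub struct MyWpkh<K: IntoDescriptorKey<Segwitv0>>(pub K);

impl<K: IntoDescriptorKey<Segwitv0>> DescriptorTemplate for MyWpkh<K> {
    fn build(self, network: Network) -> Result<DescriptorTemplateOut, DescriptorError> {
        // Delegate to the built-in P2Wpkh template.
        P2Wpkh(self.0).build(network)
    }
}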
macro_rules! expand_make_bipxx {
( $mod_name:ident, $ctx:ty ) => {
mod $mod_name {
@@ -579,7 +443,6 @@ macro_rules! expand_make_bipxx {
expand_make_bipxx!(legacy, Legacy);
expand_make_bipxx!(segwit_v0, Segwitv0);
expand_make_bipxx!(segwit_v1, Tap);
#[cfg(test)]
mod test {
@@ -592,37 +455,38 @@ mod test {
use crate::descriptor::{DescriptorError, DescriptorMeta};
use crate::keys::ValidNetworks;
use assert_matches::assert_matches;
use bitcoin::network::constants::Network::Regtest;
use miniscript::descriptor::{DescriptorPublicKey, KeyMap};
use miniscript::Descriptor;
// BIP44 `pkh(key/44'/{0,1}'/0'/{0,1}/*)`
#[test]
fn test_bip44_template_cointype() {
use bitcoin::bip32::ChildNumber::{self, Hardened};
use bitcoin::util::bip32::ChildNumber::{self, Hardened};
let xprvkey = bitcoin::bip32::Xpriv::from_str("xprv9s21ZrQH143K2fpbqApQL69a4oKdGVnVN52R82Ft7d1pSqgKmajF62acJo3aMszZb6qQ22QsVECSFxvf9uyxFUvFYQMq3QbtwtRSMjLAhMf").unwrap();
assert!(xprvkey.network.is_mainnet());
let xprvkey = bitcoin::util::bip32::ExtendedPrivKey::from_str("xprv9s21ZrQH143K2fpbqApQL69a4oKdGVnVN52R82Ft7d1pSqgKmajF62acJo3aMszZb6qQ22QsVECSFxvf9uyxFUvFYQMq3QbtwtRSMjLAhMf").unwrap();
assert_eq!(Network::Bitcoin, xprvkey.network);
let xdesc = Bip44(xprvkey, KeychainKind::Internal)
.build(Network::Bitcoin)
.unwrap();
if let ExtendedDescriptor::Pkh(pkh) = xdesc.0 {
let path: Vec<ChildNumber> = pkh.into_inner().full_derivation_path().unwrap().into();
let purpose = path.first().unwrap();
let path: Vec<ChildNumber> = pkh.into_inner().full_derivation_path().into();
let purpose = path.get(0).unwrap();
assert_matches!(purpose, Hardened { index: 44 });
let coin_type = path.get(1).unwrap();
assert_matches!(coin_type, Hardened { index: 0 });
}
let tprvkey = bitcoin::bip32::Xpriv::from_str("tprv8ZgxMBicQKsPcx5nBGsR63Pe8KnRUqmbJNENAfGftF3yuXoMMoVJJcYeUw5eVkm9WBPjWYt6HMWYJNesB5HaNVBaFc1M6dRjWSYnmewUMYy").unwrap();
assert!(!tprvkey.network.is_mainnet());
let tprvkey = bitcoin::util::bip32::ExtendedPrivKey::from_str("tprv8ZgxMBicQKsPcx5nBGsR63Pe8KnRUqmbJNENAfGftF3yuXoMMoVJJcYeUw5eVkm9WBPjWYt6HMWYJNesB5HaNVBaFc1M6dRjWSYnmewUMYy").unwrap();
assert_eq!(Network::Testnet, tprvkey.network);
let tdesc = Bip44(tprvkey, KeychainKind::Internal)
.build(Network::Testnet)
.unwrap();
if let ExtendedDescriptor::Pkh(pkh) = tdesc.0 {
let path: Vec<ChildNumber> = pkh.into_inner().full_derivation_path().unwrap().into();
let purpose = path.first().unwrap();
let path: Vec<ChildNumber> = pkh.into_inner().full_derivation_path().into();
let purpose = path.get(0).unwrap();
assert_matches!(purpose, Hardened { index: 44 });
let coin_type = path.get(1).unwrap();
assert_matches!(coin_type, Hardened { index: 1 });
@@ -633,23 +497,20 @@ mod test {
fn check(
desc: Result<(Descriptor<DescriptorPublicKey>, KeyMap, ValidNetworks), DescriptorError>,
is_witness: bool,
is_taproot: bool,
is_fixed: bool,
network: Network,
expected: &[&str],
) {
let (desc, _key_map, _networks) = desc.unwrap();
assert_eq!(desc.is_witness(), is_witness);
assert_eq!(desc.is_taproot(), is_taproot);
assert_eq!(!desc.has_wildcard(), is_fixed);
for i in 0..expected.len() {
let index = i as u32;
let child_desc = if !desc.has_wildcard() {
desc.at_derivation_index(0).unwrap()
desc.at_derivation_index(0)
} else {
desc.at_derivation_index(index).unwrap()
desc.at_derivation_index(index)
};
let address = child_desc.address(network).unwrap();
let address = child_desc.address(Regtest).unwrap();
assert_eq!(address.to_string(), *expected.get(i).unwrap());
}
}
@@ -663,9 +524,7 @@ mod test {
check(
P2Pkh(prvkey).build(Network::Bitcoin),
false,
false,
true,
Network::Regtest,
&["mwJ8hxFYW19JLuc65RCTaP4v1rzVU8cVMT"],
);
@@ -676,9 +535,7 @@ mod test {
check(
P2Pkh(pubkey).build(Network::Bitcoin),
false,
false,
true,
Network::Regtest,
&["muZpTpBYhxmRFuCjLc7C6BBDF32C8XVJUi"],
);
}
@@ -692,9 +549,7 @@ mod test {
check(
P2Wpkh_P2Sh(prvkey).build(Network::Bitcoin),
true,
false,
true,
Network::Regtest,
&["2NB4ox5VDRw1ecUv6SnT3VQHPXveYztRqk5"],
);
@@ -705,9 +560,7 @@ mod test {
check(
P2Wpkh_P2Sh(pubkey).build(Network::Bitcoin),
true,
false,
true,
Network::Regtest,
&["2N5LiC3CqzxDamRTPG1kiNv1FpNJQ7x28sb"],
);
}
@@ -721,9 +574,7 @@ mod test {
check(
P2Wpkh(prvkey).build(Network::Bitcoin),
true,
false,
true,
Network::Regtest,
&["bcrt1q4525hmgw265tl3drrl8jjta7ayffu6jfcwxx9y"],
);
@@ -734,52 +585,19 @@ mod test {
check(
P2Wpkh(pubkey).build(Network::Bitcoin),
true,
false,
true,
Network::Regtest,
&["bcrt1qngw83fg8dz0k749cg7k3emc7v98wy0c7azaa6h"],
);
}
// P2TR `tr(key)`
#[test]
fn test_p2tr_template() {
let prvkey =
bitcoin::PrivateKey::from_wif("cTc4vURSzdx6QE6KVynWGomDbLaA75dNALMNyfjh3p8DRRar84Um")
.unwrap();
check(
P2TR(prvkey).build(Network::Bitcoin),
false,
true,
true,
Network::Regtest,
&["bcrt1pvjf9t34fznr53u5tqhejz4nr69luzkhlvsdsdfq9pglutrpve2xqnwtkqq"],
);
let pubkey = bitcoin::PublicKey::from_str(
"03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd",
)
.unwrap();
check(
P2TR(pubkey).build(Network::Bitcoin),
false,
true,
true,
Network::Regtest,
&["bcrt1pw74tdcrxlzn5r8z6ku2vztr86fgq0m245s72mjktf4afwzsf8ugs4evwdf"],
);
}
// BIP44 `pkh(key/44'/0'/0'/{0,1}/*)`
#[test]
fn test_bip44_template() {
let prvkey = bitcoin::bip32::Xpriv::from_str("tprv8ZgxMBicQKsPcx5nBGsR63Pe8KnRUqmbJNENAfGftF3yuXoMMoVJJcYeUw5eVkm9WBPjWYt6HMWYJNesB5HaNVBaFc1M6dRjWSYnmewUMYy").unwrap();
let prvkey = bitcoin::util::bip32::ExtendedPrivKey::from_str("tprv8ZgxMBicQKsPcx5nBGsR63Pe8KnRUqmbJNENAfGftF3yuXoMMoVJJcYeUw5eVkm9WBPjWYt6HMWYJNesB5HaNVBaFc1M6dRjWSYnmewUMYy").unwrap();
check(
Bip44(prvkey, KeychainKind::External).build(Network::Bitcoin),
false,
false,
false,
Network::Regtest,
&[
"n453VtnjDHPyDt2fDstKSu7A3YCJoHZ5g5",
"mvfrrumXgTtwFPWDNUecBBgzuMXhYM7KRP",
@@ -790,8 +608,6 @@ mod test {
Bip44(prvkey, KeychainKind::Internal).build(Network::Bitcoin),
false,
false,
false,
Network::Regtest,
&[
"muHF98X9KxEzdKrnFAX85KeHv96eXopaip",
"n4hpyLJE5ub6B5Bymv4eqFxS5KjrewSmYR",
@@ -803,14 +619,12 @@ mod test {
// BIP44 public `pkh(key/{0,1}/*)`
#[test]
fn test_bip44_public_template() {
let pubkey = bitcoin::bip32::Xpub::from_str("tpubDDDzQ31JkZB7VxUr9bjvBivDdqoFLrDPyLWtLapArAi51ftfmCb2DPxwLQzX65iNcXz1DGaVvyvo6JQ6rTU73r2gqdEo8uov9QKRb7nKCSU").unwrap();
let fingerprint = bitcoin::bip32::Fingerprint::from_str("c55b303f").unwrap();
let pubkey = bitcoin::util::bip32::ExtendedPubKey::from_str("tpubDDDzQ31JkZB7VxUr9bjvBivDdqoFLrDPyLWtLapArAi51ftfmCb2DPxwLQzX65iNcXz1DGaVvyvo6JQ6rTU73r2gqdEo8uov9QKRb7nKCSU").unwrap();
let fingerprint = bitcoin::util::bip32::Fingerprint::from_str("c55b303f").unwrap();
check(
Bip44Public(pubkey, fingerprint, KeychainKind::External).build(Network::Bitcoin),
false,
false,
false,
Network::Regtest,
&[
"miNG7dJTzJqNbFS19svRdTCisC65dsubtR",
"n2UqaDbCjWSFJvpC84m3FjUk5UaeibCzYg",
@@ -821,8 +635,6 @@ mod test {
Bip44Public(pubkey, fingerprint, KeychainKind::Internal).build(Network::Bitcoin),
false,
false,
false,
Network::Regtest,
&[
"moDr3vJ8wpt5nNxSK55MPq797nXJb2Ru9H",
"ms7A1Yt4uTezT2XkefW12AvLoko8WfNJMG",
@@ -834,13 +646,11 @@ mod test {
// BIP49 `sh(wpkh(key/49'/0'/0'/{0,1}/*))`
#[test]
fn test_bip49_template() {
let prvkey = bitcoin::bip32::Xpriv::from_str("tprv8ZgxMBicQKsPcx5nBGsR63Pe8KnRUqmbJNENAfGftF3yuXoMMoVJJcYeUw5eVkm9WBPjWYt6HMWYJNesB5HaNVBaFc1M6dRjWSYnmewUMYy").unwrap();
let prvkey = bitcoin::util::bip32::ExtendedPrivKey::from_str("tprv8ZgxMBicQKsPcx5nBGsR63Pe8KnRUqmbJNENAfGftF3yuXoMMoVJJcYeUw5eVkm9WBPjWYt6HMWYJNesB5HaNVBaFc1M6dRjWSYnmewUMYy").unwrap();
check(
Bip49(prvkey, KeychainKind::External).build(Network::Bitcoin),
true,
false,
false,
Network::Regtest,
&[
"2N9bCAJXGm168MjVwpkBdNt6ucka3PKVoUV",
"2NDckYkqrYyDMtttEav5hB3Bfw9EGAW5HtS",
@@ -851,8 +661,6 @@ mod test {
Bip49(prvkey, KeychainKind::Internal).build(Network::Bitcoin),
true,
false,
false,
Network::Regtest,
&[
"2NB3pA8PnzJLGV8YEKNDFpbViZv3Bm1K6CG",
"2NBiX2Wzxngb5rPiWpUiJQ2uLVB4HBjFD4p",
@@ -864,14 +672,12 @@ mod test {
// BIP49 public `sh(wpkh(key/{0,1}/*))`
#[test]
fn test_bip49_public_template() {
let pubkey = bitcoin::bip32::Xpub::from_str("tpubDC49r947KGK52X5rBWS4BLs5m9SRY3pYHnvRrm7HcybZ3BfdEsGFyzCMzayi1u58eT82ZeyFZwH7DD6Q83E3fM9CpfMtmnTygnLfP59jL9L").unwrap();
let fingerprint = bitcoin::bip32::Fingerprint::from_str("c55b303f").unwrap();
let pubkey = bitcoin::util::bip32::ExtendedPubKey::from_str("tpubDC49r947KGK52X5rBWS4BLs5m9SRY3pYHnvRrm7HcybZ3BfdEsGFyzCMzayi1u58eT82ZeyFZwH7DD6Q83E3fM9CpfMtmnTygnLfP59jL9L").unwrap();
let fingerprint = bitcoin::util::bip32::Fingerprint::from_str("c55b303f").unwrap();
check(
Bip49Public(pubkey, fingerprint, KeychainKind::External).build(Network::Bitcoin),
true,
false,
false,
Network::Regtest,
&[
"2N3K4xbVAHoiTQSwxkZjWDfKoNC27pLkYnt",
"2NCTQfJ1sZa3wQ3pPseYRHbaNEpC3AquEfX",
@@ -882,8 +688,6 @@ mod test {
Bip49Public(pubkey, fingerprint, KeychainKind::Internal).build(Network::Bitcoin),
true,
false,
false,
Network::Regtest,
&[
"2NF2vttKibwyxigxtx95Zw8K7JhDbo5zPVJ",
"2Mtmyd8taksxNVWCJ4wVvaiss7QPZGcAJuH",
@@ -895,13 +699,11 @@ mod test {
// BIP84 `wpkh(key/84'/0'/0'/{0,1}/*)`
#[test]
fn test_bip84_template() {
let prvkey = bitcoin::bip32::Xpriv::from_str("tprv8ZgxMBicQKsPcx5nBGsR63Pe8KnRUqmbJNENAfGftF3yuXoMMoVJJcYeUw5eVkm9WBPjWYt6HMWYJNesB5HaNVBaFc1M6dRjWSYnmewUMYy").unwrap();
let prvkey = bitcoin::util::bip32::ExtendedPrivKey::from_str("tprv8ZgxMBicQKsPcx5nBGsR63Pe8KnRUqmbJNENAfGftF3yuXoMMoVJJcYeUw5eVkm9WBPjWYt6HMWYJNesB5HaNVBaFc1M6dRjWSYnmewUMYy").unwrap();
check(
Bip84(prvkey, KeychainKind::External).build(Network::Bitcoin),
true,
false,
false,
Network::Regtest,
&[
"bcrt1qkmvk2nadgplmd57ztld8nf8v2yxkzmdvwtjf8s",
"bcrt1qx0v6zgfwe50m4kqc58cqzcyem7ay2sfl3gvqhp",
@@ -912,8 +714,6 @@ mod test {
Bip84(prvkey, KeychainKind::Internal).build(Network::Bitcoin),
true,
false,
false,
Network::Regtest,
&[
"bcrt1qtrwtz00wxl69e5xex7amy4xzlxkaefg3gfdkxa",
"bcrt1qqqasfhxpkkf7zrxqnkr2sfhn74dgsrc3e3ky45",
@@ -925,14 +725,12 @@ mod test {
// BIP84 public `wpkh(key/{0,1}/*)`
#[test]
fn test_bip84_public_template() {
let pubkey = bitcoin::bip32::Xpub::from_str("tpubDC2Qwo2TFsaNC4ju8nrUJ9mqVT3eSgdmy1yPqhgkjwmke3PRXutNGRYAUo6RCHTcVQaDR3ohNU9we59brGHuEKPvH1ags2nevW5opEE9Z5Q").unwrap();
let fingerprint = bitcoin::bip32::Fingerprint::from_str("c55b303f").unwrap();
let pubkey = bitcoin::util::bip32::ExtendedPubKey::from_str("tpubDC2Qwo2TFsaNC4ju8nrUJ9mqVT3eSgdmy1yPqhgkjwmke3PRXutNGRYAUo6RCHTcVQaDR3ohNU9we59brGHuEKPvH1ags2nevW5opEE9Z5Q").unwrap();
let fingerprint = bitcoin::util::bip32::Fingerprint::from_str("c55b303f").unwrap();
check(
Bip84Public(pubkey, fingerprint, KeychainKind::External).build(Network::Bitcoin),
true,
false,
false,
Network::Regtest,
&[
"bcrt1qedg9fdlf8cnnqfd5mks6uz5w4kgpk2prcdvd0h",
"bcrt1q3lncdlwq3lgcaaeyruynjnlccr0ve0kakh6ana",
@@ -943,8 +741,6 @@ mod test {
Bip84Public(pubkey, fingerprint, KeychainKind::Internal).build(Network::Bitcoin),
true,
false,
false,
Network::Regtest,
&[
"bcrt1qm6wqukenh7guu792lj2njgw9n78cmwsy8xy3z2",
"bcrt1q694twxtjn4nnrvnyvra769j0a23rllj5c6cgwp",
@@ -952,67 +748,4 @@ mod test {
],
);
}
// BIP86 `tr(key/86'/0'/0'/{0,1}/*)`
// Uses addresses from the test vectors in https://github.com/bitcoin/bips/blob/master/bip-0086.mediawiki
#[test]
fn test_bip86_template() {
let prvkey = bitcoin::bip32::Xpriv::from_str("xprv9s21ZrQH143K3GJpoapnV8SFfukcVBSfeCficPSGfubmSFDxo1kuHnLisriDvSnRRuL2Qrg5ggqHKNVpxR86QEC8w35uxmGoggxtQTPvfUu").unwrap();
check(
Bip86(prvkey, KeychainKind::External).build(Network::Bitcoin),
false,
true,
false,
Network::Bitcoin,
&[
"bc1p5cyxnuxmeuwuvkwfem96lqzszd02n6xdcjrs20cac6yqjjwudpxqkedrcr",
"bc1p4qhjn9zdvkux4e44uhx8tc55attvtyu358kutcqkudyccelu0was9fqzwh",
"bc1p0d0rhyynq0awa9m8cqrcr8f5nxqx3aw29w4ru5u9my3h0sfygnzs9khxz8",
],
);
check(
Bip86(prvkey, KeychainKind::Internal).build(Network::Bitcoin),
false,
true,
false,
Network::Bitcoin,
&[
"bc1p3qkhfews2uk44qtvauqyr2ttdsw7svhkl9nkm9s9c3x4ax5h60wqwruhk7",
"bc1ptdg60grjk9t3qqcqczp4tlyy3z47yrx9nhlrjsmw36q5a72lhdrs9f00nj",
"bc1pgcwgsu8naxp7xlp5p7ufzs7emtfza2las7r2e7krzjhe5qj5xz2q88kmk5",
],
);
}
// BIP86 public `tr(key/{0,1}/*)`
// Uses addresses from the test vectors in https://github.com/bitcoin/bips/blob/master/bip-0086.mediawiki
#[test]
fn test_bip86_public_template() {
let pubkey = bitcoin::bip32::Xpub::from_str("xpub6BgBgsespWvERF3LHQu6CnqdvfEvtMcQjYrcRzx53QJjSxarj2afYWcLteoGVky7D3UKDP9QyrLprQ3VCECoY49yfdDEHGCtMMj92pReUsQ").unwrap();
let fingerprint = bitcoin::bip32::Fingerprint::from_str("73c5da0a").unwrap();
check(
Bip86Public(pubkey, fingerprint, KeychainKind::External).build(Network::Bitcoin),
false,
true,
false,
Network::Bitcoin,
&[
"bc1p5cyxnuxmeuwuvkwfem96lqzszd02n6xdcjrs20cac6yqjjwudpxqkedrcr",
"bc1p4qhjn9zdvkux4e44uhx8tc55attvtyu358kutcqkudyccelu0was9fqzwh",
"bc1p0d0rhyynq0awa9m8cqrcr8f5nxqx3aw29w4ru5u9my3h0sfygnzs9khxz8",
],
);
check(
Bip86Public(pubkey, fingerprint, KeychainKind::Internal).build(Network::Bitcoin),
false,
true,
false,
Network::Bitcoin,
&[
"bc1p3qkhfews2uk44qtvauqyr2ttdsw7svhkl9nkm9s9c3x4ax5h60wqwruhk7",
"bc1ptdg60grjk9t3qqcqczp4tlyy3z47yrx9nhlrjsmw36q5a72lhdrs9f00nj",
"bc1pgcwgsu8naxp7xlp5p7ufzs7emtfza2las7r2e7krzjhe5qj5xz2q88kmk5",
],
);
}
}

crates/bdk/src/error.rs (new file, 199 lines)
@@ -0,0 +1,199 @@
// Bitcoin Dev Kit
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
//
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use crate::bitcoin::Network;
use crate::{descriptor, wallet};
use alloc::{string::String, vec::Vec};
use bitcoin::{OutPoint, Txid};
use core::fmt;
/// Errors that can be thrown by the [`Wallet`](crate::wallet::Wallet)
#[derive(Debug)]
pub enum Error {
/// Generic error
Generic(String),
/// Cannot build a tx without recipients
NoRecipients,
/// `manually_selected_only` option is selected but no utxo has been passed
NoUtxosSelected,
/// Output created is under the dust limit, 546 satoshis
OutputBelowDustLimit(usize),
/// Wallet's UTXO set is not enough to cover recipient's requested plus fee
InsufficientFunds {
/// Sats needed for some transaction
needed: u64,
/// Sats available for spending
available: u64,
},
/// With a sufficiently large UTXO set, the number of branch and bound coin selection attempts
/// can grow exponentially, so a limit is set; this error is thrown when that limit is hit
BnBTotalTriesExceeded,
/// Branch and bound coin selection tries to avoid needing a change output by finding inputs that
/// exactly match the desired outputs plus fee; if no such combination exists, this error is thrown
BnBNoExactMatch,
/// Happens when trying to spend a UTXO that is not in the internal database
UnknownUtxo,
/// Thrown when a tx is not found in the internal database
TransactionNotFound,
/// Happens when trying to bump a transaction that is already confirmed
TransactionConfirmed,
/// Trying to replace a tx that has a sequence >= `0xFFFFFFFE`
IrreplaceableTransaction,
/// When bumping a tx the fee rate requested is lower than required
FeeRateTooLow {
/// Required fee rate (satoshi/vbyte)
required: crate::types::FeeRate,
},
/// When bumping a tx the absolute fee requested is lower than replaced tx absolute fee
FeeTooLow {
/// Required fee absolute value (satoshi)
required: u64,
},
/// Node doesn't have data to estimate a fee rate
FeeRateUnavailable,
/// In order to use the [`TxBuilder::add_global_xpubs`] option every extended
/// key in the descriptor must either be a master key itself (having depth = 0) or have an
/// explicit origin provided
///
/// [`TxBuilder::add_global_xpubs`]: crate::wallet::tx_builder::TxBuilder::add_global_xpubs
MissingKeyOrigin(String),
/// Error while working with [`keys`](crate::keys)
Key(crate::keys::KeyError),
/// Descriptor checksum mismatch
ChecksumMismatch,
/// Spending policy is not compatible with this [`KeychainKind`](crate::types::KeychainKind)
SpendingPolicyRequired(crate::types::KeychainKind),
/// Error while extracting and manipulating policies
InvalidPolicyPathError(crate::descriptor::policy::PolicyError),
/// Signing error
Signer(crate::wallet::signer::SignerError),
/// Requested outpoint doesn't exist in the tx (vout greater than available outputs)
InvalidOutpoint(OutPoint),
/// Error related to the parsing and usage of descriptors
Descriptor(crate::descriptor::error::Error),
/// Miniscript error
Miniscript(miniscript::Error),
/// Miniscript PSBT error
MiniscriptPsbt(MiniscriptPsbtError),
/// BIP32 error
Bip32(bitcoin::util::bip32::Error),
/// Partially signed bitcoin transaction error
Psbt(bitcoin::util::psbt::Error),
}
/// Errors returned by miniscript when updating inconsistent PSBTs
#[derive(Debug, Clone)]
pub enum MiniscriptPsbtError {
Conversion(miniscript::descriptor::ConversionError),
UtxoUpdate(miniscript::psbt::UtxoUpdateError),
OutputUpdate(miniscript::psbt::OutputUpdateError),
}
impl fmt::Display for MiniscriptPsbtError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Conversion(err) => write!(f, "Conversion error: {}", err),
Self::UtxoUpdate(err) => write!(f, "UTXO update error: {}", err),
Self::OutputUpdate(err) => write!(f, "Output update error: {}", err),
}
}
}
impl std::error::Error for MiniscriptPsbtError {}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Generic(err) => write!(f, "Generic error: {}", err),
Self::NoRecipients => write!(f, "Cannot build tx without recipients"),
Self::NoUtxosSelected => write!(f, "No UTXO selected"),
Self::OutputBelowDustLimit(limit) => {
write!(f, "Output below the dust limit: {}", limit)
}
Self::InsufficientFunds { needed, available } => write!(
f,
"Insufficient funds: {} sat available of {} sat needed",
available, needed
),
Self::BnBTotalTriesExceeded => {
write!(f, "Branch and bound coin selection: total tries exceeded")
}
Self::BnBNoExactMatch => write!(f, "Branch and bound coin selection: no exact match"),
Self::UnknownUtxo => write!(f, "UTXO not found in the internal database"),
Self::TransactionNotFound => {
write!(f, "Transaction not found in the internal database")
}
Self::TransactionConfirmed => write!(f, "Transaction already confirmed"),
Self::IrreplaceableTransaction => write!(f, "Transaction can't be replaced"),
Self::FeeRateTooLow { required } => write!(
f,
"Fee rate too low: required {} sat/vbyte",
required.as_sat_per_vb()
),
Self::FeeTooLow { required } => write!(f, "Fee too low: required {} sat", required),
Self::FeeRateUnavailable => write!(f, "Fee rate unavailable"),
Self::MissingKeyOrigin(err) => write!(f, "Missing key origin: {}", err),
Self::Key(err) => write!(f, "Key error: {}", err),
Self::ChecksumMismatch => write!(f, "Descriptor checksum mismatch"),
Self::SpendingPolicyRequired(keychain_kind) => {
write!(f, "Spending policy required: {:?}", keychain_kind)
}
Self::InvalidPolicyPathError(err) => write!(f, "Invalid policy path: {}", err),
Self::Signer(err) => write!(f, "Signer error: {}", err),
Self::InvalidOutpoint(outpoint) => write!(
f,
"Requested outpoint doesn't exist in the tx: {}",
outpoint
),
Self::Descriptor(err) => write!(f, "Descriptor error: {}", err),
Self::Miniscript(err) => write!(f, "Miniscript error: {}", err),
Self::MiniscriptPsbt(err) => write!(f, "Miniscript PSBT error: {}", err),
Self::Bip32(err) => write!(f, "BIP32 error: {}", err),
Self::Psbt(err) => write!(f, "PSBT error: {}", err),
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for Error {}
macro_rules! impl_error {
( $from:ty, $to:ident ) => {
impl_error!($from, $to, Error);
};
( $from:ty, $to:ident, $impl_for:ty ) => {
impl core::convert::From<$from> for $impl_for {
fn from(err: $from) -> Self {
<$impl_for>::$to(err)
}
}
};
}
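For readers unfamiliar with this pattern, a two-argument invocation such as `impl_error!(miniscript::Error, Miniscript)` expands to roughly:
impl core::convert::From<miniscript::Error> for Error {
    fn from(err: miniscript::Error) -> Self {
        Error::Miniscript(err)
    }
}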
impl_error!(descriptor::error::Error, Descriptor);
impl_error!(descriptor::policy::PolicyError, InvalidPolicyPathError);
impl_error!(wallet::signer::SignerError, Signer);
impl From<crate::keys::KeyError> for Error {
fn from(key_error: crate::keys::KeyError) -> Error {
match key_error {
crate::keys::KeyError::Miniscript(inner) => Error::Miniscript(inner),
crate::keys::KeyError::Bip32(inner) => Error::Bip32(inner),
crate::keys::KeyError::InvalidChecksum => Error::ChecksumMismatch,
e => Error::Key(e),
}
}
}
impl_error!(miniscript::Error, Miniscript);
impl_error!(MiniscriptPsbtError, MiniscriptPsbt);
impl_error!(bitcoin::util::bip32::Error, Bip32);
impl_error!(bitcoin::util::psbt::Error, Psbt);
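With these `From` impls in place, the `?` operator lifts library errors into `Error` automatically. A sketch with a hypothetical helper:
// Hypothetical helper: `?` converts bip32::Error into Error::Bip32 via From.
fn parse_path(s: &str) -> Result<bitcoin::util::bip32::DerivationPath, Error> {
    Ok(s.parse::<bitcoin::util::bip32::DerivationPath>()?)
}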


@@ -15,7 +15,7 @@
// something that should be fairly simple to re-implement.
use alloc::string::String;
use bitcoin::bip32;
use bitcoin::util::bip32;
use bitcoin::Network;
use miniscript::ScriptContext;
@@ -57,7 +57,7 @@ pub type MnemonicWithPassphrase = (Mnemonic, Option<String>);
#[cfg_attr(docsrs, doc(cfg(feature = "keys-bip39")))]
impl<Ctx: ScriptContext> DerivableKey<Ctx> for Seed {
fn into_extended_key(self) -> Result<ExtendedKey<Ctx>, KeyError> {
Ok(bip32::Xpriv::new_master(Network::Bitcoin, &self[..])?.into())
Ok(bip32::ExtendedPrivKey::new_master(Network::Bitcoin, &self[..])?.into())
}
fn into_descriptor_key(
@@ -142,7 +142,7 @@ impl<Ctx: ScriptContext> GeneratableKey<Ctx> for Mnemonic {
(word_count, language): Self::Options,
entropy: Self::Entropy,
) -> Result<GeneratedKey<Self, Ctx>, Self::Error> {
let entropy = &entropy[..(word_count as usize / 8)];
let entropy = &entropy.as_ref()[..(word_count as usize / 8)];
let mnemonic = Mnemonic::from_entropy_in(language, entropy)?;
Ok(GeneratedKey::new(mnemonic, any_network()))
@@ -154,7 +154,7 @@ mod test {
use alloc::string::ToString;
use core::str::FromStr;
use bitcoin::bip32;
use bitcoin::util::bip32;
use bip39::{Language, Mnemonic};


@@ -15,17 +15,14 @@ use crate::collections::HashSet;
use alloc::string::{String, ToString};
use alloc::vec::Vec;
use core::any::TypeId;
use core::fmt;
use core::marker::PhantomData;
use core::ops::Deref;
use core::str::FromStr;
use rand_core::{CryptoRng, RngCore};
use bitcoin::secp256k1::{self, Secp256k1, Signing};
use bitcoin::bip32;
use bitcoin::{key::XOnlyPublicKey, Network, PrivateKey, PublicKey};
use bitcoin::util::bip32;
use bitcoin::{Network, PrivateKey, PublicKey, XOnlyPublicKey};
use miniscript::descriptor::{Descriptor, DescriptorXKey, Wildcard};
pub use miniscript::descriptor::{
@@ -99,7 +96,7 @@ impl<Ctx: ScriptContext> DescriptorKey<Ctx> {
}
}
// This method is used internally by `bdk_wallet::fragment!` and `bdk_wallet::descriptor!`. It has to be
// This method is used internally by `bdk::fragment!` and `bdk::descriptor!`. It has to be
// public because it is effectively called by external crates once the macros are expanded,
// but since it is not meant to be part of the public api we hide it from the docs.
#[doc(hidden)]
@@ -112,7 +109,7 @@ impl<Ctx: ScriptContext> DescriptorKey<Ctx> {
Ok((public, KeyMap::default(), valid_networks))
}
DescriptorKey::Secret(secret, valid_networks, _) => {
let mut key_map = KeyMap::new();
let mut key_map = KeyMap::with_capacity(1);
let public = secret
.to_public(secp)
@@ -208,9 +205,9 @@ impl<Ctx: ScriptContext + 'static> ExtScriptContext for Ctx {
/// Key type valid in any context:
///
/// ```
/// use bdk_wallet::bitcoin::PublicKey;
/// use bdk::bitcoin::PublicKey;
///
/// use bdk_wallet::keys::{DescriptorKey, IntoDescriptorKey, KeyError, ScriptContext};
/// use bdk::keys::{DescriptorKey, IntoDescriptorKey, KeyError, ScriptContext};
///
/// pub struct MyKeyType {
/// pubkey: PublicKey,
@@ -226,9 +223,9 @@ impl<Ctx: ScriptContext + 'static> ExtScriptContext for Ctx {
/// Key type that is only valid on mainnet:
///
/// ```
/// use bdk_wallet::bitcoin::PublicKey;
/// use bdk::bitcoin::PublicKey;
///
/// use bdk_wallet::keys::{
/// use bdk::keys::{
/// mainnet_network, DescriptorKey, DescriptorPublicKey, IntoDescriptorKey, KeyError,
/// ScriptContext, SinglePub, SinglePubKey,
/// };
@@ -253,11 +250,9 @@ impl<Ctx: ScriptContext + 'static> ExtScriptContext for Ctx {
/// Key type that internally encodes in which context it's valid. The context is checked at runtime:
///
/// ```
/// use bdk_wallet::bitcoin::PublicKey;
/// use bdk::bitcoin::PublicKey;
///
/// use bdk_wallet::keys::{
/// DescriptorKey, ExtScriptContext, IntoDescriptorKey, KeyError, ScriptContext,
/// };
/// use bdk::keys::{DescriptorKey, ExtScriptContext, IntoDescriptorKey, KeyError, ScriptContext};
///
/// pub struct MyKeyType {
/// is_legacy: bool,
@@ -283,17 +278,17 @@ impl<Ctx: ScriptContext + 'static> ExtScriptContext for Ctx {
/// makes the compiler (correctly) fail.
///
/// ```compile_fail
/// use bdk_wallet::bitcoin::PublicKey;
/// use bdk::bitcoin::PublicKey;
/// use core::str::FromStr;
///
/// use bdk_wallet::keys::{DescriptorKey, IntoDescriptorKey, KeyError};
/// use bdk::keys::{DescriptorKey, IntoDescriptorKey, KeyError};
///
/// pub struct MySegwitOnlyKeyType {
/// pubkey: PublicKey,
/// }
///
/// impl IntoDescriptorKey<bdk_wallet::miniscript::Segwitv0> for MySegwitOnlyKeyType {
/// fn into_descriptor_key(self) -> Result<DescriptorKey<bdk_wallet::miniscript::Segwitv0>, KeyError> {
/// impl IntoDescriptorKey<bdk::miniscript::Segwitv0> for MySegwitOnlyKeyType {
/// fn into_descriptor_key(self) -> Result<DescriptorKey<bdk::miniscript::Segwitv0>, KeyError> {
/// self.pubkey.into_descriptor_key()
/// }
/// }
@@ -301,8 +296,8 @@ impl<Ctx: ScriptContext + 'static> ExtScriptContext for Ctx {
/// let key = MySegwitOnlyKeyType {
/// pubkey: PublicKey::from_str("...")?,
/// };
/// let (descriptor, _, _) = bdk_wallet::descriptor!(pkh(key))?;
/// // ^^^^^ changing this to `wpkh` would make it compile
/// let (descriptor, _, _) = bdk::descriptor!(pkh(key))?;
/// // ^^^^^ changing this to `wpkh` would make it compile
///
/// # Ok::<_, Box<dyn std::error::Error>>(())
/// ```
@@ -313,15 +308,15 @@ pub trait IntoDescriptorKey<Ctx: ScriptContext>: Sized {
/// Enum for extended keys that can be either `xprv` or `xpub`
///
/// An instance of [`ExtendedKey`] can be constructed from an [`Xpriv`](bip32::Xpriv)
/// or an [`Xpub`](bip32::Xpub) by using the `From` trait.
/// An instance of [`ExtendedKey`] can be constructed from an [`ExtendedPrivKey`](bip32::ExtendedPrivKey)
/// or an [`ExtendedPubKey`](bip32::ExtendedPubKey) by using the `From` trait.
///
/// Defaults to the [`Legacy`](miniscript::Legacy) context.
pub enum ExtendedKey<Ctx: ScriptContext = miniscript::Legacy> {
/// A private extended key, aka an `xprv`
Private((bip32::Xpriv, PhantomData<Ctx>)),
Private((bip32::ExtendedPrivKey, PhantomData<Ctx>)),
/// A public extended key, aka an `xpub`
Public((bip32::Xpub, PhantomData<Ctx>)),
Public((bip32::ExtendedPubKey, PhantomData<Ctx>)),
}
impl<Ctx: ScriptContext> ExtendedKey<Ctx> {
@@ -333,43 +328,43 @@ impl<Ctx: ScriptContext> ExtendedKey<Ctx> {
}
}
/// Transform the [`ExtendedKey`] into an [`Xpriv`](bip32::Xpriv) for the
/// Transform the [`ExtendedKey`] into an [`ExtendedPrivKey`](bip32::ExtendedPrivKey) for the
/// given [`Network`], if the key contains the private data
pub fn into_xprv(self, network: Network) -> Option<bip32::Xpriv> {
pub fn into_xprv(self, network: Network) -> Option<bip32::ExtendedPrivKey> {
match self {
ExtendedKey::Private((mut xprv, _)) => {
xprv.network = network.into();
xprv.network = network;
Some(xprv)
}
ExtendedKey::Public(_) => None,
}
}
/// Transform the [`ExtendedKey`] into an [`Xpub`](bip32::Xpub) for the
/// Transform the [`ExtendedKey`] into an [`ExtendedPubKey`](bip32::ExtendedPubKey) for the
/// given [`Network`]
pub fn into_xpub<C: Signing>(
self,
network: bitcoin::Network,
secp: &Secp256k1<C>,
) -> bip32::Xpub {
) -> bip32::ExtendedPubKey {
let mut xpub = match self {
ExtendedKey::Private((xprv, _)) => bip32::Xpub::from_priv(secp, &xprv),
ExtendedKey::Private((xprv, _)) => bip32::ExtendedPubKey::from_priv(secp, &xprv),
ExtendedKey::Public((xpub, _)) => xpub,
};
xpub.network = network.into();
xpub.network = network;
xpub
}
}
impl<Ctx: ScriptContext> From<bip32::Xpub> for ExtendedKey<Ctx> {
fn from(xpub: bip32::Xpub) -> Self {
impl<Ctx: ScriptContext> From<bip32::ExtendedPubKey> for ExtendedKey<Ctx> {
fn from(xpub: bip32::ExtendedPubKey) -> Self {
ExtendedKey::Public((xpub, PhantomData))
}
}
impl<Ctx: ScriptContext> From<bip32::Xpriv> for ExtendedKey<Ctx> {
fn from(xprv: bip32::Xpriv) -> Self {
impl<Ctx: ScriptContext> From<bip32::ExtendedPrivKey> for ExtendedKey<Ctx> {
fn from(xprv: bip32::ExtendedPrivKey) -> Self {
ExtendedKey::Private((xprv, PhantomData))
}
}
@@ -387,28 +382,28 @@ impl<Ctx: ScriptContext> From<bip32::Xpriv> for ExtendedKey<Ctx> {
///
/// ## Examples
///
/// Key types that can be directly converted into an [`Xpriv`] or
/// an [`Xpub`] can implement only the required `into_extended_key()` method.
/// Key types that can be directly converted into an [`ExtendedPrivKey`] or
/// an [`ExtendedPubKey`] can implement only the required `into_extended_key()` method.
///
/// ```
/// use bdk_wallet::bitcoin;
/// use bdk_wallet::bitcoin::bip32;
/// use bdk_wallet::keys::{DerivableKey, ExtendedKey, KeyError, ScriptContext};
/// use bdk::bitcoin;
/// use bdk::bitcoin::util::bip32;
/// use bdk::keys::{DerivableKey, ExtendedKey, KeyError, ScriptContext};
///
/// struct MyCustomKeyType {
/// key_data: bitcoin::PrivateKey,
/// chain_code: [u8; 32],
/// chain_code: Vec<u8>,
/// network: bitcoin::Network,
/// }
///
/// impl<Ctx: ScriptContext> DerivableKey<Ctx> for MyCustomKeyType {
/// fn into_extended_key(self) -> Result<ExtendedKey<Ctx>, KeyError> {
/// let xprv = bip32::Xpriv {
/// network: self.network.into(),
/// let xprv = bip32::ExtendedPrivKey {
/// network: self.network,
/// depth: 0,
/// parent_fingerprint: bip32::Fingerprint::default(),
/// private_key: self.key_data.inner,
/// chain_code: bip32::ChainCode::from(&self.chain_code),
/// chain_code: bip32::ChainCode::from(self.chain_code.as_ref()),
/// child_number: bip32::ChildNumber::Normal { index: 0 },
/// };
///
@@ -417,30 +412,30 @@ impl<Ctx: ScriptContext> From<bip32::Xpriv> for ExtendedKey<Ctx> {
/// }
/// ```
///
/// Types that don't internally encode the [`Network`] in which they are valid need some extra
/// Types that don't internally encode the [`Network`](bitcoin::Network) in which they are valid need some extra
/// steps to override the set of valid networks, otherwise only the network specified in the
/// [`Xpriv`] or [`Xpub`] will be considered valid.
/// [`ExtendedPrivKey`] or [`ExtendedPubKey`] will be considered valid.
///
/// ```
/// use bdk_wallet::bitcoin;
/// use bdk_wallet::bitcoin::bip32;
/// use bdk_wallet::keys::{
/// use bdk::bitcoin;
/// use bdk::bitcoin::util::bip32;
/// use bdk::keys::{
/// any_network, DerivableKey, DescriptorKey, ExtendedKey, KeyError, ScriptContext,
/// };
///
/// struct MyCustomKeyType {
/// key_data: bitcoin::PrivateKey,
/// chain_code: [u8; 32],
/// chain_code: Vec<u8>,
/// }
///
/// impl<Ctx: ScriptContext> DerivableKey<Ctx> for MyCustomKeyType {
/// fn into_extended_key(self) -> Result<ExtendedKey<Ctx>, KeyError> {
/// let xprv = bip32::Xpriv {
/// network: bitcoin::Network::Bitcoin.into(), // pick an arbitrary network here
/// let xprv = bip32::ExtendedPrivKey {
/// network: bitcoin::Network::Bitcoin, // pick an arbitrary network here
/// depth: 0,
/// parent_fingerprint: bip32::Fingerprint::default(),
/// private_key: self.key_data.inner,
/// chain_code: bip32::ChainCode::from(&self.chain_code),
/// chain_code: bip32::ChainCode::from(self.chain_code.as_ref()),
/// child_number: bip32::ChildNumber::Normal { index: 0 },
/// };
///
@@ -463,8 +458,8 @@ impl<Ctx: ScriptContext> From<bip32::Xpriv> for ExtendedKey<Ctx> {
/// ```
///
/// [`DerivationPath`]: (bip32::DerivationPath)
/// [`Xpriv`]: (bip32::Xpriv)
/// [`Xpub`]: (bip32::Xpub)
/// [`ExtendedPrivKey`]: (bip32::ExtendedPrivKey)
/// [`ExtendedPubKey`]: (bip32::ExtendedPubKey)
pub trait DerivableKey<Ctx: ScriptContext = miniscript::Legacy>: Sized {
/// Consume `self` and turn it into an [`ExtendedKey`]
#[cfg_attr(
@@ -473,9 +468,9 @@ pub trait DerivableKey<Ctx: ScriptContext = miniscript::Legacy>: Sized {
This can be used to get direct access to `xprv`s and `xpub`s for types that implement this trait,
like [`Mnemonic`](bip39::Mnemonic) when the `keys-bip39` feature is enabled.
```rust
use bdk_wallet::bitcoin::Network;
use bdk_wallet::keys::{DerivableKey, ExtendedKey};
use bdk_wallet::keys::bip39::{Mnemonic, Language};
use bdk::bitcoin::Network;
use bdk::keys::{DerivableKey, ExtendedKey};
use bdk::keys::bip39::{Mnemonic, Language};
# fn main() -> Result<(), Box<dyn std::error::Error>> {
let xkey: ExtendedKey =
@@ -524,13 +519,13 @@ impl<Ctx: ScriptContext> DerivableKey<Ctx> for ExtendedKey<Ctx> {
}
}
impl<Ctx: ScriptContext> DerivableKey<Ctx> for bip32::Xpub {
impl<Ctx: ScriptContext> DerivableKey<Ctx> for bip32::ExtendedPubKey {
fn into_extended_key(self) -> Result<ExtendedKey<Ctx>, KeyError> {
Ok(self.into())
}
}
impl<Ctx: ScriptContext> DerivableKey<Ctx> for bip32::Xpriv {
impl<Ctx: ScriptContext> DerivableKey<Ctx> for bip32::ExtendedPrivKey {
fn into_extended_key(self) -> Result<ExtendedKey<Ctx>, KeyError> {
Ok(self.into())
}
@@ -633,23 +628,12 @@ pub trait GeneratableKey<Ctx: ScriptContext>: Sized {
entropy: Self::Entropy,
) -> Result<GeneratedKey<Self, Ctx>, Self::Error>;
/// Generate a key given the options with random entropy.
///
/// Uses the thread-local random number generator.
#[cfg(feature = "std")]
/// Generate a key given the options with a random entropy
fn generate(options: Self::Options) -> Result<GeneratedKey<Self, Ctx>, Self::Error> {
Self::generate_with_aux_rand(options, &mut bitcoin::key::rand::thread_rng())
}
use rand::{thread_rng, Rng};
/// Generate a key given the options with random entropy.
///
/// Uses a provided random number generator (rng).
fn generate_with_aux_rand(
options: Self::Options,
rng: &mut (impl CryptoRng + RngCore),
) -> Result<GeneratedKey<Self, Ctx>, Self::Error> {
let mut entropy = Self::Entropy::default();
rng.fill_bytes(entropy.as_mut());
thread_rng().fill(entropy.as_mut());
Self::generate_with_entropy(options, entropy)
}
}
@@ -670,20 +654,8 @@ where
}
/// Generate a key with the default options and a random entropy
///
/// Uses the thread-local random number generator.
#[cfg(feature = "std")]
fn generate_default() -> Result<GeneratedKey<Self, Ctx>, Self::Error> {
Self::generate_with_aux_rand(Default::default(), &mut bitcoin::key::rand::thread_rng())
}
/// Generate a key with the default options and a random entropy
///
/// Uses a provided random number generator (rng).
fn generate_default_with_aux_rand(
rng: &mut (impl CryptoRng + RngCore),
) -> Result<GeneratedKey<Self, Ctx>, Self::Error> {
Self::generate_with_aux_rand(Default::default(), rng)
Self::generate(Default::default())
}
}
@@ -697,7 +669,7 @@ where
{
}
impl<Ctx: ScriptContext> GeneratableKey<Ctx> for bip32::Xpriv {
impl<Ctx: ScriptContext> GeneratableKey<Ctx> for bip32::ExtendedPrivKey {
type Entropy = [u8; 32];
type Options = ();
@@ -708,7 +680,7 @@ impl<Ctx: ScriptContext> GeneratableKey<Ctx> for bip32::Xpriv {
entropy: Self::Entropy,
) -> Result<GeneratedKey<Self, Ctx>, Self::Error> {
// pick an arbitrary network here, but say that we support all of them
let xprv = bip32::Xpriv::new_master(Network::Bitcoin, entropy.as_ref())?;
let xprv = bip32::ExtendedPrivKey::new_master(Network::Bitcoin, entropy.as_ref())?;
Ok(GeneratedKey::new(xprv, any_network()))
}
}
@@ -742,7 +714,7 @@ impl<Ctx: ScriptContext> GeneratableKey<Ctx> for PrivateKey {
let inner = secp256k1::SecretKey::from_slice(&entropy)?;
let private_key = PrivateKey {
compressed: options.compressed,
network: Network::Bitcoin.into(),
network: Network::Bitcoin,
inner,
};
@@ -781,7 +753,7 @@ fn expand_multi_keys<Pk: IntoDescriptorKey<Ctx>, Ctx: ScriptContext>(
let (key_map, valid_networks) = key_maps_networks.into_iter().fold(
(KeyMap::default(), any_network()),
|(mut keys_acc, net_acc), (key, net)| {
keys_acc.extend(key);
keys_acc.extend(key.into_iter());
let net_acc = merge_networks(&net_acc, &net);
(keys_acc, net_acc)
@@ -791,7 +763,7 @@ fn expand_multi_keys<Pk: IntoDescriptorKey<Ctx>, Ctx: ScriptContext>(
Ok((pks, key_map, valid_networks))
}
// Used internally by `bdk_wallet::fragment!` to build `pk_k()` fragments
// Used internally by `bdk::fragment!` to build `pk_k()` fragments
#[doc(hidden)]
pub fn make_pk<Pk: IntoDescriptorKey<Ctx>, Ctx: ScriptContext>(
descriptor_key: Pk,
@@ -805,7 +777,7 @@ pub fn make_pk<Pk: IntoDescriptorKey<Ctx>, Ctx: ScriptContext>(
Ok((minisc, key_map, valid_networks))
}
// Used internally by `bdk_wallet::fragment!` to build `pk_h()` fragments
// Used internally by `bdk::fragment!` to build `pk_h()` fragments
#[doc(hidden)]
pub fn make_pkh<Pk: IntoDescriptorKey<Ctx>, Ctx: ScriptContext>(
descriptor_key: Pk,
@@ -819,7 +791,7 @@ pub fn make_pkh<Pk: IntoDescriptorKey<Ctx>, Ctx: ScriptContext>(
Ok((minisc, key_map, valid_networks))
}
// Used internally by `bdk_wallet::fragment!` to build `multi()` fragments
// Used internally by `bdk::fragment!` to build `multi()` fragments
#[doc(hidden)]
pub fn make_multi<
Pk: IntoDescriptorKey<Ctx>,
@@ -839,7 +811,7 @@ pub fn make_multi<
Ok((minisc, key_map, valid_networks))
}
// Used internally by `bdk_wallet::descriptor!` to build `sortedmulti()` fragments
// Used internally by `bdk::descriptor!` to build `sortedmulti()` fragments
#[doc(hidden)]
pub fn make_sortedmulti<Pk, Ctx, F>(
thresh: usize,
@@ -861,7 +833,7 @@ where
Ok((descriptor, key_map, valid_networks))
}
/// The "identity" conversion is used internally by some `bdk_wallet::fragment`s
/// The "identity" conversion is used internally by some `bdk::fragment`s
impl<Ctx: ScriptContext> IntoDescriptorKey<Ctx> for DescriptorKey<Ctx> {
fn into_descriptor_key(self) -> Result<DescriptorKey<Ctx>, KeyError> {
Ok(self)
@@ -872,7 +844,9 @@ impl<Ctx: ScriptContext> IntoDescriptorKey<Ctx> for DescriptorPublicKey {
fn into_descriptor_key(self) -> Result<DescriptorKey<Ctx>, KeyError> {
let networks = match self {
DescriptorPublicKey::Single(_) => any_network(),
DescriptorPublicKey::XPub(DescriptorXKey { xkey, .. }) if xkey.network.is_mainnet() => {
DescriptorPublicKey::XPub(DescriptorXKey { xkey, .. })
if xkey.network == Network::Bitcoin =>
{
mainnet_network()
}
_ => test_networks(),
@@ -905,8 +879,12 @@ impl<Ctx: ScriptContext> IntoDescriptorKey<Ctx> for XOnlyPublicKey {
impl<Ctx: ScriptContext> IntoDescriptorKey<Ctx> for DescriptorSecretKey {
fn into_descriptor_key(self) -> Result<DescriptorKey<Ctx>, KeyError> {
let networks = match &self {
DescriptorSecretKey::Single(sk) if sk.key.network.is_mainnet() => mainnet_network(),
DescriptorSecretKey::XPrv(DescriptorXKey { xkey, .. }) if xkey.network.is_mainnet() => {
DescriptorSecretKey::Single(sk) if sk.key.network == Network::Bitcoin => {
mainnet_network()
}
DescriptorSecretKey::XPrv(DescriptorXKey { xkey, .. })
if xkey.network == Network::Bitcoin =>
{
mainnet_network()
}
_ => test_networks(),
@@ -935,7 +913,7 @@ impl<Ctx: ScriptContext> IntoDescriptorKey<Ctx> for PrivateKey {
}
/// Errors thrown while working with [`keys`](crate::keys)
#[derive(Debug, PartialEq)]
#[derive(Debug)]
pub enum KeyError {
/// The key cannot exist in the given script context
InvalidScriptContext,
@@ -948,25 +926,16 @@ pub enum KeyError {
Message(String),
/// BIP32 error
Bip32(bitcoin::bip32::Error),
Bip32(bitcoin::util::bip32::Error),
/// Miniscript error
Miniscript(miniscript::Error),
}
impl From<miniscript::Error> for KeyError {
fn from(err: miniscript::Error) -> Self {
KeyError::Miniscript(err)
}
}
impl_error!(miniscript::Error, Miniscript, KeyError);
impl_error!(bitcoin::util::bip32::Error, Bip32, KeyError);
impl From<bip32::Error> for KeyError {
fn from(err: bip32::Error) -> Self {
KeyError::Bip32(err)
}
}
impl fmt::Display for KeyError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
impl std::fmt::Display for KeyError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::InvalidScriptContext => write!(f, "Invalid script context"),
Self::InvalidNetwork => write!(f, "Invalid network"),
@@ -983,7 +952,7 @@ impl std::error::Error for KeyError {}
#[cfg(test)]
pub mod test {
use bitcoin::bip32;
use bitcoin::util::bip32;
use super::*;
@@ -992,7 +961,7 @@ pub mod test {
#[test]
fn test_keys_generate_xprv() {
let generated_xprv: GeneratedKey<_, miniscript::Segwitv0> =
bip32::Xpriv::generate_with_entropy_default(TEST_ENTROPY).unwrap();
bip32::ExtendedPrivKey::generate_with_entropy_default(TEST_ENTROPY).unwrap();
assert_eq!(generated_xprv.valid_networks, any_network());
assert_eq!(generated_xprv.to_string(), "xprv9s21ZrQH143K4Xr1cJyqTvuL2FWR8eicgY9boWqMBv8MDVUZ65AXHnzBrK1nyomu6wdcabRgmGTaAKawvhAno1V5FowGpTLVx3jxzE5uk3Q");
@@ -1022,6 +991,6 @@ pub mod test {
.unwrap();
let xprv = xkey.into_xprv(Network::Testnet).unwrap();
assert_eq!(xprv.network, Network::Testnet.into());
assert_eq!(xprv.network, Network::Testnet);
}
}

crates/bdk/src/lib.rs (new file, 46 lines)
@@ -0,0 +1,46 @@
#![doc = include_str!("../README.md")]
#![no_std]
#[cfg(feature = "std")]
#[macro_use]
extern crate std;
#[doc(hidden)]
#[macro_use]
pub extern crate alloc;
pub extern crate bitcoin;
#[cfg(feature = "hardware-signer")]
pub extern crate hwi;
extern crate log;
pub extern crate miniscript;
extern crate serde;
extern crate serde_json;
#[cfg(feature = "keys-bip39")]
extern crate bip39;
#[allow(unused_imports)]
#[macro_use]
pub(crate) mod error;
pub mod descriptor;
pub mod keys;
pub mod psbt;
pub(crate) mod types;
pub mod wallet;
pub use descriptor::template;
pub use descriptor::HdKeyPaths;
pub use error::Error;
pub use types::*;
pub use wallet::signer;
pub use wallet::signer::SignOptions;
pub use wallet::tx_builder::TxBuilder;
pub use wallet::Wallet;
/// Get the version of BDK at runtime
pub fn version() -> &'static str {
env!("CARGO_PKG_VERSION", "unknown")
}
pub use bdk_chain as chain;
pub(crate) use bdk_chain::collections;
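Trivial usage sketch for the `version` helper above:
// Sketch: print the BDK version at runtime.
println!("BDK version: {}", bdk::version());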


@@ -9,12 +9,11 @@
// You may not use this file except in accordance with one or both of these
// licenses.
//! Additional functions on the `rust-bitcoin` `Psbt` structure.
//! Additional functions on the `rust-bitcoin` `PartiallySignedTransaction` structure.
use crate::FeeRate;
use alloc::vec::Vec;
use bitcoin::Amount;
use bitcoin::FeeRate;
use bitcoin::Psbt;
use bitcoin::util::psbt::PartiallySignedTransaction as Psbt;
use bitcoin::TxOut;
// TODO upstream the functions here to `rust-bitcoin`?
@@ -26,36 +25,44 @@ pub trait PsbtUtils {
/// The total transaction fee amount, sum of input amounts minus sum of output amounts, in sats.
/// If the PSBT is missing a TxOut for an input, returns `None`.
fn fee_amount(&self) -> Option<Amount>;
fn fee_amount(&self) -> Option<u64>;
/// The transaction's fee rate. This value will only be accurate if calculated AFTER the
/// `Psbt` is finalized and all witness/signature data is added to the
/// `PartiallySignedTransaction` is finalized and all witness/signature data is added to the
/// transaction.
/// If the PSBT is missing a TxOut for an input, returns `None`.
fn fee_rate(&self) -> Option<FeeRate>;
}
impl PsbtUtils for Psbt {
#[allow(clippy::all)] // We want to allow `manual_map` but it is too new.
fn get_utxo_for(&self, input_index: usize) -> Option<TxOut> {
let tx = &self.unsigned_tx;
let input = self.inputs.get(input_index)?;
match (&input.witness_utxo, &input.non_witness_utxo) {
(Some(_), _) => input.witness_utxo.clone(),
(_, Some(_)) => input.non_witness_utxo.as_ref().map(|in_tx| {
in_tx.output[tx.input[input_index].previous_output.vout as usize].clone()
}),
_ => None,
if input_index >= tx.input.len() {
return None;
}
if let Some(input) = self.inputs.get(input_index) {
if let Some(wit_utxo) = &input.witness_utxo {
Some(wit_utxo.clone())
} else if let Some(in_tx) = &input.non_witness_utxo {
Some(in_tx.output[tx.input[input_index].previous_output.vout as usize].clone())
} else {
None
}
} else {
None
}
}
fn fee_amount(&self) -> Option<Amount> {
fn fee_amount(&self) -> Option<u64> {
let tx = &self.unsigned_tx;
let utxos: Option<Vec<TxOut>> = (0..tx.input.len()).map(|i| self.get_utxo_for(i)).collect();
utxos.map(|inputs| {
let input_amount: Amount = inputs.iter().map(|i| i.value).sum();
let output_amount: Amount = self.unsigned_tx.output.iter().map(|o| o.value).sum();
let input_amount: u64 = inputs.iter().map(|i| i.value).sum();
let output_amount: u64 = self.unsigned_tx.output.iter().map(|o| o.value).sum();
input_amount
.checked_sub(output_amount)
.expect("input amount must be greater than output amount")
@@ -64,7 +71,9 @@ impl PsbtUtils for Psbt {
fn fee_rate(&self) -> Option<FeeRate> {
let fee_amount = self.fee_amount();
let weight = self.clone().extract_tx().ok()?.weight();
fee_amount.map(|fee| fee / weight)
fee_amount.map(|fee| {
let weight = self.clone().extract_tx().weight();
FeeRate::from_wu(fee, weight)
})
}
}
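Usage sketch for the trait above, assuming the pre-`Amount` signatures on the older side of this diff (`fee_amount` returning `Option<u64>`); as the doc comment notes, the rate is only accurate once the PSBT is finalized:
use bdk::psbt::PsbtUtils;

// Sketch: report fee data for a finalized PSBT.
fn report_fee(psbt: &Psbt) {
    if let (Some(fee), Some(rate)) = (psbt.fee_amount(), psbt.fee_rate()) {
        println!("fee: {} sat at {} sat/vb", fee, rate.as_sat_per_vb());
    }
}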

crates/bdk/src/types.rs (new file, 333 lines)
@@ -0,0 +1,333 @@
// Bitcoin Dev Kit
// Written in 2020 by Alekos Filini <alekos.filini@gmail.com>
//
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use alloc::boxed::Box;
use core::convert::AsRef;
use core::ops::Sub;
use bdk_chain::ConfirmationTime;
use bitcoin::blockdata::transaction::{OutPoint, Transaction, TxOut};
use bitcoin::{hash_types::Txid, util::psbt};
use serde::{Deserialize, Serialize};
/// Types of keychains
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub enum KeychainKind {
/// External
External = 0,
/// Internal, usually used for change outputs
Internal = 1,
}
impl KeychainKind {
/// Return [`KeychainKind`] as a byte
pub fn as_byte(&self) -> u8 {
match self {
KeychainKind::External => b'e',
KeychainKind::Internal => b'i',
}
}
}
impl AsRef<[u8]> for KeychainKind {
fn as_ref(&self) -> &[u8] {
match self {
KeychainKind::External => b"e",
KeychainKind::Internal => b"i",
}
}
}
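A tiny sketch of the two byte encodings above:
// Sketch: both representations encode External as 'e' and Internal as 'i'.
assert_eq!(KeychainKind::External.as_byte(), b'e');
assert_eq!(KeychainKind::Internal.as_ref(), b"i");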
/// Fee rate
#[derive(Debug, Copy, Clone, PartialEq, PartialOrd)]
// Internally stored as satoshi/vbyte
pub struct FeeRate(f32);
impl FeeRate {
/// Create a new instance checking the value provided
///
/// ## Panics
///
/// Panics if the value is not [normal](https://doc.rust-lang.org/std/primitive.f32.html#method.is_normal) (except if it's a positive zero) or negative.
fn new_checked(value: f32) -> Self {
assert!(value.is_normal() || value == 0.0);
assert!(value.is_sign_positive());
FeeRate(value)
}
/// Create a new instance of [`FeeRate`] given a float fee rate in sats/kwu
pub fn from_sat_per_kwu(sat_per_kwu: f32) -> Self {
FeeRate::new_checked(sat_per_kwu / 250.0_f32)
}
/// Create a new instance of [`FeeRate`] given a float fee rate in sats/kvb
pub fn from_sat_per_kvb(sat_per_kvb: f32) -> Self {
FeeRate::new_checked(sat_per_kvb / 1000.0_f32)
}
/// Create a new instance of [`FeeRate`] given a float fee rate in btc/kvbytes
///
/// ## Panics
///
/// Panics if the value is not [normal](https://doc.rust-lang.org/std/primitive.f32.html#method.is_normal) (except if it's a positive zero) or negative.
pub fn from_btc_per_kvb(btc_per_kvb: f32) -> Self {
FeeRate::new_checked(btc_per_kvb * 1e5)
}
/// Create a new instance of [`FeeRate`] given a float fee rate in satoshi/vbyte
///
/// ## Panics
///
/// Panics if the value is not [normal](https://doc.rust-lang.org/std/primitive.f32.html#method.is_normal) (except if it's a positive zero) or negative.
pub fn from_sat_per_vb(sat_per_vb: f32) -> Self {
FeeRate::new_checked(sat_per_vb)
}
/// Create a new [`FeeRate`] with the default min relay fee value
pub const fn default_min_relay_fee() -> Self {
FeeRate(1.0)
}
/// Calculate fee rate from `fee` and weight units (`wu`).
pub fn from_wu(fee: u64, wu: usize) -> FeeRate {
Self::from_vb(fee, wu.vbytes())
}
/// Calculate fee rate from `fee` and `vbytes`.
pub fn from_vb(fee: u64, vbytes: usize) -> FeeRate {
let rate = fee as f32 / vbytes as f32;
Self::from_sat_per_vb(rate)
}
/// Return the value as satoshi/vbyte
pub fn as_sat_per_vb(&self) -> f32 {
self.0
}
/// Calculate absolute fee in Satoshis using size in weight units.
pub fn fee_wu(&self, wu: usize) -> u64 {
self.fee_vb(wu.vbytes())
}
/// Calculate absolute fee in Satoshis using size in virtual bytes.
pub fn fee_vb(&self, vbytes: usize) -> u64 {
(self.as_sat_per_vb() * vbytes as f32).ceil() as u64
}
}
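Worked example of the conversions above: since 1 vbyte = 4 weight units, 1 sat/vb equals 1000 sat/kvb and 250 sat/kwu (the unit tests at the end of this file check the same identities):
// Sketch: three equivalent ways to express 1 sat/vb.
let a = FeeRate::from_sat_per_vb(1.0);
let b = FeeRate::from_sat_per_kvb(1000.0);
let c = FeeRate::from_sat_per_kwu(250.0);
assert!((a.as_sat_per_vb() - b.as_sat_per_vb()).abs() < f32::EPSILON);
assert!((a.as_sat_per_vb() - c.as_sat_per_vb()).abs() < f32::EPSILON);

// Absolute fee for a 250-vbyte transaction at 2 sat/vb.
assert_eq!(FeeRate::from_sat_per_vb(2.0).fee_vb(250), 500);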
impl Default for FeeRate {
fn default() -> Self {
FeeRate::default_min_relay_fee()
}
}
impl Sub for FeeRate {
type Output = Self;
fn sub(self, other: FeeRate) -> Self::Output {
FeeRate(self.0 - other.0)
}
}
/// Trait implemented by types that can be used to measure weight units.
pub trait Vbytes {
/// Convert weight units to virtual bytes.
fn vbytes(self) -> usize;
}
impl Vbytes for usize {
fn vbytes(self) -> usize {
// ref: https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki#transaction-size-calculations
(self as f32 / 4.0).ceil() as usize
}
}
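Per the BIP-141 reference in the comment, the conversion rounds up:
// Sketch: vbytes = ceil(wu / 4), so 402 weight units round up to 101 vbytes.
assert_eq!(400_usize.vbytes(), 100);
assert_eq!(402_usize.vbytes(), 101);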
/// An unspent output owned by a [`Wallet`].
///
/// [`Wallet`]: crate::Wallet
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Hash)]
pub struct LocalUtxo {
/// Reference to a transaction output
pub outpoint: OutPoint,
/// Transaction output
pub txout: TxOut,
/// Type of keychain
pub keychain: KeychainKind,
/// Whether this UTXO is spent or not
pub is_spent: bool,
/// The derivation index for the script pubkey in the wallet
pub derivation_index: u32,
/// The confirmation time for transaction containing this utxo
pub confirmation_time: ConfirmationTime,
}
/// A [`Utxo`] with its `satisfaction_weight`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct WeightedUtxo {
/// The weight of the witness data and `scriptSig` expressed in [weight units]. This is used to
/// properly maintain the feerate when adding this input to a transaction during coin selection.
///
/// [weight units]: https://en.bitcoin.it/wiki/Weight_units
pub satisfaction_weight: usize,
/// The UTXO
pub utxo: Utxo,
}
#[derive(Debug, Clone, PartialEq, Eq)]
/// An unspent transaction output (UTXO).
pub enum Utxo {
/// A UTXO owned by the local wallet.
Local(LocalUtxo),
/// A UTXO owned by another wallet.
Foreign {
/// The location of the output.
outpoint: OutPoint,
/// The information about the input we require to add it to a PSBT.
// Box it to stop the type being too big.
psbt_input: Box<psbt::Input>,
},
}
impl Utxo {
/// Get the location of the UTXO
pub fn outpoint(&self) -> OutPoint {
match &self {
Utxo::Local(local) => local.outpoint,
Utxo::Foreign { outpoint, .. } => *outpoint,
}
}
/// Get the `TxOut` of the UTXO
pub fn txout(&self) -> &TxOut {
match &self {
Utxo::Local(local) => &local.txout,
Utxo::Foreign {
outpoint,
psbt_input,
} => {
if let Some(prev_tx) = &psbt_input.non_witness_utxo {
return &prev_tx.output[outpoint.vout as usize];
}
if let Some(txout) = &psbt_input.witness_utxo {
return txout;
}
unreachable!("Foreign UTXOs will always have one of these set")
}
}
}
}
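Sketch of using the accessors above uniformly over local and foreign UTXOs (assuming the `u64`-valued `TxOut::value` on the older side of this diff):
// Hypothetical helper: total value across a mixed set of UTXOs.
fn total_value(utxos: &[Utxo]) -> u64 {
    utxos.iter().map(|u| u.txout().value).sum()
}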
/// A wallet transaction
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct TransactionDetails {
/// Optional transaction
pub transaction: Option<Transaction>,
/// Transaction id
pub txid: Txid,
/// Received value (sats)
/// Sum of owned outputs of this transaction.
pub received: u64,
/// Sent value (sats)
/// Sum of owned inputs of this transaction.
pub sent: u64,
/// Fee value in sats if it was available.
pub fee: Option<u64>,
/// If the transaction is confirmed, contains the height and Unix timestamp of the block containing
/// the transaction; if unconfirmed, contains `None`.
pub confirmation_time: ConfirmationTime,
}
impl PartialOrd for TransactionDetails {
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Ord for TransactionDetails {
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
self.confirmation_time
.cmp(&other.confirmation_time)
.then_with(|| self.txid.cmp(&other.txid))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn can_store_feerate_in_const() {
const _MIN_RELAY: FeeRate = FeeRate::default_min_relay_fee();
}
#[test]
#[should_panic]
fn test_invalid_feerate_neg_zero() {
let _ = FeeRate::from_sat_per_vb(-0.0);
}
#[test]
#[should_panic]
fn test_invalid_feerate_neg_value() {
let _ = FeeRate::from_sat_per_vb(-5.0);
}
#[test]
#[should_panic]
fn test_invalid_feerate_nan() {
let _ = FeeRate::from_sat_per_vb(f32::NAN);
}
#[test]
#[should_panic]
fn test_invalid_feerate_inf() {
let _ = FeeRate::from_sat_per_vb(f32::INFINITY);
}
#[test]
fn test_valid_feerate_pos_zero() {
let _ = FeeRate::from_sat_per_vb(0.0);
}
#[test]
fn test_fee_from_btc_per_kvb() {
let fee = FeeRate::from_btc_per_kvb(1e-5);
assert!((fee.as_sat_per_vb() - 1.0).abs() < f32::EPSILON);
}
#[test]
fn test_fee_from_sat_per_vbyte() {
let fee = FeeRate::from_sat_per_vb(1.0);
assert!((fee.as_sat_per_vb() - 1.0).abs() < f32::EPSILON);
}
#[test]
fn test_fee_default_min_relay_fee() {
let fee = FeeRate::default_min_relay_fee();
assert!((fee.as_sat_per_vb() - 1.0).abs() < f32::EPSILON);
}
#[test]
fn test_fee_from_sat_per_kvb() {
let fee = FeeRate::from_sat_per_kvb(1000.0);
assert!((fee.as_sat_per_vb() - 1.0).abs() < f32::EPSILON);
}
#[test]
fn test_fee_from_sat_per_kwu() {
let fee = FeeRate::from_sat_per_kwu(250.0);
assert!((fee.as_sat_per_vb() - 1.0).abs() < f32::EPSILON);
}
}


@@ -20,8 +20,8 @@
//! ```
//! # use std::str::FromStr;
//! # use bitcoin::*;
//! # use bdk_wallet::export::*;
//! # use bdk_wallet::*;
//! # use bdk::wallet::export::*;
//! # use bdk::*;
//! let import = r#"{
//! "descriptor": "wpkh([c258d2e4\/84h\/1h\/0h]tpubDD3ynpHgJQW8VvWRzQ5WFDCrs4jqVFGHB3vLC3r49XHJSqP8bHKdK4AriuUKLccK68zfzowx7YhmDN8SiSkgCDENUFx9qVw65YyqM78vyVe\/0\/*)",
//! "blockheight":1782088,
@@ -29,35 +29,34 @@
//! }"#;
//!
//! let import = FullyNodedExport::from_str(import)?;
//! let wallet = Wallet::create(
//! import.descriptor(),
//! import.change_descriptor().expect("change descriptor"),
//! )
//! .network(Network::Testnet)
//! .create_wallet_no_persist()?;
//! let wallet = Wallet::new_no_persist(
//! &import.descriptor(),
//! import.change_descriptor().as_ref(),
//! Network::Testnet,
//! )?;
//! # Ok::<_, Box<dyn std::error::Error>>(())
//! ```
//!
//! ### Export a `Wallet`
//! ```
//! # use bitcoin::*;
//! # use bdk_wallet::export::*;
//! # use bdk_wallet::*;
//! let wallet = Wallet::create(
//! # use bdk::wallet::export::*;
//! # use bdk::*;
//! let wallet = Wallet::new_no_persist(
//! "wpkh([c258d2e4/84h/1h/0h]tpubDD3ynpHgJQW8VvWRzQ5WFDCrs4jqVFGHB3vLC3r49XHJSqP8bHKdK4AriuUKLccK68zfzowx7YhmDN8SiSkgCDENUFx9qVw65YyqM78vyVe/0/*)",
//! "wpkh([c258d2e4/84h/1h/0h]tpubDD3ynpHgJQW8VvWRzQ5WFDCrs4jqVFGHB3vLC3r49XHJSqP8bHKdK4AriuUKLccK68zfzowx7YhmDN8SiSkgCDENUFx9qVw65YyqM78vyVe/1/*)",
//! )
//! .network(Network::Testnet)
//! .create_wallet_no_persist()?;
//! Some("wpkh([c258d2e4/84h/1h/0h]tpubDD3ynpHgJQW8VvWRzQ5WFDCrs4jqVFGHB3vLC3r49XHJSqP8bHKdK4AriuUKLccK68zfzowx7YhmDN8SiSkgCDENUFx9qVw65YyqM78vyVe/1/*)"),
//! Network::Testnet,
//! )?;
//! let export = FullyNodedExport::export_wallet(&wallet, "exported wallet", true).unwrap();
//!
//! println!("Exported: {}", export.to_string());
//! # Ok::<_, Box<dyn std::error::Error>>(())
//! ```
use alloc::string::String;
use core::fmt;
use core::str::FromStr;
use alloc::string::{String, ToString};
use bdk_chain::sparse_chain::ChainPosition;
use serde::{Deserialize, Serialize};
use miniscript::descriptor::{ShInner, WshInner};
@@ -82,9 +81,9 @@ pub struct FullyNodedExport {
pub label: String,
}
impl fmt::Display for FullyNodedExport {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", serde_json::to_string(self).unwrap())
impl ToString for FullyNodedExport {
fn to_string(&self) -> String {
serde_json::to_string(self).unwrap()
}
}
@@ -112,13 +111,13 @@ impl FullyNodedExport {
///
/// If the database is empty or `include_blockheight` is false, the `blockheight` field
/// returned will be `0`.
pub fn export_wallet(
wallet: &Wallet,
pub fn export_wallet<D>(
wallet: &Wallet<D>,
label: &str,
include_blockheight: bool,
) -> Result<Self, &'static str> {
let descriptor = wallet
.public_descriptor(KeychainKind::External)
.get_descriptor_for_keychain(KeychainKind::External)
.to_string_with_secret(
&wallet
.get_signers(KeychainKind::External)
@@ -128,12 +127,11 @@ impl FullyNodedExport {
Self::is_compatible_with_core(&descriptor)?;
let blockheight = if include_blockheight {
wallet.transactions().next().map_or(0, |canonical_tx| {
match canonical_tx.chain_position {
bdk_chain::ChainPosition::Confirmed(a) => a.block_id.height,
bdk_chain::ChainPosition::Unconfirmed(_) => 0,
}
})
wallet
.transactions()
.next()
.and_then(|(pos, _)| pos.height().into())
.unwrap_or(0)
} else {
0
};
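
Both sides of this hunk implement the same fallback: take the first listed transaction's confirmation height, else 0. A standalone sketch of that rule:

    fn main() {
        // None: wallet has no transactions; Some(None): first tx unconfirmed;
        // Some(Some(h)): first tx confirmed at height h.
        fn export_height(first_tx: Option<Option<u32>>) -> u32 {
            first_tx.flatten().unwrap_or(0)
        }
        assert_eq!(export_height(None), 0);
        assert_eq!(export_height(Some(None)), 0);
        assert_eq!(export_height(Some(Some(5000))), 5000);
    }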
@@ -144,17 +142,19 @@ impl FullyNodedExport {
blockheight,
};
let change_descriptor = {
let descriptor = wallet
.public_descriptor(KeychainKind::Internal)
.to_string_with_secret(
&wallet
.get_signers(KeychainKind::Internal)
.as_key_map(wallet.secp_ctx()),
);
Some(remove_checksum(descriptor))
let change_descriptor = match wallet.public_descriptor(KeychainKind::Internal).is_some() {
false => None,
true => {
let descriptor = wallet
.get_descriptor_for_keychain(KeychainKind::Internal)
.to_string_with_secret(
&wallet
.get_signers(KeychainKind::Internal)
.as_key_map(wallet.secp_ctx()),
);
Some(remove_checksum(descriptor))
}
};
if export.change_descriptor() != change_descriptor {
return Err("Incompatible change descriptor");
}
@@ -166,7 +166,7 @@ impl FullyNodedExport {
fn check_ms<Ctx: ScriptContext>(
terminal: &Terminal<String, Ctx>,
) -> Result<(), &'static str> {
if let Terminal::Multi(_) = terminal {
if let Terminal::Multi(_, _) = terminal {
Ok(())
} else {
Err("The descriptor contains operators not supported by Bitcoin Core")
@@ -189,7 +189,6 @@ impl FullyNodedExport {
WshInner::SortedMulti(_) => Ok(()),
WshInner::Ms(ms) => check_ms(&ms.node),
},
Descriptor::Tr(_) => Ok(()),
_ => Err("The descriptor is not compatible with Bitcoin Core"),
}
}
@@ -215,51 +214,39 @@ impl FullyNodedExport {
mod test {
use core::str::FromStr;
use crate::std::string::ToString;
use bdk_chain::{BlockId, ConfirmationBlockTime};
use bdk_chain::{BlockId, ConfirmationTime};
use bitcoin::hashes::Hash;
use bitcoin::{transaction, BlockHash, Network, Transaction};
use bitcoin::{BlockHash, Network, Transaction};
use super::*;
use crate::Wallet;
use crate::wallet::Wallet;
fn get_test_wallet(descriptor: &str, change_descriptor: &str, network: Network) -> Wallet {
use crate::wallet::Update;
use bdk_chain::TxGraph;
let mut wallet = Wallet::create(descriptor.to_string(), change_descriptor.to_string())
.network(network)
.create_wallet_no_persist()
.expect("must create wallet");
fn get_test_wallet(
descriptor: &str,
change_descriptor: Option<&str>,
network: Network,
) -> Wallet<()> {
let mut wallet = Wallet::new_no_persist(descriptor, change_descriptor, network).unwrap();
let transaction = Transaction {
input: vec![],
output: vec![],
version: transaction::Version::non_standard(0),
lock_time: bitcoin::absolute::LockTime::ZERO,
version: 0,
lock_time: bitcoin::PackedLockTime::ZERO,
};
let txid = transaction.compute_txid();
let block_id = BlockId {
height: 5000,
hash: BlockHash::all_zeros(),
};
wallet.insert_checkpoint(block_id).unwrap();
wallet
.insert_checkpoint(BlockId {
height: 5001,
hash: BlockHash::all_zeros(),
})
.unwrap();
wallet.insert_tx(transaction);
let anchor = ConfirmationBlockTime {
confirmation_time: 0,
block_id,
};
let mut graph = TxGraph::default();
let _ = graph.insert_anchor(txid, anchor);
wallet
.apply_update(Update {
graph,
..Default::default()
})
.insert_tx(
transaction,
ConfirmationTime::Confirmed {
height: 5000,
time: 0,
},
)
.unwrap();
wallet
}
@@ -269,7 +256,7 @@ mod test {
let descriptor = "wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/44'/0'/0'/0/*)";
let change_descriptor = "wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/44'/0'/0'/1/*)";
let wallet = get_test_wallet(descriptor, change_descriptor, Network::Bitcoin);
let wallet = get_test_wallet(descriptor, Some(change_descriptor), Network::Bitcoin);
let export = FullyNodedExport::export_wallet(&wallet, "Test Label", true).unwrap();
assert_eq!(export.descriptor(), descriptor);
@@ -281,14 +268,13 @@ mod test {
#[test]
#[should_panic(expected = "Incompatible change descriptor")]
fn test_export_no_change() {
// The wallet's change descriptor has no wildcard. It should be impossible to
// This wallet explicitly doesn't have a change descriptor. It should be impossible to
// export, because exporting this kind of external descriptor normally implies the
// existence of a compatible internal descriptor
// existence of an internal descriptor
let descriptor = "wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/44'/0'/0'/0/*)";
let change_descriptor = "wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/44'/0'/0'/1/0)";
let wallet = get_test_wallet(descriptor, change_descriptor, Network::Bitcoin);
let wallet = get_test_wallet(descriptor, None, Network::Bitcoin);
FullyNodedExport::export_wallet(&wallet, "Test Label", true).unwrap();
}
@@ -301,7 +287,7 @@ mod test {
let descriptor = "wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/44'/0'/0'/0/*)";
let change_descriptor = "wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/50'/0'/1/*)";
let wallet = get_test_wallet(descriptor, change_descriptor, Network::Bitcoin);
let wallet = get_test_wallet(descriptor, Some(change_descriptor), Network::Bitcoin);
FullyNodedExport::export_wallet(&wallet, "Test Label", true).unwrap();
}
@@ -318,7 +304,7 @@ mod test {
[c98b1535/48'/0'/0'/2']tpubDCDi5W4sP6zSnzJeowy8rQDVhBdRARaPhK1axABi8V1661wEPeanpEXj4ZLAUEoikVtoWcyK26TKKJSecSfeKxwHCcRrge9k1ybuiL71z4a/1/*\
))";
let wallet = get_test_wallet(descriptor, change_descriptor, Network::Testnet);
let wallet = get_test_wallet(descriptor, Some(change_descriptor), Network::Testnet);
let export = FullyNodedExport::export_wallet(&wallet, "Test Label", true).unwrap();
assert_eq!(export.descriptor(), descriptor);
@@ -327,24 +313,12 @@ mod test {
assert_eq!(export.label, "Test Label");
}
#[test]
fn test_export_tr() {
let descriptor = "tr([73c5da0a/86'/0'/0']tprv8fMn4hSKPRC1oaCPqxDb1JWtgkpeiQvZhsr8W2xuy3GEMkzoArcAWTfJxYb6Wj8XNNDWEjfYKK4wGQXh3ZUXhDF2NcnsALpWTeSwarJt7Vc/0/*)";
let change_descriptor = "tr([73c5da0a/86'/0'/0']tprv8fMn4hSKPRC1oaCPqxDb1JWtgkpeiQvZhsr8W2xuy3GEMkzoArcAWTfJxYb6Wj8XNNDWEjfYKK4wGQXh3ZUXhDF2NcnsALpWTeSwarJt7Vc/1/*)";
let wallet = get_test_wallet(descriptor, change_descriptor, Network::Testnet);
let export = FullyNodedExport::export_wallet(&wallet, "Test Label", true).unwrap();
assert_eq!(export.descriptor(), descriptor);
assert_eq!(export.change_descriptor(), Some(change_descriptor.into()));
assert_eq!(export.blockheight, 5000);
assert_eq!(export.label, "Test Label");
}
#[test]
fn test_export_to_json() {
let descriptor = "wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/44'/0'/0'/0/*)";
let change_descriptor = "wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/44'/0'/0'/1/*)";
let wallet = get_test_wallet(descriptor, change_descriptor, Network::Bitcoin);
let wallet = get_test_wallet(descriptor, Some(change_descriptor), Network::Bitcoin);
let export = FullyNodedExport::export_wallet(&wallet, "Test Label", true).unwrap();
assert_eq!(export.to_string(), "{\"descriptor\":\"wpkh(xprv9s21ZrQH143K4CTb63EaMxja1YiTnSEWKMbn23uoEnAzxjdUJRQkazCAtzxGm4LSoTSVTptoV9RbchnKPW9HxKtZumdyxyikZFDLhogJ5Uj/44\'/0\'/0\'/0/*)\",\"blockheight\":5000,\"label\":\"Test Label\"}");

View File

@@ -14,12 +14,12 @@
//! This module contains HWISigner, an implementation of a [TransactionSigner] to be
//! used with hardware wallets.
//! ```no_run
//! # use bdk_wallet::bitcoin::Network;
//! # use bdk_wallet::signer::SignerOrdering;
//! # use bdk_wallet::hardwaresigner::HWISigner;
//! # use bdk_wallet::AddressIndex::New;
//! # use bdk_wallet::{CreateParams, KeychainKind, SignOptions};
//! # use hwi::HWIClient;
//! # use bdk::bitcoin::Network;
//! # use bdk::signer::SignerOrdering;
//! # use bdk::wallet::hardwaresigner::HWISigner;
//! # use bdk::wallet::AddressIndex::New;
//! # use bdk::{FeeRate, KeychainKind, SignOptions, Wallet};
//! # use hwi::{types::HWIChain, HWIClient};
//! # use std::sync::Arc;
//! #
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
@@ -28,9 +28,13 @@
//! panic!("No devices found!");
//! }
//! let first_device = devices.remove(0)?;
//! let custom_signer = HWISigner::from_device(&first_device, Network::Testnet.into())?;
//! let custom_signer = HWISigner::from_device(&first_device, HWIChain::Test)?;
//!
//! # let mut wallet = CreateParams::new("", "", Network::Testnet)?.create_wallet_no_persist()?;
//! # let mut wallet = Wallet::new_no_persist(
//! # "",
//! # None,
//! # Network::Testnet,
//! # )?;
//! #
//! // Adding the hardware signer to the BDK wallet
//! wallet.add_signer(
@@ -43,9 +47,9 @@
//! # }
//! ```
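
A hedged follow-up to the example above: once registered, the hardware signer is picked up by the wallet's normal signing flow. `wallet` and a `psbt` from an earlier `build_tx` are assumed in scope:

    // Routes any matching inputs through the HWISigner added above.
    let finalized = wallet.sign(&mut psbt, SignOptions::default())?;
    println!("PSBT finalized: {}", finalized);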
use bitcoin::bip32::Fingerprint;
use bitcoin::psbt::PartiallySignedTransaction;
use bitcoin::secp256k1::{All, Secp256k1};
use bitcoin::Psbt;
use bitcoin::util::bip32::Fingerprint;
use hwi::error::Error;
use hwi::types::{HWIChain, HWIDevice};
@@ -83,7 +87,7 @@ impl SignerCommon for HWISigner {
impl TransactionSigner for HWISigner {
fn sign_transaction(
&self,
psbt: &mut Psbt,
psbt: &mut PartiallySignedTransaction,
_sign_options: &crate::SignOptions,
_secp: &crate::wallet::utils::SecpCtx,
) -> Result<(), SignerError> {

crates/bdk/src/wallet/mod.rs (1803 lines): file diff suppressed because it is too large

View File

@@ -19,12 +19,13 @@
//! # use core::str::FromStr;
//! # use bitcoin::secp256k1::{Secp256k1, All};
//! # use bitcoin::*;
//! # use bdk_wallet::signer::*;
//! # use bdk_wallet::*;
//! # use bitcoin::util::psbt;
//! # use bdk::signer::*;
//! # use bdk::*;
//! # #[derive(Debug)]
//! # struct CustomHSM;
//! # impl CustomHSM {
//! # fn hsm_sign_input(&self, _psbt: &mut Psbt, _input: usize) -> Result<(), SignerError> {
//! # fn hsm_sign_input(&self, _psbt: &mut psbt::PartiallySignedTransaction, _input: usize) -> Result<(), SignerError> {
//! # Ok(())
//! # }
//! # fn connect() -> Self {
@@ -54,7 +55,7 @@
//! impl InputSigner for CustomSigner {
//! fn sign_input(
//! &self,
//! psbt: &mut Psbt,
//! psbt: &mut psbt::PartiallySignedTransaction,
//! input_index: usize,
//! _sign_options: &SignOptions,
//! _secp: &Secp256k1<All>,
@@ -67,46 +68,42 @@
//!
//! let custom_signer = CustomSigner::connect();
//!
//! let descriptor = "wpkh(tpubD6NzVbkrYhZ4Xferm7Pz4VnjdcDPFyjVu5K4iZXQ4pVN8Cks4pHVowTBXBKRhX64pkRyJZJN5xAKj4UDNnLPb5p2sSKXhewoYx5GbTdUFWq/0/*)";
//! let change_descriptor = "wpkh(tpubD6NzVbkrYhZ4Xferm7Pz4VnjdcDPFyjVu5K4iZXQ4pVN8Cks4pHVowTBXBKRhX64pkRyJZJN5xAKj4UDNnLPb5p2sSKXhewoYx5GbTdUFWq/1/*)";
//! let mut wallet = Wallet::create(descriptor, change_descriptor)
//! .network(Network::Testnet)
//! .create_wallet_no_persist()?;
//! let descriptor = "wpkh(tpubD6NzVbkrYhZ4Xferm7Pz4VnjdcDPFyjVu5K4iZXQ4pVN8Cks4pHVowTBXBKRhX64pkRyJZJN5xAKj4UDNnLPb5p2sSKXhewoYx5GbTdUFWq/*)";
//! let mut wallet = Wallet::new_no_persist(descriptor, None, Network::Testnet)?;
//! wallet.add_signer(
//! KeychainKind::External,
//! SignerOrdering(200),
//! Arc::new(custom_signer)
//! );
//!
//! # Ok::<_, anyhow::Error>(())
//! # Ok::<_, bdk::Error>(())
//! ```
use crate::collections::BTreeMap;
use alloc::string::String;
use alloc::sync::Arc;
use alloc::vec::Vec;
use core::cmp::Ordering;
use core::fmt;
use core::ops::{Bound::Included, Deref};
use bitcoin::bip32::{ChildNumber, DerivationPath, Fingerprint, Xpriv};
use bitcoin::hashes::hash160;
use bitcoin::blockdata::opcodes;
use bitcoin::blockdata::script::Builder as ScriptBuilder;
use bitcoin::hashes::{hash160, Hash};
use bitcoin::secp256k1::Message;
use bitcoin::sighash::{EcdsaSighashType, TapSighash, TapSighashType};
use bitcoin::{ecdsa, psbt, sighash, taproot};
use bitcoin::{key::TapTweak, key::XOnlyPublicKey, secp256k1};
use bitcoin::{PrivateKey, Psbt, PublicKey};
use bitcoin::util::bip32::{ChildNumber, DerivationPath, ExtendedPrivKey, Fingerprint};
use bitcoin::util::{ecdsa, psbt, schnorr, sighash, taproot};
use bitcoin::{secp256k1, XOnlyPublicKey};
use bitcoin::{EcdsaSighashType, PrivateKey, PublicKey, SchnorrSighashType, Script};
use miniscript::descriptor::{
Descriptor, DescriptorMultiXKey, DescriptorPublicKey, DescriptorSecretKey, DescriptorXKey,
InnerXKey, KeyMap, SinglePriv, SinglePubKey,
Descriptor, DescriptorPublicKey, DescriptorSecretKey, DescriptorXKey, KeyMap, SinglePriv,
SinglePubKey,
};
use miniscript::{SigType, ToPublicKey};
use miniscript::{Legacy, Segwitv0, SigType, Tap, ToPublicKey};
use super::utils::SecpCtx;
use crate::descriptor::{DescriptorMeta, XKeyUtils};
use crate::psbt::PsbtUtils;
use crate::wallet::error::MiniscriptPsbtError;
/// Identifier of a signer in the `SignersContainers`. Used as a key to find the right signer among
/// multiple of them
@@ -133,7 +130,7 @@ impl From<Fingerprint> for SignerId {
}
/// Signing error
#[derive(Debug)]
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum SignerError {
/// The private key is missing for the required public key
MissingKey,
@@ -161,16 +158,24 @@ pub enum SignerError {
NonStandardSighash,
/// Invalid SIGHASH for the signing context in use
InvalidSighash,
/// Error while computing the hash to sign a Taproot input.
SighashTaproot(sighash::TaprootError),
/// PSBT sign error.
Psbt(psbt::SignError),
/// Miniscript PSBT error
MiniscriptPsbt(MiniscriptPsbtError),
/// To be used only by external libraries implementing [`InputSigner`] or
/// [`TransactionSigner`], so that they can return their own custom errors, without having to
/// modify [`SignerError`] in BDK.
External(String),
/// Error while computing the hash to sign
SighashError(sighash::Error),
/// Error while signing using hardware wallets
#[cfg(feature = "hardware-signer")]
HWIError(hwi::error::Error),
}
#[cfg(feature = "hardware-signer")]
impl From<hwi::error::Error> for SignerError {
fn from(e: hwi::error::Error) -> Self {
SignerError::HWIError(e)
}
}
impl From<sighash::Error> for SignerError {
fn from(e: sighash::Error) -> Self {
SignerError::SighashError(e)
}
}
impl fmt::Display for SignerError {
@@ -187,10 +192,9 @@ impl fmt::Display for SignerError {
Self::MissingHdKeypath => write!(f, "Missing fingerprint and derivation path"),
Self::NonStandardSighash => write!(f, "The psbt contains a non standard sighash"),
Self::InvalidSighash => write!(f, "Invalid SIGHASH for the signing context in use"),
Self::SighashTaproot(err) => write!(f, "Error while computing the hash to sign a Taproot input: {}", err),
Self::Psbt(err) => write!(f, "Error computing the sighash: {}", err),
Self::MiniscriptPsbt(err) => write!(f, "Miniscript PSBT error: {}", err),
Self::External(err) => write!(f, "{}", err),
Self::SighashError(err) => write!(f, "Error while computing the hash to sign: {}", err),
#[cfg(feature = "hardware-signer")]
Self::HWIError(err) => write!(f, "Error while signing using hardware wallets: {}", err),
}
}
}
@@ -214,7 +218,7 @@ pub enum SignerContext {
},
}
/// Wrapper to pair a signer with its context
/// Wrapper structure to pair a signer with its context
#[derive(Debug, Clone)]
pub struct SignerWrapper<S: Sized + fmt::Debug + Clone> {
signer: S,
@@ -263,7 +267,7 @@ pub trait InputSigner: SignerCommon {
/// Sign a single psbt input
fn sign_input(
&self,
psbt: &mut Psbt,
psbt: &mut psbt::PartiallySignedTransaction,
input_index: usize,
sign_options: &SignOptions,
secp: &SecpCtx,
@@ -278,7 +282,7 @@ pub trait TransactionSigner: SignerCommon {
/// Sign all the inputs of the psbt
fn sign_transaction(
&self,
psbt: &mut Psbt,
psbt: &mut psbt::PartiallySignedTransaction,
sign_options: &SignOptions,
secp: &SecpCtx,
) -> Result<(), SignerError>;
@@ -287,7 +291,7 @@ pub trait TransactionSigner: SignerCommon {
impl<T: InputSigner> TransactionSigner for T {
fn sign_transaction(
&self,
psbt: &mut Psbt,
psbt: &mut psbt::PartiallySignedTransaction,
sign_options: &SignOptions,
secp: &SecpCtx,
) -> Result<(), SignerError> {
@@ -299,7 +303,7 @@ impl<T: InputSigner> TransactionSigner for T {
}
}
impl SignerCommon for SignerWrapper<DescriptorXKey<Xpriv>> {
impl SignerCommon for SignerWrapper<DescriptorXKey<ExtendedPrivKey>> {
fn id(&self, secp: &SecpCtx) -> SignerId {
SignerId::from(self.root_fingerprint(secp))
}
@@ -309,10 +313,10 @@ impl SignerCommon for SignerWrapper<DescriptorXKey<Xpriv>> {
}
}
impl InputSigner for SignerWrapper<DescriptorXKey<Xpriv>> {
impl InputSigner for SignerWrapper<DescriptorXKey<ExtendedPrivKey>> {
fn sign_input(
&self,
psbt: &mut Psbt,
psbt: &mut psbt::PartiallySignedTransaction,
input_index: usize,
sign_options: &SignOptions,
secp: &SecpCtx,
@@ -379,48 +383,6 @@ impl InputSigner for SignerWrapper<DescriptorXKey<Xpriv>> {
}
}
fn multikey_to_xkeys<K: InnerXKey + Clone>(
multikey: DescriptorMultiXKey<K>,
) -> Vec<DescriptorXKey<K>> {
multikey
.derivation_paths
.into_paths()
.into_iter()
.map(|derivation_path| DescriptorXKey {
origin: multikey.origin.clone(),
xkey: multikey.xkey.clone(),
derivation_path,
wildcard: multikey.wildcard,
})
.collect()
}
impl SignerCommon for SignerWrapper<DescriptorMultiXKey<Xpriv>> {
fn id(&self, secp: &SecpCtx) -> SignerId {
SignerId::from(self.root_fingerprint(secp))
}
fn descriptor_secret_key(&self) -> Option<DescriptorSecretKey> {
Some(DescriptorSecretKey::MultiXPrv(self.signer.clone()))
}
}
impl InputSigner for SignerWrapper<DescriptorMultiXKey<Xpriv>> {
fn sign_input(
&self,
psbt: &mut Psbt,
input_index: usize,
sign_options: &SignOptions,
secp: &SecpCtx,
) -> Result<(), SignerError> {
let xkeys = multikey_to_xkeys(self.signer.clone());
for xkey in xkeys {
SignerWrapper::new(xkey, self.ctx).sign_input(psbt, input_index, sign_options, secp)?
}
Ok(())
}
}
impl SignerCommon for SignerWrapper<PrivateKey> {
fn id(&self, secp: &SecpCtx) -> SignerId {
SignerId::from(self.public_key(secp).to_pubkeyhash(SigType::Ecdsa))
@@ -437,7 +399,7 @@ impl SignerCommon for SignerWrapper<PrivateKey> {
impl InputSigner for SignerWrapper<PrivateKey> {
fn sign_input(
&self,
psbt: &mut Psbt,
psbt: &mut psbt::PartiallySignedTransaction,
input_index: usize,
sign_options: &SignOptions,
secp: &SecpCtx,
@@ -453,88 +415,82 @@ impl InputSigner for SignerWrapper<PrivateKey> {
}
let pubkey = PublicKey::from_private_key(secp, self);
let x_only_pubkey = XOnlyPublicKey::from(pubkey.inner);
match self.ctx {
SignerContext::Tap { is_internal_key } => {
let x_only_pubkey = XOnlyPublicKey::from(pubkey.inner);
if let Some(psbt_internal_key) = psbt.inputs[input_index].tap_internal_key {
if is_internal_key
&& psbt.inputs[input_index].tap_key_sig.is_none()
&& sign_options.sign_with_tap_internal_key
&& x_only_pubkey == psbt_internal_key
{
let (sighash, sighash_type) = compute_tap_sighash(psbt, input_index, None)?;
sign_psbt_schnorr(
&self.inner,
x_only_pubkey,
None,
&mut psbt.inputs[input_index],
sighash,
sighash_type,
secp,
);
}
}
if let Some((leaf_hashes, _)) =
psbt.inputs[input_index].tap_key_origins.get(&x_only_pubkey)
{
let leaf_hashes = leaf_hashes
.iter()
.filter(|lh| {
// Removing the leaves we shouldn't sign for
let should_sign = match &sign_options.tap_leaves_options {
TapLeavesOptions::All => true,
TapLeavesOptions::Include(v) => v.contains(lh),
TapLeavesOptions::Exclude(v) => !v.contains(lh),
TapLeavesOptions::None => false,
};
// Filtering out the leaves without our key
should_sign
&& !psbt.inputs[input_index]
.tap_script_sigs
.contains_key(&(x_only_pubkey, **lh))
})
.cloned()
.collect::<Vec<_>>();
for lh in leaf_hashes {
let (sighash, sighash_type) =
compute_tap_sighash(psbt, input_index, Some(lh))?;
sign_psbt_schnorr(
&self.inner,
x_only_pubkey,
Some(lh),
&mut psbt.inputs[input_index],
sighash,
sighash_type,
secp,
);
}
}
}
SignerContext::Segwitv0 | SignerContext::Legacy => {
if psbt.inputs[input_index].partial_sigs.contains_key(&pubkey) {
return Ok(());
}
let mut sighasher = sighash::SighashCache::new(psbt.unsigned_tx.clone());
let (msg, sighash_type) = psbt
.sighash_ecdsa(input_index, &mut sighasher)
.map_err(SignerError::Psbt)?;
sign_psbt_ecdsa(
if let SignerContext::Tap { is_internal_key } = self.ctx {
if is_internal_key
&& psbt.inputs[input_index].tap_key_sig.is_none()
&& sign_options.sign_with_tap_internal_key
{
let (hash, hash_ty) = Tap::sighash(psbt, input_index, None)?;
sign_psbt_schnorr(
&self.inner,
pubkey,
x_only_pubkey,
None,
&mut psbt.inputs[input_index],
&msg,
sighash_type,
hash,
hash_ty,
secp,
sign_options.allow_grinding,
);
}
if let Some((leaf_hashes, _)) =
psbt.inputs[input_index].tap_key_origins.get(&x_only_pubkey)
{
let leaf_hashes = leaf_hashes
.iter()
.filter(|lh| {
// Removing the leaves we shouldn't sign for
let should_sign = match &sign_options.tap_leaves_options {
TapLeavesOptions::All => true,
TapLeavesOptions::Include(v) => v.contains(lh),
TapLeavesOptions::Exclude(v) => !v.contains(lh),
TapLeavesOptions::None => false,
};
// Filtering out the leaves without our key
should_sign
&& !psbt.inputs[input_index]
.tap_script_sigs
.contains_key(&(x_only_pubkey, **lh))
})
.cloned()
.collect::<Vec<_>>();
for lh in leaf_hashes {
let (hash, hash_ty) = Tap::sighash(psbt, input_index, Some(lh))?;
sign_psbt_schnorr(
&self.inner,
x_only_pubkey,
Some(lh),
&mut psbt.inputs[input_index],
hash,
hash_ty,
secp,
);
}
}
return Ok(());
}
if psbt.inputs[input_index].partial_sigs.contains_key(&pubkey) {
return Ok(());
}
let (hash, hash_ty) = match self.ctx {
SignerContext::Segwitv0 => Segwitv0::sighash(psbt, input_index, ())?,
SignerContext::Legacy => Legacy::sighash(psbt, input_index, ())?,
_ => return Ok(()), // handled above
};
sign_psbt_ecdsa(
&self.inner,
pubkey,
&mut psbt.inputs[input_index],
hash,
hash_ty,
secp,
sign_options.allow_grinding,
);
Ok(())
}
}
@@ -543,23 +499,21 @@ fn sign_psbt_ecdsa(
secret_key: &secp256k1::SecretKey,
pubkey: PublicKey,
psbt_input: &mut psbt::Input,
msg: &Message,
sighash_type: EcdsaSighashType,
hash: bitcoin::Sighash,
hash_ty: EcdsaSighashType,
secp: &SecpCtx,
allow_grinding: bool,
) {
let signature = if allow_grinding {
let msg = &Message::from_slice(&hash.into_inner()[..]).unwrap();
let sig = if allow_grinding {
secp.sign_ecdsa_low_r(msg, secret_key)
} else {
secp.sign_ecdsa(msg, secret_key)
};
secp.verify_ecdsa(msg, &signature, &pubkey.inner)
secp.verify_ecdsa(msg, &sig, &pubkey.inner)
.expect("invalid or corrupted ecdsa signature");
let final_signature = ecdsa::Signature {
signature,
sighash_type,
};
let final_signature = ecdsa::EcdsaSig { sig, hash_ty };
psbt_input.partial_sigs.insert(pubkey, final_signature);
}
@@ -569,11 +523,13 @@ fn sign_psbt_schnorr(
pubkey: XOnlyPublicKey,
leaf_hash: Option<taproot::TapLeafHash>,
psbt_input: &mut psbt::Input,
sighash: TapSighash,
sighash_type: TapSighashType,
hash: taproot::TapSighashHash,
hash_ty: SchnorrSighashType,
secp: &SecpCtx,
) {
let keypair = secp256k1::Keypair::from_seckey_slice(secp, secret_key.as_ref()).unwrap();
use schnorr::TapTweak;
let keypair = secp256k1::KeyPair::from_seckey_slice(secp, secret_key.as_ref()).unwrap();
let keypair = match leaf_hash {
None => keypair
.tap_tweak(secp, psbt_input.tap_merkle_root)
@@ -581,15 +537,12 @@ fn sign_psbt_schnorr(
Some(_) => keypair, // no tweak for script spend
};
let msg = &Message::from(sighash);
let signature = secp.sign_schnorr_no_aux_rand(msg, &keypair);
secp.verify_schnorr(&signature, msg, &XOnlyPublicKey::from_keypair(&keypair).0)
let msg = &Message::from_slice(&hash.into_inner()[..]).unwrap();
let sig = secp.sign_schnorr(msg, &keypair);
secp.verify_schnorr(&sig, msg, &XOnlyPublicKey::from_keypair(&keypair).0)
.expect("invalid or corrupted schnorr signature");
let final_signature = taproot::Signature {
signature,
sighash_type,
};
let final_signature = schnorr::SchnorrSig { sig, hash_ty };
if let Some(lh) = leaf_hash {
psbt_input
@@ -679,11 +632,6 @@ impl SignersContainer {
SignerOrdering::default(),
Arc::new(SignerWrapper::new(xprv, ctx)),
),
DescriptorSecretKey::MultiXPrv(xprv) => container.add_external(
SignerId::from(xprv.root_fingerprint(secp)),
SignerOrdering::default(),
Arc::new(SignerWrapper::new(xprv, ctx)),
),
};
}
@@ -751,7 +699,7 @@ pub struct SignOptions {
/// Whether the signer should trust the `witness_utxo`, if the `non_witness_utxo` hasn't been
/// provided
///
/// Defaults to `false` to mitigate the "SegWit bug" which could trick the wallet into
/// Defaults to `false` to mitigate the "SegWit bug" which chould trick the wallet into
/// paying a fee larger than expected.
///
/// Some wallets, especially if relatively old, might not provide the `non_witness_utxo` for
@@ -776,6 +724,11 @@ pub struct SignOptions {
/// Defaults to `false` which will only allow signing using `SIGHASH_ALL`.
pub allow_all_sighashes: bool,
/// Whether to remove partial signatures from the PSBT inputs while finalizing PSBT.
///
/// Defaults to `true` which will remove partial signatures during finalization.
pub remove_partial_sigs: bool,
/// Whether to try finalizing the PSBT after the inputs are signed.
///
/// Defaults to `true` which will try finalizing PSBT after inputs are signed.
@@ -800,10 +753,9 @@ pub struct SignOptions {
}
/// Customize which taproot script-path leaves the signer should sign.
#[derive(Default, Debug, Clone, PartialEq, Eq)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum TapLeavesOptions {
/// The signer will sign all the leaves it has a key for.
#[default]
All,
/// The signer won't sign leaves other than the ones specified. Note that it could still ignore
/// some of the specified leaves, if it doesn't have the right key to sign them.
@@ -814,12 +766,20 @@ pub enum TapLeavesOptions {
None,
}
impl Default for TapLeavesOptions {
fn default() -> Self {
TapLeavesOptions::All
}
}
#[allow(clippy::derivable_impls)]
impl Default for SignOptions {
fn default() -> Self {
SignOptions {
trust_witness_utxo: false,
assume_height: None,
allow_all_sighashes: false,
remove_partial_sigs: true,
try_finalize: true,
tap_leaves_options: TapLeavesOptions::default(),
sign_with_tap_internal_key: true,
@@ -828,53 +788,202 @@ impl Default for SignOptions {
}
}
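
A minimal sketch of overriding a single option while keeping the defaults shown above, via struct-update syntax (import path per the old crate layout in this diff):

    use bdk::SignOptions;

    let sign_options = SignOptions {
        // Opt in, accepting the "SegWit bug" trade-off described above.
        trust_witness_utxo: true,
        ..Default::default()
    };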
/// Computes the taproot sighash.
fn compute_tap_sighash(
psbt: &Psbt,
input_index: usize,
extra: Option<taproot::TapLeafHash>,
) -> Result<(sighash::TapSighash, TapSighashType), SignerError> {
if input_index >= psbt.inputs.len() || input_index >= psbt.unsigned_tx.input.len() {
return Err(SignerError::InputIndexOutOfRange);
pub(crate) trait ComputeSighash {
type Extra;
type Sighash;
type SighashType;
fn sighash(
psbt: &psbt::PartiallySignedTransaction,
input_index: usize,
extra: Self::Extra,
) -> Result<(Self::Sighash, Self::SighashType), SignerError>;
}
impl ComputeSighash for Legacy {
type Extra = ();
type Sighash = bitcoin::Sighash;
type SighashType = EcdsaSighashType;
fn sighash(
psbt: &psbt::PartiallySignedTransaction,
input_index: usize,
_extra: (),
) -> Result<(Self::Sighash, Self::SighashType), SignerError> {
if input_index >= psbt.inputs.len() || input_index >= psbt.unsigned_tx.input.len() {
return Err(SignerError::InputIndexOutOfRange);
}
let psbt_input = &psbt.inputs[input_index];
let tx_input = &psbt.unsigned_tx.input[input_index];
let sighash = psbt_input
.sighash_type
.unwrap_or_else(|| EcdsaSighashType::All.into())
.ecdsa_hash_ty()
.map_err(|_| SignerError::InvalidSighash)?;
let script = match psbt_input.redeem_script {
Some(ref redeem_script) => redeem_script.clone(),
None => {
let non_witness_utxo = psbt_input
.non_witness_utxo
.as_ref()
.ok_or(SignerError::MissingNonWitnessUtxo)?;
let prev_out = non_witness_utxo
.output
.get(tx_input.previous_output.vout as usize)
.ok_or(SignerError::InvalidNonWitnessUtxo)?;
prev_out.script_pubkey.clone()
}
};
Ok((
sighash::SighashCache::new(&psbt.unsigned_tx).legacy_signature_hash(
input_index,
&script,
sighash.to_u32(),
)?,
sighash,
))
}
}
let psbt_input = &psbt.inputs[input_index];
fn p2wpkh_script_code(script: &Script) -> Script {
ScriptBuilder::new()
.push_opcode(opcodes::all::OP_DUP)
.push_opcode(opcodes::all::OP_HASH160)
.push_slice(&script[2..])
.push_opcode(opcodes::all::OP_EQUALVERIFY)
.push_opcode(opcodes::all::OP_CHECKSIG)
.into_script()
}
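
A standalone sketch of the BIP143 rule this helper implements: a P2WPKH scriptPubKey is `OP_0 <20-byte hash>`, and the script code signed for it is the legacy-style `OP_DUP OP_HASH160 <hash> OP_EQUALVERIFY OP_CHECKSIG`, which is why the builder above slices off the first two bytes:

    fn main() {
        let hash = [0x11u8; 20]; // hypothetical pubkey hash
        let mut spk = vec![0x00, 0x14]; // OP_0, push 20 bytes
        spk.extend_from_slice(&hash);
        let mut script_code = vec![0x76, 0xa9, 0x14]; // OP_DUP OP_HASH160 push-20
        script_code.extend_from_slice(&spk[2..]); // the same `script[2..]` slice
        script_code.extend_from_slice(&[0x88, 0xac]); // OP_EQUALVERIFY OP_CHECKSIG
        assert_eq!(script_code.len(), 25);
    }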
let sighash_type = psbt_input
.sighash_type
.unwrap_or_else(|| TapSighashType::Default.into())
.taproot_hash_ty()
.map_err(|_| SignerError::InvalidSighash)?;
let witness_utxos = (0..psbt.inputs.len())
.map(|i| psbt.get_utxo_for(i))
.collect::<Vec<_>>();
let mut all_witness_utxos = vec![];
impl ComputeSighash for Segwitv0 {
type Extra = ();
type Sighash = bitcoin::Sighash;
type SighashType = EcdsaSighashType;
let mut cache = sighash::SighashCache::new(&psbt.unsigned_tx);
let is_anyone_can_pay = psbt::PsbtSighashType::from(sighash_type).to_u32() & 0x80 != 0;
let prevouts = if is_anyone_can_pay {
sighash::Prevouts::One(
input_index,
witness_utxos[input_index]
.as_ref()
.ok_or(SignerError::MissingWitnessUtxo)?,
)
} else if witness_utxos.iter().all(Option::is_some) {
all_witness_utxos.extend(witness_utxos.iter().filter_map(|x| x.as_ref()));
sighash::Prevouts::All(&all_witness_utxos)
} else {
return Err(SignerError::MissingWitnessUtxo);
};
fn sighash(
psbt: &psbt::PartiallySignedTransaction,
input_index: usize,
_extra: (),
) -> Result<(Self::Sighash, Self::SighashType), SignerError> {
if input_index >= psbt.inputs.len() || input_index >= psbt.unsigned_tx.input.len() {
return Err(SignerError::InputIndexOutOfRange);
}
// Assume no OP_CODESEPARATOR
let extra = extra.map(|leaf_hash| (leaf_hash, 0xFFFFFFFF));
let psbt_input = &psbt.inputs[input_index];
let tx_input = &psbt.unsigned_tx.input[input_index];
Ok((
cache
.taproot_signature_hash(input_index, &prevouts, None, extra, sighash_type)
.map_err(SignerError::SighashTaproot)?,
sighash_type,
))
let sighash = psbt_input
.sighash_type
.unwrap_or_else(|| EcdsaSighashType::All.into())
.ecdsa_hash_ty()
.map_err(|_| SignerError::InvalidSighash)?;
// Always try first with the non-witness utxo
let utxo = if let Some(prev_tx) = &psbt_input.non_witness_utxo {
// Check the provided prev-tx
if prev_tx.txid() != tx_input.previous_output.txid {
return Err(SignerError::InvalidNonWitnessUtxo);
}
// The output should be present, if it's missing the `non_witness_utxo` is invalid
prev_tx
.output
.get(tx_input.previous_output.vout as usize)
.ok_or(SignerError::InvalidNonWitnessUtxo)?
} else if let Some(witness_utxo) = &psbt_input.witness_utxo {
// Fallback to the witness_utxo. If we aren't allowed to use it, signing should fail
// before we get to this point
witness_utxo
} else {
// Nothing has been provided
return Err(SignerError::MissingNonWitnessUtxo);
};
let value = utxo.value;
let script = match psbt_input.witness_script {
Some(ref witness_script) => witness_script.clone(),
None => {
if utxo.script_pubkey.is_v0_p2wpkh() {
p2wpkh_script_code(&utxo.script_pubkey)
} else if psbt_input
.redeem_script
.as_ref()
.map(Script::is_v0_p2wpkh)
.unwrap_or(false)
{
p2wpkh_script_code(psbt_input.redeem_script.as_ref().unwrap())
} else {
return Err(SignerError::MissingWitnessScript);
}
}
};
Ok((
sighash::SighashCache::new(&psbt.unsigned_tx).segwit_signature_hash(
input_index,
&script,
value,
sighash,
)?,
sighash,
))
}
}
impl ComputeSighash for Tap {
type Extra = Option<taproot::TapLeafHash>;
type Sighash = taproot::TapSighashHash;
type SighashType = SchnorrSighashType;
fn sighash(
psbt: &psbt::PartiallySignedTransaction,
input_index: usize,
extra: Self::Extra,
) -> Result<(Self::Sighash, SchnorrSighashType), SignerError> {
if input_index >= psbt.inputs.len() || input_index >= psbt.unsigned_tx.input.len() {
return Err(SignerError::InputIndexOutOfRange);
}
let psbt_input = &psbt.inputs[input_index];
let sighash_type = psbt_input
.sighash_type
.unwrap_or_else(|| SchnorrSighashType::Default.into())
.schnorr_hash_ty()
.map_err(|_| SignerError::InvalidSighash)?;
let witness_utxos = (0..psbt.inputs.len())
.map(|i| psbt.get_utxo_for(i))
.collect::<Vec<_>>();
let mut all_witness_utxos = vec![];
let mut cache = sighash::SighashCache::new(&psbt.unsigned_tx);
let is_anyone_can_pay = psbt::PsbtSighashType::from(sighash_type).to_u32() & 0x80 != 0;
let prevouts = if is_anyone_can_pay {
sighash::Prevouts::One(
input_index,
witness_utxos[input_index]
.as_ref()
.ok_or(SignerError::MissingWitnessUtxo)?,
)
} else if witness_utxos.iter().all(Option::is_some) {
all_witness_utxos.extend(witness_utxos.iter().filter_map(|x| x.as_ref()));
sighash::Prevouts::All(&all_witness_utxos)
} else {
return Err(SignerError::MissingWitnessUtxo);
};
// Assume no OP_CODESEPARATOR
let extra = extra.map(|leaf_hash| (leaf_hash, 0xFFFFFFFF));
Ok((
cache.taproot_signature_hash(input_index, &prevouts, None, extra, sighash_type)?,
sighash_type,
))
}
}
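
Both the ECDSA and taproot paths above test `to_u32() & 0x80` to choose between `Prevouts::One` and `Prevouts::All`. A standalone sketch of why that bit is the ANYONECANPAY flag:

    fn main() {
        const SIGHASH_ANYONECANPAY: u32 = 0x80; // high bit of the sighash byte
        let all = 0x01_u32; // SIGHASH_ALL: commits to every prevout
        let all_acp = 0x81_u32; // SIGHASH_ALL | ANYONECANPAY: this input's prevout only
        assert_eq!(all & SIGHASH_ANYONECANPAY, 0);
        assert_ne!(all_acp & SIGHASH_ANYONECANPAY, 0);
    }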
impl PartialOrd for SignersContainerKey {
@@ -906,8 +1015,8 @@ mod signers_container_tests {
use crate::descriptor::IntoWalletDescriptor;
use crate::keys::{DescriptorKey, IntoDescriptorKey};
use assert_matches::assert_matches;
use bitcoin::bip32;
use bitcoin::secp256k1::{All, Secp256k1};
use bitcoin::util::bip32;
use bitcoin::Network;
use core::str::FromStr;
use miniscript::ScriptContext;
@@ -1002,7 +1111,7 @@ mod signers_container_tests {
impl TransactionSigner for DummySigner {
fn sign_transaction(
&self,
_psbt: &mut Psbt,
_psbt: &mut psbt::PartiallySignedTransaction,
_sign_options: &SignOptions,
_secp: &SecpCtx,
) -> Result<(), SignerError> {
@@ -1020,8 +1129,8 @@ mod signers_container_tests {
) -> (DescriptorKey<Ctx>, DescriptorKey<Ctx>, Fingerprint) {
let secp: Secp256k1<All> = Secp256k1::new();
let path = bip32::DerivationPath::from_str(PATH).unwrap();
let tprv = bip32::Xpriv::from_str(tprv).unwrap();
let tpub = bip32::Xpub::from_priv(&secp, &tprv);
let tprv = bip32::ExtendedPrivKey::from_str(tprv).unwrap();
let tpub = bip32::ExtendedPubKey::from_priv(&secp, &tprv);
let fingerprint = tprv.fingerprint(&secp);
let prvkey = (tprv, path.clone()).into_descriptor_key().unwrap();
let pubkey = (tpub, path).into_descriptor_key().unwrap();

View File

@@ -16,47 +16,56 @@
//! ```
//! # use std::str::FromStr;
//! # use bitcoin::*;
//! # use bdk_wallet::*;
//! # use bdk_wallet::ChangeSet;
//! # use bdk_wallet::error::CreateTxError;
//! # use anyhow::Error;
//! # let to_address = Address::from_str("2N4eQYCbKUHCCTUjBJeHcJp9ok6J2GZsTDt").unwrap().assume_checked();
//! # use bdk::*;
//! # use bdk::wallet::tx_builder::CreateTx;
//! # let to_address = Address::from_str("2N4eQYCbKUHCCTUjBJeHcJp9ok6J2GZsTDt").unwrap();
//! # let mut wallet = doctest_wallet!();
//! // create a TxBuilder from a wallet
//! let mut tx_builder = wallet.build_tx();
//!
//! tx_builder
//! // Create a transaction with one output to `to_address` of 50_000 satoshi
//! .add_recipient(to_address.script_pubkey(), Amount::from_sat(50_000))
//! .add_recipient(to_address.script_pubkey(), 50_000)
//! // With a custom fee rate of 5.0 satoshi/vbyte
//! .fee_rate(FeeRate::from_sat_per_vb(5).expect("valid feerate"))
//! .fee_rate(FeeRate::from_sat_per_vb(5.0))
//! // Only spend non-change outputs
//! .do_not_spend_change()
//! // Turn on RBF signaling
//! .enable_rbf();
//! let psbt = tx_builder.finish()?;
//! # Ok::<(), anyhow::Error>(())
//! let (psbt, tx_details) = tx_builder.finish()?;
//! # Ok::<(), bdk::Error>(())
//! ```
use crate::collections::BTreeMap;
use crate::collections::HashSet;
use alloc::{boxed::Box, rc::Rc, string::String, vec::Vec};
use bdk_chain::ConfirmationTime;
use core::cell::RefCell;
use core::fmt;
use core::marker::PhantomData;
use alloc::sync::Arc;
use bitcoin::util::psbt::{self, PartiallySignedTransaction as Psbt};
use bitcoin::{LockTime, OutPoint, Script, Sequence, Transaction};
use bitcoin::psbt::{self, Psbt};
use bitcoin::script::PushBytes;
use bitcoin::{
absolute, Amount, FeeRate, OutPoint, ScriptBuf, Sequence, Transaction, TxIn, TxOut, Txid,
Weight,
use super::coin_selection::{CoinSelectionAlgorithm, DefaultCoinSelectionAlgorithm};
use super::persist;
use crate::{
types::{FeeRate, KeychainKind, LocalUtxo, WeightedUtxo},
TransactionDetails,
};
use rand_core::RngCore;
use crate::{Error, Utxo, Wallet};
/// Context in which the [`TxBuilder`] is valid
pub trait TxBuilderContext: core::fmt::Debug + Default + Clone {}
use super::coin_selection::CoinSelectionAlgorithm;
use super::utils::shuffle_slice;
use super::{CreateTxError, Wallet};
use crate::collections::{BTreeMap, HashSet};
use crate::{KeychainKind, LocalOutput, Utxo, WeightedUtxo};
/// Marker type to indicate the [`TxBuilder`] is being used to create a new transaction (as opposed
/// to bumping the fee of an existing one).
#[derive(Debug, Default, Clone)]
pub struct CreateTx;
impl TxBuilderContext for CreateTx {}
/// Marker type to indicate the [`TxBuilder`] is being used to bump the fee of an existing transaction.
#[derive(Debug, Default, Clone)]
pub struct BumpFee;
impl TxBuilderContext for BumpFee {}
/// A transaction builder
///
@@ -68,38 +77,35 @@ use crate::{KeychainKind, LocalOutput, Utxo, WeightedUtxo};
/// as in the following example:
///
/// ```
/// # use bdk_wallet::*;
/// # use bdk_wallet::tx_builder::*;
/// # use bdk::*;
/// # use bdk::wallet::tx_builder::*;
/// # use bitcoin::*;
/// # use core::str::FromStr;
/// # use bdk_wallet::ChangeSet;
/// # use bdk_wallet::error::CreateTxError;
/// # use anyhow::Error;
/// # let mut wallet = doctest_wallet!();
/// # let addr1 = Address::from_str("2N4eQYCbKUHCCTUjBJeHcJp9ok6J2GZsTDt").unwrap().assume_checked();
/// # let addr1 = Address::from_str("2N4eQYCbKUHCCTUjBJeHcJp9ok6J2GZsTDt").unwrap();
/// # let addr2 = addr1.clone();
/// // chaining
/// let psbt1 = {
/// let (psbt1, details) = {
/// let mut builder = wallet.build_tx();
/// builder
/// .ordering(TxOrdering::Untouched)
/// .add_recipient(addr1.script_pubkey(), Amount::from_sat(50_000))
/// .add_recipient(addr2.script_pubkey(), Amount::from_sat(50_000));
/// .add_recipient(addr1.script_pubkey(), 50_000)
/// .add_recipient(addr2.script_pubkey(), 50_000);
/// builder.finish()?
/// };
///
/// // non-chaining
/// let psbt2 = {
/// let (psbt2, details) = {
/// let mut builder = wallet.build_tx();
/// builder.ordering(TxOrdering::Untouched);
/// for addr in &[addr1, addr2] {
/// builder.add_recipient(addr.script_pubkey(), Amount::from_sat(50_000));
/// builder.add_recipient(addr.script_pubkey(), 50_000);
/// }
/// builder.finish()?
/// };
///
/// assert_eq!(psbt1.unsigned_tx.output[..2], psbt2.unsigned_tx.output[..2]);
/// # Ok::<(), anyhow::Error>(())
/// # Ok::<(), bdk::Error>(())
/// ```
///
/// At the moment [`coin_selection`] is an exception to the rule as it consumes `self`.
@@ -112,19 +118,20 @@ use crate::{KeychainKind, LocalOutput, Utxo, WeightedUtxo};
/// [`finish`]: Self::finish
/// [`coin_selection`]: Self::coin_selection
#[derive(Debug)]
pub struct TxBuilder<'a, Cs> {
pub(crate) wallet: Rc<RefCell<&'a mut Wallet>>,
pub struct TxBuilder<'a, D, Cs, Ctx> {
pub(crate) wallet: Rc<RefCell<&'a mut Wallet<D>>>,
pub(crate) params: TxParams,
pub(crate) coin_selection: Cs,
pub(crate) phantom: PhantomData<Ctx>,
}
/// The parameters for transaction creation sans coin selection algorithm.
//TODO: TxParams should eventually be exposed publicly.
#[derive(Default, Debug, Clone)]
pub(crate) struct TxParams {
pub(crate) recipients: Vec<(ScriptBuf, u64)>,
pub(crate) recipients: Vec<(Script, u64)>,
pub(crate) drain_wallet: bool,
pub(crate) drain_to: Option<ScriptBuf>,
pub(crate) drain_to: Option<Script>,
pub(crate) fee_policy: Option<FeePolicy>,
pub(crate) internal_policy_path: Option<BTreeMap<String, Vec<usize>>>,
pub(crate) external_policy_path: Option<BTreeMap<String, Vec<usize>>>,
@@ -133,7 +140,7 @@ pub(crate) struct TxParams {
pub(crate) manually_selected_only: bool,
pub(crate) sighash: Option<psbt::PsbtSighashType>,
pub(crate) ordering: TxOrdering,
pub(crate) locktime: Option<absolute::LockTime>,
pub(crate) locktime: Option<LockTime>,
pub(crate) rbf: Option<RbfValue>,
pub(crate) version: Option<Version>,
pub(crate) change_policy: ChangeSpendPolicy,
@@ -141,14 +148,14 @@ pub(crate) struct TxParams {
pub(crate) add_global_xpubs: bool,
pub(crate) include_output_redeem_witness_script: bool,
pub(crate) bumping_fee: Option<PreviousFee>,
pub(crate) current_height: Option<absolute::LockTime>,
pub(crate) current_height: Option<LockTime>,
pub(crate) allow_dust: bool,
}
#[derive(Clone, Copy, Debug)]
pub(crate) struct PreviousFee {
pub absolute: u64,
pub rate: FeeRate,
pub rate: f32,
}
#[derive(Debug, Clone, Copy)]
@@ -159,48 +166,32 @@ pub(crate) enum FeePolicy {
impl Default for FeePolicy {
fn default() -> Self {
FeePolicy::FeeRate(FeeRate::BROADCAST_MIN)
FeePolicy::FeeRate(FeeRate::default_min_relay_fee())
}
}
impl<'a, Cs: Clone> Clone for TxBuilder<'a, Cs> {
impl<'a, D, Cs: Clone, Ctx> Clone for TxBuilder<'a, D, Cs, Ctx> {
fn clone(&self) -> Self {
TxBuilder {
wallet: self.wallet.clone(),
params: self.params.clone(),
coin_selection: self.coin_selection.clone(),
phantom: PhantomData,
}
}
}
// Methods supported for any CoinSelectionAlgorithm.
impl<'a, Cs> TxBuilder<'a, Cs> {
/// Set a custom fee rate.
///
/// This method sets the mining fee paid by the transaction as a rate on its size.
/// This means that the total fee paid is equal to `fee_rate` times the size
/// of the transaction. Default is 1 sat/vB in accordance with Bitcoin Core's default
/// relay policy.
///
/// Note that this is really a minimum feerate -- it's possible to
/// overshoot it slightly since adding a change output to drain the remaining
/// excess might not be viable.
// methods supported by both contexts, for any CoinSelectionAlgorithm
impl<'a, D, Cs: CoinSelectionAlgorithm, Ctx: TxBuilderContext> TxBuilder<'a, D, Cs, Ctx> {
/// Set a custom fee rate
pub fn fee_rate(&mut self, fee_rate: FeeRate) -> &mut Self {
self.params.fee_policy = Some(FeePolicy::FeeRate(fee_rate));
self
}
/// Set an absolute fee
/// The fee_absolute method refers to the absolute transaction fee in [`Amount`].
/// If anyone sets both the `fee_absolute` method and the `fee_rate` method,
/// the `FeePolicy` enum will be set by whichever method was called last,
/// as the [`FeeRate`] and `FeeAmount` are mutually exclusive.
///
/// Note that this is really a minimum absolute fee -- it's possible to
/// overshoot it slightly since adding a change output to drain the remaining
/// excess might not be viable.
pub fn fee_absolute(&mut self, fee_amount: Amount) -> &mut Self {
self.params.fee_policy = Some(FeePolicy::FeeAmount(fee_amount.to_sat()));
pub fn fee_absolute(&mut self, fee_amount: u64) -> &mut Self {
self.params.fee_policy = Some(FeePolicy::FeeAmount(fee_amount));
self
}
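
A hedged sketch of the mutual exclusivity described above, written against the older `u64`/`f32` signatures from the removed lines (`wallet` assumed in scope): whichever policy is set last wins.

    let mut builder = wallet.build_tx();
    builder
        .fee_absolute(1_000) // overwritten by the next call
        .fee_rate(FeeRate::from_sat_per_vb(5.0)); // this FeePolicy is used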
@@ -250,21 +241,18 @@ impl<'a, Cs> TxBuilder<'a, Cs> {
/// # use std::str::FromStr;
/// # use std::collections::BTreeMap;
/// # use bitcoin::*;
/// # use bdk_wallet::*;
/// # let to_address =
/// Address::from_str("2N4eQYCbKUHCCTUjBJeHcJp9ok6J2GZsTDt")
/// .unwrap()
/// .assume_checked();
/// # use bdk::*;
/// # let to_address = Address::from_str("2N4eQYCbKUHCCTUjBJeHcJp9ok6J2GZsTDt").unwrap();
/// # let mut wallet = doctest_wallet!();
/// let mut path = BTreeMap::new();
/// path.insert("aabbccdd".to_string(), vec![0, 1]);
///
/// let builder = wallet
/// .build_tx()
/// .add_recipient(to_address.script_pubkey(), Amount::from_sat(50_000))
/// .add_recipient(to_address.script_pubkey(), 50_000)
/// .policy_path(path, KeychainKind::External);
///
/// # Ok::<(), anyhow::Error>(())
/// # Ok::<(), bdk::Error>(())
/// ```
pub fn policy_path(
&mut self,
@@ -286,21 +274,17 @@ impl<'a, Cs> TxBuilder<'a, Cs> {
///
/// These have priority over the "unspendable" utxos, meaning that if a utxo is present both in
/// the "utxos" and the "unspendable" list, it will be spent.
pub fn add_utxos(&mut self, outpoints: &[OutPoint]) -> Result<&mut Self, AddUtxoError> {
pub fn add_utxos(&mut self, outpoints: &[OutPoint]) -> Result<&mut Self, Error> {
{
let wallet = self.wallet.borrow();
let utxos = outpoints
.iter()
.map(|outpoint| {
wallet
.get_utxo(*outpoint)
.ok_or(AddUtxoError::UnknownUtxo(*outpoint))
})
.map(|outpoint| wallet.get_utxo(*outpoint).ok_or(Error::UnknownUtxo))
.collect::<Result<Vec<_>, _>>()?;
for utxo in utxos {
let descriptor = wallet.public_descriptor(utxo.keychain);
let satisfaction_weight = descriptor.max_weight_to_satisfy().unwrap();
let descriptor = wallet.get_descriptor_for_keychain(utxo.keychain);
let satisfaction_weight = descriptor.max_satisfaction_weight().unwrap();
self.params.utxos.push(WeightedUtxo {
satisfaction_weight,
utxo: Utxo::Local(utxo),
@@ -315,7 +299,7 @@ impl<'a, Cs> TxBuilder<'a, Cs> {
///
/// These have priority over the "unspendable" utxos, meaning that if a utxo is present both in
/// the "utxos" and the "unspendable" list, it will be spent.
pub fn add_utxo(&mut self, outpoint: OutPoint) -> Result<&mut Self, AddUtxoError> {
pub fn add_utxo(&mut self, outpoint: OutPoint) -> Result<&mut Self, Error> {
self.add_utxos(&[outpoint])
}
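
A hedged sketch of driving this API against the new `AddUtxoError` defined later in this file (`wallet` and `outpoint` assumed in scope; the exact re-export path is an assumption):

    let mut builder = wallet.build_tx();
    if let Err(AddUtxoError::UnknownUtxo(op)) = builder.add_utxo(outpoint) {
        eprintln!("outpoint {}:{} is not tracked by this wallet", op.txid, op.vout);
    }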
@@ -341,16 +325,12 @@ impl<'a, Cs> TxBuilder<'a, Cs> {
/// causing you to pay a fee that is too high. The party who is broadcasting the transaction can
/// of course check the real input weight matches the expected weight prior to broadcasting.
///
/// To guarantee the `max_weight_to_satisfy` is correct, you can require the party providing the
/// To guarantee the `satisfaction_weight` is correct, you can require the party providing the
/// `psbt_input` provide a miniscript descriptor for the input so you can check it against the
/// `script_pubkey` and then ask it for the [`max_weight_to_satisfy`].
/// `script_pubkey` and then ask it for the [`max_satisfaction_weight`].
///
/// This is an **EXPERIMENTAL** feature, API and other major changes are expected.
///
/// In order to use [`Wallet::calculate_fee`] or [`Wallet::calculate_fee_rate`] for a transaction
/// created with foreign UTXO(s) you must manually insert the corresponding TxOut(s) into the tx
/// graph using the [`Wallet::insert_txout`] function.
///
/// # Errors
///
/// This method returns errors in the following circumstances:
@@ -364,44 +344,29 @@ impl<'a, Cs> TxBuilder<'a, Cs> {
///
/// [`only_witness_utxo`]: Self::only_witness_utxo
/// [`finish`]: Self::finish
/// [`max_weight_to_satisfy`]: miniscript::Descriptor::max_weight_to_satisfy
/// [`max_satisfaction_weight`]: miniscript::Descriptor::max_satisfaction_weight
pub fn add_foreign_utxo(
&mut self,
outpoint: OutPoint,
psbt_input: psbt::Input,
satisfaction_weight: Weight,
) -> Result<&mut Self, AddForeignUtxoError> {
self.add_foreign_utxo_with_sequence(
outpoint,
psbt_input,
satisfaction_weight,
Sequence::MAX,
)
}
/// Same as [add_foreign_utxo](TxBuilder::add_foreign_utxo) but allows to set the nSequence value.
pub fn add_foreign_utxo_with_sequence(
&mut self,
outpoint: OutPoint,
psbt_input: psbt::Input,
satisfaction_weight: Weight,
sequence: Sequence,
) -> Result<&mut Self, AddForeignUtxoError> {
satisfaction_weight: usize,
) -> Result<&mut Self, Error> {
if psbt_input.witness_utxo.is_none() {
match psbt_input.non_witness_utxo.as_ref() {
Some(tx) => {
if tx.compute_txid() != outpoint.txid {
return Err(AddForeignUtxoError::InvalidTxid {
input_txid: tx.compute_txid(),
foreign_utxo: outpoint,
});
if tx.txid() != outpoint.txid {
return Err(Error::Generic(
"Foreign utxo outpoint does not match PSBT input".into(),
));
}
if tx.output.len() <= outpoint.vout as usize {
return Err(AddForeignUtxoError::InvalidOutpoint(outpoint));
return Err(Error::InvalidOutpoint(outpoint));
}
}
None => {
return Err(AddForeignUtxoError::MissingUtxo);
return Err(Error::Generic(
"Foreign utxo missing witness_utxo or non_witness_utxo".into(),
))
}
}
}
@@ -410,7 +375,6 @@ impl<'a, Cs> TxBuilder<'a, Cs> {
satisfaction_weight,
utxo: Utxo::Foreign {
outpoint,
sequence: Some(sequence),
psbt_input: Box::new(psbt_input),
},
});
@@ -464,7 +428,7 @@ impl<'a, Cs> TxBuilder<'a, Cs> {
/// Use a specific nLockTime while creating the transaction
///
/// This can cause conflicts if the wallet's descriptors contain an "after" (OP_CLTV) operator.
pub fn nlocktime(&mut self, locktime: absolute::LockTime) -> &mut Self {
pub fn nlocktime(&mut self, locktime: LockTime) -> &mut Self {
self.params.locktime = Some(locktime);
self
}
@@ -503,7 +467,7 @@ impl<'a, Cs> TxBuilder<'a, Cs> {
self
}
/// Only fill in the [`psbt::Input::witness_utxo`](bitcoin::psbt::Input::witness_utxo) field when spending from
/// Only fill in the [`psbt::Input::witness_utxo`](bitcoin::util::psbt::Input::witness_utxo) field when spending from
/// SegWit descriptors.
///
/// This reduces the size of the PSBT, but some signers might reject them due to the lack of
@@ -513,8 +477,8 @@ impl<'a, Cs> TxBuilder<'a, Cs> {
self
}
/// Fill in the [`psbt::Output::redeem_script`](bitcoin::psbt::Output::redeem_script) and
/// [`psbt::Output::witness_script`](bitcoin::psbt::Output::witness_script) fields.
/// Fill in the [`psbt::Output::redeem_script`](bitcoin::util::psbt::Output::redeem_script) and
/// [`psbt::Output::witness_script`](bitcoin::util::psbt::Output::witness_script) fields.
///
/// This is useful for signers which always require it, like ColdCard hardware wallets.
pub fn include_output_redeem_witness_script(&mut self) -> &mut Self {
@@ -540,17 +504,35 @@ impl<'a, Cs> TxBuilder<'a, Cs> {
/// Choose the coin selection algorithm
///
/// Overrides the [`CoinSelectionAlgorithm`].
/// Overrides the [`DefaultCoinSelectionAlgorithm`](super::coin_selection::DefaultCoinSelectionAlgorithm).
///
/// Note that this function consumes the builder and returns it so it is usually best to put this as the first call on the builder.
pub fn coin_selection<P: CoinSelectionAlgorithm>(self, coin_selection: P) -> TxBuilder<'a, P> {
pub fn coin_selection<P: CoinSelectionAlgorithm>(
self,
coin_selection: P,
) -> TxBuilder<'a, D, P, Ctx> {
TxBuilder {
wallet: self.wallet,
params: self.params,
coin_selection,
phantom: PhantomData,
}
}
/// Finish building the transaction.
///
/// Returns the [`BIP174`] "PSBT" and summary details about the transaction.
///
/// [`BIP174`]: https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki
pub fn finish(self) -> Result<(Psbt, TransactionDetails), Error>
where
D: persist::PersistBackend<KeychainKind, ConfirmationTime>,
{
self.wallet
.borrow_mut()
.create_tx(self.coin_selection, self.params)
}
/// Enable signaling RBF
///
/// This will use the default nSequence value of `0xFFFFFFFD`.
@@ -575,7 +557,7 @@ impl<'a, Cs> TxBuilder<'a, Cs> {
///
/// This will be used to:
/// 1. Set the nLockTime for preventing fee sniping.
/// **Note**: This will be ignored if you manually specify a nlocktime using [`TxBuilder::nlocktime`].
/// 2. Decide whether coinbase outputs are mature or not. If the coinbase outputs are not
/// mature at `current_height`, we ignore them in the coin selection.
/// If you want to create a transaction that spends immature coinbase inputs, manually
@@ -583,8 +565,7 @@ impl<'a, Cs> TxBuilder<'a, Cs> {
///
/// In both cases, if you don't provide a current height, we use the last sync height.
pub fn current_height(&mut self, height: u32) -> &mut Self {
self.params.current_height =
Some(absolute::LockTime::from_height(height).expect("Invalid height"));
self.params.current_height = Some(LockTime::from_height(height).expect("Invalid height"));
self
}
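
A standalone sketch of the anti-fee-sniping rationale behind step 1: with a height-based nLockTime equal to the current tip, the transaction is only valid from the next block on, so a miner reorganizing the chain to capture ("snipe") its fees would also have to re-mine the block that confirmed it. Simplified consensus rule, ignoring time-based locktimes and sequence finality:

    fn main() {
        fn is_final(nlocktime_height: u32, block_height: u32) -> bool {
            nlocktime_height < block_height
        }
        let tip = 100_000;
        assert!(is_final(tip, tip + 1)); // spendable in the next block
        assert!(!is_final(tip, tip)); // not valid in a re-mined current block
    }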
@@ -595,28 +576,25 @@ impl<'a, Cs> TxBuilder<'a, Cs> {
self.params.allow_dust = allow_dust;
self
}
}
impl<'a, D, Cs: CoinSelectionAlgorithm> TxBuilder<'a, D, Cs, CreateTx> {
/// Replace the recipients already added with a new list
pub fn set_recipients(&mut self, recipients: Vec<(ScriptBuf, Amount)>) -> &mut Self {
self.params.recipients = recipients
.into_iter()
.map(|(script, amount)| (script, amount.to_sat()))
.collect();
pub fn set_recipients(&mut self, recipients: Vec<(Script, u64)>) -> &mut Self {
self.params.recipients = recipients;
self
}
/// Add a recipient to the internal list
pub fn add_recipient(&mut self, script_pubkey: ScriptBuf, amount: Amount) -> &mut Self {
self.params
.recipients
.push((script_pubkey, amount.to_sat()));
pub fn add_recipient(&mut self, script_pubkey: Script, amount: u64) -> &mut Self {
self.params.recipients.push((script_pubkey, amount));
self
}
/// Add data as an output, using OP_RETURN
pub fn add_data<T: AsRef<PushBytes>>(&mut self, data: &T) -> &mut Self {
let script = ScriptBuf::new_op_return(data);
self.add_recipient(script, Amount::ZERO);
pub fn add_data(&mut self, data: &[u8]) -> &mut Self {
let script = Script::new_op_return(data);
self.add_recipient(script, 0u64);
self
}
@@ -629,8 +607,11 @@ impl<'a, Cs> TxBuilder<'a, Cs> {
/// difference is that it is valid to use `drain_to` without setting any ordinary recipients
/// with [`add_recipient`] (but it is perfectly fine to add recipients as well).
///
/// If you choose not to set any recipients, you should provide the utxos that the
/// transaction should spend via [`add_utxos`].
/// If you choose not to set any recipients, you should either provide the utxos that the
/// transaction should spend via [`add_utxos`], or set [`drain_wallet`] to spend all of them.
///
/// When bumping the fees of a transaction made with this option, you probably want to
/// use [`allow_shrinking`] to allow this output to be reduced to pay for the extra fees.
///
/// # Example
///
@@ -640,14 +621,9 @@ impl<'a, Cs> TxBuilder<'a, Cs> {
/// ```
/// # use std::str::FromStr;
/// # use bitcoin::*;
/// # use bdk_wallet::*;
/// # use bdk_wallet::ChangeSet;
/// # use bdk_wallet::error::CreateTxError;
/// # use anyhow::Error;
/// # let to_address =
/// Address::from_str("2N4eQYCbKUHCCTUjBJeHcJp9ok6J2GZsTDt")
/// .unwrap()
/// .assume_checked();
/// # use bdk::*;
/// # use bdk::wallet::tx_builder::CreateTx;
/// # let to_address = Address::from_str("2N4eQYCbKUHCCTUjBJeHcJp9ok6J2GZsTDt").unwrap();
/// # let mut wallet = doctest_wallet!();
/// let mut tx_builder = wallet.build_tx();
///
@@ -656,170 +632,88 @@ impl<'a, Cs> TxBuilder<'a, Cs> {
/// .drain_wallet()
/// // Send the excess (which is all the coins minus the fee) to this address.
/// .drain_to(to_address.script_pubkey())
/// .fee_rate(FeeRate::from_sat_per_vb(5).expect("valid feerate"))
/// .fee_rate(FeeRate::from_sat_per_vb(5.0))
/// .enable_rbf();
/// let psbt = tx_builder.finish()?;
/// # Ok::<(), anyhow::Error>(())
/// let (psbt, tx_details) = tx_builder.finish()?;
/// # Ok::<(), bdk::Error>(())
/// ```
///
/// [`allow_shrinking`]: Self::allow_shrinking
/// [`add_recipient`]: Self::add_recipient
/// [`add_utxos`]: Self::add_utxos
/// [`drain_wallet`]: Self::drain_wallet
pub fn drain_to(&mut self, script_pubkey: ScriptBuf) -> &mut Self {
pub fn drain_to(&mut self, script_pubkey: Script) -> &mut Self {
self.params.drain_to = Some(script_pubkey);
self
}
}
impl<'a, Cs: CoinSelectionAlgorithm> TxBuilder<'a, Cs> {
/// Finish building the transaction.
// methods supported only by bump_fee
impl<'a, D> TxBuilder<'a, D, DefaultCoinSelectionAlgorithm, BumpFee> {
/// Explicitly tells the wallet that it is allowed to reduce the amount of the output matching this
/// `script_pubkey` in order to bump the transaction fee. Without specifying this the wallet
/// will attempt to find a change output to shrink instead.
///
/// Uses the thread-local random number generator (rng).
/// **Note** that the output may shrink to below the dust limit and therefore be removed. If it is
/// preserved then it is currently not guaranteed to be in the same position as it was
/// originally.
///
/// Returns a new [`Psbt`] per [`BIP174`].
///
/// [`BIP174`]: https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki
///
/// **WARNING**: To avoid change address reuse you must persist the changes resulting from one
/// or more calls to this method before closing the wallet. See [`Wallet::reveal_next_address`].
#[cfg(feature = "std")]
pub fn finish(self) -> Result<Psbt, CreateTxError> {
self.finish_with_aux_rand(&mut bitcoin::key::rand::thread_rng())
}
/// Finish building the transaction.
///
/// Uses a provided random number generator (rng).
///
/// Returns a new [`Psbt`] per [`BIP174`].
///
/// [`BIP174`]: https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki
///
/// **WARNING**: To avoid change address reuse you must persist the changes resulting from one
/// or more calls to this method before closing the wallet. See [`Wallet::reveal_next_address`].
pub fn finish_with_aux_rand(self, rng: &mut impl RngCore) -> Result<Psbt, CreateTxError> {
self.wallet
.borrow_mut()
.create_tx(self.coin_selection, self.params, rng)
}
}
#[derive(Debug)]
/// Error returned from [`TxBuilder::add_utxo`] and [`TxBuilder::add_utxos`]
pub enum AddUtxoError {
/// Happens when trying to spend an UTXO that is not in the internal database
UnknownUtxo(OutPoint),
}
impl fmt::Display for AddUtxoError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::UnknownUtxo(outpoint) => write!(
f,
"UTXO not found in the internal database for txid: {} with vout: {}",
outpoint.txid, outpoint.vout
),
/// Returns an `Err` if `script_pubkey` can't be found among the recipients of the
/// transaction we are bumping.
pub fn allow_shrinking(&mut self, script_pubkey: Script) -> Result<&mut Self, Error> {
match self
.params
.recipients
.iter()
.position(|(recipient_script, _)| *recipient_script == script_pubkey)
{
Some(position) => {
self.params.recipients.remove(position);
self.params.drain_to = Some(script_pubkey);
Ok(self)
}
None => Err(Error::Generic(format!(
"{} was not in the original transaction",
script_pubkey
))),
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for AddUtxoError {}
#[derive(Debug)]
/// Error returned from [`TxBuilder::add_foreign_utxo`].
pub enum AddForeignUtxoError {
/// Foreign utxo outpoint txid does not match PSBT input txid
InvalidTxid {
/// PSBT input txid
input_txid: Txid,
/// Foreign UTXO outpoint
foreign_utxo: OutPoint,
},
/// Requested outpoint doesn't exist in the tx (vout greater than available outputs)
InvalidOutpoint(OutPoint),
/// Foreign utxo missing witness_utxo or non_witness_utxo
MissingUtxo,
}
impl fmt::Display for AddForeignUtxoError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::InvalidTxid {
input_txid,
foreign_utxo,
} => write!(
f,
"Foreign UTXO outpoint txid: {} does not match PSBT input txid: {}",
foreign_utxo.txid, input_txid,
),
Self::InvalidOutpoint(outpoint) => write!(
f,
"Requested outpoint doesn't exist for txid: {} with vout: {}",
outpoint.txid, outpoint.vout,
),
Self::MissingUtxo => write!(f, "Foreign utxo missing witness_utxo or non_witness_utxo"),
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for AddForeignUtxoError {}
type TxSort<T> = dyn Fn(&T, &T) -> core::cmp::Ordering;
/// Ordering of the transaction's inputs and outputs
#[derive(Clone, Default)]
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Hash, Clone, Copy)]
pub enum TxOrdering {
/// Randomized (default)
#[default]
Shuffle,
/// Unchanged
Untouched,
/// Provide custom comparison functions for sorting
Custom {
/// Transaction inputs sort function
input_sort: Arc<TxSort<TxIn>>,
/// Transaction outputs sort function
output_sort: Arc<TxSort<TxOut>>,
},
/// BIP69 / Lexicographic
Bip69Lexicographic,
}
impl core::fmt::Debug for TxOrdering {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
match self {
TxOrdering::Shuffle => write!(f, "Shuffle"),
TxOrdering::Untouched => write!(f, "Untouched"),
TxOrdering::Custom { .. } => write!(f, "Custom"),
}
impl Default for TxOrdering {
fn default() -> Self {
TxOrdering::Shuffle
}
}
impl TxOrdering {
/// Sort transaction inputs and outputs by [`TxOrdering`] variant.
///
/// Uses the thread-local random number generator (rng).
#[cfg(feature = "std")]
/// Sort transaction inputs and outputs by [`TxOrdering`] variant
pub fn sort_tx(&self, tx: &mut Transaction) {
self.sort_tx_with_aux_rand(tx, &mut bitcoin::key::rand::thread_rng())
}
/// Sort transaction inputs and outputs by [`TxOrdering`] variant.
///
/// Uses a provided random number generator (rng).
pub fn sort_tx_with_aux_rand(&self, tx: &mut Transaction, rng: &mut impl RngCore) {
match self {
TxOrdering::Untouched => {}
TxOrdering::Shuffle => {
shuffle_slice(&mut tx.input, rng);
shuffle_slice(&mut tx.output, rng);
use rand::seq::SliceRandom;
let mut rng = rand::thread_rng();
tx.input.shuffle(&mut rng);
tx.output.shuffle(&mut rng);
}
TxOrdering::Custom {
input_sort,
output_sort,
} => {
tx.input.sort_unstable_by(|a, b| input_sort(a, b));
tx.output.sort_unstable_by(|a, b| output_sort(a, b));
TxOrdering::Bip69Lexicographic => {
tx.input.sort_unstable_by_key(|txin| {
(txin.previous_output.txid, txin.previous_output.vout)
});
tx.output
.sort_unstable_by_key(|txout| (txout.value, txout.script_pubkey.clone()));
}
}
}
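// Sketch: supplying the rng makes the shuffle deterministic; a fixed seed yields
// the same ordering on every run (assumes the `rand` crate, as in the tests below).
fn shuffle_deterministically_example(tx: &mut Transaction) {
    use rand::{rngs::StdRng, SeedableRng};
    let mut rng = StdRng::seed_from_u64(0);
    TxOrdering::Shuffle.sort_tx_with_aux_rand(tx, &mut rng);
}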
@@ -856,10 +750,9 @@ impl RbfValue {
}
/// Policy regarding the use of change outputs when creating a transaction
#[derive(Default, Debug, Ord, PartialOrd, Eq, PartialEq, Hash, Clone, Copy)]
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Hash, Clone, Copy)]
pub enum ChangeSpendPolicy {
/// Use both change and non-change outputs (default)
#[default]
ChangeAllowed,
/// Only use change outputs (see [`TxBuilder::only_spend_change`])
OnlyChange,
@@ -867,8 +760,14 @@ pub enum ChangeSpendPolicy {
ChangeForbidden,
}
impl Default for ChangeSpendPolicy {
fn default() -> Self {
ChangeSpendPolicy::ChangeAllowed
}
}
impl ChangeSpendPolicy {
pub(crate) fn is_satisfied_by(&self, utxo: &LocalOutput) -> bool {
pub(crate) fn is_satisfied_by(&self, utxo: &LocalUtxo) -> bool {
match self {
ChangeSpendPolicy::ChangeAllowed => true,
ChangeSpendPolicy::OnlyChange => utxo.keychain == KeychainKind::Internal,
@@ -894,10 +793,15 @@ mod test {
use bdk_chain::ConfirmationTime;
use bitcoin::consensus::deserialize;
use bitcoin::hex::FromHex;
use bitcoin::TxOut;
use bitcoin::hashes::hex::FromHex;
use super::*;
#[test]
fn test_output_ordering_default_shuffle() {
assert_eq!(TxOrdering::default(), TxOrdering::Shuffle);
}
#[test]
fn test_output_ordering_untouched() {
let original_tx = ordering_test_tx!();
@@ -930,28 +834,13 @@ mod test {
}
#[test]
fn test_output_ordering_custom_but_bip69() {
fn test_output_ordering_bip69() {
use core::str::FromStr;
let original_tx = ordering_test_tx!();
let mut tx = original_tx;
let bip69_txin_cmp = |tx_a: &TxIn, tx_b: &TxIn| {
let project_outpoint = |t: &TxIn| (t.previous_output.txid, t.previous_output.vout);
project_outpoint(tx_a).cmp(&project_outpoint(tx_b))
};
let bip69_txout_cmp = |tx_a: &TxOut, tx_b: &TxOut| {
let project_utxo = |t: &TxOut| (t.value, t.script_pubkey.clone());
project_utxo(tx_a).cmp(&project_utxo(tx_b))
};
let custom_bip69_ordering = TxOrdering::Custom {
input_sort: Arc::new(bip69_txin_cmp),
output_sort: Arc::new(bip69_txout_cmp),
};
custom_bip69_ordering.sort_tx(&mut tx);
TxOrdering::Bip69Lexicographic.sort_tx(&mut tx);
assert_eq!(
tx.input[0].previous_output,
@@ -975,92 +864,32 @@ mod test {
.unwrap()
);
assert_eq!(tx.output[0].value.to_sat(), 800);
assert_eq!(tx.output[1].script_pubkey, ScriptBuf::from(vec![0xAA]));
assert_eq!(
tx.output[2].script_pubkey,
ScriptBuf::from(vec![0xAA, 0xEE])
);
assert_eq!(tx.output[0].value, 800);
assert_eq!(tx.output[1].script_pubkey, From::from(vec![0xAA]));
assert_eq!(tx.output[2].script_pubkey, From::from(vec![0xAA, 0xEE]));
}
#[test]
fn test_output_ordering_custom_with_sha256() {
use bitcoin::hashes::{sha256, Hash};
let original_tx = ordering_test_tx!();
let mut tx_1 = original_tx.clone();
let mut tx_2 = original_tx.clone();
let shared_secret = "secret_tweak";
let hash_txin_with_shared_secret_seed = Arc::new(|tx_a: &TxIn, tx_b: &TxIn| {
let secret_digest_from_txin = |txin: &TxIn| {
sha256::Hash::hash(
&[
&txin.previous_output.txid.to_raw_hash()[..],
&txin.previous_output.vout.to_be_bytes(),
shared_secret.as_bytes(),
]
.concat(),
)
};
secret_digest_from_txin(tx_a).cmp(&secret_digest_from_txin(tx_b))
});
let hash_txout_with_shared_secret_seed = Arc::new(|tx_a: &TxOut, tx_b: &TxOut| {
let secret_digest_from_txout = |txin: &TxOut| {
sha256::Hash::hash(
&[
&txin.value.to_sat().to_be_bytes(),
&txin.script_pubkey.clone().into_bytes()[..],
shared_secret.as_bytes(),
]
.concat(),
)
};
secret_digest_from_txout(tx_a).cmp(&secret_digest_from_txout(tx_b))
});
let custom_ordering_from_salted_sha256_1 = TxOrdering::Custom {
input_sort: hash_txin_with_shared_secret_seed.clone(),
output_sort: hash_txout_with_shared_secret_seed.clone(),
};
let custom_ordering_from_salted_sha256_2 = TxOrdering::Custom {
input_sort: hash_txin_with_shared_secret_seed,
output_sort: hash_txout_with_shared_secret_seed,
};
custom_ordering_from_salted_sha256_1.sort_tx(&mut tx_1);
custom_ordering_from_salted_sha256_2.sort_tx(&mut tx_2);
// Check the ordering is consistent between calls
assert_eq!(tx_1, tx_2);
// Check transaction order has changed
assert_ne!(tx_1, original_tx);
assert_ne!(tx_2, original_tx);
}
fn get_test_utxos() -> Vec<LocalOutput> {
fn get_test_utxos() -> Vec<LocalUtxo> {
use bitcoin::hashes::Hash;
vec![
LocalOutput {
LocalUtxo {
outpoint: OutPoint {
txid: bitcoin::Txid::from_slice(&[0; 32]).unwrap(),
txid: bitcoin::Txid::from_inner([0; 32]),
vout: 0,
},
txout: TxOut::NULL,
txout: Default::default(),
keychain: KeychainKind::External,
is_spent: false,
confirmation_time: ConfirmationTime::Unconfirmed { last_seen: 0 },
confirmation_time: ConfirmationTime::Unconfirmed,
derivation_index: 0,
},
LocalOutput {
LocalUtxo {
outpoint: OutPoint {
txid: bitcoin::Txid::from_slice(&[0; 32]).unwrap(),
txid: bitcoin::Txid::from_inner([0; 32]),
vout: 1,
},
txout: TxOut::NULL,
txout: Default::default(),
keychain: KeychainKind::Internal,
is_spent: false,
confirmation_time: ConfirmationTime::Confirmed {


@@ -10,12 +10,10 @@
// licenses.
use bitcoin::secp256k1::{All, Secp256k1};
use bitcoin::{absolute, relative, Script, Sequence};
use bitcoin::{LockTime, Script, Sequence};
use miniscript::{MiniscriptKey, Satisfier, ToPublicKey};
use rand_core::RngCore;
/// Trait to check if a value is below the dust limit.
/// The dust value for a given script public key is calculated using rust-bitcoin to
/// keep it consistent with the network dust rate.
@@ -28,7 +26,7 @@ pub trait IsDust {
impl IsDust for u64 {
fn is_dust(&self, script: &Script) -> bool {
*self < script.minimal_non_dust().to_sat()
*self < script.dust_value().to_sat()
}
}
@@ -67,7 +65,7 @@ pub(crate) fn check_nsequence_rbf(rbf: Sequence, csv: Sequence) -> bool {
}
impl<Pk: MiniscriptKey + ToPublicKey> Satisfier<Pk> for After {
fn check_after(&self, n: absolute::LockTime) -> bool {
fn check_after(&self, n: LockTime) -> bool {
if let Some(current_height) = self.current_height {
current_height >= n.to_consensus_u32()
} else {
@@ -97,7 +95,7 @@ impl Older {
}
impl<Pk: MiniscriptKey + ToPublicKey> Satisfier<Pk> for Older {
fn check_older(&self, n: relative::LockTime) -> bool {
fn check_older(&self, n: Sequence) -> bool {
if let Some(current_height) = self.current_height {
// TODO: test >= / >
current_height
@@ -112,19 +110,6 @@ impl<Pk: MiniscriptKey + ToPublicKey> Satisfier<Pk> for Older {
}
}
// The Knuth shuffle, based on the original [Fisher-Yates method](https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle)
pub(crate) fn shuffle_slice<T>(list: &mut [T], rng: &mut impl RngCore) {
if list.is_empty() {
return;
}
let mut current_index = list.len() - 1;
while current_index > 0 {
let random_index = rng.next_u32() as usize % (current_index + 1);
list.swap(current_index, random_index);
current_index -= 1;
}
}
pub(crate) type SecpCtx = Secp256k1<All>;
#[cfg(test)]
@@ -133,17 +118,13 @@ mod test {
// otherwise it's time-based
pub(crate) const SEQUENCE_LOCKTIME_TYPE_FLAG: u32 = 1 << 22;
use super::{check_nsequence_rbf, shuffle_slice, IsDust};
use crate::bitcoin::{Address, Network, Sequence};
use alloc::vec::Vec;
use super::{check_nsequence_rbf, IsDust};
use crate::bitcoin::{Address, Sequence};
use core::str::FromStr;
use rand::{rngs::StdRng, thread_rng, SeedableRng};
#[test]
fn test_is_dust() {
let script_p2pkh = Address::from_str("1GNgwA8JfG7Kc8akJ8opdNWJUihqUztfPe")
.unwrap()
.require_network(Network::Bitcoin)
.unwrap()
.script_pubkey();
assert!(script_p2pkh.is_p2pkh());
@@ -151,11 +132,9 @@ mod test {
assert!(!546.is_dust(&script_p2pkh));
let script_p2wpkh = Address::from_str("bc1qxlh2mnc0yqwas76gqq665qkggee5m98t8yskd8")
.unwrap()
.require_network(Network::Bitcoin)
.unwrap()
.script_pubkey();
assert!(script_p2wpkh.is_p2wpkh());
assert!(script_p2wpkh.is_v0_p2wpkh());
assert!(293.is_dust(&script_p2wpkh));
assert!(!294.is_dust(&script_p2wpkh));
}
@@ -199,46 +178,4 @@ mod test {
);
assert!(result);
}
#[test]
#[cfg(feature = "std")]
fn test_shuffle_slice_empty_vec() {
let mut test: Vec<u8> = vec![];
shuffle_slice(&mut test, &mut thread_rng());
}
#[test]
#[cfg(feature = "std")]
fn test_shuffle_slice_single_vec() {
let mut test: Vec<u8> = vec![0];
shuffle_slice(&mut test, &mut thread_rng());
}
#[test]
fn test_shuffle_slice_duple_vec() {
let seed = [0; 32];
let mut rng: StdRng = SeedableRng::from_seed(seed);
let mut test: Vec<u8> = vec![0, 1];
shuffle_slice(&mut test, &mut rng);
assert_eq!(test, &[0, 1]);
let seed = [6; 32];
let mut rng: StdRng = SeedableRng::from_seed(seed);
let mut test: Vec<u8> = vec![0, 1];
shuffle_slice(&mut test, &mut rng);
assert_eq!(test, &[1, 0]);
}
#[test]
fn test_shuffle_slice_multi_vec() {
let seed = [0; 32];
let mut rng: StdRng = SeedableRng::from_seed(seed);
let mut test: Vec<u8> = vec![0, 1, 2, 4, 5];
shuffle_slice(&mut test, &mut rng);
assert_eq!(test, &[2, 1, 0, 4, 5]);
let seed = [25; 32];
let mut rng: StdRng = SeedableRng::from_seed(seed);
let mut test: Vec<u8> = vec![0, 1, 2, 4, 5];
shuffle_slice(&mut test, &mut rng);
assert_eq!(test, &[0, 4, 1, 2, 5]);
}
}


@@ -0,0 +1,93 @@
#![allow(unused)]
use bdk::{wallet::AddressIndex, Wallet};
use bdk_chain::{BlockId, ConfirmationTime};
use bitcoin::hashes::Hash;
use bitcoin::{BlockHash, Network, Transaction, TxOut};
/// Return a fake wallet that appears to be funded for testing.
pub fn get_funded_wallet_with_change(
descriptor: &str,
change: Option<&str>,
) -> (Wallet, bitcoin::Txid) {
let mut wallet = Wallet::new_no_persist(descriptor, change, Network::Regtest).unwrap();
let address = wallet.get_address(AddressIndex::New).address;
let tx = Transaction {
version: 1,
lock_time: bitcoin::PackedLockTime(0),
input: vec![],
output: vec![TxOut {
value: 50_000,
script_pubkey: address.script_pubkey(),
}],
};
wallet
.insert_checkpoint(BlockId {
height: 1_000,
hash: BlockHash::all_zeros(),
})
.unwrap();
wallet
.insert_tx(
tx.clone(),
ConfirmationTime::Confirmed {
height: 1_000,
time: 100,
},
)
.unwrap();
(wallet, tx.txid())
}
pub fn get_funded_wallet(descriptor: &str) -> (Wallet, bitcoin::Txid) {
get_funded_wallet_with_change(descriptor, None)
}
pub fn get_test_wpkh() -> &'static str {
"wpkh(cVpPVruEDdmutPzisEsYvtST1usBR3ntr8pXSyt6D2YYqXRyPcFW)"
}
pub fn get_test_single_sig_csv() -> &'static str {
// and(pk(Alice),older(6))
"wsh(and_v(v:pk(cVpPVruEDdmutPzisEsYvtST1usBR3ntr8pXSyt6D2YYqXRyPcFW),older(6)))"
}
pub fn get_test_a_or_b_plus_csv() -> &'static str {
// or(pk(Alice),and(pk(Bob),older(144)))
"wsh(or_d(pk(cRjo6jqfVNP33HhSS76UhXETZsGTZYx8FMFvR9kpbtCSV1PmdZdu),and_v(v:pk(cMnkdebixpXMPfkcNEjjGin7s94hiehAH4mLbYkZoh9KSiNNmqC8),older(144))))"
}
pub fn get_test_single_sig_cltv() -> &'static str {
// and(pk(Alice),after(100000))
"wsh(and_v(v:pk(cVpPVruEDdmutPzisEsYvtST1usBR3ntr8pXSyt6D2YYqXRyPcFW),after(100000)))"
}
pub fn get_test_tr_single_sig() -> &'static str {
"tr(cNJmN3fH9DDbDt131fQNkVakkpzawJBSeybCUNmP1BovpmGQ45xG)"
}
pub fn get_test_tr_with_taptree() -> &'static str {
"tr(b511bd5771e47ee27558b1765e87b541668304ec567721c7b880edc0a010da55,{pk(cPZzKuNmpuUjD1e8jUU4PVzy2b5LngbSip8mBsxf4e7rSFZVb4Uh),pk(8aee2b8120a5f157f1223f72b5e62b825831a27a9fdf427db7cc697494d4a642)})"
}
pub fn get_test_tr_with_taptree_both_priv() -> &'static str {
"tr(b511bd5771e47ee27558b1765e87b541668304ec567721c7b880edc0a010da55,{pk(cPZzKuNmpuUjD1e8jUU4PVzy2b5LngbSip8mBsxf4e7rSFZVb4Uh),pk(cNaQCDwmmh4dS9LzCgVtyy1e1xjCJ21GUDHe9K98nzb689JvinGV)})"
}
pub fn get_test_tr_repeated_key() -> &'static str {
"tr(b511bd5771e47ee27558b1765e87b541668304ec567721c7b880edc0a010da55,{and_v(v:pk(cVpPVruEDdmutPzisEsYvtST1usBR3ntr8pXSyt6D2YYqXRyPcFW),after(100)),and_v(v:pk(cVpPVruEDdmutPzisEsYvtST1usBR3ntr8pXSyt6D2YYqXRyPcFW),after(200))})"
}
pub fn get_test_tr_single_sig_xprv() -> &'static str {
"tr(tprv8ZgxMBicQKsPdDArR4xSAECuVxeX1jwwSXR4ApKbkYgZiziDc4LdBy2WvJeGDfUSE4UT4hHhbgEwbdq8ajjUHiKDegkwrNU6V55CxcxonVN/*)"
}
pub fn get_test_tr_with_taptree_xprv() -> &'static str {
"tr(cNJmN3fH9DDbDt131fQNkVakkpzawJBSeybCUNmP1BovpmGQ45xG,{pk(tprv8ZgxMBicQKsPdDArR4xSAECuVxeX1jwwSXR4ApKbkYgZiziDc4LdBy2WvJeGDfUSE4UT4hHhbgEwbdq8ajjUHiKDegkwrNU6V55CxcxonVN/*),pk(8aee2b8120a5f157f1223f72b5e62b825831a27a9fdf427db7cc697494d4a642)})"
}
pub fn get_test_tr_dup_keys() -> &'static str {
"tr(cNJmN3fH9DDbDt131fQNkVakkpzawJBSeybCUNmP1BovpmGQ45xG,{pk(8aee2b8120a5f157f1223f72b5e62b825831a27a9fdf427db7cc697494d4a642),pk(8aee2b8120a5f157f1223f72b5e62b825831a27a9fdf427db7cc697494d4a642)})"
}

crates/bdk/tests/psbt.rs (new file, 158 lines)

@@ -0,0 +1,158 @@
use bdk::bitcoin::TxIn;
use bdk::wallet::AddressIndex;
use bdk::wallet::AddressIndex::New;
use bdk::{psbt, FeeRate, SignOptions};
use bitcoin::util::psbt::PartiallySignedTransaction as Psbt;
use core::str::FromStr;
mod common;
use common::*;
// from bip 174
const PSBT_STR: &str = "cHNidP8BAKACAAAAAqsJSaCMWvfEm4IS9Bfi8Vqz9cM9zxU4IagTn4d6W3vkAAAAAAD+////qwlJoIxa98SbghL0F+LxWrP1wz3PFTghqBOfh3pbe+QBAAAAAP7///8CYDvqCwAAAAAZdqkUdopAu9dAy+gdmI5x3ipNXHE5ax2IrI4kAAAAAAAAGXapFG9GILVT+glechue4O/p+gOcykWXiKwAAAAAAAEHakcwRAIgR1lmF5fAGwNrJZKJSGhiGDR9iYZLcZ4ff89X0eURZYcCIFMJ6r9Wqk2Ikf/REf3xM286KdqGbX+EhtdVRs7tr5MZASEDXNxh/HupccC1AaZGoqg7ECy0OIEhfKaC3Ibi1z+ogpIAAQEgAOH1BQAAAAAXqRQ1RebjO4MsRwUPJNPuuTycA5SLx4cBBBYAFIXRNTfy4mVAWjTbr6nj3aAfuCMIAAAA";
#[test]
#[should_panic(expected = "InputIndexOutOfRange")]
fn test_psbt_malformed_psbt_input_legacy() {
let psbt_bip = Psbt::from_str(PSBT_STR).unwrap();
let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
let send_to = wallet.get_address(AddressIndex::New);
let mut builder = wallet.build_tx();
builder.add_recipient(send_to.script_pubkey(), 10_000);
let (mut psbt, _) = builder.finish().unwrap();
psbt.inputs.push(psbt_bip.inputs[0].clone());
let options = SignOptions {
trust_witness_utxo: true,
..Default::default()
};
let _ = wallet.sign(&mut psbt, options).unwrap();
}
#[test]
#[should_panic(expected = "InputIndexOutOfRange")]
fn test_psbt_malformed_psbt_input_segwit() {
let psbt_bip = Psbt::from_str(PSBT_STR).unwrap();
let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
let send_to = wallet.get_address(AddressIndex::New);
let mut builder = wallet.build_tx();
builder.add_recipient(send_to.script_pubkey(), 10_000);
let (mut psbt, _) = builder.finish().unwrap();
psbt.inputs.push(psbt_bip.inputs[1].clone());
let options = SignOptions {
trust_witness_utxo: true,
..Default::default()
};
let _ = wallet.sign(&mut psbt, options).unwrap();
}
#[test]
#[should_panic(expected = "InputIndexOutOfRange")]
fn test_psbt_malformed_tx_input() {
let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
let send_to = wallet.get_address(AddressIndex::New);
let mut builder = wallet.build_tx();
builder.add_recipient(send_to.script_pubkey(), 10_000);
let (mut psbt, _) = builder.finish().unwrap();
psbt.unsigned_tx.input.push(TxIn::default());
let options = SignOptions {
trust_witness_utxo: true,
..Default::default()
};
let _ = wallet.sign(&mut psbt, options).unwrap();
}
#[test]
fn test_psbt_sign_with_finalized() {
let psbt_bip = Psbt::from_str(PSBT_STR).unwrap();
let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
let send_to = wallet.get_address(AddressIndex::New);
let mut builder = wallet.build_tx();
builder.add_recipient(send_to.script_pubkey(), 10_000);
let (mut psbt, _) = builder.finish().unwrap();
// add a finalized input
psbt.inputs.push(psbt_bip.inputs[0].clone());
psbt.unsigned_tx
.input
.push(psbt_bip.unsigned_tx.input[0].clone());
let _ = wallet.sign(&mut psbt, SignOptions::default()).unwrap();
}
#[test]
fn test_psbt_fee_rate_with_witness_utxo() {
use psbt::PsbtUtils;
let expected_fee_rate = 1.2345;
let (mut wallet, _) = get_funded_wallet("wpkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)");
let addr = wallet.get_address(New);
let mut builder = wallet.build_tx();
builder.drain_to(addr.script_pubkey()).drain_wallet();
builder.fee_rate(FeeRate::from_sat_per_vb(expected_fee_rate));
let (mut psbt, _) = builder.finish().unwrap();
let fee_amount = psbt.fee_amount();
assert!(fee_amount.is_some());
let unfinalized_fee_rate = psbt.fee_rate().unwrap();
let finalized = wallet.sign(&mut psbt, Default::default()).unwrap();
assert!(finalized);
let finalized_fee_rate = psbt.fee_rate().unwrap();
assert!(finalized_fee_rate.as_sat_per_vb() >= expected_fee_rate);
assert!(finalized_fee_rate.as_sat_per_vb() < unfinalized_fee_rate.as_sat_per_vb());
}
#[test]
fn test_psbt_fee_rate_with_nonwitness_utxo() {
use psbt::PsbtUtils;
let expected_fee_rate = 1.2345;
let (mut wallet, _) = get_funded_wallet("pkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)");
let addr = wallet.get_address(New);
let mut builder = wallet.build_tx();
builder.drain_to(addr.script_pubkey()).drain_wallet();
builder.fee_rate(FeeRate::from_sat_per_vb(expected_fee_rate));
let (mut psbt, _) = builder.finish().unwrap();
let fee_amount = psbt.fee_amount();
assert!(fee_amount.is_some());
let unfinalized_fee_rate = psbt.fee_rate().unwrap();
let finalized = wallet.sign(&mut psbt, Default::default()).unwrap();
assert!(finalized);
let finalized_fee_rate = psbt.fee_rate().unwrap();
assert!(finalized_fee_rate.as_sat_per_vb() >= expected_fee_rate);
assert!(finalized_fee_rate.as_sat_per_vb() < unfinalized_fee_rate.as_sat_per_vb());
}
#[test]
fn test_psbt_fee_rate_with_missing_txout() {
use psbt::PsbtUtils;
let expected_fee_rate = 1.2345;
let (mut wpkh_wallet, _) = get_funded_wallet("wpkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)");
let addr = wpkh_wallet.get_address(New);
let mut builder = wpkh_wallet.build_tx();
builder.drain_to(addr.script_pubkey()).drain_wallet();
builder.fee_rate(FeeRate::from_sat_per_vb(expected_fee_rate));
let (mut wpkh_psbt, _) = builder.finish().unwrap();
wpkh_psbt.inputs[0].witness_utxo = None;
wpkh_psbt.inputs[0].non_witness_utxo = None;
assert!(wpkh_psbt.fee_amount().is_none());
assert!(wpkh_psbt.fee_rate().is_none());
let (mut pkh_wallet, _) = get_funded_wallet("pkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)");
let addr = pkh_wallet.get_address(New);
let mut builder = pkh_wallet.build_tx();
builder.drain_to(addr.script_pubkey()).drain_wallet();
builder.fee_rate(FeeRate::from_sat_per_vb(expected_fee_rate));
let (mut pkh_psbt, _) = builder.finish().unwrap();
pkh_psbt.inputs[0].non_witness_utxo = None;
assert!(pkh_psbt.fee_amount().is_none());
assert!(pkh_psbt.fee_rate().is_none());
}

crates/bdk/tests/wallet.rs (new file, 3309 lines; diff suppressed because it is too large)


@@ -1,26 +0,0 @@
[package]
name = "bdk_bitcoind_rpc"
version = "0.13.0"
edition = "2021"
rust-version = "1.63"
homepage = "https://bitcoindevkit.org"
repository = "https://github.com/bitcoindevkit/bdk"
documentation = "https://docs.rs/bdk_bitcoind_rpc"
description = "This crate is used for emitting blockchain data from the `bitcoind` RPC interface."
license = "MIT OR Apache-2.0"
readme = "README.md"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bitcoin = { version = "0.32.0", default-features = false }
bitcoincore-rpc = { version = "0.19.0" }
bdk_chain = { path = "../chain", version = "0.17", default-features = false }
[dev-dependencies]
bdk_testenv = { path = "../testenv", default-features = false }
[features]
default = ["std"]
std = ["bitcoin/std", "bdk_chain/std"]
serde = ["bitcoin/serde", "bdk_chain/serde"]


@@ -1,3 +0,0 @@
# BDK Bitcoind RPC
This crate is used for emitting blockchain data from the `bitcoind` RPC interface.


@@ -1,328 +0,0 @@
//! This crate is used for emitting blockchain data from the `bitcoind` RPC interface. It does not
//! use the wallet RPC API, so this crate can be used with wallet-disabled Bitcoin Core nodes.
//!
//! [`Emitter`] is the main structure which sources blockchain data from [`bitcoincore_rpc::Client`].
//!
//! To only get block updates (exclude mempool transactions), the caller can use
//! [`Emitter::next_block`] and/or [`Emitter::next_header`] until it returns `Ok(None)` (which means
//! the chain tip is reached). A separate method, [`Emitter::mempool`], can be used to emit the whole
//! mempool.
#![warn(missing_docs)]
use bdk_chain::{local_chain::CheckPoint, BlockId};
use bitcoin::{block::Header, Block, BlockHash, Transaction};
pub use bitcoincore_rpc;
use bitcoincore_rpc::bitcoincore_rpc_json;
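// Illustrative sketch of the emission pattern described above; the function and
// its parameters are assumptions of this example, not crate API. Error handling
// via `anyhow` mirrors this repo's tests.
fn emission_loop_example<C: bitcoincore_rpc::RpcApi>(
    client: &C,
    chain: &mut bdk_chain::local_chain::LocalChain,
) -> anyhow::Result<()> {
    let mut emitter = Emitter::new(client, chain.tip(), 0);
    // Emit blocks until `Ok(None)` signals that the chain tip is reached...
    while let Some(event) = emitter.next_block()? {
        let _changeset = chain.apply_update(event.checkpoint)?;
        // ... scan `event.block` for relevant transactions here ...
    }
    // ...then emit the whole mempool, with first-seen unix timestamps.
    for (_tx, _first_seen) in emitter.mempool()? {
        // ... insert unconfirmed transactions into a tx graph ...
    }
    Ok(())
}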
/// The [`Emitter`] is used to emit data sourced from [`bitcoincore_rpc::Client`].
///
/// Refer to [module-level documentation] for more.
///
/// [module-level documentation]: crate
pub struct Emitter<'c, C> {
client: &'c C,
start_height: u32,
/// The checkpoint of the last-emitted block that is in the best chain. If it is later found
/// that the block is no longer in the best chain, it will be popped off from here.
last_cp: CheckPoint,
/// The block result returned from RPC for the last-emitted block. As this result contains the
/// next block's block hash (which we use to fetch the next block), we set this to `None`
/// whenever there are no more blocks, or the next block is no longer in the best chain. This
/// gives us an opportunity to re-fetch this result.
last_block: Option<bitcoincore_rpc_json::GetBlockResult>,
/// The latest first-seen epoch of emitted mempool transactions. This is used to determine
/// whether a mempool transaction has already been emitted.
last_mempool_time: usize,
/// The last emitted block during our last mempool emission. This is used to determine whether
/// there has been a reorg since our last mempool emission.
last_mempool_tip: Option<u32>,
}
impl<'c, C: bitcoincore_rpc::RpcApi> Emitter<'c, C> {
/// Construct a new [`Emitter`].
///
/// `last_cp` informs the emitter of the chain we are starting off with. This way, the emitter
/// can start emission from a block that connects to the original chain.
///
/// `start_height` starts emission from a given height (if there are no conflicts with the
/// original chain).
pub fn new(client: &'c C, last_cp: CheckPoint, start_height: u32) -> Self {
Self {
client,
start_height,
last_cp,
last_block: None,
last_mempool_time: 0,
last_mempool_tip: None,
}
}
/// Emit mempool transactions, alongside their first-seen unix timestamps.
///
/// This method emits each transaction only once, unless we cannot guarantee the transaction's
/// ancestors are already emitted.
///
/// To understand why, consider a receiver which filters transactions based on whether it
/// alters the UTXO set of tracked script pubkeys. If an emitted mempool transaction spends a
/// tracked UTXO which is confirmed at height `h`, but the receiver has only seen up to block
/// of height `h-1`, we want to re-emit this transaction until the receiver has seen the block
/// at height `h`.
pub fn mempool(&mut self) -> Result<Vec<(Transaction, u64)>, bitcoincore_rpc::Error> {
let client = self.client;
// This is the emitted tip height during the last mempool emission.
let prev_mempool_tip = self
.last_mempool_tip
// We use `start_height - 1` as we cannot guarantee that the block at
// `start_height` has been emitted.
.unwrap_or(self.start_height.saturating_sub(1));
// Mempool txs come with a timestamp of when the tx is introduced to the mempool. We keep
// track of the latest mempool tx's timestamp to determine whether we have seen a tx
// before. `prev_mempool_time` is the previous timestamp and `latest_time` records what will
// be the new latest timestamp.
let prev_mempool_time = self.last_mempool_time;
let mut latest_time = prev_mempool_time;
let txs_to_emit = client
.get_raw_mempool_verbose()?
.into_iter()
.filter_map({
let latest_time = &mut latest_time;
move |(txid, tx_entry)| -> Option<Result<_, bitcoincore_rpc::Error>> {
let tx_time = tx_entry.time as usize;
if tx_time > *latest_time {
*latest_time = tx_time;
}
// Avoid emitting transactions that are already emitted if we can guarantee
// blocks containing ancestors are already emitted. The bitcoind rpc interface
// provides us with the block height that the tx is introduced to the mempool.
// If we have already emitted the block at that height, we can assume that all
// ancestor txs have been processed by the receiver.
let is_already_emitted = tx_time <= prev_mempool_time;
let is_within_height = tx_entry.height <= prev_mempool_tip as _;
if is_already_emitted && is_within_height {
return None;
}
let tx = match client.get_raw_transaction(&txid, None) {
Ok(tx) => tx,
// the tx has been confirmed or evicted since `get_raw_mempool_verbose`
Err(err) if err.is_not_found_error() => return None,
Err(err) => return Some(Err(err)),
};
Some(Ok((tx, tx_time as u64)))
}
})
.collect::<Result<Vec<_>, _>>()?;
self.last_mempool_time = latest_time;
self.last_mempool_tip = Some(self.last_cp.height());
Ok(txs_to_emit)
}
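// Example (sketch): the emitted `(Transaction, u64)` pairs can be fed directly
// into an indexed tx graph, as this repo's tests do:
//
//     let unconfirmed_txs = emitter.mempool()?;
//     let _changeset = indexed_tx_graph.batch_insert_unconfirmed(unconfirmed_txs);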
/// Emit the next block height and header (if any).
pub fn next_header(&mut self) -> Result<Option<BlockEvent<Header>>, bitcoincore_rpc::Error> {
Ok(poll(self, |hash| self.client.get_block_header(hash))?
.map(|(checkpoint, block)| BlockEvent { block, checkpoint }))
}
/// Emit the next block height and block (if any).
pub fn next_block(&mut self) -> Result<Option<BlockEvent<Block>>, bitcoincore_rpc::Error> {
Ok(poll(self, |hash| self.client.get_block(hash))?
.map(|(checkpoint, block)| BlockEvent { block, checkpoint }))
}
}
/// A newly emitted block from [`Emitter`].
#[derive(Debug)]
pub struct BlockEvent<B> {
/// Either a full [`Block`] or [`Header`] of the new block.
pub block: B,
/// The checkpoint of the new block.
///
/// A [`CheckPoint`] is a node of a linked list of [`BlockId`]s. This checkpoint is linked to
/// all [`BlockId`]s originally passed in [`Emitter::new`] as well as emitted blocks since then.
/// These blocks are guaranteed to be of the same chain.
///
/// This is important as BDK structures require the block being applied to connect with
/// another block in the original chain.
pub checkpoint: CheckPoint,
}
impl<B> BlockEvent<B> {
/// The block height of this new block.
pub fn block_height(&self) -> u32 {
self.checkpoint.height()
}
/// The block hash of this new block.
pub fn block_hash(&self) -> BlockHash {
self.checkpoint.hash()
}
/// The [`BlockId`] of a previous block that this block connects to.
///
/// This either returns a [`BlockId`] of a previously emitted block or from the chain we started
/// with (passed in as `last_cp` in [`Emitter::new`]).
///
/// This value is derived from [`BlockEvent::checkpoint`].
pub fn connected_to(&self) -> BlockId {
match self.checkpoint.prev() {
Some(prev_cp) => prev_cp.block_id(),
// there is no previous checkpoint, so just connect with itself
None => self.checkpoint.block_id(),
}
}
}
enum PollResponse {
Block(bitcoincore_rpc_json::GetBlockResult),
NoMoreBlocks,
/// Fetched block is not in the best chain.
BlockNotInBestChain,
AgreementFound(bitcoincore_rpc_json::GetBlockResult, CheckPoint),
/// Force the genesis checkpoint down the receiver's throat.
AgreementPointNotFound(BlockHash),
}
fn poll_once<C>(emitter: &Emitter<C>) -> Result<PollResponse, bitcoincore_rpc::Error>
where
C: bitcoincore_rpc::RpcApi,
{
let client = emitter.client;
if let Some(last_res) = &emitter.last_block {
let next_hash = if last_res.height < emitter.start_height as _ {
// enforce start height
let next_hash = client.get_block_hash(emitter.start_height as _)?;
// make sure last emission is still in best chain
if client.get_block_hash(last_res.height as _)? != last_res.hash {
return Ok(PollResponse::BlockNotInBestChain);
}
next_hash
} else {
match last_res.nextblockhash {
None => return Ok(PollResponse::NoMoreBlocks),
Some(next_hash) => next_hash,
}
};
let res = client.get_block_info(&next_hash)?;
if res.confirmations < 0 {
return Ok(PollResponse::BlockNotInBestChain);
}
return Ok(PollResponse::Block(res));
}
for cp in emitter.last_cp.iter() {
let res = match client.get_block_info(&cp.hash()) {
// block not in best chain
Ok(res) if res.confirmations < 0 => continue,
Ok(res) => res,
Err(e) if e.is_not_found_error() => {
if cp.height() > 0 {
continue;
}
// if we can't find genesis block, we can't create an update that connects
break;
}
Err(e) => return Err(e),
};
// agreement point found
return Ok(PollResponse::AgreementFound(res, cp));
}
let genesis_hash = client.get_block_hash(0)?;
Ok(PollResponse::AgreementPointNotFound(genesis_hash))
}
fn poll<C, V, F>(
emitter: &mut Emitter<C>,
get_item: F,
) -> Result<Option<(CheckPoint, V)>, bitcoincore_rpc::Error>
where
C: bitcoincore_rpc::RpcApi,
F: Fn(&BlockHash) -> Result<V, bitcoincore_rpc::Error>,
{
loop {
match poll_once(emitter)? {
PollResponse::Block(res) => {
let height = res.height as u32;
let hash = res.hash;
let item = get_item(&hash)?;
let new_cp = emitter
.last_cp
.clone()
.push(BlockId { height, hash })
.expect("must push");
emitter.last_cp = new_cp.clone();
emitter.last_block = Some(res);
return Ok(Some((new_cp, item)));
}
PollResponse::NoMoreBlocks => {
emitter.last_block = None;
return Ok(None);
}
PollResponse::BlockNotInBestChain => {
emitter.last_block = None;
continue;
}
PollResponse::AgreementFound(res, cp) => {
let agreement_h = res.height as u32;
// The tip during the last mempool emission needs to be in the best chain; we reduce
// it if it is not.
if let Some(h) = emitter.last_mempool_tip.as_mut() {
if *h > agreement_h {
*h = agreement_h;
}
}
// get rid of evicted blocks
emitter.last_cp = cp;
emitter.last_block = Some(res);
continue;
}
PollResponse::AgreementPointNotFound(genesis_hash) => {
emitter.last_cp = CheckPoint::new(BlockId {
height: 0,
hash: genesis_hash,
});
emitter.last_block = None;
continue;
}
}
}
}
/// Extends [`bitcoincore_rpc::Error`].
pub trait BitcoindRpcErrorExt {
/// Returns whether the error is a "not found" error.
///
/// This is useful since [`Emitter`] emits [`Result<_, bitcoincore_rpc::Error>`]s as
/// [`Iterator::Item`].
fn is_not_found_error(&self) -> bool;
}
impl BitcoindRpcErrorExt for bitcoincore_rpc::Error {
fn is_not_found_error(&self) -> bool {
if let bitcoincore_rpc::Error::JsonRpc(bitcoincore_rpc::jsonrpc::Error::Rpc(rpc_err)) = self
{
rpc_err.code == -5
} else {
false
}
}
}
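// Sketch: the extension trait lets callers treat RPC "not found" (code -5) as a
// soft failure, mirroring the `get_raw_transaction` handling in `Emitter::mempool`.
// This helper is an example, not crate API.
fn try_get_tx_example<C: bitcoincore_rpc::RpcApi>(
    client: &C,
    txid: &bitcoin::Txid,
) -> Result<Option<Transaction>, bitcoincore_rpc::Error> {
    match client.get_raw_transaction(txid, None) {
        Ok(tx) => Ok(Some(tx)),
        Err(err) if err.is_not_found_error() => Ok(None),
        Err(err) => Err(err),
    }
}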


@@ -1,738 +0,0 @@
use std::collections::{BTreeMap, BTreeSet};
use bdk_bitcoind_rpc::Emitter;
use bdk_chain::{
bitcoin::{Address, Amount, Txid},
local_chain::{CheckPoint, LocalChain},
spk_txout::SpkTxOutIndex,
Balance, BlockId, IndexedTxGraph, Merge,
};
use bdk_testenv::{anyhow, TestEnv};
use bitcoin::{hashes::Hash, Block, OutPoint, ScriptBuf, WScriptHash};
use bitcoincore_rpc::RpcApi;
/// Ensure that blocks are emitted in order even after reorg.
///
/// 1. Mine 101 blocks.
/// 2. Emit blocks from [`Emitter`] and update the [`LocalChain`].
/// 3. Reorg highest 6 blocks.
/// 4. Emit blocks from [`Emitter`] and re-update the [`LocalChain`].
#[test]
pub fn test_sync_local_chain() -> anyhow::Result<()> {
let env = TestEnv::new()?;
let network_tip = env.rpc_client().get_block_count()?;
let (mut local_chain, _) = LocalChain::from_genesis_hash(env.rpc_client().get_block_hash(0)?);
let mut emitter = Emitter::new(env.rpc_client(), local_chain.tip(), 0);
// Mine some blocks and return the actual block hashes.
// Because initializing `ElectrsD` already mines some blocks, we must include those too when
// returning block hashes.
let exp_hashes = {
let mut hashes = (0..=network_tip)
.map(|height| env.rpc_client().get_block_hash(height))
.collect::<Result<Vec<_>, _>>()?;
hashes.extend(env.mine_blocks(101 - network_tip as usize, None)?);
hashes
};
// See if the emitter outputs the right blocks.
println!("first sync:");
while let Some(emission) = emitter.next_block()? {
let height = emission.block_height();
let hash = emission.block_hash();
assert_eq!(
emission.block_hash(),
exp_hashes[height as usize],
"emitted block hash is unexpected"
);
assert_eq!(
local_chain.apply_update(emission.checkpoint,)?,
[(height, Some(hash))].into(),
"chain update changeset is unexpected",
);
}
assert_eq!(
local_chain
.iter_checkpoints()
.map(|cp| (cp.height(), cp.hash()))
.collect::<BTreeSet<_>>(),
exp_hashes
.iter()
.enumerate()
.map(|(i, hash)| (i as u32, *hash))
.collect::<BTreeSet<_>>(),
"final local_chain state is unexpected",
);
// Perform reorg.
let reorged_blocks = env.reorg(6)?;
let exp_hashes = exp_hashes
.iter()
.take(exp_hashes.len() - reorged_blocks.len())
.chain(&reorged_blocks)
.cloned()
.collect::<Vec<_>>();
// See if the emitter outputs the right blocks.
println!("after reorg:");
let mut exp_height = exp_hashes.len() - reorged_blocks.len();
while let Some(emission) = emitter.next_block()? {
let height = emission.block_height();
let hash = emission.block_hash();
assert_eq!(
height, exp_height as u32,
"emitted block has unexpected height"
);
assert_eq!(
hash, exp_hashes[height as usize],
"emitted block is unexpected"
);
assert_eq!(
local_chain.apply_update(emission.checkpoint,)?,
if exp_height == exp_hashes.len() - reorged_blocks.len() {
bdk_chain::local_chain::ChangeSet {
blocks: core::iter::once((height, Some(hash)))
.chain((height + 1..exp_hashes.len() as u32).map(|h| (h, None)))
.collect(),
}
} else {
[(height, Some(hash))].into()
},
"chain update changeset is unexpected",
);
exp_height += 1;
}
assert_eq!(
local_chain
.iter_checkpoints()
.map(|cp| (cp.height(), cp.hash()))
.collect::<BTreeSet<_>>(),
exp_hashes
.iter()
.enumerate()
.map(|(i, hash)| (i as u32, *hash))
.collect::<BTreeSet<_>>(),
"final local_chain state is unexpected after reorg",
);
Ok(())
}
/// Ensure that [`EmittedUpdate::into_tx_graph_update`] behaves appropriately for both mempool and
/// block updates.
///
/// [`EmittedUpdate::into_tx_graph_update`]: bdk_bitcoind_rpc::EmittedUpdate::into_tx_graph_update
#[test]
fn test_into_tx_graph() -> anyhow::Result<()> {
let env = TestEnv::new()?;
println!("getting new addresses!");
let addr_0 = env
.rpc_client()
.get_new_address(None, None)?
.assume_checked();
let addr_1 = env
.rpc_client()
.get_new_address(None, None)?
.assume_checked();
let addr_2 = env
.rpc_client()
.get_new_address(None, None)?
.assume_checked();
println!("got new addresses!");
println!("mining block!");
env.mine_blocks(101, None)?;
println!("mined blocks!");
let (mut chain, _) = LocalChain::from_genesis_hash(env.rpc_client().get_block_hash(0)?);
let mut indexed_tx_graph = IndexedTxGraph::<BlockId, _>::new({
let mut index = SpkTxOutIndex::<usize>::default();
index.insert_spk(0, addr_0.script_pubkey());
index.insert_spk(1, addr_1.script_pubkey());
index.insert_spk(2, addr_2.script_pubkey());
index
});
let emitter = &mut Emitter::new(env.rpc_client(), chain.tip(), 0);
while let Some(emission) = emitter.next_block()? {
let height = emission.block_height();
let _ = chain.apply_update(emission.checkpoint)?;
let indexed_additions = indexed_tx_graph.apply_block_relevant(&emission.block, height);
assert!(indexed_additions.is_empty());
}
// send 3 txs to a tracked address, these txs will be in the mempool
let exp_txids = {
let mut txids = BTreeSet::new();
for _ in 0..3 {
txids.insert(env.rpc_client().send_to_address(
&addr_0,
Amount::from_sat(10_000),
None,
None,
None,
None,
None,
None,
)?);
}
txids
};
// expect that the next block should be none and we should get 3 txs from mempool
{
// next block should be `None`
assert!(emitter.next_block()?.is_none());
let mempool_txs = emitter.mempool()?;
let indexed_additions = indexed_tx_graph.batch_insert_unconfirmed(mempool_txs);
assert_eq!(
indexed_additions
.tx_graph
.txs
.iter()
.map(|tx| tx.compute_txid())
.collect::<BTreeSet<Txid>>(),
exp_txids,
"changeset should have the 3 mempool transactions",
);
assert!(indexed_additions.tx_graph.anchors.is_empty());
}
// mine a block that confirms the 3 txs
let exp_block_hash = env.mine_blocks(1, None)?[0];
let exp_block_height = env.rpc_client().get_block_info(&exp_block_hash)?.height as u32;
let exp_anchors = exp_txids
.iter()
.map({
let anchor = BlockId {
height: exp_block_height,
hash: exp_block_hash,
};
move |&txid| (anchor, txid)
})
.collect::<BTreeSet<_>>();
// must receive mined block which will confirm the transactions.
{
let emission = emitter.next_block()?.expect("must get mined block");
let height = emission.block_height();
let _ = chain.apply_update(emission.checkpoint)?;
let indexed_additions = indexed_tx_graph.apply_block_relevant(&emission.block, height);
assert!(indexed_additions.tx_graph.txs.is_empty());
assert!(indexed_additions.tx_graph.txouts.is_empty());
assert_eq!(indexed_additions.tx_graph.anchors, exp_anchors);
}
Ok(())
}
/// Ensure next block emitted after reorg is at reorg height.
///
/// After a reorg, if the last-emitted block height is equal to or greater than the reorg height, and
/// the fallback height is equal to or lower than the reorg height, the next block/header emission
/// should be at the reorg height.
///
/// TODO: If the reorg height is lower than the fallback height, how do we find a block height to
/// emit that can connect with our receiver chain?
#[test]
fn ensure_block_emitted_after_reorg_is_at_reorg_height() -> anyhow::Result<()> {
const EMITTER_START_HEIGHT: usize = 100;
const CHAIN_TIP_HEIGHT: usize = 110;
let env = TestEnv::new()?;
let mut emitter = Emitter::new(
env.rpc_client(),
CheckPoint::new(BlockId {
height: 0,
hash: env.rpc_client().get_block_hash(0)?,
}),
EMITTER_START_HEIGHT as _,
);
env.mine_blocks(CHAIN_TIP_HEIGHT, None)?;
while emitter.next_header()?.is_some() {}
for reorg_count in 1..=10 {
let replaced_blocks = env.reorg_empty_blocks(reorg_count)?;
let next_emission = emitter.next_header()?.expect("must emit block after reorg");
assert_eq!(
(
next_emission.block_height() as usize,
next_emission.block_hash()
),
replaced_blocks[0],
"block emitted after reorg should be at the reorg height"
);
while emitter.next_header()?.is_some() {}
}
Ok(())
}
fn process_block(
recv_chain: &mut LocalChain,
recv_graph: &mut IndexedTxGraph<BlockId, SpkTxOutIndex<()>>,
block: Block,
block_height: u32,
) -> anyhow::Result<()> {
recv_chain.apply_update(CheckPoint::from_header(&block.header, block_height))?;
let _ = recv_graph.apply_block(block, block_height);
Ok(())
}
fn sync_from_emitter<C>(
recv_chain: &mut LocalChain,
recv_graph: &mut IndexedTxGraph<BlockId, SpkTxOutIndex<()>>,
emitter: &mut Emitter<C>,
) -> anyhow::Result<()>
where
C: bitcoincore_rpc::RpcApi,
{
while let Some(emission) = emitter.next_block()? {
let height = emission.block_height();
process_block(recv_chain, recv_graph, emission.block, height)?;
}
Ok(())
}
fn get_balance(
recv_chain: &LocalChain,
recv_graph: &IndexedTxGraph<BlockId, SpkTxOutIndex<()>>,
) -> anyhow::Result<Balance> {
let chain_tip = recv_chain.tip().block_id();
let outpoints = recv_graph.index.outpoints().clone();
let balance = recv_graph
.graph()
.balance(recv_chain, chain_tip, outpoints, |_, _| true);
Ok(balance)
}
/// If a block is reorged out, ensure that the transactions it contains which do not exist in the
/// replacement block(s) become unconfirmed.
#[test]
fn tx_can_become_unconfirmed_after_reorg() -> anyhow::Result<()> {
const PREMINE_COUNT: usize = 101;
const ADDITIONAL_COUNT: usize = 11;
const SEND_AMOUNT: Amount = Amount::from_sat(10_000);
let env = TestEnv::new()?;
let mut emitter = Emitter::new(
env.rpc_client(),
CheckPoint::new(BlockId {
height: 0,
hash: env.rpc_client().get_block_hash(0)?,
}),
0,
);
// setup addresses
let addr_to_mine = env
.rpc_client()
.get_new_address(None, None)?
.assume_checked();
let spk_to_track = ScriptBuf::new_p2wsh(&WScriptHash::all_zeros());
let addr_to_track = Address::from_script(&spk_to_track, bitcoin::Network::Regtest)?;
// setup receiver
let (mut recv_chain, _) = LocalChain::from_genesis_hash(env.rpc_client().get_block_hash(0)?);
let mut recv_graph = IndexedTxGraph::<BlockId, _>::new({
let mut recv_index = SpkTxOutIndex::default();
recv_index.insert_spk((), spk_to_track.clone());
recv_index
});
// mine and sync receiver up to tip
env.mine_blocks(PREMINE_COUNT, Some(addr_to_mine))?;
// create transactions that are tracked by our receiver
for _ in 0..ADDITIONAL_COUNT {
let txid = env.send(&addr_to_track, SEND_AMOUNT)?;
// lock outputs that send to `addr_to_track`
let outpoints_to_lock = env
.rpc_client()
.get_transaction(&txid, None)?
.transaction()?
.output
.into_iter()
.enumerate()
.filter(|(_, txo)| txo.script_pubkey == spk_to_track)
.map(|(vout, _)| OutPoint::new(txid, vout as _))
.collect::<Vec<_>>();
env.rpc_client().lock_unspent(&outpoints_to_lock)?;
let _ = env.mine_blocks(1, None)?;
}
// get emitter up to tip
sync_from_emitter(&mut recv_chain, &mut recv_graph, &mut emitter)?;
assert_eq!(
get_balance(&recv_chain, &recv_graph)?,
Balance {
confirmed: SEND_AMOUNT * ADDITIONAL_COUNT as u64,
..Balance::default()
},
"initial balance must be correct",
);
// perform reorgs with different depths
for reorg_count in 1..=ADDITIONAL_COUNT {
env.reorg_empty_blocks(reorg_count)?;
sync_from_emitter(&mut recv_chain, &mut recv_graph, &mut emitter)?;
assert_eq!(
get_balance(&recv_chain, &recv_graph)?,
Balance {
confirmed: SEND_AMOUNT * (ADDITIONAL_COUNT - reorg_count) as u64,
..Balance::default()
},
"reorg_count: {}",
reorg_count,
);
}
Ok(())
}
/// Ensure the avoid-re-emission logic is sound when [`Emitter`] is synced to tip.
///
/// The receiver (bdk_chain structures) is synced to the chain tip, and there are txs in the mempool.
/// When we call Emitter::mempool multiple times, mempool txs should not be re-emitted, even if the
/// chain tip is extended.
#[test]
fn mempool_avoids_re_emission() -> anyhow::Result<()> {
const BLOCKS_TO_MINE: usize = 101;
const MEMPOOL_TX_COUNT: usize = 2;
let env = TestEnv::new()?;
let mut emitter = Emitter::new(
env.rpc_client(),
CheckPoint::new(BlockId {
height: 0,
hash: env.rpc_client().get_block_hash(0)?,
}),
0,
);
// mine blocks and sync up emitter
let addr = env
.rpc_client()
.get_new_address(None, None)?
.assume_checked();
env.mine_blocks(BLOCKS_TO_MINE, Some(addr.clone()))?;
while emitter.next_header()?.is_some() {}
// have some random txs in mempool
let exp_txids = (0..MEMPOOL_TX_COUNT)
.map(|_| env.send(&addr, Amount::from_sat(2100)))
.collect::<Result<BTreeSet<Txid>, _>>()?;
// the first emission should include all transactions
let emitted_txids = emitter
.mempool()?
.into_iter()
.map(|(tx, _)| tx.compute_txid())
.collect::<BTreeSet<Txid>>();
assert_eq!(
emitted_txids, exp_txids,
"all mempool txs should be emitted"
);
// second emission should be empty
assert!(
emitter.mempool()?.is_empty(),
"second emission should be empty"
);
// mine empty blocks + sync up our emitter -> we should still not re-emit
for _ in 0..BLOCKS_TO_MINE {
env.mine_empty_block()?;
}
while emitter.next_header()?.is_some() {}
assert!(
emitter.mempool()?.is_empty(),
"third emission, after chain tip is extended, should also be empty"
);
Ok(())
}
/// Ensure mempool tx is still re-emitted if [`Emitter`] has not reached the tx's introduction
/// height.
///
/// We introduce a mempool tx after each block, where blocks are empty (so they do not confirm
/// previous mempool txs). Then we emit blocks from [`Emitter`] (interleaving `mempool` calls). We
/// check that `mempool` always re-emits txs that were introduced at a height greater than the last
/// emitted block height.
#[test]
fn mempool_re_emits_if_tx_introduction_height_not_reached() -> anyhow::Result<()> {
const PREMINE_COUNT: usize = 101;
const MEMPOOL_TX_COUNT: usize = 21;
let env = TestEnv::new()?;
let mut emitter = Emitter::new(
env.rpc_client(),
CheckPoint::new(BlockId {
height: 0,
hash: env.rpc_client().get_block_hash(0)?,
}),
0,
);
// mine blocks to get initial balance, sync emitter up to tip
let addr = env
.rpc_client()
.get_new_address(None, None)?
.assume_checked();
env.mine_blocks(PREMINE_COUNT, Some(addr.clone()))?;
while emitter.next_header()?.is_some() {}
// mine blocks to introduce txs to mempool at different heights
let tx_introductions = (0..MEMPOOL_TX_COUNT)
.map(|_| -> anyhow::Result<_> {
let (height, _) = env.mine_empty_block()?;
let txid = env.send(&addr, Amount::from_sat(2100))?;
Ok((height, txid))
})
.collect::<anyhow::Result<BTreeSet<_>>>()?;
assert_eq!(
emitter
.mempool()?
.into_iter()
.map(|(tx, _)| tx.compute_txid())
.collect::<BTreeSet<_>>(),
tx_introductions.iter().map(|&(_, txid)| txid).collect(),
"first mempool emission should include all txs",
);
assert_eq!(
emitter
.mempool()?
.into_iter()
.map(|(tx, _)| tx.compute_txid())
.collect::<BTreeSet<_>>(),
tx_introductions.iter().map(|&(_, txid)| txid).collect(),
"second mempool emission should still include all txs",
);
// At this point, the emitter has seen all mempool transactions. It should only re-emit those
// with introduction heights greater than the emitter's last-emitted block tip.
while let Some(emission) = emitter.next_header()? {
let height = emission.block_height();
// We call `mempool()` twice.
// The second call (at height `h`) should skip the tx introduced at height `h`.
for try_index in 0..2 {
let exp_txids = tx_introductions
.range((height as usize + try_index, Txid::all_zeros())..)
.map(|&(_, txid)| txid)
.collect::<BTreeSet<_>>();
let emitted_txids = emitter
.mempool()?
.into_iter()
.map(|(tx, _)| tx.compute_txid())
.collect::<BTreeSet<_>>();
assert_eq!(
emitted_txids, exp_txids,
"\n emission {} (try {}) must only contain txs introduced at that height or lower: \n\t missing: {:?} \n\t extra: {:?}",
height,
try_index,
exp_txids
.difference(&emitted_txids)
.map(|txid| (txid, tx_introductions.iter().find_map(|(h, id)| if id == txid { Some(h) } else { None }).unwrap()))
.collect::<Vec<_>>(),
emitted_txids
.difference(&exp_txids)
.map(|txid| (txid, tx_introductions.iter().find_map(|(h, id)| if id == txid { Some(h) } else { None }).unwrap()))
.collect::<Vec<_>>(),
);
}
}
Ok(())
}
/// Ensure we force re-emit all mempool txs after reorg.
#[test]
fn mempool_during_reorg() -> anyhow::Result<()> {
const TIP_DIFF: usize = 10;
const PREMINE_COUNT: usize = 101;
let env = TestEnv::new()?;
let mut emitter = Emitter::new(
env.rpc_client(),
CheckPoint::new(BlockId {
height: 0,
hash: env.rpc_client().get_block_hash(0)?,
}),
0,
);
// mine blocks to get initial balance
let addr = env
.rpc_client()
.get_new_address(None, None)?
.assume_checked();
env.mine_blocks(PREMINE_COUNT, Some(addr.clone()))?;
// introduce mempool tx at each block extension
for _ in 0..TIP_DIFF {
env.mine_empty_block()?;
env.send(&addr, Amount::from_sat(2100))?;
}
// sync emitter to tip, first mempool emission should include all txs (as we haven't emitted
// from the mempool yet)
while emitter.next_header()?.is_some() {}
assert_eq!(
emitter
.mempool()?
.into_iter()
.map(|(tx, _)| tx.compute_txid())
.collect::<BTreeSet<_>>(),
env.rpc_client()
.get_raw_mempool()?
.into_iter()
.collect::<BTreeSet<_>>(),
"first mempool emission should include all txs",
);
// perform reorgs at different heights, these reorgs will not confirm transactions in the
// mempool
for reorg_count in 1..TIP_DIFF {
println!("REORG COUNT: {}", reorg_count);
env.reorg_empty_blocks(reorg_count)?;
// This is a map of mempool txids to the tip height at which the tx was introduced to the
// mempool. We recalculate this on every loop, as reorgs may evict transactions from the
// mempool. We use the introduction height to determine whether we expect a tx to appear in
// a mempool emission.
// TODO: How can we have reorg logic in `TestEnv` NOT blacklist old blocks first?
let tx_introductions = dbg!(env
.rpc_client()
.get_raw_mempool_verbose()?
.into_iter()
.map(|(txid, entry)| (txid, entry.height as usize))
.collect::<BTreeMap<_, _>>());
// `next_header` emits the replacement block of the reorg
if let Some(emission) = emitter.next_header()? {
let height = emission.block_height();
println!("\t- replacement height: {}", height);
// the mempool emission (that follows the first block emission after reorg) should only
// include mempool txs introduced at reorg height or greater
let mempool = emitter
.mempool()?
.into_iter()
.map(|(tx, _)| tx.compute_txid())
.collect::<BTreeSet<_>>();
let exp_mempool = tx_introductions
.iter()
.filter(|(_, &intro_h)| intro_h >= (height as usize))
.map(|(&txid, _)| txid)
.collect::<BTreeSet<_>>();
assert_eq!(
mempool, exp_mempool,
"the first mempool emission after reorg should only include mempool txs introduced at reorg height or greater"
);
let mempool = emitter
.mempool()?
.into_iter()
.map(|(tx, _)| tx.compute_txid())
.collect::<BTreeSet<_>>();
let exp_mempool = tx_introductions
.iter()
.filter(|&(_, &intro_height)| intro_height > (height as usize))
.map(|(&txid, _)| txid)
.collect::<BTreeSet<_>>();
assert_eq!(
mempool, exp_mempool,
"following mempool emissions after reorg should exclude mempool introduction heights <= last emitted block height: \n\t missing: {:?} \n\t extra: {:?}",
exp_mempool
.difference(&mempool)
.map(|txid| (txid, tx_introductions.get(txid).unwrap()))
.collect::<Vec<_>>(),
mempool
.difference(&exp_mempool)
.map(|txid| (txid, tx_introductions.get(txid).unwrap()))
.collect::<Vec<_>>(),
);
}
// sync emitter to tip
while emitter.next_header()?.is_some() {}
}
Ok(())
}
/// If the blockchain reorg includes the start height, emit the new start-height block
///
/// 1. mine 101 blocks
/// 2. emit blocks 99a, 100a
/// 3. invalidate blocks 99a, 100a, 101a
/// 4. mine new blocks 99b, 100b, 101b
/// 5. emit block 99b
///
/// The block hash of 99b should be different from that of 99a, but their previous block hashes should
/// be the same.
#[test]
fn no_agreement_point() -> anyhow::Result<()> {
const PREMINE_COUNT: usize = 101;
let env = TestEnv::new()?;
// start height is 99
let mut emitter = Emitter::new(
env.rpc_client(),
CheckPoint::new(BlockId {
height: 0,
hash: env.rpc_client().get_block_hash(0)?,
}),
(PREMINE_COUNT - 2) as u32,
);
// mine 101 blocks
env.mine_blocks(PREMINE_COUNT, None)?;
// emit block 99a
let block_header_99a = emitter.next_header()?.expect("block 99a header").block;
let block_hash_99a = block_header_99a.block_hash();
let block_hash_98a = block_header_99a.prev_blockhash;
// emit block 100a
let block_header_100a = emitter.next_header()?.expect("block 100a header").block;
let block_hash_100a = block_header_100a.block_hash();
// get hash for block 101a
let block_hash_101a = env.rpc_client().get_block_hash(101)?;
// invalidate blocks 99a, 100a, 101a
env.rpc_client().invalidate_block(&block_hash_99a)?;
env.rpc_client().invalidate_block(&block_hash_100a)?;
env.rpc_client().invalidate_block(&block_hash_101a)?;
// mine new blocks 99b, 100b, 101b
env.mine_blocks(3, None)?;
// emit block header 99b
let block_header_99b = emitter.next_header()?.expect("block 99b header").block;
let block_hash_99b = block_header_99b.block_hash();
let block_hash_98b = block_header_99b.prev_blockhash;
assert_ne!(block_hash_99a, block_hash_99b);
assert_eq!(block_hash_98a, block_hash_98b);
Ok(())
}


@@ -1,8 +1,8 @@
[package]
name = "bdk_chain"
version = "0.17.0"
version = "0.4.0"
edition = "2021"
rust-version = "1.63"
rust-version = "1.57"
homepage = "https://bitcoindevkit.org"
repository = "https://github.com/bitcoindevkit/bdk"
documentation = "https://docs.rs/bdk_chain"
@@ -13,23 +13,18 @@ readme = "README.md"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bitcoin = { version = "0.32.0", default-features = false }
serde_crate = { package = "serde", version = "1", optional = true, features = ["derive", "rc"] }
bitcoin = { version = "0.29" }
serde_crate = { package = "serde", version = "1", optional = true, features = ["derive"] }
# Use hashbrown as a feature flag to have HashSet and HashMap from it.
hashbrown = { version = "0.9.1", optional = true, features = ["serde"] }
miniscript = { version = "12.0.0", optional = true, default-features = false }
# Feature dependencies
rusqlite_crate = { package = "rusqlite", version = "0.31.0", features = ["bundled"], optional = true }
serde_json = {version = "1", optional = true }
# note: version 0.13 breaks our MSRV.
hashbrown = { version = "0.12", optional = true, features = ["serde"] }
miniscript = { version = "9.0.0", optional = true }
[dev-dependencies]
rand = "0.8"
proptest = "1.2.0"
[features]
default = ["std", "miniscript"]
std = ["bitcoin/std", "miniscript?/std"]
serde = ["serde_crate", "bitcoin/serde", "miniscript?/serde"]
rusqlite = ["std", "rusqlite_crate", "serde", "serde_json"]
std = []
serde = ["serde_crate", "bitcoin/serde" ]


@@ -1,57 +0,0 @@
use bitcoin::Amount;
/// Balance, differentiated into various categories.
#[derive(Debug, PartialEq, Eq, Clone, Default)]
#[cfg_attr(
feature = "serde",
derive(serde::Deserialize, serde::Serialize),
serde(crate = "serde_crate",)
)]
pub struct Balance {
/// All coinbase outputs not yet matured
pub immature: Amount,
/// Unconfirmed UTXOs generated by a wallet tx
pub trusted_pending: Amount,
/// Unconfirmed UTXOs received from an external wallet
pub untrusted_pending: Amount,
/// Confirmed and immediately spendable balance
pub confirmed: Amount,
}
impl Balance {
/// Get sum of trusted_pending and confirmed coins.
///
/// This is the balance you can spend right now that shouldn't get cancelled via another party
/// double spending it.
pub fn trusted_spendable(&self) -> Amount {
self.confirmed + self.trusted_pending
}
/// Get the whole balance visible to the wallet.
pub fn total(&self) -> Amount {
self.confirmed + self.trusted_pending + self.untrusted_pending + self.immature
}
}
impl core::fmt::Display for Balance {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(
f,
"{{ immature: {}, trusted_pending: {}, untrusted_pending: {}, confirmed: {} }}",
self.immature, self.trusted_pending, self.untrusted_pending, self.confirmed
)
}
}
impl core::ops::Add for Balance {
type Output = Self;
fn add(self, other: Self) -> Self {
Self {
immature: self.immature + other.immature,
trusted_pending: self.trusted_pending + other.trusted_pending,
untrusted_pending: self.untrusted_pending + other.untrusted_pending,
confirmed: self.confirmed + other.confirmed,
}
}
}
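// A minimal usage sketch (hypothetical test, not part of the original diff):
// summing two `Balance` values via the `Add` impl above and querying the
// spendable portion. Assumes `bitcoin::Amount` is in scope via the import at
// the top of this file.
#[cfg(test)]
mod balance_example {
    use super::*;
    #[test]
    fn add_and_query() {
        let a = Balance {
            confirmed: Amount::from_sat(1_000),
            ..Default::default()
        };
        let b = Balance {
            trusted_pending: Amount::from_sat(500),
            ..Default::default()
        };
        let sum = a + b;
        // trusted_spendable = confirmed + trusted_pending
        assert_eq!(sum.trusted_spendable(), Amount::from_sat(1_500));
        assert_eq!(sum.total(), Amount::from_sat(1_500));
    }
}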

View File

@@ -1,45 +1,75 @@
use bitcoin::{hashes::Hash, BlockHash, OutPoint, TxOut, Txid};
use crate::{Anchor, AnchorFromBlockPosition, COINBASE_MATURITY};
use crate::{
sparse_chain::{self, ChainPosition},
COINBASE_MATURITY,
};
/// Represents the observed position of some chain data.
///
/// The generic `A` should be a [`Anchor`] implementation.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, core::hash::Hash)]
pub enum ChainPosition<A> {
/// The chain data is seen as confirmed, and is anchored by `A`.
Confirmed(A),
/// The chain data is not confirmed and last seen in the mempool at this timestamp.
Unconfirmed(u64),
/// Represents the height at which a transaction is confirmed.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(
feature = "serde",
derive(serde::Deserialize, serde::Serialize),
serde(crate = "serde_crate")
)]
pub enum TxHeight {
Confirmed(u32),
Unconfirmed,
}
impl<A> ChainPosition<A> {
/// Returns whether [`ChainPosition`] is confirmed or not.
impl Default for TxHeight {
fn default() -> Self {
Self::Unconfirmed
}
}
impl core::fmt::Display for TxHeight {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
Self::Confirmed(h) => core::write!(f, "confirmed_at({})", h),
Self::Unconfirmed => core::write!(f, "unconfirmed"),
}
}
}
impl From<Option<u32>> for TxHeight {
fn from(opt: Option<u32>) -> Self {
match opt {
Some(h) => Self::Confirmed(h),
None => Self::Unconfirmed,
}
}
}
impl From<TxHeight> for Option<u32> {
fn from(height: TxHeight) -> Self {
match height {
TxHeight::Confirmed(h) => Some(h),
TxHeight::Unconfirmed => None,
}
}
}
impl crate::sparse_chain::ChainPosition for TxHeight {
fn height(&self) -> TxHeight {
*self
}
fn max_ord_of_height(height: TxHeight) -> Self {
height
}
fn min_ord_of_height(height: TxHeight) -> Self {
height
}
}
impl TxHeight {
pub fn is_confirmed(&self) -> bool {
matches!(self, Self::Confirmed(_))
}
}
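// A brief sketch (hypothetical test, not part of the original diff) showing
// the `Option<u32>` conversions and the `is_confirmed` helper defined above.
#[cfg(test)]
mod tx_height_example {
    use super::*;
    #[test]
    fn conversions() {
        let confirmed = TxHeight::from(Some(100));
        assert!(confirmed.is_confirmed());
        // round-trip back through `Option<u32>`
        assert_eq!(Option::<u32>::from(confirmed), Some(100));
        assert_eq!(TxHeight::from(None), TxHeight::Unconfirmed);
    }
}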
impl<A: Clone> ChainPosition<&A> {
/// Maps a [`ChainPosition<&A>`] into a [`ChainPosition<A>`] by cloning the contents.
pub fn cloned(self) -> ChainPosition<A> {
match self {
ChainPosition::Confirmed(a) => ChainPosition::Confirmed(a.clone()),
ChainPosition::Unconfirmed(last_seen) => ChainPosition::Unconfirmed(last_seen),
}
}
}
impl<A: Anchor> ChainPosition<A> {
/// Determines the upper bound of the confirmation height.
pub fn confirmation_height_upper_bound(&self) -> Option<u32> {
match self {
ChainPosition::Confirmed(a) => Some(a.confirmation_height_upper_bound()),
ChainPosition::Unconfirmed(_) => None,
}
}
}
/// Block height and timestamp at which a transaction is confirmed.
#[derive(Debug, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)]
#[cfg_attr(
@@ -48,49 +78,47 @@ impl<A: Anchor> ChainPosition<A> {
serde(crate = "serde_crate")
)]
pub enum ConfirmationTime {
/// The transaction is confirmed
Confirmed {
/// Confirmation height.
height: u32,
/// Confirmation time in unix seconds.
time: u64,
},
/// The transaction is unconfirmed
Unconfirmed {
/// The last-seen timestamp in unix seconds.
last_seen: u64,
},
Confirmed { height: u32, time: u64 },
Unconfirmed,
}
impl sparse_chain::ChainPosition for ConfirmationTime {
fn height(&self) -> TxHeight {
match self {
ConfirmationTime::Confirmed { height, .. } => TxHeight::Confirmed(*height),
ConfirmationTime::Unconfirmed => TxHeight::Unconfirmed,
}
}
fn max_ord_of_height(height: TxHeight) -> Self {
match height {
TxHeight::Confirmed(height) => Self::Confirmed {
height,
time: u64::MAX,
},
TxHeight::Unconfirmed => Self::Unconfirmed,
}
}
fn min_ord_of_height(height: TxHeight) -> Self {
match height {
TxHeight::Confirmed(height) => Self::Confirmed {
height,
time: u64::MIN,
},
TxHeight::Unconfirmed => Self::Unconfirmed,
}
}
}
impl ConfirmationTime {
/// Construct an unconfirmed variant using the given `last_seen` time in unix seconds.
pub fn unconfirmed(last_seen: u64) -> Self {
Self::Unconfirmed { last_seen }
}
/// Returns whether [`ConfirmationTime`] is the confirmed variant.
pub fn is_confirmed(&self) -> bool {
matches!(self, Self::Confirmed { .. })
}
}
impl From<ChainPosition<ConfirmationBlockTime>> for ConfirmationTime {
fn from(observed_as: ChainPosition<ConfirmationBlockTime>) -> Self {
match observed_as {
ChainPosition::Confirmed(a) => Self::Confirmed {
height: a.block_id.height,
time: a.confirmation_time,
},
ChainPosition::Unconfirmed(last_seen) => Self::Unconfirmed { last_seen },
}
}
}
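// Illustrative sketch (assumed, not from the diff) of the newer
// struct-variant API: constructing both `ConfirmationTime` variants and
// checking their confirmation status.
#[cfg(test)]
mod confirmation_time_example {
    use super::*;
    #[test]
    fn variants() {
        let unconf = ConfirmationTime::unconfirmed(1_700_000_000);
        assert!(!unconf.is_confirmed());
        let conf = ConfirmationTime::Confirmed {
            height: 100,
            time: 1_700_000_000,
        };
        assert!(conf.is_confirmed());
    }
}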
/// A reference to a block in the canonical chain.
///
/// `BlockId` implements [`Anchor`]. When a transaction is anchored to `BlockId`, the confirmation
/// block and anchor block are the same block.
#[derive(Debug, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)]
#[derive(Debug, Clone, PartialEq, Eq, Copy, PartialOrd, Ord)]
#[cfg_attr(
feature = "serde",
derive(serde::Deserialize, serde::Serialize),
@@ -103,23 +131,11 @@ pub struct BlockId {
pub hash: BlockHash,
}
impl Anchor for BlockId {
fn anchor_block(&self) -> Self {
*self
}
}
impl AnchorFromBlockPosition for BlockId {
fn from_block_position(_block: &bitcoin::Block, block_id: BlockId, _tx_pos: usize) -> Self {
block_id
}
}
impl Default for BlockId {
fn default() -> Self {
Self {
height: Default::default(),
hash: BlockHash::all_zeros(),
hash: BlockHash::from_inner([0u8; 32]),
}
}
}
@@ -145,74 +161,51 @@ impl From<(&u32, &BlockHash)> for BlockId {
}
}
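// Short sketch (hypothetical test): because `BlockId` implements `Anchor`,
// it anchors to itself, i.e. the anchor block is the confirmation block.
#[cfg(test)]
mod block_id_example {
    use super::*;
    #[test]
    fn anchors_to_itself() {
        let block = BlockId {
            height: 42,
            ..Default::default()
        };
        assert_eq!(block.anchor_block(), block);
    }
}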
/// An [`Anchor`] implementation that also records the exact confirmation time of the transaction.
///
/// Refer to [`Anchor`] for more details.
#[derive(Debug, Default, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)]
#[cfg_attr(
feature = "serde",
derive(serde::Deserialize, serde::Serialize),
serde(crate = "serde_crate")
)]
pub struct ConfirmationBlockTime {
/// The anchor block.
pub block_id: BlockId,
/// The confirmation time of the transaction being anchored.
pub confirmation_time: u64,
}
impl Anchor for ConfirmationBlockTime {
fn anchor_block(&self) -> BlockId {
self.block_id
}
fn confirmation_height_upper_bound(&self) -> u32 {
self.block_id.height
}
}
impl AnchorFromBlockPosition for ConfirmationBlockTime {
fn from_block_position(block: &bitcoin::Block, block_id: BlockId, _tx_pos: usize) -> Self {
Self {
block_id,
confirmation_time: block.header.time as _,
}
}
}
/// A `TxOut` with as much data as we can retrieve about it.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct FullTxOut<A> {
/// The position of the transaction in `outpoint` in the overall chain.
pub chain_position: ChainPosition<A>,
#[derive(Debug, Clone, PartialEq)]
pub struct FullTxOut<I> {
/// The location of the `TxOut`.
pub outpoint: OutPoint,
/// The `TxOut`.
pub txout: TxOut,
/// The position of the transaction in `outpoint` in the overall chain.
pub chain_position: I,
/// The txid and chain position of the transaction (if any) that has spent this output.
pub spent_by: Option<(ChainPosition<A>, Txid)>,
pub spent_by: Option<(I, Txid)>,
/// Whether this output is on a coinbase transaction.
pub is_on_coinbase: bool,
}
impl<A: Anchor> FullTxOut<A> {
/// Whether the `txout` is considered mature.
impl<I: ChainPosition> FullTxOut<I> {
/// Whether the utxo is/was/will be spendable at `height`.
///
/// Depending on the implementation of [`confirmation_height_upper_bound`] in [`Anchor`], this
/// method may return false negatives. In other words, the interpreted confirmation count may be
/// less than the actual value.
///
/// [`confirmation_height_upper_bound`]: Anchor::confirmation_height_upper_bound
pub fn is_mature(&self, tip: u32) -> bool {
/// It is spendable if it is not an immature coinbase output and no spending tx has been
/// confirmed by that height.
pub fn is_spendable_at(&self, height: u32) -> bool {
if !self.is_mature(height) {
return false;
}
if self.chain_position.height() > TxHeight::Confirmed(height) {
return false;
}
match &self.spent_by {
Some((spending_height, _)) => spending_height.height() > TxHeight::Confirmed(height),
None => true,
}
}
pub fn is_mature(&self, height: u32) -> bool {
if self.is_on_coinbase {
let tx_height = match &self.chain_position {
ChainPosition::Confirmed(anchor) => anchor.confirmation_height_upper_bound(),
ChainPosition::Unconfirmed(_) => {
let tx_height = match self.chain_position.height() {
TxHeight::Confirmed(tx_height) => tx_height,
TxHeight::Unconfirmed => {
debug_assert!(false, "coinbase tx can never be unconfirmed");
return false;
}
};
let age = tip.saturating_sub(tx_height);
let age = height.saturating_sub(tx_height);
if age + 1 < COINBASE_MATURITY {
return false;
}
@@ -220,68 +213,6 @@ impl<A: Anchor> FullTxOut<A> {
true
}
/// Whether the utxo is/was/will be spendable with chain `tip`.
///
/// This method does not take into account the lock time.
///
/// Depending on the implementation of [`confirmation_height_upper_bound`] in [`Anchor`], this
/// method may return false negatives. In other words, the interpreted confirmation count may be
/// less than the actual value.
///
/// [`confirmation_height_upper_bound`]: Anchor::confirmation_height_upper_bound
pub fn is_confirmed_and_spendable(&self, tip: u32) -> bool {
if !self.is_mature(tip) {
return false;
}
let confirmation_height = match &self.chain_position {
ChainPosition::Confirmed(anchor) => anchor.confirmation_height_upper_bound(),
ChainPosition::Unconfirmed(_) => return false,
};
if confirmation_height > tip {
return false;
}
// if the spending tx is confirmed within tip height, the txout is no longer spendable
if let Some((ChainPosition::Confirmed(spending_anchor), _)) = &self.spent_by {
if spending_anchor.anchor_block().height <= tip {
return false;
}
}
true
}
}
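// Worked example (illustrative, assuming COINBASE_MATURITY = 100): a coinbase
// output confirmed at height 10 has `age = tip - 10`. At tip = 108 we get
// age + 1 = 99 < 100, so `is_mature` returns false; at tip = 109 we get
// age + 1 = 100 and the output is considered mature.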
#[cfg(test)]
mod test {
use super::*;
#[test]
fn chain_position_ord() {
let unconf1 = ChainPosition::<ConfirmationBlockTime>::Unconfirmed(10);
let unconf2 = ChainPosition::<ConfirmationBlockTime>::Unconfirmed(20);
let conf1 = ChainPosition::Confirmed(ConfirmationBlockTime {
confirmation_time: 20,
block_id: BlockId {
height: 9,
..Default::default()
},
});
let conf2 = ChainPosition::Confirmed(ConfirmationBlockTime {
confirmation_time: 15,
block_id: BlockId {
height: 12,
..Default::default()
},
});
assert!(unconf2 > unconf1, "higher last_seen means higher ord");
assert!(unconf1 > conf1, "unconfirmed is higher ord than confirmed");
assert!(
conf2 > conf1,
"confirmation_height is higher then it should be higher ord"
);
}
}
// TODO: make test

View File

@@ -0,0 +1,639 @@
//! Module for structures that combine the features of [`sparse_chain`] and [`tx_graph`].
use crate::{
collections::HashSet,
sparse_chain::{self, ChainPosition, SparseChain},
tx_graph::{self, TxGraph},
BlockId, ForEachTxOut, FullTxOut, TxHeight,
};
use alloc::{string::ToString, vec::Vec};
use bitcoin::{OutPoint, Transaction, TxOut, Txid};
use core::fmt::Debug;
/// A consistent combination of a [`SparseChain<P>`] and a [`TxGraph<T>`].
///
/// `SparseChain` only keeps track of transaction ids and their position in the chain, but you often
/// want to store the full transactions as well. Additionally, you want to make sure that everything
/// in the chain is consistent with the full transaction data. `ChainGraph` enforces these two
/// invariants:
///
/// 1. Every transaction that is in the chain is also in the graph (you always have the full
/// transaction).
/// 2. No transactions in the chain conflict with each other, i.e., they don't double spend each
/// other or have ancestors that double spend each other.
///
/// Note that the `ChainGraph` guarantees that every transaction in the `chain` also has its full
/// transaction in the `graph`, but not the other way around. Transactions may fall out of the
/// *chain* (via re-org or mempool eviction) but will remain in the *graph*.
#[derive(Clone, Debug, PartialEq)]
pub struct ChainGraph<P = TxHeight> {
chain: SparseChain<P>,
graph: TxGraph,
}
impl<P> Default for ChainGraph<P> {
fn default() -> Self {
Self {
chain: Default::default(),
graph: Default::default(),
}
}
}
impl<P> AsRef<SparseChain<P>> for ChainGraph<P> {
fn as_ref(&self) -> &SparseChain<P> {
&self.chain
}
}
impl<P> AsRef<TxGraph> for ChainGraph<P> {
fn as_ref(&self) -> &TxGraph {
&self.graph
}
}
impl<P> AsRef<ChainGraph<P>> for ChainGraph<P> {
fn as_ref(&self) -> &ChainGraph<P> {
self
}
}
impl<P> ChainGraph<P> {
/// Returns a reference to the internal [`SparseChain`].
pub fn chain(&self) -> &SparseChain<P> {
&self.chain
}
/// Returns a reference to the internal [`TxGraph`].
pub fn graph(&self) -> &TxGraph {
&self.graph
}
}
impl<P> ChainGraph<P>
where
P: ChainPosition,
{
/// Create a new chain graph from a `chain` and a `graph`.
///
/// There are two reasons this can return an `Err`:
///
/// 1. There is a transaction in the `chain` that does not have its corresponding full
/// transaction in `graph`.
/// 2. The `chain` has two transactions that are allegedly in it, but they conflict in the `graph`
/// (so could not possibly be in the same chain).
pub fn new(chain: SparseChain<P>, graph: TxGraph) -> Result<Self, NewError<P>> {
let mut missing = HashSet::default();
for (pos, txid) in chain.txids() {
if let Some(tx) = graph.get_tx(*txid) {
let conflict = graph
.walk_conflicts(tx, |_, txid| Some((chain.tx_position(txid)?.clone(), txid)))
.next();
if let Some((conflict_pos, conflict)) = conflict {
return Err(NewError::Conflict {
a: (pos.clone(), *txid),
b: (conflict_pos, conflict),
});
}
} else {
missing.insert(*txid);
}
}
if !missing.is_empty() {
return Err(NewError::Missing(missing));
}
Ok(Self { chain, graph })
}
/// Take an update in the form of a [`SparseChain<P>`][`SparseChain`] and attempt to turn it
/// into a chain graph by filling in full transactions from `self` and from `new_txs`.
///
/// This is useful when interacting with services like an electrum server which returns a list
/// of txids and heights when calling [`script_get_history`], which can easily be inserted into a
/// [`SparseChain<TxHeight>`][`SparseChain`]. From there, you need to figure out which full
/// transactions you are missing in your chain graph and form `new_txs`. You then use
/// `inflate_update` to turn this into an update `ChainGraph<P>` and finally
/// use [`determine_changeset`] to generate the changeset from it.
///
/// [`SparseChain`]: crate::sparse_chain::SparseChain
/// [`script_get_history`]: https://docs.rs/electrum-client/latest/electrum_client/trait.ElectrumApi.html#tymethod.script_get_history
/// [`determine_changeset`]: Self::determine_changeset
pub fn inflate_update(
&self,
update: SparseChain<P>,
new_txs: impl IntoIterator<Item = Transaction>,
) -> Result<ChainGraph<P>, NewError<P>> {
let mut inflated_chain = SparseChain::default();
let mut inflated_graph = TxGraph::default();
for (height, hash) in update.checkpoints().clone().into_iter() {
let _ = inflated_chain
.insert_checkpoint(BlockId { height, hash })
.expect("must insert");
}
// [TODO] @evanlinjin: These need better comments
// - copy transactions that have changed positions into the graph
// - add new transactions to an inflated chain
for (pos, txid) in update.txids() {
match self.chain.tx_position(*txid) {
Some(original_pos) => {
if original_pos != pos {
let tx = self
.graph
.get_tx(*txid)
.expect("tx must exist as it is referenced in sparsechain")
.clone();
let _ = inflated_chain
.insert_tx(*txid, pos.clone())
.expect("must insert since this was already in update");
let _ = inflated_graph.insert_tx(tx);
}
}
None => {
let _ = inflated_chain
.insert_tx(*txid, pos.clone())
.expect("must insert since this was already in update");
}
}
}
for tx in new_txs {
let _ = inflated_graph.insert_tx(tx);
}
ChainGraph::new(inflated_chain, inflated_graph)
}
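// Hedged usage sketch (comment only, not from the diff). The intended
// electrum-style flow is roughly:
//
//     let update: SparseChain<TxHeight> = todo!("built from script_get_history results");
//     let new_txs: Vec<Transaction> = todo!("full txs missing from the local graph");
//     let inflated = chain_graph.inflate_update(update, new_txs)?;
//     let changeset = chain_graph.determine_changeset(&inflated)?;
//     chain_graph.apply_changeset(changeset);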
/// Gets the checkpoint limit.
///
/// Refer to [`SparseChain::checkpoint_limit`] for more.
pub fn checkpoint_limit(&self) -> Option<usize> {
self.chain.checkpoint_limit()
}
/// Sets the checkpoint limit.
///
/// Refer to [`SparseChain::set_checkpoint_limit`] for more.
pub fn set_checkpoint_limit(&mut self, limit: Option<usize>) {
self.chain.set_checkpoint_limit(limit)
}
/// Determines the changes required to invalidate checkpoints `from_height` (inclusive) and
/// above. Displaced transactions will have their positions moved to [`TxHeight::Unconfirmed`].
pub fn invalidate_checkpoints_preview(&self, from_height: u32) -> ChangeSet<P> {
ChangeSet {
chain: self.chain.invalidate_checkpoints_preview(from_height),
..Default::default()
}
}
/// Invalidate checkpoints `from_height` (inclusive) and above. Displaced transactions will be
/// re-positioned to [`TxHeight::Unconfirmed`].
///
/// This is equivalent to calling [`Self::invalidate_checkpoints_preview`] and
/// [`Self::apply_changeset`] in sequence.
pub fn invalidate_checkpoints(&mut self, from_height: u32) -> ChangeSet<P>
where
ChangeSet<P>: Clone,
{
let changeset = self.invalidate_checkpoints_preview(from_height);
self.apply_changeset(changeset.clone());
changeset
}
/// Get a transaction currently in the underlying [`SparseChain`].
///
/// This does not necessarily mean that it is *confirmed* in the blockchain; it might just be in
/// the unconfirmed transaction list within the [`SparseChain`].
pub fn get_tx_in_chain(&self, txid: Txid) -> Option<(&P, &Transaction)> {
let position = self.chain.tx_position(txid)?;
let full_tx = self.graph.get_tx(txid).expect("must exist");
Some((position, full_tx))
}
/// Determines the changes required to insert a transaction into the inner [`ChainGraph`] and
/// [`SparseChain`] at the given `position`.
///
/// If inserting it into the chain `position` will result in conflicts, the returned
/// [`ChangeSet`] should evict conflicting transactions.
pub fn insert_tx_preview(
&self,
tx: Transaction,
pos: P,
) -> Result<ChangeSet<P>, InsertTxError<P>> {
let mut changeset = ChangeSet {
chain: self.chain.insert_tx_preview(tx.txid(), pos)?,
graph: self.graph.insert_tx_preview(tx),
};
self.fix_conflicts(&mut changeset)?;
Ok(changeset)
}
/// Inserts [`Transaction`] at the given chain position.
///
/// This is equivalent to calling [`Self::insert_tx_preview`] and [`Self::apply_changeset`] in
/// sequence.
pub fn insert_tx(&mut self, tx: Transaction, pos: P) -> Result<ChangeSet<P>, InsertTxError<P>> {
let changeset = self.insert_tx_preview(tx, pos)?;
self.apply_changeset(changeset.clone());
Ok(changeset)
}
/// Determines the changes required to insert a [`TxOut`] into the internal [`TxGraph`].
pub fn insert_txout_preview(&self, outpoint: OutPoint, txout: TxOut) -> ChangeSet<P> {
ChangeSet {
chain: Default::default(),
graph: self.graph.insert_txout_preview(outpoint, txout),
}
}
/// Inserts a [`TxOut`] into the internal [`TxGraph`].
///
/// This is equivalent to calling [`Self::insert_txout_preview`] and [`Self::apply_changeset`]
/// in sequence.
pub fn insert_txout(&mut self, outpoint: OutPoint, txout: TxOut) -> ChangeSet<P> {
let changeset = self.insert_txout_preview(outpoint, txout);
self.apply_changeset(changeset.clone());
changeset
}
/// Determines the changes required to insert a `block_id` (a height and block hash) into the
/// chain.
///
/// If a checkpoint with a different hash already exists at that height, this will return an error.
pub fn insert_checkpoint_preview(
&self,
block_id: BlockId,
) -> Result<ChangeSet<P>, InsertCheckpointError> {
self.chain
.insert_checkpoint_preview(block_id)
.map(|chain_changeset| ChangeSet {
chain: chain_changeset,
..Default::default()
})
}
/// Inserts checkpoint into [`Self`].
///
/// This is equivalent to calling [`Self::insert_checkpoint_preview`] and
/// [`Self::apply_changeset`] in sequence.
pub fn insert_checkpoint(
&mut self,
block_id: BlockId,
) -> Result<ChangeSet<P>, InsertCheckpointError> {
let changeset = self.insert_checkpoint_preview(block_id)?;
self.apply_changeset(changeset.clone());
Ok(changeset)
}
/// Calculates the difference between self and `update` in the form of a [`ChangeSet`].
pub fn determine_changeset(
&self,
update: &ChainGraph<P>,
) -> Result<ChangeSet<P>, UpdateError<P>> {
let chain_changeset = self
.chain
.determine_changeset(&update.chain)
.map_err(UpdateError::Chain)?;
let mut changeset = ChangeSet {
chain: chain_changeset,
graph: self.graph.determine_additions(&update.graph),
};
self.fix_conflicts(&mut changeset)?;
Ok(changeset)
}
/// Given a transaction, return an iterator of `txid`s that conflict with it (spends at least
/// one of the same inputs). This iterator includes all descendants of conflicting transactions.
///
/// This method only returns conflicts that exist in the [`SparseChain`], as transactions not
/// included in the [`SparseChain`] are already considered evicted.
pub fn tx_conflicts_in_chain<'a>(
&'a self,
tx: &'a Transaction,
) -> impl Iterator<Item = (&'a P, Txid)> + 'a {
self.graph.walk_conflicts(tx, move |_, conflict_txid| {
self.chain
.tx_position(conflict_txid)
.map(|conflict_pos| (conflict_pos, conflict_txid))
})
}
/// Fix changeset conflicts.
///
/// **WARNING:** If there are any missing full txs, conflict resolution will not be complete. In
/// debug mode, this will result in a panic.
fn fix_conflicts(&self, changeset: &mut ChangeSet<P>) -> Result<(), UnresolvableConflict<P>> {
let mut chain_conflicts = vec![];
for (&txid, pos_change) in &changeset.chain.txids {
let pos = match pos_change {
Some(pos) => {
// Ignore txs that are still in the chain -- we only care about new ones
if self.chain.tx_position(txid).is_some() {
continue;
}
pos
}
// Ignore txids that are being deleted by the change (they can't conflict)
None => continue,
};
let mut full_tx = self.graph.get_tx(txid);
if full_tx.is_none() {
full_tx = changeset.graph.tx.iter().find(|tx| tx.txid() == txid)
}
debug_assert!(full_tx.is_some(), "should have full tx at this point");
let full_tx = match full_tx {
Some(full_tx) => full_tx,
None => continue,
};
for (conflict_pos, conflict_txid) in self.tx_conflicts_in_chain(full_tx) {
chain_conflicts.push((pos.clone(), txid, conflict_pos, conflict_txid))
}
}
for (update_pos, update_txid, conflicting_pos, conflicting_txid) in chain_conflicts {
// We have found a tx that conflicts with our update txid. Only allow this when the
// conflicting tx will be positioned as "unconfirmed" after the update is applied.
// If so, we will modify the changeset to evict the conflicting txid.
// determine the position of the conflicting txid after the current changeset is applied
let conflicting_new_pos = changeset
.chain
.txids
.get(&conflicting_txid)
.map(Option::as_ref)
.unwrap_or(Some(conflicting_pos));
match conflicting_new_pos {
None => {
// conflicting txid will be deleted, can ignore
}
Some(existing_new_pos) => match existing_new_pos.height() {
TxHeight::Confirmed(_) => {
// the new position of the conflicting tx is "confirmed", therefore cannot be
// evicted, return error
return Err(UnresolvableConflict {
already_confirmed_tx: (conflicting_pos.clone(), conflicting_txid),
update_tx: (update_pos, update_txid),
});
}
TxHeight::Unconfirmed => {
// the new position of the conflicting tx is "unconfirmed", therefore it can
// be evicted
changeset.chain.txids.insert(conflicting_txid, None);
}
},
};
}
Ok(())
}
/// Applies `changeset` to `self`.
///
/// **Warning**: this method assumes that the changeset is correctly formed. If it is not, the
/// chain graph may behave incorrectly in the future and panic unexpectedly.
pub fn apply_changeset(&mut self, changeset: ChangeSet<P>) {
self.chain.apply_changeset(changeset.chain);
self.graph.apply_additions(changeset.graph);
}
/// Applies the `update` chain graph. Note this is shorthand for calling
/// [`Self::determine_changeset()`] and [`Self::apply_changeset()`] in sequence.
pub fn apply_update(&mut self, update: ChainGraph<P>) -> Result<ChangeSet<P>, UpdateError<P>> {
let changeset = self.determine_changeset(&update)?;
self.apply_changeset(changeset.clone());
Ok(changeset)
}
/// Get the full transaction output at an outpoint if it exists in the chain and the graph.
pub fn full_txout(&self, outpoint: OutPoint) -> Option<FullTxOut<P>> {
self.chain.full_txout(&self.graph, outpoint)
}
/// Iterate over the full transactions and their position in the chain ordered by their position
/// in ascending order.
pub fn transactions_in_chain(&self) -> impl DoubleEndedIterator<Item = (&P, &Transaction)> {
self.chain
.txids()
.map(move |(pos, txid)| (pos, self.graph.get_tx(*txid).expect("must exist")))
}
/// Find the transaction in the chain that spends `outpoint`.
///
/// This uses the input/output relationships in the internal `graph`. Note that the transaction
/// which includes `outpoint` does not need to be in the `graph` or the `chain` for this to
/// return `Some(_)`.
pub fn spent_by(&self, outpoint: OutPoint) -> Option<(&P, Txid)> {
self.chain.spent_by(&self.graph, outpoint)
}
/// Whether the chain graph contains any data whatsoever.
pub fn is_empty(&self) -> bool {
self.chain.is_empty() && self.graph.is_empty()
}
}
/// Represents changes to [`ChainGraph`].
///
/// This is essentially a combination of [`sparse_chain::ChangeSet`] and [`tx_graph::Additions`].
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(
feature = "serde",
derive(serde::Deserialize, serde::Serialize),
serde(
crate = "serde_crate",
bound(
deserialize = "P: serde::Deserialize<'de>",
serialize = "P: serde::Serialize"
)
)
)]
#[must_use]
pub struct ChangeSet<P> {
pub chain: sparse_chain::ChangeSet<P>,
pub graph: tx_graph::Additions,
}
impl<P> ChangeSet<P> {
/// Returns `true` if this [`ChangeSet`] records no changes.
pub fn is_empty(&self) -> bool {
self.chain.is_empty() && self.graph.is_empty()
}
/// Returns `true` if this [`ChangeSet`] contains transaction evictions.
pub fn contains_eviction(&self) -> bool {
self.chain
.txids
.iter()
.any(|(_, new_pos)| new_pos.is_none())
}
/// Appends the changes in `other` into self such that applying `self` afterward has the same
/// effect as sequentially applying the original `self` and `other`.
pub fn append(&mut self, other: ChangeSet<P>)
where
P: ChainPosition,
{
self.chain.append(other.chain);
self.graph.append(other.graph);
}
}
impl<P> Default for ChangeSet<P> {
fn default() -> Self {
Self {
chain: Default::default(),
graph: Default::default(),
}
}
}
impl<P> ForEachTxOut for ChainGraph<P> {
fn for_each_txout(&self, f: impl FnMut((OutPoint, &TxOut))) {
self.graph.for_each_txout(f)
}
}
impl<P> ForEachTxOut for ChangeSet<P> {
fn for_each_txout(&self, f: impl FnMut((OutPoint, &TxOut))) {
self.graph.for_each_txout(f)
}
}
/// Error that may occur when calling [`ChainGraph::new`].
#[derive(Clone, Debug, PartialEq)]
pub enum NewError<P> {
/// Two transactions within the sparse chain conflicted with each other
Conflict { a: (P, Txid), b: (P, Txid) },
/// One or more transactions in the chain were not in the graph
Missing(HashSet<Txid>),
}
impl<P: core::fmt::Debug> core::fmt::Display for NewError<P> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
NewError::Conflict { a, b } => write!(
f,
"Unable to inflate sparse chain to chain graph since transactions {:?} and {:?}",
a, b
),
NewError::Missing(missing) => write!(
f,
"missing full transactions for {}",
missing
.iter()
.map(|txid| txid.to_string())
.collect::<Vec<_>>()
.join(", ")
),
}
}
}
#[cfg(feature = "std")]
impl<P: core::fmt::Debug> std::error::Error for NewError<P> {}
/// Error that may occur when inserting a transaction.
///
/// Refer to [`ChainGraph::insert_tx_preview`] and [`ChainGraph::insert_tx`].
#[derive(Clone, Debug, PartialEq)]
pub enum InsertTxError<P> {
Chain(sparse_chain::InsertTxError<P>),
UnresolvableConflict(UnresolvableConflict<P>),
}
impl<P: core::fmt::Debug> core::fmt::Display for InsertTxError<P> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
InsertTxError::Chain(inner) => core::fmt::Display::fmt(inner, f),
InsertTxError::UnresolvableConflict(inner) => core::fmt::Display::fmt(inner, f),
}
}
}
impl<P> From<sparse_chain::InsertTxError<P>> for InsertTxError<P> {
fn from(inner: sparse_chain::InsertTxError<P>) -> Self {
Self::Chain(inner)
}
}
#[cfg(feature = "std")]
impl<P: core::fmt::Debug> std::error::Error for InsertTxError<P> {}
/// A nice alias of [`sparse_chain::InsertCheckpointError`].
pub type InsertCheckpointError = sparse_chain::InsertCheckpointError;
/// Represents an update failure.
#[derive(Clone, Debug, PartialEq)]
pub enum UpdateError<P> {
/// The update chain was inconsistent with the existing chain
Chain(sparse_chain::UpdateError<P>),
/// A transaction in the update spent the same input as an already confirmed transaction
UnresolvableConflict(UnresolvableConflict<P>),
}
impl<P: core::fmt::Debug> core::fmt::Display for UpdateError<P> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
UpdateError::Chain(inner) => core::fmt::Display::fmt(inner, f),
UpdateError::UnresolvableConflict(inner) => core::fmt::Display::fmt(inner, f),
}
}
}
impl<P> From<sparse_chain::UpdateError<P>> for UpdateError<P> {
fn from(inner: sparse_chain::UpdateError<P>) -> Self {
Self::Chain(inner)
}
}
#[cfg(feature = "std")]
impl<P: core::fmt::Debug> std::error::Error for UpdateError<P> {}
/// Represents an unresolvable conflict between an update's transaction and an
/// already-confirmed transaction.
#[derive(Clone, Debug, PartialEq)]
pub struct UnresolvableConflict<P> {
pub already_confirmed_tx: (P, Txid),
pub update_tx: (P, Txid),
}
impl<P: core::fmt::Debug> core::fmt::Display for UnresolvableConflict<P> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
let Self {
already_confirmed_tx,
update_tx,
} = self;
write!(f, "update transaction {} at height {:?} conflicts with an already confirmed transaction {} at height {:?}",
update_tx.1, update_tx.0, already_confirmed_tx.1, already_confirmed_tx.0)
}
}
impl<P> From<UnresolvableConflict<P>> for UpdateError<P> {
fn from(inner: UnresolvableConflict<P>) -> Self {
Self::UnresolvableConflict(inner)
}
}
impl<P> From<UnresolvableConflict<P>> for InsertTxError<P> {
fn from(inner: UnresolvableConflict<P>) -> Self {
Self::UnresolvableConflict(inner)
}
}
#[cfg(feature = "std")]
impl<P: core::fmt::Debug> std::error::Error for UnresolvableConflict<P> {}

View File

@@ -1,25 +0,0 @@
use crate::BlockId;
/// Represents a service that tracks the blockchain.
///
/// The main method is [`is_block_in_chain`], which determines whether a given [`BlockId`]
/// is an ancestor of the `chain_tip`.
///
/// [`is_block_in_chain`]: Self::is_block_in_chain
pub trait ChainOracle {
/// Error type.
type Error: core::fmt::Debug;
/// Determines whether `block` exists as an ancestor of `chain_tip`.
///
/// If `None` is returned, it means the implementation cannot determine whether `block` exists
/// under `chain_tip`.
fn is_block_in_chain(
&self,
block: BlockId,
chain_tip: BlockId,
) -> Result<Option<bool>, Self::Error>;
/// Get the best chain's chain tip.
fn get_chain_tip(&self) -> Result<BlockId, Self::Error>;
}
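// A minimal sketch (assumed, illustrative only): a `ChainOracle` backed by an
// in-memory map from height to block hash. A block is "in chain" when its
// hash matches the stored hash at its height; an unknown height yields
// `Ok(None)` ("cannot determine"). The `BTreeMap` re-export from
// `crate::collections` is an assumption here.
#[cfg(test)]
mod oracle_example {
    use super::*;
    use crate::collections::BTreeMap;
    struct MapOracle {
        blocks: BTreeMap<u32, bitcoin::BlockHash>,
    }
    impl ChainOracle for MapOracle {
        type Error = core::convert::Infallible;
        fn is_block_in_chain(
            &self,
            block: BlockId,
            chain_tip: BlockId,
        ) -> Result<Option<bool>, Self::Error> {
            // a block above the tip cannot be the tip's ancestor
            if block.height > chain_tip.height {
                return Ok(Some(false));
            }
            Ok(self.blocks.get(&block.height).map(|h| *h == block.hash))
        }
        fn get_chain_tip(&self) -> Result<BlockId, Self::Error> {
            Ok(self
                .blocks
                .iter()
                .next_back()
                .map(|(&height, &hash)| BlockId { height, hash })
                .unwrap_or_default())
        }
    }
}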

View File

@@ -1,38 +1,16 @@
use crate::miniscript::{Descriptor, DescriptorPublicKey};
use bitcoin::hashes::{hash_newtype, sha256, Hash};
hash_newtype! {
/// Represents the unique ID of a descriptor.
///
/// This is useful for having a fixed-length unique representation of a descriptor.
/// In particular, we use it to persist application state changes related to the
/// descriptor without having to re-write the whole descriptor each time.
///
pub struct DescriptorId(pub sha256::Hash);
}
/// A trait to extend the functionality of a miniscript descriptor.
pub trait DescriptorExt {
/// Returns the minimum value (in satoshis) at which an output is broadcastable.
/// Panics if the descriptor wildcard is hardened.
fn dust_value(&self) -> u64;
/// Returns the descriptor ID, calculated as the sha256 hash of the spk derived from the
/// descriptor at index 0.
fn descriptor_id(&self) -> DescriptorId;
}
impl DescriptorExt for Descriptor<DescriptorPublicKey> {
fn dust_value(&self) -> u64 {
self.at_derivation_index(0)
.expect("descriptor can't have hardened derivation")
.script_pubkey()
.minimal_non_dust()
.dust_value()
.to_sat()
}
fn descriptor_id(&self) -> DescriptorId {
let spk = self.at_derivation_index(0).unwrap().script_pubkey();
DescriptorId(sha256::Hash::hash(spk.as_bytes()))
}
}
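// Hedged usage sketch (comment only; the descriptor string is a placeholder,
// and the `miniscript` feature is assumed to be enabled):
//
//     use core::str::FromStr;
//     let descriptor = Descriptor::<DescriptorPublicKey>::from_str("wpkh(...)")?;
//     let did = descriptor.descriptor_id(); // sha256 of the spk at index 0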

View File

@@ -1,362 +0,0 @@
//! Contains the [`IndexedTxGraph`] and associated types. Refer to the
//! [`IndexedTxGraph`] documentation for more.
use core::fmt::Debug;
use alloc::vec::Vec;
use bitcoin::{Block, OutPoint, Transaction, TxOut, Txid};
use crate::{
tx_graph::{self, TxGraph},
Anchor, AnchorFromBlockPosition, BlockId, Indexer, Merge,
};
/// The [`IndexedTxGraph`] combines a [`TxGraph`] and an [`Indexer`] implementation.
///
/// It ensures that [`TxGraph`] and [`Indexer`] are updated atomically.
#[derive(Debug)]
pub struct IndexedTxGraph<A, I> {
/// Transaction index.
pub index: I,
graph: TxGraph<A>,
}
impl<A, I: Default> Default for IndexedTxGraph<A, I> {
fn default() -> Self {
Self {
graph: Default::default(),
index: Default::default(),
}
}
}
impl<A, I> IndexedTxGraph<A, I> {
/// Construct a new [`IndexedTxGraph`] with a given `index`.
pub fn new(index: I) -> Self {
Self {
index,
graph: TxGraph::default(),
}
}
/// Get a reference of the internal transaction graph.
pub fn graph(&self) -> &TxGraph<A> {
&self.graph
}
}
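// Hedged construction sketch (comment only; the indexer and anchor types are
// assumptions for illustration):
//
//     let index = KeychainTxOutIndex::<&'static str>::default();
//     let graph = IndexedTxGraph::<ConfirmationBlockTime, _>::new(index);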
impl<A: Anchor, I: Indexer> IndexedTxGraph<A, I> {
/// Applies the [`ChangeSet`] to the [`IndexedTxGraph`].
pub fn apply_changeset(&mut self, changeset: ChangeSet<A, I::ChangeSet>) {
self.index.apply_changeset(changeset.indexer);
for tx in &changeset.tx_graph.txs {
self.index.index_tx(tx);
}
for (&outpoint, txout) in &changeset.tx_graph.txouts {
self.index.index_txout(outpoint, txout);
}
self.graph.apply_changeset(changeset.tx_graph);
}
/// Determines the [`ChangeSet`] between `self` and an empty [`IndexedTxGraph`].
pub fn initial_changeset(&self) -> ChangeSet<A, I::ChangeSet> {
let graph = self.graph.initial_changeset();
let indexer = self.index.initial_changeset();
ChangeSet {
tx_graph: graph,
indexer,
}
}
}
impl<A: Anchor, I: Indexer> IndexedTxGraph<A, I>
where
I::ChangeSet: Default + Merge,
{
fn index_tx_graph_changeset(
&mut self,
tx_graph_changeset: &tx_graph::ChangeSet<A>,
) -> I::ChangeSet {
let mut changeset = I::ChangeSet::default();
for added_tx in &tx_graph_changeset.txs {
changeset.merge(self.index.index_tx(added_tx));
}
for (&added_outpoint, added_txout) in &tx_graph_changeset.txouts {
changeset.merge(self.index.index_txout(added_outpoint, added_txout));
}
changeset
}
/// Apply an `update` directly.
///
/// `update` is a [`TxGraph<A>`] and the resultant changes are returned as [`ChangeSet`].
pub fn apply_update(&mut self, update: TxGraph<A>) -> ChangeSet<A, I::ChangeSet> {
let graph = self.graph.apply_update(update);
let indexer = self.index_tx_graph_changeset(&graph);
ChangeSet {
tx_graph: graph,
indexer,
}
}
/// Insert a floating `txout` of given `outpoint`.
pub fn insert_txout(&mut self, outpoint: OutPoint, txout: TxOut) -> ChangeSet<A, I::ChangeSet> {
let graph = self.graph.insert_txout(outpoint, txout);
let indexer = self.index_tx_graph_changeset(&graph);
ChangeSet {
tx_graph: graph,
indexer,
}
}
/// Insert and index a transaction into the graph.
pub fn insert_tx(&mut self, tx: Transaction) -> ChangeSet<A, I::ChangeSet> {
let graph = self.graph.insert_tx(tx);
let indexer = self.index_tx_graph_changeset(&graph);
ChangeSet {
tx_graph: graph,
indexer,
}
}
/// Insert an `anchor` for a given transaction.
pub fn insert_anchor(&mut self, txid: Txid, anchor: A) -> ChangeSet<A, I::ChangeSet> {
self.graph.insert_anchor(txid, anchor).into()
}
/// Insert a unix timestamp of when a transaction is seen in the mempool.
///
/// This is used for transaction conflict resolution in [`TxGraph`] where the transaction with
/// the later last-seen is prioritized.
pub fn insert_seen_at(&mut self, txid: Txid, seen_at: u64) -> ChangeSet<A, I::ChangeSet> {
self.graph.insert_seen_at(txid, seen_at).into()
}
/// Batch insert transactions, filtering out those that are irrelevant.
///
/// Relevancy is determined by the [`Indexer::is_tx_relevant`] implementation of `I`. Irrelevant
/// transactions in `txs` will be ignored. `txs` do not need to be in topological order.
pub fn batch_insert_relevant<'t>(
&mut self,
txs: impl IntoIterator<Item = (&'t Transaction, impl IntoIterator<Item = A>)>,
) -> ChangeSet<A, I::ChangeSet> {
// The algorithm below allows for non-topologically ordered transactions by using two loops.
// This is achieved by:
// 1. insert all txs into the index. If they are irrelevant, that's fine; the index just
// won't store anything about them.
// 2. decide whether to insert them into the graph, depending on whether `is_tx_relevant`
// returns true or not (in a second loop).
let txs = txs.into_iter().collect::<Vec<_>>();
let mut indexer = I::ChangeSet::default();
for (tx, _) in &txs {
indexer.merge(self.index.index_tx(tx));
}
let mut graph = tx_graph::ChangeSet::default();
for (tx, anchors) in txs {
if self.index.is_tx_relevant(tx) {
let txid = tx.compute_txid();
graph.merge(self.graph.insert_tx(tx.clone()));
for anchor in anchors {
graph.merge(self.graph.insert_anchor(txid, anchor));
}
}
}
ChangeSet {
tx_graph: graph,
indexer,
}
}
/// Batch insert unconfirmed transactions, filtering out those that are irrelevant.
///
/// Relevancy is determined by the internal [`Indexer::is_tx_relevant`] implementation of `I`.
/// Irrelevant transactions in `txs` will be ignored.
///
/// Items of `txs` are tuples containing the transaction and a *last seen* timestamp. The
/// *last seen* communicates when the transaction is last seen in the mempool which is used for
/// conflict-resolution in [`TxGraph`] (refer to [`TxGraph::insert_seen_at`] for details).
pub fn batch_insert_relevant_unconfirmed<'t>(
&mut self,
unconfirmed_txs: impl IntoIterator<Item = (&'t Transaction, u64)>,
) -> ChangeSet<A, I::ChangeSet> {
// The algorithm below allows for non-topologically ordered transactions by using two loops.
// This is achieved by:
// 1. insert all txs into the index. If they are irrelevant, that's fine; the index just
// won't store anything about them.
// 2. decide whether to insert them into the graph, depending on whether `is_tx_relevant`
// returns true or not (in a second loop).
let txs = unconfirmed_txs.into_iter().collect::<Vec<_>>();
let mut indexer = I::ChangeSet::default();
for (tx, _) in &txs {
indexer.merge(self.index.index_tx(tx));
}
let graph = self.graph.batch_insert_unconfirmed(
txs.into_iter()
.filter(|(tx, _)| self.index.is_tx_relevant(tx))
.map(|(tx, seen_at)| (tx.clone(), seen_at)),
);
ChangeSet {
tx_graph: graph,
indexer,
}
}
/// Batch insert unconfirmed transactions.
///
/// Items of `txs` are tuples containing the transaction and a *last seen* timestamp. The
/// *last seen* communicates when the transaction is last seen in the mempool which is used for
/// conflict-resolution in [`TxGraph`] (refer to [`TxGraph::insert_seen_at`] for details).
///
/// To filter out irrelevant transactions, use [`batch_insert_relevant_unconfirmed`] instead.
///
/// [`batch_insert_relevant_unconfirmed`]: IndexedTxGraph::batch_insert_relevant_unconfirmed
pub fn batch_insert_unconfirmed(
&mut self,
txs: impl IntoIterator<Item = (Transaction, u64)>,
) -> ChangeSet<A, I::ChangeSet> {
let graph = self.graph.batch_insert_unconfirmed(txs);
let indexer = self.index_tx_graph_changeset(&graph);
ChangeSet {
tx_graph: graph,
indexer,
}
}
}
/// Methods are available if the anchor (`A`) implements [`AnchorFromBlockPosition`].
impl<A: Anchor, I: Indexer> IndexedTxGraph<A, I>
where
I::ChangeSet: Default + Merge,
A: AnchorFromBlockPosition,
{
/// Batch insert all transactions of the given `block` of `height`, filtering out those that are
/// irrelevant.
///
/// Each inserted transaction's anchor will be constructed from
/// [`AnchorFromBlockPosition::from_block_position`].
///
/// Relevancy is determined by the internal [`Indexer::is_tx_relevant`] implementation of `I`.
/// Irrelevant transactions in `txs` will be ignored.
pub fn apply_block_relevant(
&mut self,
block: &Block,
height: u32,
) -> ChangeSet<A, I::ChangeSet> {
let block_id = BlockId {
hash: block.block_hash(),
height,
};
let mut changeset = ChangeSet::<A, I::ChangeSet>::default();
for (tx_pos, tx) in block.txdata.iter().enumerate() {
changeset.indexer.merge(self.index.index_tx(tx));
if self.index.is_tx_relevant(tx) {
let txid = tx.compute_txid();
let anchor = A::from_block_position(block, block_id, tx_pos);
changeset.tx_graph.merge(self.graph.insert_tx(tx.clone()));
changeset
.tx_graph
.merge(self.graph.insert_anchor(txid, anchor));
}
}
changeset
}
/// Batch insert all transactions of the given `block` of `height`.
///
/// Each inserted transaction's anchor will be constructed from
/// [`AnchorFromBlockPosition::from_block_position`].
///
/// To only insert relevant transactions, use [`apply_block_relevant`] instead.
///
/// [`apply_block_relevant`]: IndexedTxGraph::apply_block_relevant
pub fn apply_block(&mut self, block: Block, height: u32) -> ChangeSet<A, I::ChangeSet> {
let block_id = BlockId {
hash: block.block_hash(),
height,
};
let mut graph = tx_graph::ChangeSet::default();
for (tx_pos, tx) in block.txdata.iter().enumerate() {
let anchor = A::from_block_position(&block, block_id, tx_pos);
graph.merge(self.graph.insert_anchor(tx.compute_txid(), anchor));
graph.merge(self.graph.insert_tx(tx.clone()));
}
let indexer = self.index_tx_graph_changeset(&graph);
ChangeSet {
tx_graph: graph,
indexer,
}
}
}
impl<A, I> AsRef<TxGraph<A>> for IndexedTxGraph<A, I> {
fn as_ref(&self) -> &TxGraph<A> {
&self.graph
}
}
/// Represents changes to an [`IndexedTxGraph`].
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(
feature = "serde",
derive(serde::Deserialize, serde::Serialize),
serde(
crate = "serde_crate",
bound(
deserialize = "A: Ord + serde::Deserialize<'de>, IA: serde::Deserialize<'de>",
serialize = "A: Ord + serde::Serialize, IA: serde::Serialize"
)
)
)]
#[must_use]
pub struct ChangeSet<A, IA> {
/// [`TxGraph`] changeset.
pub tx_graph: tx_graph::ChangeSet<A>,
/// [`Indexer`] changeset.
pub indexer: IA,
}
impl<A, IA: Default> Default for ChangeSet<A, IA> {
fn default() -> Self {
Self {
tx_graph: Default::default(),
indexer: Default::default(),
}
}
}
impl<A: Anchor, IA: Merge> Merge for ChangeSet<A, IA> {
fn merge(&mut self, other: Self) {
self.tx_graph.merge(other.tx_graph);
self.indexer.merge(other.indexer);
}
fn is_empty(&self) -> bool {
self.tx_graph.is_empty() && self.indexer.is_empty()
}
}
impl<A, IA: Default> From<tx_graph::ChangeSet<A>> for ChangeSet<A, IA> {
fn from(graph: tx_graph::ChangeSet<A>) -> Self {
Self {
tx_graph: graph,
..Default::default()
}
}
}
#[cfg(feature = "miniscript")]
impl<A> From<crate::keychain_txout::ChangeSet> for ChangeSet<A, crate::keychain_txout::ChangeSet> {
fn from(indexer: crate::keychain_txout::ChangeSet) -> Self {
Self {
tx_graph: Default::default(),
indexer,
}
}
}

View File

@@ -1,33 +0,0 @@
//! [`Indexer`] provides utilities for indexing transaction data.
use bitcoin::{OutPoint, Transaction, TxOut};
#[cfg(feature = "miniscript")]
pub mod keychain_txout;
pub mod spk_txout;
/// Utilities for indexing transaction data.
///
/// Types which implement this trait can be used to construct an [`IndexedTxGraph`].
/// This trait's methods should rarely be called directly.
///
/// [`IndexedTxGraph`]: crate::IndexedTxGraph
pub trait Indexer {
/// The resultant "changeset" when new transaction data is indexed.
type ChangeSet;
/// Scan and index the given `outpoint` and `txout`.
fn index_txout(&mut self, outpoint: OutPoint, txout: &TxOut) -> Self::ChangeSet;
/// Scans a transaction for relevant outpoints, which are stored and indexed internally.
fn index_tx(&mut self, tx: &Transaction) -> Self::ChangeSet;
/// Apply changeset to itself.
fn apply_changeset(&mut self, changeset: Self::ChangeSet);
/// Determines the [`ChangeSet`](Indexer::ChangeSet) between `self` and an empty [`Indexer`].
fn initial_changeset(&self) -> Self::ChangeSet;
/// Determines whether the transaction should be included in the index.
fn is_tx_relevant(&self, tx: &Transaction) -> bool;
}
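// A minimal illustrative implementation (assumed, not part of the crate): a
// no-op indexer whose changeset carries no information. It only demonstrates
// the shape of the trait.
#[cfg(test)]
mod null_indexer_example {
    use super::*;
    struct NullIndexer;
    impl Indexer for NullIndexer {
        type ChangeSet = ();
        fn index_txout(&mut self, _outpoint: OutPoint, _txout: &TxOut) -> Self::ChangeSet {}
        fn index_tx(&mut self, _tx: &Transaction) -> Self::ChangeSet {}
        fn apply_changeset(&mut self, _changeset: Self::ChangeSet) {}
        fn initial_changeset(&self) -> Self::ChangeSet {}
        fn is_tx_relevant(&self, _tx: &Transaction) -> bool {
            // nothing is relevant to a null indexer
            false
        }
    }
}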

View File

@@ -1,881 +0,0 @@
//! [`KeychainTxOutIndex`] controls how script pubkeys are revealed for multiple keychains and
//! indexes [`TxOut`]s with them.
use crate::{
collections::*,
miniscript::{Descriptor, DescriptorPublicKey},
spk_iter::BIP32_MAX_INDEX,
spk_txout::SpkTxOutIndex,
DescriptorExt, DescriptorId, Indexed, Indexer, KeychainIndexed, SpkIterator,
};
use alloc::{borrow::ToOwned, vec::Vec};
use bitcoin::{Amount, OutPoint, ScriptBuf, SignedAmount, Transaction, TxOut, Txid};
use core::{
fmt::Debug,
ops::{Bound, RangeBounds},
};
use crate::Merge;
/// The default lookahead for a [`KeychainTxOutIndex`]
pub const DEFAULT_LOOKAHEAD: u32 = 25;
/// [`KeychainTxOutIndex`] controls how script pubkeys are revealed for multiple keychains, and
/// indexes [`TxOut`]s with them.
///
/// A single keychain is a chain of script pubkeys derived from a single [`Descriptor`]. Keychains
/// are identified using the `K` generic. Script pubkeys are identified by the keychain that they
/// are derived from `K`, as well as the derivation index `u32`.
///
/// There is a strict 1-to-1 relationship between descriptors and keychains. Each keychain has one
/// and only one descriptor and each descriptor has one and only one keychain. The
/// [`insert_descriptor`] method will return an error if you try to violate this invariant. This
/// rule is a proxy for a stronger rule: no two descriptors should produce the same script pubkey.
/// Having two descriptors produce the same script pubkey should cause whichever keychain derives
/// the script pubkey first to be the effective owner of it, but you should not rely on this
/// behaviour. ⚠ It is up to you, the developer, not to violate this invariant.
///
/// # Revealed script pubkeys
///
/// Tracking how script pubkeys are revealed is useful for collecting chain data. For example, if
/// the user has requested 5 script pubkeys (to receive money with), we only need to use those
/// script pubkeys to scan for chain data.
///
/// Call [`reveal_to_target`] or [`reveal_next_spk`] to reveal more script pubkeys.
/// Call [`revealed_keychain_spks`] or [`revealed_spks`] to iterate through revealed script pubkeys.
///
/// # Lookahead script pubkeys
///
/// When a user first recovers a wallet (i.e. from a recovery phrase and/or descriptor), we will
/// NOT have knowledge of which script pubkeys are revealed. So when we index a transaction or
/// txout (using [`index_tx`]/[`index_txout`]) we scan the txouts against script pubkeys derived
/// above the last revealed index. These additionally-derived script pubkeys are called the
/// lookahead.
///
/// The [`KeychainTxOutIndex`] is constructed with the `lookahead` and cannot be altered. See
/// [`DEFAULT_LOOKAHEAD`] for the value used in the `Default` implementation. Use [`new`] to set a
/// custom `lookahead`.
///
/// # Unbounded script pubkey iterator
///
/// For script-pubkey-based chain sources (such as Electrum/Esplora), an initial scan is best done
/// by iterating through derived script pubkeys one by one and requesting transaction histories for
/// each script pubkey. We stop after a set number of consecutive script pubkeys return empty histories. An
/// unbounded script pubkey iterator is useful to pass to such a chain source because it doesn't
/// require holding a reference to the index.
///
/// Call [`unbounded_spk_iter`] to get an unbounded script pubkey iterator for a given keychain.
/// Call [`all_unbounded_spk_iters`] to get unbounded script pubkey iterators for all keychains.
///
/// # Change sets
///
/// Methods that can update the last revealed index or add keychains will return [`ChangeSet`] to report
/// these changes. This should be persisted for future recovery.
///
/// ## Synopsis
///
/// ```
/// use bdk_chain::indexer::keychain_txout::KeychainTxOutIndex;
/// # use bdk_chain::{ miniscript::{Descriptor, DescriptorPublicKey} };
/// # use core::str::FromStr;
///
/// // imagine our service has internal and external addresses but also addresses for users
/// #[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd)]
/// enum MyKeychain {
/// External,
/// Internal,
/// MyAppUser {
/// user_id: u32
/// }
/// }
///
/// let mut txout_index = KeychainTxOutIndex::<MyKeychain>::default();
///
/// # let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only();
/// # let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
/// # let (internal_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/*)").unwrap();
/// # let (descriptor_42, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/2/*)").unwrap();
/// let _ = txout_index.insert_descriptor(MyKeychain::External, external_descriptor)?;
/// let _ = txout_index.insert_descriptor(MyKeychain::Internal, internal_descriptor)?;
/// let _ = txout_index.insert_descriptor(MyKeychain::MyAppUser { user_id: 42 }, descriptor_42)?;
///
/// let new_spk_for_user = txout_index.reveal_next_spk(MyKeychain::MyAppUser{ user_id: 42 });
/// # Ok::<_, bdk_chain::indexer::keychain_txout::InsertDescriptorError<_>>(())
/// ```
///
/// [`Ord`]: core::cmp::Ord
/// [`SpkTxOutIndex`]: crate::spk_txout_index::SpkTxOutIndex
/// [`Descriptor`]: crate::miniscript::Descriptor
/// [`reveal_to_target`]: Self::reveal_to_target
/// [`reveal_next_spk`]: Self::reveal_next_spk
/// [`revealed_keychain_spks`]: Self::revealed_keychain_spks
/// [`revealed_spks`]: Self::revealed_spks
/// [`index_tx`]: Self::index_tx
/// [`index_txout`]: Self::index_txout
/// [`new`]: Self::new
/// [`unbounded_spk_iter`]: Self::unbounded_spk_iter
/// [`all_unbounded_spk_iters`]: Self::all_unbounded_spk_iters
/// [`outpoints`]: Self::outpoints
/// [`txouts`]: Self::txouts
/// [`unused_spks`]: Self::unused_spks
/// [`insert_descriptor`]: Self::insert_descriptor
#[derive(Clone, Debug)]
pub struct KeychainTxOutIndex<K> {
inner: SpkTxOutIndex<(K, u32)>,
keychain_to_descriptor_id: BTreeMap<K, DescriptorId>,
descriptor_id_to_keychain: HashMap<DescriptorId, K>,
descriptors: HashMap<DescriptorId, Descriptor<DescriptorPublicKey>>,
last_revealed: HashMap<DescriptorId, u32>,
lookahead: u32,
}
impl<K> Default for KeychainTxOutIndex<K> {
fn default() -> Self {
Self::new(DEFAULT_LOOKAHEAD)
}
}
impl<K: Clone + Ord + Debug> Indexer for KeychainTxOutIndex<K> {
type ChangeSet = ChangeSet;
fn index_txout(&mut self, outpoint: OutPoint, txout: &TxOut) -> Self::ChangeSet {
let mut changeset = ChangeSet::default();
if let Some((keychain, index)) = self.inner.scan_txout(outpoint, txout).cloned() {
let did = self
.keychain_to_descriptor_id
.get(&keychain)
.expect("invariant");
if self.last_revealed.get(did) < Some(&index) {
self.last_revealed.insert(*did, index);
changeset.last_revealed.insert(*did, index);
self.replenish_inner_index(*did, &keychain, self.lookahead);
}
}
changeset
}
fn index_tx(&mut self, tx: &bitcoin::Transaction) -> Self::ChangeSet {
let mut changeset = ChangeSet::default();
let txid = tx.compute_txid();
for (op, txout) in tx.output.iter().enumerate() {
changeset.merge(self.index_txout(OutPoint::new(txid, op as u32), txout));
}
changeset
}
fn initial_changeset(&self) -> Self::ChangeSet {
ChangeSet {
last_revealed: self.last_revealed.clone().into_iter().collect(),
}
}
fn apply_changeset(&mut self, changeset: Self::ChangeSet) {
self.apply_changeset(changeset)
}
fn is_tx_relevant(&self, tx: &bitcoin::Transaction) -> bool {
self.inner.is_relevant(tx)
}
}
impl<K> KeychainTxOutIndex<K> {
/// Construct a [`KeychainTxOutIndex`] with the given `lookahead`.
///
/// The `lookahead` is the number of script pubkeys to derive and cache from the internal
/// descriptors over and above the last revealed script index. Without a lookahead the index
/// will miss outputs you own when processing transactions whose output script pubkeys lie
/// beyond the last revealed index. In certain situations, such as when performing an initial
/// scan of the blockchain during wallet import, it may be uncertain or unknown what the index
/// of the last revealed script pubkey actually is.
///
/// Refer to [struct-level docs](KeychainTxOutIndex) for more about `lookahead`.
pub fn new(lookahead: u32) -> Self {
Self {
inner: SpkTxOutIndex::default(),
keychain_to_descriptor_id: Default::default(),
descriptors: Default::default(),
descriptor_id_to_keychain: Default::default(),
last_revealed: Default::default(),
lookahead,
}
}
}
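// Short usage sketch (comment only, assumed): constructing an index with a
// custom lookahead of 50 script pubkeys per keychain.
//
//     let index: KeychainTxOutIndex<&'static str> = KeychainTxOutIndex::new(50);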
/// Methods that are *re-exposed* from the internal [`SpkTxOutIndex`].
impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
/// Return a reference to the internal [`SpkTxOutIndex`].
///
/// **WARNING**: The internal index will contain lookahead spks. Refer to
/// [struct-level docs](KeychainTxOutIndex) for more about `lookahead`.
pub fn inner(&self) -> &SpkTxOutIndex<(K, u32)> {
&self.inner
}
/// Get the set of indexed outpoints, corresponding to tracked keychains.
pub fn outpoints(&self) -> &BTreeSet<KeychainIndexed<K, OutPoint>> {
self.inner.outpoints()
}
/// Iterate over known txouts that spend to tracked script pubkeys.
pub fn txouts(
&self,
) -> impl DoubleEndedIterator<Item = KeychainIndexed<K, (OutPoint, &TxOut)>> + ExactSizeIterator
{
self.inner
.txouts()
.map(|(index, op, txout)| (index.clone(), (op, txout)))
}
/// Finds all txouts on a transaction that has previously been scanned and indexed.
pub fn txouts_in_tx(
&self,
txid: Txid,
) -> impl DoubleEndedIterator<Item = KeychainIndexed<K, (OutPoint, &TxOut)>> {
self.inner
.txouts_in_tx(txid)
.map(|(index, op, txout)| (index.clone(), (op, txout)))
}
/// Return the [`TxOut`] of `outpoint` if it has been indexed, and if it corresponds to a
/// tracked keychain.
///
/// The associated keychain and keychain index of the txout's spk is also returned.
///
/// This calls [`SpkTxOutIndex::txout`] internally.
pub fn txout(&self, outpoint: OutPoint) -> Option<KeychainIndexed<K, &TxOut>> {
self.inner
.txout(outpoint)
.map(|(index, txout)| (index.clone(), txout))
}
/// Return the script that exists under the given `keychain`'s `index`.
///
/// This calls [`SpkTxOutIndex::spk_at_index`] internally.
pub fn spk_at_index(&self, keychain: K, index: u32) -> Option<ScriptBuf> {
self.inner.spk_at_index(&(keychain.clone(), index))
}
/// Returns the keychain and keychain index associated with the spk.
///
/// This calls [`SpkTxOutIndex::index_of_spk`] internally.
pub fn index_of_spk(&self, script: ScriptBuf) -> Option<&(K, u32)> {
self.inner.index_of_spk(script)
}
/// Returns whether the spk under the `keychain`'s `index` has been used.
///
/// Here, "unused" means that after the script pubkey was stored in the index, the index has
/// never scanned a transaction output with it.
///
/// This calls [`SpkTxOutIndex::is_used`] internally.
pub fn is_used(&self, keychain: K, index: u32) -> bool {
self.inner.is_used(&(keychain, index))
}
/// Marks the script pubkey at `index` as used even though the tracker hasn't seen an output
/// with it.
///
/// This only has an effect when the `index` had been added to `self` already and was unused.
///
/// Returns whether the spk under the given `keychain` and `index` is successfully
/// marked as used. Returns false either when there is no descriptor under the given
/// keychain, or when the spk is already marked as used.
///
/// This is useful when you want to reserve a script pubkey for something but don't want to add
/// the transaction output using it to the index yet. Other callers will consider `index` on
/// `keychain` used until you call [`unmark_used`].
///
/// This calls [`SpkTxOutIndex::mark_used`] internally.
///
/// [`unmark_used`]: Self::unmark_used
pub fn mark_used(&mut self, keychain: K, index: u32) -> bool {
self.inner.mark_used(&(keychain, index))
}
/// Undoes the effect of [`mark_used`]. Returns whether the `index` is inserted back into
/// `unused`.
///
/// Note that if `self` has scanned an output with this script pubkey, then this will have no
/// effect.
///
/// This calls [`SpkTxOutIndex::unmark_used`] internally.
///
/// [`mark_used`]: Self::mark_used
pub fn unmark_used(&mut self, keychain: K, index: u32) -> bool {
self.inner.unmark_used(&(keychain, index))
}
/// Computes the total value transfer effect `tx` has on the script pubkeys belonging to the
/// keychains in `range`. Value is *sent* when a script pubkey in the `range` is on an input and
/// *received* when it is on an output. For `sent` to be computed correctly, the output being
/// spent must have already been scanned by the index. Calculating received just uses the
/// [`Transaction`] outputs directly, so it will be correct even if it has not been scanned.
pub fn sent_and_received(
&self,
tx: &Transaction,
range: impl RangeBounds<K>,
) -> (Amount, Amount) {
self.inner
.sent_and_received(tx, self.map_to_inner_bounds(range))
}
/// Computes the net value that this transaction gives to the script pubkeys in the index and
/// *takes* from the transaction outputs in the index. Shorthand for calling
/// [`sent_and_received`] and subtracting sent from received.
///
/// This calls [`SpkTxOutIndex::net_value`] internally.
///
/// [`sent_and_received`]: Self::sent_and_received
pub fn net_value(&self, tx: &Transaction, range: impl RangeBounds<K>) -> SignedAmount {
self.inner.net_value(tx, self.map_to_inner_bounds(range))
}
}
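As a quick usage sketch (not part of the diff; the `&'static str` keychain type, the helper name, and the `bdk_chain::keychain_txout` path are assumptions), a caller can total a transaction's effect across every tracked keychain with the full range `..`:

```rust
use bdk_chain::{
    bitcoin::{SignedAmount, Transaction},
    keychain_txout::KeychainTxOutIndex,
};

/// Net effect of `tx` across every tracked keychain.
///
/// For the *sent* side to be correct, the outputs being spent must already
/// have been indexed; *received* is computed from `tx` itself.
fn net_effect(index: &KeychainTxOutIndex<&'static str>, tx: &Transaction) -> SignedAmount {
    index.net_value(tx, ..)
}
```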
impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
/// Return all keychains and their corresponding descriptors.
pub fn keychains(
&self,
) -> impl DoubleEndedIterator<Item = (K, &Descriptor<DescriptorPublicKey>)> + ExactSizeIterator + '_
{
self.keychain_to_descriptor_id
.iter()
.map(|(k, did)| (k.clone(), self.descriptors.get(did).expect("invariant")))
}
/// Insert a descriptor with a keychain associated to it.
///
/// Adding a descriptor means you will be able to derive new script pubkeys under it and the
/// txout index will discover transaction outputs with those script pubkeys (once they've been
/// derived and added to the index).
///
/// The keychain <-> descriptor mapping is one-to-one and cannot be changed. Attempting to
/// change it will return an [`InsertDescriptorError<K>`].
///
/// [`KeychainTxOutIndex`] will prevent you from inserting two descriptors which derive the same
/// script pubkey at index 0, but it's up to you to ensure that descriptors don't collide at
/// other indices. If they do, nothing catastrophic happens at the `KeychainTxOutIndex` level
/// (one keychain arbitrarily becomes the de facto owner of that spk), but this may have subtle
/// implications up the application stack, like one UTXO being missing from one keychain
/// because it has been assigned to another keychain that produces the same script pubkey.
pub fn insert_descriptor(
&mut self,
keychain: K,
descriptor: Descriptor<DescriptorPublicKey>,
) -> Result<bool, InsertDescriptorError<K>> {
let did = descriptor.descriptor_id();
if !self.keychain_to_descriptor_id.contains_key(&keychain)
&& !self.descriptor_id_to_keychain.contains_key(&did)
{
self.descriptors.insert(did, descriptor.clone());
self.keychain_to_descriptor_id.insert(keychain.clone(), did);
self.descriptor_id_to_keychain.insert(did, keychain.clone());
self.replenish_inner_index(did, &keychain, self.lookahead);
return Ok(true);
}
if let Some(existing_desc_id) = self.keychain_to_descriptor_id.get(&keychain) {
let descriptor = self.descriptors.get(existing_desc_id).expect("invariant");
if *existing_desc_id != did {
return Err(InsertDescriptorError::KeychainAlreadyAssigned {
existing_assignment: descriptor.clone(),
keychain,
});
}
}
if let Some(existing_keychain) = self.descriptor_id_to_keychain.get(&did) {
let descriptor = self.descriptors.get(&did).expect("invariant").clone();
if *existing_keychain != keychain {
return Err(InsertDescriptorError::DescriptorAlreadyAssigned {
existing_assignment: existing_keychain.clone(),
descriptor,
});
}
}
Ok(false)
}
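A minimal sketch of the one-to-one rule in action (the module paths, the `&'static str` keychain, and the lookahead value are illustrative assumptions; the descriptor string is the one from the synopsis elsewhere in this diff):

```rust
use bdk_chain::keychain_txout::{InsertDescriptorError, KeychainTxOutIndex};
use bdk_chain::miniscript::{Descriptor, DescriptorPublicKey};

let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only();
let (desc, _keymap) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();

let mut index = KeychainTxOutIndex::<&'static str>::new(20);
// First insertion of a fresh (keychain, descriptor) pair returns Ok(true).
assert!(index.insert_descriptor("external", desc.clone()).unwrap());
// Re-inserting the exact same pair is a no-op: Ok(false).
assert!(!index.insert_descriptor("external", desc.clone()).unwrap());
// Assigning the same descriptor to a second keychain is rejected.
assert!(matches!(
    index.insert_descriptor("other", desc),
    Err(InsertDescriptorError::DescriptorAlreadyAssigned { .. })
));
```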
/// Gets the descriptor associated with the keychain. Returns `None` if the keychain doesn't
/// have a descriptor associated with it.
pub fn get_descriptor(&self, keychain: K) -> Option<&Descriptor<DescriptorPublicKey>> {
let did = self.keychain_to_descriptor_id.get(&keychain)?;
self.descriptors.get(did)
}
/// Get the lookahead setting.
///
/// Refer to [`new`] for more information on the `lookahead`.
///
/// [`new`]: Self::new
pub fn lookahead(&self) -> u32 {
self.lookahead
}
/// Store lookahead scripts until `target_index` (inclusive).
///
/// This does not change the global `lookahead` setting.
pub fn lookahead_to_target(&mut self, keychain: K, target_index: u32) {
if let Some((next_index, _)) = self.next_index(keychain.clone()) {
let temp_lookahead = (target_index + 1)
.checked_sub(next_index)
.filter(|&index| index > 0);
if let Some(temp_lookahead) = temp_lookahead {
self.replenish_inner_index_keychain(keychain, temp_lookahead);
}
}
}
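For example (a sketch continuing the index above; the target index is arbitrary), an app that knows higher indices were used elsewhere can pre-cache scripts ahead of a chain scan:

```rust
// Make sure spks for "external" are stored up to derivation index 100
// (inclusive) so an upcoming scan can match them; the permanent `lookahead()`
// setting is left untouched.
index.lookahead_to_target("external", 100);
```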
fn replenish_inner_index_did(&mut self, did: DescriptorId, lookahead: u32) {
if let Some(keychain) = self.descriptor_id_to_keychain.get(&did).cloned() {
self.replenish_inner_index(did, &keychain, lookahead);
}
}
fn replenish_inner_index_keychain(&mut self, keychain: K, lookahead: u32) {
if let Some(did) = self.keychain_to_descriptor_id.get(&keychain) {
self.replenish_inner_index(*did, &keychain, lookahead);
}
}
/// Syncs the state of the inner spk index after changes to a keychain
fn replenish_inner_index(&mut self, did: DescriptorId, keychain: &K, lookahead: u32) {
let descriptor = self.descriptors.get(&did).expect("invariant");
let next_store_index = self
.inner
.all_spks()
.range(&(keychain.clone(), u32::MIN)..=&(keychain.clone(), u32::MAX))
.last()
.map_or(0, |((_, index), _)| *index + 1);
let next_reveal_index = self.last_revealed.get(&did).map_or(0, |v| *v + 1);
for (new_index, new_spk) in
SpkIterator::new_with_range(descriptor, next_store_index..next_reveal_index + lookahead)
{
let _inserted = self
.inner
.insert_spk((keychain.clone(), new_index), new_spk);
debug_assert!(_inserted, "replenish lookahead: must not have existing spk: keychain={:?}, lookahead={}, next_store_index={}, next_reveal_index={}", keychain, lookahead, next_store_index, next_reveal_index);
}
}
/// Get an unbounded spk iterator over a given `keychain`. Returns `None` if the provided
/// keychain doesn't exist.
pub fn unbounded_spk_iter(
&self,
keychain: K,
) -> Option<SpkIterator<Descriptor<DescriptorPublicKey>>> {
let descriptor = self.get_descriptor(keychain)?.clone();
Some(SpkIterator::new(descriptor))
}
/// Get unbounded spk iterators for all keychains.
pub fn all_unbounded_spk_iters(
&self,
) -> BTreeMap<K, SpkIterator<Descriptor<DescriptorPublicKey>>> {
self.keychain_to_descriptor_id
.iter()
.map(|(k, did)| {
(
k.clone(),
SpkIterator::new(self.descriptors.get(did).expect("invariant").clone()),
)
})
.collect()
}
/// Iterate over revealed spks of keychains in `range`.
pub fn revealed_spks(
&self,
range: impl RangeBounds<K>,
) -> impl Iterator<Item = KeychainIndexed<K, ScriptBuf>> + '_ {
let start = range.start_bound();
let end = range.end_bound();
let mut iter_last_revealed = self
.keychain_to_descriptor_id
.range((start, end))
.map(|(k, did)| (k, self.last_revealed.get(did).cloned()));
let mut iter_spks = self
.inner
.all_spks()
.range(self.map_to_inner_bounds((start, end)));
let mut current_keychain = iter_last_revealed.next();
// The reason we need a tricky algorithm is because of the "lookahead" feature which means
// that some of the spks in the SpkTxOutIndex will not have been revealed yet. So we need to
// filter out those spks that are above the last_revealed for that keychain. To do this we
// iterate through the last_revealed for each keychain and the spks for each keychain in
// tandem. This minimizes BTreeMap queries.
core::iter::from_fn(move || loop {
let ((keychain, index), spk) = iter_spks.next()?;
// We need to find the last revealed that matches the current spk we are considering so
// we skip ahead.
while current_keychain?.0 < keychain {
current_keychain = iter_last_revealed.next();
}
let (current_keychain, last_revealed) = current_keychain?;
if current_keychain == keychain && Some(*index) <= last_revealed {
break Some(((keychain.clone(), *index), spk.clone()));
}
})
}
/// Iterate over revealed spks of the given `keychain` with ascending indices.
///
/// This is a double ended iterator so you can easily reverse it to get an iterator where
/// the script pubkeys that were most recently revealed are first.
pub fn revealed_keychain_spks(
&self,
keychain: K,
) -> impl DoubleEndedIterator<Item = Indexed<ScriptBuf>> + '_ {
let end = self
.last_revealed_index(keychain.clone())
.map(|v| v + 1)
.unwrap_or(0);
self.inner
.all_spks()
.range((keychain.clone(), 0)..(keychain.clone(), end))
.map(|((_, index), spk)| (*index, spk.clone()))
}
/// Iterate over revealed, but unused, spks of all keychains.
pub fn unused_spks(
&self,
) -> impl DoubleEndedIterator<Item = KeychainIndexed<K, ScriptBuf>> + Clone + '_ {
self.keychain_to_descriptor_id.keys().flat_map(|keychain| {
self.unused_keychain_spks(keychain.clone())
.map(|(i, spk)| ((keychain.clone(), i), spk.clone()))
})
}
/// Iterate over revealed, but unused, spks of the given `keychain`.
/// Returns an empty iterator if the provided keychain doesn't exist.
pub fn unused_keychain_spks(
&self,
keychain: K,
) -> impl DoubleEndedIterator<Item = Indexed<ScriptBuf>> + Clone + '_ {
let end = match self.keychain_to_descriptor_id.get(&keychain) {
Some(did) => self.last_revealed.get(did).map(|v| *v + 1).unwrap_or(0),
None => 0,
};
self.inner
.unused_spks((keychain.clone(), 0)..(keychain.clone(), end))
.map(|((_, i), spk)| (*i, spk))
}
/// Get the next derivation index for `keychain`. The next index is the index after the last revealed
/// derivation index.
///
/// The second field in the returned tuple represents whether the next derivation index is new.
/// There are two scenarios where the next derivation index is reused (not new):
///
/// 1. The keychain's descriptor has no wildcard, and a script has already been revealed.
/// 2. The number of revealed scripts has already reached 2^31 (refer to BIP-32).
///
/// Not checking the second field of the tuple may result in address reuse.
///
/// Returns `None` if the provided `keychain` doesn't exist.
pub fn next_index(&self, keychain: K) -> Option<(u32, bool)> {
let did = self.keychain_to_descriptor_id.get(&keychain)?;
let last_index = self.last_revealed.get(did).cloned();
let descriptor = self.descriptors.get(did).expect("invariant");
// we can only get the next index if the wildcard exists.
let has_wildcard = descriptor.has_wildcard();
Some(match last_index {
// if there is no index, next_index is always 0.
None => (0, true),
// descriptors without wildcards can only have one index.
Some(_) if !has_wildcard => (0, false),
// derivation index must be < 2^31 (BIP-32).
Some(index) if index > BIP32_MAX_INDEX => {
unreachable!("index is out of bounds")
}
Some(index) if index == BIP32_MAX_INDEX => (index, false),
// get the next derivation index.
Some(index) => (index + 1, true),
})
}
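Since ignoring the second field can silently reuse an address, a guard like this sketch (helper name assumed) makes the contract explicit:

```rust
use bdk_chain::keychain_txout::KeychainTxOutIndex;

/// The next derivation index for `keychain`, but only if revealing it would
/// not reuse an address.
fn fresh_index(index: &KeychainTxOutIndex<&'static str>, keychain: &'static str) -> Option<u32> {
    match index.next_index(keychain)? {
        (i, true) => Some(i), // genuinely new index
        (_, false) => None,   // no wildcard, or 2^31 - 1 already revealed
    }
}
```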
/// Get the last derivation index that is revealed for each keychain.
///
/// Keychains with no revealed indices will not be included in the returned [`BTreeMap`].
pub fn last_revealed_indices(&self) -> BTreeMap<K, u32> {
self.last_revealed
.iter()
.filter_map(|(desc_id, index)| {
let keychain = self.descriptor_id_to_keychain.get(desc_id)?;
Some((keychain.clone(), *index))
})
.collect()
}
/// Get the last derivation index revealed for `keychain`. Returns `None` if the keychain
/// doesn't exist, or if the keychain doesn't have any revealed scripts.
pub fn last_revealed_index(&self, keychain: K) -> Option<u32> {
let descriptor_id = self.keychain_to_descriptor_id.get(&keychain)?;
self.last_revealed.get(descriptor_id).cloned()
}
/// Convenience method to call [`Self::reveal_to_target`] on multiple keychains.
pub fn reveal_to_target_multi(&mut self, keychains: &BTreeMap<K, u32>) -> ChangeSet {
let mut changeset = ChangeSet::default();
for (keychain, &index) in keychains {
if let Some((_, new_changeset)) = self.reveal_to_target(keychain.clone(), index) {
changeset.merge(new_changeset);
}
}
changeset
}
/// Reveals script pubkeys of the `keychain`'s descriptor **up to and including** the
/// `target_index`.
///
/// If the `target_index` cannot be reached (due to the descriptor having no wildcard and/or
/// the `target_index` is in the hardened index range), this method will make a best-effort and
/// reveal up to the last possible index.
///
/// This returns a list of newly revealed indices (alongside their scripts) and a
/// [`ChangeSet`], which reports updates to the latest revealed index. If no new script
/// pubkeys are revealed, then both of these will be empty.
///
/// Returns `None` if the provided `keychain` doesn't exist.
#[must_use]
pub fn reveal_to_target(
&mut self,
keychain: K,
target_index: u32,
) -> Option<(Vec<Indexed<ScriptBuf>>, ChangeSet)> {
let mut changeset = ChangeSet::default();
let mut spks: Vec<Indexed<ScriptBuf>> = vec![];
while let Some((i, new)) = self.next_index(keychain.clone()) {
if !new || i > target_index {
break;
}
match self.reveal_next_spk(keychain.clone()) {
Some(((i, spk), change)) => {
spks.push((i, spk));
changeset.merge(change);
}
None => break,
}
}
Some((spks, changeset))
}
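A usage sketch (keychain name assumed; the persistence step is only hinted at): reveal everything up to index 4, then stage the changeset before handing out the scripts:

```rust
// `index` is a KeychainTxOutIndex<&'static str> as in the earlier sketches.
if let Some((revealed, changeset)) = index.reveal_to_target("external", 4) {
    // The changeset must reach persistent storage before the spks are used.
    let _persist_me = changeset;
    for (i, spk) in revealed {
        println!("revealed index {i}: {spk:?}");
    }
}
```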
/// Attempts to reveal the next script pubkey for `keychain`.
///
/// Returns the derivation index of the revealed script pubkey, the revealed script pubkey and a
/// [`ChangeSet`] which represents changes in the last revealed index (if any).
/// Returns `None` if the provided keychain doesn't exist.
///
/// When a new script cannot be revealed, we return the last revealed script and an empty
/// [`ChangeSet`]. There are two scenarios when a new script pubkey cannot be derived:
///
/// 1. The descriptor has no wildcard and already has one script revealed.
/// 2. The descriptor has already revealed scripts up to the numeric bound.
///
/// In a third case, where no descriptor is associated with the given keychain at all, this
/// method returns `None`.
pub fn reveal_next_spk(&mut self, keychain: K) -> Option<(Indexed<ScriptBuf>, ChangeSet)> {
let (next_index, new) = self.next_index(keychain.clone())?;
let mut changeset = ChangeSet::default();
if new {
let did = self.keychain_to_descriptor_id.get(&keychain)?;
self.last_revealed.insert(*did, next_index);
changeset.last_revealed.insert(*did, next_index);
self.replenish_inner_index(*did, &keychain, self.lookahead);
}
let script = self
.inner
.spk_at_index(&(keychain.clone(), next_index))
.expect("we just inserted it");
Some(((next_index, script), changeset))
}
/// Gets the next unused script pubkey in the keychain. I.e., the script pubkey with the lowest
/// index that has not been used yet.
///
/// This will derive and reveal a new script pubkey if no more unused script pubkeys exist.
///
/// If the descriptor has no wildcard and already has a used script pubkey or if a descriptor
/// has used all scripts up to the derivation bounds, then the last derived script pubkey will be
/// returned.
///
/// Returns `None` if there are no unused script pubkeys and no new script pubkey could be
/// revealed (see [`reveal_next_spk`] for when this happens).
///
/// [`reveal_next_spk`]: Self::reveal_next_spk
pub fn next_unused_spk(&mut self, keychain: K) -> Option<(Indexed<ScriptBuf>, ChangeSet)> {
let next_unused = self
.unused_keychain_spks(keychain.clone())
.next()
.map(|(i, spk)| ((i, spk.to_owned()), ChangeSet::default()));
next_unused.or_else(|| self.reveal_next_spk(keychain))
}
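Putting it together, a typical "give me a receive script" flow might look like this sketch (names and the reservation policy are assumptions):

```rust
use bdk_chain::{bitcoin::ScriptBuf, keychain_txout::KeychainTxOutIndex};

/// Hand out a script pubkey for receiving, reserving it against other callers.
fn receive_spk(index: &mut KeychainTxOutIndex<&'static str>) -> Option<ScriptBuf> {
    let ((i, spk), changeset) = index.next_unused_spk("external")?;
    index.mark_used("external", i); // reserve until an output is actually seen
    let _persist_me = changeset;    // write this out before showing the address
    Some(spk)
}
```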
/// Iterate over all [`OutPoint`]s that have `TxOut`s with script pubkeys derived from
/// `keychain`.
pub fn keychain_outpoints(
&self,
keychain: K,
) -> impl DoubleEndedIterator<Item = Indexed<OutPoint>> + '_ {
self.keychain_outpoints_in_range(keychain.clone()..=keychain)
.map(|((_, i), op)| (i, op))
}
/// Iterate over [`OutPoint`]s that have script pubkeys derived from keychains in `range`.
pub fn keychain_outpoints_in_range<'a>(
&'a self,
range: impl RangeBounds<K> + 'a,
) -> impl DoubleEndedIterator<Item = KeychainIndexed<K, OutPoint>> + 'a {
self.inner
.outputs_in_range(self.map_to_inner_bounds(range))
.map(|((k, i), op)| ((k.clone(), *i), op))
}
fn map_to_inner_bounds(&self, bound: impl RangeBounds<K>) -> impl RangeBounds<(K, u32)> {
let start = match bound.start_bound() {
Bound::Included(keychain) => Bound::Included((keychain.clone(), u32::MIN)),
Bound::Excluded(keychain) => Bound::Excluded((keychain.clone(), u32::MAX)),
Bound::Unbounded => Bound::Unbounded,
};
let end = match bound.end_bound() {
Bound::Included(keychain) => Bound::Included((keychain.clone(), u32::MAX)),
Bound::Excluded(keychain) => Bound::Excluded((keychain.clone(), u32::MIN)),
Bound::Unbounded => Bound::Unbounded,
};
(start, end)
}
/// Returns the highest derivation index of the `keychain` where [`KeychainTxOutIndex`] has
/// found a [`TxOut`] with its script pubkey.
pub fn last_used_index(&self, keychain: K) -> Option<u32> {
self.keychain_outpoints(keychain).last().map(|(i, _)| i)
}
/// Returns the highest derivation index of each keychain for which [`KeychainTxOutIndex`] has
/// found a [`TxOut`] with its script pubkey.
pub fn last_used_indices(&self) -> BTreeMap<K, u32> {
self.keychain_to_descriptor_id
.iter()
.filter_map(|(keychain, _)| {
self.last_used_index(keychain.clone())
.map(|index| (keychain.clone(), index))
})
.collect()
}
/// Applies the [`ChangeSet`] to the [`KeychainTxOutIndex<K>`].
pub fn apply_changeset(&mut self, changeset: ChangeSet) {
for (&desc_id, &index) in &changeset.last_revealed {
let v = self.last_revealed.entry(desc_id).or_default();
*v = index.max(*v);
self.replenish_inner_index_did(desc_id, self.lookahead);
}
}
}
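On startup the persisted changesets are simply replayed; a sketch assuming the descriptors were re-inserted first (otherwise the descriptor IDs cannot resolve to keychains and the lookahead is not replenished):

```rust
use bdk_chain::keychain_txout::{ChangeSet, KeychainTxOutIndex};

/// Rebuild in-memory reveal state from previously persisted changesets.
fn restore(index: &mut KeychainTxOutIndex<&'static str>, saved: Vec<ChangeSet>) {
    for changeset in saved {
        // Max-merges `last_revealed` and replenishes lookahead spks.
        index.apply_changeset(changeset);
    }
}
```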
#[derive(Clone, Debug, PartialEq)]
/// Error returned from [`KeychainTxOutIndex::insert_descriptor`]
pub enum InsertDescriptorError<K> {
/// The descriptor has already been assigned to a keychain so you can't assign it to another
DescriptorAlreadyAssigned {
/// The descriptor you have attempted to reassign
descriptor: Descriptor<DescriptorPublicKey>,
/// The keychain that the descriptor is already assigned to
existing_assignment: K,
},
/// The keychain is already assigned to a descriptor so you can't reassign it
KeychainAlreadyAssigned {
/// The keychain that you have attempted to reassign
keychain: K,
/// The descriptor that the keychain is already assigned to
existing_assignment: Descriptor<DescriptorPublicKey>,
},
}
impl<K: core::fmt::Debug> core::fmt::Display for InsertDescriptorError<K> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
InsertDescriptorError::DescriptorAlreadyAssigned {
existing_assignment: existing,
descriptor,
} => {
write!(
f,
"attempt to re-assign descriptor {descriptor:?} already assigned to {existing:?}"
)
}
InsertDescriptorError::KeychainAlreadyAssigned {
existing_assignment: existing,
keychain,
} => {
write!(
f,
"attempt to re-assign keychain {keychain:?} already assigned to {existing:?}"
)
}
}
}
}
#[cfg(feature = "std")]
impl<K: core::fmt::Debug> std::error::Error for InsertDescriptorError<K> {}
/// Represents updates to the derivation index of a [`KeychainTxOutIndex`].
/// It maps each descriptor ID to its last revealed derivation index.
///
/// It can be applied to [`KeychainTxOutIndex`] with [`apply_changeset`].
///
/// The `last_revealed` field is monotone in that [`merge`] will never decrease it.
///
/// [`KeychainTxOutIndex`]: crate::keychain_txout::KeychainTxOutIndex
/// [`apply_changeset`]: crate::keychain_txout::KeychainTxOutIndex::apply_changeset
/// [`merge`]: Self::merge
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(
feature = "serde",
derive(serde::Deserialize, serde::Serialize),
serde(crate = "serde_crate")
)]
#[must_use]
pub struct ChangeSet {
/// Contains for each descriptor_id the last revealed index of derivation
pub last_revealed: BTreeMap<DescriptorId, u32>,
}
impl Merge for ChangeSet {
/// Merge another [`ChangeSet`] into self.
fn merge(&mut self, other: Self) {
// for `last_revealed`, entries of `other` will take precedence ONLY if it is greater than
// what was originally in `self`.
for (desc_id, index) in other.last_revealed {
use crate::collections::btree_map::Entry;
match self.last_revealed.entry(desc_id) {
Entry::Vacant(entry) => {
entry.insert(index);
}
Entry::Occupied(mut entry) => {
if *entry.get() < index {
entry.insert(index);
}
}
}
}
}
/// Returns whether the changeset is empty.
fn is_empty(&self) -> bool {
self.last_revealed.is_empty()
}
}
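This monotone merge is what makes staging safe: merging an older changeset into a newer one can never roll back a reveal. A sketch (assuming `Merge` is exported at the crate root):

```rust
use bdk_chain::{keychain_txout::ChangeSet, Merge};

/// Combine two staged changesets; per descriptor, the larger revealed index
/// wins regardless of the order the changesets were produced in.
fn combine(mut first: ChangeSet, second: ChangeSet) -> ChangeSet {
    first.merge(second);
    first
}
```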

View File

@@ -0,0 +1,309 @@
//! Module for keychain-related structures.
//!
//! A keychain here is a set of application-defined indexes for a miniscript descriptor where we can
//! derive script pubkeys at a particular derivation index. The application's index is simply
//! anything that implements `Ord`.
//!
//! [`KeychainTxOutIndex`] indexes script pubkeys of keychains and scans in relevant outpoints
//! (those with a `txout` containing an indexed script pubkey). Internally, this uses
//! [`SpkTxOutIndex`], but also maintains "revealed" and "lookahead" index counts per keychain.
//!
//! [`KeychainTracker`] combines [`ChainGraph`] and [`KeychainTxOutIndex`] and enforces atomic
//! changes between both these structures. [`KeychainScan`] is a structure used to update a
//! [`KeychainTracker`], and changes made on a [`KeychainTracker`] are reported by
//! [`KeychainChangeSet`]s.
//!
//! [`SpkTxOutIndex`]: crate::SpkTxOutIndex
use crate::{
chain_graph::{self, ChainGraph},
collections::BTreeMap,
sparse_chain::ChainPosition,
tx_graph::TxGraph,
ForEachTxOut,
};
#[cfg(feature = "miniscript")]
pub mod persist;
#[cfg(feature = "miniscript")]
pub use persist::*;
#[cfg(feature = "miniscript")]
mod tracker;
#[cfg(feature = "miniscript")]
pub use tracker::*;
#[cfg(feature = "miniscript")]
mod txout_index;
#[cfg(feature = "miniscript")]
pub use txout_index::*;
/// Represents updates to the derivation index of a [`KeychainTxOutIndex`].
///
/// It can be applied to [`KeychainTxOutIndex`] with [`apply_additions`]. [`DerivationAdditions`]
/// are monotone in that they will never decrease the revealed derivation index.
///
/// [`KeychainTxOutIndex`]: crate::keychain::KeychainTxOutIndex
/// [`apply_additions`]: crate::keychain::KeychainTxOutIndex::apply_additions
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(
feature = "serde",
derive(serde::Deserialize, serde::Serialize),
serde(
crate = "serde_crate",
bound(
deserialize = "K: Ord + serde::Deserialize<'de>",
serialize = "K: Ord + serde::Serialize"
)
)
)]
#[must_use]
pub struct DerivationAdditions<K>(pub BTreeMap<K, u32>);
impl<K> DerivationAdditions<K> {
/// Returns whether the additions are empty.
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
/// Get the inner map of the keychain to its new derivation index.
pub fn as_inner(&self) -> &BTreeMap<K, u32> {
&self.0
}
}
impl<K: Ord> DerivationAdditions<K> {
/// Append another [`DerivationAdditions`] into self.
///
/// If the keychain already exists, the index is only increased when the other's index is
/// greater than self's. If the keychain did not exist, the new keychain is appended.
pub fn append(&mut self, mut other: Self) {
self.0.iter_mut().for_each(|(key, index)| {
if let Some(other_index) = other.0.remove(key) {
*index = other_index.max(*index);
}
});
self.0.append(&mut other.0);
}
}
impl<K> Default for DerivationAdditions<K> {
fn default() -> Self {
Self(Default::default())
}
}
impl<K> AsRef<BTreeMap<K, u32>> for DerivationAdditions<K> {
fn as_ref(&self) -> &BTreeMap<K, u32> {
&self.0
}
}
#[derive(Clone, Debug, PartialEq)]
/// An update that includes the last active indexes of each keychain.
pub struct KeychainScan<K, P> {
/// The update data in the form of a chain that could be applied
pub update: ChainGraph<P>,
/// The last active indexes of each keychain
pub last_active_indices: BTreeMap<K, u32>,
}
impl<K, P> Default for KeychainScan<K, P> {
fn default() -> Self {
Self {
update: Default::default(),
last_active_indices: Default::default(),
}
}
}
impl<K, P> From<ChainGraph<P>> for KeychainScan<K, P> {
fn from(update: ChainGraph<P>) -> Self {
KeychainScan {
update,
last_active_indices: Default::default(),
}
}
}
/// Represents changes to a [`KeychainTracker`].
///
/// This is essentially a combination of [`DerivationAdditions`] and [`chain_graph::ChangeSet`].
#[derive(Clone, Debug)]
#[cfg_attr(
feature = "serde",
derive(serde::Deserialize, serde::Serialize),
serde(
crate = "serde_crate",
bound(
deserialize = "K: Ord + serde::Deserialize<'de>, P: serde::Deserialize<'de>",
serialize = "K: Ord + serde::Serialize, P: serde::Serialize"
)
)
)]
#[must_use]
pub struct KeychainChangeSet<K, P> {
/// The changes in local keychain derivation indices
pub derivation_indices: DerivationAdditions<K>,
/// The changes that have occurred in the blockchain
pub chain_graph: chain_graph::ChangeSet<P>,
}
impl<K, P> Default for KeychainChangeSet<K, P> {
fn default() -> Self {
Self {
chain_graph: Default::default(),
derivation_indices: Default::default(),
}
}
}
impl<K, P> KeychainChangeSet<K, P> {
/// Returns whether the [`KeychainChangeSet`] is empty (no changes recorded).
pub fn is_empty(&self) -> bool {
self.chain_graph.is_empty() && self.derivation_indices.is_empty()
}
/// Appends the changes in `other` into `self` such that applying `self` afterward has the same
/// effect as sequentially applying the original `self` and `other`.
///
/// Note the derivation indices cannot be decreased, so `other` will only change the derivation
/// index for a keychain if its value is higher than the one in `self`.
pub fn append(&mut self, other: KeychainChangeSet<K, P>)
where
K: Ord,
P: ChainPosition,
{
self.derivation_indices.append(other.derivation_indices);
self.chain_graph.append(other.chain_graph);
}
}
impl<K, P> From<chain_graph::ChangeSet<P>> for KeychainChangeSet<K, P> {
fn from(changeset: chain_graph::ChangeSet<P>) -> Self {
Self {
chain_graph: changeset,
..Default::default()
}
}
}
impl<K, P> From<DerivationAdditions<K>> for KeychainChangeSet<K, P> {
fn from(additions: DerivationAdditions<K>) -> Self {
Self {
derivation_indices: additions,
..Default::default()
}
}
}
impl<K, P> AsRef<TxGraph> for KeychainScan<K, P> {
fn as_ref(&self) -> &TxGraph {
self.update.graph()
}
}
impl<K, P> ForEachTxOut for KeychainChangeSet<K, P> {
fn for_each_txout(&self, f: impl FnMut((bitcoin::OutPoint, &bitcoin::TxOut))) {
self.chain_graph.for_each_txout(f)
}
}
/// Balance, differentiated into various categories.
#[derive(Debug, PartialEq, Eq, Clone, Default)]
#[cfg_attr(
feature = "serde",
derive(serde::Deserialize, serde::Serialize),
serde(crate = "serde_crate",)
)]
pub struct Balance {
/// All coinbase outputs not yet matured
pub immature: u64,
/// Unconfirmed UTXOs generated by a wallet tx
pub trusted_pending: u64,
/// Unconfirmed UTXOs received from an external wallet
pub untrusted_pending: u64,
/// Confirmed and immediately spendable balance
pub confirmed: u64,
}
impl Balance {
/// Get sum of trusted_pending and confirmed coins.
///
/// This is the balance you can spend right now that shouldn't get cancelled via another party
/// double spending it.
pub fn trusted_spendable(&self) -> u64 {
self.confirmed + self.trusted_pending
}
/// Get the whole balance visible to the wallet.
pub fn total(&self) -> u64 {
self.confirmed + self.trusted_pending + self.untrusted_pending + self.immature
}
}
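The two sums differ only in which pending buckets they admit; with concrete (made-up) numbers:

```rust
use bdk_chain::keychain::Balance;

let b = Balance {
    immature: 1_000,
    trusted_pending: 3_000,
    untrusted_pending: 2_000,
    confirmed: 10_000,
};
assert_eq!(b.trusted_spendable(), 13_000); // confirmed + trusted_pending
assert_eq!(b.total(), 16_000); // everything, matured or not
```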
impl core::fmt::Display for Balance {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(
f,
"{{ immature: {}, trusted_pending: {}, untrusted_pending: {}, confirmed: {} }}",
self.immature, self.trusted_pending, self.untrusted_pending, self.confirmed
)
}
}
impl core::ops::Add for Balance {
type Output = Self;
fn add(self, other: Self) -> Self {
Self {
immature: self.immature + other.immature,
trusted_pending: self.trusted_pending + other.trusted_pending,
untrusted_pending: self.untrusted_pending + other.untrusted_pending,
confirmed: self.confirmed + other.confirmed,
}
}
}
#[cfg(test)]
mod test {
use crate::TxHeight;
use super::*;
#[test]
fn append_keychain_derivation_indices() {
#[derive(Ord, PartialOrd, Eq, PartialEq, Clone, Debug)]
enum Keychain {
One,
Two,
Three,
Four,
}
let mut lhs_di = BTreeMap::<Keychain, u32>::default();
let mut rhs_di = BTreeMap::<Keychain, u32>::default();
lhs_di.insert(Keychain::One, 7);
lhs_di.insert(Keychain::Two, 0);
rhs_di.insert(Keychain::One, 3);
rhs_di.insert(Keychain::Two, 5);
lhs_di.insert(Keychain::Three, 3);
rhs_di.insert(Keychain::Four, 4);
let mut lhs = KeychainChangeSet {
derivation_indices: DerivationAdditions(lhs_di),
chain_graph: chain_graph::ChangeSet::<TxHeight>::default(),
};
let rhs = KeychainChangeSet {
derivation_indices: DerivationAdditions(rhs_di),
chain_graph: chain_graph::ChangeSet::<TxHeight>::default(),
};
lhs.append(rhs);
// Existing index doesn't update if the new index in `other` is lower than `self`.
assert_eq!(lhs.derivation_indices.0.get(&Keychain::One), Some(&7));
// Existing index updates if the new index in `other` is higher than `self`.
assert_eq!(lhs.derivation_indices.0.get(&Keychain::Two), Some(&5));
// Existing index is unchanged if keychain doesn't exist in `other`.
assert_eq!(lhs.derivation_indices.0.get(&Keychain::Three), Some(&3));
// New keychain gets added if the keychain is in `other` but not in `self`.
assert_eq!(lhs.derivation_indices.0.get(&Keychain::Four), Some(&4));
}
}

View File

@@ -0,0 +1,108 @@
//! Persistence for changes made to a [`KeychainTracker`].
//!
//! BDK's [`KeychainTracker`] needs somewhere to persist changes it makes during operation.
//! Operations like giving out a new address are crucial to persist so that next time the
//! application is loaded, it can find transactions related to that address.
//!
//! Note that the [`KeychainTracker`] does not read this persisted data during operation since it
//! always has a copy in memory.
//!
//! [`KeychainTracker`]: crate::keychain::KeychainTracker
use crate::{keychain, sparse_chain::ChainPosition};
/// `Persist` wraps a [`PersistBackend`] to create a convenient staging area for changes before they
/// are persisted. Not all changes made to the [`KeychainTracker`] need to be written to disk
/// right away, so you can use [`Persist::stage`] to *stage* changes first and then
/// [`Persist::commit`] to finally write them to disk.
///
/// [`KeychainTracker`]: keychain::KeychainTracker
#[derive(Debug)]
pub struct Persist<K, P, B> {
backend: B,
stage: keychain::KeychainChangeSet<K, P>,
}
impl<K, P, B> Persist<K, P, B> {
/// Create a new `Persist` from a [`PersistBackend`].
pub fn new(backend: B) -> Self {
Self {
backend,
stage: Default::default(),
}
}
/// Stage a `changeset` to be persisted later with [`commit`].
///
/// [`commit`]: Self::commit
pub fn stage(&mut self, changeset: keychain::KeychainChangeSet<K, P>)
where
K: Ord,
P: ChainPosition,
{
self.stage.append(changeset)
}
/// Get the changes that haven't been committed yet
pub fn staged(&self) -> &keychain::KeychainChangeSet<K, P> {
&self.stage
}
/// Commit the staged changes to the underlying persistence backend.
///
/// Returns a backend-defined error if this fails.
pub fn commit(&mut self) -> Result<(), B::WriteError>
where
B: PersistBackend<K, P>,
{
self.backend.append_changeset(&self.stage)?;
self.stage = Default::default();
Ok(())
}
}
/// A persistence backend for [`Persist`].
pub trait PersistBackend<K, P> {
/// The error the backend returns when it fails to write.
type WriteError: core::fmt::Debug;
/// The error the backend returns when it fails to load.
type LoadError: core::fmt::Debug;
/// Appends a new changeset to the persistent backend.
///
/// It is up to the backend what it does with this. It could store every changeset in a list or
/// insert the actual changes into a more structured database. All it needs to guarantee is
/// that [`load_into_keychain_tracker`] restores a keychain tracker to the state it would be in
/// if all changesets had been applied sequentially.
///
/// [`load_into_keychain_tracker`]: Self::load_into_keychain_tracker
fn append_changeset(
&mut self,
changeset: &keychain::KeychainChangeSet<K, P>,
) -> Result<(), Self::WriteError>;
/// Applies all the changesets the backend has received to `tracker`.
fn load_into_keychain_tracker(
&mut self,
tracker: &mut keychain::KeychainTracker<K, P>,
) -> Result<(), Self::LoadError>;
}
impl<K, P> PersistBackend<K, P> for () {
type WriteError = ();
type LoadError = ();
fn append_changeset(
&mut self,
_changeset: &keychain::KeychainChangeSet<K, P>,
) -> Result<(), Self::WriteError> {
Ok(())
}
fn load_into_keychain_tracker(
&mut self,
_tracker: &mut keychain::KeychainTracker<K, P>,
) -> Result<(), Self::LoadError> {
Ok(())
}
}
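Beyond the no-op unit backend above, the trait's contract (sequentially replaying appends reproduces the tracker) is easiest to see with an in-memory sketch; `VecBackend` is illustrative only, not BDK API:

```rust
use bdk_chain::{
    keychain::{KeychainChangeSet, KeychainTracker, PersistBackend},
    sparse_chain::ChainPosition,
};

/// Keeps every appended changeset in memory and replays them on load.
struct VecBackend<K, P>(Vec<KeychainChangeSet<K, P>>);

impl<K, P> PersistBackend<K, P> for VecBackend<K, P>
where
    K: Ord + Clone + core::fmt::Debug,
    P: ChainPosition + Clone,
{
    type WriteError = ();
    type LoadError = ();

    fn append_changeset(&mut self, changeset: &KeychainChangeSet<K, P>) -> Result<(), ()> {
        self.0.push(changeset.clone());
        Ok(())
    }

    fn load_into_keychain_tracker(
        &mut self,
        tracker: &mut KeychainTracker<K, P>,
    ) -> Result<(), ()> {
        // Replaying in order must reproduce the tracker's state exactly.
        for changeset in &self.0 {
            tracker.apply_changeset(changeset.clone());
        }
        Ok(())
    }
}
```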

View File

@@ -0,0 +1,308 @@
use bitcoin::Transaction;
use miniscript::{Descriptor, DescriptorPublicKey};
use crate::{
chain_graph::{self, ChainGraph},
collections::*,
keychain::{KeychainChangeSet, KeychainScan, KeychainTxOutIndex},
sparse_chain::{self, SparseChain},
tx_graph::TxGraph,
BlockId, FullTxOut, TxHeight,
};
use super::{Balance, DerivationAdditions};
/// A convenient combination of a [`KeychainTxOutIndex`] and a [`ChainGraph`].
///
/// The [`KeychainTracker`] atomically updates its [`KeychainTxOutIndex`] whenever new chain data is
/// incorporated into its internal [`ChainGraph`].
#[derive(Clone, Debug)]
pub struct KeychainTracker<K, P> {
/// Index mapping script pubkeys to transaction outputs
pub txout_index: KeychainTxOutIndex<K>,
chain_graph: ChainGraph<P>,
}
impl<K, P> KeychainTracker<K, P>
where
P: sparse_chain::ChainPosition,
K: Ord + Clone + core::fmt::Debug,
{
/// Add a keychain to the tracker's `txout_index` with a descriptor to derive addresses.
/// This is just shorthand for calling [`KeychainTxOutIndex::add_keychain`] on the internal
/// `txout_index`.
///
/// Adding a keychain means you will be able to derive new script pubkeys under that keychain
/// and the tracker will discover transaction outputs with those script pubkeys.
pub fn add_keychain(&mut self, keychain: K, descriptor: Descriptor<DescriptorPublicKey>) {
self.txout_index.add_keychain(keychain, descriptor)
}
/// Get the internal map of keychains to their descriptors. This is just shorthand for calling
/// [`KeychainTxOutIndex::keychains`] on the internal `txout_index`.
pub fn keychains(&mut self) -> &BTreeMap<K, Descriptor<DescriptorPublicKey>> {
self.txout_index.keychains()
}
/// Get the checkpoint limit of the internal [`SparseChain`].
///
/// Refer to [`SparseChain::checkpoint_limit`] for more.
pub fn checkpoint_limit(&self) -> Option<usize> {
self.chain_graph.checkpoint_limit()
}
/// Set the checkpoint limit of the internal [`SparseChain`].
///
/// Refer to [`SparseChain::set_checkpoint_limit`] for more.
pub fn set_checkpoint_limit(&mut self, limit: Option<usize>) {
self.chain_graph.set_checkpoint_limit(limit)
}
/// Determines the resultant [`KeychainChangeSet`] if the given [`KeychainScan`] is applied.
///
/// Internally, we call [`ChainGraph::determine_changeset`] and also determine the additions of
/// [`KeychainTxOutIndex`].
pub fn determine_changeset(
&self,
scan: &KeychainScan<K, P>,
) -> Result<KeychainChangeSet<K, P>, chain_graph::UpdateError<P>> {
// TODO: `KeychainTxOutIndex::determine_additions`
let mut derivation_indices = scan.last_active_indices.clone();
derivation_indices.retain(|keychain, index| {
match self.txout_index.last_revealed_index(keychain) {
Some(existing) => *index > existing,
None => true,
}
});
Ok(KeychainChangeSet {
derivation_indices: DerivationAdditions(derivation_indices),
chain_graph: self.chain_graph.determine_changeset(&scan.update)?,
})
}
/// Directly applies a [`KeychainScan`] on [`KeychainTracker`].
///
/// This is equivalent to calling [`determine_changeset`] and [`apply_changeset`] in sequence.
///
/// [`determine_changeset`]: Self::determine_changeset
/// [`apply_changeset`]: Self::apply_changeset
pub fn apply_update(
&mut self,
scan: KeychainScan<K, P>,
) -> Result<KeychainChangeSet<K, P>, chain_graph::UpdateError<P>> {
let changeset = self.determine_changeset(&scan)?;
self.apply_changeset(changeset.clone());
Ok(changeset)
}
/// Applies the changes in `changeset` to [`KeychainTracker`].
///
/// Internally, this calls [`KeychainTxOutIndex::apply_additions`] and
/// [`ChainGraph::apply_changeset`] in sequence.
pub fn apply_changeset(&mut self, changeset: KeychainChangeSet<K, P>) {
let KeychainChangeSet {
derivation_indices,
chain_graph,
} = changeset;
self.txout_index.apply_additions(derivation_indices);
let _ = self.txout_index.scan(&chain_graph);
self.chain_graph.apply_changeset(chain_graph)
}
/// Iterates through [`FullTxOut`]s that are considered to exist in our representation of the
/// blockchain/mempool.
///
/// In other words, these are `txout`s of confirmed and in-mempool transactions, based on our
/// view of the blockchain/mempool.
pub fn full_txouts(&self) -> impl Iterator<Item = (&(K, u32), FullTxOut<P>)> + '_ {
self.txout_index
.txouts()
.filter_map(move |(spk_i, op, _)| Some((spk_i, self.chain_graph.full_txout(op)?)))
}
/// Iterates through [`FullTxOut`]s that are unspent outputs.
///
/// Refer to [`full_txouts`] for more.
///
/// [`full_txouts`]: Self::full_txouts
pub fn full_utxos(&self) -> impl Iterator<Item = (&(K, u32), FullTxOut<P>)> + '_ {
self.full_txouts()
.filter(|(_, txout)| txout.spent_by.is_none())
}
/// Returns a reference to the internal [`ChainGraph`].
pub fn chain_graph(&self) -> &ChainGraph<P> {
&self.chain_graph
}
/// Returns a reference to the internal [`TxGraph`] (which is part of the [`ChainGraph`]).
pub fn graph(&self) -> &TxGraph {
self.chain_graph().graph()
}
/// Returns a reference to the internal [`SparseChain`] (which is part of the [`ChainGraph`]).
pub fn chain(&self) -> &SparseChain<P> {
self.chain_graph().chain()
}
/// Determines the changes as a result of inserting `block_id` (a height and block hash) into the
/// tracker.
///
/// The caller is responsible for guaranteeing that a block exists at that height. If a
/// checkpoint already exists at that height with a different hash, this will return an error.
/// Otherwise, it returns the [`KeychainChangeSet`] that would result from inserting the
/// checkpoint.
///
/// **Warning**: This only determines the changes; it does not apply them. Use
/// [`apply_changeset`] to apply the changes and persist them to disk if you need to restore
/// them.
///
/// [`apply_changeset`]: Self::apply_changeset
pub fn insert_checkpoint_preview(
&self,
block_id: BlockId,
) -> Result<KeychainChangeSet<K, P>, chain_graph::InsertCheckpointError> {
Ok(KeychainChangeSet {
chain_graph: self.chain_graph.insert_checkpoint_preview(block_id)?,
..Default::default()
})
}
/// Directly insert a `block_id` into the tracker.
///
/// This is equivalent of calling [`insert_checkpoint_preview`] and [`apply_changeset`] in
/// sequence.
///
/// [`insert_checkpoint_preview`]: Self::insert_checkpoint_preview
/// [`apply_changeset`]: Self::apply_changeset
pub fn insert_checkpoint(
&mut self,
block_id: BlockId,
) -> Result<KeychainChangeSet<K, P>, chain_graph::InsertCheckpointError> {
let changeset = self.insert_checkpoint_preview(block_id)?;
self.apply_changeset(changeset.clone());
Ok(changeset)
}
/// Determines the changes as a result of inserting a transaction into the inner [`ChainGraph`]
/// and optionally into the inner chain at `pos`.
///
/// **Warning**: This only determines the changes; it does not apply them. Use [`insert_tx`] to
/// apply the changes and persist them to disk if you need to restore them.
///
/// [`insert_tx`]: Self::insert_tx
pub fn insert_tx_preview(
&self,
tx: Transaction,
pos: P,
) -> Result<KeychainChangeSet<K, P>, chain_graph::InsertTxError<P>> {
Ok(KeychainChangeSet {
chain_graph: self.chain_graph.insert_tx_preview(tx, pos)?,
..Default::default()
})
}
/// Directly insert a transaction into the inner [`ChainGraph`] and optionally into the inner
/// chain at `position`.
///
/// This is equivalent of calling [`insert_tx_preview`] and [`apply_changeset`] in sequence.
///
/// [`insert_tx_preview`]: Self::insert_tx_preview
/// [`apply_changeset`]: Self::apply_changeset
pub fn insert_tx(
&mut self,
tx: Transaction,
pos: P,
) -> Result<KeychainChangeSet<K, P>, chain_graph::InsertTxError<P>> {
let changeset = self.insert_tx_preview(tx, pos)?;
self.apply_changeset(changeset.clone());
Ok(changeset)
}
/// Returns the *balance* of the tracker, i.e., the value of the unspent transaction outputs it tracks.
///
/// The caller provides a `should_trust` predicate which must decide whether the value of
/// unconfirmed outputs on this keychain are guaranteed to be realized or not. For example:
///
/// - For an *internal* (change) keychain, `should_trust` should generally be `true` since even if
/// you lose an internal output due to eviction, you will always gain back the value from whatever output the
/// unconfirmed transaction was spending (since that output is presumably from your wallet).
/// - For an *external* keychain, you might want `should_trust` to return `false` since someone may cancel (by double spending)
/// a payment made to addresses on that keychain.
///
/// When in doubt, have `should_trust` return `false`. This doesn't do anything other than
/// change where an unconfirmed output's value is accounted for in `Balance`.
pub fn balance(&self, mut should_trust: impl FnMut(&K) -> bool) -> Balance {
let mut immature = 0;
let mut trusted_pending = 0;
let mut untrusted_pending = 0;
let mut confirmed = 0;
let last_sync_height = self.chain().latest_checkpoint().map(|latest| latest.height);
for ((keychain, _), utxo) in self.full_utxos() {
let chain_position = &utxo.chain_position;
match chain_position.height() {
TxHeight::Confirmed(_) => {
if utxo.is_on_coinbase {
if utxo.is_mature(
last_sync_height
.expect("since it's confirmed we must have a checkpoint"),
) {
confirmed += utxo.txout.value;
} else {
immature += utxo.txout.value;
}
} else {
confirmed += utxo.txout.value;
}
}
TxHeight::Unconfirmed => {
if should_trust(keychain) {
trusted_pending += utxo.txout.value;
} else {
untrusted_pending += utxo.txout.value;
}
}
}
}
Balance {
immature,
trusted_pending,
untrusted_pending,
confirmed,
}
}
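In the common external/internal layout the predicate reduces to "is this the change keychain?"; a sketch (the `MyKeychain` enum mirrors the synopsis elsewhere in this diff):

```rust
use bdk_chain::{keychain::KeychainTracker, TxHeight};

#[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd)]
enum MyKeychain {
    External,
    Internal,
}

/// Sats spendable now without risking a third-party double-spend.
fn spendable_now(tracker: &KeychainTracker<MyKeychain, TxHeight>) -> u64 {
    tracker
        .balance(|keychain| matches!(keychain, MyKeychain::Internal))
        .trusted_spendable()
}
```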
/// Returns the balance of all spendable confirmed unspent outputs of this tracker at a
/// particular height.
pub fn balance_at(&self, height: u32) -> u64 {
self.full_txouts()
.filter(|(_, full_txout)| full_txout.is_spendable_at(height))
.map(|(_, full_txout)| full_txout.txout.value)
.sum()
}
}
impl<K, P> Default for KeychainTracker<K, P> {
fn default() -> Self {
Self {
txout_index: Default::default(),
chain_graph: Default::default(),
}
}
}
impl<K, P> AsRef<SparseChain<P>> for KeychainTracker<K, P> {
fn as_ref(&self) -> &SparseChain<P> {
self.chain_graph.chain()
}
}
impl<K, P> AsRef<TxGraph> for KeychainTracker<K, P> {
fn as_ref(&self) -> &TxGraph {
self.chain_graph.graph()
}
}
impl<K, P> AsRef<ChainGraph<P>> for KeychainTracker<K, P> {
fn as_ref(&self) -> &ChainGraph<P> {
&self.chain_graph
}
}

View File

@@ -0,0 +1,590 @@
use crate::{
collections::*,
miniscript::{Descriptor, DescriptorPublicKey},
ForEachTxOut, SpkTxOutIndex,
};
use alloc::{borrow::Cow, vec::Vec};
use bitcoin::{secp256k1::Secp256k1, OutPoint, Script, TxOut};
use core::{fmt::Debug, ops::Deref};
use super::DerivationAdditions;
/// Maximum [BIP32](https://bips.xyz/32) derivation index.
pub const BIP32_MAX_INDEX: u32 = (1 << 31) - 1;
/// A convenient wrapper around [`SpkTxOutIndex`] that relates script pubkeys to miniscript public
/// [`Descriptor`]s.
///
/// Descriptors are referenced by the provided keychain generic (`K`).
///
/// Script pubkeys for a descriptor are revealed chronologically from index 0. I.e., if the last
/// revealed index of a descriptor is 5, scripts at indices 0 to 4 are guaranteed to be already
/// revealed. In addition to revealed scripts, we have a `lookahead` parameter for each keychain,
/// which defines the number of script pubkeys to store ahead of the last revealed index.
///
/// Methods that could update the last revealed index will return [`DerivationAdditions`] to report
/// these changes. This can be persisted for future recovery.
///
/// ## Synopsis
///
/// ```
/// use bdk_chain::keychain::KeychainTxOutIndex;
/// # use bdk_chain::{ miniscript::{Descriptor, DescriptorPublicKey} };
/// # use core::str::FromStr;
///
/// // imagine our service has internal and external addresses but also addresses for users
/// #[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd)]
/// enum MyKeychain {
/// External,
/// Internal,
/// MyAppUser {
/// user_id: u32
/// }
/// }
///
/// let mut txout_index = KeychainTxOutIndex::<MyKeychain>::default();
///
/// # let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only();
/// # let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
/// # let (internal_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/*)").unwrap();
/// # let descriptor_for_user_42 = external_descriptor.clone();
/// txout_index.add_keychain(MyKeychain::External, external_descriptor);
/// txout_index.add_keychain(MyKeychain::Internal, internal_descriptor);
/// txout_index.add_keychain(MyKeychain::MyAppUser { user_id: 42 }, descriptor_for_user_42);
///
/// let new_spk_for_user = txout_index.reveal_next_spk(&MyKeychain::MyAppUser{ user_id: 42 });
/// ```
///
/// [`Ord`]: core::cmp::Ord
/// [`SpkTxOutIndex`]: crate::spk_txout_index::SpkTxOutIndex
/// [`Descriptor`]: crate::miniscript::Descriptor
#[derive(Clone, Debug)]
pub struct KeychainTxOutIndex<K> {
inner: SpkTxOutIndex<(K, u32)>,
// descriptors of each keychain
keychains: BTreeMap<K, Descriptor<DescriptorPublicKey>>,
// last revealed indexes
last_revealed: BTreeMap<K, u32>,
// lookahead settings for each keychain
lookahead: BTreeMap<K, u32>,
}
impl<K> Default for KeychainTxOutIndex<K> {
fn default() -> Self {
Self {
inner: SpkTxOutIndex::default(),
keychains: BTreeMap::default(),
last_revealed: BTreeMap::default(),
lookahead: BTreeMap::default(),
}
}
}
impl<K> Deref for KeychainTxOutIndex<K> {
type Target = SpkTxOutIndex<(K, u32)>;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
/// Scans an object for relevant outpoints, which are stored and indexed internally.
///
/// If the matched script pubkey is part of the lookahead, the last stored index is updated for
/// the script pubkey's keychain and the [`DerivationAdditions`] returned will reflect the
/// change.
///
/// Typically, this method is used in two situations:
///
/// 1. After loading transaction data from the disk, you may scan over all the txouts to
/// repopulate the index with your txouts.
/// 2. When getting new data from the chain, you usually scan it before incorporating it into
/// your chain state (i.e., `SparseChain`, `ChainGraph`).
///
/// See [`ForEachTxout`] for the types that support this.
///
/// [`ForEachTxout`]: crate::ForEachTxOut
pub fn scan(&mut self, txouts: &impl ForEachTxOut) -> DerivationAdditions<K> {
let mut additions = DerivationAdditions::<K>::default();
txouts.for_each_txout(|(op, txout)| additions.append(self.scan_txout(op, txout)));
additions
}
/// Scan a single outpoint for a matching script pubkey.
///
/// If it matches, this will store and index it.
pub fn scan_txout(&mut self, op: OutPoint, txout: &TxOut) -> DerivationAdditions<K> {
match self.inner.scan_txout(op, txout).cloned() {
Some((keychain, index)) => self.reveal_to_target(&keychain, index).1,
None => DerivationAdditions::default(),
}
}
/// Return a reference to the internal [`SpkTxOutIndex`].
pub fn inner(&self) -> &SpkTxOutIndex<(K, u32)> {
&self.inner
}
/// Return a reference to the internal map of the keychain to descriptors.
pub fn keychains(&self) -> &BTreeMap<K, Descriptor<DescriptorPublicKey>> {
&self.keychains
}
/// Add a keychain to the tracker's `txout_index` with a descriptor to derive addresses.
///
/// Adding a keychain means you will be able to derive new script pubkeys under that keychain
/// and the txout index will discover transaction outputs with those script pubkeys.
///
/// # Panics
///
/// This will panic if a different `descriptor` is introduced to the same `keychain`.
pub fn add_keychain(&mut self, keychain: K, descriptor: Descriptor<DescriptorPublicKey>) {
let old_descriptor = &*self.keychains.entry(keychain).or_insert(descriptor.clone());
assert_eq!(
&descriptor, old_descriptor,
"keychain already contains a different descriptor"
);
}
/// Return the lookahead setting for each keychain.
///
/// Refer to [`set_lookahead`] for a deeper explanation of the `lookahead`.
///
/// [`set_lookahead`]: Self::set_lookahead
pub fn lookaheads(&self) -> &BTreeMap<K, u32> {
&self.lookahead
}
/// Convenience method to call [`set_lookahead`] for all keychains.
///
/// [`set_lookahead`]: Self::set_lookahead
pub fn set_lookahead_for_all(&mut self, lookahead: u32) {
for keychain in &self.keychains.keys().cloned().collect::<Vec<_>>() {
self.lookahead.insert(keychain.clone(), lookahead);
self.replenish_lookahead(keychain);
}
}
/// Set the lookahead count for `keychain`.
///
/// The lookahead is the number of scripts to cache ahead of the last stored script index. This
/// is useful during a scan via [`scan`] or [`scan_txout`].
///
/// # Panics
///
/// This will panic if the `keychain` does not exist.
///
/// [`scan`]: Self::scan
/// [`scan_txout`]: Self::scan_txout
pub fn set_lookahead(&mut self, keychain: &K, lookahead: u32) {
self.lookahead.insert(keychain.clone(), lookahead);
self.replenish_lookahead(keychain);
}
/// Convenience method to call [`lookahead_to_target`] for multiple keychains.
///
/// [`lookahead_to_target`]: Self::lookahead_to_target
pub fn lookahead_to_target_multi(&mut self, target_indexes: BTreeMap<K, u32>) {
for (keychain, target_index) in target_indexes {
self.lookahead_to_target(&keychain, target_index)
}
}
/// Store lookahead scripts until `target_index`.
///
/// This does not change the `lookahead` setting.
pub fn lookahead_to_target(&mut self, keychain: &K, target_index: u32) {
let next_index = self.next_store_index(keychain);
if let Some(temp_lookahead) = target_index.checked_sub(next_index).filter(|&v| v > 0) {
let old_lookahead = self.lookahead.insert(keychain.clone(), temp_lookahead);
self.replenish_lookahead(keychain);
// revert
match old_lookahead {
Some(lookahead) => self.lookahead.insert(keychain.clone(), lookahead),
None => self.lookahead.remove(keychain),
};
}
}
fn replenish_lookahead(&mut self, keychain: &K) {
let descriptor = self.keychains.get(keychain).expect("keychain must exist");
let next_store_index = self.next_store_index(keychain);
let next_reveal_index = self.last_revealed.get(keychain).map_or(0, |v| *v + 1);
let lookahead = self.lookahead.get(keychain).map_or(0, |v| *v);
for (new_index, new_spk) in range_descriptor_spks(
Cow::Borrowed(descriptor),
next_store_index..next_reveal_index + lookahead,
) {
let _inserted = self
.inner
.insert_spk((keychain.clone(), new_index), new_spk);
debug_assert!(_inserted, "replenish lookahead: must not have existing spk: keychain={:?}, lookahead={}, next_store_index={}, next_reveal_index={}", keychain, lookahead, next_store_index, next_reveal_index);
}
}
fn next_store_index(&self, keychain: &K) -> u32 {
self.inner()
.all_spks()
.range((keychain.clone(), u32::MIN)..(keychain.clone(), u32::MAX))
.last()
.map_or(0, |((_, v), _)| *v + 1)
}
/// Generates script pubkey iterators for every `keychain`. The iterators iterate over all
/// derivable script pubkeys.
pub fn spks_of_all_keychains(
&self,
) -> BTreeMap<K, impl Iterator<Item = (u32, Script)> + Clone> {
self.keychains
.iter()
.map(|(keychain, descriptor)| {
(
keychain.clone(),
range_descriptor_spks(Cow::Owned(descriptor.clone()), 0..),
)
})
.collect()
}
/// Generates a script pubkey iterator for the given `keychain`'s descriptor (if it exists). The
/// iterator iterates over all derivable scripts of the keychain's descriptor.
///
/// # Panics
///
/// This will panic if the `keychain` does not exist.
pub fn spks_of_keychain(&self, keychain: &K) -> impl Iterator<Item = (u32, Script)> + Clone {
let descriptor = self
.keychains
.get(keychain)
.expect("keychain must exist")
.clone();
range_descriptor_spks(Cow::Owned(descriptor), 0..)
}
/// Convenience method to get [`revealed_spks_of_keychain`] of all keychains.
///
/// [`revealed_spks_of_keychain`]: Self::revealed_spks_of_keychain
pub fn revealed_spks_of_all_keychains(
&self,
) -> BTreeMap<K, impl Iterator<Item = (u32, &Script)> + Clone> {
self.keychains
.keys()
.map(|keychain| (keychain.clone(), self.revealed_spks_of_keychain(keychain)))
.collect()
}
/// Iterates over the script pubkeys revealed by this index under `keychain`.
pub fn revealed_spks_of_keychain(
&self,
keychain: &K,
) -> impl DoubleEndedIterator<Item = (u32, &Script)> + Clone {
let next_index = self.last_revealed.get(keychain).map_or(0, |v| *v + 1);
self.inner
.all_spks()
.range((keychain.clone(), u32::MIN)..(keychain.clone(), next_index))
.map(|((_, derivation_index), spk)| (*derivation_index, spk))
}
/// Get the next derivation index for `keychain`. The next index is the index after the last revealed
/// derivation index.
///
/// The second field in the returned tuple represents whether the next derivation index is new.
/// There are two scenarios where the next derivation index is reused (not new):
///
/// 1. The keychain's descriptor has no wildcard, and a script has already been revealed.
/// 2. The number of revealed scripts has already reached 2^31 (refer to BIP-32).
///
/// Not checking the second field of the tuple may result in address reuse.
///
/// # Panics
///
/// Panics if the `keychain` does not exist.
pub fn next_index(&self, keychain: &K) -> (u32, bool) {
let descriptor = self.keychains.get(keychain).expect("keychain must exist");
let last_index = self.last_revealed.get(keychain).cloned();
// we can only get the next index if the wildcard exists.
let has_wildcard = descriptor.has_wildcard();
match last_index {
// if there is no index, next_index is always 0.
None => (0, true),
// descriptors without wildcards can only have one index.
Some(_) if !has_wildcard => (0, false),
// derivation index must be < 2^31 (BIP-32).
Some(index) if index > BIP32_MAX_INDEX => {
unreachable!("index is out of bounds")
}
Some(index) if index == BIP32_MAX_INDEX => (index, false),
// get the next derivation index.
Some(index) => (index + 1, true),
}
}
/// Get the last derivation index that is revealed for each keychain.
///
/// Keychains with no revealed indices will not be included in the returned [`BTreeMap`].
pub fn last_revealed_indices(&self) -> &BTreeMap<K, u32> {
&self.last_revealed
}
/// Get the last derivation index revealed for `keychain`.
pub fn last_revealed_index(&self, keychain: &K) -> Option<u32> {
self.last_revealed.get(keychain).cloned()
}
/// Convenience method to call [`Self::reveal_to_target`] on multiple keychains.
pub fn reveal_to_target_multi(
&mut self,
keychains: &BTreeMap<K, u32>,
) -> (
BTreeMap<K, impl Iterator<Item = (u32, Script)>>,
DerivationAdditions<K>,
) {
let mut additions = DerivationAdditions::default();
let mut spks = BTreeMap::new();
for (keychain, &index) in keychains {
let (new_spks, new_additions) = self.reveal_to_target(keychain, index);
if !new_additions.is_empty() {
spks.insert(keychain.clone(), new_spks);
additions.append(new_additions);
}
}
(spks, additions)
}
/// Reveals script pubkeys of the `keychain`'s descriptor **up to and including** the
/// `target_index`.
///
/// If the `target_index` cannot be reached (due to the descriptor having no wildcard and/or
/// the `target_index` is in the hardened index range), this method will make a best-effort and
/// reveal up to the last possible index.
///
/// This returns an iterator of newly revealed indices (alongside their scripts) and a
/// [`DerivationAdditions`], which reports updates to the latest revealed index. If no new script
/// pubkeys are revealed, then both of these will be empty.
///
/// # Panics
///
/// Panics if `keychain` does not exist.
pub fn reveal_to_target(
&mut self,
keychain: &K,
target_index: u32,
) -> (impl Iterator<Item = (u32, Script)>, DerivationAdditions<K>) {
let descriptor = self.keychains.get(keychain).expect("keychain must exist");
let has_wildcard = descriptor.has_wildcard();
let target_index = if has_wildcard { target_index } else { 0 };
let next_reveal_index = self.last_revealed.get(keychain).map_or(0, |v| *v + 1);
let lookahead = self.lookahead.get(keychain).map_or(0, |v| *v);
debug_assert_eq!(
next_reveal_index + lookahead,
self.next_store_index(keychain)
);
// if we need to reveal new indices, the latest revealed index goes here
let mut reveal_to_index = None;
// if the target is not yet revealed, but is already stored (due to lookahead), we need to
// set the `reveal_to_index` as target here (as the `for` loop below only updates
// `reveal_to_index` for indexes that are NOT stored)
if next_reveal_index <= target_index && target_index < next_reveal_index + lookahead {
reveal_to_index = Some(target_index);
}
// we range over indexes that are not stored
let range = next_reveal_index + lookahead..=target_index + lookahead;
for (new_index, new_spk) in range_descriptor_spks(Cow::Borrowed(descriptor), range) {
let _inserted = self
.inner
.insert_spk((keychain.clone(), new_index), new_spk);
debug_assert!(_inserted, "must not have existing spk");
// everything after `target_index` is stored for lookahead only
if new_index <= target_index {
reveal_to_index = Some(new_index);
}
}
match reveal_to_index {
Some(index) => {
let _old_index = self.last_revealed.insert(keychain.clone(), index);
debug_assert!(_old_index < Some(index));
(
range_descriptor_spks(
Cow::Owned(descriptor.clone()),
next_reveal_index..index + 1,
),
DerivationAdditions(core::iter::once((keychain.clone(), index)).collect()),
)
}
None => (
range_descriptor_spks(
Cow::Owned(descriptor.clone()),
next_reveal_index..next_reveal_index,
),
DerivationAdditions::default(),
),
}
}
/// Attempts to reveal the next script pubkey for `keychain`.
///
/// Returns the derivation index of the revealed script pubkey, the revealed script pubkey and a
/// [`DerivationAdditions`] which represents changes in the last revealed index (if any).
///
/// When a new script cannot be revealed, we return the last revealed script and an empty
/// [`DerivationAdditions`]. There are two scenarios when a new script pubkey cannot be derived:
///
/// 1. The descriptor has no wildcard and already has one script revealed.
/// 2. The descriptor has already revealed scripts up to the numeric bound.
///
/// # Panics
///
/// Panics if the `keychain` does not exist.
pub fn reveal_next_spk(&mut self, keychain: &K) -> ((u32, &Script), DerivationAdditions<K>) {
let (next_index, _) = self.next_index(keychain);
let additions = self.reveal_to_target(keychain, next_index).1;
let script = self
.inner
.spk_at_index(&(keychain.clone(), next_index))
.expect("script must already be stored");
((next_index, script), additions)
}
/// Gets the next unused script pubkey in the keychain. I.e., the script pubkey with the lowest
/// index that has not been used yet.
///
/// This will derive and reveal a new script pubkey if no more unused script pubkeys exist.
///
/// If the descriptor has no wildcard and already has a used script pubkey, or if the descriptor
/// has used all scripts up to the derivation bound, then the last derived script pubkey will be
/// returned.
///
/// # Panics
///
/// Panics if the `keychain` has never been added to the index.
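///
/// # Example
///
/// A minimal sketch, assuming a hypothetical "external" keychain:
///
/// ```ignore
/// let ((index, spk), additions) = txout_index.next_unused_spk(&"external");
/// // `spk` is the lowest-index revealed-but-unused script pubkey, or a freshly
/// // revealed one if every revealed script pubkey has been used
/// ```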
pub fn next_unused_spk(&mut self, keychain: &K) -> ((u32, &Script), DerivationAdditions<K>) {
let need_new = self.unused_spks_of_keychain(keychain).next().is_none();
// this rather strange branch is needed because of some lifetime issues
if need_new {
self.reveal_next_spk(keychain)
} else {
(
self.unused_spks_of_keychain(keychain)
.next()
.expect("we already know next exists"),
DerivationAdditions::default(),
)
}
}
/// Marks the script pubkey at `index` as used even though the tracker hasn't seen an output with it.
/// This only has an effect when the `index` had been added to `self` already and was unused.
///
/// Returns whether the `index` was initially present as `unused`.
///
/// This is useful when you want to reserve a script pubkey for something but don't want to add
/// the transaction output using it to the index yet. Other callers will consider `index` on
/// `keychain` used until you call [`unmark_used`].
///
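/// # Example
///
/// A minimal sketch, assuming a hypothetical "external" keychain:
///
/// ```ignore
/// let was_unused = txout_index.mark_used(&"external", 5);
/// // ... and, if the reservation is abandoned, release it again:
/// txout_index.unmark_used(&"external", 5);
/// ```
///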
/// [`unmark_used`]: Self::unmark_used
pub fn mark_used(&mut self, keychain: &K, index: u32) -> bool {
self.inner.mark_used(&(keychain.clone(), index))
}
/// Undoes the effect of [`mark_used`]. Returns whether the `index` is inserted back into
/// `unused`.
///
/// Note that if `self` has scanned an output with this script pubkey, then this will have no
/// effect.
///
/// [`mark_used`]: Self::mark_used
pub fn unmark_used(&mut self, keychain: &K, index: u32) -> bool {
self.inner.unmark_used(&(keychain.clone(), index))
}
/// Iterates over all unused script pubkeys for a `keychain` stored in the index.
pub fn unused_spks_of_keychain(
&self,
keychain: &K,
) -> impl DoubleEndedIterator<Item = (u32, &Script)> {
let next_index = self.last_revealed.get(keychain).map_or(0, |&v| v + 1);
let range = (keychain.clone(), u32::MIN)..(keychain.clone(), next_index);
self.inner
.unused_spks(range)
.map(|((_, i), script)| (*i, script))
}
/// Iterates over all the [`OutPoint`] that have a `TxOut` with a script pubkey derived from
/// `keychain`.
pub fn txouts_of_keychain(
&self,
keychain: &K,
) -> impl DoubleEndedIterator<Item = (u32, OutPoint)> + '_ {
self.inner
.outputs_in_range((keychain.clone(), u32::MIN)..(keychain.clone(), u32::MAX))
.map(|((_, i), op)| (*i, op))
}
/// Returns the highest derivation index of the `keychain` where [`KeychainTxOutIndex`] has
/// found a [`TxOut`] with its script pubkey.
pub fn last_used_index(&self, keychain: &K) -> Option<u32> {
self.txouts_of_keychain(keychain).last().map(|(i, _)| i)
}
/// Returns the highest derivation index of each keychain for which [`KeychainTxOutIndex`] has
/// found a [`TxOut`] with its script pubkey.
pub fn last_used_indices(&self) -> BTreeMap<K, u32> {
self.keychains
.iter()
.filter_map(|(keychain, _)| {
self.last_used_index(keychain)
.map(|index| (keychain.clone(), index))
})
.collect()
}
/// Applies the derivation additions to the [`KeychainTxOutIndex`], extending the number of
/// derived scripts per keychain, as specified in the `additions`.
pub fn apply_additions(&mut self, additions: DerivationAdditions<K>) {
let _ = self.reveal_to_target_multi(&additions.0);
}
}
fn range_descriptor_spks<'a, R>(
descriptor: Cow<'a, Descriptor<DescriptorPublicKey>>,
range: R,
) -> impl Iterator<Item = (u32, Script)> + Clone + Send + 'a
where
R: Iterator<Item = u32> + Clone + Send + 'a,
{
let secp = Secp256k1::verification_only();
let has_wildcard = descriptor.has_wildcard();
range
.into_iter()
// non-wildcard descriptors can only have one derivation index (0)
.take_while(move |&index| has_wildcard || index == 0)
// we can only iterate over non-hardened indices
.take_while(|&index| index <= BIP32_MAX_INDEX)
.map(
move |index| -> Result<_, miniscript::descriptor::ConversionError> {
Ok((
index,
descriptor
.at_derivation_index(index)
.derived_descriptor(&secp)?
.script_pubkey(),
))
},
)
.take_while(Result::is_ok)
.map(Result::unwrap)
}

View File

@@ -1,4 +1,4 @@
//! This crate is a collection of core structures for [Bitcoin Dev Kit].
//! This crate is a collection of core structures for [Bitcoin Dev Kit] (alpha release).
//!
//! The goal of this crate is to give wallets the mechanisms needed to:
//!
@@ -12,33 +12,23 @@
//! you do it synchronously or asynchronously. If you know a fact about the blockchain, you can just
//! tell `bdk_chain`'s APIs about it, and that information will be integrated, if it can be done
//! consistently.
//! 2. Data persistence agnostic -- `bdk_chain` does not care where you cache on-chain data, what you
//! cache or how you retrieve it from persistent storage.
//! 2. Error-free APIs.
//! 3. Data persistence agnostic -- `bdk_chain` does not care where you cache on-chain data, what you
//! cache or how you fetch it.
//!
//! [Bitcoin Dev Kit]: https://bitcoindevkit.org/
#![no_std]
#![warn(missing_docs)]
pub use bitcoin;
mod balance;
pub use balance::*;
pub mod chain_graph;
mod spk_txout_index;
pub use spk_txout_index::*;
mod chain_data;
pub use chain_data::*;
pub mod indexed_tx_graph;
pub use indexed_tx_graph::IndexedTxGraph;
pub mod indexer;
pub use indexer::spk_txout;
pub use indexer::Indexer;
pub mod local_chain;
pub mod keychain;
pub mod sparse_chain;
mod tx_data_traits;
pub mod tx_graph;
pub use tx_data_traits::*;
pub use tx_graph::TxGraph;
mod chain_oracle;
pub use chain_oracle::*;
mod persist;
pub use persist::*;
#[doc(hidden)]
pub mod example_utils;
@@ -48,25 +38,18 @@ pub use miniscript;
#[cfg(feature = "miniscript")]
mod descriptor_ext;
#[cfg(feature = "miniscript")]
pub use descriptor_ext::{DescriptorExt, DescriptorId};
#[cfg(feature = "miniscript")]
mod spk_iter;
#[cfg(feature = "miniscript")]
pub use indexer::keychain_txout;
#[cfg(feature = "miniscript")]
pub use spk_iter::*;
#[cfg(feature = "rusqlite")]
pub mod rusqlite_impl;
pub mod spk_client;
pub use descriptor_ext::DescriptorExt;
#[allow(unused_imports)]
#[macro_use]
extern crate alloc;
#[cfg(feature = "rusqlite")]
pub extern crate rusqlite_crate as rusqlite;
#[cfg(feature = "serde")]
pub extern crate serde_crate as serde;
#[cfg(feature = "bincode")]
extern crate bincode;
#[cfg(feature = "std")]
#[macro_use]
extern crate std;
@@ -104,25 +87,3 @@ pub mod collections {
/// How many confirmations are needed for a coinbase output to be spent.
pub const COINBASE_MATURITY: u32 = 100;
/// A tuple of keychain index and `T` representing the indexed value.
pub type Indexed<T> = (u32, T);
/// A tuple of keychain `K`, derivation index (`u32`) and a `T` associated with them.
pub type KeychainIndexed<K, T> = ((K, u32), T);
/// A wrapper that we use to impl remote traits for types in our crate or dependency crates.
pub struct Impl<T>(pub T);
impl<T> From<T> for Impl<T> {
fn from(value: T) -> Self {
Self(value)
}
}
impl<T> core::ops::Deref for Impl<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.0
}
}

View File

@@ -1,902 +0,0 @@
//! The [`LocalChain`] is a local implementation of [`ChainOracle`].
use core::convert::Infallible;
use core::ops::RangeBounds;
use crate::collections::BTreeMap;
use crate::{BlockId, ChainOracle, Merge};
use alloc::sync::Arc;
use bitcoin::block::Header;
use bitcoin::BlockHash;
/// A [`LocalChain`] checkpoint is used to find the agreement point between two chains and as a
/// transaction anchor.
///
/// Each checkpoint contains the height and hash of a block ([`BlockId`]).
///
/// Internally, checkpoints are nodes of a reference-counted linked-list. This allows the caller to
/// cheaply clone a [`CheckPoint`] without copying the whole list and to view the entire chain
/// without holding a lock on [`LocalChain`].
#[derive(Debug, Clone)]
pub struct CheckPoint(Arc<CPInner>);
/// The internal contents of [`CheckPoint`].
#[derive(Debug, Clone)]
struct CPInner {
/// Block id (hash and height).
block: BlockId,
/// Previous checkpoint (if any).
prev: Option<Arc<CPInner>>,
}
impl PartialEq for CheckPoint {
fn eq(&self, other: &Self) -> bool {
let self_cps = self.iter().map(|cp| cp.block_id());
let other_cps = other.iter().map(|cp| cp.block_id());
self_cps.eq(other_cps)
}
}
impl CheckPoint {
/// Construct a new base block at the front of a linked list.
pub fn new(block: BlockId) -> Self {
Self(Arc::new(CPInner { block, prev: None }))
}
/// Construct a checkpoint from a list of [`BlockId`]s in ascending height order.
///
/// # Errors
///
/// This method will error if any of the following occurs:
///
/// - The `blocks` iterator is empty, in which case, the error will be `None`.
/// - The `blocks` iterator is not in ascending height order.
/// - The `blocks` iterator contains multiple [`BlockId`]s of the same height.
///
/// The error type is the last successful checkpoint constructed (if any).
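///
/// # Example
///
/// A minimal sketch; the hashes are assumed to be in scope:
///
/// ```ignore
/// let cp = CheckPoint::from_block_ids([
///     BlockId { height: 0, hash: genesis_hash },
///     BlockId { height: 100, hash: hash_100 },
/// ])
/// .expect("blocks are in ascending height order");
/// assert_eq!(cp.height(), 100);
/// ```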
pub fn from_block_ids(
block_ids: impl IntoIterator<Item = BlockId>,
) -> Result<Self, Option<Self>> {
let mut blocks = block_ids.into_iter();
let mut acc = CheckPoint::new(blocks.next().ok_or(None)?);
for id in blocks {
acc = acc.push(id).map_err(Some)?;
}
Ok(acc)
}
/// Construct a checkpoint from the given `header` and block `height`.
///
/// If `header` is of the genesis block, the checkpoint won't have a [`prev`] node. Otherwise,
/// we return a checkpoint linked with the previous block.
///
/// [`prev`]: CheckPoint::prev
pub fn from_header(header: &bitcoin::block::Header, height: u32) -> Self {
let hash = header.block_hash();
let this_block_id = BlockId { height, hash };
let prev_height = match height.checked_sub(1) {
Some(h) => h,
None => return Self::new(this_block_id),
};
let prev_block_id = BlockId {
height: prev_height,
hash: header.prev_blockhash,
};
CheckPoint::new(prev_block_id)
.push(this_block_id)
.expect("must construct checkpoint")
}
/// Puts another checkpoint onto the linked list representing the blockchain.
///
/// Returns an `Err(self)` if the block you are pushing on is not at a greater height than the
/// one you are pushing on to.
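///
/// # Example
///
/// A minimal sketch; the hashes are assumed to be in scope:
///
/// ```ignore
/// let cp = CheckPoint::new(BlockId { height: 0, hash: genesis_hash })
///     .push(BlockId { height: 1, hash: hash_1 })
///     .expect("1 is greater than 0");
/// ```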
pub fn push(self, block: BlockId) -> Result<Self, Self> {
if self.height() < block.height {
Ok(Self(Arc::new(CPInner {
block,
prev: Some(self.0),
})))
} else {
Err(self)
}
}
/// Extends the checkpoint linked list by an iterator of block ids.
///
/// Returns an `Err(self)` if there is a block which does not have a greater height than the
/// previous one.
pub fn extend(self, blocks: impl IntoIterator<Item = BlockId>) -> Result<Self, Self> {
let mut curr = self.clone();
for block in blocks {
curr = curr.push(block).map_err(|_| self.clone())?;
}
Ok(curr)
}
/// Get the [`BlockId`] of the checkpoint.
pub fn block_id(&self) -> BlockId {
self.0.block
}
/// Get the height of the checkpoint.
pub fn height(&self) -> u32 {
self.0.block.height
}
/// Get the block hash of the checkpoint.
pub fn hash(&self) -> BlockHash {
self.0.block.hash
}
/// Get the previous checkpoint in the chain
pub fn prev(&self) -> Option<CheckPoint> {
self.0.prev.clone().map(CheckPoint)
}
/// Iterate from this checkpoint in descending height.
pub fn iter(&self) -> CheckPointIter {
self.clone().into_iter()
}
/// Get checkpoint at `height`.
///
/// Returns `None` if a checkpoint at `height` does not exist.
pub fn get(&self, height: u32) -> Option<Self> {
self.range(height..=height).next()
}
/// Iterate checkpoints over a height range.
///
/// Note that we always iterate checkpoints in reverse height order (iteration starts at tip
/// height).
pub fn range<R>(&self, range: R) -> impl Iterator<Item = CheckPoint>
where
R: RangeBounds<u32>,
{
let start_bound = range.start_bound().cloned();
let end_bound = range.end_bound().cloned();
self.iter()
.skip_while(move |cp| match end_bound {
core::ops::Bound::Included(inc_bound) => cp.height() > inc_bound,
core::ops::Bound::Excluded(exc_bound) => cp.height() >= exc_bound,
core::ops::Bound::Unbounded => false,
})
.take_while(move |cp| match start_bound {
core::ops::Bound::Included(inc_bound) => cp.height() >= inc_bound,
core::ops::Bound::Excluded(exc_bound) => cp.height() > exc_bound,
core::ops::Bound::Unbounded => true,
})
}
/// Inserts `block_id` at its height within the chain.
///
/// The effect of `insert` depends on whether the height already exists. If it doesn't, the
/// inserted `block_id` and all pre-existing blocks higher than it will be re-inserted after
/// it. If the height already exists with a conflicting block hash, the conflicting block will
/// be purged along with all blocks following it. The returned chain will have a tip of the
/// `block_id` passed in. Of course, if the `block_id` was already present then this just
/// returns `self`.
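///
/// # Example
///
/// A minimal sketch; `tip` is an existing [`CheckPoint`] and the hash is assumed to be in
/// scope:
///
/// ```ignore
/// let new_tip = tip.insert(BlockId { height: 21, hash: hash_21 });
/// assert_eq!(new_tip.get(21).map(|cp| cp.hash()), Some(hash_21));
/// ```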
#[must_use]
pub fn insert(self, block_id: BlockId) -> Self {
assert_ne!(block_id.height, 0, "cannot insert the genesis block");
let mut cp = self.clone();
let mut tail = vec![];
let base = loop {
if cp.height() == block_id.height {
if cp.hash() == block_id.hash {
return self;
}
// if we have a conflict we just return the inserted block because the tail is by
// implication invalid.
tail = vec![];
break cp.prev().expect("can't be called on genesis block");
}
if cp.height() < block_id.height {
break cp;
}
tail.push(cp.block_id());
cp = cp.prev().expect("will break before genesis block");
};
base.extend(core::iter::once(block_id).chain(tail.into_iter().rev()))
.expect("tail is in order")
}
/// Apply `changeset` to the checkpoint.
fn apply_changeset(mut self, changeset: &ChangeSet) -> Result<CheckPoint, MissingGenesisError> {
if let Some(start_height) = changeset.blocks.keys().next().cloned() {
// changes after point of agreement
let mut extension = BTreeMap::default();
// point of agreement
let mut base: Option<CheckPoint> = None;
for cp in self.iter() {
if cp.height() >= start_height {
extension.insert(cp.height(), cp.hash());
} else {
base = Some(cp);
break;
}
}
for (&height, &hash) in &changeset.blocks {
match hash {
Some(hash) => {
extension.insert(height, hash);
}
None => {
extension.remove(&height);
}
};
}
let new_tip = match base {
Some(base) => base
.extend(extension.into_iter().map(BlockId::from))
.expect("extension is strictly greater than base"),
None => LocalChain::from_blocks(extension)?.tip(),
};
self = new_tip;
}
Ok(self)
}
}
/// Iterates over checkpoints backwards.
pub struct CheckPointIter {
current: Option<Arc<CPInner>>,
}
impl Iterator for CheckPointIter {
type Item = CheckPoint;
fn next(&mut self) -> Option<Self::Item> {
let current = self.current.clone()?;
self.current.clone_from(&current.prev);
Some(CheckPoint(current))
}
}
impl IntoIterator for CheckPoint {
type Item = CheckPoint;
type IntoIter = CheckPointIter;
fn into_iter(self) -> Self::IntoIter {
CheckPointIter {
current: Some(self.0),
}
}
}
/// This is a local implementation of [`ChainOracle`].
#[derive(Debug, Clone, PartialEq)]
pub struct LocalChain {
tip: CheckPoint,
}
impl ChainOracle for LocalChain {
type Error = Infallible;
fn is_block_in_chain(
&self,
block: BlockId,
chain_tip: BlockId,
) -> Result<Option<bool>, Self::Error> {
let chain_tip_cp = match self.tip.get(chain_tip.height) {
// we can only determine whether `block` is in chain of `chain_tip` if `chain_tip` can
// be identified in chain
Some(cp) if cp.hash() == chain_tip.hash => cp,
_ => return Ok(None),
};
match chain_tip_cp.get(block.height) {
Some(cp) => Ok(Some(cp.hash() == block.hash)),
None => Ok(None),
}
}
fn get_chain_tip(&self) -> Result<BlockId, Self::Error> {
Ok(self.tip.block_id())
}
}
impl LocalChain {
/// Get the genesis hash.
pub fn genesis_hash(&self) -> BlockHash {
self.tip.get(0).expect("genesis must exist").hash()
}
/// Construct [`LocalChain`] from genesis `hash`.
#[must_use]
pub fn from_genesis_hash(hash: BlockHash) -> (Self, ChangeSet) {
let height = 0;
let chain = Self {
tip: CheckPoint::new(BlockId { height, hash }),
};
let changeset = chain.initial_changeset();
(chain, changeset)
}
/// Construct a [`LocalChain`] from an initial `changeset`.
pub fn from_changeset(changeset: ChangeSet) -> Result<Self, MissingGenesisError> {
let genesis_entry = changeset.blocks.get(&0).copied().flatten();
let genesis_hash = match genesis_entry {
Some(hash) => hash,
None => return Err(MissingGenesisError),
};
let (mut chain, _) = Self::from_genesis_hash(genesis_hash);
chain.apply_changeset(&changeset)?;
debug_assert!(chain._check_changeset_is_applied(&changeset));
Ok(chain)
}
/// Construct a [`LocalChain`] from a given `checkpoint` tip.
pub fn from_tip(tip: CheckPoint) -> Result<Self, MissingGenesisError> {
let genesis_cp = tip.iter().last().expect("must have at least one element");
if genesis_cp.height() != 0 {
return Err(MissingGenesisError);
}
Ok(Self { tip })
}
/// Constructs a [`LocalChain`] from a [`BTreeMap`] of height to [`BlockHash`].
///
/// The [`BTreeMap`] enforces the height order. However, the caller must ensure the blocks are
/// all of the same chain.
pub fn from_blocks(blocks: BTreeMap<u32, BlockHash>) -> Result<Self, MissingGenesisError> {
if !blocks.contains_key(&0) {
return Err(MissingGenesisError);
}
let mut tip: Option<CheckPoint> = None;
for block in &blocks {
match tip {
Some(curr) => {
tip = Some(
curr.push(BlockId::from(block))
.expect("BTreeMap is ordered"),
)
}
None => tip = Some(CheckPoint::new(BlockId::from(block))),
}
}
Ok(Self {
tip: tip.expect("already checked to have genesis"),
})
}
/// Get the highest checkpoint.
pub fn tip(&self) -> CheckPoint {
self.tip.clone()
}
/// Applies the given `update` to the chain.
///
/// The method returns [`ChangeSet`] on success. This represents the changes applied to `self`.
///
/// There must be no ambiguity about which of the existing chain's blocks are still valid and
/// which are now invalid. That is, the new chain must implicitly connect to a definite block in
/// the existing chain and invalidate the block after it (if it exists) by including a block at
/// the same height but with a different hash to explicitly exclude it as a connection point.
///
/// # Errors
///
/// An error will occur if the update does not correctly connect with `self`.
///
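/// # Example
///
/// A minimal sketch; `update_tip` is a hypothetical [`CheckPoint`] built by a chain source:
///
/// ```ignore
/// let changeset = chain.apply_update(update_tip)?;
/// // `changeset` records exactly what changed in `chain`; on a
/// // `CannotConnectError`, rebuild the update to include a checkpoint at
/// // `err.try_include_height` and try again
/// ```
///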
/// [module-level documentation]: crate::local_chain
pub fn apply_update(&mut self, update: CheckPoint) -> Result<ChangeSet, CannotConnectError> {
let (new_tip, changeset) = merge_chains(self.tip.clone(), update)?;
self.tip = new_tip;
debug_assert!(self._check_changeset_is_applied(&changeset));
Ok(changeset)
}
/// Update the chain with a given [`Header`] at `height` which you claim is connected to an
/// existing block in the chain.
///
/// This is useful when you have a block header that you want to record as part of the chain but
/// don't necessarily know that the `prev_blockhash` is in the chain.
///
/// This will usually insert two new [`BlockId`]s into the chain: the header's block and the
/// header's `prev_blockhash` block. `connected_to` must already be in the chain but is allowed
/// to be `prev_blockhash` (in which case only one new block id will be inserted).
/// To be successful, `connected_to` must be chosen carefully so that `LocalChain`'s [update
/// rules][`apply_update`] are satisfied.
///
/// # Errors
///
/// [`ApplyHeaderError::InconsistentBlocks`] occurs if the `connected_to` block and the
/// [`Header`] are inconsistent. For example, if the `connected_to` block is at the same height
/// as `header` or `prev_blockhash`, but has a different block hash. Or if the `connected_to`
/// height is greater than the header's `height`.
///
/// [`ApplyHeaderError::CannotConnect`] occurs if the internal call to [`apply_update`] fails.
///
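/// # Example
///
/// A minimal sketch; `header`, `height` and the `connected_to` values are assumed to be in
/// scope:
///
/// ```ignore
/// let changeset = chain.apply_header_connected_to(
///     &header,
///     height,
///     BlockId { height: conn_height, hash: conn_hash },
/// )?;
/// ```
///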
/// [`apply_update`]: Self::apply_update
pub fn apply_header_connected_to(
&mut self,
header: &Header,
height: u32,
connected_to: BlockId,
) -> Result<ChangeSet, ApplyHeaderError> {
let this = BlockId {
height,
hash: header.block_hash(),
};
let prev = height.checked_sub(1).map(|prev_height| BlockId {
height: prev_height,
hash: header.prev_blockhash,
});
let conn = match connected_to {
// `connected_to` can be ignored if same as `this` or `prev` (duplicate)
conn if conn == this || Some(conn) == prev => None,
// this occurs if:
// - `connected_to` height is the same as `prev`, but different hash
// - `connected_to` height is the same as `this`, but different hash
// - `connected_to` height is greater than `this` (this is not allowed)
conn if conn.height >= height.saturating_sub(1) => {
return Err(ApplyHeaderError::InconsistentBlocks)
}
conn => Some(conn),
};
let update = CheckPoint::from_block_ids([conn, prev, Some(this)].into_iter().flatten())
.expect("block ids must be in order");
self.apply_update(update)
.map_err(ApplyHeaderError::CannotConnect)
}
/// Update the chain with a given [`Header`] connecting it with the previous block.
///
/// This is a convenience method to call [`apply_header_connected_to`] with the `connected_to`
/// parameter being `height-1:prev_blockhash`. If there is no previous block (i.e. genesis), we
/// use the current block as `connected_to`.
///
/// [`apply_header_connected_to`]: LocalChain::apply_header_connected_to
pub fn apply_header(
&mut self,
header: &Header,
height: u32,
) -> Result<ChangeSet, CannotConnectError> {
let connected_to = match height.checked_sub(1) {
Some(prev_height) => BlockId {
height: prev_height,
hash: header.prev_blockhash,
},
None => BlockId {
height,
hash: header.block_hash(),
},
};
self.apply_header_connected_to(header, height, connected_to)
.map_err(|err| match err {
ApplyHeaderError::InconsistentBlocks => {
unreachable!("connected_to is derived from the block so is always consistent")
}
ApplyHeaderError::CannotConnect(err) => err,
})
}
/// Apply the given `changeset`.
pub fn apply_changeset(&mut self, changeset: &ChangeSet) -> Result<(), MissingGenesisError> {
let old_tip = self.tip.clone();
let new_tip = old_tip.apply_changeset(changeset)?;
self.tip = new_tip;
debug_assert!(self._check_changeset_is_applied(changeset));
Ok(())
}
/// Insert a [`BlockId`].
///
/// # Errors
///
/// Replacing the block hash of an existing checkpoint will result in an error.
pub fn insert_block(&mut self, block_id: BlockId) -> Result<ChangeSet, AlterCheckPointError> {
if let Some(original_cp) = self.tip.get(block_id.height) {
let original_hash = original_cp.hash();
if original_hash != block_id.hash {
return Err(AlterCheckPointError {
height: block_id.height,
original_hash,
update_hash: Some(block_id.hash),
});
}
return Ok(ChangeSet::default());
}
let mut changeset = ChangeSet::default();
changeset
.blocks
.insert(block_id.height, Some(block_id.hash));
self.apply_changeset(&changeset)
.map_err(|_| AlterCheckPointError {
height: 0,
original_hash: self.genesis_hash(),
update_hash: changeset.blocks.get(&0).cloned().flatten(),
})?;
Ok(changeset)
}
/// Removes blocks from (and inclusive of) the given `block_id`.
///
/// This will remove blocks with a height equal to or greater than `block_id`, but only if
/// `block_id` exists in the chain.
///
/// # Errors
///
/// This will fail with [`MissingGenesisError`] if the caller attempts to disconnect from the
/// genesis block.
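///
/// # Example
///
/// A minimal sketch; the hash is assumed to be in scope:
///
/// ```ignore
/// // drop the block at height 5 and everything above it
/// let changeset = chain.disconnect_from(BlockId { height: 5, hash: hash_5 })?;
/// // removed heights map to `None` in `changeset.blocks`
/// ```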
pub fn disconnect_from(&mut self, block_id: BlockId) -> Result<ChangeSet, MissingGenesisError> {
let mut remove_from = Option::<CheckPoint>::None;
let mut changeset = ChangeSet::default();
for cp in self.tip().iter() {
let cp_id = cp.block_id();
if cp_id.height < block_id.height {
break;
}
changeset.blocks.insert(cp_id.height, None);
if cp_id == block_id {
remove_from = Some(cp);
}
}
self.tip = match remove_from.map(|cp| cp.prev()) {
// The checkpoint below the earliest checkpoint to remove will be the new tip.
Some(Some(new_tip)) => new_tip,
// If there is no checkpoint below the earliest checkpoint to remove, it means the
// "earliest checkpoint to remove" is the genesis block. We disallow removing the
// genesis block.
Some(None) => return Err(MissingGenesisError),
// If there is nothing to remove, we return an empty changeset.
None => return Ok(ChangeSet::default()),
};
Ok(changeset)
}
/// Derives an initial [`ChangeSet`], meaning that it can be applied to an empty chain to
/// recover the current chain.
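///
/// # Example
///
/// A minimal sketch: round-tripping a chain through its initial changeset.
///
/// ```ignore
/// let changeset = chain.initial_changeset();
/// let recovered = LocalChain::from_changeset(changeset)?;
/// assert_eq!(recovered, chain);
/// ```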
pub fn initial_changeset(&self) -> ChangeSet {
ChangeSet {
blocks: self
.tip
.iter()
.map(|cp| {
let block_id = cp.block_id();
(block_id.height, Some(block_id.hash))
})
.collect(),
}
}
/// Iterate over checkpoints in descending height order.
pub fn iter_checkpoints(&self) -> CheckPointIter {
CheckPointIter {
current: Some(self.tip.0.clone()),
}
}
fn _check_changeset_is_applied(&self, changeset: &ChangeSet) -> bool {
let mut curr_cp = self.tip.clone();
for (height, exp_hash) in changeset.blocks.iter().rev() {
match curr_cp.get(*height) {
Some(query_cp) => {
if query_cp.height() != *height || Some(query_cp.hash()) != *exp_hash {
return false;
}
curr_cp = query_cp;
}
None => {
if exp_hash.is_some() {
return false;
}
}
}
}
true
}
/// Get checkpoint at given `height` (if it exists).
///
/// This is a shorthand for calling [`CheckPoint::get`] on the [`tip`].
///
/// [`tip`]: LocalChain::tip
pub fn get(&self, height: u32) -> Option<CheckPoint> {
self.tip.get(height)
}
/// Iterate checkpoints over a height range.
///
/// Note that we always iterate checkpoints in reverse height order (iteration starts at tip
/// height).
///
/// This is a shorthand for calling [`CheckPoint::range`] on the [`tip`].
///
/// [`tip`]: LocalChain::tip
pub fn range<R>(&self, range: R) -> impl Iterator<Item = CheckPoint>
where
R: RangeBounds<u32>,
{
self.tip.range(range)
}
}
/// The [`ChangeSet`] represents changes to [`LocalChain`].
#[derive(Debug, Default, Clone, PartialEq)]
#[cfg_attr(
feature = "serde",
derive(serde::Deserialize, serde::Serialize),
serde(crate = "serde_crate")
)]
pub struct ChangeSet {
/// Changes to the [`LocalChain`] blocks.
///
/// The key represents the block height, and the value represents either adding a new
/// [`CheckPoint`] (if [`Some`]) or removing one (if [`None`]).
pub blocks: BTreeMap<u32, Option<BlockHash>>,
}
impl Merge for ChangeSet {
fn merge(&mut self, other: Self) {
Merge::merge(&mut self.blocks, other.blocks)
}
fn is_empty(&self) -> bool {
self.blocks.is_empty()
}
}
impl<B: IntoIterator<Item = (u32, Option<BlockHash>)>> From<B> for ChangeSet {
fn from(blocks: B) -> Self {
Self {
blocks: blocks.into_iter().collect(),
}
}
}
impl FromIterator<(u32, Option<BlockHash>)> for ChangeSet {
fn from_iter<T: IntoIterator<Item = (u32, Option<BlockHash>)>>(iter: T) -> Self {
Self {
blocks: iter.into_iter().collect(),
}
}
}
impl FromIterator<(u32, BlockHash)> for ChangeSet {
fn from_iter<T: IntoIterator<Item = (u32, BlockHash)>>(iter: T) -> Self {
Self {
blocks: iter
.into_iter()
.map(|(height, hash)| (height, Some(hash)))
.collect(),
}
}
}
/// An error which occurs when a [`LocalChain`] is constructed without a genesis checkpoint.
#[derive(Clone, Debug, PartialEq)]
pub struct MissingGenesisError;
impl core::fmt::Display for MissingGenesisError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(
f,
"cannot construct `LocalChain` without a genesis checkpoint"
)
}
}
#[cfg(feature = "std")]
impl std::error::Error for MissingGenesisError {}
/// Represents a failure when trying to insert/remove a checkpoint to/from [`LocalChain`].
#[derive(Clone, Debug, PartialEq)]
pub struct AlterCheckPointError {
/// The checkpoint's height.
pub height: u32,
/// The original checkpoint's block hash which cannot be replaced/removed.
pub original_hash: BlockHash,
/// The attempted update to the `original_block` hash.
pub update_hash: Option<BlockHash>,
}
impl core::fmt::Display for AlterCheckPointError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self.update_hash {
Some(update_hash) => write!(
f,
"failed to insert block at height {}: original={} update={}",
self.height, self.original_hash, update_hash
),
None => write!(
f,
"failed to remove block at height {}: original={}",
self.height, self.original_hash
),
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for AlterCheckPointError {}
/// Occurs when an update does not have a common checkpoint with the original chain.
#[derive(Clone, Debug, PartialEq)]
pub struct CannotConnectError {
/// The suggested checkpoint height to include so that the two chains can connect.
pub try_include_height: u32,
}
impl core::fmt::Display for CannotConnectError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(
f,
"introduced chain cannot connect with the original chain, try include height {}",
self.try_include_height,
)
}
}
#[cfg(feature = "std")]
impl std::error::Error for CannotConnectError {}
/// The error type for [`LocalChain::apply_header_connected_to`].
#[derive(Debug, Clone, PartialEq)]
pub enum ApplyHeaderError {
/// Occurs when `connected_to` block conflicts with either the current block or previous block.
InconsistentBlocks,
/// Occurs when the update cannot connect with the original chain.
CannotConnect(CannotConnectError),
}
impl core::fmt::Display for ApplyHeaderError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
ApplyHeaderError::InconsistentBlocks => write!(
f,
"the `connected_to` block conflicts with either the current or previous block"
),
ApplyHeaderError::CannotConnect(err) => core::fmt::Display::fmt(err, f),
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for ApplyHeaderError {}
/// Applies `update_tip` onto `original_tip`.
///
/// On success, a tuple `(new_tip, changeset)` is returned: the tip of the merged chain and the
/// [`ChangeSet`] that transforms `original_tip` into it.
fn merge_chains(
original_tip: CheckPoint,
update_tip: CheckPoint,
) -> Result<(CheckPoint, ChangeSet), CannotConnectError> {
let mut changeset = ChangeSet::default();
let mut orig = original_tip.iter();
let mut update = update_tip.iter();
let mut curr_orig = None;
let mut curr_update = None;
let mut prev_orig: Option<CheckPoint> = None;
let mut prev_update: Option<CheckPoint> = None;
let mut point_of_agreement_found = false;
let mut prev_orig_was_invalidated = false;
let mut potentially_invalidated_heights = vec![];
// If we can, we want to return the update tip as the new tip because this allows checkpoints
// in multiple locations to keep the same `Arc` pointers when they are being updated from each
// other using this function. We can do this as long as the update contains every block
// height of the original chain.
let mut is_update_height_superset_of_original = true;
// To find the difference between the new chain and the original we iterate over both of them
// from the tip backwards in tandem. We always deal with the highest block from either chain
// first, then move to the next highest. The crucial logic is applied when they have blocks at the
// same height.
loop {
if curr_orig.is_none() {
curr_orig = orig.next();
}
if curr_update.is_none() {
curr_update = update.next();
}
match (curr_orig.as_ref(), curr_update.as_ref()) {
// Update block that doesn't exist in the original chain
(o, Some(u)) if Some(u.height()) > o.map(|o| o.height()) => {
changeset.blocks.insert(u.height(), Some(u.hash()));
prev_update = curr_update.take();
}
// Original block that isn't in the update
(Some(o), u) if Some(o.height()) > u.map(|u| u.height()) => {
// this block might be gone if an earlier block gets invalidated
potentially_invalidated_heights.push(o.height());
prev_orig_was_invalidated = false;
prev_orig = curr_orig.take();
is_update_height_superset_of_original = false;
// OPTIMIZATION: we have run out of update blocks so we don't need to continue
// iterating because there's no possibility of adding anything to changeset.
if u.is_none() {
break;
}
}
(Some(o), Some(u)) => {
if o.hash() == u.hash() {
// We have found our point of agreement 🎉 -- we require that the previous (i.e.
// higher because we are iterating backwards) block in the original chain was
// invalidated (if it exists). This ensures that there is an unambiguous point of
// connection to the original chain from the update chain (i.e. we know
// precisely which original blocks are invalid).
if !prev_orig_was_invalidated && !point_of_agreement_found {
if let (Some(prev_orig), Some(_prev_update)) = (&prev_orig, &prev_update) {
return Err(CannotConnectError {
try_include_height: prev_orig.height(),
});
}
}
point_of_agreement_found = true;
prev_orig_was_invalidated = false;
// OPTIMIZATION 2 -- if we have the same underlying pointer at this point, we
// can guarantee that no older blocks are introduced.
if Arc::as_ptr(&o.0) == Arc::as_ptr(&u.0) {
if is_update_height_superset_of_original {
return Ok((update_tip, changeset));
} else {
let new_tip =
original_tip.apply_changeset(&changeset).map_err(|_| {
CannotConnectError {
try_include_height: 0,
}
})?;
return Ok((new_tip, changeset));
}
}
} else {
// We have an invalidation height so we set the height to the updated hash and
// also purge all the original chain block hashes above this block.
changeset.blocks.insert(u.height(), Some(u.hash()));
for invalidated_height in potentially_invalidated_heights.drain(..) {
changeset.blocks.insert(invalidated_height, None);
}
prev_orig_was_invalidated = true;
}
prev_update = curr_update.take();
prev_orig = curr_orig.take();
}
(None, None) => {
break;
}
_ => {
unreachable!("compiler cannot tell that everything has been covered")
}
}
}
// When we don't have a point of agreement, you can imagine it is implicitly the
// genesis block, so we need to do the final connectivity check, which in this case
// just means making sure the entire original chain was invalidated.
if !prev_orig_was_invalidated && !point_of_agreement_found {
if let Some(prev_orig) = prev_orig {
return Err(CannotConnectError {
try_include_height: prev_orig.height(),
});
}
}
let new_tip = original_tip
.apply_changeset(&changeset)
.map_err(|_| CannotConnectError {
try_include_height: 0,
})?;
Ok((new_tip, changeset))
}

View File

@@ -1,169 +0,0 @@
use core::{
future::Future,
ops::{Deref, DerefMut},
pin::Pin,
};
use alloc::boxed::Box;
use crate::Merge;
/// Represents a type that contains staged changes.
pub trait Staged {
/// Type for staged changes.
type ChangeSet: Merge;
/// Get mutable reference of staged changes.
fn staged(&mut self) -> &mut Self::ChangeSet;
}
/// Trait that persists the type with `Db`.
///
/// Methods of this trait should not be called directly.
pub trait PersistWith<Db>: Staged + Sized {
/// Parameters for [`PersistWith::create`].
type CreateParams;
/// Parameters for [`PersistWith::load`].
type LoadParams;
/// Error type of [`PersistWith::create`].
type CreateError;
/// Error type of [`PersistWith::load`].
type LoadError;
/// Error type of [`PersistWith::persist`].
type PersistError;
/// Initialize the `Db` and create `Self`.
fn create(db: &mut Db, params: Self::CreateParams) -> Result<Self, Self::CreateError>;
/// Initialize the `Db` and load a previously-persisted `Self`.
fn load(db: &mut Db, params: Self::LoadParams) -> Result<Option<Self>, Self::LoadError>;
/// Persist changes to the `Db`.
fn persist(
db: &mut Db,
changeset: &<Self as Staged>::ChangeSet,
) -> Result<(), Self::PersistError>;
}
type FutureResult<'a, T, E> = Pin<Box<dyn Future<Output = Result<T, E>> + Send + 'a>>;
/// Trait that persists the type with an async `Db`.
pub trait PersistAsyncWith<Db>: Staged + Sized {
/// Parameters for [`PersistAsyncWith::create`].
type CreateParams;
/// Parameters for [`PersistAsyncWith::load`].
type LoadParams;
/// Error type of [`PersistAsyncWith::create`].
type CreateError;
/// Error type of [`PersistAsyncWith::load`].
type LoadError;
/// Error type of [`PersistAsyncWith::persist`].
type PersistError;
/// Initialize the `Db` and create `Self`.
fn create(db: &mut Db, params: Self::CreateParams) -> FutureResult<Self, Self::CreateError>;
/// Initialize the `Db` and load a previously-persisted `Self`.
fn load(db: &mut Db, params: Self::LoadParams) -> FutureResult<Option<Self>, Self::LoadError>;
/// Persist changes to the `Db`.
fn persist<'a>(
db: &'a mut Db,
changeset: &'a <Self as Staged>::ChangeSet,
) -> FutureResult<'a, (), Self::PersistError>;
}
/// Represents a persisted `T`.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Persisted<T> {
inner: T,
}
impl<T> Persisted<T> {
/// Create a new persisted `T`.
pub fn create<Db>(db: &mut Db, params: T::CreateParams) -> Result<Self, T::CreateError>
where
T: PersistWith<Db>,
{
T::create(db, params).map(|inner| Self { inner })
}
/// Create a new persisted `T` with async `Db`.
pub async fn create_async<Db>(
db: &mut Db,
params: T::CreateParams,
) -> Result<Self, T::CreateError>
where
T: PersistAsyncWith<Db>,
{
T::create(db, params).await.map(|inner| Self { inner })
}
/// Construct a persisted `T` from `Db`.
pub fn load<Db>(db: &mut Db, params: T::LoadParams) -> Result<Option<Self>, T::LoadError>
where
T: PersistWith<Db>,
{
Ok(T::load(db, params)?.map(|inner| Self { inner }))
}
/// Construct a persisted `T` from an async `Db`.
pub async fn load_async<Db>(
db: &mut Db,
params: T::LoadParams,
) -> Result<Option<Self>, T::LoadError>
where
T: PersistAsyncWith<Db>,
{
Ok(T::load(db, params).await?.map(|inner| Self { inner }))
}
/// Persist staged changes of `T` into `Db`.
///
/// If the database errors, the staged changes will not be cleared.
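///
/// # Example
///
/// A minimal sketch, assuming `wallet: Persisted<W>` where `W: PersistWith<Db>` and `db` is
/// the matching database:
///
/// ```ignore
/// if wallet.persist(&mut db)? {
///     // staged changes were written and the stage was cleared
/// } else {
///     // nothing was staged; the database was not touched
/// }
/// ```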
pub fn persist<Db>(&mut self, db: &mut Db) -> Result<bool, T::PersistError>
where
T: PersistWith<Db>,
{
let stage = T::staged(&mut self.inner);
if stage.is_empty() {
return Ok(false);
}
T::persist(db, &*stage)?;
stage.take();
Ok(true)
}
/// Persist staged changes of `T` into an async `Db`.
///
/// If the database errors, the staged changes will not be cleared.
pub async fn persist_async<'a, Db>(
&'a mut self,
db: &'a mut Db,
) -> Result<bool, T::PersistError>
where
T: PersistAsyncWith<Db>,
{
let stage = T::staged(&mut self.inner);
if stage.is_empty() {
return Ok(false);
}
T::persist(db, &*stage).await?;
stage.take();
Ok(true)
}
}
impl<T> Deref for Persisted<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<T> DerefMut for Persisted<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}

View File

@@ -1,530 +0,0 @@
//! Support for persisting `bdk_chain` changesets with [`rusqlite`].
use crate::*;
use core::str::FromStr;
use alloc::{borrow::ToOwned, boxed::Box, string::ToString, sync::Arc, vec::Vec};
use bitcoin::consensus::{Decodable, Encodable};
use rusqlite;
use rusqlite::named_params;
use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef};
use rusqlite::OptionalExtension;
use rusqlite::Transaction;
/// Table name for schemas.
pub const SCHEMAS_TABLE_NAME: &str = "bdk_schemas";
/// Initialize the schema table.
fn init_schemas_table(db_tx: &Transaction) -> rusqlite::Result<()> {
let sql = format!("CREATE TABLE IF NOT EXISTS {}( name TEXT PRIMARY KEY NOT NULL, version INTEGER NOT NULL ) STRICT", SCHEMAS_TABLE_NAME);
db_tx.execute(&sql, ())?;
Ok(())
}
/// Get schema version of `schema_name`.
fn schema_version(db_tx: &Transaction, schema_name: &str) -> rusqlite::Result<Option<u32>> {
let sql = format!(
"SELECT version FROM {} WHERE name=:name",
SCHEMAS_TABLE_NAME
);
db_tx
.query_row(&sql, named_params! { ":name": schema_name }, |row| {
row.get::<_, u32>("version")
})
.optional()
}
/// Set the `schema_version` of `schema_name`.
fn set_schema_version(
db_tx: &Transaction,
schema_name: &str,
schema_version: u32,
) -> rusqlite::Result<()> {
let sql = format!(
"REPLACE INTO {}(name, version) VALUES(:name, :version)",
SCHEMAS_TABLE_NAME,
);
db_tx.execute(
&sql,
named_params! { ":name": schema_name, ":version": schema_version },
)?;
Ok(())
}
/// Runs logic that initializes/migrates the table schemas.
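///
/// # Example
///
/// A minimal sketch; `conn` is a hypothetical [`rusqlite::Connection`]:
///
/// ```ignore
/// let db_tx = conn.transaction()?;
/// migrate_schema(
///     &db_tx,
///     "my_schema",
///     &[
///         // version 0
///         &["CREATE TABLE my_table ( id INTEGER PRIMARY KEY NOT NULL ) STRICT"],
///         // version 1 (appended in a later release)
///         &["ALTER TABLE my_table ADD COLUMN note TEXT"],
///     ],
/// )?;
/// db_tx.commit()?;
/// ```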
pub fn migrate_schema(
db_tx: &Transaction,
schema_name: &str,
versioned_scripts: &[&[&str]],
) -> rusqlite::Result<()> {
init_schemas_table(db_tx)?;
let current_version = schema_version(db_tx, schema_name)?;
let exec_from = current_version.map_or(0_usize, |v| v as usize + 1);
let scripts_to_exec = versioned_scripts.iter().enumerate().skip(exec_from);
for (version, &script) in scripts_to_exec {
set_schema_version(db_tx, schema_name, version as u32)?;
for statement in script {
db_tx.execute(statement, ())?;
}
}
Ok(())
}
impl FromSql for Impl<bitcoin::Txid> {
fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
bitcoin::Txid::from_str(value.as_str()?)
.map(Self)
.map_err(from_sql_error)
}
}
impl ToSql for Impl<bitcoin::Txid> {
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
Ok(self.to_string().into())
}
}
impl FromSql for Impl<bitcoin::BlockHash> {
fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
bitcoin::BlockHash::from_str(value.as_str()?)
.map(Self)
.map_err(from_sql_error)
}
}
impl ToSql for Impl<bitcoin::BlockHash> {
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
Ok(self.to_string().into())
}
}
#[cfg(feature = "miniscript")]
impl FromSql for Impl<DescriptorId> {
fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
DescriptorId::from_str(value.as_str()?)
.map(Self)
.map_err(from_sql_error)
}
}
#[cfg(feature = "miniscript")]
impl ToSql for Impl<DescriptorId> {
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
Ok(self.to_string().into())
}
}
impl FromSql for Impl<bitcoin::Transaction> {
fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
bitcoin::Transaction::consensus_decode_from_finite_reader(&mut value.as_bytes()?)
.map(Self)
.map_err(from_sql_error)
}
}
impl ToSql for Impl<bitcoin::Transaction> {
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
let mut bytes = Vec::<u8>::new();
self.consensus_encode(&mut bytes).map_err(to_sql_error)?;
Ok(bytes.into())
}
}
impl FromSql for Impl<bitcoin::ScriptBuf> {
fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
Ok(bitcoin::Script::from_bytes(value.as_bytes()?)
.to_owned()
.into())
}
}
impl ToSql for Impl<bitcoin::ScriptBuf> {
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
Ok(self.as_bytes().into())
}
}
impl FromSql for Impl<bitcoin::Amount> {
fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
Ok(bitcoin::Amount::from_sat(value.as_i64()?.try_into().map_err(from_sql_error)?).into())
}
}
impl ToSql for Impl<bitcoin::Amount> {
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
let amount: i64 = self.to_sat().try_into().map_err(to_sql_error)?;
Ok(amount.into())
}
}
impl<A: Anchor + serde_crate::de::DeserializeOwned> FromSql for Impl<A> {
fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
serde_json::from_str(value.as_str()?)
.map(Impl)
.map_err(from_sql_error)
}
}
impl<A: Anchor + serde_crate::Serialize> ToSql for Impl<A> {
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
serde_json::to_string(&self.0)
.map(Into::into)
.map_err(to_sql_error)
}
}
#[cfg(feature = "miniscript")]
impl FromSql for Impl<miniscript::Descriptor<miniscript::DescriptorPublicKey>> {
fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
miniscript::Descriptor::from_str(value.as_str()?)
.map(Self)
.map_err(from_sql_error)
}
}
#[cfg(feature = "miniscript")]
impl ToSql for Impl<miniscript::Descriptor<miniscript::DescriptorPublicKey>> {
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
Ok(self.to_string().into())
}
}
impl FromSql for Impl<bitcoin::Network> {
fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
bitcoin::Network::from_str(value.as_str()?)
.map(Self)
.map_err(from_sql_error)
}
}
impl ToSql for Impl<bitcoin::Network> {
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
Ok(self.to_string().into())
}
}
fn from_sql_error<E: std::error::Error + Send + Sync + 'static>(err: E) -> FromSqlError {
FromSqlError::Other(Box::new(err))
}
fn to_sql_error<E: std::error::Error + Send + Sync + 'static>(err: E) -> rusqlite::Error {
rusqlite::Error::ToSqlConversionFailure(Box::new(err))
}
impl<A> tx_graph::ChangeSet<A>
where
A: Anchor + Clone + Ord + serde::Serialize + serde::de::DeserializeOwned,
{
/// Schema name for [`tx_graph::ChangeSet`].
pub const SCHEMA_NAME: &'static str = "bdk_txgraph";
/// Name of table that stores full transactions and `last_seen` timestamps.
pub const TXS_TABLE_NAME: &'static str = "bdk_txs";
/// Name of table that stores floating txouts.
pub const TXOUTS_TABLE_NAME: &'static str = "bdk_txouts";
/// Name of table that stores [`Anchor`]s.
pub const ANCHORS_TABLE_NAME: &'static str = "bdk_anchors";
/// Initialize sqlite tables.
fn init_sqlite_tables(db_tx: &rusqlite::Transaction) -> rusqlite::Result<()> {
let schema_v0: &[&str] = &[
// full transactions
&format!(
"CREATE TABLE {} ( \
txid TEXT PRIMARY KEY NOT NULL, \
raw_tx BLOB, \
last_seen INTEGER \
) STRICT",
Self::TXS_TABLE_NAME,
),
// floating txouts
&format!(
"CREATE TABLE {} ( \
txid TEXT NOT NULL, \
vout INTEGER NOT NULL, \
value INTEGER NOT NULL, \
script BLOB NOT NULL, \
PRIMARY KEY (txid, vout) \
) STRICT",
Self::TXOUTS_TABLE_NAME,
),
// anchors
&format!(
"CREATE TABLE {} ( \
txid TEXT NOT NULL REFERENCES {} (txid), \
block_height INTEGER NOT NULL, \
block_hash TEXT NOT NULL, \
anchor BLOB NOT NULL, \
PRIMARY KEY (txid, block_height, block_hash) \
) STRICT",
Self::ANCHORS_TABLE_NAME,
Self::TXS_TABLE_NAME,
),
];
migrate_schema(db_tx, Self::SCHEMA_NAME, &[schema_v0])
}
/// Construct a [`TxGraph`] from an sqlite database.
pub fn from_sqlite(db_tx: &rusqlite::Transaction) -> rusqlite::Result<Self> {
Self::init_sqlite_tables(db_tx)?;
let mut changeset = Self::default();
let mut statement = db_tx.prepare(&format!(
"SELECT txid, raw_tx, last_seen FROM {}",
Self::TXS_TABLE_NAME,
))?;
let row_iter = statement.query_map([], |row| {
Ok((
row.get::<_, Impl<bitcoin::Txid>>("txid")?,
row.get::<_, Option<Impl<bitcoin::Transaction>>>("raw_tx")?,
row.get::<_, Option<u64>>("last_seen")?,
))
})?;
for row in row_iter {
let (Impl(txid), tx, last_seen) = row?;
if let Some(Impl(tx)) = tx {
changeset.txs.insert(Arc::new(tx));
}
if let Some(last_seen) = last_seen {
changeset.last_seen.insert(txid, last_seen);
}
}
let mut statement = db_tx.prepare(&format!(
"SELECT txid, vout, value, script FROM {}",
Self::TXOUTS_TABLE_NAME,
))?;
let row_iter = statement.query_map([], |row| {
Ok((
row.get::<_, Impl<bitcoin::Txid>>("txid")?,
row.get::<_, u32>("vout")?,
row.get::<_, Impl<bitcoin::Amount>>("value")?,
row.get::<_, Impl<bitcoin::ScriptBuf>>("script")?,
))
})?;
for row in row_iter {
let (Impl(txid), vout, Impl(value), Impl(script_pubkey)) = row?;
changeset.txouts.insert(
bitcoin::OutPoint { txid, vout },
bitcoin::TxOut {
value,
script_pubkey,
},
);
}
let mut statement = db_tx.prepare(&format!(
"SELECT json(anchor), txid FROM {}",
Self::ANCHORS_TABLE_NAME,
))?;
let row_iter = statement.query_map([], |row| {
Ok((
row.get::<_, Impl<A>>("json(anchor)")?,
row.get::<_, Impl<bitcoin::Txid>>("txid")?,
))
})?;
for row in row_iter {
let (Impl(anchor), Impl(txid)) = row?;
changeset.anchors.insert((anchor, txid));
}
Ok(changeset)
}
/// Persist `changeset` to the sqlite database.
pub fn persist_to_sqlite(&self, db_tx: &rusqlite::Transaction) -> rusqlite::Result<()> {
Self::init_sqlite_tables(db_tx)?;
let mut statement = db_tx.prepare_cached(&format!(
"INSERT INTO {}(txid, raw_tx) VALUES(:txid, :raw_tx) ON CONFLICT(txid) DO UPDATE SET raw_tx=:raw_tx",
Self::TXS_TABLE_NAME,
))?;
for tx in &self.txs {
statement.execute(named_params! {
":txid": Impl(tx.compute_txid()),
":raw_tx": Impl(tx.as_ref().clone()),
})?;
}
let mut statement = db_tx
.prepare_cached(&format!(
"INSERT INTO {}(txid, last_seen) VALUES(:txid, :last_seen) ON CONFLICT(txid) DO UPDATE SET last_seen=:last_seen",
Self::TXS_TABLE_NAME,
))?;
for (&txid, &last_seen) in &self.last_seen {
statement.execute(named_params! {
":txid": Impl(txid),
":last_seen": Some(last_seen),
})?;
}
let mut statement = db_tx.prepare_cached(&format!(
"REPLACE INTO {}(txid, vout, value, script) VALUES(:txid, :vout, :value, :script)",
Self::TXOUTS_TABLE_NAME,
))?;
for (op, txo) in &self.txouts {
statement.execute(named_params! {
":txid": Impl(op.txid),
":vout": op.vout,
":value": Impl(txo.value),
":script": Impl(txo.script_pubkey.clone()),
})?;
}
let mut statement = db_tx.prepare_cached(&format!(
"REPLACE INTO {}(txid, block_height, block_hash, anchor) VALUES(:txid, :block_height, :block_hash, jsonb(:anchor))",
Self::ANCHORS_TABLE_NAME,
))?;
for (anchor, txid) in &self.anchors {
let anchor_block = anchor.anchor_block();
statement.execute(named_params! {
":txid": Impl(*txid),
":block_height": anchor_block.height,
":block_hash": Impl(anchor_block.hash),
":anchor": Impl(anchor.clone()),
})?;
}
Ok(())
}
}
impl local_chain::ChangeSet {
/// Schema name for the changeset.
pub const SCHEMA_NAME: &'static str = "bdk_localchain";
/// Name of sqlite table that stores blocks of [`LocalChain`](local_chain::LocalChain).
pub const BLOCKS_TABLE_NAME: &'static str = "bdk_blocks";
/// Initialize sqlite tables for persisting [`local_chain::LocalChain`].
fn init_sqlite_tables(db_tx: &rusqlite::Transaction) -> rusqlite::Result<()> {
let schema_v0: &[&str] = &[
// blocks
&format!(
"CREATE TABLE {} ( \
block_height INTEGER PRIMARY KEY NOT NULL, \
block_hash TEXT NOT NULL \
) STRICT",
Self::BLOCKS_TABLE_NAME,
),
];
migrate_schema(db_tx, Self::SCHEMA_NAME, &[schema_v0])
}
/// Construct a [`LocalChain`](local_chain::LocalChain) from sqlite database.
pub fn from_sqlite(db_tx: &rusqlite::Transaction) -> rusqlite::Result<Self> {
Self::init_sqlite_tables(db_tx)?;
let mut changeset = Self::default();
let mut statement = db_tx.prepare(&format!(
"SELECT block_height, block_hash FROM {}",
Self::BLOCKS_TABLE_NAME,
))?;
let row_iter = statement.query_map([], |row| {
Ok((
row.get::<_, u32>("block_height")?,
row.get::<_, Impl<bitcoin::BlockHash>>("block_hash")?,
))
})?;
for row in row_iter {
let (height, Impl(hash)) = row?;
changeset.blocks.insert(height, Some(hash));
}
Ok(changeset)
}
/// Persist `changeset` to the sqlite database.
pub fn persist_to_sqlite(&self, db_tx: &rusqlite::Transaction) -> rusqlite::Result<()> {
Self::init_sqlite_tables(db_tx)?;
let mut replace_statement = db_tx.prepare_cached(&format!(
"REPLACE INTO {}(block_height, block_hash) VALUES(:block_height, :block_hash)",
Self::BLOCKS_TABLE_NAME,
))?;
let mut delete_statement = db_tx.prepare_cached(&format!(
"DELETE FROM {} WHERE block_height=:block_height",
Self::BLOCKS_TABLE_NAME,
))?;
for (&height, &hash) in &self.blocks {
match hash {
Some(hash) => replace_statement.execute(named_params! {
":block_height": height,
":block_hash": Impl(hash),
})?,
None => delete_statement.execute(named_params! {
":block_height": height,
})?,
};
}
Ok(())
}
}
#[cfg(feature = "miniscript")]
impl keychain_txout::ChangeSet {
/// Schema name for the changeset.
pub const SCHEMA_NAME: &'static str = "bdk_keychaintxout";
/// Name for table that stores last revealed indices per descriptor id.
pub const LAST_REVEALED_TABLE_NAME: &'static str = "bdk_descriptor_last_revealed";
/// Initialize sqlite tables for persisting
/// [`KeychainTxOutIndex`](keychain_txout::KeychainTxOutIndex).
fn init_sqlite_tables(db_tx: &rusqlite::Transaction) -> rusqlite::Result<()> {
let schema_v0: &[&str] = &[
// last revealed
&format!(
"CREATE TABLE {} ( \
descriptor_id TEXT PRIMARY KEY NOT NULL, \
last_revealed INTEGER NOT NULL \
) STRICT",
Self::LAST_REVEALED_TABLE_NAME,
),
];
migrate_schema(db_tx, Self::SCHEMA_NAME, &[schema_v0])
}
/// Construct [`KeychainTxOutIndex`](keychain_txout::KeychainTxOutIndex) from sqlite database
/// and given parameters.
pub fn from_sqlite(db_tx: &rusqlite::Transaction) -> rusqlite::Result<Self> {
Self::init_sqlite_tables(db_tx)?;
let mut changeset = Self::default();
let mut statement = db_tx.prepare(&format!(
"SELECT descriptor_id, last_revealed FROM {}",
Self::LAST_REVEALED_TABLE_NAME,
))?;
let row_iter = statement.query_map([], |row| {
Ok((
row.get::<_, Impl<DescriptorId>>("descriptor_id")?,
row.get::<_, u32>("last_revealed")?,
))
})?;
for row in row_iter {
let (Impl(descriptor_id), last_revealed) = row?;
changeset.last_revealed.insert(descriptor_id, last_revealed);
}
Ok(changeset)
}
/// Persist `changeset` to the sqlite database.
pub fn persist_to_sqlite(&self, db_tx: &rusqlite::Transaction) -> rusqlite::Result<()> {
Self::init_sqlite_tables(db_tx)?;
let mut statement = db_tx.prepare_cached(&format!(
"REPLACE INTO {}(descriptor_id, last_revealed) VALUES(:descriptor_id, :last_revealed)",
Self::LAST_REVEALED_TABLE_NAME,
))?;
for (&descriptor_id, &last_revealed) in &self.last_revealed {
statement.execute(named_params! {
":descriptor_id": Impl(descriptor_id),
":last_revealed": last_revealed,
})?;
}
Ok(())
}
}

File diff suppressed because it is too large

View File

@@ -1,388 +0,0 @@
//! Helper types for spk-based blockchain clients.
use crate::{
collections::BTreeMap, local_chain::CheckPoint, ConfirmationBlockTime, Indexed, TxGraph,
};
use alloc::boxed::Box;
use bitcoin::{OutPoint, Script, ScriptBuf, Txid};
use core::marker::PhantomData;
/// Data required to perform a spk-based blockchain client sync.
///
/// A client sync fetches relevant chain data for a known list of scripts, transaction ids and
/// outpoints. The sync process also updates the chain from the given [`CheckPoint`].
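///
/// # Example
///
/// A minimal sketch; `chain` is a hypothetical [`LocalChain`](crate::local_chain::LocalChain)
/// and `spks` a `Vec<ScriptBuf>` of scripts to watch:
///
/// ```ignore
/// let request = SyncRequest::from_chain_tip(chain.tip())
///     .chain_spks(spks)
///     .inspect_spks(|spk| println!("requesting {:?}", spk));
/// ```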
pub struct SyncRequest {
/// A checkpoint for the current chain [`LocalChain::tip`].
/// The sync process will return a new chain update that extends this tip.
///
/// [`LocalChain::tip`]: crate::local_chain::LocalChain::tip
pub chain_tip: CheckPoint,
/// Transactions that spend from or to these indexed script pubkeys.
pub spks: Box<dyn ExactSizeIterator<Item = ScriptBuf> + Send>,
/// Transactions with these txids.
pub txids: Box<dyn ExactSizeIterator<Item = Txid> + Send>,
/// Transactions with these outpoints or spent from these outpoints.
pub outpoints: Box<dyn ExactSizeIterator<Item = OutPoint> + Send>,
}
impl SyncRequest {
/// Construct a new [`SyncRequest`] from a given `cp` tip.
pub fn from_chain_tip(cp: CheckPoint) -> Self {
Self {
chain_tip: cp,
spks: Box::new(core::iter::empty()),
txids: Box::new(core::iter::empty()),
outpoints: Box::new(core::iter::empty()),
}
}
/// Set the [`Script`]s that will be synced against.
///
/// This consumes the [`SyncRequest`] and returns the updated one.
#[must_use]
pub fn set_spks(
mut self,
spks: impl IntoIterator<IntoIter = impl ExactSizeIterator<Item = ScriptBuf> + Send + 'static>,
) -> Self {
self.spks = Box::new(spks.into_iter());
self
}
/// Set the [`Txid`]s that will be synced against.
///
/// This consumes the [`SyncRequest`] and returns the updated one.
#[must_use]
pub fn set_txids(
mut self,
txids: impl IntoIterator<IntoIter = impl ExactSizeIterator<Item = Txid> + Send + 'static>,
) -> Self {
self.txids = Box::new(txids.into_iter());
self
}
/// Set the [`OutPoint`]s that will be synced against.
///
/// This consumes the [`SyncRequest`] and returns the updated one.
#[must_use]
pub fn set_outpoints(
mut self,
outpoints: impl IntoIterator<
IntoIter = impl ExactSizeIterator<Item = OutPoint> + Send + 'static,
>,
) -> Self {
self.outpoints = Box::new(outpoints.into_iter());
self
}
/// Chain on additional [`Script`]s that will be synced against.
///
/// This consumes the [`SyncRequest`] and returns the updated one.
#[must_use]
pub fn chain_spks(
mut self,
spks: impl IntoIterator<
IntoIter = impl ExactSizeIterator<Item = ScriptBuf> + Send + 'static,
Item = ScriptBuf,
>,
) -> Self {
self.spks = Box::new(ExactSizeChain::new(self.spks, spks.into_iter()));
self
}
/// Chain on additional [`Txid`]s that will be synced against.
///
/// This consumes the [`SyncRequest`] and returns the updated one.
#[must_use]
pub fn chain_txids(
mut self,
txids: impl IntoIterator<
IntoIter = impl ExactSizeIterator<Item = Txid> + Send + 'static,
Item = Txid,
>,
) -> Self {
self.txids = Box::new(ExactSizeChain::new(self.txids, txids.into_iter()));
self
}
/// Chain on additional [`OutPoint`]s that will be synced against.
///
/// This consumes the [`SyncRequest`] and returns the updated one.
#[must_use]
pub fn chain_outpoints(
mut self,
outpoints: impl IntoIterator<
IntoIter = impl ExactSizeIterator<Item = OutPoint> + Send + 'static,
Item = OutPoint,
>,
) -> Self {
self.outpoints = Box::new(ExactSizeChain::new(self.outpoints, outpoints.into_iter()));
self
}
/// Add a closure that will be called for [`Script`]s previously added to this request.
///
/// This consumes the [`SyncRequest`] and returns the updated one.
#[must_use]
pub fn inspect_spks(
mut self,
mut inspect: impl FnMut(&Script) + Send + Sync + 'static,
) -> Self {
self.spks = Box::new(self.spks.inspect(move |spk| inspect(spk)));
self
}
/// Add a closure that will be called for [`Txid`]s previously added to this request.
///
/// This consumes the [`SyncRequest`] and returns the updated one.
#[must_use]
pub fn inspect_txids(mut self, mut inspect: impl FnMut(&Txid) + Send + Sync + 'static) -> Self {
self.txids = Box::new(self.txids.inspect(move |txid| inspect(txid)));
self
}
/// Add a closure that will be called for [`OutPoint`]s previously added to this request.
///
/// This consumes the [`SyncRequest`] and returns the updated one.
#[must_use]
pub fn inspect_outpoints(
mut self,
mut inspect: impl FnMut(&OutPoint) + Send + Sync + 'static,
) -> Self {
self.outpoints = Box::new(self.outpoints.inspect(move |op| inspect(op)));
self
}
/// Populate the request with revealed script pubkeys from `index` with the given `spk_range`.
///
/// This consumes the [`SyncRequest`] and returns the updated one.
#[cfg(feature = "miniscript")]
#[must_use]
pub fn populate_with_revealed_spks<K: Clone + Ord + core::fmt::Debug + Send + Sync>(
self,
index: &crate::indexer::keychain_txout::KeychainTxOutIndex<K>,
spk_range: impl core::ops::RangeBounds<K>,
) -> Self {
use alloc::borrow::ToOwned;
use alloc::vec::Vec;
self.chain_spks(
index
.revealed_spks(spk_range)
.map(|(_, spk)| spk.to_owned())
.collect::<Vec<_>>(),
)
}
}
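As a sketch of how these builder methods compose (the input collections here are hypothetical; any collection whose iterator is `ExactSizeIterator + Send + 'static` works):

```rust
use bdk_chain::{local_chain::CheckPoint, spk_client::SyncRequest};
use bitcoin::{OutPoint, ScriptBuf, Txid};

/// Sketch: gather everything the wallet wants checked into one request.
fn build_sync_request(
    chain_tip: CheckPoint,
    spks: Vec<ScriptBuf>,
    txids: Vec<Txid>,
    outpoints: Vec<OutPoint>,
) -> SyncRequest {
    SyncRequest::from_chain_tip(chain_tip)
        .chain_spks(spks)
        .chain_txids(txids)
        .chain_outpoints(outpoints)
        // Log each spk as the client consumes it, e.g. for progress reporting.
        .inspect_spks(|spk| eprintln!("syncing spk: {}", spk))
}
```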
/// Data returned from a spk-based blockchain client sync.
///
/// See also [`SyncRequest`].
pub struct SyncResult<A = ConfirmationBlockTime> {
/// The update to apply to the receiving [`TxGraph`].
pub graph_update: TxGraph<A>,
/// The update to apply to the receiving [`LocalChain`](crate::local_chain::LocalChain).
pub chain_update: CheckPoint,
}
/// Data required to perform a spk-based blockchain client full scan.
///
/// A client full scan iterates through all the scripts for the given keychains, fetching relevant
/// data until a stop-gap number of consecutive scripts with no data is reached. This operation is
/// generally only used when importing or restoring previously used keychains, where the list of
/// used scripts is not known. The full scan process also updates the chain from the given [`CheckPoint`].
pub struct FullScanRequest<K> {
/// A checkpoint for the current [`LocalChain::tip`].
/// The full scan process will return a new chain update that extends this tip.
///
/// [`LocalChain::tip`]: crate::local_chain::LocalChain::tip
pub chain_tip: CheckPoint,
/// Iterators of script pubkeys indexed by the keychain index.
pub spks_by_keychain: BTreeMap<K, Box<dyn Iterator<Item = Indexed<ScriptBuf>> + Send>>,
}
impl<K: Ord + Clone> FullScanRequest<K> {
/// Construct a new [`FullScanRequest`] from a given `chain_tip`.
#[must_use]
pub fn from_chain_tip(chain_tip: CheckPoint) -> Self {
Self {
chain_tip,
spks_by_keychain: BTreeMap::new(),
}
}
/// Construct a new [`FullScanRequest`] from a given `chain_tip` and `index`.
///
/// Unbounded script pubkey iterators for each keychain (`K`) are extracted using
/// [`KeychainTxOutIndex::all_unbounded_spk_iters`] and is used to populate the
/// [`FullScanRequest`].
///
/// [`KeychainTxOutIndex::all_unbounded_spk_iters`]: crate::indexer::keychain_txout::KeychainTxOutIndex::all_unbounded_spk_iters
#[cfg(feature = "miniscript")]
#[must_use]
pub fn from_keychain_txout_index(
chain_tip: CheckPoint,
index: &crate::indexer::keychain_txout::KeychainTxOutIndex<K>,
) -> Self
where
K: core::fmt::Debug,
{
let mut req = Self::from_chain_tip(chain_tip);
for (keychain, spks) in index.all_unbounded_spk_iters() {
req = req.set_spks_for_keychain(keychain, spks);
}
req
}
/// Set the [`Script`]s for a given `keychain`.
///
/// This consumes the [`FullScanRequest`] and returns the updated one.
#[must_use]
pub fn set_spks_for_keychain(
mut self,
keychain: K,
spks: impl IntoIterator<IntoIter = impl Iterator<Item = Indexed<ScriptBuf>> + Send + 'static>,
) -> Self {
self.spks_by_keychain
.insert(keychain, Box::new(spks.into_iter()));
self
}
/// Chain on additional [`Script`]s that will be synced against.
///
/// This consumes the [`FullScanRequest`] and returns the updated one.
#[must_use]
pub fn chain_spks_for_keychain(
mut self,
keychain: K,
spks: impl IntoIterator<IntoIter = impl Iterator<Item = Indexed<ScriptBuf>> + Send + 'static>,
) -> Self {
match self.spks_by_keychain.remove(&keychain) {
// clippy here suggests to remove `into_iter` from `spks.into_iter()`, but doing so
// results in a compilation error
#[allow(clippy::useless_conversion)]
Some(keychain_spks) => self
.spks_by_keychain
.insert(keychain, Box::new(keychain_spks.chain(spks.into_iter()))),
None => self
.spks_by_keychain
.insert(keychain, Box::new(spks.into_iter())),
};
self
}
/// Add a closure that will be called for every [`Script`] previously added to any keychain in
/// this request.
///
/// This consumes the [`FullScanRequest`] and returns the updated one.
#[must_use]
pub fn inspect_spks_for_all_keychains(
mut self,
inspect: impl FnMut(K, u32, &Script) + Send + Sync + Clone + 'static,
) -> Self
where
K: Send + 'static,
{
for (keychain, spks) in core::mem::take(&mut self.spks_by_keychain) {
let mut inspect = inspect.clone();
self.spks_by_keychain.insert(
keychain.clone(),
Box::new(spks.inspect(move |(i, spk)| inspect(keychain.clone(), *i, spk))),
);
}
self
}
/// Add a closure that will be called for every [`Script`] previously added to a given
/// `keychain` in this request.
///
/// This consumes the [`FullScanRequest`] and returns the updated one.
#[must_use]
pub fn inspect_spks_for_keychain(
mut self,
keychain: K,
mut inspect: impl FnMut(u32, &Script) + Send + Sync + 'static,
) -> Self
where
K: Send + 'static,
{
if let Some(spks) = self.spks_by_keychain.remove(&keychain) {
self.spks_by_keychain.insert(
keychain,
Box::new(spks.inspect(move |(i, spk)| inspect(*i, spk))),
);
}
self
}
}
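A sketch of constructing a full-scan request from an existing index (this assumes the `miniscript` feature; the `&'static str` keychain type is just for illustration):

```rust
use bdk_chain::{
    indexer::keychain_txout::KeychainTxOutIndex, local_chain::CheckPoint,
    spk_client::FullScanRequest,
};

/// Sketch: build a full-scan request covering every keychain tracked by `index`.
fn build_full_scan_request(
    chain_tip: CheckPoint,
    index: &KeychainTxOutIndex<&'static str>,
) -> FullScanRequest<&'static str> {
    FullScanRequest::from_keychain_txout_index(chain_tip, index)
        // Report progress for every derived spk the scan walks through.
        .inspect_spks_for_all_keychains(|keychain, i, _spk| {
            eprintln!("scanning {}: index {}", keychain, i)
        })
}
```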
/// Data returned from a spk-based blockchain client full scan.
///
/// See also [`FullScanRequest`].
pub struct FullScanResult<K, A = ConfirmationBlockTime> {
/// The update to apply to the receiving [`TxGraph`].
pub graph_update: TxGraph<A>,
/// The update to apply to the receiving [`LocalChain`](crate::local_chain::LocalChain).
pub chain_update: CheckPoint,
/// Last active indices for the corresponding keychains (`K`).
pub last_active_indices: BTreeMap<K, u32>,
}
/// A version of [`core::iter::Chain`] which can combine two [`ExactSizeIterator`]s to form a new
/// [`ExactSizeIterator`].
///
/// The danger of this is explained in
/// [the `ExactSizeIterator` docs](https://doc.rust-lang.org/core/iter/trait.ExactSizeIterator.html#when-shouldnt-an-adapter-be-exactsizeiterator).
/// This does not apply here since it would be impossible to scan an item count that overflows
/// `usize` anyway.
struct ExactSizeChain<A, B, I> {
a: Option<A>,
b: Option<B>,
i: PhantomData<I>,
}
impl<A, B, I> ExactSizeChain<A, B, I> {
fn new(a: A, b: B) -> Self {
ExactSizeChain {
a: Some(a),
b: Some(b),
i: PhantomData,
}
}
}
impl<A, B, I> Iterator for ExactSizeChain<A, B, I>
where
A: Iterator<Item = I>,
B: Iterator<Item = I>,
{
type Item = I;
fn next(&mut self) -> Option<Self::Item> {
if let Some(a) = &mut self.a {
let item = a.next();
if item.is_some() {
return item;
}
self.a = None;
}
if let Some(b) = &mut self.b {
let item = b.next();
if item.is_some() {
return item;
}
self.b = None;
}
None
}
}
impl<A, B, I> ExactSizeIterator for ExactSizeChain<A, B, I>
where
A: ExactSizeIterator<Item = I>,
B: ExactSizeIterator<Item = I>,
{
fn len(&self) -> usize {
let a_len = self.a.as_ref().map(|a| a.len()).unwrap_or(0);
let b_len = self.b.as_ref().map(|a| a.len()).unwrap_or(0);
a_len + b_len
}
}
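A minimal check of the invariant, written as a unit test since `ExactSizeChain` is private to this module:

```rust
#[cfg(test)]
mod exact_size_chain_test {
    use super::ExactSizeChain;

    #[test]
    fn len_is_sum_of_parts() {
        let a = [1u32, 2, 3];
        let b = [4u32, 5];
        let chained = ExactSizeChain::new(a.into_iter(), b.into_iter());
        // The chained iterator reports the exact length of both halves combined.
        assert_eq!(chained.len(), 5);
        assert_eq!(chained.collect::<Vec<_>>(), vec![1, 2, 3, 4, 5]);
    }
}
```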

View File

@@ -1,272 +0,0 @@
use crate::{
bitcoin::{secp256k1::Secp256k1, ScriptBuf},
miniscript::{Descriptor, DescriptorPublicKey},
Indexed,
};
use core::{borrow::Borrow, ops::Bound, ops::RangeBounds};
/// Maximum [BIP32](https://bips.xyz/32) derivation index.
pub const BIP32_MAX_INDEX: u32 = (1 << 31) - 1;
/// An iterator for derived script pubkeys.
///
/// [`SpkIterator`] is an implementation of the [`Iterator`] trait which possesses its own `next()`
/// and `nth()` functions, both of which circumvent the unnecessary intermediate derivations required
/// when using their default implementations.
///
/// ## Examples
///
/// ```
/// use bdk_chain::SpkIterator;
/// # use miniscript::{Descriptor, DescriptorPublicKey};
/// # use bitcoin::{secp256k1::Secp256k1};
/// # use std::str::FromStr;
/// # let secp = bitcoin::secp256k1::Secp256k1::signing_only();
/// # let (descriptor, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "wpkh([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/0)").unwrap();
/// # let external_spk_0 = descriptor.at_derivation_index(0).unwrap().script_pubkey();
/// # let external_spk_3 = descriptor.at_derivation_index(3).unwrap().script_pubkey();
/// # let external_spk_4 = descriptor.at_derivation_index(4).unwrap().script_pubkey();
///
/// // Creates a new script pubkey iterator starting at 0 from a descriptor.
/// let mut spk_iter = SpkIterator::new(&descriptor);
/// assert_eq!(spk_iter.next(), Some((0, external_spk_0)));
/// assert_eq!(spk_iter.next(), None);
/// ```
#[derive(Clone)]
pub struct SpkIterator<D> {
next_index: u32,
end: u32,
descriptor: D,
secp: Secp256k1<bitcoin::secp256k1::VerifyOnly>,
}
impl<D> SpkIterator<D>
where
D: Borrow<Descriptor<DescriptorPublicKey>>,
{
/// Create a new script pubkey iterator from `descriptor`.
///
/// This iterates from derivation index 0 and stops at index 0x7FFFFFFF (as specified in
/// BIP-32). Non-wildcard descriptors will only return one script pubkey at derivation index 0.
///
/// Use [`new_with_range`](SpkIterator::new_with_range) to create an iterator with a specified
/// derivation index range.
pub fn new(descriptor: D) -> Self {
SpkIterator::new_with_range(descriptor, 0..=BIP32_MAX_INDEX)
}
/// Create a new script pubkey iterator from `descriptor` and a given `range`.
///
/// Non-wildcard descriptors will only emit a single script pubkey (at derivation index 0).
/// Wildcard descriptors have an end-bound of 0x7FFFFFFF (inclusive).
///
/// Refer to [`new`](SpkIterator::new) for more.
pub fn new_with_range<R>(descriptor: D, range: R) -> Self
where
R: RangeBounds<u32>,
{
let start = match range.start_bound() {
Bound::Included(start) => *start,
Bound::Excluded(start) => *start + 1,
Bound::Unbounded => u32::MIN,
};
let mut end = match range.end_bound() {
Bound::Included(end) => *end + 1,
Bound::Excluded(end) => *end,
Bound::Unbounded => u32::MAX,
};
// Because `end` is exclusive, we want the maximum value to be BIP32_MAX_INDEX + 1.
end = end.min(BIP32_MAX_INDEX + 1);
Self {
next_index: start,
end,
descriptor,
secp: Secp256k1::verification_only(),
}
}
/// Get a reference to the internal descriptor.
pub fn descriptor(&self) -> &D {
&self.descriptor
}
}
impl<D> Iterator for SpkIterator<D>
where
D: Borrow<Descriptor<DescriptorPublicKey>>,
{
type Item = Indexed<ScriptBuf>;
fn next(&mut self) -> Option<Self::Item> {
// For non-wildcard descriptors, we expect the first element to be Some((0, spk)), then None after.
// For wildcard descriptors, we expect it to keep iterating until exhausted.
if self.next_index >= self.end {
return None;
}
// If the descriptor is non-wildcard, only index 0 will return an spk.
if !self.descriptor.borrow().has_wildcard() && self.next_index != 0 {
return None;
}
let script = self
.descriptor
.borrow()
.derived_descriptor(&self.secp, self.next_index)
.expect("the descriptor cannot need hardened derivation")
.script_pubkey();
let output = (self.next_index, script);
self.next_index += 1;
Some(output)
}
fn nth(&mut self, n: usize) -> Option<Self::Item> {
self.next_index = self
.next_index
.saturating_add(u32::try_from(n).unwrap_or(u32::MAX));
self.next()
}
}
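The `nth` shortcut described above can be observed directly. A small standalone sketch, reusing the wildcard descriptor string that appears in the tests below:

```rust
use bdk_chain::SpkIterator;
use miniscript::{Descriptor, DescriptorPublicKey};

fn main() {
    let secp = bitcoin::secp256k1::Secp256k1::signing_only();
    let (desc, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(
        &secp,
        "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/*)",
    )
    .expect("valid descriptor");

    // `nth(99)` derives only the script at index 99; indices 0..99 are skipped
    // arithmetically instead of being derived one by one.
    let mut iter = SpkIterator::new(&desc);
    let (index, _spk) = iter.nth(99).expect("wildcard descriptor yields spks");
    assert_eq!(index, 99);

    // Iteration resumes at the next derivation index.
    assert_eq!(iter.next().map(|(i, _)| i), Some(100));
}
```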
#[cfg(test)]
mod test {
use crate::{
bitcoin::secp256k1::Secp256k1,
indexer::keychain_txout::KeychainTxOutIndex,
miniscript::{Descriptor, DescriptorPublicKey},
spk_iter::{SpkIterator, BIP32_MAX_INDEX},
};
#[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd)]
enum TestKeychain {
External,
Internal,
}
fn init_txout_index() -> (
KeychainTxOutIndex<TestKeychain>,
Descriptor<DescriptorPublicKey>,
Descriptor<DescriptorPublicKey>,
) {
let mut txout_index = KeychainTxOutIndex::<TestKeychain>::new(0);
let secp = Secp256k1::signing_only();
let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
let (internal_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/*)").unwrap();
let _ = txout_index
.insert_descriptor(TestKeychain::External, external_descriptor.clone())
.unwrap();
let _ = txout_index
.insert_descriptor(TestKeychain::Internal, internal_descriptor.clone())
.unwrap();
(txout_index, external_descriptor, internal_descriptor)
}
#[test]
#[allow(clippy::iter_nth_zero)]
#[rustfmt::skip]
fn test_spkiterator_wildcard() {
let (_, external_desc, _) = init_txout_index();
let external_spk_0 = external_desc.at_derivation_index(0).unwrap().script_pubkey();
let external_spk_16 = external_desc.at_derivation_index(16).unwrap().script_pubkey();
let external_spk_20 = external_desc.at_derivation_index(20).unwrap().script_pubkey();
let external_spk_21 = external_desc.at_derivation_index(21).unwrap().script_pubkey();
let external_spk_max = external_desc.at_derivation_index(BIP32_MAX_INDEX).unwrap().script_pubkey();
let mut external_spk = SpkIterator::new(&external_desc);
let max_index = BIP32_MAX_INDEX - 22;
assert_eq!(external_spk.next(), Some((0, external_spk_0)));
assert_eq!(external_spk.nth(15), Some((16, external_spk_16)));
assert_eq!(external_spk.nth(3), Some((20, external_spk_20.clone())));
assert_eq!(external_spk.next(), Some((21, external_spk_21)));
assert_eq!(
external_spk.nth(max_index as usize),
Some((BIP32_MAX_INDEX, external_spk_max))
);
assert_eq!(external_spk.nth(0), None);
let mut external_spk = SpkIterator::new_with_range(&external_desc, 0..21);
assert_eq!(external_spk.nth(20), Some((20, external_spk_20)));
assert_eq!(external_spk.next(), None);
let mut external_spk = SpkIterator::new_with_range(&external_desc, 0..21);
assert_eq!(external_spk.nth(21), None);
}
#[test]
#[allow(clippy::iter_nth_zero)]
fn test_spkiterator_non_wildcard() {
let secp = bitcoin::secp256k1::Secp256k1::signing_only();
let (no_wildcard_descriptor, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "wpkh([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/0)").unwrap();
let external_spk_0 = no_wildcard_descriptor
.at_derivation_index(0)
.unwrap()
.script_pubkey();
let mut external_spk = SpkIterator::new(&no_wildcard_descriptor);
assert_eq!(external_spk.next(), Some((0, external_spk_0.clone())));
assert_eq!(external_spk.next(), None);
let mut external_spk = SpkIterator::new(&no_wildcard_descriptor);
assert_eq!(external_spk.nth(0), Some((0, external_spk_0.clone())));
assert_eq!(external_spk.nth(0), None);
let mut external_spk = SpkIterator::new_with_range(&no_wildcard_descriptor, 0..0);
assert_eq!(external_spk.next(), None);
let mut external_spk = SpkIterator::new_with_range(&no_wildcard_descriptor, 0..1);
assert_eq!(external_spk.nth(0), Some((0, external_spk_0.clone())));
assert_eq!(external_spk.next(), None);
// We test that using new_with_range with range_len > 1 gives back an iterator with
// range_len = 1
let mut external_spk = SpkIterator::new_with_range(&no_wildcard_descriptor, 0..10);
assert_eq!(external_spk.nth(0), Some((0, external_spk_0)));
assert_eq!(external_spk.nth(0), None);
// non index-0 should NOT return an spk
assert_eq!(
SpkIterator::new_with_range(&no_wildcard_descriptor, 1..1).next(),
None
);
assert_eq!(
SpkIterator::new_with_range(&no_wildcard_descriptor, 1..=1).next(),
None
);
assert_eq!(
SpkIterator::new_with_range(&no_wildcard_descriptor, 1..2).next(),
None
);
assert_eq!(
SpkIterator::new_with_range(&no_wildcard_descriptor, 1..=2).next(),
None
);
assert_eq!(
SpkIterator::new_with_range(&no_wildcard_descriptor, 10..11).next(),
None
);
assert_eq!(
SpkIterator::new_with_range(&no_wildcard_descriptor, 10..=10).next(),
None
);
}
}
#[test]
fn spk_iterator_is_send_and_static() {
fn is_send_and_static<A: Send + 'static>() {}
is_send_and_static::<SpkIterator<Descriptor<DescriptorPublicKey>>>()
}

View File

@@ -1,19 +1,16 @@
//! [`SpkTxOutIndex`] is an index storing [`TxOut`]s that have a script pubkey that matches those in a list.
use core::ops::RangeBounds;
use crate::{
collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap},
Indexer,
ForEachTxOut,
};
use bitcoin::{Amount, OutPoint, ScriptBuf, SignedAmount, Transaction, TxOut, Txid};
use bitcoin::{self, OutPoint, Script, Transaction, TxOut, Txid};
/// An index storing [`TxOut`]s that have a script pubkey that matches those in a list.
///
/// The basic idea is that you insert script pubkeys you care about into the index with
/// [`insert_spk`] and then when you call [`Indexer::index_tx`] or [`Indexer::index_txout`], the
/// index will look at any txouts you pass in and store and index any txouts matching one of its
/// script pubkeys.
/// [`insert_spk`] and then when you call [`scan`], the index will look at any txouts you pass in and
/// store and index any txouts matching one of its script pubkeys.
///
/// Each script pubkey is associated with an application-defined script index `I`, which must be
/// [`Ord`]. Usually, this is used to associate the derivation index of the script pubkey or even a
@@ -22,18 +19,19 @@ use bitcoin::{Amount, OutPoint, ScriptBuf, SignedAmount, Transaction, TxOut, Txi
/// Note there is no harm in scanning transactions that disappear from the blockchain or were never
/// in there in the first place. `SpkTxOutIndex` is intentionally *monotone* -- you cannot delete or
/// modify txouts that have been indexed. To find out which txouts from the index are actually in the
/// chain or unspent, you must use other sources of information like a [`TxGraph`].
/// chain or unspent, you must use other sources of information like a [`SparseChain`].
///
/// [`TxOut`]: bitcoin::TxOut
/// [`insert_spk`]: Self::insert_spk
/// [`Ord`]: core::cmp::Ord
/// [`TxGraph`]: crate::tx_graph::TxGraph
/// [`scan`]: Self::scan
/// [`SparseChain`]: crate::sparse_chain::SparseChain
#[derive(Clone, Debug)]
pub struct SpkTxOutIndex<I> {
/// script pubkeys ordered by index
spks: BTreeMap<I, ScriptBuf>,
spks: BTreeMap<I, Script>,
/// A reverse lookup from spk to spk index
spk_indices: HashMap<ScriptBuf, I>,
spk_indices: HashMap<Script, I>,
/// The set of unused indexes.
unused: BTreeSet<I>,
/// Lookup index and txout by outpoint.
@@ -54,47 +52,41 @@ impl<I> Default for SpkTxOutIndex<I> {
}
}
impl<I: Clone + Ord + core::fmt::Debug> Indexer for SpkTxOutIndex<I> {
type ChangeSet = ();
fn index_txout(&mut self, outpoint: OutPoint, txout: &TxOut) -> Self::ChangeSet {
self.scan_txout(outpoint, txout);
Default::default()
}
fn index_tx(&mut self, tx: &Transaction) -> Self::ChangeSet {
self.scan(tx);
Default::default()
}
fn initial_changeset(&self) -> Self::ChangeSet {}
fn apply_changeset(&mut self, _changeset: Self::ChangeSet) {
// This applies nothing.
}
fn is_tx_relevant(&self, tx: &Transaction) -> bool {
self.is_relevant(tx)
}
/// This macro is used instead of a member function of `SpkTxOutIndex`, which would result in a
/// compiler error[E0521]: "borrowed data escapes out of closure" when we attempt to take a
/// reference out of the `ForEachTxOut` closure during scanning.
macro_rules! scan_txout {
($self:ident, $op:expr, $txout:expr) => {{
let spk_i = $self.spk_indices.get(&$txout.script_pubkey);
if let Some(spk_i) = spk_i {
$self.txouts.insert($op, (spk_i.clone(), $txout.clone()));
$self.spk_txouts.insert((spk_i.clone(), $op));
$self.unused.remove(&spk_i);
}
spk_i
}};
}
impl<I: Clone + Ord + core::fmt::Debug> SpkTxOutIndex<I> {
/// Scans a transaction's outputs for matching script pubkeys.
impl<I: Clone + Ord> SpkTxOutIndex<I> {
/// Scans an object containing many txouts.
///
/// Typically, this is used in two situations:
///
/// 1. After loading transaction data from the disk, you may scan over all the txouts to restore all
/// your txouts.
/// your txouts.
/// 2. When getting new data from the chain, you usually scan it before incorporating it into your chain state.
pub fn scan(&mut self, tx: &Transaction) -> BTreeSet<I> {
///
/// See [`ForEachTxout`] for the types that support this.
///
/// [`ForEachTxout`]: crate::ForEachTxOut
pub fn scan(&mut self, txouts: &impl ForEachTxOut) -> BTreeSet<I> {
let mut scanned_indices = BTreeSet::new();
let txid = tx.compute_txid();
for (i, txout) in tx.output.iter().enumerate() {
let op = OutPoint::new(txid, i as u32);
if let Some(spk_i) = self.scan_txout(op, txout) {
txouts.for_each_txout(|(op, txout)| {
if let Some(spk_i) = scan_txout!(self, op, txout) {
scanned_indices.insert(spk_i.clone());
}
}
});
scanned_indices
}
@@ -102,18 +94,7 @@ impl<I: Clone + Ord + core::fmt::Debug> SpkTxOutIndex<I> {
/// Scans a single `TxOut` for a matching script pubkey and returns the index that matches the
/// script pubkey (if any).
pub fn scan_txout(&mut self, op: OutPoint, txout: &TxOut) -> Option<&I> {
let spk_i = self.spk_indices.get(&txout.script_pubkey);
if let Some(spk_i) = spk_i {
self.txouts.insert(op, (spk_i.clone(), txout.clone()));
self.spk_txouts.insert((spk_i.clone(), op));
self.unused.remove(spk_i);
}
spk_i
}
/// Get a reference to the set of indexed outpoints.
pub fn outpoints(&self) -> &BTreeSet<(I, OutPoint)> {
&self.spk_txouts
scan_txout!(self, op, txout)
}
/// Iterate over all known txouts that spend to tracked script pubkeys.
@@ -143,11 +124,11 @@ impl<I: Clone + Ord + core::fmt::Debug> SpkTxOutIndex<I> {
use bitcoin::hashes::Hash;
use core::ops::Bound::*;
let min_op = OutPoint {
txid: Txid::all_zeros(),
txid: Txid::from_inner([0x00; 32]),
vout: u32::MIN,
};
let max_op = OutPoint {
txid: Txid::from_byte_array([0xff; Txid::LEN]),
txid: Txid::from_inner([0xff; 32]),
vout: u32::MAX,
};
@@ -170,25 +151,27 @@ impl<I: Clone + Ord + core::fmt::Debug> SpkTxOutIndex<I> {
///
/// Returns `None` if the `TxOut` hasn't been scanned or if nothing matching was found there.
pub fn txout(&self, outpoint: OutPoint) -> Option<(&I, &TxOut)> {
self.txouts.get(&outpoint).map(|v| (&v.0, &v.1))
self.txouts
.get(&outpoint)
.map(|(spk_i, txout)| (spk_i, txout))
}
/// Returns the script that has been inserted at the `index`.
///
/// If that index hasn't been inserted yet, it will return `None`.
pub fn spk_at_index(&self, index: &I) -> Option<ScriptBuf> {
self.spks.get(index).cloned()
pub fn spk_at_index(&self, index: &I) -> Option<&Script> {
self.spks.get(index)
}
/// The script pubkeys that are being tracked by the index.
pub fn all_spks(&self) -> &BTreeMap<I, ScriptBuf> {
pub fn all_spks(&self) -> &BTreeMap<I, Script> {
&self.spks
}
/// Adds a script pubkey to scan for. Returns `false` and does nothing if the spk already exists
/// in the map.
///
/// The index will look for outputs spending to this spk whenever it scans new data.
pub fn insert_spk(&mut self, index: I, spk: ScriptBuf) -> bool {
pub fn insert_spk(&mut self, index: I, spk: Script) -> bool {
match self.spk_indices.entry(spk.clone()) {
Entry::Vacant(value) => {
value.insert(index.clone());
@@ -208,7 +191,7 @@ impl<I: Clone + Ord + core::fmt::Debug> SpkTxOutIndex<I> {
/// # Example
///
/// ```rust
/// # use bdk_chain::spk_txout::SpkTxOutIndex;
/// # use bdk_chain::SpkTxOutIndex;
///
/// // imagine our spks are indexed like (keychain, derivation_index).
/// let txout_index = SpkTxOutIndex::<(u32, u32)>::default();
@@ -217,10 +200,7 @@ impl<I: Clone + Ord + core::fmt::Debug> SpkTxOutIndex<I> {
/// let unused_change_spks =
/// txout_index.unused_spks((change_index, u32::MIN)..(change_index, u32::MAX));
/// ```
pub fn unused_spks<R>(
&self,
range: R,
) -> impl DoubleEndedIterator<Item = (&I, ScriptBuf)> + Clone + '_
pub fn unused_spks<R>(&self, range: R) -> impl DoubleEndedIterator<Item = (&I, &Script)>
where
R: RangeBounds<I>,
{
@@ -234,7 +214,7 @@ impl<I: Clone + Ord + core::fmt::Debug> SpkTxOutIndex<I> {
/// Here, "unused" means that after the script pubkey was stored in the index, the index has
/// never scanned a transaction output with it.
pub fn is_used(&self, index: &I) -> bool {
!self.unused.contains(index)
self.unused.get(index).is_none()
}
/// Marks the script pubkey at `index` as used even though it hasn't seen an output spending to it.
@@ -271,49 +251,41 @@ impl<I: Clone + Ord + core::fmt::Debug> SpkTxOutIndex<I> {
}
/// Returns the index associated with the script pubkey.
pub fn index_of_spk(&self, script: ScriptBuf) -> Option<&I> {
self.spk_indices.get(script.as_script())
pub fn index_of_spk(&self, script: &Script) -> Option<&I> {
self.spk_indices.get(script)
}
/// Computes the total value transfer effect `tx` has on the script pubkeys in `range`. Value is
/// *sent* when a script pubkey in the `range` is on an input and *received* when it is on an
/// output. For `sent` to be computed correctly, the output being spent must have already been
/// scanned by the index. Calculating received just uses the [`Transaction`] outputs directly,
/// so it will be correct even if it has not been scanned.
pub fn sent_and_received(
&self,
tx: &Transaction,
range: impl RangeBounds<I>,
) -> (Amount, Amount) {
let mut sent = Amount::ZERO;
let mut received = Amount::ZERO;
/// Computes total input value going from script pubkeys in the index (sent) and the total output
/// value going to script pubkeys in the index (received) in `tx`. For the `sent` to be computed
/// correctly, the output being spent must have already been scanned by the index. Calculating
/// received just uses the transaction outputs directly, so it will be correct even if it has not
/// been scanned.
pub fn sent_and_received(&self, tx: &Transaction) -> (u64, u64) {
let mut sent = 0;
let mut received = 0;
for txin in &tx.input {
if let Some((index, txout)) = self.txout(txin.previous_output) {
if range.contains(index) {
sent += txout.value;
}
if let Some((_, txout)) = self.txout(txin.previous_output) {
sent += txout.value;
}
}
for txout in &tx.output {
if let Some(index) = self.index_of_spk(txout.script_pubkey.clone()) {
if range.contains(index) {
received += txout.value;
}
if self.index_of_spk(&txout.script_pubkey).is_some() {
received += txout.value;
}
}
(sent, received)
}
/// Computes the net value transfer effect of `tx` on the script pubkeys in `range`. Shorthand
/// for calling [`sent_and_received`] and subtracting sent from received.
/// Computes the net value that this transaction gives to the script pubkeys in the index and
/// *takes* from the transaction outputs in the index. Shorthand for calling
/// [`sent_and_received`] and subtracting sent from received.
///
/// [`sent_and_received`]: Self::sent_and_received
pub fn net_value(&self, tx: &Transaction, range: impl RangeBounds<I>) -> SignedAmount {
let (sent, received) = self.sent_and_received(tx, range);
received.to_signed().expect("valid `SignedAmount`")
- sent.to_signed().expect("valid `SignedAmount`")
pub fn net_value(&self, tx: &Transaction) -> i64 {
let (sent, received) = self.sent_and_received(tx);
received as i64 - sent as i64
}
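A sketch against the range-based signature introduced above; the `(keychain, derivation_index)` indexing scheme mirrors the `unused_spks` doc example:

```rust
use bdk_chain::spk_txout::SpkTxOutIndex;
use bitcoin::{SignedAmount, Transaction};

/// Sketch: net effect of `tx` on a single keychain, assuming spks are indexed
/// by `(keychain, derivation_index)`.
fn net_for_keychain(
    index: &SpkTxOutIndex<(u32, u32)>,
    tx: &Transaction,
    keychain: u32,
) -> SignedAmount {
    // Only count inputs/outputs whose spk index falls within this keychain.
    index.net_value(tx, (keychain, u32::MIN)..=(keychain, u32::MAX))
}
```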
/// Whether any of the inputs of this transaction spend a txout tracked or whether any output

View File

@@ -1,173 +1,33 @@
use crate::collections::BTreeMap;
use crate::collections::BTreeSet;
use crate::BlockId;
use alloc::vec::Vec;
use bitcoin::{Block, OutPoint, Transaction, TxOut};
/// Trait that "anchors" blockchain data to a specific block of height and hash.
/// Trait to do something with every txout contained in a structure.
///
/// If transaction A is anchored in block B, and block B is in the best chain, we can
/// assume that transaction A is also confirmed in the best chain. This does not necessarily mean
/// that transaction A is confirmed in block B. It could also mean transaction A is confirmed in a
/// parent block of B.
///
/// Every [`Anchor`] implementation must contain a [`BlockId`] parameter, and must implement
/// [`Ord`]. When implementing [`Ord`], the anchors' [`BlockId`]s should take precedence
/// over other elements inside the [`Anchor`]s for comparison purposes, i.e., you should first
/// compare the anchors' [`BlockId`]s and then care about the rest.
///
/// The example shows different types of anchors:
/// ```
/// # use bdk_chain::local_chain::LocalChain;
/// # use bdk_chain::tx_graph::TxGraph;
/// # use bdk_chain::BlockId;
/// # use bdk_chain::ConfirmationBlockTime;
/// # use bdk_chain::example_utils::*;
/// # use bitcoin::hashes::Hash;
/// // Initialize the local chain with two blocks.
/// let chain = LocalChain::from_blocks(
/// [
/// (1, Hash::hash("first".as_bytes())),
/// (2, Hash::hash("second".as_bytes())),
/// ]
/// .into_iter()
/// .collect(),
/// );
///
/// // Transaction to be inserted into `TxGraph`s with different anchor types.
/// let tx = tx_from_hex(RAW_TX_1);
///
/// // Insert `tx` into a `TxGraph` that uses `BlockId` as the anchor type.
/// // When a transaction is anchored with `BlockId`, the anchor block and the confirmation block of
/// // the transaction are the same block.
/// let mut graph_a = TxGraph::<BlockId>::default();
/// let _ = graph_a.insert_tx(tx.clone());
/// graph_a.insert_anchor(
/// tx.compute_txid(),
/// BlockId {
/// height: 1,
/// hash: Hash::hash("first".as_bytes()),
/// },
/// );
///
/// // Insert `tx` into a `TxGraph` that uses `ConfirmationBlockTime` as the anchor type.
/// // This anchor records the anchor block and the confirmation time of the transaction. When a
/// // transaction is anchored with `ConfirmationBlockTime`, the anchor block and confirmation block
/// // of the transaction are the same block.
/// let mut graph_c = TxGraph::<ConfirmationBlockTime>::default();
/// let _ = graph_c.insert_tx(tx.clone());
/// graph_c.insert_anchor(
/// tx.compute_txid(),
/// ConfirmationBlockTime {
/// block_id: BlockId {
/// height: 2,
/// hash: Hash::hash("third".as_bytes()),
/// },
/// confirmation_time: 123,
/// },
/// );
/// ```
pub trait Anchor: core::fmt::Debug + Clone + Eq + PartialOrd + Ord + core::hash::Hash {
/// Returns the [`BlockId`] that the associated blockchain data is "anchored" in.
fn anchor_block(&self) -> BlockId;
/// Get the upper bound of the chain data's confirmation height.
///
/// The default definition gives a pessimistic answer. This can be overridden by the `Anchor`
/// implementation for a more accurate value.
fn confirmation_height_upper_bound(&self) -> u32 {
self.anchor_block().height
}
/// We would prefer to just work with things that can give us an `Iterator<Item=(OutPoint, &TxOut)>`
/// here, but Rust's type system makes it extremely hard to do this (without trait objects).
pub trait ForEachTxOut {
/// The provided closure `f` will be called with each `outpoint/txout` pair.
fn for_each_txout(&self, f: impl FnMut((OutPoint, &TxOut)));
}
impl<'a, A: Anchor> Anchor for &'a A {
fn anchor_block(&self) -> BlockId {
<A as Anchor>::anchor_block(self)
}
}
/// An [`Anchor`] that can be constructed from a given block, block height and transaction position
/// within the block.
pub trait AnchorFromBlockPosition: Anchor {
/// Construct the anchor from a given `block`, block height and `tx_pos` within the block.
fn from_block_position(block: &bitcoin::Block, block_id: BlockId, tx_pos: usize) -> Self;
}
/// Trait that makes an object mergeable.
pub trait Merge: Default {
/// Merge another object of the same type onto `self`.
fn merge(&mut self, other: Self);
/// Returns whether the structure is considered empty.
fn is_empty(&self) -> bool;
/// Take the value, replacing it with the default value.
fn take(&mut self) -> Option<Self> {
if self.is_empty() {
None
} else {
Some(core::mem::take(self))
impl ForEachTxOut for Block {
fn for_each_txout(&self, mut f: impl FnMut((OutPoint, &TxOut))) {
for tx in self.txdata.iter() {
tx.for_each_txout(&mut f)
}
}
}
impl<K: Ord, V> Merge for BTreeMap<K, V> {
fn merge(&mut self, other: Self) {
// We use `extend` instead of `BTreeMap::append` due to performance issues with `append`.
// Refer to https://github.com/rust-lang/rust/issues/34666#issuecomment-675658420
BTreeMap::extend(self, other)
}
fn is_empty(&self) -> bool {
BTreeMap::is_empty(self)
}
}
impl<T: Ord> Merge for BTreeSet<T> {
fn merge(&mut self, other: Self) {
// We use `extend` instead of `BTreeMap::append` due to performance issues with `append`.
// Refer to https://github.com/rust-lang/rust/issues/34666#issuecomment-675658420
BTreeSet::extend(self, other)
}
fn is_empty(&self) -> bool {
BTreeSet::is_empty(self)
}
}
impl<T> Merge for Vec<T> {
fn merge(&mut self, mut other: Self) {
Vec::append(self, &mut other)
}
fn is_empty(&self) -> bool {
Vec::is_empty(self)
}
}
macro_rules! impl_merge_for_tuple {
($($a:ident $b:tt)*) => {
impl<$($a),*> Merge for ($($a,)*) where $($a: Merge),* {
fn merge(&mut self, _other: Self) {
$(Merge::merge(&mut self.$b, _other.$b) );*
}
fn is_empty(&self) -> bool {
$(Merge::is_empty(&self.$b) && )* true
}
impl ForEachTxOut for Transaction {
fn for_each_txout(&self, mut f: impl FnMut((OutPoint, &TxOut))) {
let txid = self.txid();
for (i, txout) in self.output.iter().enumerate() {
f((
OutPoint {
txid,
vout: i as u32,
},
txout,
))
}
}
}
impl_merge_for_tuple!();
impl_merge_for_tuple!(T0 0);
impl_merge_for_tuple!(T0 0 T1 1);
impl_merge_for_tuple!(T0 0 T1 1 T2 2);
impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3);
impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3 T4 4);
impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3 T4 4 T5 5);
impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3 T4 4 T5 5 T6 6);
impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3 T4 4 T5 5 T6 6 T7 7);
impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3 T4 4 T5 5 T6 6 T7 7 T8 8);
impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3 T4 4 T5 5 T6 6 T7 7 T8 8 T9 9);
impl_merge_for_tuple!(T0 0 T1 1 T2 2 T3 3 T4 4 T5 5 T6 6 T7 7 T8 8 T9 9 T10 10);
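A small sketch of the `Merge` contract, assuming the trait is re-exported at the crate root in versions that ship it:

```rust
use bdk_chain::Merge;
use std::collections::BTreeMap;

fn main() {
    let mut a: BTreeMap<u32, &str> = [(0, "zero")].into();
    let b: BTreeMap<u32, &str> = [(1, "one")].into();

    // `merge` extends `a` with `b`; on key collisions the incoming value wins.
    a.merge(b);
    assert_eq!(a.len(), 2);

    // `take` returns the accumulated value and leaves `Default::default()`
    // behind, or `None` if nothing was staged.
    let staged = a.take();
    assert!(staged.is_some() && a.is_empty());
}
```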

File diff suppressed because it is too large

View File

@@ -1,19 +1,3 @@
#![cfg(feature = "miniscript")]
mod tx_template;
#[allow(unused_imports)]
pub use tx_template::*;
#[allow(unused_macros)]
macro_rules! block_id {
($height:expr, $hash:literal) => {{
bdk_chain::BlockId {
height: $height,
hash: bitcoin::hashes::Hash::hash($hash.as_bytes()),
}
}};
}
#[allow(unused_macros)]
macro_rules! h {
($index:literal) => {{
@@ -22,21 +6,20 @@ macro_rules! h {
}
#[allow(unused_macros)]
macro_rules! local_chain {
[ $(($height:expr, $block_hash:expr)), * ] => {{
macro_rules! chain {
($([$($tt:tt)*]),*) => { chain!( checkpoints: [$([$($tt)*]),*] ) };
(checkpoints: $($tail:tt)*) => { chain!( index: TxHeight, checkpoints: $($tail)*) };
(index: $ind:ty, checkpoints: [ $([$height:expr, $block_hash:expr]),* ] $(,txids: [$(($txid:expr, $tx_height:expr)),*])?) => {{
#[allow(unused_mut)]
bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $block_hash).into()),*].into_iter().collect())
.expect("chain must have genesis block")
}};
}
let mut chain = bdk_chain::sparse_chain::SparseChain::<$ind>::from_checkpoints([$(($height, $block_hash).into()),*]);
#[allow(unused_macros)]
macro_rules! chain_update {
[ $(($height:expr, $hash:expr)), * ] => {{
#[allow(unused_mut)]
bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $hash).into()),*].into_iter().collect())
.expect("chain must have genesis block")
.tip()
$(
$(
let _ = chain.insert_tx($txid, $tx_height).expect("should succeed");
)*
)?
chain
}};
}
@@ -69,21 +52,9 @@ macro_rules! changeset {
#[allow(unused)]
pub fn new_tx(lt: u32) -> bitcoin::Transaction {
bitcoin::Transaction {
version: bitcoin::transaction::Version::non_standard(0x00),
lock_time: bitcoin::absolute::LockTime::from_consensus(lt),
version: 0x00,
lock_time: bitcoin::PackedLockTime(lt),
input: vec![],
output: vec![],
}
}
#[allow(unused)]
pub const DESCRIPTORS: [&str; 7] = [
"tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)",
"tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/*)",
"wpkh([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/0/*)",
"tr(tprv8ZgxMBicQKsPd3krDUsBAmtnRsK3rb8u5yi1zhQgMhF1tR8MW7xfE4rnrbbsrbPR52e7rKapu6ztw1jXveJSCGHEriUGZV7mCe88duLp5pj/86'/1'/0'/0/*)",
"tr(tprv8ZgxMBicQKsPd3krDUsBAmtnRsK3rb8u5yi1zhQgMhF1tR8MW7xfE4rnrbbsrbPR52e7rKapu6ztw1jXveJSCGHEriUGZV7mCe88duLp5pj/86'/1'/0'/1/*)",
"wpkh(xprv9s21ZrQH143K4EXURwMHuLS469fFzZyXk7UUpdKfQwhoHcAiYTakpe8pMU2RiEdvrU9McyuE7YDoKcXkoAwEGoK53WBDnKKv2zZbb9BzttX/1/0/*)",
// non-wildcard
"wpkh([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/0)",
];

View File

@@ -1,137 +0,0 @@
#![cfg(feature = "miniscript")]
use rand::distributions::{Alphanumeric, DistString};
use std::collections::HashMap;
use bdk_chain::{spk_txout::SpkTxOutIndex, tx_graph::TxGraph, Anchor};
use bitcoin::{
locktime::absolute::LockTime, secp256k1::Secp256k1, transaction, Amount, OutPoint, ScriptBuf,
Sequence, Transaction, TxIn, TxOut, Txid, Witness,
};
use miniscript::Descriptor;
/// Template for creating a transaction in `TxGraph`.
///
/// Transaction templates make it simple to build up a transaction history without having to
/// explicitly hash previous transactions to form the previous outpoints of later transactions.
#[derive(Clone, Copy, Default)]
pub struct TxTemplate<'a, A> {
/// Uniquely identifies the transaction before it has a txid.
pub tx_name: &'a str,
pub inputs: &'a [TxInTemplate<'a>],
pub outputs: &'a [TxOutTemplate],
pub anchors: &'a [A],
pub last_seen: Option<u64>,
}
#[allow(dead_code)]
pub enum TxInTemplate<'a> {
/// This will give a random txid and vout.
Bogus,
/// This is used for coinbase transactions because they do not have previous outputs.
Coinbase,
/// Contains the `tx_name` and `vout` that we are spending. The rule is that we must only spend
/// from a tx of a previous `TxTemplate`.
PrevTx(&'a str, usize),
}
pub struct TxOutTemplate {
pub value: u64,
pub spk_index: Option<u32>, // some = get spk from SpkTxOutIndex, none = random spk
}
#[allow(unused)]
impl TxOutTemplate {
pub fn new(value: u64, spk_index: Option<u32>) -> Self {
TxOutTemplate { value, spk_index }
}
}
#[allow(dead_code)]
pub fn init_graph<'a, A: Anchor + Clone + 'a>(
tx_templates: impl IntoIterator<Item = &'a TxTemplate<'a, A>>,
) -> (TxGraph<A>, SpkTxOutIndex<u32>, HashMap<&'a str, Txid>) {
let (descriptor, _) =
Descriptor::parse_descriptor(&Secp256k1::signing_only(), super::DESCRIPTORS[2]).unwrap();
let mut graph = TxGraph::<A>::default();
let mut spk_index = SpkTxOutIndex::default();
(0..10).for_each(|index| {
spk_index.insert_spk(
index,
descriptor
.at_derivation_index(index)
.unwrap()
.script_pubkey(),
);
});
let mut tx_ids = HashMap::<&'a str, Txid>::new();
for (bogus_txin_vout, tx_tmp) in tx_templates.into_iter().enumerate() {
let tx = Transaction {
version: transaction::Version::non_standard(0),
lock_time: LockTime::ZERO,
input: tx_tmp
.inputs
.iter()
.map(|input| match input {
TxInTemplate::Bogus => TxIn {
previous_output: OutPoint::new(
bitcoin::hashes::Hash::hash(
Alphanumeric
.sample_string(&mut rand::thread_rng(), 20)
.as_bytes(),
),
bogus_txin_vout as u32,
),
script_sig: ScriptBuf::new(),
sequence: Sequence::default(),
witness: Witness::new(),
},
TxInTemplate::Coinbase => TxIn {
previous_output: OutPoint::null(),
script_sig: ScriptBuf::new(),
sequence: Sequence::MAX,
witness: Witness::new(),
},
TxInTemplate::PrevTx(prev_name, prev_vout) => {
let prev_txid = tx_ids.get(prev_name).expect(
"txin template must spend from tx of template that comes before",
);
TxIn {
previous_output: OutPoint::new(*prev_txid, *prev_vout as _),
script_sig: ScriptBuf::new(),
sequence: Sequence::default(),
witness: Witness::new(),
}
}
})
.collect(),
output: tx_tmp
.outputs
.iter()
.map(|output| match &output.spk_index {
None => TxOut {
value: Amount::from_sat(output.value),
script_pubkey: ScriptBuf::new(),
},
Some(index) => TxOut {
value: Amount::from_sat(output.value),
script_pubkey: spk_index.spk_at_index(index).unwrap(),
},
})
.collect(),
};
tx_ids.insert(tx_tmp.tx_name, tx.compute_txid());
spk_index.scan(&tx);
let _ = graph.insert_tx(tx.clone());
for anchor in tx_tmp.anchors.iter() {
let _ = graph.insert_anchor(tx.compute_txid(), anchor.clone());
}
let _ = graph.insert_seen_at(tx.compute_txid(), tx_tmp.last_seen.unwrap_or(0));
}
(graph, spk_index, tx_ids)
}
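A sketch of the templates in use, written as a test alongside these helpers. It assumes `BlockId: Default` so that `..Default::default()` is available for `TxTemplate<BlockId>`:

```rust
#[test]
fn two_tx_history_from_templates() {
    use bdk_chain::BlockId;

    // "tx1" funds owned spk 0; "tx2" spends it into owned spk 1. Names stand in
    // for txids, so no manual hashing is needed to wire up the outpoints.
    let templates: [TxTemplate<BlockId>; 2] = [
        TxTemplate {
            tx_name: "tx1",
            outputs: &[TxOutTemplate::new(10_000, Some(0))],
            ..Default::default()
        },
        TxTemplate {
            tx_name: "tx2",
            inputs: &[TxInTemplate::PrevTx("tx1", 0)],
            outputs: &[TxOutTemplate::new(9_000, Some(1))],
            ..Default::default()
        },
    ];

    let (graph, _spk_index, txids) = init_graph(&templates);
    assert!(graph.get_tx(txids["tx2"]).is_some());
}
```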

View File

@@ -0,0 +1,653 @@
#[macro_use]
mod common;
use bdk_chain::{
chain_graph::*,
collections::HashSet,
sparse_chain,
tx_graph::{self, TxGraph},
BlockId, TxHeight,
};
use bitcoin::{OutPoint, PackedLockTime, Script, Sequence, Transaction, TxIn, TxOut, Witness};
#[test]
fn test_spent_by() {
let tx1 = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![],
output: vec![TxOut::default()],
};
let op = OutPoint {
txid: tx1.txid(),
vout: 0,
};
let tx2 = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![TxIn {
previous_output: op,
..Default::default()
}],
output: vec![],
};
let tx3 = Transaction {
version: 0x01,
lock_time: PackedLockTime(42),
input: vec![TxIn {
previous_output: op,
..Default::default()
}],
output: vec![],
};
let mut cg1 = ChainGraph::default();
let _ = cg1
.insert_tx(tx1, TxHeight::Unconfirmed)
.expect("should insert");
let mut cg2 = cg1.clone();
let _ = cg1
.insert_tx(tx2.clone(), TxHeight::Unconfirmed)
.expect("should insert");
let _ = cg2
.insert_tx(tx3.clone(), TxHeight::Unconfirmed)
.expect("should insert");
assert_eq!(cg1.spent_by(op), Some((&TxHeight::Unconfirmed, tx2.txid())));
assert_eq!(cg2.spent_by(op), Some((&TxHeight::Unconfirmed, tx3.txid())));
}
#[test]
fn update_evicts_conflicting_tx() {
let cp_a = BlockId {
height: 0,
hash: h!("A"),
};
let cp_b = BlockId {
height: 1,
hash: h!("B"),
};
let cp_b2 = BlockId {
height: 1,
hash: h!("B'"),
};
let tx_a = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![],
output: vec![TxOut::default()],
};
let tx_b = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![TxIn {
previous_output: OutPoint::new(tx_a.txid(), 0),
script_sig: Script::new(),
sequence: Sequence::default(),
witness: Witness::new(),
}],
output: vec![TxOut::default()],
};
let tx_b2 = Transaction {
version: 0x02,
lock_time: PackedLockTime(0),
input: vec![TxIn {
previous_output: OutPoint::new(tx_a.txid(), 0),
script_sig: Script::new(),
sequence: Sequence::default(),
witness: Witness::new(),
}],
output: vec![TxOut::default(), TxOut::default()],
};
{
let mut cg1 = {
let mut cg = ChainGraph::default();
let _ = cg.insert_checkpoint(cp_a).expect("should insert cp");
let _ = cg
.insert_tx(tx_a.clone(), TxHeight::Confirmed(0))
.expect("should insert tx");
let _ = cg
.insert_tx(tx_b.clone(), TxHeight::Unconfirmed)
.expect("should insert tx");
cg
};
let cg2 = {
let mut cg = ChainGraph::default();
let _ = cg
.insert_tx(tx_b2.clone(), TxHeight::Unconfirmed)
.expect("should insert tx");
cg
};
let changeset = ChangeSet::<TxHeight> {
chain: sparse_chain::ChangeSet {
checkpoints: Default::default(),
txids: [
(tx_b.txid(), None),
(tx_b2.txid(), Some(TxHeight::Unconfirmed)),
]
.into(),
},
graph: tx_graph::Additions {
tx: [tx_b2.clone()].into(),
txout: [].into(),
},
};
assert_eq!(
cg1.determine_changeset(&cg2),
Ok(changeset.clone()),
"tx should be evicted from mempool"
);
cg1.apply_changeset(changeset);
}
{
let cg1 = {
let mut cg = ChainGraph::default();
let _ = cg.insert_checkpoint(cp_a).expect("should insert cp");
let _ = cg.insert_checkpoint(cp_b).expect("should insert cp");
let _ = cg
.insert_tx(tx_a.clone(), TxHeight::Confirmed(0))
.expect("should insert tx");
let _ = cg
.insert_tx(tx_b.clone(), TxHeight::Confirmed(1))
.expect("should insert tx");
cg
};
let cg2 = {
let mut cg = ChainGraph::default();
let _ = cg
.insert_tx(tx_b2.clone(), TxHeight::Unconfirmed)
.expect("should insert tx");
cg
};
assert_eq!(
cg1.determine_changeset(&cg2),
Err(UpdateError::UnresolvableConflict(UnresolvableConflict {
already_confirmed_tx: (TxHeight::Confirmed(1), tx_b.txid()),
update_tx: (TxHeight::Unconfirmed, tx_b2.txid()),
})),
"fail if tx is evicted from valid block"
);
}
{
// Given 2 blocks `{A, B}`, and an update that invalidates block B with
// `{A, B'}`, we expect txs that exist in `B` and conflict with txs
// introduced in the update to be successfully evicted.
let mut cg1 = {
let mut cg = ChainGraph::default();
let _ = cg.insert_checkpoint(cp_a).expect("should insert cp");
let _ = cg.insert_checkpoint(cp_b).expect("should insert cp");
let _ = cg
.insert_tx(tx_a, TxHeight::Confirmed(0))
.expect("should insert tx");
let _ = cg
.insert_tx(tx_b.clone(), TxHeight::Confirmed(1))
.expect("should insert tx");
cg
};
let cg2 = {
let mut cg = ChainGraph::default();
let _ = cg.insert_checkpoint(cp_a).expect("should insert cp");
let _ = cg.insert_checkpoint(cp_b2).expect("should insert cp");
let _ = cg
.insert_tx(tx_b2.clone(), TxHeight::Unconfirmed)
.expect("should insert tx");
cg
};
let changeset = ChangeSet::<TxHeight> {
chain: sparse_chain::ChangeSet {
checkpoints: [(1, Some(h!("B'")))].into(),
txids: [
(tx_b.txid(), None),
(tx_b2.txid(), Some(TxHeight::Unconfirmed)),
]
.into(),
},
graph: tx_graph::Additions {
tx: [tx_b2].into(),
txout: [].into(),
},
};
assert_eq!(
cg1.determine_changeset(&cg2),
Ok(changeset.clone()),
"tx should be evicted from B",
);
cg1.apply_changeset(changeset);
}
}
#[test]
fn chain_graph_new_missing() {
let tx_a = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![],
output: vec![TxOut::default()],
};
let tx_b = Transaction {
version: 0x02,
lock_time: PackedLockTime(0),
input: vec![],
output: vec![TxOut::default()],
};
let update = chain!(
index: TxHeight,
checkpoints: [[0, h!("A")]],
txids: [
(tx_a.txid(), TxHeight::Confirmed(0)),
(tx_b.txid(), TxHeight::Confirmed(0))
]
);
let mut graph = TxGraph::default();
let mut expected_missing = HashSet::new();
expected_missing.insert(tx_a.txid());
expected_missing.insert(tx_b.txid());
assert_eq!(
ChainGraph::new(update.clone(), graph.clone()),
Err(NewError::Missing(expected_missing.clone()))
);
let _ = graph.insert_tx(tx_b.clone());
expected_missing.remove(&tx_b.txid());
assert_eq!(
ChainGraph::new(update.clone(), graph.clone()),
Err(NewError::Missing(expected_missing.clone()))
);
let _ = graph.insert_txout(
OutPoint {
txid: tx_a.txid(),
vout: 0,
},
tx_a.output[0].clone(),
);
assert_eq!(
ChainGraph::new(update.clone(), graph.clone()),
Err(NewError::Missing(expected_missing)),
"inserting an output instead of full tx doesn't satisfy constraint"
);
let _ = graph.insert_tx(tx_a.clone());
let new_graph = ChainGraph::new(update.clone(), graph.clone()).unwrap();
let expected_graph = {
let mut cg = ChainGraph::<TxHeight>::default();
let _ = cg
.insert_checkpoint(update.latest_checkpoint().unwrap())
.unwrap();
let _ = cg.insert_tx(tx_a, TxHeight::Confirmed(0)).unwrap();
let _ = cg.insert_tx(tx_b, TxHeight::Confirmed(0)).unwrap();
cg
};
assert_eq!(new_graph, expected_graph);
}
#[test]
fn chain_graph_new_conflicts() {
let tx_a = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![],
output: vec![TxOut::default()],
};
let tx_b = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![TxIn {
previous_output: OutPoint::new(tx_a.txid(), 0),
script_sig: Script::new(),
sequence: Sequence::default(),
witness: Witness::new(),
}],
output: vec![TxOut::default()],
};
let tx_b2 = Transaction {
version: 0x02,
lock_time: PackedLockTime(0),
input: vec![TxIn {
previous_output: OutPoint::new(tx_a.txid(), 0),
script_sig: Script::new(),
sequence: Sequence::default(),
witness: Witness::new(),
}],
output: vec![TxOut::default(), TxOut::default()],
};
let chain = chain!(
index: TxHeight,
checkpoints: [[5, h!("A")]],
txids: [
(tx_a.txid(), TxHeight::Confirmed(1)),
(tx_b.txid(), TxHeight::Confirmed(2)),
(tx_b2.txid(), TxHeight::Confirmed(3))
]
);
let graph = TxGraph::new([tx_a, tx_b, tx_b2]);
assert!(matches!(
ChainGraph::new(chain, graph),
Err(NewError::Conflict { .. })
));
}
#[test]
fn test_get_tx_in_chain() {
let mut cg = ChainGraph::default();
let tx = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![],
output: vec![TxOut::default()],
};
let _ = cg.insert_tx(tx.clone(), TxHeight::Unconfirmed).unwrap();
assert_eq!(
cg.get_tx_in_chain(tx.txid()),
Some((&TxHeight::Unconfirmed, &tx))
);
}
#[test]
fn test_iterate_transactions() {
let mut cg = ChainGraph::default();
let txs = (0..3)
.map(|i| Transaction {
version: i,
lock_time: PackedLockTime(0),
input: vec![],
output: vec![TxOut::default()],
})
.collect::<Vec<_>>();
let _ = cg
.insert_checkpoint(BlockId {
height: 1,
hash: h!("A"),
})
.unwrap();
let _ = cg
.insert_tx(txs[0].clone(), TxHeight::Confirmed(1))
.unwrap();
let _ = cg.insert_tx(txs[1].clone(), TxHeight::Unconfirmed).unwrap();
let _ = cg
.insert_tx(txs[2].clone(), TxHeight::Confirmed(0))
.unwrap();
assert_eq!(
cg.transactions_in_chain().collect::<Vec<_>>(),
vec![
(&TxHeight::Confirmed(0), &txs[2]),
(&TxHeight::Confirmed(1), &txs[0]),
(&TxHeight::Unconfirmed, &txs[1]),
]
);
}
/// Start with: block1, block2a, tx1, tx2a
/// Update 1: block2a -> block2b , tx2a -> tx2b
/// Update 2: block2b -> block2c , tx2b -> tx2a
#[test]
fn test_apply_changes_reintroduce_tx() {
let block1 = BlockId {
height: 1,
hash: h!("block 1"),
};
let block2a = BlockId {
height: 2,
hash: h!("block 2a"),
};
let block2b = BlockId {
height: 2,
hash: h!("block 2b"),
};
let block2c = BlockId {
height: 2,
hash: h!("block 2c"),
};
let tx1 = Transaction {
version: 0,
lock_time: PackedLockTime(1),
input: Vec::new(),
output: [TxOut {
value: 1,
script_pubkey: Script::new(),
}]
.into(),
};
let tx2a = Transaction {
version: 0,
lock_time: PackedLockTime('a'.into()),
input: [TxIn {
previous_output: OutPoint::new(tx1.txid(), 0),
..Default::default()
}]
.into(),
output: [TxOut {
value: 0,
..Default::default()
}]
.into(),
};
let tx2b = Transaction {
lock_time: PackedLockTime('b'.into()),
..tx2a.clone()
};
// block1, block2a, tx1, tx2a
let mut cg = {
let mut cg = ChainGraph::default();
let _ = cg.insert_checkpoint(block1).unwrap();
let _ = cg.insert_checkpoint(block2a).unwrap();
let _ = cg.insert_tx(tx1, TxHeight::Confirmed(1)).unwrap();
let _ = cg.insert_tx(tx2a.clone(), TxHeight::Confirmed(2)).unwrap();
cg
};
// block2a -> block2b , tx2a -> tx2b
let update = {
let mut update = ChainGraph::default();
let _ = update.insert_checkpoint(block1).unwrap();
let _ = update.insert_checkpoint(block2b).unwrap();
let _ = update
.insert_tx(tx2b.clone(), TxHeight::Confirmed(2))
.unwrap();
update
};
assert_eq!(
cg.apply_update(update).expect("should update"),
ChangeSet {
chain: changeset! {
checkpoints: [(2, Some(block2b.hash))],
txids: [(tx2a.txid(), None), (tx2b.txid(), Some(TxHeight::Confirmed(2)))]
},
graph: tx_graph::Additions {
tx: [tx2b.clone()].into(),
..Default::default()
},
}
);
// block2b -> block2c , tx2b -> tx2a
let update = {
let mut update = ChainGraph::default();
let _ = update.insert_checkpoint(block1).unwrap();
let _ = update.insert_checkpoint(block2c).unwrap();
let _ = update
.insert_tx(tx2a.clone(), TxHeight::Confirmed(2))
.unwrap();
update
};
assert_eq!(
cg.apply_update(update).expect("should update"),
ChangeSet {
chain: changeset! {
checkpoints: [(2, Some(block2c.hash))],
txids: [(tx2b.txid(), None), (tx2a.txid(), Some(TxHeight::Confirmed(2)))]
},
..Default::default()
}
);
}
#[test]
fn test_evict_descendants() {
let block_1 = BlockId {
height: 1,
hash: h!("block 1"),
};
let block_2a = BlockId {
height: 2,
hash: h!("block 2 a"),
};
let block_2b = BlockId {
height: 2,
hash: h!("block 2 b"),
};
let tx_1 = Transaction {
input: vec![TxIn {
previous_output: OutPoint::new(h!("fake tx"), 0),
..Default::default()
}],
output: vec![TxOut {
value: 10_000,
script_pubkey: Script::new(),
}],
..common::new_tx(1)
};
let tx_2 = Transaction {
input: vec![TxIn {
previous_output: OutPoint::new(tx_1.txid(), 0),
..Default::default()
}],
output: vec![
TxOut {
value: 20_000,
script_pubkey: Script::new(),
},
TxOut {
value: 30_000,
script_pubkey: Script::new(),
},
],
..common::new_tx(2)
};
let tx_3 = Transaction {
input: vec![TxIn {
previous_output: OutPoint::new(tx_2.txid(), 0),
..Default::default()
}],
output: vec![TxOut {
value: 40_000,
script_pubkey: Script::new(),
}],
..common::new_tx(3)
};
let tx_4 = Transaction {
input: vec![TxIn {
previous_output: OutPoint::new(tx_2.txid(), 1),
..Default::default()
}],
output: vec![TxOut {
value: 40_000,
script_pubkey: Script::new(),
}],
..common::new_tx(4)
};
let tx_5 = Transaction {
input: vec![TxIn {
previous_output: OutPoint::new(tx_4.txid(), 0),
..Default::default()
}],
output: vec![TxOut {
value: 40_000,
script_pubkey: Script::new(),
}],
..common::new_tx(5)
};
let tx_conflict = Transaction {
input: vec![TxIn {
previous_output: OutPoint::new(tx_1.txid(), 0),
..Default::default()
}],
output: vec![TxOut {
value: 12345,
script_pubkey: Script::new(),
}],
..common::new_tx(6)
};
// 1 is spent by 2, 2 is spent by 3 and 4, 4 is spent by 5
let _txid_1 = tx_1.txid();
let txid_2 = tx_2.txid();
let txid_3 = tx_3.txid();
let txid_4 = tx_4.txid();
let txid_5 = tx_5.txid();
// this tx conflicts with 2
let txid_conflict = tx_conflict.txid();
let cg = {
let mut cg = ChainGraph::<TxHeight>::default();
let _ = cg.insert_checkpoint(block_1);
let _ = cg.insert_checkpoint(block_2a);
let _ = cg.insert_tx(tx_1, TxHeight::Confirmed(1));
let _ = cg.insert_tx(tx_2, TxHeight::Confirmed(2));
let _ = cg.insert_tx(tx_3, TxHeight::Confirmed(2));
let _ = cg.insert_tx(tx_4, TxHeight::Confirmed(2));
let _ = cg.insert_tx(tx_5, TxHeight::Confirmed(2));
cg
};
let update = {
let mut cg = ChainGraph::<TxHeight>::default();
let _ = cg.insert_checkpoint(block_1);
let _ = cg.insert_checkpoint(block_2b);
let _ = cg.insert_tx(tx_conflict.clone(), TxHeight::Confirmed(2));
cg
};
assert_eq!(
cg.determine_changeset(&update),
Ok(ChangeSet {
chain: changeset! {
checkpoints: [(2, Some(block_2b.hash))],
txids: [(txid_2, None), (txid_3, None), (txid_4, None), (txid_5, None), (txid_conflict, Some(TxHeight::Confirmed(2)))]
},
graph: tx_graph::Additions {
tx: [tx_conflict.clone()].into(),
..Default::default()
}
})
);
let err = cg
.insert_tx_preview(tx_conflict, TxHeight::Unconfirmed)
.expect_err("must fail due to conflicts");
assert!(matches!(err, InsertTxError::UnresolvableConflict(_)));
}

View File

@@ -1,660 +0,0 @@
#![cfg(feature = "miniscript")]
#[macro_use]
mod common;
use std::{collections::BTreeSet, sync::Arc};
use crate::common::DESCRIPTORS;
use bdk_chain::{
indexed_tx_graph::{self, IndexedTxGraph},
indexer::keychain_txout::KeychainTxOutIndex,
local_chain::LocalChain,
tx_graph, Balance, ChainPosition, ConfirmationBlockTime, DescriptorExt,
};
use bitcoin::{secp256k1::Secp256k1, Amount, OutPoint, ScriptBuf, Transaction, TxIn, TxOut};
use miniscript::Descriptor;
/// Ensure [`IndexedTxGraph::insert_relevant_txs`] can successfully index transactions NOT presented
/// in topological order.
///
/// Given 3 transactions (A, B, C), where A has 2 owned outputs and B and C each spend one of them:
/// typically, we would only know whether B and C are relevant after indexing A (A's outpoints are
/// associated with owned spks in the index). Ensure insertion and indexing are agnostic to
/// topological order; a standalone sketch of the two-pass idea follows the test.
#[test]
fn insert_relevant_txs() {
use bdk_chain::indexer::keychain_txout;
let (descriptor, _) = Descriptor::parse_descriptor(&Secp256k1::signing_only(), DESCRIPTORS[0])
.expect("must be valid");
let spk_0 = descriptor.at_derivation_index(0).unwrap().script_pubkey();
let spk_1 = descriptor.at_derivation_index(9).unwrap().script_pubkey();
let mut graph = IndexedTxGraph::<ConfirmationBlockTime, KeychainTxOutIndex<()>>::new(
KeychainTxOutIndex::new(10),
);
let _ = graph
.index
.insert_descriptor((), descriptor.clone())
.unwrap();
let tx_a = Transaction {
output: vec![
TxOut {
value: Amount::from_sat(10_000),
script_pubkey: spk_0,
},
TxOut {
value: Amount::from_sat(20_000),
script_pubkey: spk_1,
},
],
..common::new_tx(0)
};
let tx_b = Transaction {
input: vec![TxIn {
previous_output: OutPoint::new(tx_a.compute_txid(), 0),
..Default::default()
}],
..common::new_tx(1)
};
let tx_c = Transaction {
input: vec![TxIn {
previous_output: OutPoint::new(tx_a.compute_txid(), 1),
..Default::default()
}],
..common::new_tx(2)
};
let txs = [tx_c, tx_b, tx_a];
let changeset = indexed_tx_graph::ChangeSet {
tx_graph: tx_graph::ChangeSet {
txs: txs.iter().cloned().map(Arc::new).collect(),
..Default::default()
},
indexer: keychain_txout::ChangeSet {
last_revealed: [(descriptor.descriptor_id(), 9_u32)].into(),
},
};
assert_eq!(
graph.batch_insert_relevant(txs.iter().map(|tx| (tx, None))),
changeset,
);
// The initial changeset will also contain info about the keychain we added
let initial_changeset = indexed_tx_graph::ChangeSet {
tx_graph: changeset.tx_graph,
indexer: keychain_txout::ChangeSet {
last_revealed: changeset.indexer.last_revealed,
},
};
assert_eq!(graph.initial_changeset(), initial_changeset);
}
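// A std-only sketch of why the batch insert above can be order-agnostic:
// first learn every owned outpoint from the whole batch, then decide
// relevance, so B and C are recognized even when presented before A. The
// names and the (txid, vout) tuples here are illustrative only.
struct BatchTx {
    id: &'static str,
    spends: &'static [(&'static str, u32)],
    owned_vouts: &'static [u32],
}

fn relevant_ids(txs: &[BatchTx]) -> std::collections::BTreeSet<&'static str> {
    // pass 1: collect the outpoints that pay to scripts we own
    let owned: std::collections::BTreeSet<(&str, u32)> = txs
        .iter()
        .flat_map(|tx| tx.owned_vouts.iter().map(move |&vout| (tx.id, vout)))
        .collect();
    // pass 2: a tx is relevant if it creates or spends an owned outpoint
    txs.iter()
        .filter(|tx| !tx.owned_vouts.is_empty() || tx.spends.iter().any(|op| owned.contains(op)))
        .map(|tx| tx.id)
        .collect()
}

#[test]
fn sketch_order_agnostic_relevance() {
    // C and B are presented before A, mirroring `txs = [tx_c, tx_b, tx_a]` above
    let txs = [
        BatchTx { id: "C", spends: &[("A", 1)], owned_vouts: &[] },
        BatchTx { id: "B", spends: &[("A", 0)], owned_vouts: &[] },
        BatchTx { id: "A", spends: &[], owned_vouts: &[0, 1] },
    ];
    assert_eq!(
        relevant_ids(&txs),
        std::collections::BTreeSet::from(["A", "B", "C"])
    );
}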
/// Ensure consistency of `IndexedTxGraph`'s list_* and balance methods. These methods list
/// relevant txouts and utxos using information fetched from a `ChainOracle` (here a `LocalChain`).
///
/// Test Setup:
///
/// Local Chain => <0> ----- <1> ----- <2> ----- <3> ---- ... ---- <150>
///
/// Keychains:
///
/// keychain_1: Trusted
/// keychain_2: Untrusted
///
/// Transactions:
///
/// tx1: A coinbase sending 70000 sats to a "trusted" address. [Block 0]
/// tx2: An external receive sending 30000 sats to an "untrusted" address. [Block 1]
/// tx3: An internal spend. Spends tx2 and returns change of 10000 sats to a "trusted" address. [Block 2]
/// tx4: A mempool tx sending 20000 sats to an "untrusted" address.
/// tx5: A mempool tx sending 15000 sats to a "trusted" address.
/// tx6: A completely unrelated tx. [Block 3]
///
/// The transactions are added via `insert_relevant_txs`.
/// The `list_owned_txout`, `list_owned_utxos` and `balance` methods are asserted
/// against expected values at block heights 0, 1, and 2.
///
/// Finally, more blocks are added to the local chain until the tx1 coinbase matures.
/// Maturity is asserted on both sides of the inflection point, at block heights 98 and 99;
/// a small maturity-arithmetic sketch follows the test.
#[test]
fn test_list_owned_txouts() {
// Create the local chain
let local_chain = LocalChain::from_blocks((0..150).map(|i| (i as u32, h!("random"))).collect())
.expect("must have genesis hash");
// Initialize the IndexedTxGraph
let (desc_1, _) =
Descriptor::parse_descriptor(&Secp256k1::signing_only(), common::DESCRIPTORS[2]).unwrap();
let (desc_2, _) =
Descriptor::parse_descriptor(&Secp256k1::signing_only(), common::DESCRIPTORS[3]).unwrap();
let mut graph = IndexedTxGraph::<ConfirmationBlockTime, KeychainTxOutIndex<String>>::new(
KeychainTxOutIndex::new(10),
);
assert!(graph
.index
.insert_descriptor("keychain_1".into(), desc_1)
.unwrap());
assert!(graph
.index
.insert_descriptor("keychain_2".into(), desc_2)
.unwrap());
// Get trusted and untrusted addresses
let mut trusted_spks: Vec<ScriptBuf> = Vec::new();
let mut untrusted_spks: Vec<ScriptBuf> = Vec::new();
{
// scope the mutable borrow so we can take an immutable reference to the graph later
for _ in 0..10 {
let ((_, script), _) = graph
.index
.reveal_next_spk("keychain_1".to_string())
.unwrap();
// TODO Assert indexes
trusted_spks.push(script.to_owned());
}
}
{
for _ in 0..10 {
let ((_, script), _) = graph
.index
.reveal_next_spk("keychain_2".to_string())
.unwrap();
untrusted_spks.push(script.to_owned());
}
}
// Create test transactions
// tx1 is the genesis coinbase
let tx1 = Transaction {
input: vec![TxIn {
previous_output: OutPoint::null(),
..Default::default()
}],
output: vec![TxOut {
value: Amount::from_sat(70000),
script_pubkey: trusted_spks[0].to_owned(),
}],
..common::new_tx(0)
};
// tx2 is an incoming transaction received on the untrusted keychain, confirmed at block 1.
let tx2 = Transaction {
output: vec![TxOut {
value: Amount::from_sat(30000),
script_pubkey: untrusted_spks[0].to_owned(),
}],
..common::new_tx(0)
};
// tx3 spends tx2 and returns change to the trusted keychain. Confirmed at block 2.
let tx3 = Transaction {
input: vec![TxIn {
previous_output: OutPoint::new(tx2.compute_txid(), 0),
..Default::default()
}],
output: vec![TxOut {
value: Amount::from_sat(10000),
script_pubkey: trusted_spks[1].to_owned(),
}],
..common::new_tx(0)
};
// tx4 is an external transaction received on the untrusted keychain, unconfirmed.
let tx4 = Transaction {
output: vec![TxOut {
value: Amount::from_sat(20000),
script_pubkey: untrusted_spks[1].to_owned(),
}],
..common::new_tx(0)
};
// tx5 is an external transaction received on the trusted keychain, unconfirmed.
let tx5 = Transaction {
output: vec![TxOut {
value: Amount::from_sat(15000),
script_pubkey: trusted_spks[2].to_owned(),
}],
..common::new_tx(0)
};
// tx6 is an unrelated transaction confirmed at block 3.
let tx6 = common::new_tx(0);
// Insert transactions into graph with respective anchors
// Insert unconfirmed txs with a last_seen timestamp
let _ =
graph.batch_insert_relevant([&tx1, &tx2, &tx3, &tx6].iter().enumerate().map(|(i, tx)| {
let height = i as u32;
(
*tx,
local_chain
.get(height)
.map(|cp| cp.block_id())
.map(|block_id| ConfirmationBlockTime {
block_id,
confirmation_time: 100,
}),
)
}));
let _ = graph.batch_insert_relevant_unconfirmed([&tx4, &tx5].iter().map(|tx| (*tx, 100)));
// A helper closure to extract and filter data from the graph.
let fetch =
|height: u32, graph: &IndexedTxGraph<ConfirmationBlockTime, KeychainTxOutIndex<String>>| {
let chain_tip = local_chain
.get(height)
.map(|cp| cp.block_id())
.unwrap_or_else(|| panic!("block must exist at {}", height));
let txouts = graph
.graph()
.filter_chain_txouts(
&local_chain,
chain_tip,
graph.index.outpoints().iter().cloned(),
)
.collect::<Vec<_>>();
let utxos = graph
.graph()
.filter_chain_unspents(
&local_chain,
chain_tip,
graph.index.outpoints().iter().cloned(),
)
.collect::<Vec<_>>();
let balance = graph.graph().balance(
&local_chain,
chain_tip,
graph.index.outpoints().iter().cloned(),
|_, spk: ScriptBuf| trusted_spks.contains(&spk),
);
let confirmed_txouts_txid = txouts
.iter()
.filter_map(|(_, full_txout)| {
if matches!(full_txout.chain_position, ChainPosition::Confirmed(_)) {
Some(full_txout.outpoint.txid)
} else {
None
}
})
.collect::<BTreeSet<_>>();
let unconfirmed_txouts_txid = txouts
.iter()
.filter_map(|(_, full_txout)| {
if matches!(full_txout.chain_position, ChainPosition::Unconfirmed(_)) {
Some(full_txout.outpoint.txid)
} else {
None
}
})
.collect::<BTreeSet<_>>();
let confirmed_utxos_txid = utxos
.iter()
.filter_map(|(_, full_txout)| {
if matches!(full_txout.chain_position, ChainPosition::Confirmed(_)) {
Some(full_txout.outpoint.txid)
} else {
None
}
})
.collect::<BTreeSet<_>>();
let unconfirmed_utxos_txid = utxos
.iter()
.filter_map(|(_, full_txout)| {
if matches!(full_txout.chain_position, ChainPosition::Unconfirmed(_)) {
Some(full_txout.outpoint.txid)
} else {
None
}
})
.collect::<BTreeSet<_>>();
(
confirmed_txouts_txid,
unconfirmed_txouts_txid,
confirmed_utxos_txid,
unconfirmed_utxos_txid,
balance,
)
};
// ----- TEST BLOCK -----
// AT Block 0
{
let (
confirmed_txouts_txid,
unconfirmed_txouts_txid,
confirmed_utxos_txid,
unconfirmed_utxos_txid,
balance,
) = fetch(0, &graph);
// tx1 is a confirmed txout and is unspent
// tx4, tx5 are unconfirmed
assert_eq!(confirmed_txouts_txid, [tx1.compute_txid()].into());
assert_eq!(
unconfirmed_txouts_txid,
[tx4.compute_txid(), tx5.compute_txid()].into()
);
assert_eq!(confirmed_utxos_txid, [tx1.compute_txid()].into());
assert_eq!(
unconfirmed_utxos_txid,
[tx4.compute_txid(), tx5.compute_txid()].into()
);
assert_eq!(
balance,
Balance {
immature: Amount::from_sat(70000), // immature coinbase
trusted_pending: Amount::from_sat(15000), // tx5
untrusted_pending: Amount::from_sat(20000), // tx4
confirmed: Amount::ZERO // Nothing is confirmed yet
}
);
}
// AT Block 1
{
let (
confirmed_txouts_txid,
unconfirmed_txouts_txid,
confirmed_utxos_txid,
unconfirmed_utxos_txid,
balance,
) = fetch(1, &graph);
// tx2 gets into confirmed txout set
assert_eq!(
confirmed_txouts_txid,
[tx1.compute_txid(), tx2.compute_txid()].into()
);
assert_eq!(
unconfirmed_txouts_txid,
[tx4.compute_txid(), tx5.compute_txid()].into()
);
// tx2 gets into confirmed utxos set
assert_eq!(
confirmed_utxos_txid,
[tx1.compute_txid(), tx2.compute_txid()].into()
);
assert_eq!(
unconfirmed_utxos_txid,
[tx4.compute_txid(), tx5.compute_txid()].into()
);
assert_eq!(
balance,
Balance {
immature: Amount::from_sat(70000), // immature coinbase
trusted_pending: Amount::from_sat(15000), // tx5
untrusted_pending: Amount::from_sat(20000), // tx4
confirmed: Amount::from_sat(30_000) // tx2 got confirmed
}
);
}
// AT Block 2
{
let (
confirmed_txouts_txid,
unconfirmed_txouts_txid,
confirmed_utxos_txid,
unconfirmed_utxos_txid,
balance,
) = fetch(2, &graph);
// tx3 now gets into the confirmed txout set
assert_eq!(
confirmed_txouts_txid,
[tx1.compute_txid(), tx2.compute_txid(), tx3.compute_txid()].into()
);
assert_eq!(
unconfirmed_txouts_txid,
[tx4.compute_txid(), tx5.compute_txid()].into()
);
// tx3 also gets into confirmed utxo set
assert_eq!(
confirmed_utxos_txid,
[tx1.compute_txid(), tx3.compute_txid()].into()
);
assert_eq!(
unconfirmed_utxos_txid,
[tx4.compute_txid(), tx5.compute_txid()].into()
);
assert_eq!(
balance,
Balance {
immature: Amount::from_sat(70000), // immature coinbase
trusted_pending: Amount::from_sat(15000), // tx5
untrusted_pending: Amount::from_sat(20000), // tx4
confirmed: Amount::from_sat(10000) // tx3 got confirmed
}
);
}
// AT Block 98
{
let (
confirmed_txouts_txid,
unconfirmed_txouts_txid,
confirmed_utxos_txid,
unconfirmed_utxos_txid,
balance,
) = fetch(98, &graph);
// no change compared to block 2
assert_eq!(
confirmed_txouts_txid,
[tx1.compute_txid(), tx2.compute_txid(), tx3.compute_txid()].into()
);
assert_eq!(
unconfirmed_txouts_txid,
[tx4.compute_txid(), tx5.compute_txid()].into()
);
assert_eq!(
confirmed_utxos_txid,
[tx1.compute_txid(), tx3.compute_txid()].into()
);
assert_eq!(
unconfirmed_utxos_txid,
[tx4.compute_txid(), tx5.compute_txid()].into()
);
// Coinbase is still immature
assert_eq!(
balance,
Balance {
immature: Amount::from_sat(70000), // immature coinbase
trusted_pending: Amount::from_sat(15000), // tx5
untrusted_pending: Amount::from_sat(20000), // tx4
confirmed: Amount::from_sat(10000) // tx3 is confirmed
}
);
}
// AT Block 99
{
let (_, _, _, _, balance) = fetch(99, &graph);
// Coinbase maturity hits: at tip height 99, the block-0 coinbase has 100 confirmations
assert_eq!(
balance,
Balance {
immature: Amount::ZERO, // coinbase matured
trusted_pending: Amount::from_sat(15000), // tx5
untrusted_pending: Amount::from_sat(20000), // tx4
confirmed: Amount::from_sat(80000) // tx1 + tx3
}
);
}
}
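// The maturity arithmetic behind the block 98/99 assertions above, as a tiny
// standalone check. Assumes the usual 100-block coinbase maturity; the helper
// name is illustrative, not bdk_chain API.
const SKETCH_COINBASE_MATURITY: u32 = 100;

// confirmations of a tx confirmed at `height` when the tip is `tip`
// (the confirming block itself counts as one confirmation)
fn confirmations(height: u32, tip: u32) -> u32 {
    tip.saturating_sub(height) + 1
}

#[test]
fn sketch_coinbase_maturity_inflection() {
    // tx1's coinbase is confirmed at height 0:
    assert_eq!(confirmations(0, 98), 99); // still immature at tip 98
    assert_eq!(confirmations(0, 99), 100); // matures exactly at tip 99
    assert!(confirmations(0, 99) >= SKETCH_COINBASE_MATURITY);
}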
/// Given a `LocalChain`, `IndexedTxGraph`, and a `Transaction`, when we insert some anchor
/// (possibly non-canonical) and/or a last-seen timestamp into the graph, we expect the
/// result of `get_chain_position` in these cases:
///
/// - tx with no anchors or last_seen has no `ChainPosition`
/// - tx with any last_seen will be `Unconfirmed`
/// - tx with an anchor in best chain will be `Confirmed`
/// - tx with an anchor not in best chain (no last_seen) has no `ChainPosition`
///
/// A condensed sketch of this decision rule follows the test.
#[test]
fn test_get_chain_position() {
use bdk_chain::local_chain::CheckPoint;
use bdk_chain::spk_txout::SpkTxOutIndex;
use bdk_chain::BlockId;
struct TestCase<A> {
name: &'static str,
tx: Transaction,
anchor: Option<A>,
last_seen: Option<u64>,
exp_pos: Option<ChainPosition<A>>,
}
// addr: bcrt1qc6fweuf4xjvz4x3gx3t9e0fh4hvqyu2qw4wvxm
let spk = ScriptBuf::from_hex("0014c692ecf13534982a9a2834565cbd37add8027140").unwrap();
let mut graph = IndexedTxGraph::new({
let mut index = SpkTxOutIndex::default();
let _ = index.insert_spk(0u32, spk.clone());
index
});
// Anchors to test
let blocks = vec![block_id!(0, "g"), block_id!(1, "A"), block_id!(2, "B")];
let cp = CheckPoint::from_block_ids(blocks.clone()).unwrap();
let chain = LocalChain::from_tip(cp).unwrap();
// The test will insert a transaction into the indexed tx graph
// along with any anchors and timestamps, then check the value
// returned by `get_chain_position`.
fn run(
chain: &LocalChain,
graph: &mut IndexedTxGraph<BlockId, SpkTxOutIndex<u32>>,
test: TestCase<BlockId>,
) {
let TestCase {
name,
tx,
anchor,
last_seen,
exp_pos,
} = test;
// add data to graph
let txid = tx.compute_txid();
let _ = graph.insert_tx(tx);
if let Some(anchor) = anchor {
let _ = graph.insert_anchor(txid, anchor);
}
if let Some(seen_at) = last_seen {
let _ = graph.insert_seen_at(txid, seen_at);
}
// check chain position
let res = graph
.graph()
.get_chain_position(chain, chain.tip().block_id(), txid);
assert_eq!(
res.map(ChainPosition::cloned),
exp_pos,
"failed test case: {name}"
);
}
[
TestCase {
name: "tx no anchors or last_seen - no chain pos",
tx: Transaction {
output: vec![TxOut {
value: Amount::ONE_BTC,
script_pubkey: spk.clone(),
}],
..common::new_tx(0)
},
anchor: None,
last_seen: None,
exp_pos: None,
},
TestCase {
name: "tx last_seen - unconfirmed",
tx: Transaction {
output: vec![TxOut {
value: Amount::ONE_BTC,
script_pubkey: spk.clone(),
}],
..common::new_tx(1)
},
anchor: None,
last_seen: Some(2),
exp_pos: Some(ChainPosition::Unconfirmed(2)),
},
TestCase {
name: "tx anchor in best chain - confirmed",
tx: Transaction {
output: vec![TxOut {
value: Amount::ONE_BTC,
script_pubkey: spk.clone(),
}],
..common::new_tx(2)
},
anchor: Some(blocks[1]),
last_seen: None,
exp_pos: Some(ChainPosition::Confirmed(blocks[1])),
},
TestCase {
name: "tx unknown anchor with last_seen - unconfirmed",
tx: Transaction {
output: vec![TxOut {
value: Amount::ONE_BTC,
script_pubkey: spk.clone(),
}],
..common::new_tx(3)
},
anchor: Some(block_id!(2, "B'")),
last_seen: Some(2),
exp_pos: Some(ChainPosition::Unconfirmed(2)),
},
TestCase {
name: "tx unknown anchor - no chain pos",
tx: Transaction {
output: vec![TxOut {
value: Amount::ONE_BTC,
script_pubkey: spk.clone(),
}],
..common::new_tx(4)
},
anchor: Some(block_id!(2, "B'")),
last_seen: None,
exp_pos: None,
},
]
.into_iter()
.for_each(|t| run(&chain, &mut graph, t));
}
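// The four doc-comment cases above, condensed into a std-only decision
// function. `SketchPos` and the booleans are illustrative stand-ins for
// `ChainPosition` and the graph queries, not the crate's real signatures.
#[derive(Debug, PartialEq)]
enum SketchPos {
    Confirmed,
    Unconfirmed(u64),
}

// an anchor in the best chain wins; otherwise any last-seen timestamp makes
// the tx unconfirmed; otherwise the tx has no chain position at all
fn sketch_chain_position(anchor_in_best_chain: bool, last_seen: Option<u64>) -> Option<SketchPos> {
    if anchor_in_best_chain {
        return Some(SketchPos::Confirmed);
    }
    last_seen.map(SketchPos::Unconfirmed)
}

#[test]
fn sketch_get_chain_position_cases() {
    assert_eq!(sketch_chain_position(false, None), None); // no anchor, no last_seen
    assert_eq!(sketch_chain_position(false, Some(2)), Some(SketchPos::Unconfirmed(2)));
    assert_eq!(sketch_chain_position(true, None), Some(SketchPos::Confirmed));
    // an anchor that is NOT in the best chain behaves as if there were none:
    // with a last_seen it is unconfirmed, without one it has no position
    assert_eq!(sketch_chain_position(false, Some(2)), Some(SketchPos::Unconfirmed(2)));
    assert_eq!(sketch_chain_position(false, None), None);
}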

View File

@@ -0,0 +1,239 @@
#![cfg(feature = "miniscript")]
#[macro_use]
mod common;
use bdk_chain::{
keychain::{Balance, KeychainTracker},
miniscript::{
bitcoin::{secp256k1::Secp256k1, OutPoint, PackedLockTime, Transaction, TxOut},
Descriptor,
},
BlockId, ConfirmationTime, TxHeight,
};
use bitcoin::TxIn;
#[test]
fn test_insert_tx() {
let mut tracker = KeychainTracker::default();
let secp = Secp256k1::new();
let (descriptor, _) = Descriptor::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
tracker.add_keychain((), descriptor.clone());
let txout = TxOut {
value: 100_000,
script_pubkey: descriptor.at_derivation_index(5).script_pubkey(),
};
let tx = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![],
output: vec![txout],
};
let _ = tracker.txout_index.reveal_to_target(&(), 5);
let changeset = tracker
.insert_tx_preview(tx.clone(), ConfirmationTime::Unconfirmed)
.unwrap();
tracker.apply_changeset(changeset);
assert_eq!(
tracker
.chain_graph()
.transactions_in_chain()
.collect::<Vec<_>>(),
vec![(&ConfirmationTime::Unconfirmed, &tx)]
);
assert_eq!(
tracker
.txout_index
.txouts_of_keychain(&())
.collect::<Vec<_>>(),
vec![(
5,
OutPoint {
txid: tx.txid(),
vout: 0
}
)]
);
}
#[test]
fn test_balance() {
use core::str::FromStr;
#[derive(Debug, Clone, PartialEq, Eq, Ord, PartialOrd)]
enum Keychain {
One,
Two,
}
let mut tracker = KeychainTracker::<Keychain, TxHeight>::default();
let one = Descriptor::from_str("tr([73c5da0a/86'/0'/0']xpub6BgBgsespWvERF3LHQu6CnqdvfEvtMcQjYrcRzx53QJjSxarj2afYWcLteoGVky7D3UKDP9QyrLprQ3VCECoY49yfdDEHGCtMMj92pReUsQ/0/*)#rg247h69").unwrap();
let two = Descriptor::from_str("tr([73c5da0a/86'/0'/0']xpub6BgBgsespWvERF3LHQu6CnqdvfEvtMcQjYrcRzx53QJjSxarj2afYWcLteoGVky7D3UKDP9QyrLprQ3VCECoY49yfdDEHGCtMMj92pReUsQ/1/*)#ju05rz2a").unwrap();
tracker.add_keychain(Keychain::One, one);
tracker.add_keychain(Keychain::Two, two);
let tx1 = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![],
output: vec![TxOut {
value: 13_000,
script_pubkey: tracker
.txout_index
.reveal_next_spk(&Keychain::One)
.0
.1
.clone(),
}],
};
let tx2 = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![],
output: vec![TxOut {
value: 7_000,
script_pubkey: tracker
.txout_index
.reveal_next_spk(&Keychain::Two)
.0
.1
.clone(),
}],
};
let tx_coinbase = Transaction {
version: 0x01,
lock_time: PackedLockTime(0),
input: vec![TxIn::default()],
output: vec![TxOut {
value: 11_000,
script_pubkey: tracker
.txout_index
.reveal_next_spk(&Keychain::Two)
.0
.1
.clone(),
}],
};
assert!(tx_coinbase.is_coin_base());
let _ = tracker
.insert_checkpoint(BlockId {
height: 5,
hash: h!("1"),
})
.unwrap();
let should_trust = |keychain: &Keychain| match *keychain {
Keychain::One => false,
Keychain::Two => true,
};
assert_eq!(tracker.balance(should_trust), Balance::default());
let _ = tracker
.insert_tx(tx1.clone(), TxHeight::Unconfirmed)
.unwrap();
assert_eq!(
tracker.balance(should_trust),
Balance {
untrusted_pending: 13_000,
..Default::default()
}
);
let _ = tracker
.insert_tx(tx2.clone(), TxHeight::Unconfirmed)
.unwrap();
assert_eq!(
tracker.balance(should_trust),
Balance {
trusted_pending: 7_000,
untrusted_pending: 13_000,
..Default::default()
}
);
let _ = tracker
.insert_tx(tx_coinbase, TxHeight::Confirmed(0))
.unwrap();
assert_eq!(
tracker.balance(should_trust),
Balance {
trusted_pending: 7_000,
untrusted_pending: 13_000,
immature: 11_000,
..Default::default()
}
);
let _ = tracker.insert_tx(tx1, TxHeight::Confirmed(1)).unwrap();
assert_eq!(
tracker.balance(should_trust),
Balance {
trusted_pending: 7_000,
untrusted_pending: 0,
immature: 11_000,
confirmed: 13_000,
}
);
let _ = tracker.insert_tx(tx2, TxHeight::Confirmed(2)).unwrap();
assert_eq!(
tracker.balance(should_trust),
Balance {
trusted_pending: 0,
untrusted_pending: 0,
immature: 11_000,
confirmed: 20_000,
}
);
let _ = tracker
.insert_checkpoint(BlockId {
height: 98,
hash: h!("98"),
})
.unwrap();
assert_eq!(
tracker.balance(should_trust),
Balance {
trusted_pending: 0,
untrusted_pending: 0,
immature: 11_000,
confirmed: 20_000,
}
);
let _ = tracker
.insert_checkpoint(BlockId {
height: 99,
hash: h!("99"),
})
.unwrap();
assert_eq!(
tracker.balance(should_trust),
Balance {
trusted_pending: 0,
untrusted_pending: 0,
immature: 0,
confirmed: 31_000,
}
);
assert_eq!(tracker.balance_at(0), 0);
assert_eq!(tracker.balance_at(1), 13_000);
assert_eq!(tracker.balance_at(2), 20_000);
assert_eq!(tracker.balance_at(98), 20_000);
assert_eq!(tracker.balance_at(99), 31_000);
assert_eq!(tracker.balance_at(100), 31_000);
}
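// A std-only sketch of the bucketing rule the balance assertions above
// follow: immature coinbase first, then unconfirmed value split by keychain
// trust, everything else counts as confirmed. This is a simplification of
// `KeychainTracker::balance`; field and function names are illustrative.
struct SketchUtxo {
    value: u64,
    confirmed: bool,
    is_coinbase: bool,
    mature: bool,
    trusted: bool, // does the receiving keychain count as trusted?
}

#[derive(Debug, Default, PartialEq)]
struct SketchBalance {
    immature: u64,
    trusted_pending: u64,
    untrusted_pending: u64,
    confirmed: u64,
}

fn sketch_balance(utxos: &[SketchUtxo]) -> SketchBalance {
    let mut b = SketchBalance::default();
    for u in utxos {
        if u.is_coinbase && !u.mature {
            b.immature += u.value;
        } else if !u.confirmed && u.trusted {
            b.trusted_pending += u.value;
        } else if !u.confirmed {
            b.untrusted_pending += u.value;
        } else {
            b.confirmed += u.value;
        }
    }
    b
}

#[test]
fn sketch_balance_buckets() {
    // mirrors the state right after the coinbase above is inserted
    let utxos = [
        SketchUtxo { value: 13_000, confirmed: false, is_coinbase: false, mature: false, trusted: false },
        SketchUtxo { value: 7_000, confirmed: false, is_coinbase: false, mature: false, trusted: true },
        SketchUtxo { value: 11_000, confirmed: true, is_coinbase: true, mature: false, trusted: true },
    ];
    assert_eq!(
        sketch_balance(&utxos),
        SketchBalance {
            immature: 11_000,
            trusted_pending: 7_000,
            untrusted_pending: 13_000,
            confirmed: 0,
        }
    );
}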

View File

@@ -4,227 +4,162 @@
mod common;
use bdk_chain::{
collections::BTreeMap,
indexer::keychain_txout::{ChangeSet, KeychainTxOutIndex},
DescriptorExt, DescriptorId, Indexer, Merge,
keychain::{DerivationAdditions, KeychainTxOutIndex},
};
use bitcoin::{secp256k1::Secp256k1, Amount, OutPoint, ScriptBuf, Transaction, TxOut};
use bitcoin::{secp256k1::Secp256k1, OutPoint, Script, Transaction, TxOut};
use miniscript::{Descriptor, DescriptorPublicKey};
use crate::common::DESCRIPTORS;
#[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd)]
enum TestKeychain {
External,
Internal,
}
fn parse_descriptor(descriptor: &str) -> Descriptor<DescriptorPublicKey> {
fn init_txout_index() -> (
bdk_chain::keychain::KeychainTxOutIndex<TestKeychain>,
Descriptor<DescriptorPublicKey>,
Descriptor<DescriptorPublicKey>,
) {
let mut txout_index = bdk_chain::keychain::KeychainTxOutIndex::<TestKeychain>::default();
let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only();
Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, descriptor)
.unwrap()
.0
let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
let (internal_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/*)").unwrap();
txout_index.add_keychain(TestKeychain::External, external_descriptor.clone());
txout_index.add_keychain(TestKeychain::Internal, internal_descriptor.clone());
(txout_index, external_descriptor, internal_descriptor)
}
fn init_txout_index(
external_descriptor: Descriptor<DescriptorPublicKey>,
internal_descriptor: Descriptor<DescriptorPublicKey>,
lookahead: u32,
) -> KeychainTxOutIndex<TestKeychain> {
let mut txout_index = KeychainTxOutIndex::<TestKeychain>::new(lookahead);
let _ = txout_index
.insert_descriptor(TestKeychain::External, external_descriptor)
.unwrap();
let _ = txout_index
.insert_descriptor(TestKeychain::Internal, internal_descriptor)
.unwrap();
txout_index
}
fn spk_at_index(descriptor: &Descriptor<DescriptorPublicKey>, index: u32) -> ScriptBuf {
fn spk_at_index(descriptor: &Descriptor<DescriptorPublicKey>, index: u32) -> Script {
descriptor
.derived_descriptor(&Secp256k1::verification_only(), index)
.expect("must derive")
.script_pubkey()
}
// We create two changesets `lhs` and `rhs`, insert various descriptors with various
// `last_revealed` indices, merge `rhs` into `lhs`, and check that the result is consistent
// with these rules (a standalone sketch of the merge rule follows the test):
// - Existing index doesn't update if the new index in `other` is lower than `self`.
// - Existing index updates if the new index in `other` is higher than `self`.
// - Existing index is unchanged if keychain doesn't exist in `other`.
// - New keychain gets added if the keychain is in `other` but not in `self`.
#[test]
fn merge_changesets_check_last_revealed() {
let secp = bitcoin::secp256k1::Secp256k1::signing_only();
let descriptor_ids: Vec<_> = DESCRIPTORS
.iter()
.take(4)
.map(|d| {
Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, d)
.unwrap()
.0
.descriptor_id()
})
.collect();
let mut lhs_di = BTreeMap::<DescriptorId, u32>::default();
let mut rhs_di = BTreeMap::<DescriptorId, u32>::default();
lhs_di.insert(descriptor_ids[0], 7);
lhs_di.insert(descriptor_ids[1], 0);
lhs_di.insert(descriptor_ids[2], 3);
rhs_di.insert(descriptor_ids[0], 3); // value less than lhs desc 0
rhs_di.insert(descriptor_ids[1], 5); // value more than lhs desc 1
rhs_di.insert(descriptor_ids[3], 4); // key doesn't exist in lhs
let mut lhs = ChangeSet {
last_revealed: lhs_di,
};
let rhs = ChangeSet {
last_revealed: rhs_di,
};
lhs.merge(rhs);
// Existing index doesn't update if the new index in `other` is lower than `self`.
assert_eq!(lhs.last_revealed.get(&descriptor_ids[0]), Some(&7));
// Existing index updates if the new index in `other` is higher than `self`.
assert_eq!(lhs.last_revealed.get(&descriptor_ids[1]), Some(&5));
// Existing index is unchanged if keychain doesn't exist in `other`.
assert_eq!(lhs.last_revealed.get(&descriptor_ids[2]), Some(&3));
// New keychain gets added if the keychain is in `other` but not in `self`.
assert_eq!(lhs.last_revealed.get(&descriptor_ids[3]), Some(&4));
}
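// A standalone sketch of the merge rule checked above: for every key in
// `other`, keep the maximum of the two indices; keys only in `self` stay
// untouched. Plain string keys stand in for `DescriptorId`.
fn sketch_merge_last_revealed(
    lhs: &mut std::collections::BTreeMap<&'static str, u32>,
    rhs: std::collections::BTreeMap<&'static str, u32>,
) {
    for (desc_id, idx) in rhs {
        let entry = lhs.entry(desc_id).or_insert(idx);
        *entry = (*entry).max(idx);
    }
}

#[test]
fn sketch_merge_rules() {
    let mut lhs = std::collections::BTreeMap::from([("d0", 7), ("d1", 0), ("d2", 3)]);
    let rhs = std::collections::BTreeMap::from([("d0", 3), ("d1", 5), ("d3", 4)]);
    sketch_merge_last_revealed(&mut lhs, rhs);
    assert_eq!(lhs.get("d0"), Some(&7)); // lower incoming index is ignored
    assert_eq!(lhs.get("d1"), Some(&5)); // higher incoming index wins
    assert_eq!(lhs.get("d2"), Some(&3)); // untouched: key not in rhs
    assert_eq!(lhs.get("d3"), Some(&4)); // new key gets added
}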
#[test]
fn test_set_all_derivation_indices() {
let external_descriptor = parse_descriptor(DESCRIPTORS[0]);
let internal_descriptor = parse_descriptor(DESCRIPTORS[1]);
let mut txout_index =
init_txout_index(external_descriptor.clone(), internal_descriptor.clone(), 0);
let (mut txout_index, _, _) = init_txout_index();
let derive_to: BTreeMap<_, _> =
[(TestKeychain::External, 12), (TestKeychain::Internal, 24)].into();
let last_revealed: BTreeMap<_, _> = [
(external_descriptor.descriptor_id(), 12),
(internal_descriptor.descriptor_id(), 24),
]
.into();
assert_eq!(
txout_index.reveal_to_target_multi(&derive_to),
ChangeSet {
last_revealed: last_revealed.clone()
}
txout_index.reveal_to_target_multi(&derive_to).1.as_inner(),
&derive_to
);
assert_eq!(txout_index.last_revealed_indices(), derive_to);
assert_eq!(txout_index.last_revealed_indices(), &derive_to);
assert_eq!(
txout_index.reveal_to_target_multi(&derive_to),
ChangeSet::default(),
txout_index.reveal_to_target_multi(&derive_to).1,
DerivationAdditions::default(),
"no changes if we set to the same thing"
);
assert_eq!(txout_index.initial_changeset().last_revealed, last_revealed);
}
#[test]
fn test_lookahead() {
let external_descriptor = parse_descriptor(DESCRIPTORS[0]);
let internal_descriptor = parse_descriptor(DESCRIPTORS[1]);
let mut txout_index =
init_txout_index(external_descriptor.clone(), internal_descriptor.clone(), 10);
let (mut txout_index, external_desc, internal_desc) = init_txout_index();
// ensure it does not break anything if lookahead is set multiple times
(0..=10).for_each(|lookahead| txout_index.set_lookahead(&TestKeychain::External, lookahead));
(0..=20)
.filter(|v| v % 2 == 0)
.for_each(|lookahead| txout_index.set_lookahead(&TestKeychain::Internal, lookahead));
assert_eq!(txout_index.inner().all_spks().len(), 30);
// given:
// - external lookahead set to 10
// - internal lookahead set to 20
// when:
// - the external derivation index is set to a value higher than the last, but within the lookahead
// expect:
// - scripts cached in spk_txout_index should increase correctly
// - stored scripts of external keychain should be of expected counts
for index in (0..20).skip_while(|i| i % 2 == 1) {
let (revealed_spks, revealed_changeset) = txout_index
.reveal_to_target(TestKeychain::External, index)
.unwrap();
let (revealed_spks, revealed_additions) =
txout_index.reveal_to_target(&TestKeychain::External, index);
assert_eq!(
revealed_spks,
vec![(index, spk_at_index(&external_descriptor, index))],
revealed_spks.collect::<Vec<_>>(),
vec![(index, spk_at_index(&external_desc, index))],
);
assert_eq!(
&revealed_changeset.last_revealed,
&[(external_descriptor.descriptor_id(), index)].into()
revealed_additions.as_inner(),
&[(TestKeychain::External, index)].into()
);
assert_eq!(
txout_index.inner().all_spks().len(),
10 /* external lookahead */ +
10 /* internal lookahead */ +
20 /* internal lookahead */ +
index as usize + 1 /* `derived` count */
);
assert_eq!(
txout_index
.revealed_keychain_spks(TestKeychain::External)
.revealed_spks_of_keychain(&TestKeychain::External)
.count(),
index as usize + 1,
);
assert_eq!(
txout_index
.revealed_keychain_spks(TestKeychain::Internal)
.revealed_spks_of_keychain(&TestKeychain::Internal)
.count(),
0,
);
assert_eq!(
txout_index
.unused_keychain_spks(TestKeychain::External)
.unused_spks_of_keychain(&TestKeychain::External)
.count(),
index as usize + 1,
);
assert_eq!(
txout_index
.unused_keychain_spks(TestKeychain::Internal)
.unused_spks_of_keychain(&TestKeychain::Internal)
.count(),
0,
);
}
// given:
// - internal lookahead is 10
// - internal lookahead is 20
// - internal derivation index is `None`
// when:
// - derivation index is set ahead of current derivation index + lookahead
// expect:
// - scripts cached in spk_txout_index should increase correctly, a.k.a. no scripts are skipped
let (revealed_spks, revealed_changeset) = txout_index
.reveal_to_target(TestKeychain::Internal, 24)
.unwrap();
let (revealed_spks, revealed_additions) =
txout_index.reveal_to_target(&TestKeychain::Internal, 24);
assert_eq!(
revealed_spks,
revealed_spks.collect::<Vec<_>>(),
(0..=24)
.map(|index| (index, spk_at_index(&internal_descriptor, index)))
.map(|index| (index, spk_at_index(&internal_desc, index)))
.collect::<Vec<_>>(),
);
assert_eq!(
&revealed_changeset.last_revealed,
&[(internal_descriptor.descriptor_id(), 24)].into()
revealed_additions.as_inner(),
&[(TestKeychain::Internal, 24)].into()
);
assert_eq!(
txout_index.inner().all_spks().len(),
10 /* external lookahead */ +
10 /* internal lookahead */ +
20 /* internal lookahead */ +
20 /* external stored index count */ +
25 /* internal stored index count */
);
assert_eq!(
txout_index
.revealed_keychain_spks(TestKeychain::Internal)
.revealed_spks_of_keychain(&TestKeychain::Internal)
.count(),
25,
);
// ensure derivation indices are expected for each keychain
let last_external_index = txout_index
.last_revealed_index(TestKeychain::External)
.last_revealed_index(&TestKeychain::External)
.expect("already derived");
let last_internal_index = txout_index
.last_revealed_index(TestKeychain::Internal)
.last_revealed_index(&TestKeychain::Internal)
.expect("already derived");
assert_eq!(last_external_index, 19);
assert_eq!(last_internal_index, 24);
@@ -239,40 +174,38 @@ fn test_lookahead() {
let tx = Transaction {
output: vec![
TxOut {
script_pubkey: external_descriptor
script_pubkey: external_desc
.at_derivation_index(external_index)
.unwrap()
.script_pubkey(),
value: Amount::from_sat(10_000),
value: 10_000,
},
TxOut {
script_pubkey: internal_descriptor
script_pubkey: internal_desc
.at_derivation_index(internal_index)
.unwrap()
.script_pubkey(),
value: Amount::from_sat(10_000),
value: 10_000,
},
],
..common::new_tx(external_index)
};
assert_eq!(txout_index.index_tx(&tx), ChangeSet::default());
assert_eq!(txout_index.scan(&tx), DerivationAdditions::default());
assert_eq!(
txout_index.last_revealed_index(TestKeychain::External),
txout_index.last_revealed_index(&TestKeychain::External),
Some(last_external_index)
);
assert_eq!(
txout_index.last_revealed_index(TestKeychain::Internal),
txout_index.last_revealed_index(&TestKeychain::Internal),
Some(last_internal_index)
);
assert_eq!(
txout_index
.revealed_keychain_spks(TestKeychain::External)
.revealed_spks_of_keychain(&TestKeychain::External)
.count(),
last_external_index as usize + 1,
);
assert_eq!(
txout_index
.revealed_keychain_spks(TestKeychain::Internal)
.revealed_spks_of_keychain(&TestKeychain::Internal)
.count(),
last_internal_index as usize + 1,
);
@@ -286,84 +219,68 @@ fn test_lookahead() {
// - last used index should change as expected
#[test]
fn test_scan_with_lookahead() {
let external_descriptor = parse_descriptor(DESCRIPTORS[0]);
let internal_descriptor = parse_descriptor(DESCRIPTORS[1]);
let mut txout_index =
init_txout_index(external_descriptor.clone(), internal_descriptor.clone(), 10);
let (mut txout_index, external_desc, _) = init_txout_index();
txout_index.set_lookahead_for_all(10);
let spks: BTreeMap<u32, ScriptBuf> = [0, 10, 20, 30]
let spks: BTreeMap<u32, Script> = [0, 10, 20, 30]
.into_iter()
.map(|i| {
(
i,
external_descriptor
.at_derivation_index(i)
.unwrap()
.script_pubkey(),
)
})
.map(|i| (i, external_desc.at_derivation_index(i).script_pubkey()))
.collect();
for (&spk_i, spk) in &spks {
let op = OutPoint::new(h!("fake tx"), spk_i);
let txout = TxOut {
script_pubkey: spk.clone(),
value: Amount::ZERO,
value: 0,
};
let changeset = txout_index.index_txout(op, &txout);
let additions = txout_index.scan_txout(op, &txout);
assert_eq!(
&changeset.last_revealed,
&[(external_descriptor.descriptor_id(), spk_i)].into()
additions.as_inner(),
&[(TestKeychain::External, spk_i)].into()
);
assert_eq!(
txout_index.last_revealed_index(TestKeychain::External),
txout_index.last_revealed_index(&TestKeychain::External),
Some(spk_i)
);
assert_eq!(
txout_index.last_used_index(TestKeychain::External),
txout_index.last_used_index(&TestKeychain::External),
Some(spk_i)
);
}
// now try with index 41 (lookahead surpassed); we expect the txout to not be indexed
let spk_41 = external_descriptor
.at_derivation_index(41)
.unwrap()
.script_pubkey();
let spk_41 = external_desc.at_derivation_index(41).script_pubkey();
let op = OutPoint::new(h!("fake tx"), 41);
let txout = TxOut {
script_pubkey: spk_41,
value: Amount::ZERO,
value: 0,
};
let changeset = txout_index.index_txout(op, &txout);
assert!(changeset.is_empty());
let additions = txout_index.scan_txout(op, &txout);
assert!(additions.is_empty());
}
#[test]
#[rustfmt::skip]
fn test_wildcard_derivations() {
let external_descriptor = parse_descriptor(DESCRIPTORS[0]);
let internal_descriptor = parse_descriptor(DESCRIPTORS[1]);
let mut txout_index = init_txout_index(external_descriptor.clone(), internal_descriptor.clone(), 0);
let external_spk_0 = external_descriptor.at_derivation_index(0).unwrap().script_pubkey();
let external_spk_16 = external_descriptor.at_derivation_index(16).unwrap().script_pubkey();
let external_spk_26 = external_descriptor.at_derivation_index(26).unwrap().script_pubkey();
let external_spk_27 = external_descriptor.at_derivation_index(27).unwrap().script_pubkey();
let (mut txout_index, external_desc, _) = init_txout_index();
let external_spk_0 = external_desc.at_derivation_index(0).script_pubkey();
let external_spk_16 = external_desc.at_derivation_index(16).script_pubkey();
let external_spk_26 = external_desc.at_derivation_index(26).script_pubkey();
let external_spk_27 = external_desc.at_derivation_index(27).script_pubkey();
// - nothing is derived
// - unused list is also empty
//
// - next_derivation_index() == (0, true)
// - derive_new() == ((0, <spk>), keychain::ChangeSet)
// - next_unused() == ((0, <spk>), keychain::ChangeSet:is_empty())
assert_eq!(txout_index.next_index(TestKeychain::External).unwrap(), (0, true));
let (spk, changeset) = txout_index.reveal_next_spk(TestKeychain::External).unwrap();
assert_eq!(spk, (0_u32, external_spk_0.clone()));
assert_eq!(&changeset.last_revealed, &[(external_descriptor.descriptor_id(), 0)].into());
let (spk, changeset) = txout_index.next_unused_spk(TestKeychain::External).unwrap();
assert_eq!(spk, (0_u32, external_spk_0.clone()));
assert_eq!(&changeset.last_revealed, &[].into());
// - derive_new() == ((0, <spk>), DerivationAdditions)
// - next_unused() == ((0, <spk>), DerivationAdditions:is_empty())
assert_eq!(txout_index.next_index(&TestKeychain::External), (0, true));
let (spk, changeset) = txout_index.reveal_next_spk(&TestKeychain::External);
assert_eq!(spk, (0_u32, &external_spk_0));
assert_eq!(changeset.as_inner(), &[(TestKeychain::External, 0)].into());
let (spk, changeset) = txout_index.next_unused_spk(&TestKeychain::External);
assert_eq!(spk, (0_u32, &external_spk_0));
assert_eq!(changeset.as_inner(), &[].into());
// - derived till 25
// - used all spks till 15.
@@ -371,51 +288,48 @@ fn test_wildcard_derivations() {
// - unused list: [16, 18, 19, 21, 22, 24, 25]
// - next_derivation_index() = (26, true)
// - derive_new() = ((26, <spk>), keychain::ChangeSet)
// - next_unused() == ((16, <spk>), keychain::ChangeSet::is_empty())
let _ = txout_index.reveal_to_target(TestKeychain::External, 25);
// - derive_new() = ((26, <spk>), DerivationAdditions)
// - next_unused() == ((16, <spk>), DerivationAdditions::is_empty())
let _ = txout_index.reveal_to_target(&TestKeychain::External, 25);
(0..=15)
.chain([17, 20, 23])
.for_each(|index| assert!(txout_index.mark_used(TestKeychain::External, index)));
.into_iter()
.chain(vec![17, 20, 23].into_iter())
.for_each(|index| assert!(txout_index.mark_used(&TestKeychain::External, index)));
assert_eq!(txout_index.next_index(TestKeychain::External).unwrap(), (26, true));
assert_eq!(txout_index.next_index(&TestKeychain::External), (26, true));
let (spk, changeset) = txout_index.reveal_next_spk(TestKeychain::External).unwrap();
assert_eq!(spk, (26, external_spk_26));
let (spk, changeset) = txout_index.reveal_next_spk(&TestKeychain::External);
assert_eq!(spk, (26, &external_spk_26));
assert_eq!(&changeset.last_revealed, &[(external_descriptor.descriptor_id(), 26)].into());
assert_eq!(changeset.as_inner(), &[(TestKeychain::External, 26)].into());
let (spk, changeset) = txout_index.next_unused_spk(TestKeychain::External).unwrap();
assert_eq!(spk, (16, external_spk_16));
assert_eq!(&changeset.last_revealed, &[].into());
let (spk, changeset) = txout_index.next_unused_spk(&TestKeychain::External);
assert_eq!(spk, (16, &external_spk_16));
assert_eq!(changeset.as_inner(), &[].into());
// - Use all the derived spks till 26.
// - next_unused() = ((27, <spk>), keychain::ChangeSet)
(0..=26).for_each(|index| {
txout_index.mark_used(TestKeychain::External, index);
// - next_unused() = ((27, <spk>), DerivationAdditions)
(0..=26).into_iter().for_each(|index| {
txout_index.mark_used(&TestKeychain::External, index);
});
let (spk, changeset) = txout_index.next_unused_spk(TestKeychain::External).unwrap();
assert_eq!(spk, (27, external_spk_27));
assert_eq!(&changeset.last_revealed, &[(external_descriptor.descriptor_id(), 27)].into());
let (spk, changeset) = txout_index.next_unused_spk(&TestKeychain::External);
assert_eq!(spk, (27, &external_spk_27));
assert_eq!(changeset.as_inner(), &[(TestKeychain::External, 27)].into());
}
#[test]
fn test_non_wildcard_derivations() {
let mut txout_index = KeychainTxOutIndex::<TestKeychain>::new(0);
let mut txout_index = KeychainTxOutIndex::<TestKeychain>::default();
let secp = bitcoin::secp256k1::Secp256k1::signing_only();
let (no_wildcard_descriptor, _) =
Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, DESCRIPTORS[6]).unwrap();
let (no_wildcard_descriptor, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "wpkh([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/0)").unwrap();
let external_spk = no_wildcard_descriptor
.at_derivation_index(0)
.unwrap()
.script_pubkey();
let _ = txout_index
.insert_descriptor(TestKeychain::External, no_wildcard_descriptor.clone())
.unwrap();
txout_index.add_keychain(TestKeychain::External, no_wildcard_descriptor);
// given:
// - `txout_index` with no stored scripts
@@ -423,267 +337,33 @@ fn test_non_wildcard_derivations() {
// - next derivation index should be new
// - when we derive a new script, script @ index 0
// - when we get the next unused script, script @ index 0
assert_eq!(
txout_index.next_index(TestKeychain::External).unwrap(),
(0, true)
);
let (spk, changeset) = txout_index.reveal_next_spk(TestKeychain::External).unwrap();
assert_eq!(spk, (0, external_spk.clone()));
assert_eq!(
&changeset.last_revealed,
&[(no_wildcard_descriptor.descriptor_id(), 0)].into()
);
assert_eq!(txout_index.next_index(&TestKeychain::External), (0, true));
let (spk, changeset) = txout_index.reveal_next_spk(&TestKeychain::External);
assert_eq!(spk, (0, &external_spk));
assert_eq!(changeset.as_inner(), &[(TestKeychain::External, 0)].into());
let (spk, changeset) = txout_index.next_unused_spk(TestKeychain::External).unwrap();
assert_eq!(spk, (0, external_spk.clone()));
assert_eq!(&changeset.last_revealed, &[].into());
let (spk, changeset) = txout_index.next_unused_spk(&TestKeychain::External);
assert_eq!(spk, (0, &external_spk));
assert_eq!(changeset.as_inner(), &[].into());
// given:
// - the non-wildcard descriptor already has a stored and used script
// expect:
// - next derivation index should not be new
// - derive new and next unused should return the old script
// - store_up_to should not panic and return empty changeset
assert_eq!(
txout_index.next_index(TestKeychain::External).unwrap(),
(0, false)
);
txout_index.mark_used(TestKeychain::External, 0);
// - store_up_to should not panic and return empty additions
assert_eq!(txout_index.next_index(&TestKeychain::External), (0, false));
txout_index.mark_used(&TestKeychain::External, 0);
let (spk, changeset) = txout_index.reveal_next_spk(TestKeychain::External).unwrap();
assert_eq!(spk, (0, external_spk.clone()));
assert_eq!(&changeset.last_revealed, &[].into());
let (spk, changeset) = txout_index.reveal_next_spk(&TestKeychain::External);
assert_eq!(spk, (0, &external_spk));
assert_eq!(changeset.as_inner(), &[].into());
let (spk, changeset) = txout_index.next_unused_spk(TestKeychain::External).unwrap();
assert_eq!(spk, (0, external_spk.clone()));
assert_eq!(&changeset.last_revealed, &[].into());
let (revealed_spks, revealed_changeset) = txout_index
.reveal_to_target(TestKeychain::External, 200)
.unwrap();
assert_eq!(revealed_spks.len(), 0);
assert!(revealed_changeset.is_empty());
// we check that spks_of_keychain returns a SpkIterator with just one element
assert_eq!(
txout_index
.revealed_keychain_spks(TestKeychain::External)
.count(),
1,
);
}
/// Check that calling `lookahead_to_target` stores the expected spks.
#[test]
fn lookahead_to_target() {
#[derive(Default)]
struct TestCase {
/// Global lookahead value.
lookahead: u32,
/// Last revealed index for external keychain.
external_last_revealed: Option<u32>,
/// Last revealed index for internal keychain.
internal_last_revealed: Option<u32>,
/// Call `lookahead_to_target(External, u32)`.
external_target: Option<u32>,
/// Call `lookahead_to_target(Internal, u32)`.
internal_target: Option<u32>,
}
let test_cases = &[
TestCase {
lookahead: 0,
external_target: Some(100),
..Default::default()
},
TestCase {
lookahead: 10,
internal_target: Some(99),
..Default::default()
},
TestCase {
lookahead: 100,
internal_target: Some(9),
external_target: Some(10),
..Default::default()
},
TestCase {
lookahead: 12,
external_last_revealed: Some(2),
internal_last_revealed: Some(2),
internal_target: Some(15),
external_target: Some(13),
},
TestCase {
lookahead: 13,
external_last_revealed: Some(100),
internal_last_revealed: Some(21),
internal_target: Some(120),
external_target: Some(130),
},
];
for t in test_cases {
let external_descriptor = parse_descriptor(DESCRIPTORS[0]);
let internal_descriptor = parse_descriptor(DESCRIPTORS[1]);
let mut index = init_txout_index(
external_descriptor.clone(),
internal_descriptor.clone(),
t.lookahead,
);
if let Some(last_revealed) = t.external_last_revealed {
let _ = index.reveal_to_target(TestKeychain::External, last_revealed);
}
if let Some(last_revealed) = t.internal_last_revealed {
let _ = index.reveal_to_target(TestKeychain::Internal, last_revealed);
}
let keychain_test_cases = [
(
TestKeychain::External,
t.external_last_revealed,
t.external_target,
),
(
TestKeychain::Internal,
t.internal_last_revealed,
t.internal_target,
),
];
for (keychain, last_revealed, target) in keychain_test_cases {
if let Some(target) = target {
let original_last_stored_index = match last_revealed {
Some(last_revealed) => Some(last_revealed + t.lookahead),
None => t.lookahead.checked_sub(1),
};
let exp_last_stored_index = match original_last_stored_index {
Some(original_last_stored_index) => {
Ord::max(target, original_last_stored_index)
}
None => target,
};
index.lookahead_to_target(keychain.clone(), target);
let keys = index
.inner()
.all_spks()
.range((keychain.clone(), 0)..=(keychain.clone(), u32::MAX))
.map(|(k, _)| k.clone())
.collect::<Vec<_>>();
let exp_keys = core::iter::repeat(keychain)
.zip(0_u32..=exp_last_stored_index)
.collect::<Vec<_>>();
assert_eq!(keys, exp_keys);
}
}
}
}
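// The expectation the test above computes, factored out: after
// `lookahead_to_target(target)`, the last stored index is the max of the
// target and the previous last stored index (last_revealed + lookahead, or
// lookahead - 1 when nothing was revealed yet). Illustrative helper only.
fn sketch_exp_last_stored_index(lookahead: u32, last_revealed: Option<u32>, target: u32) -> u32 {
    let original = match last_revealed {
        Some(i) => Some(i + lookahead),
        None => lookahead.checked_sub(1),
    };
    match original {
        Some(o) => target.max(o),
        None => target,
    }
}

#[test]
fn sketch_lookahead_to_target_expectation() {
    // lookahead 12, last revealed 2, target 13 => stored up to 14
    assert_eq!(sketch_exp_last_stored_index(12, Some(2), 13), 14);
    // lookahead 0, nothing revealed, target 100 => stored up to 100
    assert_eq!(sketch_exp_last_stored_index(0, None, 100), 100);
}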
#[test]
fn applying_changesets_one_by_one_vs_aggregate_must_have_same_result() {
let desc = parse_descriptor(DESCRIPTORS[0]);
let changesets: &[ChangeSet] = &[
ChangeSet {
last_revealed: [(desc.descriptor_id(), 10)].into(),
},
ChangeSet {
last_revealed: [(desc.descriptor_id(), 12)].into(),
},
];
let mut indexer_a = KeychainTxOutIndex::<TestKeychain>::new(0);
indexer_a
.insert_descriptor(TestKeychain::External, desc.clone())
.expect("must insert keychain");
for changeset in changesets {
indexer_a.apply_changeset(changeset.clone());
}
let mut indexer_b = KeychainTxOutIndex::<TestKeychain>::new(0);
indexer_b
.insert_descriptor(TestKeychain::External, desc.clone())
.expect("must insert keychain");
let aggregate_changesets = changesets
.iter()
.cloned()
.reduce(|mut agg, cs| {
agg.merge(cs);
agg
})
.expect("must aggregate changesets");
indexer_b.apply_changeset(aggregate_changesets);
assert_eq!(
indexer_a.keychains().collect::<Vec<_>>(),
indexer_b.keychains().collect::<Vec<_>>()
);
assert_eq!(
indexer_a.spk_at_index(TestKeychain::External, 0),
indexer_b.spk_at_index(TestKeychain::External, 0)
);
assert_eq!(
indexer_a.spk_at_index(TestKeychain::Internal, 0),
indexer_b.spk_at_index(TestKeychain::Internal, 0)
);
assert_eq!(
indexer_a.last_revealed_indices(),
indexer_b.last_revealed_indices()
);
}
#[test]
fn assigning_same_descriptor_to_multiple_keychains_should_error() {
let desc = parse_descriptor(DESCRIPTORS[0]);
let mut indexer = KeychainTxOutIndex::<TestKeychain>::new(0);
let _ = indexer
.insert_descriptor(TestKeychain::Internal, desc.clone())
.unwrap();
assert!(indexer
.insert_descriptor(TestKeychain::External, desc)
.is_err())
}
#[test]
fn reassigning_keychain_to_a_new_descriptor_should_error() {
let desc1 = parse_descriptor(DESCRIPTORS[0]);
let desc2 = parse_descriptor(DESCRIPTORS[1]);
let mut indexer = KeychainTxOutIndex::<TestKeychain>::new(0);
let _ = indexer.insert_descriptor(TestKeychain::Internal, desc1);
assert!(indexer
.insert_descriptor(TestKeychain::Internal, desc2)
.is_err());
}
#[test]
fn when_querying_over_a_range_of_keychains_the_utxos_should_show_up() {
let mut indexer = KeychainTxOutIndex::<usize>::new(0);
let mut tx = common::new_tx(0);
for (i, descriptor) in DESCRIPTORS.iter().enumerate() {
let descriptor = parse_descriptor(descriptor);
let _ = indexer.insert_descriptor(i, descriptor.clone()).unwrap();
if i != 4 {
// skip one in the middle to see if it uncovers any bugs
indexer.reveal_next_spk(i);
}
tx.output.push(TxOut {
script_pubkey: descriptor.at_derivation_index(0).unwrap().script_pubkey(),
value: Amount::from_sat(10_000),
});
}
let n_spks = DESCRIPTORS.len() - /*we skipped one*/ 1;
let _ = indexer.index_tx(&tx);
assert_eq!(indexer.outpoints().len(), n_spks);
assert_eq!(indexer.revealed_spks(0..DESCRIPTORS.len()).count(), n_spks);
assert_eq!(indexer.revealed_spks(1..4).count(), 4 - 1);
assert_eq!(
indexer.net_value(&tx, 0..DESCRIPTORS.len()).to_sat(),
(10_000 * n_spks) as i64
);
assert_eq!(
indexer.net_value(&tx, 3..6).to_sat(),
(10_000 * (6 - 3 - /*the skipped one*/ 1)) as i64
);
let (spk, changeset) = txout_index.next_unused_spk(&TestKeychain::External);
assert_eq!(spk, (0, &external_spk));
assert_eq!(changeset.as_inner(), &[].into());
let (revealed_spks, revealed_additions) =
txout_index.reveal_to_target(&TestKeychain::External, 200);
assert_eq!(revealed_spks.count(), 0);
assert!(revealed_additions.is_empty());
}

View File

@@ -1,848 +0,0 @@
#![cfg(feature = "miniscript")]
use std::ops::{Bound, RangeBounds};
use bdk_chain::{
local_chain::{
AlterCheckPointError, ApplyHeaderError, CannotConnectError, ChangeSet, CheckPoint,
LocalChain, MissingGenesisError,
},
BlockId,
};
use bitcoin::{block::Header, hashes::Hash, BlockHash};
use proptest::prelude::*;
#[macro_use]
mod common;
#[derive(Debug)]
struct TestLocalChain<'a> {
name: &'static str,
chain: LocalChain,
update: CheckPoint,
exp: ExpectedResult<'a>,
}
#[derive(Debug, PartialEq)]
enum ExpectedResult<'a> {
Ok {
changeset: &'a [(u32, Option<BlockHash>)],
init_changeset: &'a [(u32, Option<BlockHash>)],
},
Err(CannotConnectError),
}
impl<'a> TestLocalChain<'a> {
fn run(mut self) {
println!("[TestLocalChain] test: {}", self.name);
let got_changeset = match self.chain.apply_update(self.update) {
Ok(changeset) => changeset,
Err(got_err) => {
assert_eq!(
ExpectedResult::Err(got_err),
self.exp,
"{}: unexpected error",
self.name
);
return;
}
};
match self.exp {
ExpectedResult::Ok {
changeset,
init_changeset,
} => {
assert_eq!(
got_changeset,
changeset.iter().cloned().collect(),
"{}: unexpected changeset",
self.name
);
assert_eq!(
self.chain.initial_changeset(),
init_changeset.iter().cloned().collect(),
"{}: unexpected initial changeset",
self.name
);
}
ExpectedResult::Err(err) => panic!(
"{}: expected error ({}), got non-error result: {:?}",
self.name, err, got_changeset
),
}
}
}
#[test]
fn update_local_chain() {
[
TestLocalChain {
name: "add first tip",
chain: local_chain![(0, h!("A"))],
update: chain_update![(0, h!("A"))],
exp: ExpectedResult::Ok {
changeset: &[],
init_changeset: &[(0, Some(h!("A")))],
},
},
TestLocalChain {
name: "add second tip",
chain: local_chain![(0, h!("A"))],
update: chain_update![(0, h!("A")), (1, h!("B"))],
exp: ExpectedResult::Ok {
changeset: &[(1, Some(h!("B")))],
init_changeset: &[(0, Some(h!("A"))), (1, Some(h!("B")))],
},
},
TestLocalChain {
name: "two disjoint chains cannot merge",
chain: local_chain![(0, h!("_")), (1, h!("A"))],
update: chain_update![(0, h!("_")), (2, h!("B"))],
exp: ExpectedResult::Err(CannotConnectError {
try_include_height: 1,
}),
},
TestLocalChain {
name: "two disjoint chains cannot merge (existing chain longer)",
chain: local_chain![(0, h!("_")), (2, h!("A"))],
update: chain_update![(0, h!("_")), (1, h!("B"))],
exp: ExpectedResult::Err(CannotConnectError {
try_include_height: 2,
}),
},
TestLocalChain {
name: "duplicate chains should merge",
chain: local_chain![(0, h!("A"))],
update: chain_update![(0, h!("A"))],
exp: ExpectedResult::Ok {
changeset: &[],
init_changeset: &[(0, Some(h!("A")))],
},
},
// Introduce an older checkpoint (B)
// | 0 | 1 | 2 | 3
// chain | _ C D
// update | _ B C
TestLocalChain {
name: "can introduce older checkpoint",
chain: local_chain![(0, h!("_")), (2, h!("C")), (3, h!("D"))],
update: chain_update![(0, h!("_")), (1, h!("B")), (2, h!("C"))],
exp: ExpectedResult::Ok {
changeset: &[(1, Some(h!("B")))],
init_changeset: &[(0, Some(h!("_"))), (1, Some(h!("B"))), (2, Some(h!("C"))), (3, Some(h!("D")))],
},
},
// Introduce an older checkpoint (A) that is not directly behind PoA
// | 0 | 2 | 3 | 4
// chain | _ B C
// update | _ A C
TestLocalChain {
name: "can introduce older checkpoint 2",
chain: local_chain![(0, h!("_")), (3, h!("B")), (4, h!("C"))],
update: chain_update![(0, h!("_")), (2, h!("A")), (4, h!("C"))],
exp: ExpectedResult::Ok {
changeset: &[(2, Some(h!("A")))],
init_changeset: &[(0, Some(h!("_"))), (2, Some(h!("A"))), (3, Some(h!("B"))), (4, Some(h!("C")))],
}
},
// Introduce an older checkpoint (B) that is not the oldest checkpoint
// | 0 | 1 | 2 | 3
// chain | _ A C
// update | _ B C
TestLocalChain {
name: "can introduce older checkpoint 3",
chain: local_chain![(0, h!("_")), (1, h!("A")), (3, h!("C"))],
update: chain_update![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
exp: ExpectedResult::Ok {
changeset: &[(2, Some(h!("B")))],
init_changeset: &[(0, Some(h!("_"))), (1, Some(h!("A"))), (2, Some(h!("B"))), (3, Some(h!("C")))],
}
},
// Introduce two older checkpoints below the PoA
// | 0 | 1 | 2 | 3
// chain | _ C
// update | _ A B C
TestLocalChain {
name: "introduce two older checkpoints below PoA",
chain: local_chain![(0, h!("_")), (3, h!("C"))],
update: chain_update![(0, h!("_")), (1, h!("A")), (2, h!("B")), (3, h!("C"))],
exp: ExpectedResult::Ok {
changeset: &[(1, Some(h!("A"))), (2, Some(h!("B")))],
init_changeset: &[(0, Some(h!("_"))), (1, Some(h!("A"))), (2, Some(h!("B"))), (3, Some(h!("C")))],
},
},
TestLocalChain {
name: "fix blockhash before agreement point",
chain: local_chain![(0, h!("im-wrong")), (1, h!("we-agree"))],
update: chain_update![(0, h!("fix")), (1, h!("we-agree"))],
exp: ExpectedResult::Ok {
changeset: &[(0, Some(h!("fix")))],
init_changeset: &[(0, Some(h!("fix"))), (1, Some(h!("we-agree")))],
},
},
// B and C are in both chain and update
// | 0 | 1 | 2 | 3 | 4
// chain | _ B C
// update | _ A B C D
// This should succeed with the point of agreement being C; A and D should be added.
TestLocalChain {
name: "two points of agreement",
chain: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
update: chain_update![(0, h!("_")), (1, h!("A")), (2, h!("B")), (3, h!("C")), (4, h!("D"))],
exp: ExpectedResult::Ok {
changeset: &[(1, Some(h!("A"))), (4, Some(h!("D")))],
init_changeset: &[
(0, Some(h!("_"))),
(1, Some(h!("A"))),
(2, Some(h!("B"))),
(3, Some(h!("C"))),
(4, Some(h!("D"))),
],
},
},
// Update and chain does not connect:
// | 0 | 1 | 2 | 3 | 4
// chain | _ B C
// update | _ A B D
// This should fail as we cannot figure out whether C & D are on the same chain
TestLocalChain {
name: "update and chain does not connect",
chain: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
update: chain_update![(0, h!("_")), (1, h!("A")), (2, h!("B")), (4, h!("D"))],
exp: ExpectedResult::Err(CannotConnectError {
try_include_height: 3,
}),
},
// Transient invalidation:
// | 0 | 1 | 2 | 3 | 4 | 5
// chain | _ B C E
// update | _ B' C' D
// This should succeed and invalidate B, C and E, with the point of agreement being the genesis checkpoint.
TestLocalChain {
name: "transitive invalidation applies to checkpoints higher than invalidation",
chain: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C")), (5, h!("E"))],
update: chain_update![(0, h!("_")), (2, h!("B'")), (3, h!("C'")), (4, h!("D"))],
exp: ExpectedResult::Ok {
changeset: &[
(2, Some(h!("B'"))),
(3, Some(h!("C'"))),
(4, Some(h!("D"))),
(5, None),
],
init_changeset: &[
(0, Some(h!("_"))),
(2, Some(h!("B'"))),
(3, Some(h!("C'"))),
(4, Some(h!("D"))),
],
},
},
// Transient invalidation:
// | 0 | 1 | 2 | 3 | 4
// chain | _ B C E
// update | _ B' C' D
// This should succeed and invalidate B, C and E, with no point of agreement above the genesis checkpoint
TestLocalChain {
name: "transitive invalidation applies to checkpoints higher than invalidation no point of agreement",
chain: local_chain![(0, h!("_")), (1, h!("B")), (2, h!("C")), (4, h!("E"))],
update: chain_update![(0, h!("_")), (1, h!("B'")), (2, h!("C'")), (3, h!("D"))],
exp: ExpectedResult::Ok {
changeset: &[
(1, Some(h!("B'"))),
(2, Some(h!("C'"))),
(3, Some(h!("D"))),
(4, None)
],
init_changeset: &[
(0, Some(h!("_"))),
(1, Some(h!("B'"))),
(2, Some(h!("C'"))),
(3, Some(h!("D"))),
],
},
},
// Transient invalidation:
// | 0 | 1 | 2 | 3 | 4 | 5
// chain | _ A B C E
// update | _ B' C' D
// This should fail since although it tells us that B and C are invalid it doesn't tell us whether
// A was invalid.
TestLocalChain {
name: "invalidation but no connection",
chain: local_chain![(0, h!("_")), (1, h!("A")), (2, h!("B")), (3, h!("C")), (5, h!("E"))],
update: chain_update![(0, h!("_")), (2, h!("B'")), (3, h!("C'")), (4, h!("D"))],
exp: ExpectedResult::Err(CannotConnectError { try_include_height: 1 }),
},
// Introduce blocks between two points of agreement
// | 0 | 1 | 2 | 3 | 4 | 5
// chain | A B D E
// update | A C E F
TestLocalChain {
name: "introduce blocks between two points of agreement",
chain: local_chain![(0, h!("A")), (1, h!("B")), (3, h!("D")), (4, h!("E"))],
update: chain_update![(0, h!("A")), (2, h!("C")), (4, h!("E")), (5, h!("F"))],
exp: ExpectedResult::Ok {
changeset: &[
(2, Some(h!("C"))),
(5, Some(h!("F"))),
],
init_changeset: &[
(0, Some(h!("A"))),
(1, Some(h!("B"))),
(2, Some(h!("C"))),
(3, Some(h!("D"))),
(4, Some(h!("E"))),
(5, Some(h!("F"))),
],
},
},
// Allow update that is shorter than original chain
// | 0 | 1 | 2 | 3 | 4 | 5
// chain | A C D E F
// update | A C D'
TestLocalChain {
name: "allow update that is shorter than original chain",
chain: local_chain![(0, h!("_")), (2, h!("C")), (3, h!("D")), (4, h!("E")), (5, h!("F"))],
update: chain_update![(0, h!("_")), (2, h!("C")), (3, h!("D'"))],
exp: ExpectedResult::Ok {
changeset: &[
(3, Some(h!("D'"))),
(4, None),
(5, None),
],
init_changeset: &[
(0, Some(h!("_"))),
(2, Some(h!("C"))),
(3, Some(h!("D'"))),
],
},
},
]
.into_iter()
.for_each(TestLocalChain::run);
}
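// A std-only sketch of the anchor the merges above pivot on: the highest
// height at which chain and update agree. Heights map to hash labels in
// place of real `BlockHash`es; names are illustrative, not the crate API.
fn sketch_point_of_agreement(
    chain: &std::collections::BTreeMap<u32, &'static str>,
    update: &std::collections::BTreeMap<u32, &'static str>,
) -> Option<u32> {
    for (height, hash) in update.iter().rev() {
        if chain.get(height) == Some(hash) {
            return Some(*height);
        }
    }
    None
}

#[test]
fn sketch_agreement_point() {
    // the "two points of agreement" case: chain `_ B C`, update `_ A B C D`
    let chain = std::collections::BTreeMap::from([(0, "_"), (2, "B"), (3, "C")]);
    let update =
        std::collections::BTreeMap::from([(0, "_"), (1, "A"), (2, "B"), (3, "C"), (4, "D")]);
    assert_eq!(sketch_point_of_agreement(&chain, &update), Some(3));
}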
#[test]
fn local_chain_insert_block() {
struct TestCase {
original: LocalChain,
insert: (u32, BlockHash),
expected_result: Result<ChangeSet, AlterCheckPointError>,
expected_final: LocalChain,
}
let test_cases = [
TestCase {
original: local_chain![(0, h!("_"))],
insert: (5, h!("block5")),
expected_result: Ok([(5, Some(h!("block5")))].into()),
expected_final: local_chain![(0, h!("_")), (5, h!("block5"))],
},
TestCase {
original: local_chain![(0, h!("_")), (3, h!("A"))],
insert: (4, h!("B")),
expected_result: Ok([(4, Some(h!("B")))].into()),
expected_final: local_chain![(0, h!("_")), (3, h!("A")), (4, h!("B"))],
},
TestCase {
original: local_chain![(0, h!("_")), (4, h!("B"))],
insert: (3, h!("A")),
expected_result: Ok([(3, Some(h!("A")))].into()),
expected_final: local_chain![(0, h!("_")), (3, h!("A")), (4, h!("B"))],
},
TestCase {
original: local_chain![(0, h!("_")), (2, h!("K"))],
insert: (2, h!("K")),
expected_result: Ok([].into()),
expected_final: local_chain![(0, h!("_")), (2, h!("K"))],
},
TestCase {
original: local_chain![(0, h!("_")), (2, h!("K"))],
insert: (2, h!("J")),
expected_result: Err(AlterCheckPointError {
height: 2,
original_hash: h!("K"),
update_hash: Some(h!("J")),
}),
expected_final: local_chain![(0, h!("_")), (2, h!("K"))],
},
];
for (i, t) in test_cases.into_iter().enumerate() {
let mut chain = t.original;
assert_eq!(
chain.insert_block(t.insert.into()),
t.expected_result,
"[{}] unexpected result when inserting block",
i,
);
assert_eq!(chain, t.expected_final, "[{}] unexpected final chain", i);
}
}
#[test]
fn local_chain_disconnect_from() {
struct TestCase {
name: &'static str,
original: LocalChain,
disconnect_from: (u32, BlockHash),
exp_result: Result<ChangeSet, MissingGenesisError>,
exp_final: LocalChain,
}
let test_cases = [
TestCase {
name: "try_replace_genesis_should_fail",
original: local_chain![(0, h!("_"))],
disconnect_from: (0, h!("_")),
exp_result: Err(MissingGenesisError),
exp_final: local_chain![(0, h!("_"))],
},
TestCase {
name: "try_replace_genesis_should_fail_2",
original: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
disconnect_from: (0, h!("_")),
exp_result: Err(MissingGenesisError),
exp_final: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
},
TestCase {
name: "from_does_not_exist",
original: local_chain![(0, h!("_")), (3, h!("C"))],
disconnect_from: (2, h!("B")),
exp_result: Ok(ChangeSet::default()),
exp_final: local_chain![(0, h!("_")), (3, h!("C"))],
},
TestCase {
name: "from_has_different_blockhash",
original: local_chain![(0, h!("_")), (2, h!("B"))],
disconnect_from: (2, h!("not_B")),
exp_result: Ok(ChangeSet::default()),
exp_final: local_chain![(0, h!("_")), (2, h!("B"))],
},
TestCase {
name: "disconnect_one",
original: local_chain![(0, h!("_")), (2, h!("B"))],
disconnect_from: (2, h!("B")),
exp_result: Ok(ChangeSet::from_iter([(2, None)])),
exp_final: local_chain![(0, h!("_"))],
},
TestCase {
name: "disconnect_three",
original: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C")), (4, h!("D"))],
disconnect_from: (2, h!("B")),
exp_result: Ok(ChangeSet::from_iter([(2, None), (3, None), (4, None)])),
exp_final: local_chain![(0, h!("_"))],
},
];
for (i, t) in test_cases.into_iter().enumerate() {
println!("Case {}: {}", i, t.name);
let mut chain = t.original;
let result = chain.disconnect_from(t.disconnect_from.into());
assert_eq!(
result, t.exp_result,
"[{}:{}] unexpected changeset result",
i, t.name
);
assert_eq!(
chain, t.exp_final,
"[{}:{}] unexpected final chain",
i, t.name
);
}
}
#[test]
fn checkpoint_from_block_ids() {
struct TestCase<'a> {
name: &'a str,
blocks: &'a [(u32, BlockHash)],
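/// The expected result of [`CheckPoint::from_block_ids`]: `Ok(())` if the blocks form a
/// valid checkpoint chain, `Err(None)` if `blocks` is empty, and `Err(Some(last))` if the
/// heights are not strictly ascending, where `last` is the last checkpoint constructed
/// before the failure.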
exp_result: Result<(), Option<(u32, BlockHash)>>,
}
let test_cases = [
TestCase {
name: "in_order",
blocks: &[(0, h!("A")), (1, h!("B")), (3, h!("D"))],
exp_result: Ok(()),
},
TestCase {
name: "with_duplicates",
blocks: &[(1, h!("B")), (2, h!("C")), (2, h!("C'"))],
exp_result: Err(Some((2, h!("C")))),
},
TestCase {
name: "not_in_order",
blocks: &[(1, h!("B")), (3, h!("D")), (2, h!("C"))],
exp_result: Err(Some((3, h!("D")))),
},
TestCase {
name: "empty",
blocks: &[],
exp_result: Err(None),
},
TestCase {
name: "single",
blocks: &[(21, h!("million"))],
exp_result: Ok(()),
},
];
for (i, t) in test_cases.into_iter().enumerate() {
println!("running test case {}: '{}'", i, t.name);
let result = CheckPoint::from_block_ids(
t.blocks
.iter()
.map(|&(height, hash)| BlockId { height, hash }),
);
match t.exp_result {
Ok(_) => {
assert!(result.is_ok(), "[{}:{}] should be Ok", i, t.name);
let result_vec = {
let mut v = result
.unwrap()
.into_iter()
.map(|cp| (cp.height(), cp.hash()))
.collect::<Vec<_>>();
v.reverse();
v
};
assert_eq!(
&result_vec, t.blocks,
"[{}:{}] not equal to original block ids",
i, t.name
);
}
Err(exp_last) => {
assert!(result.is_err(), "[{}:{}] should be Err", i, t.name);
let err = result.unwrap_err();
assert_eq!(
err.as_ref()
.map(|last_cp| (last_cp.height(), last_cp.hash())),
exp_last,
"[{}:{}] error's last cp height should be {:?}, got {:?}",
i,
t.name,
exp_last,
err
);
}
}
}
}
#[test]
fn checkpoint_query() {
struct TestCase {
chain: LocalChain,
/// The heights we want to call [`CheckPoint::get`] with, represented as an inclusive
/// range.
///
/// If a [`CheckPoint`] exists at that height, we expect [`CheckPoint::get`] to return
/// it. If not, [`CheckPoint::get`] should return `None`.
query_range: (u32, u32),
}
let test_cases = [
TestCase {
chain: local_chain![(0, h!("_")), (1, h!("A"))],
query_range: (0, 2),
},
TestCase {
chain: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
query_range: (0, 3),
},
];
for t in test_cases.into_iter() {
let tip = t.chain.tip();
for h in t.query_range.0..=t.query_range.1 {
let query_result = tip.get(h);
// perform an exhaustive search for the checkpoint at height `h`
let exp_hash = t
.chain
.iter_checkpoints()
.find(|cp| cp.height() == h)
.map(|cp| cp.hash());
match query_result {
Some(cp) => {
assert_eq!(Some(cp.hash()), exp_hash);
assert_eq!(cp.height(), h);
}
None => assert!(exp_hash.is_none()),
}
}
}
}
#[test]
fn checkpoint_insert() {
struct TestCase<'a> {
/// The name of the test.
name: &'a str,
/// The original checkpoint chain to call [`CheckPoint::insert`] on.
chain: &'a [(u32, BlockHash)],
/// The `block_id` to insert.
to_insert: (u32, BlockHash),
/// The expected final checkpoint chain after calling [`CheckPoint::insert`].
exp_final_chain: &'a [(u32, BlockHash)],
}
let test_cases = [
TestCase {
name: "insert_above_tip",
chain: &[(1, h!("a")), (2, h!("b"))],
to_insert: (4, h!("d")),
exp_final_chain: &[(1, h!("a")), (2, h!("b")), (4, h!("d"))],
},
TestCase {
name: "insert_already_exists_expect_no_change",
chain: &[(1, h!("a")), (2, h!("b")), (3, h!("c"))],
to_insert: (2, h!("b")),
exp_final_chain: &[(1, h!("a")), (2, h!("b")), (3, h!("c"))],
},
TestCase {
name: "insert_in_middle",
chain: &[(2, h!("b")), (4, h!("d")), (5, h!("e"))],
to_insert: (3, h!("c")),
exp_final_chain: &[(2, h!("b")), (3, h!("c")), (4, h!("d")), (5, h!("e"))],
},
TestCase {
name: "replace_one",
chain: &[(3, h!("c")), (4, h!("d")), (5, h!("e"))],
to_insert: (5, h!("E")),
exp_final_chain: &[(3, h!("c")), (4, h!("d")), (5, h!("E"))],
},
TestCase {
name: "insert_conflict_should_evict",
chain: &[(3, h!("c")), (4, h!("d")), (5, h!("e")), (6, h!("f"))],
to_insert: (4, h!("D")),
exp_final_chain: &[(3, h!("c")), (4, h!("D"))],
},
];
fn genesis_block() -> impl Iterator<Item = BlockId> {
core::iter::once((0, h!("_"))).map(BlockId::from)
}
for (i, t) in test_cases.into_iter().enumerate() {
println!("Running [{}] '{}'", i, t.name);
let chain = CheckPoint::from_block_ids(
genesis_block().chain(t.chain.iter().copied().map(BlockId::from)),
)
.expect("test formed incorrectly, must construct checkpoint chain");
let exp_final_chain = CheckPoint::from_block_ids(
genesis_block().chain(t.exp_final_chain.iter().copied().map(BlockId::from)),
)
.expect("test formed incorrectly, must construct checkpoint chain");
assert_eq!(
chain.insert(t.to_insert.into()),
exp_final_chain,
"unexpected final chain"
);
}
}
#[test]
fn local_chain_apply_header_connected_to() {
fn header_from_prev_blockhash(prev_blockhash: BlockHash) -> Header {
Header {
version: bitcoin::block::Version::default(),
prev_blockhash,
merkle_root: bitcoin::hash_types::TxMerkleNode::all_zeros(),
time: 0,
bits: bitcoin::CompactTarget::default(),
nonce: 0,
}
}
struct TestCase {
name: &'static str,
chain: LocalChain,
header: Header,
height: u32,
connected_to: BlockId,
exp_result: Result<Vec<(u32, Option<BlockHash>)>, ApplyHeaderError>,
}
let test_cases = [
{
let header = header_from_prev_blockhash(h!("_"));
let hash = header.block_hash();
let height = 1;
let connected_to = BlockId { height, hash };
TestCase {
name: "connected_to_self_header_applied_to_self",
chain: local_chain![(0, h!("_")), (height, hash)],
header,
height,
connected_to,
exp_result: Ok(vec![]),
}
},
{
let prev_hash = h!("A");
let prev_height = 1;
let header = header_from_prev_blockhash(prev_hash);
let hash = header.block_hash();
let height = prev_height + 1;
let connected_to = BlockId {
height: prev_height,
hash: prev_hash,
};
TestCase {
name: "connected_to_prev_header_applied_to_self",
chain: local_chain![(0, h!("_")), (prev_height, prev_hash)],
header,
height,
connected_to,
exp_result: Ok(vec![(height, Some(hash))]),
}
},
{
let header = header_from_prev_blockhash(BlockHash::all_zeros());
let hash = header.block_hash();
let height = 0;
let connected_to = BlockId { height, hash };
TestCase {
name: "genesis_applied_to_self",
chain: local_chain![(0, hash)],
header,
height,
connected_to,
exp_result: Ok(vec![]),
}
},
{
let header = header_from_prev_blockhash(h!("Z"));
let height = 10;
let hash = header.block_hash();
let prev_height = height - 1;
let prev_hash = header.prev_blockhash;
TestCase {
name: "connect_at_connected_to",
chain: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
header,
height: 10,
connected_to: BlockId {
height: 3,
hash: h!("C"),
},
exp_result: Ok(vec![(prev_height, Some(prev_hash)), (height, Some(hash))]),
}
},
{
let prev_hash = h!("A");
let prev_height = 1;
let header = header_from_prev_blockhash(prev_hash);
let connected_to = BlockId {
height: prev_height,
hash: h!("not_prev_hash"),
};
TestCase {
name: "inconsistent_prev_hash",
chain: local_chain![(0, h!("_")), (prev_height, h!("not_prev_hash"))],
header,
height: prev_height + 1,
connected_to,
exp_result: Err(ApplyHeaderError::InconsistentBlocks),
}
},
{
let prev_hash = h!("A");
let prev_height = 1;
let header = header_from_prev_blockhash(prev_hash);
let height = prev_height + 1;
let connected_to = BlockId {
height,
hash: h!("not_current_hash"),
};
TestCase {
name: "inconsistent_current_block",
chain: local_chain![(0, h!("_")), (height, h!("not_current_hash"))],
header,
height,
connected_to,
exp_result: Err(ApplyHeaderError::InconsistentBlocks),
}
},
{
let header = header_from_prev_blockhash(h!("B"));
let height = 3;
let connected_to = BlockId {
height: 4,
hash: h!("D"),
};
TestCase {
name: "connected_to_is_greater",
chain: local_chain![(0, h!("_")), (2, h!("B"))],
header,
height,
connected_to,
exp_result: Err(ApplyHeaderError::InconsistentBlocks),
}
},
];
for (i, t) in test_cases.into_iter().enumerate() {
println!("running test case {}: '{}'", i, t.name);
let mut chain = t.chain;
let result = chain.apply_header_connected_to(&t.header, t.height, t.connected_to);
let exp_result = t
.exp_result
.map(|cs| cs.iter().cloned().collect::<ChangeSet>());
assert_eq!(result, exp_result, "[{}:{}] unexpected result", i, t.name);
}
}
fn generate_height_range_bounds(
height_upper_bound: u32,
) -> impl Strategy<Value = (Bound<u32>, Bound<u32>)> {
fn generate_height_bound(height_upper_bound: u32) -> impl Strategy<Value = Bound<u32>> {
prop_oneof![
(0..height_upper_bound).prop_map(Bound::Included),
(0..height_upper_bound).prop_map(Bound::Excluded),
Just(Bound::Unbounded),
]
}
(
generate_height_bound(height_upper_bound),
generate_height_bound(height_upper_bound),
)
}
fn generate_checkpoints(max_height: u32, max_count: usize) -> impl Strategy<Value = CheckPoint> {
proptest::collection::btree_set(1..max_height, 0..max_count).prop_map(|mut heights| {
heights.insert(0); // must have genesis
CheckPoint::from_block_ids(heights.into_iter().map(|height| {
let hash = bitcoin::hashes::Hash::hash(height.to_le_bytes().as_slice());
BlockId { height, hash }
}))
.expect("blocks must be in order as it comes from btreeset")
})
}
proptest! {
#![proptest_config(ProptestConfig {
..Default::default()
})]
/// Ensure that [`CheckPoint::range`] returns the expected checkpoint heights by comparing it
/// against a more primitive approach.
#[test]
fn checkpoint_range(
range in generate_height_range_bounds(21_000),
cp in generate_checkpoints(21_000, 2100)
) {
let exp_heights = cp.iter().map(|cp| cp.height()).filter(|h| range.contains(h)).collect::<Vec<u32>>();
let heights = cp.range(range).map(|cp| cp.height()).collect::<Vec<u32>>();
prop_assert_eq!(heights, exp_heights);
}
}

View File

@@ -0,0 +1,773 @@
#[macro_use]
mod common;
use bdk_chain::{collections::BTreeSet, sparse_chain::*, BlockId, TxHeight};
use bitcoin::{hashes::Hash, Txid};
use core::ops::Bound;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)]
pub struct TestIndex(TxHeight, u32);
impl ChainPosition for TestIndex {
fn height(&self) -> TxHeight {
self.0
}
fn max_ord_of_height(height: TxHeight) -> Self {
Self(height, u32::MAX)
}
fn min_ord_of_height(height: TxHeight) -> Self {
Self(height, u32::MIN)
}
}
impl TestIndex {
pub fn new<H>(height: H, ext: u32) -> Self
where
H: Into<TxHeight>,
{
Self(height.into(), ext)
}
}
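// Note: `TestIndex` derives `Ord` on the `(TxHeight, u32)` tuple, so it orders by height
// first and then by the extension value. For any height `h`, every index at `h` therefore
// sorts between `min_ord_of_height(h)` and `max_ord_of_height(h)`.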
#[test]
fn add_first_checkpoint() {
let chain = SparseChain::default();
assert_eq!(
chain.determine_changeset(&chain!([0, h!("A")])),
Ok(changeset! {
checkpoints: [(0, Some(h!("A")))],
txids: []
},),
"add first tip"
);
}
#[test]
fn add_second_tip() {
let chain = chain!([0, h!("A")]);
assert_eq!(
chain.determine_changeset(&chain!([0, h!("A")], [1, h!("B")])),
Ok(changeset! {
checkpoints: [(1, Some(h!("B")))],
txids: []
},),
"extend tip by one"
);
}
#[test]
fn two_disjoint_chains_cannot_merge() {
let chain1 = chain!([0, h!("A")]);
let chain2 = chain!([1, h!("B")]);
assert_eq!(
chain1.determine_changeset(&chain2),
Err(UpdateError::NotConnected(0))
);
}
#[test]
fn duplicate_chains_should_merge() {
let chain1 = chain!([0, h!("A")]);
let chain2 = chain!([0, h!("A")]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(ChangeSet::default())
);
}
#[test]
fn duplicate_chains_with_txs_should_merge() {
let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
let chain2 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(ChangeSet::default())
);
}
#[test]
fn duplicate_chains_with_different_txs_should_merge() {
let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
let chain2 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx1"), TxHeight::Confirmed(0))]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [],
txids: [(h!("tx1"), Some(TxHeight::Confirmed(0)))]
})
);
}
#[test]
fn invalidate_first_and_only_checkpoint_without_tx_changes() {
let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
let chain2 = chain!(checkpoints: [[0,h!("A'")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [(0, Some(h!("A'")))],
txids: []
},)
);
}
#[test]
fn invalidate_first_and_only_checkpoint_with_tx_move_forward() {
let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
let chain2 = chain!(checkpoints: [[0,h!("A'")],[1, h!("B")]], txids: [(h!("tx0"), TxHeight::Confirmed(1))]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [(0, Some(h!("A'"))), (1, Some(h!("B")))],
txids: [(h!("tx0"), Some(TxHeight::Confirmed(1)))]
},)
);
}
#[test]
fn invalidate_first_and_only_checkpoint_with_tx_move_backward() {
let chain1 = chain!(checkpoints: [[1,h!("B")]], txids: [(h!("tx0"), TxHeight::Confirmed(1))]);
let chain2 = chain!(checkpoints: [[0,h!("A")],[1, h!("B'")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [(0, Some(h!("A"))), (1, Some(h!("B'")))],
txids: [(h!("tx0"), Some(TxHeight::Confirmed(0)))]
},)
);
}
#[test]
fn invalidate_a_checkpoint_and_try_and_move_tx_when_it_wasnt_within_invalidation() {
let chain1 = chain!(checkpoints: [[0, h!("A")], [1, h!("B")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
let chain2 = chain!(checkpoints: [[0, h!("A")], [1, h!("B'")]], txids: [(h!("tx0"), TxHeight::Confirmed(1))]);
assert_eq!(
chain1.determine_changeset(&chain2),
Err(UpdateError::TxInconsistent {
txid: h!("tx0"),
original_pos: TxHeight::Confirmed(0),
update_pos: TxHeight::Confirmed(1),
})
);
}
/// This test doesn't make much sense. We're invalidating the block at height 1 and moving the
/// tx to height 0. It should be impossible for the tx to be at height 1 at any point if it was
/// at height 0 all along.
#[test]
fn move_invalidated_tx_into_earlier_checkpoint() {
let chain1 = chain!(checkpoints: [[0, h!("A")], [1, h!("B")]], txids: [(h!("tx0"), TxHeight::Confirmed(1))]);
let chain2 = chain!(checkpoints: [[0, h!("A")], [1, h!("B'")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [(1, Some(h!("B'")))],
txids: [(h!("tx0"), Some(TxHeight::Confirmed(0)))]
},)
);
}
#[test]
fn invalidate_first_and_only_checkpoint_with_tx_move_to_mempool() {
let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
let chain2 = chain!(checkpoints: [[0,h!("A'")]], txids: [(h!("tx0"), TxHeight::Unconfirmed)]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [(0, Some(h!("A'")))],
txids: [(h!("tx0"), Some(TxHeight::Unconfirmed))]
},)
);
}
#[test]
fn confirm_tx_without_extending_chain() {
let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Unconfirmed)]);
let chain2 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [],
txids: [(h!("tx0"), Some(TxHeight::Confirmed(0)))]
},)
);
}
#[test]
fn confirm_tx_backwards_while_extending_chain() {
let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Unconfirmed)]);
let chain2 = chain!(checkpoints: [[0,h!("A")],[1,h!("B")]], txids: [(h!("tx0"), TxHeight::Confirmed(0))]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [(1, Some(h!("B")))],
txids: [(h!("tx0"), Some(TxHeight::Confirmed(0)))]
},)
);
}
#[test]
fn confirm_tx_in_new_block() {
let chain1 = chain!(checkpoints: [[0,h!("A")]], txids: [(h!("tx0"), TxHeight::Unconfirmed)]);
let chain2 = chain! {
checkpoints: [[0,h!("A")], [1,h!("B")]],
txids: [(h!("tx0"), TxHeight::Confirmed(1))]
};
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [(1, Some(h!("B")))],
txids: [(h!("tx0"), Some(TxHeight::Confirmed(1)))]
},)
);
}
#[test]
fn merging_mempool_of_empty_chains_doesnt_fail() {
let chain1 = chain!(checkpoints: [], txids: [(h!("tx0"), TxHeight::Unconfirmed)]);
let chain2 = chain!(checkpoints: [], txids: [(h!("tx1"), TxHeight::Unconfirmed)]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [],
txids: [(h!("tx1"), Some(TxHeight::Unconfirmed))]
},)
);
}
#[test]
fn cannot_insert_confirmed_tx_without_checkpoints() {
let chain = SparseChain::default();
assert_eq!(
chain.insert_tx_preview(h!("A"), TxHeight::Confirmed(0)),
Err(InsertTxError::TxTooHigh {
txid: h!("A"),
tx_height: 0,
tip_height: None
})
);
}
#[test]
fn empty_chain_can_add_unconfirmed_transactions() {
let chain1 = chain!(checkpoints: [[0, h!("A")]], txids: []);
let chain2 = chain!(checkpoints: [], txids: [(h!("tx0"), TxHeight::Unconfirmed)]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [],
txids: [ (h!("tx0"), Some(TxHeight::Unconfirmed)) ]
},)
);
}
#[test]
fn can_update_with_shorter_chain() {
let chain1 = chain!(checkpoints: [[1, h!("B")],[2, h!("C")]], txids: []);
let chain2 = chain!(checkpoints: [[1, h!("B")]], txids: [(h!("tx0"), TxHeight::Confirmed(1))]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [],
txids: [(h!("tx0"), Some(TxHeight::Confirmed(1)))]
},)
)
}
#[test]
fn can_introduce_older_checkpoints() {
let chain1 = chain!(checkpoints: [[2, h!("C")], [3, h!("D")]], txids: []);
let chain2 = chain!(checkpoints: [[1, h!("B")], [2, h!("C")]], txids: []);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [(1, Some(h!("B")))],
txids: []
},)
);
}
#[test]
fn fix_blockhash_before_agreement_point() {
let chain1 = chain!([0, h!("im-wrong")], [1, h!("we-agree")]);
let chain2 = chain!([0, h!("fix")], [1, h!("we-agree")]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [(0, Some(h!("fix")))],
txids: []
},)
)
}
// TODO: Use macro
#[test]
fn cannot_change_ext_index_of_confirmed_tx() {
let chain1 = chain!(
index: TestIndex,
checkpoints: [[1, h!("A")]],
txids: [(h!("tx0"), TestIndex(TxHeight::Confirmed(1), 10))]
);
let chain2 = chain!(
index: TestIndex,
checkpoints: [[1, h!("A")]],
txids: [(h!("tx0"), TestIndex(TxHeight::Confirmed(1), 20))]
);
assert_eq!(
chain1.determine_changeset(&chain2),
Err(UpdateError::TxInconsistent {
txid: h!("tx0"),
original_pos: TestIndex(TxHeight::Confirmed(1), 10),
update_pos: TestIndex(TxHeight::Confirmed(1), 20),
}),
)
}
#[test]
fn can_change_index_of_unconfirmed_tx() {
let chain1 = chain!(
index: TestIndex,
checkpoints: [[1, h!("A")]],
txids: [(h!("tx1"), TestIndex(TxHeight::Unconfirmed, 10))]
);
let chain2 = chain!(
index: TestIndex,
checkpoints: [[1, h!("A")]],
txids: [(h!("tx1"), TestIndex(TxHeight::Unconfirmed, 20))]
);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(ChangeSet {
checkpoints: [].into(),
txids: [(h!("tx1"), Some(TestIndex(TxHeight::Unconfirmed, 20)),)].into()
},),
)
}
/// B and C are in both chain and update
/// ```
/// | 0 | 1 | 2 | 3 | 4
/// chain | B C
/// update | A B C D
/// ```
/// This should succeed with the point of agreement being C, and A should be added in addition.
#[test]
fn two_points_of_agreement() {
let chain1 = chain!([1, h!("B")], [2, h!("C")]);
let chain2 = chain!([0, h!("A")], [1, h!("B")], [2, h!("C")], [3, h!("D")]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [(0, Some(h!("A"))), (3, Some(h!("D")))]
},),
);
}
/// Update and chain does not connect:
/// ```
/// | 0 | 1 | 2 | 3 | 4
/// chain | B C
/// update | A B D
/// ```
/// This should fail as we cannot figure out whether C & D are on the same chain
#[test]
fn update_and_chain_does_not_connect() {
let chain1 = chain!([1, h!("B")], [2, h!("C")]);
let chain2 = chain!([0, h!("A")], [1, h!("B")], [3, h!("D")]);
assert_eq!(
chain1.determine_changeset(&chain2),
Err(UpdateError::NotConnected(2)),
);
}
/// Transient invalidation:
/// ```
/// | 0 | 1 | 2 | 3 | 4 | 5
/// chain | A B C E
/// update | A B' C' D
/// ```
/// This should succeed and invalidate B, C and E with the point of agreement being A.
/// It should also invalidate transactions at height 1.
#[test]
fn transitive_invalidation_applies_to_checkpoints_higher_than_invalidation() {
let chain1 = chain! {
checkpoints: [[0, h!("A")], [2, h!("B")], [3, h!("C")], [5, h!("E")]],
txids: [
(h!("a"), TxHeight::Confirmed(0)),
(h!("b1"), TxHeight::Confirmed(1)),
(h!("b2"), TxHeight::Confirmed(2)),
(h!("d"), TxHeight::Confirmed(3)),
(h!("e"), TxHeight::Confirmed(5))
]
};
let chain2 = chain! {
checkpoints: [[0, h!("A")], [2, h!("B'")], [3, h!("C'")], [4, h!("D")]],
txids: [(h!("b1"), TxHeight::Confirmed(4)), (h!("b2"), TxHeight::Confirmed(3))]
};
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [
(2, Some(h!("B'"))),
(3, Some(h!("C'"))),
(4, Some(h!("D"))),
(5, None)
],
txids: [
(h!("b1"), Some(TxHeight::Confirmed(4))),
(h!("b2"), Some(TxHeight::Confirmed(3))),
(h!("d"), Some(TxHeight::Unconfirmed)),
(h!("e"), Some(TxHeight::Unconfirmed))
]
},)
);
}
/// Transient invalidation:
/// ```
/// | 0 | 1 | 2 | 3 | 4
/// chain | B C E
/// update | B' C' D
/// ```
///
/// This should succeed and invalidate B, C and E with no point of agreement
#[test]
fn transitive_invalidation_applies_to_checkpoints_higher_than_invalidation_no_point_of_agreement() {
let chain1 = chain!([1, h!("B")], [2, h!("C")], [4, h!("E")]);
let chain2 = chain!([1, h!("B'")], [2, h!("C'")], [3, h!("D")]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [
(1, Some(h!("B'"))),
(2, Some(h!("C'"))),
(3, Some(h!("D"))),
(4, None)
]
},)
)
}
/// Transient invalidation:
/// ```
/// | 0 | 1 | 2 | 3 | 4
/// chain | A B C E
/// update | B' C' D
/// ```
///
/// This should fail since, although it tells us that B and C are invalid, it doesn't tell us
/// whether A was invalid.
#[test]
fn invalidation_but_no_connection() {
let chain1 = chain!([0, h!("A")], [1, h!("B")], [2, h!("C")], [4, h!("E")]);
let chain2 = chain!([1, h!("B'")], [2, h!("C'")], [3, h!("D")]);
assert_eq!(
chain1.determine_changeset(&chain2),
Err(UpdateError::NotConnected(0))
)
}
#[test]
fn checkpoint_limit_is_respected() {
let mut chain1 = SparseChain::default();
let _ = chain1
.apply_update(chain!(
[1, h!("A")],
[2, h!("B")],
[3, h!("C")],
[4, h!("D")],
[5, h!("E")]
))
.unwrap();
assert_eq!(chain1.checkpoints().len(), 5);
chain1.set_checkpoint_limit(Some(4));
assert_eq!(chain1.checkpoints().len(), 4);
let _ = chain1
.insert_checkpoint(BlockId {
height: 6,
hash: h!("F"),
})
.unwrap();
assert_eq!(chain1.checkpoints().len(), 4);
let changeset = chain1.determine_changeset(&chain!([6, h!("F")], [7, h!("G")]));
assert_eq!(changeset, Ok(changeset!(checkpoints: [(7, Some(h!("G")))])));
chain1.apply_changeset(changeset.unwrap());
assert_eq!(chain1.checkpoints().len(), 4);
}
#[test]
fn range_txids_by_height() {
let mut chain = chain!(index: TestIndex, checkpoints: [[1, h!("block 1")], [2, h!("block 2")]]);
let txids: [(TestIndex, Txid); 4] = [
(
TestIndex(TxHeight::Confirmed(1), u32::MIN),
Txid::from_inner([0x00; 32]),
),
(
TestIndex(TxHeight::Confirmed(1), u32::MAX),
Txid::from_inner([0xfe; 32]),
),
(
TestIndex(TxHeight::Confirmed(2), u32::MIN),
Txid::from_inner([0x01; 32]),
),
(
TestIndex(TxHeight::Confirmed(2), u32::MAX),
Txid::from_inner([0xff; 32]),
),
];
// populate chain with txids
for (index, txid) in txids {
let _ = chain.insert_tx(txid, index).expect("should succeed");
}
// inclusive start
assert_eq!(
chain
.range_txids_by_height(TxHeight::Confirmed(1)..)
.collect::<Vec<_>>(),
txids.iter().collect::<Vec<_>>(),
);
// exclusive start
assert_eq!(
chain
.range_txids_by_height((Bound::Excluded(TxHeight::Confirmed(1)), Bound::Unbounded,))
.collect::<Vec<_>>(),
txids[2..].iter().collect::<Vec<_>>(),
);
// inclusive end
assert_eq!(
chain
.range_txids_by_height((Bound::Unbounded, Bound::Included(TxHeight::Confirmed(2))))
.collect::<Vec<_>>(),
txids[..4].iter().collect::<Vec<_>>(),
);
// exclusive end
assert_eq!(
chain
.range_txids_by_height(..TxHeight::Confirmed(2))
.collect::<Vec<_>>(),
txids[..2].iter().collect::<Vec<_>>(),
);
}
#[test]
fn range_txids_by_index() {
let mut chain = chain!(index: TestIndex, checkpoints: [[1, h!("block 1")],[2, h!("block 2")]]);
let txids: [(TestIndex, Txid); 4] = [
(TestIndex(TxHeight::Confirmed(1), u32::MIN), h!("tx 1 min")),
(TestIndex(TxHeight::Confirmed(1), u32::MAX), h!("tx 1 max")),
(TestIndex(TxHeight::Confirmed(2), u32::MIN), h!("tx 2 min")),
(TestIndex(TxHeight::Confirmed(2), u32::MAX), h!("tx 2 max")),
];
// populate chain with txids
for (index, txid) in txids {
let _ = chain.insert_tx(txid, index).expect("should succeed");
}
// inclusive start
assert_eq!(
chain
.range_txids_by_position(TestIndex(TxHeight::Confirmed(1), u32::MIN)..)
.collect::<Vec<_>>(),
txids.iter().collect::<Vec<_>>(),
);
assert_eq!(
chain
.range_txids_by_position(TestIndex(TxHeight::Confirmed(1), u32::MAX)..)
.collect::<Vec<_>>(),
txids[1..].iter().collect::<Vec<_>>(),
);
// exclusive start
assert_eq!(
chain
.range_txids_by_position((
Bound::Excluded(TestIndex(TxHeight::Confirmed(1), u32::MIN)),
Bound::Unbounded
))
.collect::<Vec<_>>(),
txids[1..].iter().collect::<Vec<_>>(),
);
assert_eq!(
chain
.range_txids_by_position((
Bound::Excluded(TestIndex(TxHeight::Confirmed(1), u32::MAX)),
Bound::Unbounded
))
.collect::<Vec<_>>(),
txids[2..].iter().collect::<Vec<_>>(),
);
// inclusive end
assert_eq!(
chain
.range_txids_by_position((
Bound::Unbounded,
Bound::Included(TestIndex(TxHeight::Confirmed(2), u32::MIN))
))
.collect::<Vec<_>>(),
txids[..3].iter().collect::<Vec<_>>(),
);
assert_eq!(
chain
.range_txids_by_position((
Bound::Unbounded,
Bound::Included(TestIndex(TxHeight::Confirmed(2), u32::MAX))
))
.collect::<Vec<_>>(),
txids[..4].iter().collect::<Vec<_>>(),
);
// exclusive end
assert_eq!(
chain
.range_txids_by_position(..TestIndex(TxHeight::Confirmed(2), u32::MIN))
.collect::<Vec<_>>(),
txids[..2].iter().collect::<Vec<_>>(),
);
assert_eq!(
chain
.range_txids_by_position(..TestIndex(TxHeight::Confirmed(2), u32::MAX))
.collect::<Vec<_>>(),
txids[..3].iter().collect::<Vec<_>>(),
);
}
#[test]
fn range_txids() {
let mut chain = SparseChain::default();
let txids = (0..100)
.map(|v| Txid::hash(v.to_string().as_bytes()))
.collect::<BTreeSet<Txid>>();
// populate chain
for txid in &txids {
let _ = chain
.insert_tx(*txid, TxHeight::Unconfirmed)
.expect("should succeed");
}
for txid in &txids {
assert_eq!(
chain
.range_txids((TxHeight::Unconfirmed, *txid)..)
.map(|(_, txid)| txid)
.collect::<Vec<_>>(),
txids.range(*txid..).collect::<Vec<_>>(),
"range with inclusive start should succeed"
);
assert_eq!(
chain
.range_txids((
Bound::Excluded((TxHeight::Unconfirmed, *txid)),
Bound::Unbounded,
))
.map(|(_, txid)| txid)
.collect::<Vec<_>>(),
txids
.range((Bound::Excluded(*txid), Bound::Unbounded,))
.collect::<Vec<_>>(),
"range with exclusive start should succeed"
);
assert_eq!(
chain
.range_txids(..(TxHeight::Unconfirmed, *txid))
.map(|(_, txid)| txid)
.collect::<Vec<_>>(),
txids.range(..*txid).collect::<Vec<_>>(),
"range with exclusive end should succeed"
);
assert_eq!(
chain
.range_txids((
Bound::Included((TxHeight::Unconfirmed, *txid)),
Bound::Unbounded,
))
.map(|(_, txid)| txid)
.collect::<Vec<_>>(),
txids
.range((Bound::Included(*txid), Bound::Unbounded,))
.collect::<Vec<_>>(),
"range with inclusive end should succeed"
);
}
}
#[test]
fn invalidated_txs_move_to_unconfirmed() {
let chain1 = chain! {
checkpoints: [[0, h!("A")], [1, h!("B")], [2, h!("C")]],
txids: [
(h!("a"), TxHeight::Confirmed(0)),
(h!("b"), TxHeight::Confirmed(1)),
(h!("c"), TxHeight::Confirmed(2)),
(h!("d"), TxHeight::Unconfirmed)
]
};
let chain2 = chain!([0, h!("A")], [1, h!("B'")]);
assert_eq!(
chain1.determine_changeset(&chain2),
Ok(changeset! {
checkpoints: [
(1, Some(h!("B'"))),
(2, None)
],
txids: [
(h!("b"), Some(TxHeight::Unconfirmed)),
(h!("c"), Some(TxHeight::Unconfirmed))
]
},)
);
}
#[test]
fn change_tx_position_from_unconfirmed_to_confirmed() {
let mut chain = SparseChain::<TxHeight>::default();
let txid = h!("txid");
let _ = chain.insert_tx(txid, TxHeight::Unconfirmed).unwrap();
assert_eq!(chain.tx_position(txid), Some(&TxHeight::Unconfirmed));
let _ = chain
.insert_checkpoint(BlockId {
height: 0,
hash: h!("0"),
})
.unwrap();
let _ = chain.insert_tx(txid, TxHeight::Confirmed(0)).unwrap();
assert_eq!(chain.tx_position(txid), Some(&TxHeight::Confirmed(0)));
}

View File

@@ -1,88 +1,64 @@
use bdk_chain::{spk_txout::SpkTxOutIndex, Indexer};
use bitcoin::{
absolute, transaction, Amount, OutPoint, ScriptBuf, SignedAmount, Transaction, TxIn, TxOut,
};
use bdk_chain::SpkTxOutIndex;
use bitcoin::{hashes::hex::FromHex, OutPoint, PackedLockTime, Script, Transaction, TxIn, TxOut};
#[test]
fn spk_txout_sent_and_received() {
let spk1 = ScriptBuf::from_hex("001404f1e52ce2bab3423c6a8c63b7cd730d8f12542c").unwrap();
let spk2 = ScriptBuf::from_hex("00142b57404ae14f08c3a0c903feb2af7830605eb00f").unwrap();
let spk1 = Script::from_hex("001404f1e52ce2bab3423c6a8c63b7cd730d8f12542c").unwrap();
let spk2 = Script::from_hex("00142b57404ae14f08c3a0c903feb2af7830605eb00f").unwrap();
let mut index = SpkTxOutIndex::default();
index.insert_spk(0, spk1.clone());
index.insert_spk(1, spk2.clone());
let tx1 = Transaction {
version: transaction::Version::TWO,
lock_time: absolute::LockTime::ZERO,
version: 0x02,
lock_time: PackedLockTime(0),
input: vec![],
output: vec![TxOut {
value: Amount::from_sat(42_000),
value: 42_000,
script_pubkey: spk1.clone(),
}],
};
assert_eq!(index.sent_and_received(&tx1), (0, 42_000));
assert_eq!(index.net_value(&tx1), 42_000);
index.scan(&tx1);
assert_eq!(
index.sent_and_received(&tx1, ..),
(Amount::from_sat(0), Amount::from_sat(42_000))
);
assert_eq!(
index.sent_and_received(&tx1, ..1),
(Amount::from_sat(0), Amount::from_sat(42_000))
);
assert_eq!(
index.sent_and_received(&tx1, 1..),
(Amount::from_sat(0), Amount::from_sat(0))
);
assert_eq!(index.net_value(&tx1, ..), SignedAmount::from_sat(42_000));
index.index_tx(&tx1);
assert_eq!(
index.sent_and_received(&tx1, ..),
(Amount::from_sat(0), Amount::from_sat(42_000)),
index.sent_and_received(&tx1),
(0, 42_000),
"shouldn't change after scanning"
);
let tx2 = Transaction {
version: transaction::Version::ONE,
lock_time: absolute::LockTime::ZERO,
version: 0x1,
lock_time: PackedLockTime(0),
input: vec![TxIn {
previous_output: OutPoint {
txid: tx1.compute_txid(),
txid: tx1.txid(),
vout: 0,
},
..Default::default()
}],
output: vec![
TxOut {
value: Amount::from_sat(20_000),
value: 20_000,
script_pubkey: spk2,
},
TxOut {
script_pubkey: spk1,
value: Amount::from_sat(30_000),
value: 30_000,
},
],
};
assert_eq!(
index.sent_and_received(&tx2, ..),
(Amount::from_sat(42_000), Amount::from_sat(50_000))
);
assert_eq!(
index.sent_and_received(&tx2, ..1),
(Amount::from_sat(42_000), Amount::from_sat(30_000))
);
assert_eq!(
index.sent_and_received(&tx2, 1..),
(Amount::from_sat(0), Amount::from_sat(20_000))
);
assert_eq!(index.net_value(&tx2, ..), SignedAmount::from_sat(8_000));
assert_eq!(index.sent_and_received(&tx2), (42_000, 50_000));
assert_eq!(index.net_value(&tx2), 8_000);
}
#[test]
fn mark_used() {
let spk1 = ScriptBuf::from_hex("001404f1e52ce2bab3423c6a8c63b7cd730d8f12542c").unwrap();
let spk2 = ScriptBuf::from_hex("00142b57404ae14f08c3a0c903feb2af7830605eb00f").unwrap();
let spk1 = Script::from_hex("001404f1e52ce2bab3423c6a8c63b7cd730d8f12542c").unwrap();
let spk2 = Script::from_hex("00142b57404ae14f08c3a0c903feb2af7830605eb00f").unwrap();
let mut spk_index = SpkTxOutIndex::default();
spk_index.insert_spk(1, spk1.clone());
@@ -97,16 +73,16 @@ fn mark_used() {
assert!(spk_index.is_used(&1));
let tx1 = Transaction {
version: transaction::Version::TWO,
lock_time: absolute::LockTime::ZERO,
version: 0x02,
lock_time: PackedLockTime(0),
input: vec![],
output: vec![TxOut {
value: Amount::from_sat(42_000),
value: 42_000,
script_pubkey: spk1,
}],
};
spk_index.index_tx(&tx1);
spk_index.scan(&tx1);
spk_index.unmark_used(&1);
assert!(
spk_index.is_used(&1),

File diff suppressed because it is too large

View File

@@ -1,670 +0,0 @@
#![cfg(feature = "miniscript")]
#[macro_use]
mod common;
use std::collections::{BTreeSet, HashSet};
use bdk_chain::{Balance, BlockId};
use bitcoin::{Amount, OutPoint, ScriptBuf};
use common::*;
#[allow(dead_code)]
struct Scenario<'a> {
/// Name of the test scenario
name: &'a str,
/// Transaction templates
tx_templates: &'a [TxTemplate<'a, BlockId>],
/// Names of txs that must exist in the output of `list_canonical_txs`
exp_chain_txs: HashSet<&'a str>,
/// Outpoints that must exist in the output of `filter_chain_txouts`
exp_chain_txouts: HashSet<(&'a str, u32)>,
/// Outpoints of UTXOs that must exist in the output of `filter_chain_unspents`
exp_unspents: HashSet<(&'a str, u32)>,
/// Expected balances
exp_balance: Balance,
}
/// This test ensures that [`TxGraph`] will reliably filter out irrelevant transactions when
/// presented with multiple conflicting transaction scenarios using the [`TxTemplate`] structure.
/// This test also checks that [`TxGraph::list_canonical_txs`], [`TxGraph::filter_chain_txouts`],
/// [`TxGraph::filter_chain_unspents`], and [`TxGraph::balance`] return correct data.
#[test]
fn test_tx_conflict_handling() {
// Create Local chains
let local_chain = local_chain!(
(0, h!("A")),
(1, h!("B")),
(2, h!("C")),
(3, h!("D")),
(4, h!("E")),
(5, h!("F")),
(6, h!("G"))
);
let chain_tip = local_chain.tip().block_id();
let scenarios = [
Scenario {
name: "coinbase tx cannot be in mempool and be unconfirmed",
tx_templates: &[
TxTemplate {
tx_name: "unconfirmed_coinbase",
inputs: &[TxInTemplate::Coinbase],
outputs: &[TxOutTemplate::new(5000, Some(0))],
..Default::default()
},
TxTemplate {
tx_name: "confirmed_genesis",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(1))],
anchors: &[block_id!(1, "B")],
last_seen: None,
},
TxTemplate {
tx_name: "unconfirmed_conflict",
inputs: &[
TxInTemplate::PrevTx("confirmed_genesis", 0),
TxInTemplate::PrevTx("unconfirmed_coinbase", 0)
],
outputs: &[TxOutTemplate::new(20000, Some(2))],
..Default::default()
},
TxTemplate {
tx_name: "confirmed_conflict",
inputs: &[TxInTemplate::PrevTx("confirmed_genesis", 0)],
outputs: &[TxOutTemplate::new(20000, Some(3))],
anchors: &[block_id!(4, "E")],
..Default::default()
},
],
exp_chain_txs: HashSet::from(["confirmed_genesis", "confirmed_conflict"]),
exp_chain_txouts: HashSet::from([("confirmed_genesis", 0), ("confirmed_conflict", 0)]),
exp_unspents: HashSet::from([("confirmed_conflict", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::ZERO,
untrusted_pending: Amount::ZERO,
confirmed: Amount::from_sat(20000),
},
},
Scenario {
name: "2 unconfirmed txs with same last_seens conflict",
tx_templates: &[
TxTemplate {
tx_name: "tx1",
outputs: &[TxOutTemplate::new(40000, Some(0))],
anchors: &[block_id!(1, "B")],
last_seen: None,
..Default::default()
},
TxTemplate {
tx_name: "tx_conflict_1",
inputs: &[TxInTemplate::PrevTx("tx1", 0)],
outputs: &[TxOutTemplate::new(20000, Some(2))],
last_seen: Some(300),
..Default::default()
},
TxTemplate {
tx_name: "tx_conflict_2",
inputs: &[TxInTemplate::PrevTx("tx1", 0)],
outputs: &[TxOutTemplate::new(30000, Some(3))],
last_seen: Some(300),
..Default::default()
},
],
// the tx graph is going to pick tx_conflict_2 because it has the higher lexicographical txid
exp_chain_txs: HashSet::from(["tx1", "tx_conflict_2"]),
exp_chain_txouts: HashSet::from([("tx1", 0), ("tx_conflict_2", 0)]),
exp_unspents: HashSet::from([("tx_conflict_2", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::from_sat(30000),
untrusted_pending: Amount::ZERO,
confirmed: Amount::ZERO,
},
},
Scenario {
name: "2 unconfirmed txs with different last_seens conflict",
tx_templates: &[
TxTemplate {
tx_name: "tx1",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(0)), TxOutTemplate::new(10000, Some(1))],
anchors: &[block_id!(1, "B")],
last_seen: None,
},
TxTemplate {
tx_name: "tx_conflict_1",
inputs: &[TxInTemplate::PrevTx("tx1", 0), TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(20000, Some(2))],
last_seen: Some(200),
..Default::default()
},
TxTemplate {
tx_name: "tx_conflict_2",
inputs: &[TxInTemplate::PrevTx("tx1", 0), TxInTemplate::PrevTx("tx1", 1)],
outputs: &[TxOutTemplate::new(30000, Some(3))],
last_seen: Some(300),
..Default::default()
},
],
exp_chain_txs: HashSet::from(["tx1", "tx_conflict_2"]),
exp_chain_txouts: HashSet::from([("tx1", 0), ("tx1", 1), ("tx_conflict_2", 0)]),
exp_unspents: HashSet::from([("tx_conflict_2", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::from_sat(30000),
untrusted_pending: Amount::ZERO,
confirmed: Amount::ZERO,
},
},
Scenario {
name: "3 unconfirmed txs with different last_seens conflict",
tx_templates: &[
TxTemplate {
tx_name: "tx1",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(0))],
anchors: &[block_id!(1, "B")],
last_seen: None,
},
TxTemplate {
tx_name: "tx_conflict_1",
inputs: &[TxInTemplate::PrevTx("tx1", 0), TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(20000, Some(1))],
last_seen: Some(200),
..Default::default()
},
TxTemplate {
tx_name: "tx_conflict_2",
inputs: &[TxInTemplate::PrevTx("tx1", 0)],
outputs: &[TxOutTemplate::new(30000, Some(2))],
last_seen: Some(300),
..Default::default()
},
TxTemplate {
tx_name: "tx_conflict_3",
inputs: &[TxInTemplate::PrevTx("tx1", 0)],
outputs: &[TxOutTemplate::new(40000, Some(3))],
last_seen: Some(400),
..Default::default()
},
],
exp_chain_txs: HashSet::from(["tx1", "tx_conflict_3"]),
exp_chain_txouts: HashSet::from([("tx1", 0), ("tx_conflict_3", 0)]),
exp_unspents: HashSet::from([("tx_conflict_3", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::from_sat(40000),
untrusted_pending: Amount::ZERO,
confirmed: Amount::ZERO,
},
},
Scenario {
name: "unconfirmed tx conflicts with tx in orphaned block, orphaned higher last_seen",
tx_templates: &[
TxTemplate {
tx_name: "tx1",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(0))],
anchors: &[block_id!(1, "B")],
last_seen: None,
},
TxTemplate {
tx_name: "tx_conflict_1",
inputs: &[TxInTemplate::PrevTx("tx1", 0), TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(20000, Some(1))],
last_seen: Some(200),
..Default::default()
},
TxTemplate {
tx_name: "tx_orphaned_conflict",
inputs: &[TxInTemplate::PrevTx("tx1", 0)],
outputs: &[TxOutTemplate::new(30000, Some(2))],
anchors: &[block_id!(4, "Orphaned Block")],
last_seen: Some(300),
},
],
exp_chain_txs: HashSet::from(["tx1", "tx_orphaned_conflict"]),
exp_chain_txouts: HashSet::from([("tx1", 0), ("tx_orphaned_conflict", 0)]),
exp_unspents: HashSet::from([("tx_orphaned_conflict", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::from_sat(30000),
untrusted_pending: Amount::ZERO,
confirmed: Amount::ZERO,
},
},
Scenario {
name: "unconfirmed tx conflicts with tx in orphaned block, orphaned lower last_seen",
tx_templates: &[
TxTemplate {
tx_name: "tx1",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(0))],
anchors: &[block_id!(1, "B")],
last_seen: None,
},
TxTemplate {
tx_name: "tx_conflict_1",
inputs: &[TxInTemplate::PrevTx("tx1", 0), TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(20000, Some(1))],
last_seen: Some(200),
..Default::default()
},
TxTemplate {
tx_name: "tx_orphaned_conflict",
inputs: &[TxInTemplate::PrevTx("tx1", 0)],
outputs: &[TxOutTemplate::new(30000, Some(2))],
anchors: &[block_id!(4, "Orphaned Block")],
last_seen: Some(100),
},
],
exp_chain_txs: HashSet::from(["tx1", "tx_conflict_1"]),
exp_chain_txouts: HashSet::from([("tx1", 0), ("tx_conflict_1", 0)]),
exp_unspents: HashSet::from([("tx_conflict_1", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::from_sat(20000),
untrusted_pending: Amount::ZERO,
confirmed: Amount::ZERO,
},
},
Scenario {
name: "multiple unconfirmed txs conflict with a confirmed tx",
tx_templates: &[
TxTemplate {
tx_name: "tx1",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(0))],
anchors: &[block_id!(1, "B")],
last_seen: None,
},
TxTemplate {
tx_name: "tx_conflict_1",
inputs: &[TxInTemplate::PrevTx("tx1", 0), TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(20000, Some(1))],
last_seen: Some(200),
..Default::default()
},
TxTemplate {
tx_name: "tx_conflict_2",
inputs: &[TxInTemplate::PrevTx("tx1", 0)],
outputs: &[TxOutTemplate::new(30000, Some(2))],
last_seen: Some(300),
..Default::default()
},
TxTemplate {
tx_name: "tx_conflict_3",
inputs: &[TxInTemplate::PrevTx("tx1", 0)],
outputs: &[TxOutTemplate::new(40000, Some(3))],
last_seen: Some(400),
..Default::default()
},
TxTemplate {
tx_name: "tx_confirmed_conflict",
inputs: &[TxInTemplate::PrevTx("tx1", 0)],
outputs: &[TxOutTemplate::new(50000, Some(4))],
anchors: &[block_id!(1, "B")],
..Default::default()
},
],
exp_chain_txs: HashSet::from(["tx1", "tx_confirmed_conflict"]),
exp_chain_txouts: HashSet::from([("tx1", 0), ("tx_confirmed_conflict", 0)]),
exp_unspents: HashSet::from([("tx_confirmed_conflict", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::ZERO,
untrusted_pending: Amount::ZERO,
confirmed: Amount::from_sat(50000),
},
},
Scenario {
name: "B and B' spend A and conflict, C spends B, all the transactions are unconfirmed, B' has higher last_seen than B",
tx_templates: &[
TxTemplate {
tx_name: "A",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(0))],
last_seen: Some(22),
..Default::default()
},
TxTemplate {
tx_name: "B",
inputs: &[TxInTemplate::PrevTx("A", 0)],
outputs: &[TxOutTemplate::new(20000, Some(1))],
last_seen: Some(23),
..Default::default()
},
TxTemplate {
tx_name: "B'",
inputs: &[TxInTemplate::PrevTx("A", 0)],
outputs: &[TxOutTemplate::new(20000, Some(2))],
last_seen: Some(24),
..Default::default()
},
TxTemplate {
tx_name: "C",
inputs: &[TxInTemplate::PrevTx("B", 0)],
outputs: &[TxOutTemplate::new(30000, Some(3))],
last_seen: Some(25),
..Default::default()
},
],
// A, B, C will appear in the list methods
// This is because B' has a higher last seen than B, but C has a higher
// last seen than B', so B and C are considered canonical
exp_chain_txs: HashSet::from(["A", "B", "C"]),
exp_chain_txouts: HashSet::from([("A", 0), ("B", 0), ("C", 0)]),
exp_unspents: HashSet::from([("C", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::from_sat(30000),
untrusted_pending: Amount::ZERO,
confirmed: Amount::ZERO,
},
},
Scenario {
name: "B and B' spend A and conflict, C spends B, A and B' are in best chain",
tx_templates: &[
TxTemplate {
tx_name: "A",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(0))],
anchors: &[block_id!(1, "B")],
last_seen: None,
},
TxTemplate {
tx_name: "B",
inputs: &[TxInTemplate::PrevTx("A", 0)],
outputs: &[TxOutTemplate::new(20000, Some(1))],
..Default::default()
},
TxTemplate {
tx_name: "B'",
inputs: &[TxInTemplate::PrevTx("A", 0)],
outputs: &[TxOutTemplate::new(20000, Some(2))],
anchors: &[block_id!(4, "E")],
..Default::default()
},
TxTemplate {
tx_name: "C",
inputs: &[TxInTemplate::PrevTx("B", 0)],
outputs: &[TxOutTemplate::new(30000, Some(3))],
..Default::default()
},
],
// B and C should not appear in the list methods
exp_chain_txs: HashSet::from(["A", "B'"]),
exp_chain_txouts: HashSet::from([("A", 0), ("B'", 0)]),
exp_unspents: HashSet::from([("B'", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::ZERO,
untrusted_pending: Amount::ZERO,
confirmed: Amount::from_sat(20000),
},
},
Scenario {
name: "B and B' spend A and conflict, C spends B', A and B' are in best chain",
tx_templates: &[
TxTemplate {
tx_name: "A",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(0))],
anchors: &[block_id!(1, "B")],
last_seen: None,
},
TxTemplate {
tx_name: "B",
inputs: &[TxInTemplate::PrevTx("A", 0)],
outputs: &[TxOutTemplate::new(20000, Some(1))],
..Default::default()
},
TxTemplate {
tx_name: "B'",
inputs: &[TxInTemplate::PrevTx("A", 0)],
outputs: &[TxOutTemplate::new(20000, Some(2))],
anchors: &[block_id!(4, "E")],
..Default::default()
},
TxTemplate {
tx_name: "C",
inputs: &[TxInTemplate::PrevTx("B'", 0)],
outputs: &[TxOutTemplate::new(30000, Some(3))],
..Default::default()
},
],
// B should not appear in the list methods
exp_chain_txs: HashSet::from(["A", "B'", "C"]),
exp_chain_txouts: HashSet::from([
("A", 0),
("B'", 0),
("C", 0),
]),
exp_unspents: HashSet::from([("C", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::from_sat(30000),
untrusted_pending: Amount::ZERO,
confirmed: Amount::ZERO,
},
},
Scenario {
name: "B and B' spend A and conflict, C spends both B and B', A is in best chain",
tx_templates: &[
TxTemplate {
tx_name: "A",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(0))],
anchors: &[block_id!(1, "B")],
last_seen: None,
},
TxTemplate {
tx_name: "B",
inputs: &[TxInTemplate::PrevTx("A", 0), TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(20000, Some(1))],
last_seen: Some(200),
..Default::default()
},
TxTemplate {
tx_name: "B'",
inputs: &[TxInTemplate::PrevTx("A", 0)],
outputs: &[TxOutTemplate::new(30000, Some(2))],
last_seen: Some(300),
..Default::default()
},
TxTemplate {
tx_name: "C",
inputs: &[
TxInTemplate::PrevTx("B", 0),
TxInTemplate::PrevTx("B'", 0),
],
outputs: &[TxOutTemplate::new(20000, Some(3))],
..Default::default()
},
],
// C should not appear in the list methods
exp_chain_txs: HashSet::from(["A", "B'"]),
exp_chain_txouts: HashSet::from([("A", 0), ("B'", 0)]),
exp_unspents: HashSet::from([("B'", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::from_sat(30000),
untrusted_pending: Amount::ZERO,
confirmed: Amount::ZERO,
},
},
Scenario {
name: "B and B' spend A and conflict, B' is confirmed, C spends both B and B', A is in best chain",
tx_templates: &[
TxTemplate {
tx_name: "A",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(0))],
anchors: &[block_id!(1, "B")],
last_seen: None,
},
TxTemplate {
tx_name: "B",
inputs: &[TxInTemplate::PrevTx("A", 0), TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(20000, Some(1))],
last_seen: Some(200),
..Default::default()
},
TxTemplate {
tx_name: "B'",
inputs: &[TxInTemplate::PrevTx("A", 0)],
outputs: &[TxOutTemplate::new(50000, Some(4))],
anchors: &[block_id!(1, "B")],
..Default::default()
},
TxTemplate {
tx_name: "C",
inputs: &[
TxInTemplate::PrevTx("B", 0),
TxInTemplate::PrevTx("B'", 0),
],
outputs: &[TxOutTemplate::new(20000, Some(5))],
..Default::default()
},
],
// C should not appear in the list methods
exp_chain_txs: HashSet::from(["A", "B'"]),
exp_chain_txouts: HashSet::from([("A", 0), ("B'", 0)]),
exp_unspents: HashSet::from([("B'", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::ZERO,
untrusted_pending: Amount::ZERO,
confirmed: Amount::from_sat(50000),
},
},
Scenario {
name: "B and B' spend A and conflict, B' is confirmed, C spends both B and B', D spends C, A is in best chain",
tx_templates: &[
TxTemplate {
tx_name: "A",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(0))],
anchors: &[block_id!(1, "B")],
last_seen: None,
},
TxTemplate {
tx_name: "B",
inputs: &[TxInTemplate::PrevTx("A", 0), TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(20000, Some(1))],
last_seen: Some(200),
..Default::default()
},
TxTemplate {
tx_name: "B'",
inputs: &[TxInTemplate::PrevTx("A", 0)],
outputs: &[TxOutTemplate::new(50000, Some(4))],
anchors: &[block_id!(1, "B")],
..Default::default()
},
TxTemplate {
tx_name: "C",
inputs: &[
TxInTemplate::PrevTx("B", 0),
TxInTemplate::PrevTx("B'", 0),
],
outputs: &[TxOutTemplate::new(20000, Some(5))],
..Default::default()
},
TxTemplate {
tx_name: "D",
inputs: &[TxInTemplate::PrevTx("C", 0)],
outputs: &[TxOutTemplate::new(20000, Some(6))],
..Default::default()
},
],
// D should not appear in the list methods
exp_chain_txs: HashSet::from(["A", "B'"]),
exp_chain_txouts: HashSet::from([("A", 0), ("B'", 0)]),
exp_unspents: HashSet::from([("B'", 0)]),
exp_balance: Balance {
immature: Amount::ZERO,
trusted_pending: Amount::ZERO,
untrusted_pending: Amount::ZERO,
confirmed: Amount::from_sat(50000),
},
},
];
for scenario in scenarios {
let (tx_graph, spk_index, exp_tx_ids) = init_graph(scenario.tx_templates.iter());
let txs = tx_graph
.list_canonical_txs(&local_chain, chain_tip)
.map(|tx| tx.tx_node.txid)
.collect::<BTreeSet<_>>();
let exp_txs = scenario
.exp_chain_txs
.iter()
.map(|txid| *exp_tx_ids.get(txid).expect("txid must exist"))
.collect::<BTreeSet<_>>();
assert_eq!(
txs, exp_txs,
"\n[{}] 'list_canonical_txs' failed",
scenario.name
);
let txouts = tx_graph
.filter_chain_txouts(
&local_chain,
chain_tip,
spk_index.outpoints().iter().cloned(),
)
.map(|(_, full_txout)| full_txout.outpoint)
.collect::<BTreeSet<_>>();
let exp_txouts = scenario
.exp_chain_txouts
.iter()
.map(|(txid, vout)| OutPoint {
txid: *exp_tx_ids.get(txid).expect("txid must exist"),
vout: *vout,
})
.collect::<BTreeSet<_>>();
assert_eq!(
txouts, exp_txouts,
"\n[{}] 'filter_chain_txouts' failed",
scenario.name
);
let utxos = tx_graph
.filter_chain_unspents(
&local_chain,
chain_tip,
spk_index.outpoints().iter().cloned(),
)
.map(|(_, full_txout)| full_txout.outpoint)
.collect::<BTreeSet<_>>();
let exp_utxos = scenario
.exp_unspents
.iter()
.map(|(txid, vout)| OutPoint {
txid: *exp_tx_ids.get(txid).expect("txid must exist"),
vout: *vout,
})
.collect::<BTreeSet<_>>();
assert_eq!(
utxos, exp_utxos,
"\n[{}] 'filter_chain_unspents' failed",
scenario.name
);
let balance = tx_graph.balance(
&local_chain,
chain_tip,
spk_index.outpoints().iter().cloned(),
|_, spk: ScriptBuf| spk_index.index_of_spk(spk).is_some(),
);
assert_eq!(
balance, scenario.exp_balance,
"\n[{}] 'balance' failed",
scenario.name
);
}
}

View File

@@ -1,6 +1,6 @@
[package]
name = "bdk_electrum"
version = "0.16.0"
version = "0.2.0"
edition = "2021"
homepage = "https://bitcoindevkit.org"
repository = "https://github.com/bitcoindevkit/bdk"
@@ -12,9 +12,5 @@ readme = "README.md"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bdk_chain = { path = "../chain", version = "0.17.0" }
electrum-client = { version = "0.20" }
#rustls = { version = "=0.21.1", optional = true, features = ["dangerous_configuration"] }
[dev-dependencies]
bdk_testenv = { path = "../testenv", default-features = false }
bdk_chain = { path = "../chain", version = "0.4.0", features = ["serde", "miniscript"] }
electrum-client = { version = "0.12" }

View File

@@ -1,7 +1,3 @@
# BDK Electrum
BDK Electrum extends [`electrum-client`] to update [`bdk_chain`] structures
from an Electrum server.
[`electrum-client`]: https://docs.rs/electrum-client/
[`bdk_chain`]: https://docs.rs/bdk-chain/
BDK Electrum client library for updating the keychain tracker.

View File

@@ -1,491 +0,0 @@
use bdk_chain::{
bitcoin::{block::Header, BlockHash, OutPoint, ScriptBuf, Transaction, Txid},
collections::{BTreeMap, HashMap},
local_chain::CheckPoint,
spk_client::{FullScanRequest, FullScanResult, SyncRequest, SyncResult},
tx_graph::TxGraph,
Anchor, BlockId, ConfirmationBlockTime,
};
use electrum_client::{ElectrumApi, Error, HeaderNotification};
use std::{
collections::BTreeSet,
sync::{Arc, Mutex},
};
/// We include a chain suffix of a certain length for robustness against reorgs near the tip.
const CHAIN_SUFFIX_LENGTH: u32 = 8;
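// As a hypothetical illustration: with `CHAIN_SUFFIX_LENGTH = 8` and a remote tip at
// height 100, the update keeps checkpoints for heights 93..=100, so a shallow reorg near
// the tip can still find a point of agreement with the receiving chain.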
/// Wrapper around an [`electrum_client::ElectrumApi`] which includes an internal in-memory
/// transaction cache to avoid re-fetching already downloaded transactions.
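///
/// A minimal construction sketch; the server URL is a placeholder, not a real endpoint:
///
/// ```ignore
/// let client = electrum_client::Client::new("ssl://example.com:50002")?;
/// let bdk_client = BdkElectrumClient::new(client);
/// ```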
#[derive(Debug)]
pub struct BdkElectrumClient<E> {
/// The internal [`electrum_client::ElectrumApi`]
pub inner: E,
/// The transaction cache
tx_cache: Mutex<HashMap<Txid, Arc<Transaction>>>,
/// The header cache
block_header_cache: Mutex<HashMap<u32, Header>>,
}
impl<E: ElectrumApi> BdkElectrumClient<E> {
/// Creates a new bdk client from an [`electrum_client::ElectrumApi`]
pub fn new(client: E) -> Self {
Self {
inner: client,
tx_cache: Default::default(),
block_header_cache: Default::default(),
}
}
/// Inserts transactions into the transaction cache so that the client will not re-fetch these
/// transactions.
pub fn populate_tx_cache<A>(&self, tx_graph: impl AsRef<TxGraph<A>>) {
let txs = tx_graph
.as_ref()
.full_txs()
.map(|tx_node| (tx_node.txid, tx_node.tx));
let mut tx_cache = self.tx_cache.lock().unwrap();
for (txid, tx) in txs {
tx_cache.insert(txid, tx);
}
}
/// Fetch the transaction with the given `txid`.
///
/// If it hits the cache it will return the cached version and avoid making the request.
pub fn fetch_tx(&self, txid: Txid) -> Result<Arc<Transaction>, Error> {
let tx_cache = self.tx_cache.lock().unwrap();
if let Some(tx) = tx_cache.get(&txid) {
return Ok(Arc::clone(tx));
}
drop(tx_cache);
let tx = Arc::new(self.inner.transaction_get(&txid)?);
self.tx_cache.lock().unwrap().insert(txid, Arc::clone(&tx));
Ok(tx)
}
/// Fetch the block header at the given `height`.
///
/// If it hits the cache it will return the cached version and avoid making the request.
fn fetch_header(&self, height: u32) -> Result<Header, Error> {
let block_header_cache = self.block_header_cache.lock().unwrap();
if let Some(header) = block_header_cache.get(&height) {
return Ok(*header);
}
drop(block_header_cache);
self.update_header(height)
}
/// Update the cached block header at the given `height`. Returns the updated header.
fn update_header(&self, height: u32) -> Result<Header, Error> {
let header = self.inner.block_header(height as usize)?;
self.block_header_cache
.lock()
.unwrap()
.insert(height, header);
Ok(header)
}
/// Broadcasts a transaction to the network.
///
/// This is a re-export of [`ElectrumApi::transaction_broadcast`].
pub fn transaction_broadcast(&self, tx: &Transaction) -> Result<Txid, Error> {
self.inner.transaction_broadcast(tx)
}
/// Full scan the keychain scripts specified with the blockchain (via an Electrum client) and
/// return updates for [`bdk_chain`] data structures.
///
/// - `request`: struct with data required to perform a spk-based blockchain client full scan,
/// see [`FullScanRequest`]
/// - `stop_gap`: the full scan for each keychain stops after a gap of script pubkeys with no
/// associated transactions
/// - `batch_size`: specifies the max number of script pubkeys to request for in a single batch
/// request
/// - `fetch_prev_txouts`: specifies whether or not we want previous `TxOut`s for fee
///   calculation
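///
/// A usage sketch; `my_keychain` and `indexed_spks` (an iterator of `(u32, ScriptBuf)`)
/// are illustrative placeholders, and the `stop_gap`/`batch_size` values are not
/// recommendations:
///
/// ```ignore
/// let request = FullScanRequest::from_chain_tip(chain_tip)
///     .set_spks_for_keychain(my_keychain, indexed_spks);
/// let result = client.full_scan(request, /* stop_gap */ 10, /* batch_size */ 5, false)?;
/// ```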
pub fn full_scan<K: Ord + Clone>(
&self,
request: FullScanRequest<K>,
stop_gap: usize,
batch_size: usize,
fetch_prev_txouts: bool,
) -> Result<FullScanResult<K>, Error> {
let (tip, latest_blocks) =
fetch_tip_and_latest_blocks(&self.inner, request.chain_tip.clone())?;
let mut graph_update = TxGraph::<ConfirmationBlockTime>::default();
let mut last_active_indices = BTreeMap::<K, u32>::new();
for (keychain, spks) in request.spks_by_keychain {
if let Some(last_active_index) =
self.populate_with_spks(&mut graph_update, spks, stop_gap, batch_size)?
{
last_active_indices.insert(keychain, last_active_index);
}
}
let chain_update = chain_update(tip, &latest_blocks, graph_update.all_anchors())?;
// Fetch previous `TxOut`s for fee calculation if flag is enabled.
if fetch_prev_txouts {
self.fetch_prev_txout(&mut graph_update)?;
}
Ok(FullScanResult {
graph_update,
chain_update,
last_active_indices,
})
}
/// Sync a set of scripts with the blockchain (via an Electrum client) for the data specified
    /// and return updates for [`bdk_chain`] data structures.
///
/// - `request`: struct with data required to perform a spk-based blockchain client sync,
/// see [`SyncRequest`]
/// - `batch_size`: specifies the max number of script pubkeys to request for in a single batch
/// request
/// - `fetch_prev_txouts`: specifies whether or not we want previous `TxOut`s for fee
/// calculation
///
/// If the scripts to sync are unknown, such as when restoring or importing a keychain that
/// may include scripts that have been used, use [`full_scan`] with the keychain.
///
/// [`full_scan`]: Self::full_scan
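    ///
    /// # Example
    ///
    /// A sketch assuming `chain` is a local chain and `spks` are the scripts to watch (names and
    /// parameter values are illustrative):
    ///
    /// ```ignore
    /// let request = SyncRequest::from_chain_tip(chain.tip()).chain_spks(spks);
    /// let res = client.sync(request, /* batch_size */ 10, /* fetch_prev_txouts */ true)?;
    /// ```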
pub fn sync(
&self,
request: SyncRequest,
batch_size: usize,
fetch_prev_txouts: bool,
) -> Result<SyncResult, Error> {
let full_scan_req = FullScanRequest::from_chain_tip(request.chain_tip.clone())
.set_spks_for_keychain((), request.spks.enumerate().map(|(i, spk)| (i as u32, spk)));
let mut full_scan_res = self.full_scan(full_scan_req, usize::MAX, batch_size, false)?;
let (tip, latest_blocks) =
fetch_tip_and_latest_blocks(&self.inner, request.chain_tip.clone())?;
self.populate_with_txids(&mut full_scan_res.graph_update, request.txids)?;
self.populate_with_outpoints(&mut full_scan_res.graph_update, request.outpoints)?;
let chain_update = chain_update(
tip,
&latest_blocks,
full_scan_res.graph_update.all_anchors(),
)?;
// Fetch previous `TxOut`s for fee calculation if flag is enabled.
if fetch_prev_txouts {
self.fetch_prev_txout(&mut full_scan_res.graph_update)?;
}
Ok(SyncResult {
chain_update,
graph_update: full_scan_res.graph_update,
})
}
/// Populate the `graph_update` with transactions/anchors associated with the given `spks`.
///
    /// Transactions that contain an output with a requested spk, or spend from an output with a
    /// requested spk, will be added to `graph_update`. Anchors of the aforementioned transactions
    /// are also included.
fn populate_with_spks(
&self,
graph_update: &mut TxGraph<ConfirmationBlockTime>,
mut spks: impl Iterator<Item = (u32, ScriptBuf)>,
stop_gap: usize,
batch_size: usize,
) -> Result<Option<u32>, Error> {
let mut unused_spk_count = 0_usize;
let mut last_active_index = Option::<u32>::None;
loop {
let spks = (0..batch_size)
.map_while(|_| spks.next())
.collect::<Vec<_>>();
if spks.is_empty() {
return Ok(last_active_index);
}
let spk_histories = self
.inner
.batch_script_get_history(spks.iter().map(|(_, s)| s.as_script()))?;
for ((spk_index, _spk), spk_history) in spks.into_iter().zip(spk_histories) {
if spk_history.is_empty() {
unused_spk_count = unused_spk_count.saturating_add(1);
if unused_spk_count >= stop_gap {
return Ok(last_active_index);
}
continue;
} else {
last_active_index = Some(spk_index);
unused_spk_count = 0;
}
for tx_res in spk_history {
let _ = graph_update.insert_tx(self.fetch_tx(tx_res.tx_hash)?);
self.validate_merkle_for_anchor(graph_update, tx_res.tx_hash, tx_res.height)?;
}
}
}
}
/// Populate the `graph_update` with associated transactions/anchors of `outpoints`.
///
/// Transactions in which the outpoint resides, and transactions that spend from the outpoint are
/// included. Anchors of the aforementioned transactions are included.
fn populate_with_outpoints(
&self,
graph_update: &mut TxGraph<ConfirmationBlockTime>,
outpoints: impl IntoIterator<Item = OutPoint>,
) -> Result<(), Error> {
for outpoint in outpoints {
let op_txid = outpoint.txid;
let op_tx = self.fetch_tx(op_txid)?;
let op_txout = match op_tx.output.get(outpoint.vout as usize) {
Some(txout) => txout,
None => continue,
};
debug_assert_eq!(op_tx.compute_txid(), op_txid);
            // Attempt to find the following transactions (alongside their anchors) and add them
            // to `graph_update`:
let mut has_residing = false; // tx in which the outpoint resides
let mut has_spending = false; // tx that spends the outpoint
for res in self.inner.script_get_history(&op_txout.script_pubkey)? {
if has_residing && has_spending {
break;
}
if !has_residing && res.tx_hash == op_txid {
has_residing = true;
let _ = graph_update.insert_tx(Arc::clone(&op_tx));
self.validate_merkle_for_anchor(graph_update, res.tx_hash, res.height)?;
}
if !has_spending && res.tx_hash != op_txid {
let res_tx = self.fetch_tx(res.tx_hash)?;
// we exclude txs/anchors that do not spend our specified outpoint(s)
has_spending = res_tx
.input
.iter()
.any(|txin| txin.previous_output == outpoint);
if !has_spending {
continue;
}
let _ = graph_update.insert_tx(Arc::clone(&res_tx));
self.validate_merkle_for_anchor(graph_update, res.tx_hash, res.height)?;
}
}
}
Ok(())
}
/// Populate the `graph_update` with transactions/anchors of the provided `txids`.
fn populate_with_txids(
&self,
graph_update: &mut TxGraph<ConfirmationBlockTime>,
txids: impl IntoIterator<Item = Txid>,
) -> Result<(), Error> {
for txid in txids {
let tx = match self.fetch_tx(txid) {
Ok(tx) => tx,
Err(electrum_client::Error::Protocol(_)) => continue,
Err(other_err) => return Err(other_err),
};
let spk = tx
.output
.first()
.map(|txo| &txo.script_pubkey)
.expect("tx must have an output");
            // Because of restrictions of the Electrum API, we have to use the `script_get_history`
            // call to get the confirmation status of our transaction.
if let Some(r) = self
.inner
.script_get_history(spk)?
.into_iter()
.find(|r| r.tx_hash == txid)
{
self.validate_merkle_for_anchor(graph_update, txid, r.height)?;
}
let _ = graph_update.insert_tx(tx);
}
Ok(())
}
// Helper function which checks if a transaction is confirmed by validating the merkle proof.
// An anchor is inserted if the transaction is validated to be in a confirmed block.
fn validate_merkle_for_anchor(
&self,
graph_update: &mut TxGraph<ConfirmationBlockTime>,
txid: Txid,
confirmation_height: i32,
) -> Result<(), Error> {
if let Ok(merkle_res) = self
.inner
.transaction_get_merkle(&txid, confirmation_height as usize)
{
let mut header = self.fetch_header(merkle_res.block_height as u32)?;
let mut is_confirmed_tx = electrum_client::utils::validate_merkle_proof(
&txid,
&header.merkle_root,
&merkle_res,
);
// Merkle validation will fail if the header in `block_header_cache` is outdated, so we
// want to check if there is a new header and validate against the new one.
if !is_confirmed_tx {
header = self.update_header(merkle_res.block_height as u32)?;
is_confirmed_tx = electrum_client::utils::validate_merkle_proof(
&txid,
&header.merkle_root,
&merkle_res,
);
}
if is_confirmed_tx {
let _ = graph_update.insert_anchor(
txid,
ConfirmationBlockTime {
confirmation_time: header.time as u64,
block_id: BlockId {
height: merkle_res.block_height as u32,
hash: header.block_hash(),
},
},
);
}
}
Ok(())
}
// Helper function which fetches the `TxOut`s of our relevant transactions' previous transactions,
// which we do not have by default. This data is needed to calculate the transaction fee.
fn fetch_prev_txout(
&self,
graph_update: &mut TxGraph<ConfirmationBlockTime>,
) -> Result<(), Error> {
let full_txs: Vec<Arc<Transaction>> =
graph_update.full_txs().map(|tx_node| tx_node.tx).collect();
for tx in full_txs {
for vin in &tx.input {
let outpoint = vin.previous_output;
let vout = outpoint.vout;
let prev_tx = self.fetch_tx(outpoint.txid)?;
let txout = prev_tx.output[vout as usize].clone();
let _ = graph_update.insert_txout(outpoint, txout);
}
}
Ok(())
}
}
/// Return a [`CheckPoint`] of the latest tip that connects with `prev_tip`. The latest blocks are
/// fetched to construct checkpoint updates with the proper [`BlockHash`] in case of a re-org.
fn fetch_tip_and_latest_blocks(
client: &impl ElectrumApi,
prev_tip: CheckPoint,
) -> Result<(CheckPoint, BTreeMap<u32, BlockHash>), Error> {
let HeaderNotification { height, .. } = client.block_headers_subscribe()?;
let new_tip_height = height as u32;
// If electrum returns a tip height that is lower than our previous tip, then checkpoints do
// not need updating. We just return the previous tip and use that as the point of agreement.
if new_tip_height < prev_tip.height() {
return Ok((prev_tip, BTreeMap::new()));
}
    // Atomically fetch the latest `CHAIN_SUFFIX_LENGTH` blocks from Electrum. We use these
    // to construct our checkpoint update.
let mut new_blocks = {
let start_height = new_tip_height.saturating_sub(CHAIN_SUFFIX_LENGTH - 1);
let hashes = client
.block_headers(start_height as _, CHAIN_SUFFIX_LENGTH as _)?
.headers
.into_iter()
.map(|h| h.block_hash());
(start_height..).zip(hashes).collect::<BTreeMap<u32, _>>()
};
// Find the "point of agreement" (if any).
let agreement_cp = {
let mut agreement_cp = Option::<CheckPoint>::None;
for cp in prev_tip.iter() {
let cp_block = cp.block_id();
let hash = match new_blocks.get(&cp_block.height) {
Some(&hash) => hash,
None => {
assert!(
new_tip_height >= cp_block.height,
"already checked that electrum's tip cannot be smaller"
);
let hash = client.block_header(cp_block.height as _)?.block_hash();
new_blocks.insert(cp_block.height, hash);
hash
}
};
if hash == cp_block.hash {
agreement_cp = Some(cp);
break;
}
}
agreement_cp
};
let agreement_height = agreement_cp.as_ref().map(CheckPoint::height);
let new_tip = new_blocks
.iter()
// Prune `new_blocks` to only include blocks that are actually new.
        .filter(|&(&height, _)| Some(height) > agreement_height)
.map(|(height, hash)| BlockId {
height: *height,
hash: *hash,
})
.fold(agreement_cp, |prev_cp, block| {
Some(match prev_cp {
Some(cp) => cp.push(block).expect("must extend checkpoint"),
None => CheckPoint::new(block),
})
})
.expect("must have at least one checkpoint");
Ok((new_tip, new_blocks))
}
// Add a corresponding checkpoint per anchor height if it does not yet exist. Checkpoints should not
// surpass `latest_blocks`.
fn chain_update<A: Anchor>(
mut tip: CheckPoint,
latest_blocks: &BTreeMap<u32, BlockHash>,
anchors: &BTreeSet<(A, Txid)>,
) -> Result<CheckPoint, Error> {
for anchor in anchors {
let height = anchor.0.anchor_block().height;
// Checkpoint uses the `BlockHash` from `latest_blocks` so that the hash will be consistent
// in case of a re-org.
if tip.get(height).is_none() && height <= tip.height() {
let hash = match latest_blocks.get(&height) {
Some(&hash) => hash,
None => anchor.0.anchor_block().hash,
};
tip = tip.insert(BlockId { hash, height });
}
}
Ok(tip)
}

View File

@@ -1,22 +1,588 @@
//! This crate is used for updating structures of [`bdk_chain`] with data from an Electrum server.
//! This crate is used for updating structures of the [`bdk_chain`] crate with data from electrum.
//!
//! The two primary methods are [`BdkElectrumClient::sync`] and [`BdkElectrumClient::full_scan`]. In most cases
//! [`BdkElectrumClient::sync`] is used to sync the transaction histories of scripts that the application
//! cares about, for example the scripts for all the receive addresses of a Wallet's keychain that it
//! has shown to a user.
//! keychain where the range of possibly used scripts is not known. In this case it is necessary to
//! scan all keychain scripts until a number (the "stop gap") of unused scripts is discovered. For a
//! sync or full scan the user receives relevant blockchain data and output updates for
//! [`bdk_chain`].
//! The star of the show is the [`ElectrumExt::scan`] method, which scans for relevant blockchain
//! data (via electrum) and outputs an [`ElectrumUpdate`].
//!
//! Refer to [`example_electrum`] for a complete example.
//! An [`ElectrumUpdate`] only includes `txid`s and no full transactions. The caller is responsible
//! for obtaining full transactions before applying the update. This can be done with
//! these steps:
//!
//! [`example_electrum`]: https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_electrum
//! 1. Determine which full transactions are missing. The method [`missing_full_txs`] of
//! [`ElectrumUpdate`] can be used.
//!
//! 2. Obtain the full transactions. To do this via electrum, the method
//! [`batch_transaction_get`] can be used.
//!
//! Refer to [`bdk_electrum_example`] for a complete example.
//!
//! [`ElectrumExt::scan`]: ElectrumExt::scan
//! [`missing_full_txs`]: ElectrumUpdate::missing_full_txs
//! [`batch_transaction_get`]: ElectrumApi::batch_transaction_get
//! [`bdk_electrum_example`]: https://github.com/LLFourn/bdk_core_staging/tree/master/bdk_electrum_example
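//!
//! A minimal end-to-end sketch of the new API (`chain`, `graph`, `revealed_spks`, and the server
//! URL are illustrative; see [`example_electrum`] for a complete program):
//!
//! ```ignore
//! use bdk_electrum::{electrum_client, BdkElectrumClient};
//!
//! let client = BdkElectrumClient::new(electrum_client::Client::new("ssl://electrum.example.org:50002")?);
//! client.populate_tx_cache(&graph); // avoid re-fetching transactions we already have
//! let request = SyncRequest::from_chain_tip(chain.tip()).chain_spks(revealed_spks);
//! let update = client.sync(request, /* batch_size */ 10, /* fetch_prev_txouts */ true)?;
//! chain.apply_update(update.chain_update)?;
//! let _ = graph.apply_update(update.graph_update);
//! ```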
#![warn(missing_docs)]
mod bdk_electrum_client;
pub use bdk_electrum_client::*;
use std::{
collections::{BTreeMap, HashMap},
fmt::Debug,
};
pub use bdk_chain;
use bdk_chain::{
bitcoin::{hashes::hex::FromHex, BlockHash, OutPoint, Script, Transaction, Txid},
chain_graph::{self, ChainGraph},
keychain::KeychainScan,
sparse_chain::{self, ChainPosition, SparseChain},
tx_graph::TxGraph,
BlockId, ConfirmationTime, TxHeight,
};
pub use electrum_client;
use electrum_client::{Client, ElectrumApi, Error};
/// Trait to extend [`electrum_client::Client`] functionality.
///
/// Refer to [crate-level documentation] for more.
///
/// [crate-level documentation]: crate
pub trait ElectrumExt {
/// Fetch the latest block height.
fn get_tip(&self) -> Result<(u32, BlockHash), Error>;
    /// Scan the blockchain (via electrum) for the data specified. This returns an [`ElectrumUpdate`]
/// which can be transformed into a [`KeychainScan`] after we find all the missing full
/// transactions.
///
/// - `local_chain`: the most recent block hashes present locally
/// - `keychain_spks`: keychains that we want to scan transactions for
/// - `txids`: transactions for which we want the updated [`ChainPosition`]s
/// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
    ///   want to include in the update
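    ///
    /// A minimal call sketch (all inputs are illustrative and come from the caller's tracker
    /// state):
    ///
    /// ```ignore
    /// let update = client.scan(&local_chain, keychain_spks, txids, outpoints, 20, 10)?;
    /// ```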
fn scan<K: Ord + Clone>(
&self,
local_chain: &BTreeMap<u32, BlockHash>,
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, Script)>>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
stop_gap: usize,
batch_size: usize,
) -> Result<ElectrumUpdate<K, TxHeight>, Error>;
/// Convenience method to call [`scan`] without requiring a keychain.
///
/// [`scan`]: ElectrumExt::scan
fn scan_without_keychain(
&self,
local_chain: &BTreeMap<u32, BlockHash>,
misc_spks: impl IntoIterator<Item = Script>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
batch_size: usize,
) -> Result<SparseChain, Error> {
let spk_iter = misc_spks
.into_iter()
.enumerate()
.map(|(i, spk)| (i as u32, spk));
self.scan(
local_chain,
[((), spk_iter)].into(),
txids,
outpoints,
usize::MAX,
batch_size,
)
.map(|u| u.chain_update)
}
}
impl ElectrumExt for Client {
fn get_tip(&self) -> Result<(u32, BlockHash), Error> {
// TODO: unsubscribe when added to the client, or is there a better call to use here?
self.block_headers_subscribe()
.map(|data| (data.height as u32, data.header.block_hash()))
}
fn scan<K: Ord + Clone>(
&self,
local_chain: &BTreeMap<u32, BlockHash>,
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, Script)>>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
stop_gap: usize,
batch_size: usize,
) -> Result<ElectrumUpdate<K, TxHeight>, Error> {
let mut request_spks = keychain_spks
.into_iter()
.map(|(k, s)| {
let iter = s.into_iter();
(k, iter)
})
.collect::<BTreeMap<K, _>>();
let mut scanned_spks = BTreeMap::<(K, u32), (Script, bool)>::new();
let txids = txids.into_iter().collect::<Vec<_>>();
let outpoints = outpoints.into_iter().collect::<Vec<_>>();
let update = loop {
let mut update = prepare_update(self, local_chain)?;
if !request_spks.is_empty() {
if !scanned_spks.is_empty() {
let mut scanned_spk_iter = scanned_spks
.iter()
.map(|(i, (spk, _))| (i.clone(), spk.clone()));
match populate_with_spks::<K, _, _>(
self,
&mut update,
&mut scanned_spk_iter,
stop_gap,
batch_size,
) {
Err(InternalError::Reorg) => continue,
Err(InternalError::ElectrumError(e)) => return Err(e),
Ok(mut spks) => scanned_spks.append(&mut spks),
};
}
for (keychain, keychain_spks) in &mut request_spks {
match populate_with_spks::<K, u32, _>(
self,
&mut update,
keychain_spks,
stop_gap,
batch_size,
) {
Err(InternalError::Reorg) => continue,
Err(InternalError::ElectrumError(e)) => return Err(e),
Ok(spks) => scanned_spks.extend(
spks.into_iter()
.map(|(spk_i, spk)| ((keychain.clone(), spk_i), spk)),
),
};
}
}
match populate_with_txids(self, &mut update, &mut txids.iter().cloned()) {
Err(InternalError::Reorg) => continue,
Err(InternalError::ElectrumError(e)) => return Err(e),
Ok(_) => {}
}
match populate_with_outpoints(self, &mut update, &mut outpoints.iter().cloned()) {
Err(InternalError::Reorg) => continue,
Err(InternalError::ElectrumError(e)) => return Err(e),
Ok(_txs) => { /* [TODO] cache full txs to reduce bandwidth */ }
}
// check for reorgs during scan process
let our_tip = update
.latest_checkpoint()
.expect("update must have atleast one checkpoint");
let server_blockhash = self.block_header(our_tip.height as usize)?.block_hash();
if our_tip.hash != server_blockhash {
continue; // reorg
} else {
break update;
}
};
let last_active_index = request_spks
.into_keys()
.filter_map(|k| {
scanned_spks
.range((k.clone(), u32::MIN)..=(k.clone(), u32::MAX))
.rev()
.find(|(_, (_, active))| *active)
.map(|((_, i), _)| (k, *i))
})
.collect::<BTreeMap<_, _>>();
Ok(ElectrumUpdate {
chain_update: update,
last_active_indices: last_active_index,
})
}
}
/// The result of [`ElectrumExt::scan`].
pub struct ElectrumUpdate<K, P> {
/// The internal [`SparseChain`] update.
pub chain_update: SparseChain<P>,
    /// The last keychain script pubkey indices that had transaction histories.
pub last_active_indices: BTreeMap<K, u32>,
}
impl<K, P> Default for ElectrumUpdate<K, P> {
fn default() -> Self {
Self {
chain_update: Default::default(),
last_active_indices: Default::default(),
}
}
}
impl<K, P> AsRef<SparseChain<P>> for ElectrumUpdate<K, P> {
fn as_ref(&self) -> &SparseChain<P> {
&self.chain_update
}
}
impl<K: Ord + Clone + Debug, P: ChainPosition> ElectrumUpdate<K, P> {
    /// Return a list of missing full transactions that are required to call [`inflate_update`].
///
/// [`inflate_update`]: bdk_chain::chain_graph::ChainGraph::inflate_update
pub fn missing_full_txs<G>(&self, graph: G) -> Vec<&Txid>
where
G: AsRef<TxGraph>,
{
self.chain_update
.txids()
.filter(|(_, txid)| graph.as_ref().get_tx(*txid).is_none())
.map(|(_, txid)| txid)
.collect()
}
/// Transform the [`ElectrumUpdate`] into a [`KeychainScan`], which can be applied to a
/// `tracker`.
///
/// This will fail if there are missing full transactions not provided via `new_txs`.
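    ///
    /// A sketch of the workflow described in the crate docs (`tracker` is an illustrative value
    /// holding a [`ChainGraph`]):
    ///
    /// ```ignore
    /// let update = client.scan(&local_chain, keychain_spks, txids, outpoints, 20, 10)?;
    /// let missing = update.missing_full_txs(&tracker);
    /// let new_txs = client.batch_transaction_get(missing)?;
    /// let scan = update.into_keychain_scan(new_txs, &tracker)?;
    /// ```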
pub fn into_keychain_scan<CG>(
self,
new_txs: Vec<Transaction>,
chain_graph: &CG,
) -> Result<KeychainScan<K, P>, chain_graph::NewError<P>>
where
CG: AsRef<ChainGraph<P>>,
{
Ok(KeychainScan {
update: chain_graph
.as_ref()
.inflate_update(self.chain_update, new_txs)?,
last_active_indices: self.last_active_indices,
})
}
}
impl<K: Ord + Clone + Debug> ElectrumUpdate<K, TxHeight> {
/// Creates [`ElectrumUpdate<K, ConfirmationTime>`] from [`ElectrumUpdate<K, TxHeight>`].
pub fn into_confirmation_time_update(
self,
client: &electrum_client::Client,
) -> Result<ElectrumUpdate<K, ConfirmationTime>, Error> {
let heights = self
.chain_update
.range_txids_by_height(..TxHeight::Unconfirmed)
.map(|(h, _)| match h {
TxHeight::Confirmed(h) => *h,
_ => unreachable!("already filtered out unconfirmed"),
})
.collect::<Vec<u32>>();
let height_to_time = heights
.clone()
.into_iter()
.zip(
client
.batch_block_header(heights)?
.into_iter()
.map(|bh| bh.time as u64),
)
.collect::<HashMap<u32, u64>>();
let mut new_update = SparseChain::<ConfirmationTime>::from_checkpoints(
self.chain_update.range_checkpoints(..),
);
for &(tx_height, txid) in self.chain_update.txids() {
let conf_time = match tx_height {
TxHeight::Confirmed(height) => ConfirmationTime::Confirmed {
height,
time: height_to_time[&height],
},
TxHeight::Unconfirmed => ConfirmationTime::Unconfirmed,
};
let _ = new_update.insert_tx(txid, conf_time).expect("must insert");
}
Ok(ElectrumUpdate {
chain_update: new_update,
last_active_indices: self.last_active_indices,
})
}
}
#[derive(Debug)]
enum InternalError {
ElectrumError(Error),
Reorg,
}
impl From<electrum_client::Error> for InternalError {
fn from(value: electrum_client::Error) -> Self {
Self::ElectrumError(value)
}
}
fn get_tip(client: &Client) -> Result<(u32, BlockHash), Error> {
// TODO: unsubscribe when added to the client, or is there a better call to use here?
client
.block_headers_subscribe()
.map(|data| (data.height as u32, data.header.block_hash()))
}
/// Prepare an update sparsechain "template" based on the checkpoints of the `local_chain`.
fn prepare_update(
client: &Client,
local_chain: &BTreeMap<u32, BlockHash>,
) -> Result<SparseChain, Error> {
let mut update = SparseChain::default();
// Find the local chain block that is still there so our update can connect to the local chain.
for (&existing_height, &existing_hash) in local_chain.iter().rev() {
// TODO: a batch request may be safer, as a reorg that happens when we are obtaining
// `block_header`s will result in inconsistencies
let current_hash = client.block_header(existing_height as usize)?.block_hash();
let _ = update
.insert_checkpoint(BlockId {
height: existing_height,
hash: current_hash,
})
.expect("This never errors because we are working with a fresh chain");
if current_hash == existing_hash {
break;
}
}
// Insert the new tip so new transactions will be accepted into the sparsechain.
let tip = {
let (height, hash) = get_tip(client)?;
BlockId { height, hash }
};
if let Err(failure) = update.insert_checkpoint(tip) {
match failure {
sparse_chain::InsertCheckpointError::HashNotMatching { .. } => {
// There has been a re-org before we even begin scanning addresses.
// Just recursively call (this should never happen).
return prepare_update(client, local_chain);
}
}
}
Ok(update)
}
/// This atrocity is required because electrum thinks a height of 0 means "unconfirmed", but
/// there is such a thing as a genesis block.
///
/// We therefore hard-code the genesis coinbase txid to always have a chain position of
/// [`TxHeight::Confirmed(0)`].
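///
/// For example (illustrative values; `some_txid` is any non-genesis txid):
///
/// ```ignore
/// assert_eq!(determine_tx_height(-1, 100, some_txid), TxHeight::Unconfirmed);
/// assert_eq!(determine_tx_height(0, 100, some_txid), TxHeight::Unconfirmed);
/// assert_eq!(determine_tx_height(42, 100, some_txid), TxHeight::Confirmed(42));
/// assert_eq!(determine_tx_height(150, 100, some_txid), TxHeight::Unconfirmed); // above tip
/// ```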
fn determine_tx_height(raw_height: i32, tip_height: u32, txid: Txid) -> TxHeight {
if txid
== Txid::from_hex("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b")
.expect("must deserialize genesis coinbase txid")
{
return TxHeight::Confirmed(0);
}
match raw_height {
h if h <= 0 => {
debug_assert!(
h == 0 || h == -1,
"unexpected height ({}) from electrum server",
h
);
TxHeight::Unconfirmed
}
h => {
let h = h as u32;
if h > tip_height {
TxHeight::Unconfirmed
} else {
TxHeight::Confirmed(h)
}
}
}
}
/// Populates the update [`SparseChain`] with related transactions and associated
/// [`ChainPosition`]s of the provided `outpoints` (that is, the tx in which each outpoint resides
/// and the tx that spends it).
///
/// Unfortunately, this is awkward to implement as electrum does not provide such an API. Instead, we
/// will get the tx history of the outpoint's spk and try to find the containing tx and the
/// spending tx.
fn populate_with_outpoints(
client: &Client,
update: &mut SparseChain,
outpoints: &mut impl Iterator<Item = OutPoint>,
) -> Result<HashMap<Txid, Transaction>, InternalError> {
let tip = update
.latest_checkpoint()
.expect("update must atleast have one checkpoint");
let mut full_txs = HashMap::new();
for outpoint in outpoints {
let txid = outpoint.txid;
let tx = client.transaction_get(&txid)?;
debug_assert_eq!(tx.txid(), txid);
let txout = match tx.output.get(outpoint.vout as usize) {
Some(txout) => txout,
None => continue,
};
// attempt to find the following transactions (alongside their chain positions), and
// add to our sparsechain `update`:
let mut has_residing = false; // tx in which the outpoint resides
let mut has_spending = false; // tx that spends the outpoint
for res in client.script_get_history(&txout.script_pubkey)? {
if has_residing && has_spending {
break;
}
if res.tx_hash == txid {
if has_residing {
continue;
}
has_residing = true;
full_txs.insert(res.tx_hash, tx.clone());
} else {
if has_spending {
continue;
}
let res_tx = match full_txs.get(&res.tx_hash) {
Some(tx) => tx,
None => {
let res_tx = client.transaction_get(&res.tx_hash)?;
full_txs.insert(res.tx_hash, res_tx);
full_txs.get(&res.tx_hash).expect("just inserted")
}
};
has_spending = res_tx
.input
.iter()
.any(|txin| txin.previous_output == outpoint);
if !has_spending {
continue;
}
};
let tx_height = determine_tx_height(res.height, tip.height, res.tx_hash);
if let Err(failure) = update.insert_tx(res.tx_hash, tx_height) {
match failure {
sparse_chain::InsertTxError::TxTooHigh { .. } => {
unreachable!("we should never encounter this as we ensured height <= tip");
}
sparse_chain::InsertTxError::TxMovedUnexpectedly { .. } => {
return Err(InternalError::Reorg);
}
}
}
}
}
Ok(full_txs)
}
/// Populate an update [`SparseChain`] with transactions (and associated block positions) from
/// the given `txids`.
fn populate_with_txids(
client: &Client,
update: &mut SparseChain,
txids: &mut impl Iterator<Item = Txid>,
) -> Result<(), InternalError> {
let tip = update
.latest_checkpoint()
.expect("update must have atleast one checkpoint");
for txid in txids {
let tx = match client.transaction_get(&txid) {
Ok(tx) => tx,
Err(electrum_client::Error::Protocol(_)) => continue,
Err(other_err) => return Err(other_err.into()),
};
let spk = tx
.output
.get(0)
.map(|txo| &txo.script_pubkey)
.expect("tx must have an output");
let tx_height = match client
.script_get_history(spk)?
.into_iter()
.find(|r| r.tx_hash == txid)
{
Some(r) => determine_tx_height(r.height, tip.height, r.tx_hash),
None => continue,
};
if let Err(failure) = update.insert_tx(txid, tx_height) {
match failure {
sparse_chain::InsertTxError::TxTooHigh { .. } => {
unreachable!("we should never encounter this as we ensured height <= tip");
}
sparse_chain::InsertTxError::TxMovedUnexpectedly { .. } => {
return Err(InternalError::Reorg);
}
}
}
}
Ok(())
}
/// Populate an update [`SparseChain`] with transactions (and associated block positions) from
/// the transaction history of the provided `spk`s.
fn populate_with_spks<K, I, S>(
client: &Client,
update: &mut SparseChain,
spks: &mut S,
stop_gap: usize,
batch_size: usize,
) -> Result<BTreeMap<I, (Script, bool)>, InternalError>
where
K: Ord + Clone,
I: Ord + Clone,
S: Iterator<Item = (I, Script)>,
{
let tip = update.latest_checkpoint().map_or(0, |cp| cp.height);
let mut unused_spk_count = 0_usize;
let mut scanned_spks = BTreeMap::new();
loop {
let spks = (0..batch_size)
.map_while(|_| spks.next())
.collect::<Vec<_>>();
if spks.is_empty() {
return Ok(scanned_spks);
}
let spk_histories = client.batch_script_get_history(spks.iter().map(|(_, s)| s))?;
for ((spk_index, spk), spk_history) in spks.into_iter().zip(spk_histories) {
if spk_history.is_empty() {
scanned_spks.insert(spk_index, (spk, false));
unused_spk_count += 1;
if unused_spk_count > stop_gap {
return Ok(scanned_spks);
}
continue;
} else {
scanned_spks.insert(spk_index, (spk, true));
unused_spk_count = 0;
}
for tx in spk_history {
let tx_height = determine_tx_height(tx.height, tip, tx.tx_hash);
if let Err(failure) = update.insert_tx(tx.tx_hash, tx_height) {
match failure {
sparse_chain::InsertTxError::TxTooHigh { .. } => {
unreachable!(
"we should never encounter this as we ensured height <= tip"
);
}
sparse_chain::InsertTxError::TxMovedUnexpectedly { .. } => {
return Err(InternalError::Reorg);
}
}
}
}
}
}
}

View File

@@ -1,437 +0,0 @@
use bdk_chain::{
bitcoin::{hashes::Hash, Address, Amount, ScriptBuf, Txid, WScriptHash},
local_chain::LocalChain,
spk_client::{FullScanRequest, SyncRequest},
spk_txout::SpkTxOutIndex,
Balance, ConfirmationBlockTime, IndexedTxGraph,
};
use bdk_electrum::BdkElectrumClient;
use bdk_testenv::{anyhow, bitcoincore_rpc::RpcApi, TestEnv};
use std::collections::{BTreeSet, HashSet};
use std::str::FromStr;
fn get_balance(
recv_chain: &LocalChain,
recv_graph: &IndexedTxGraph<ConfirmationBlockTime, SpkTxOutIndex<()>>,
) -> anyhow::Result<Balance> {
let chain_tip = recv_chain.tip().block_id();
let outpoints = recv_graph.index.outpoints().clone();
let balance = recv_graph
.graph()
.balance(recv_chain, chain_tip, outpoints, |_, _| true);
Ok(balance)
}
#[test]
pub fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
let env = TestEnv::new()?;
let electrum_client = electrum_client::Client::new(env.electrsd.electrum_url.as_str())?;
let client = BdkElectrumClient::new(electrum_client);
let receive_address0 =
Address::from_str("bcrt1qc6fweuf4xjvz4x3gx3t9e0fh4hvqyu2qw4wvxm")?.assume_checked();
let receive_address1 =
Address::from_str("bcrt1qfjg5lv3dvc9az8patec8fjddrs4aqtauadnagr")?.assume_checked();
let misc_spks = [
receive_address0.script_pubkey(),
receive_address1.script_pubkey(),
];
let _block_hashes = env.mine_blocks(101, None)?;
let txid1 = env.bitcoind.client.send_to_address(
&receive_address1,
Amount::from_sat(10000),
None,
None,
None,
None,
Some(1),
None,
)?;
let txid2 = env.bitcoind.client.send_to_address(
&receive_address0,
Amount::from_sat(20000),
None,
None,
None,
None,
Some(1),
None,
)?;
env.mine_blocks(1, None)?;
env.wait_until_electrum_sees_block()?;
// use a full checkpoint linked list (since this is not what we are testing)
let cp_tip = env.make_checkpoint_tip();
let sync_update = {
let request = SyncRequest::from_chain_tip(cp_tip.clone()).set_spks(misc_spks);
client.sync(request, 1, true)?
};
assert!(
{
let update_cps = sync_update
.chain_update
.iter()
.map(|cp| cp.block_id())
.collect::<BTreeSet<_>>();
let superset_cps = cp_tip
.iter()
.map(|cp| cp.block_id())
.collect::<BTreeSet<_>>();
superset_cps.is_superset(&update_cps)
},
"update should not alter original checkpoint tip since we already started with all checkpoints",
);
let graph_update = sync_update.graph_update;
// Check to see if we have the floating txouts available from our two created transactions'
// previous outputs in order to calculate transaction fees.
for tx in graph_update.full_txs() {
// Retrieve the calculated fee from `TxGraph`, which will panic if we do not have the
// floating txouts available from the transactions' previous outputs.
let fee = graph_update.calculate_fee(&tx.tx).expect("Fee must exist");
// Retrieve the fee in the transaction data from `bitcoind`.
let tx_fee = env
.bitcoind
.client
.get_transaction(&tx.txid, None)
.expect("Tx must exist")
.fee
.expect("Fee must exist")
.abs()
.to_unsigned()
.expect("valid `Amount`");
// Check that the calculated fee matches the fee from the transaction data.
assert_eq!(fee, tx_fee);
}
let mut graph_update_txids: Vec<Txid> = graph_update.full_txs().map(|tx| tx.txid).collect();
graph_update_txids.sort();
let mut expected_txids = vec![txid1, txid2];
expected_txids.sort();
assert_eq!(graph_update_txids, expected_txids);
Ok(())
}
/// Test the bounds of the address scan depending on the `stop_gap`.
#[test]
pub fn test_update_tx_graph_stop_gap() -> anyhow::Result<()> {
let env = TestEnv::new()?;
let electrum_client = electrum_client::Client::new(env.electrsd.electrum_url.as_str())?;
let client = BdkElectrumClient::new(electrum_client);
let _block_hashes = env.mine_blocks(101, None)?;
// Now let's test the gap limit. First of all get a chain of 10 addresses.
let addresses = [
"bcrt1qj9f7r8r3p2y0sqf4r3r62qysmkuh0fzep473d2ar7rcz64wqvhssjgf0z4",
"bcrt1qmm5t0ch7vh2hryx9ctq3mswexcugqe4atkpkl2tetm8merqkthas3w7q30",
"bcrt1qut9p7ej7l7lhyvekj28xknn8gnugtym4d5qvnp5shrsr4nksmfqsmyn87g",
"bcrt1qqz0xtn3m235p2k96f5wa2dqukg6shxn9n3txe8arlrhjh5p744hsd957ww",
"bcrt1q9c0t62a8l6wfytmf2t9lfj35avadk3mm8g4p3l84tp6rl66m48sqrme7wu",
"bcrt1qkmh8yrk2v47cklt8dytk8f3ammcwa4q7dzattedzfhqzvfwwgyzsg59zrh",
"bcrt1qvgrsrzy07gjkkfr5luplt0azxtfwmwq5t62gum5jr7zwcvep2acs8hhnp2",
"bcrt1qw57edarcg50ansq8mk3guyrk78rk0fwvrds5xvqeupteu848zayq549av8",
"bcrt1qvtve5ekf6e5kzs68knvnt2phfw6a0yjqrlgat392m6zt9jsvyxhqfx67ef",
"bcrt1qw03ddumfs9z0kcu76ln7jrjfdwam20qtffmkcral3qtza90sp9kqm787uk",
];
let addresses: Vec<_> = addresses
.into_iter()
.map(|s| Address::from_str(s).unwrap().assume_checked())
.collect();
let spks: Vec<_> = addresses
.iter()
.enumerate()
.map(|(i, addr)| (i as u32, addr.script_pubkey()))
.collect();
// Then receive coins on the 4th address.
let txid_4th_addr = env.bitcoind.client.send_to_address(
&addresses[3],
Amount::from_sat(10000),
None,
None,
None,
None,
Some(1),
None,
)?;
env.mine_blocks(1, None)?;
env.wait_until_electrum_sees_block()?;
// use a full checkpoint linked list (since this is not what we are testing)
let cp_tip = env.make_checkpoint_tip();
// A scan with a stop_gap of 3 won't find the transaction, but a scan with a gap limit of 4
// will.
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 3, 1, false)?
};
assert!(full_scan_update.graph_update.full_txs().next().is_none());
assert!(full_scan_update.last_active_indices.is_empty());
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 4, 1, false)?
};
assert_eq!(
full_scan_update
.graph_update
.full_txs()
.next()
.unwrap()
.txid,
txid_4th_addr
);
assert_eq!(full_scan_update.last_active_indices[&0], 3);
// Now receive a coin on the last address.
let txid_last_addr = env.bitcoind.client.send_to_address(
&addresses[addresses.len() - 1],
Amount::from_sat(10000),
None,
None,
None,
None,
Some(1),
None,
)?;
env.mine_blocks(1, None)?;
env.wait_until_electrum_sees_block()?;
// A scan with gap limit 5 won't find the second transaction, but a scan with gap limit 6 will.
    // The last active index won't be updated in the first case but will be in the second one.
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 5, 1, false)?
};
let txs: HashSet<_> = full_scan_update
.graph_update
.full_txs()
.map(|tx| tx.txid)
.collect();
assert_eq!(txs.len(), 1);
assert!(txs.contains(&txid_4th_addr));
assert_eq!(full_scan_update.last_active_indices[&0], 3);
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 6, 1, false)?
};
let txs: HashSet<_> = full_scan_update
.graph_update
.full_txs()
.map(|tx| tx.txid)
.collect();
assert_eq!(txs.len(), 2);
assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr));
assert_eq!(full_scan_update.last_active_indices[&0], 9);
Ok(())
}
/// Ensure that [`BdkElectrumClient`] can sync properly.
///
/// 1. Mine 101 blocks.
/// 2. Send a tx.
/// 3. Mine extra block to confirm sent tx.
/// 4. Check [`Balance`] to ensure tx is confirmed.
#[test]
fn scan_detects_confirmed_tx() -> anyhow::Result<()> {
const SEND_AMOUNT: Amount = Amount::from_sat(10_000);
let env = TestEnv::new()?;
let electrum_client = electrum_client::Client::new(env.electrsd.electrum_url.as_str())?;
let client = BdkElectrumClient::new(electrum_client);
// Setup addresses.
let addr_to_mine = env
.bitcoind
.client
.get_new_address(None, None)?
.assume_checked();
let spk_to_track = ScriptBuf::new_p2wsh(&WScriptHash::all_zeros());
let addr_to_track = Address::from_script(&spk_to_track, bdk_chain::bitcoin::Network::Regtest)?;
// Setup receiver.
let (mut recv_chain, _) = LocalChain::from_genesis_hash(env.bitcoind.client.get_block_hash(0)?);
let mut recv_graph = IndexedTxGraph::<ConfirmationBlockTime, _>::new({
let mut recv_index = SpkTxOutIndex::default();
recv_index.insert_spk((), spk_to_track.clone());
recv_index
});
// Mine some blocks.
env.mine_blocks(101, Some(addr_to_mine))?;
// Create transaction that is tracked by our receiver.
env.send(&addr_to_track, SEND_AMOUNT)?;
// Mine a block to confirm sent tx.
env.mine_blocks(1, None)?;
// Sync up to tip.
env.wait_until_electrum_sees_block()?;
let update = client.sync(
SyncRequest::from_chain_tip(recv_chain.tip()).chain_spks(core::iter::once(spk_to_track)),
5,
true,
)?;
let _ = recv_chain
.apply_update(update.chain_update)
.map_err(|err| anyhow::anyhow!("LocalChain update error: {:?}", err))?;
let _ = recv_graph.apply_update(update.graph_update);
// Check to see if tx is confirmed.
assert_eq!(
get_balance(&recv_chain, &recv_graph)?,
Balance {
confirmed: SEND_AMOUNT,
..Balance::default()
},
);
for tx in recv_graph.graph().full_txs() {
// Retrieve the calculated fee from `TxGraph`, which will panic if we do not have the
// floating txouts available from the transaction's previous outputs.
let fee = recv_graph
.graph()
.calculate_fee(&tx.tx)
.expect("fee must exist");
// Retrieve the fee in the transaction data from `bitcoind`.
let tx_fee = env
.bitcoind
.client
.get_transaction(&tx.txid, None)
.expect("Tx must exist")
.fee
.expect("Fee must exist")
.abs()
.to_unsigned()
.expect("valid `Amount`");
// Check that the calculated fee matches the fee from the transaction data.
assert_eq!(fee, tx_fee);
}
Ok(())
}
/// Ensure that confirmed txs that are reorged become unconfirmed.
///
/// 1. Mine 101 blocks.
/// 2. Mine 8 blocks with a confirmed tx in each.
/// 3. Perform 8 separate reorgs on each block with a confirmed tx.
/// 4. Check [`Balance`] after each reorg to ensure unconfirmed amount is correct.
#[test]
fn tx_can_become_unconfirmed_after_reorg() -> anyhow::Result<()> {
const REORG_COUNT: usize = 8;
const SEND_AMOUNT: Amount = Amount::from_sat(10_000);
let env = TestEnv::new()?;
let electrum_client = electrum_client::Client::new(env.electrsd.electrum_url.as_str())?;
let client = BdkElectrumClient::new(electrum_client);
// Setup addresses.
let addr_to_mine = env
.bitcoind
.client
.get_new_address(None, None)?
.assume_checked();
let spk_to_track = ScriptBuf::new_p2wsh(&WScriptHash::all_zeros());
let addr_to_track = Address::from_script(&spk_to_track, bdk_chain::bitcoin::Network::Regtest)?;
// Setup receiver.
let (mut recv_chain, _) = LocalChain::from_genesis_hash(env.bitcoind.client.get_block_hash(0)?);
let mut recv_graph = IndexedTxGraph::<ConfirmationBlockTime, _>::new({
let mut recv_index = SpkTxOutIndex::default();
recv_index.insert_spk((), spk_to_track.clone());
recv_index
});
// Mine some blocks.
env.mine_blocks(101, Some(addr_to_mine))?;
// Create transactions that are tracked by our receiver.
let mut txids = vec![];
let mut hashes = vec![];
for _ in 0..REORG_COUNT {
txids.push(env.send(&addr_to_track, SEND_AMOUNT)?);
hashes.extend(env.mine_blocks(1, None)?);
}
// Sync up to tip.
env.wait_until_electrum_sees_block()?;
let update = client.sync(
SyncRequest::from_chain_tip(recv_chain.tip()).chain_spks([spk_to_track.clone()]),
5,
false,
)?;
let _ = recv_chain
.apply_update(update.chain_update)
.map_err(|err| anyhow::anyhow!("LocalChain update error: {:?}", err))?;
let _ = recv_graph.apply_update(update.graph_update.clone());
// Retain a snapshot of all anchors before reorg process.
let initial_anchors = update.graph_update.all_anchors();
let anchors: Vec<_> = initial_anchors.iter().cloned().collect();
assert_eq!(anchors.len(), REORG_COUNT);
for i in 0..REORG_COUNT {
let (anchor, txid) = anchors[i];
assert_eq!(anchor.block_id.hash, hashes[i]);
assert_eq!(txid, txids[i]);
}
// Check if initial balance is correct.
assert_eq!(
get_balance(&recv_chain, &recv_graph)?,
Balance {
confirmed: SEND_AMOUNT * REORG_COUNT as u64,
..Balance::default()
},
"initial balance must be correct",
);
// Perform reorgs with different depths.
for depth in 1..=REORG_COUNT {
env.reorg_empty_blocks(depth)?;
env.wait_until_electrum_sees_block()?;
let update = client.sync(
SyncRequest::from_chain_tip(recv_chain.tip()).chain_spks([spk_to_track.clone()]),
5,
false,
)?;
let _ = recv_chain
.apply_update(update.chain_update)
.map_err(|err| anyhow::anyhow!("LocalChain update error: {:?}", err))?;
// Check that no new anchors are added during current reorg.
assert!(initial_anchors.is_superset(update.graph_update.all_anchors()));
let _ = recv_graph.apply_update(update.graph_update);
assert_eq!(
get_balance(&recv_chain, &recv_graph)?,
Balance {
confirmed: SEND_AMOUNT * (REORG_COUNT - depth) as u64,
..Balance::default()
},
"reorg_count: {}",
depth,
);
}
Ok(())
}

View File

@@ -1,6 +1,6 @@
[package]
name = "bdk_esplora"
version = "0.16.0"
version = "0.2.0"
edition = "2021"
homepage = "https://bitcoindevkit.org"
repository = "https://github.com/bitcoindevkit/bdk"
@@ -12,23 +12,13 @@ readme = "README.md"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bdk_chain = { path = "../chain", version = "0.17.0", default-features = false }
esplora-client = { version = "0.8.0", default-features = false }
bdk_chain = { path = "../chain", version = "0.4.0", features = ["serde", "miniscript"] }
esplora-client = { version = "0.3", default-features = false }
async-trait = { version = "0.1.66", optional = true }
futures = { version = "0.3.26", optional = true }
bitcoin = { version = "0.32.0", optional = true, default-features = false }
miniscript = { version = "12.0.0", optional = true, default-features = false }
[dev-dependencies]
bdk_testenv = { path = "../testenv", default-features = false }
tokio = { version = "1", features = ["rt", "rt-multi-thread", "macros"] }
[features]
default = ["std", "async-https", "blocking-https-rustls"]
std = ["bdk_chain/std", "miniscript?/std"]
default = ["async-https", "blocking"]
async = ["async-trait", "futures", "esplora-client/async"]
async-https = ["async", "esplora-client/async-https"]
async-https-rustls = ["async", "esplora-client/async-https-rustls"]
blocking = ["esplora-client/blocking"]
blocking-https-rustls = ["esplora-client/blocking-https-rustls"]

View File

@@ -1,6 +1,6 @@
# BDK Esplora
BDK Esplora extends [`esplora-client`] to update [`bdk_chain`] structures
BDK Esplora extends [`esplora_client`](crate::esplora_client) to update [`bdk_chain`] structures
from an Esplora server.
## Usage
@@ -9,17 +9,17 @@ There are two versions of the extension trait (blocking and async).
For blocking-only:
```toml
bdk_esplora = { version = "0.3", features = ["blocking"] }
bdk_esplora = { version = "0.1", features = ["blocking"] }
```
For async-only:
```toml
bdk_esplora = { version = "0.3", features = ["async"] }
bdk_esplora = { version = "0.1", features = ["async"] }
```
For async-only (with https):
```toml
bdk_esplora = { version = "0.3", features = ["async-https"] }
bdk_esplora = { version = "0.1", features = ["async-https"] }
```
To use the extension traits:
@@ -27,10 +27,7 @@ To use the extension traits:
// for blocking
use bdk_esplora::EsploraExt;
// for async
// use bdk_esplora::EsploraAsyncExt;
use bdk_esplora::EsploraAsyncExt;
```
For full examples, refer to [`example-crates/wallet_esplora_blocking`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora_blocking) and [`example-crates/wallet_esplora_async`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora_async).
[`esplora-client`]: https://docs.rs/esplora-client/
[`bdk_chain`]: https://docs.rs/bdk-chain/
For full examples, refer to [`example-crates/wallet_esplora`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora) (blocking) and [`example-crates/wallet_esplora_async`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora_async).

View File

@@ -1,590 +1,316 @@
use std::collections::BTreeSet;
use std::collections::BTreeMap;
use async_trait::async_trait;
use bdk_chain::spk_client::{FullScanRequest, FullScanResult, SyncRequest, SyncResult};
use bdk_chain::{
bitcoin::{BlockHash, OutPoint, ScriptBuf, TxOut, Txid},
collections::BTreeMap,
local_chain::CheckPoint,
BlockId, ConfirmationBlockTime, TxGraph,
bitcoin::{BlockHash, OutPoint, Script, Txid},
chain_graph::ChainGraph,
keychain::KeychainScan,
sparse_chain, BlockId, ConfirmationTime,
};
use bdk_chain::{Anchor, Indexed};
use esplora_client::{Amount, TxStatus};
use futures::{stream::FuturesOrdered, TryStreamExt};
use esplora_client::{Error, OutputStatus};
use futures::stream::{FuturesOrdered, TryStreamExt};
use crate::anchor_from_status;
use crate::map_confirmation_time;
/// [`esplora_client::Error`]
type Error = Box<esplora_client::Error>;
/// Trait to extend the functionality of [`esplora_client::AsyncClient`].
/// Trait to extend [`esplora_client::AsyncClient`] functionality.
///
/// Refer to [crate-level documentation] for more.
/// This is the async version of [`EsploraExt`]. Refer to
/// [crate-level documentation] for more.
///
/// [`EsploraExt`]: crate::EsploraExt
/// [crate-level documentation]: crate
#[cfg(feature = "async")]
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
pub trait EsploraAsyncExt {
/// Scan keychain scripts for transactions against Esplora, returning an update that can be
/// applied to the receiving structures.
    /// Scan the blockchain (via esplora) for the data specified and return a [`KeychainScan`].
///
/// - `request`: struct with data required to perform a spk-based blockchain client full scan,
/// see [`FullScanRequest`]
/// - `local_chain`: the most recent block hashes present locally
/// - `keychain_spks`: keychains that we want to scan transactions for
/// - `txids`: transactions for which we want updated [`ChainPosition`]s
/// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
    ///   want to include in the update
///
/// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no
/// associated transactions. `parallel_requests` specifies the max number of HTTP requests to
/// make in parallel.
/// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
/// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
/// parallel.
///
/// ## Note
///
/// `stop_gap` is defined as "the maximum number of consecutive unused addresses".
/// For example, with a `stop_gap` of 3, `full_scan` will keep scanning
/// until it encounters 3 consecutive script pubkeys with no associated transactions.
///
/// This follows the same approach as other Bitcoin-related software,
/// such as [Electrum](https://electrum.readthedocs.io/en/latest/faq.html#what-is-the-gap-limit),
/// [BTCPay Server](https://docs.btcpayserver.org/FAQ/Wallet/#the-gap-limit-problem),
/// and [Sparrow](https://www.sparrowwallet.com/docs/faq.html#ive-restored-my-wallet-but-some-of-my-funds-are-missing).
///
/// A `stop_gap` of 0 will be treated as a `stop_gap` of 1.
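    ///
    /// A minimal async call sketch (`request` and the parameter values are illustrative):
    ///
    /// ```ignore
    /// let res = client.full_scan(request, /* stop_gap */ 20, /* parallel_requests */ 5).await?;
    /// ```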
async fn full_scan<K: Ord + Clone + Send>(
/// [`ChainPosition`]: bdk_chain::sparse_chain::ChainPosition
#[allow(clippy::result_large_err)] // FIXME
async fn scan<K: Ord + Clone + Send>(
&self,
request: FullScanRequest<K>,
local_chain: &BTreeMap<u32, BlockHash>,
keychain_spks: BTreeMap<
K,
impl IntoIterator<IntoIter = impl Iterator<Item = (u32, Script)> + Send> + Send,
>,
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
stop_gap: usize,
parallel_requests: usize,
) -> Result<FullScanResult<K>, Error>;
) -> Result<KeychainScan<K, ConfirmationTime>, Error>;
    /// Sync a set of scripts with the blockchain (via an Esplora client) for the data
    /// specified and return a [`SyncResult`].
/// Convenience method to call [`scan`] without requiring a keychain.
///
/// - `request`: struct with data required to perform a spk-based blockchain client sync, see
/// [`SyncRequest`]
///
/// If the scripts to sync are unknown, such as when restoring or importing a keychain that
/// may include scripts that have been used, use [`full_scan`] with the keychain.
///
/// [`full_scan`]: EsploraAsyncExt::full_scan
async fn sync(
/// [`scan`]: EsploraAsyncExt::scan
#[allow(clippy::result_large_err)] // FIXME
async fn scan_without_keychain(
&self,
request: SyncRequest,
local_chain: &BTreeMap<u32, BlockHash>,
misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = Script> + Send> + Send,
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
parallel_requests: usize,
) -> Result<SyncResult, Error>;
) -> Result<ChainGraph<ConfirmationTime>, Error> {
let wallet_scan = self
.scan(
local_chain,
[(
(),
misc_spks
.into_iter()
.enumerate()
.map(|(i, spk)| (i as u32, spk)),
)]
.into(),
txids,
outpoints,
usize::MAX,
parallel_requests,
)
.await?;
Ok(wallet_scan.update)
}
}
#[cfg(feature = "async")]
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
impl EsploraAsyncExt for esplora_client::AsyncClient {
async fn full_scan<K: Ord + Clone + Send>(
#[allow(clippy::result_large_err)] // FIXME
async fn scan<K: Ord + Clone + Send>(
&self,
request: FullScanRequest<K>,
local_chain: &BTreeMap<u32, BlockHash>,
keychain_spks: BTreeMap<
K,
impl IntoIterator<IntoIter = impl Iterator<Item = (u32, Script)> + Send> + Send,
>,
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
stop_gap: usize,
parallel_requests: usize,
) -> Result<FullScanResult<K>, Error> {
let latest_blocks = fetch_latest_blocks(self).await?;
let (graph_update, last_active_indices) = full_scan_for_index_and_graph(
self,
request.spks_by_keychain,
stop_gap,
parallel_requests,
)
.await?;
let chain_update = chain_update(
self,
&latest_blocks,
&request.chain_tip,
graph_update.all_anchors(),
)
.await?;
Ok(FullScanResult {
chain_update,
graph_update,
last_active_indices,
})
}
) -> Result<KeychainScan<K, ConfirmationTime>, Error> {
let txids = txids.into_iter();
let outpoints = outpoints.into_iter();
let parallel_requests = parallel_requests.max(1);
let mut scan = KeychainScan::default();
let update = &mut scan.update;
let last_active_indices = &mut scan.last_active_indices;
async fn sync(
&self,
request: SyncRequest,
parallel_requests: usize,
) -> Result<SyncResult, Error> {
let latest_blocks = fetch_latest_blocks(self).await?;
let graph_update = sync_for_index_and_graph(
self,
request.spks,
request.txids,
request.outpoints,
parallel_requests,
)
.await?;
let chain_update = chain_update(
self,
&latest_blocks,
&request.chain_tip,
graph_update.all_anchors(),
)
.await?;
Ok(SyncResult {
chain_update,
graph_update,
})
}
}
/// Fetch latest blocks from Esplora in an atomic call.
///
/// We want to do this before fetching transactions and anchors as we cannot fetch latest blocks AND
/// transactions atomically, and the checkpoint tip is used to determine the last-scanned block (for
/// block-based chain-sources). Therefore it's better to be conservative when setting the tip (use
/// an earlier tip rather than a later tip); otherwise the caller may accidentally skip blocks when
/// alternating between chain-sources.
async fn fetch_latest_blocks(
client: &esplora_client::AsyncClient,
) -> Result<BTreeMap<u32, BlockHash>, Error> {
Ok(client
.get_blocks(None)
.await?
.into_iter()
.map(|b| (b.time.height, b.id))
.collect())
}
/// Used instead of [`esplora_client::AsyncClient::get_block_hash`].
///
/// This first checks the previously fetched `latest_blocks` before fetching from Esplora again.
async fn fetch_block(
client: &esplora_client::AsyncClient,
latest_blocks: &BTreeMap<u32, BlockHash>,
height: u32,
) -> Result<Option<BlockHash>, Error> {
if let Some(&hash) = latest_blocks.get(&height) {
return Ok(Some(hash));
}
// We avoid fetching blocks higher than previously fetched `latest_blocks` as the local chain
// tip is used to signal for the last-synced-up-to-height.
let &tip_height = latest_blocks
.keys()
.last()
.expect("must have atleast one entry");
if height > tip_height {
return Ok(None);
}
Ok(Some(client.get_block_hash(height).await?))
}
/// Create the [`local_chain::Update`].
///
/// We want to have a corresponding checkpoint per anchor height. However, checkpoints fetched
/// should not surpass `latest_blocks`.
async fn chain_update<A: Anchor>(
client: &esplora_client::AsyncClient,
latest_blocks: &BTreeMap<u32, BlockHash>,
local_tip: &CheckPoint,
anchors: &BTreeSet<(A, Txid)>,
) -> Result<CheckPoint, Error> {
let mut point_of_agreement = None;
let mut conflicts = vec![];
for local_cp in local_tip.iter() {
let remote_hash = match fetch_block(client, latest_blocks, local_cp.height()).await? {
Some(hash) => hash,
None => continue,
};
if remote_hash == local_cp.hash() {
point_of_agreement = Some(local_cp.clone());
break;
} else {
// it is not strictly necessary to include all the conflicted heights (we do need the
            // first one) but it seems prudent to make sure the updated chain's heights are a
            // superset of the existing chain after update.
            conflicts.push(BlockId {
                height: local_cp.height(),
                hash: remote_hash,
            });
        }
    }

    let mut tip = point_of_agreement.expect("remote esplora should have same genesis block");

    tip = tip
        .extend(conflicts.into_iter().rev())
        .expect("evicted are in order");

    for anchor in anchors {
        let height = anchor.0.anchor_block().height;
        if tip.get(height).is_none() {
            let hash = match fetch_block(client, latest_blocks, height).await? {
                Some(hash) => hash,
                None => continue,
            };
            tip = tip.insert(BlockId { height, hash });
        }
    }

    // insert the most recent blocks at the tip to make sure we update the tip and make the update
    // robust.
    for (&height, &hash) in latest_blocks.iter() {
        tip = tip.insert(BlockId { height, hash });
    }

    Ok(tip)
}

    for (&height, &original_hash) in local_chain.iter().rev() {
        let update_block_id = BlockId {
            height,
            hash: self.get_block_hash(height).await?,
        };
        let _ = update
            .insert_checkpoint(update_block_id)
            .expect("cannot repeat height here");
        if update_block_id.hash == original_hash {
            break;
        }
    }

    let tip_at_start = BlockId {
        height: self.get_height().await?,
        hash: self.get_tip_hash().await?,
    };
    if let Err(failure) = update.insert_checkpoint(tip_at_start) {
        match failure {
            sparse_chain::InsertCheckpointError::HashNotMatching { .. } => {
                // there was a re-org before we started scanning. We haven't consumed any iterators, so calling this function recursively is safe.
                return EsploraAsyncExt::scan(
                    self,
                    local_chain,
                    keychain_spks,
                    txids,
                    outpoints,
                    stop_gap,
                    parallel_requests,
                )
                .await;
            }
        }
    }

    for (keychain, spks) in keychain_spks {
        let mut spks = spks.into_iter();
        let mut last_active_index = None;
        let mut empty_scripts = 0;
        type IndexWithTxs = (u32, Vec<esplora_client::Tx>);

        loop {
            let futures: FuturesOrdered<_> = (0..parallel_requests)
                .filter_map(|_| {
                    let (index, script) = spks.next()?;
                    let client = self.clone();
                    Some(async move {
                        let mut related_txs = client.scripthash_txs(&script, None).await?;

                        let n_confirmed =
                            related_txs.iter().filter(|tx| tx.status.confirmed).count();
                        // esplora pages on 25 confirmed transactions. If there are 25 or more we
                        // keep requesting to see if there's more.
                        if n_confirmed >= 25 {
                            loop {
                                let new_related_txs = client
                                    .scripthash_txs(
                                        &script,
                                        Some(related_txs.last().unwrap().txid),
                                    )
                                    .await?;
                                let n = new_related_txs.len();
                                related_txs.extend(new_related_txs);
                                // we've reached the end
                                if n < 25 {
                                    break;
                                }
                            }
                        }

                        Result::<_, esplora_client::Error>::Ok((index, related_txs))
                    })
                })
                .collect();

            let n_futures = futures.len();

            let idx_with_tx: Vec<IndexWithTxs> = futures.try_collect().await?;

            for (index, related_txs) in idx_with_tx {
                if related_txs.is_empty() {
                    empty_scripts += 1;
                } else {
                    last_active_index = Some(index);
                    empty_scripts = 0;
                }
                for tx in related_txs {
                    let confirmation_time =
                        map_confirmation_time(&tx.status, tip_at_start.height);

                    if let Err(failure) = update.insert_tx(tx.to_tx(), confirmation_time) {
                        use bdk_chain::{
                            chain_graph::InsertTxError, sparse_chain::InsertTxError::*,
                        };
                        match failure {
                            InsertTxError::Chain(TxTooHigh { .. }) => {
                                unreachable!("chain position already checked earlier")
                            }
                            InsertTxError::Chain(TxMovedUnexpectedly { .. })
                            | InsertTxError::UnresolvableConflict(_) => {
                                /* implies reorg during a scan. We deal with that below */
                            }
                        }
                    }
                }
            }

            if n_futures == 0 || empty_scripts >= stop_gap {
                break;
            }
        }

        if let Some(last_active_index) = last_active_index {
            last_active_indices.insert(keychain, last_active_index);
        }
    }

    for txid in txids {
        let (tx, tx_status) =
            match (self.get_tx(&txid).await?, self.get_tx_status(&txid).await?) {
                (Some(tx), Some(tx_status)) => (tx, tx_status),
                _ => continue,
            };

        let confirmation_time = map_confirmation_time(&tx_status, tip_at_start.height);

        if let Err(failure) = update.insert_tx(tx, confirmation_time) {
            use bdk_chain::{chain_graph::InsertTxError, sparse_chain::InsertTxError::*};
            match failure {
                InsertTxError::Chain(TxTooHigh { .. }) => {
                    unreachable!("chain position already checked earlier")
                }
                InsertTxError::Chain(TxMovedUnexpectedly { .. })
                | InsertTxError::UnresolvableConflict(_) => {
                    /* implies reorg during a scan. We deal with that below */
                }
            }
        }
    }

    for op in outpoints {
        let mut op_txs = Vec::with_capacity(2);
        if let (Some(tx), Some(tx_status)) = (
            self.get_tx(&op.txid).await?,
            self.get_tx_status(&op.txid).await?,
        ) {
            op_txs.push((tx, tx_status));
            if let Some(OutputStatus {
                txid: Some(txid),
                status: Some(spend_status),
                ..
            }) = self.get_output_status(&op.txid, op.vout as _).await?
            {
                if let Some(spend_tx) = self.get_tx(&txid).await? {
                    op_txs.push((spend_tx, spend_status));
                }
            }
        }

        for (tx, status) in op_txs {
            let confirmation_time = map_confirmation_time(&status, tip_at_start.height);

            if let Err(failure) = update.insert_tx(tx, confirmation_time) {
                use bdk_chain::{chain_graph::InsertTxError, sparse_chain::InsertTxError::*};
                match failure {
                    InsertTxError::Chain(TxTooHigh { .. }) => {
                        unreachable!("chain position already checked earlier")
                    }
                    InsertTxError::Chain(TxMovedUnexpectedly { .. })
                    | InsertTxError::UnresolvableConflict(_) => {
                        /* implies reorg during a scan. We deal with that below */
                    }
                }
            }
        }
    }

    let reorg_occurred = {
        if let Some(checkpoint) = update.chain().latest_checkpoint() {
            self.get_block_hash(checkpoint.height).await? != checkpoint.hash
        } else {
            false
        }
    };

    if reorg_occurred {
        // A reorg occurred, so let's find out where all the txids we found are in the chain now.
        // XXX: collect required because of weird type naming issues
        let txids_found = update
            .chain()
            .txids()
            .map(|(_, txid)| *txid)
            .collect::<Vec<_>>();
        scan.update = EsploraAsyncExt::scan_without_keychain(
            self,
            local_chain,
            [],
            txids_found,
            [],
            parallel_requests,
        )
        .await?;
    }

    Ok(scan)
}

/// This performs a full scan to get an update for the [`TxGraph`] and
/// [`KeychainTxOutIndex`](bdk_chain::indexer::keychain_txout::KeychainTxOutIndex).
async fn full_scan_for_index_and_graph<K: Ord + Clone + Send>(
    client: &esplora_client::AsyncClient,
    keychain_spks: BTreeMap<
        K,
        impl IntoIterator<IntoIter = impl Iterator<Item = Indexed<ScriptBuf>> + Send> + Send,
    >,
    stop_gap: usize,
    parallel_requests: usize,
) -> Result<(TxGraph<ConfirmationBlockTime>, BTreeMap<K, u32>), Error> {
    type TxsOfSpkIndex = (u32, Vec<esplora_client::Tx>);
    let parallel_requests = Ord::max(parallel_requests, 1);
    let mut graph = TxGraph::<ConfirmationBlockTime>::default();
    let mut last_active_indexes = BTreeMap::<K, u32>::new();

    for (keychain, spks) in keychain_spks {
        let mut spks = spks.into_iter();
        let mut last_index = Option::<u32>::None;
        let mut last_active_index = Option::<u32>::None;

        loop {
            let handles = spks
                .by_ref()
                .take(parallel_requests)
                .map(|(spk_index, spk)| {
                    let client = client.clone();
                    async move {
                        let mut last_seen = None;
                        let mut spk_txs = Vec::new();
                        loop {
                            let txs = client.scripthash_txs(&spk, last_seen).await?;
                            let tx_count = txs.len();
                            last_seen = txs.last().map(|tx| tx.txid);
                            spk_txs.extend(txs);
                            if tx_count < 25 {
                                break Result::<_, Error>::Ok((spk_index, spk_txs));
                            }
                        }
                    }
                })
                .collect::<FuturesOrdered<_>>();

            if handles.is_empty() {
                break;
            }

            for (index, txs) in handles.try_collect::<Vec<TxsOfSpkIndex>>().await? {
                last_index = Some(index);
                if !txs.is_empty() {
                    last_active_index = Some(index);
                }
                for tx in txs {
                    let _ = graph.insert_tx(tx.to_tx());
                    if let Some(anchor) = anchor_from_status(&tx.status) {
                        let _ = graph.insert_anchor(tx.txid, anchor);
                    }

                    let previous_outputs = tx.vin.iter().filter_map(|vin| {
                        let prevout = vin.prevout.as_ref()?;
                        Some((
                            OutPoint {
                                txid: vin.txid,
                                vout: vin.vout,
                            },
                            TxOut {
                                script_pubkey: prevout.scriptpubkey.clone(),
                                value: Amount::from_sat(prevout.value),
                            },
                        ))
                    });

                    for (outpoint, txout) in previous_outputs {
                        let _ = graph.insert_txout(outpoint, txout);
                    }
                }
            }

            let last_index = last_index.expect("Must be set since handles wasn't empty.");
            let gap_limit_reached = if let Some(i) = last_active_index {
                last_index >= i.saturating_add(stop_gap as u32)
            } else {
                last_index + 1 >= stop_gap as u32
            };
            if gap_limit_reached {
                break;
            }
        }

        if let Some(last_active_index) = last_active_index {
            last_active_indexes.insert(keychain, last_active_index);
        }
    }

    Ok((graph, last_active_indexes))
}
async fn sync_for_index_and_graph(
client: &esplora_client::AsyncClient,
misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
parallel_requests: usize,
) -> Result<TxGraph<ConfirmationBlockTime>, Error> {
let mut graph = full_scan_for_index_and_graph(
client,
[(
(),
misc_spks
.into_iter()
.enumerate()
.map(|(i, spk)| (i as u32, spk)),
)]
.into(),
usize::MAX,
parallel_requests,
)
.await
.map(|(g, _)| g)?;
let mut txids = txids.into_iter();
loop {
let handles = txids
.by_ref()
.take(parallel_requests)
.filter(|&txid| graph.get_tx(txid).is_none())
.map(|txid| {
let client = client.clone();
async move { client.get_tx_status(&txid).await.map(|s| (txid, s)) }
})
.collect::<FuturesOrdered<_>>();
if handles.is_empty() {
break;
}
for (txid, status) in handles.try_collect::<Vec<(Txid, TxStatus)>>().await? {
if let Some(anchor) = anchor_from_status(&status) {
let _ = graph.insert_anchor(txid, anchor);
}
}
}
for op in outpoints.into_iter() {
if graph.get_tx(op.txid).is_none() {
if let Some(tx) = client.get_tx(&op.txid).await? {
let _ = graph.insert_tx(tx);
}
let status = client.get_tx_status(&op.txid).await?;
if let Some(anchor) = anchor_from_status(&status) {
let _ = graph.insert_anchor(op.txid, anchor);
}
}
if let Some(op_status) = client.get_output_status(&op.txid, op.vout as _).await? {
if let Some(txid) = op_status.txid {
if graph.get_tx(txid).is_none() {
if let Some(tx) = client.get_tx(&txid).await? {
let _ = graph.insert_tx(tx);
}
let status = client.get_tx_status(&txid).await?;
if let Some(anchor) = anchor_from_status(&status) {
let _ = graph.insert_anchor(txid, anchor);
}
}
}
}
}
Ok(graph)
}
#[cfg(test)]
mod test {
use std::{collections::BTreeSet, time::Duration};
use bdk_chain::{
bitcoin::{hashes::Hash, Txid},
local_chain::LocalChain,
BlockId,
};
use bdk_testenv::{anyhow, bitcoincore_rpc::RpcApi, TestEnv};
use esplora_client::Builder;
use crate::async_ext::{chain_update, fetch_latest_blocks};
macro_rules! h {
($index:literal) => {{
bdk_chain::bitcoin::hashes::Hash::hash($index.as_bytes())
}};
}
/// Ensure that update does not remove heights (from original), and all anchor heights are included.
#[tokio::test]
pub async fn test_finalize_chain_update() -> anyhow::Result<()> {
struct TestCase<'a> {
name: &'a str,
/// Initial blockchain height to start the env with.
initial_env_height: u32,
/// Initial checkpoint heights to start with.
initial_cps: &'a [u32],
/// The final blockchain height of the env.
final_env_height: u32,
/// The anchors to test with: `(height, txid)`. Only the height is provided as we can fetch
/// the blockhash from the env.
anchors: &'a [(u32, Txid)],
}
let test_cases = [
TestCase {
name: "chain_extends",
initial_env_height: 60,
initial_cps: &[59, 60],
final_env_height: 90,
anchors: &[],
},
TestCase {
name: "introduce_older_heights",
initial_env_height: 50,
initial_cps: &[10, 15],
final_env_height: 50,
anchors: &[(11, h!("A")), (14, h!("B"))],
},
TestCase {
name: "introduce_older_heights_after_chain_extends",
initial_env_height: 50,
initial_cps: &[10, 15],
final_env_height: 100,
anchors: &[(11, h!("A")), (14, h!("B"))],
},
];
for (i, t) in test_cases.into_iter().enumerate() {
println!("[{}] running test case: {}", i, t.name);
let env = TestEnv::new()?;
let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
let client = Builder::new(base_url.as_str()).build_async()?;
// set env to `initial_env_height`
if let Some(to_mine) = t
.initial_env_height
.checked_sub(env.make_checkpoint_tip().height())
{
env.mine_blocks(to_mine as _, None)?;
}
while client.get_height().await? < t.initial_env_height {
std::thread::sleep(Duration::from_millis(10));
}
// craft initial `local_chain`
let local_chain = {
let (mut chain, _) = LocalChain::from_genesis_hash(env.genesis_hash()?);
// force `chain_update` to add all checkpoints in `t.initial_cps`
let anchors = t
.initial_cps
.iter()
.map(|&height| -> anyhow::Result<_> {
Ok((
BlockId {
height,
hash: env.bitcoind.client.get_block_hash(height as _)?,
},
Txid::all_zeros(),
))
})
.collect::<anyhow::Result<BTreeSet<_>>>()?;
let update = chain_update(
&client,
&fetch_latest_blocks(&client).await?,
&chain.tip(),
&anchors,
)
.await?;
chain.apply_update(update)?;
chain
};
println!("local chain height: {}", local_chain.tip().height());
// extend env chain
if let Some(to_mine) = t
.final_env_height
.checked_sub(env.make_checkpoint_tip().height())
{
env.mine_blocks(to_mine as _, None)?;
}
while client.get_height().await? < t.final_env_height {
std::thread::sleep(Duration::from_millis(10));
}
// craft update
let update = {
let anchors = t
.anchors
.iter()
.map(|&(height, txid)| -> anyhow::Result<_> {
Ok((
BlockId {
height,
hash: env.bitcoind.client.get_block_hash(height as _)?,
},
txid,
))
})
.collect::<anyhow::Result<_>>()?;
chain_update(
&client,
&fetch_latest_blocks(&client).await?,
&local_chain.tip(),
&anchors,
)
.await?
};
// apply update
let mut updated_local_chain = local_chain.clone();
updated_local_chain.apply_update(update)?;
println!(
"updated local chain height: {}",
updated_local_chain.tip().height()
);
assert!(
{
let initial_heights = local_chain
.iter_checkpoints()
.map(|cp| cp.height())
.collect::<BTreeSet<_>>();
let updated_heights = updated_local_chain
.iter_checkpoints()
.map(|cp| cp.height())
.collect::<BTreeSet<_>>();
updated_heights.is_superset(&initial_heights)
},
"heights from the initial chain must all be in the updated chain",
);
assert!(
{
let exp_anchor_heights = t
.anchors
.iter()
.map(|(h, _)| *h)
.chain(t.initial_cps.iter().copied())
.collect::<BTreeSet<_>>();
let anchor_heights = updated_local_chain
.iter_checkpoints()
.map(|cp| cp.height())
.collect::<BTreeSet<_>>();
anchor_heights.is_superset(&exp_anchor_heights)
},
"anchor heights must all be in updated chain",
);
}
Ok(())
}
}
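The stop-gap rule in `full_scan_for_index_and_graph` above is easy to misread, so here is a minimal sketch of the same condition in isolation (the helper name `gap_limit_reached` is ours, not part of the diff):

/// Sketch of the stop condition used above: scanning stops once `stop_gap`
/// scripts past the last active index have no history (or, if no script had
/// any history at all, once `stop_gap` scripts have been checked).
fn gap_limit_reached(last_index: u32, last_active_index: Option<u32>, stop_gap: usize) -> bool {
    match last_active_index {
        Some(i) => last_index >= i.saturating_add(stop_gap as u32),
        None => last_index + 1 >= stop_gap as u32,
    }
}

#[test]
fn gap_limit_examples() {
    // With stop_gap = 3 and no activity anywhere, indices 0, 1 and 2 suffice to stop.
    assert!(gap_limit_reached(2, None, 3));
    // With activity at index 3, index 5 is still within the gap; index 6 stops the scan.
    assert!(!gap_limit_reached(5, Some(3), 3));
    assert!(gap_limit_reached(6, Some(3), 3));
}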

File diff suppressed because it is too large.

View File

@@ -1,22 +1,5 @@
#![doc = include_str!("../README.md")]
//! This crate is used for updating structures of [`bdk_chain`] with data from an Esplora server.
//!
//! The two primary methods are [`EsploraExt::sync`] and [`EsploraExt::full_scan`]. In most cases
//! [`EsploraExt::sync`] is used to sync the transaction histories of scripts that the application
//! cares about, for example the scripts for all the receive addresses of a Wallet's keychain that it
//! has shown a user. [`EsploraExt::full_scan`] is meant to be used when importing or restoring a
//! keychain where the range of possibly used scripts is not known. In this case it is necessary to
//! scan all keychain scripts until a number (the "stop gap") of unused scripts is discovered. For a
//! sync or full scan the user receives relevant blockchain data and output updates for [`bdk_chain`]
//! via a new [`TxGraph`] to be appended to any existing [`TxGraph`] data.
//!
//! Refer to [`example_esplora`] for a complete example.
//!
//! [`TxGraph`]: bdk_chain::tx_graph::TxGraph
//! [`example_esplora`]: https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_esplora
use bdk_chain::{BlockId, ConfirmationBlockTime};
use bdk_chain::ConfirmationTime;
use esplora_client::TxStatus;
pub use esplora_client;
@@ -31,19 +14,14 @@ mod async_ext;
#[cfg(feature = "async")]
pub use async_ext::*;
fn anchor_from_status(status: &TxStatus) -> Option<ConfirmationBlockTime> {
if let TxStatus {
block_height: Some(height),
block_hash: Some(hash),
block_time: Some(time),
..
} = status.clone()
{
Some(ConfirmationBlockTime {
block_id: BlockId { height, hash },
confirmation_time: time,
})
} else {
None
}
}

pub(crate) fn map_confirmation_time(
tx_status: &TxStatus,
height_at_start: u32,
) -> ConfirmationTime {
match (tx_status.block_time, tx_status.block_height) {
(Some(time), Some(height)) if height <= height_at_start => {
ConfirmationTime::Confirmed { height, time }
}
_ => ConfirmationTime::Unconfirmed,
}
}
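As a quick illustration of the mapping above: a transaction confirmed at a height greater than the height observed at scan start is still reported as unconfirmed. A minimal sketch, assuming `TxStatus` can be constructed field-by-field as in `esplora_client`:

use esplora_client::TxStatus;

#[test]
fn confirmation_time_mapping() {
    // Hypothetical status: confirmed at height 100 with a known block time.
    let status = TxStatus {
        confirmed: true,
        block_height: Some(100),
        block_hash: None,
        block_time: Some(1_700_000_000),
    };
    // Height 100 is above the height at scan start (99), so it maps to Unconfirmed.
    assert!(matches!(
        map_confirmation_time(&status, 99),
        ConfirmationTime::Unconfirmed
    ));
    // Once the height at scan start reaches 100, the same status maps to Confirmed.
    assert!(matches!(
        map_confirmation_time(&status, 100),
        ConfirmationTime::Confirmed { height: 100, .. }
    ));
}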

View File

@@ -1,231 +0,0 @@
use bdk_chain::spk_client::{FullScanRequest, SyncRequest};
use bdk_esplora::EsploraAsyncExt;
use esplora_client::{self, Builder};
use std::collections::{BTreeSet, HashSet};
use std::str::FromStr;
use std::thread::sleep;
use std::time::Duration;
use bdk_chain::bitcoin::{Address, Amount, Txid};
use bdk_testenv::{anyhow, bitcoincore_rpc::RpcApi, TestEnv};
#[tokio::test]
pub async fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
let env = TestEnv::new()?;
let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
let client = Builder::new(base_url.as_str()).build_async()?;
let receive_address0 =
Address::from_str("bcrt1qc6fweuf4xjvz4x3gx3t9e0fh4hvqyu2qw4wvxm")?.assume_checked();
let receive_address1 =
Address::from_str("bcrt1qfjg5lv3dvc9az8patec8fjddrs4aqtauadnagr")?.assume_checked();
let misc_spks = [
receive_address0.script_pubkey(),
receive_address1.script_pubkey(),
];
let _block_hashes = env.mine_blocks(101, None)?;
let txid1 = env.bitcoind.client.send_to_address(
&receive_address1,
Amount::from_sat(10000),
None,
None,
None,
None,
Some(1),
None,
)?;
let txid2 = env.bitcoind.client.send_to_address(
&receive_address0,
Amount::from_sat(20000),
None,
None,
None,
None,
Some(1),
None,
)?;
let _block_hashes = env.mine_blocks(1, None)?;
while client.get_height().await.unwrap() < 102 {
sleep(Duration::from_millis(10))
}
// use a full checkpoint linked list (since this is not what we are testing)
let cp_tip = env.make_checkpoint_tip();
let sync_update = {
let request = SyncRequest::from_chain_tip(cp_tip.clone()).set_spks(misc_spks);
client.sync(request, 1).await?
};
assert!(
{
let update_cps = sync_update
.chain_update
.iter()
.map(|cp| cp.block_id())
.collect::<BTreeSet<_>>();
let superset_cps = cp_tip
.iter()
.map(|cp| cp.block_id())
.collect::<BTreeSet<_>>();
superset_cps.is_superset(&update_cps)
},
"update should not alter original checkpoint tip since we already started with all checkpoints",
);
let graph_update = sync_update.graph_update;
// Check to see if we have the floating txouts available from our two created transactions'
// previous outputs in order to calculate transaction fees.
for tx in graph_update.full_txs() {
// Retrieve the calculated fee from `TxGraph`, which will panic if we do not have the
// floating txouts available from the transactions' previous outputs.
let fee = graph_update.calculate_fee(&tx.tx).expect("Fee must exist");
// Retrieve the fee in the transaction data from `bitcoind`.
let tx_fee = env
.bitcoind
.client
.get_transaction(&tx.txid, None)
.expect("Tx must exist")
.fee
.expect("Fee must exist")
.abs()
.to_unsigned()
.expect("valid `Amount`");
// Check that the calculated fee matches the fee from the transaction data.
assert_eq!(fee, tx_fee);
}
let mut graph_update_txids: Vec<Txid> = graph_update.full_txs().map(|tx| tx.txid).collect();
graph_update_txids.sort();
let mut expected_txids = vec![txid1, txid2];
expected_txids.sort();
assert_eq!(graph_update_txids, expected_txids);
Ok(())
}
/// Test the bounds of the address scan depending on the `stop_gap`.
#[tokio::test]
pub async fn test_async_update_tx_graph_stop_gap() -> anyhow::Result<()> {
let env = TestEnv::new()?;
let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
let client = Builder::new(base_url.as_str()).build_async()?;
let _block_hashes = env.mine_blocks(101, None)?;
// Now let's test the gap limit. First of all get a chain of 10 addresses.
let addresses = [
"bcrt1qj9f7r8r3p2y0sqf4r3r62qysmkuh0fzep473d2ar7rcz64wqvhssjgf0z4",
"bcrt1qmm5t0ch7vh2hryx9ctq3mswexcugqe4atkpkl2tetm8merqkthas3w7q30",
"bcrt1qut9p7ej7l7lhyvekj28xknn8gnugtym4d5qvnp5shrsr4nksmfqsmyn87g",
"bcrt1qqz0xtn3m235p2k96f5wa2dqukg6shxn9n3txe8arlrhjh5p744hsd957ww",
"bcrt1q9c0t62a8l6wfytmf2t9lfj35avadk3mm8g4p3l84tp6rl66m48sqrme7wu",
"bcrt1qkmh8yrk2v47cklt8dytk8f3ammcwa4q7dzattedzfhqzvfwwgyzsg59zrh",
"bcrt1qvgrsrzy07gjkkfr5luplt0azxtfwmwq5t62gum5jr7zwcvep2acs8hhnp2",
"bcrt1qw57edarcg50ansq8mk3guyrk78rk0fwvrds5xvqeupteu848zayq549av8",
"bcrt1qvtve5ekf6e5kzs68knvnt2phfw6a0yjqrlgat392m6zt9jsvyxhqfx67ef",
"bcrt1qw03ddumfs9z0kcu76ln7jrjfdwam20qtffmkcral3qtza90sp9kqm787uk",
];
let addresses: Vec<_> = addresses
.into_iter()
.map(|s| Address::from_str(s).unwrap().assume_checked())
.collect();
let spks: Vec<_> = addresses
.iter()
.enumerate()
.map(|(i, addr)| (i as u32, addr.script_pubkey()))
.collect();
// Then receive coins on the 4th address.
let txid_4th_addr = env.bitcoind.client.send_to_address(
&addresses[3],
Amount::from_sat(10000),
None,
None,
None,
None,
Some(1),
None,
)?;
let _block_hashes = env.mine_blocks(1, None)?;
while client.get_height().await.unwrap() < 103 {
sleep(Duration::from_millis(10))
}
// use a full checkpoint linked list (since this is not what we are testing)
let cp_tip = env.make_checkpoint_tip();
// A scan with a gap limit of 3 won't find the transaction, but a scan with a gap limit of 4
// will.
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 3, 1).await?
};
assert!(full_scan_update.graph_update.full_txs().next().is_none());
assert!(full_scan_update.last_active_indices.is_empty());
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 4, 1).await?
};
assert_eq!(
full_scan_update
.graph_update
.full_txs()
.next()
.unwrap()
.txid,
txid_4th_addr
);
assert_eq!(full_scan_update.last_active_indices[&0], 3);
// Now receive a coin on the last address.
let txid_last_addr = env.bitcoind.client.send_to_address(
&addresses[addresses.len() - 1],
Amount::from_sat(10000),
None,
None,
None,
None,
Some(1),
None,
)?;
let _block_hashes = env.mine_blocks(1, None)?;
while client.get_height().await.unwrap() < 104 {
sleep(Duration::from_millis(10))
}
// A scan with gap limit 5 won't find the second transaction, but a scan with gap limit 6 will.
// The last active index won't be updated in the first case but will be in the second one.
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 5, 1).await?
};
let txs: HashSet<_> = full_scan_update
.graph_update
.full_txs()
.map(|tx| tx.txid)
.collect();
assert_eq!(txs.len(), 1);
assert!(txs.contains(&txid_4th_addr));
assert_eq!(full_scan_update.last_active_indices[&0], 3);
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 6, 1).await?
};
let txs: HashSet<_> = full_scan_update
.graph_update
.full_txs()
.map(|tx| tx.txid)
.collect();
assert_eq!(txs.len(), 2);
assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr));
assert_eq!(full_scan_update.last_active_indices[&0], 9);
Ok(())
}
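The fee assertion above only works because the scan inserted each input's previous output as a floating `TxOut` into the graph; the fee itself is plain input-minus-output arithmetic. A sketch with hypothetical values:

use bdk_chain::bitcoin::Amount;

#[test]
fn fee_is_inputs_minus_outputs() {
    // Values of the previous outputs being spent; these are only known to the
    // graph because the floating txouts were inserted during the scan.
    let input_value = Amount::from_sat(10_000) + Amount::from_sat(20_000);
    // Values of the transaction's own outputs.
    let output_value = Amount::from_sat(25_000) + Amount::from_sat(4_000);
    // The difference is the miner fee that `calculate_fee` reports.
    assert_eq!(input_value - output_value, Amount::from_sat(1_000));
}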

View File

@@ -1,232 +0,0 @@
use bdk_chain::spk_client::{FullScanRequest, SyncRequest};
use bdk_esplora::EsploraExt;
use esplora_client::{self, Builder};
use std::collections::{BTreeSet, HashSet};
use std::str::FromStr;
use std::thread::sleep;
use std::time::Duration;
use bdk_chain::bitcoin::{Address, Amount, Txid};
use bdk_testenv::{anyhow, bitcoincore_rpc::RpcApi, TestEnv};
#[test]
pub fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
let env = TestEnv::new()?;
let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
let client = Builder::new(base_url.as_str()).build_blocking();
let receive_address0 =
Address::from_str("bcrt1qc6fweuf4xjvz4x3gx3t9e0fh4hvqyu2qw4wvxm")?.assume_checked();
let receive_address1 =
Address::from_str("bcrt1qfjg5lv3dvc9az8patec8fjddrs4aqtauadnagr")?.assume_checked();
let misc_spks = [
receive_address0.script_pubkey(),
receive_address1.script_pubkey(),
];
let _block_hashes = env.mine_blocks(101, None)?;
let txid1 = env.bitcoind.client.send_to_address(
&receive_address1,
Amount::from_sat(10000),
None,
None,
None,
None,
Some(1),
None,
)?;
let txid2 = env.bitcoind.client.send_to_address(
&receive_address0,
Amount::from_sat(20000),
None,
None,
None,
None,
Some(1),
None,
)?;
let _block_hashes = env.mine_blocks(1, None)?;
while client.get_height().unwrap() < 102 {
sleep(Duration::from_millis(10))
}
// use a full checkpoint linked list (since this is not what we are testing)
let cp_tip = env.make_checkpoint_tip();
let sync_update = {
let request = SyncRequest::from_chain_tip(cp_tip.clone()).set_spks(misc_spks);
client.sync(request, 1)?
};
assert!(
{
let update_cps = sync_update
.chain_update
.iter()
.map(|cp| cp.block_id())
.collect::<BTreeSet<_>>();
let superset_cps = cp_tip
.iter()
.map(|cp| cp.block_id())
.collect::<BTreeSet<_>>();
superset_cps.is_superset(&update_cps)
},
"update should not alter original checkpoint tip since we already started with all checkpoints",
);
let graph_update = sync_update.graph_update;
// Check to see if we have the floating txouts available from our two created transactions'
// previous outputs in order to calculate transaction fees.
for tx in graph_update.full_txs() {
// Retrieve the calculated fee from `TxGraph`, which will panic if we do not have the
// floating txouts available from the transactions' previous outputs.
let fee = graph_update.calculate_fee(&tx.tx).expect("Fee must exist");
// Retrieve the fee in the transaction data from `bitcoind`.
let tx_fee = env
.bitcoind
.client
.get_transaction(&tx.txid, None)
.expect("Tx must exist")
.fee
.expect("Fee must exist")
.abs()
.to_unsigned()
.expect("valid `Amount`");
// Check that the calculated fee matches the fee from the transaction data.
assert_eq!(fee, tx_fee);
}
let mut graph_update_txids: Vec<Txid> = graph_update.full_txs().map(|tx| tx.txid).collect();
graph_update_txids.sort();
let mut expected_txids = vec![txid1, txid2];
expected_txids.sort();
assert_eq!(graph_update_txids, expected_txids);
Ok(())
}
/// Test the bounds of the address scan depending on the `stop_gap`.
#[test]
pub fn test_update_tx_graph_stop_gap() -> anyhow::Result<()> {
let env = TestEnv::new()?;
let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
let client = Builder::new(base_url.as_str()).build_blocking();
let _block_hashes = env.mine_blocks(101, None)?;
// Now let's test the gap limit. First of all get a chain of 10 addresses.
let addresses = [
"bcrt1qj9f7r8r3p2y0sqf4r3r62qysmkuh0fzep473d2ar7rcz64wqvhssjgf0z4",
"bcrt1qmm5t0ch7vh2hryx9ctq3mswexcugqe4atkpkl2tetm8merqkthas3w7q30",
"bcrt1qut9p7ej7l7lhyvekj28xknn8gnugtym4d5qvnp5shrsr4nksmfqsmyn87g",
"bcrt1qqz0xtn3m235p2k96f5wa2dqukg6shxn9n3txe8arlrhjh5p744hsd957ww",
"bcrt1q9c0t62a8l6wfytmf2t9lfj35avadk3mm8g4p3l84tp6rl66m48sqrme7wu",
"bcrt1qkmh8yrk2v47cklt8dytk8f3ammcwa4q7dzattedzfhqzvfwwgyzsg59zrh",
"bcrt1qvgrsrzy07gjkkfr5luplt0azxtfwmwq5t62gum5jr7zwcvep2acs8hhnp2",
"bcrt1qw57edarcg50ansq8mk3guyrk78rk0fwvrds5xvqeupteu848zayq549av8",
"bcrt1qvtve5ekf6e5kzs68knvnt2phfw6a0yjqrlgat392m6zt9jsvyxhqfx67ef",
"bcrt1qw03ddumfs9z0kcu76ln7jrjfdwam20qtffmkcral3qtza90sp9kqm787uk",
];
let addresses: Vec<_> = addresses
.into_iter()
.map(|s| Address::from_str(s).unwrap().assume_checked())
.collect();
let spks: Vec<_> = addresses
.iter()
.enumerate()
.map(|(i, addr)| (i as u32, addr.script_pubkey()))
.collect();
// Then receive coins on the 4th address.
let txid_4th_addr = env.bitcoind.client.send_to_address(
&addresses[3],
Amount::from_sat(10000),
None,
None,
None,
None,
Some(1),
None,
)?;
let _block_hashes = env.mine_blocks(1, None)?;
while client.get_height().unwrap() < 103 {
sleep(Duration::from_millis(10))
}
// use a full checkpoint linked list (since this is not what we are testing)
let cp_tip = env.make_checkpoint_tip();
// A scan with a stop_gap of 3 won't find the transaction, but a scan with a stop_gap of 4
// will.
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 3, 1)?
};
assert!(full_scan_update.graph_update.full_txs().next().is_none());
assert!(full_scan_update.last_active_indices.is_empty());
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 4, 1)?
};
assert_eq!(
full_scan_update
.graph_update
.full_txs()
.next()
.unwrap()
.txid,
txid_4th_addr
);
assert_eq!(full_scan_update.last_active_indices[&0], 3);
// Now receive a coin on the last address.
let txid_last_addr = env.bitcoind.client.send_to_address(
&addresses[addresses.len() - 1],
Amount::from_sat(10000),
None,
None,
None,
None,
Some(1),
None,
)?;
let _block_hashes = env.mine_blocks(1, None)?;
while client.get_height().unwrap() < 104 {
sleep(Duration::from_millis(10))
}
// A scan with gap limit 5 won't find the second transaction, but a scan with gap limit 6 will.
// The last active index won't be updated in the first case but will be in the second one.
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 5, 1)?
};
let txs: HashSet<_> = full_scan_update
.graph_update
.full_txs()
.map(|tx| tx.txid)
.collect();
assert_eq!(txs.len(), 1);
assert!(txs.contains(&txid_4th_addr));
assert_eq!(full_scan_update.last_active_indices[&0], 3);
let full_scan_update = {
let request =
FullScanRequest::from_chain_tip(cp_tip.clone()).set_spks_for_keychain(0, spks.clone());
client.full_scan(request, 6, 1)?
};
let txs: HashSet<_> = full_scan_update
.graph_update
.full_txs()
.map(|tx| tx.txid)
.collect();
assert_eq!(txs.len(), 2);
assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr));
assert_eq!(full_scan_update.last_active_indices[&0], 9);
Ok(())
}

View File

@@ -1,17 +1,17 @@
[package]
name = "bdk_file_store"
version = "0.14.0"
version = "0.1.0"
edition = "2021"
license = "MIT OR Apache-2.0"
repository = "https://github.com/bitcoindevkit/bdk"
documentation = "https://docs.rs/bdk_file_store"
description = "A simple append-only flat file database for persisting bdk_chain data."
description = "A simple append-only flat file implementation of Persist for Bitcoin Dev Kit."
keywords = ["bitcoin", "persist", "persistence", "bdk", "file"]
authors = ["Bitcoin Dev Kit Developers"]
readme = "README.md"
[dependencies]
bdk_chain = { path = "../chain", version = "0.17.0", features = [ "serde", "miniscript" ] }
bdk_chain = { path = "../chain", version = "0.4.0", features = [ "serde", "miniscript" ] }
bincode = { version = "1" }
serde = { version = "1", features = ["derive"] }

View File

@@ -1,7 +1,10 @@
# BDK File Store
This is a simple append-only flat file database for persisting [`bdk_chain`] changesets.
This is a simple append-only flat file implementation of
[`Persist`](`bdk_chain::keychain::persist::Persist`).
The main structure is [`Store`] which works with any [`bdk_chain`] based changesets to persist data into a flat file.
The main structure is [`KeychainStore`](`crate::KeychainStore`), which can be used with [`bdk`]'s
`Wallet` to persist wallet data into a flat file.
[`bdk_chain`]: https://docs.rs/bdk_chain/latest/bdk_chain/
[`bdk`]: https://docs.rs/bdk/latest
[`bdk_chain`]: https://docs.rs/bdk_chain/latest
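A minimal usage sketch of the `Store` described here, built on the API from the deleted `store.rs` later in this diff (the `BTreeSet<String>` changeset mirrors the crate's own tests; the file name is hypothetical):

use bdk_file_store::Store;
use std::collections::BTreeSet;

// Any serde-(de)serializable changeset type implementing `bdk_chain::Merge`
// works; the crate's tests use a plain `BTreeSet<String>`.
type ChangeSet = BTreeSet<String>;

const MAGIC: &[u8] = b"bdkfs0000000";

fn demo() -> std::io::Result<()> {
    let mut store = Store::<ChangeSet>::open_or_create_new(MAGIC, "wallet.db")
        .expect("magic bytes must match");
    store.append_changeset(&ChangeSet::from(["entry".to_string()]))?;
    // Replaying the file merges every persisted changeset into one aggregate.
    let _aggregate = store.aggregate_changesets().expect("entries must decode");
    Ok(())
}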

View File

@@ -1,108 +0,0 @@
use bincode::Options;
use std::{
fs::File,
io::{self, BufReader, Seek},
marker::PhantomData,
};
use crate::bincode_options;
/// Iterator over entries in a file store.
///
/// Reads and returns an entry each time [`next`] is called. If an error occurs while reading, the
/// iterator will yield a `Result::Err(_)` instead, and then `None` on the next call to [`next`].
///
/// [`next`]: Self::next
pub struct EntryIter<'t, T> {
/// Buffered reader around the file
db_file: BufReader<&'t mut File>,
finished: bool,
/// The file position for the first read of `db_file`.
start_pos: Option<u64>,
types: PhantomData<T>,
}
impl<'t, T> EntryIter<'t, T> {
pub fn new(start_pos: u64, db_file: &'t mut File) -> Self {
Self {
db_file: BufReader::new(db_file),
start_pos: Some(start_pos),
finished: false,
types: PhantomData,
}
}
}
impl<'t, T> Iterator for EntryIter<'t, T>
where
T: serde::de::DeserializeOwned,
{
type Item = Result<T, IterError>;
fn next(&mut self) -> Option<Self::Item> {
if self.finished {
return None;
}
(|| {
if let Some(start) = self.start_pos.take() {
self.db_file.seek(io::SeekFrom::Start(start))?;
}
let pos_before_read = self.db_file.stream_position()?;
match bincode_options().deserialize_from(&mut self.db_file) {
Ok(changeset) => Ok(Some(changeset)),
Err(e) => {
self.finished = true;
let pos_after_read = self.db_file.stream_position()?;
// allow unexpected EOF if 0 bytes were read
if let bincode::ErrorKind::Io(inner) = &*e {
if inner.kind() == io::ErrorKind::UnexpectedEof
&& pos_after_read == pos_before_read
{
return Ok(None);
}
}
self.db_file.seek(io::SeekFrom::Start(pos_before_read))?;
Err(IterError::Bincode(*e))
}
}
})()
.transpose()
}
}
impl<'t, T> Drop for EntryIter<'t, T> {
fn drop(&mut self) {
// This syncs the underlying file's offset with the buffer's position. This way, we
// maintain the correct position to start the next read/write.
if let Ok(pos) = self.db_file.stream_position() {
let _ = self.db_file.get_mut().seek(io::SeekFrom::Start(pos));
}
}
}
/// Error type for [`EntryIter`].
#[derive(Debug)]
pub enum IterError {
/// Failure to read from the file.
Io(io::Error),
/// Failure to decode data from the file.
Bincode(bincode::ErrorKind),
}
impl core::fmt::Display for IterError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
IterError::Io(e) => write!(f, "io error trying to read entry {}", e),
IterError::Bincode(e) => write!(f, "bincode error while reading entry {}", e),
}
}
}
impl From<io::Error> for IterError {
fn from(value: io::Error) -> Self {
IterError::Io(value)
}
}
impl std::error::Error for IterError {}
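Because `EntryIter` leaves the underlying file offset at the end of the last successfully read entry, a caller that intends to append afterwards should drain it before writing. A sketch, assuming a store-like owner exposing `iter_changesets` (the `store` and `changesets` names are hypothetical):

// Drain the iterator so the write position ends up after the last good entry.
let mut changesets = Vec::new();
for entry in store.iter_changesets() {
    match entry {
        Ok(changeset) => changesets.push(changeset),
        // The first `Err` is also the last item; later calls yield `None`.
        Err(err) => {
            eprintln!("stopped reading entries: {}", err);
            break;
        }
    }
}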

View File

@@ -0,0 +1,404 @@
//! Module for persisting data on disk.
//!
//! The star of the show is [`KeychainStore`], which maintains an append-only file of
//! [`KeychainChangeSet`]s which can be used to restore a [`KeychainTracker`].
use bdk_chain::{
keychain::{KeychainChangeSet, KeychainTracker},
sparse_chain,
};
use bincode::{DefaultOptions, Options};
use core::marker::PhantomData;
use std::{
fs::{File, OpenOptions},
io::{self, Read, Seek, Write},
path::Path,
};
/// BDK File Store magic bytes length.
const MAGIC_BYTES_LEN: usize = 12;
/// BDK File Store magic bytes.
const MAGIC_BYTES: [u8; MAGIC_BYTES_LEN] = [98, 100, 107, 102, 115, 48, 48, 48, 48, 48, 48, 48];
/// Persists an append-only list of `KeychainChangeSet<K,P>` to a single file.
/// [`KeychainChangeSet<K,P>`]s record the changes made to a [`KeychainTracker<K,P>`].
#[derive(Debug)]
pub struct KeychainStore<K, P> {
db_file: File,
changeset_type_params: core::marker::PhantomData<(K, P)>,
}
fn bincode() -> impl bincode::Options {
DefaultOptions::new().with_varint_encoding()
}
impl<K, P> KeychainStore<K, P>
where
K: Ord + Clone + core::fmt::Debug,
P: sparse_chain::ChainPosition,
KeychainChangeSet<K, P>: serde::Serialize + serde::de::DeserializeOwned,
{
/// Creates a new store from a [`File`].
///
/// The file must have been opened with read and write permissions.
///
/// [`File`]: std::fs::File
pub fn new(mut file: File) -> Result<Self, FileError> {
file.rewind()?;
let mut magic_bytes = [0_u8; MAGIC_BYTES_LEN];
file.read_exact(&mut magic_bytes)?;
if magic_bytes != MAGIC_BYTES {
return Err(FileError::InvalidMagicBytes(magic_bytes));
}
Ok(Self {
db_file: file,
changeset_type_params: Default::default(),
})
}
/// Creates or loads a store from `db_path`. If no file exists there, it will be created.
pub fn new_from_path<D: AsRef<Path>>(db_path: D) -> Result<Self, FileError> {
let already_exists = db_path.as_ref().exists();
let mut db_file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(db_path)?;
if !already_exists {
db_file.write_all(&MAGIC_BYTES)?;
}
Self::new(db_file)
}
/// Iterates over the stored changeset from first to last, changing the seek position at each
/// iteration.
///
/// The iterator may fail to read an entry and therefore return an error. However, the first
/// error it returns will also be the last: after that, the iterator will always yield `None`.
///
/// **WARNING**: This method changes the write position in the underlying file. You should
/// always iterate over all entries until `None` is returned if you want your next write to go
/// at the end; otherwise, you will write over existing entries.
pub fn iter_changesets(&mut self) -> Result<EntryIter<'_, KeychainChangeSet<K, P>>, io::Error> {
self.db_file
.seek(io::SeekFrom::Start(MAGIC_BYTES_LEN as _))?;
Ok(EntryIter::new(&mut self.db_file))
}
/// Loads all the changesets that have been stored as one giant changeset.
///
/// This function returns a tuple of the aggregate changeset and a result that indicates
/// whether an error occurred while reading or deserializing one of the entries. If so the
/// changeset will consist of all of those it was able to read.
///
/// You should usually check the error. In many applications, it may make sense to do a full
/// wallet scan with a stop-gap after getting an error, since it is likely that one of the
/// changesets it was unable to read changed the derivation indices of the tracker.
///
/// **WARNING**: This method changes the write position of the underlying file. The next
/// changeset will be written over the erroring entry (or the end of the file if none existed).
pub fn aggregate_changeset(&mut self) -> (KeychainChangeSet<K, P>, Result<(), IterError>) {
let mut changeset = KeychainChangeSet::default();
let result = (|| {
let iter_changeset = self.iter_changesets()?;
for next_changeset in iter_changeset {
changeset.append(next_changeset?);
}
Ok(())
})();
(changeset, result)
}
/// Reads and applies all the changesets stored sequentially to the tracker, stopping when it fails
/// to read the next one.
///
/// **WARNING**: This method changes the write position of the underlying file. The next
/// changeset will be written over the erroring entry (or the end of the file if none existed).
pub fn load_into_keychain_tracker(
&mut self,
tracker: &mut KeychainTracker<K, P>,
) -> Result<(), IterError> {
for changeset in self.iter_changesets()? {
tracker.apply_changeset(changeset?)
}
Ok(())
}
/// Append a new changeset to the file and truncate the file to the end of the appended changeset.
///
/// The truncation is to avoid the possibility of having a valid but inconsistent changeset
/// directly after the appended changeset.
pub fn append_changeset(
&mut self,
changeset: &KeychainChangeSet<K, P>,
) -> Result<(), io::Error> {
if changeset.is_empty() {
return Ok(());
}
bincode()
.serialize_into(&mut self.db_file, changeset)
.map_err(|e| match *e {
bincode::ErrorKind::Io(inner) => inner,
unexpected_err => panic!("unexpected bincode error: {}", unexpected_err),
})?;
// truncate file after this changeset addition
// if this is not done, data after this changeset may represent valid changesets, however
// applying those changesets on top of this one may result in an inconsistent state
let pos = self.db_file.stream_position()?;
self.db_file.set_len(pos)?;
// We want to make sure that derivation index changes are written to disk as soon as
// possible, so the application learns about a write failure before it gives out the address.
if !changeset.derivation_indices.is_empty() {
self.db_file.sync_data()?;
}
Ok(())
}
}
/// Error that occurs due to problems encountered with the file.
#[derive(Debug)]
pub enum FileError {
/// IO error, this may mean that the file is too short.
Io(io::Error),
/// Magic bytes do not match what is expected.
InvalidMagicBytes([u8; MAGIC_BYTES_LEN]),
}
impl core::fmt::Display for FileError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
Self::Io(e) => write!(f, "io error trying to read file: {}", e),
Self::InvalidMagicBytes(b) => write!(
f,
"file has invalid magic bytes: expected={:?} got={:?}",
MAGIC_BYTES, b
),
}
}
}
impl From<io::Error> for FileError {
fn from(value: io::Error) -> Self {
Self::Io(value)
}
}
impl std::error::Error for FileError {}
/// Error type for [`EntryIter`].
#[derive(Debug)]
pub enum IterError {
/// Failure to read from the file.
Io(io::Error),
/// Failure to decode data from the file.
Bincode(bincode::ErrorKind),
}
impl core::fmt::Display for IterError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
IterError::Io(e) => write!(f, "io error trying to read entry {}", e),
IterError::Bincode(e) => write!(f, "bincode error while reading entry {}", e),
}
}
}
impl std::error::Error for IterError {}
/// Iterator over entries in a file store.
///
/// Reads and returns an entry each time [`next`] is called. If an error occurs while reading, the
/// iterator will yield a `Result::Err(_)` instead, and then `None` on the next call to [`next`].
///
/// [`next`]: Self::next
pub struct EntryIter<'a, V> {
db_file: &'a mut File,
types: PhantomData<V>,
error_exit: bool,
}
impl<'a, V> EntryIter<'a, V> {
pub fn new(db_file: &'a mut File) -> Self {
Self {
db_file,
types: PhantomData,
error_exit: false,
}
}
}
impl<'a, V> Iterator for EntryIter<'a, V>
where
V: serde::de::DeserializeOwned,
{
type Item = Result<V, IterError>;
fn next(&mut self) -> Option<Self::Item> {
let result = (|| {
let pos = self.db_file.stream_position()?;
match bincode().deserialize_from(&mut self.db_file) {
Ok(changeset) => Ok(Some(changeset)),
Err(e) => {
if let bincode::ErrorKind::Io(inner) = &*e {
if inner.kind() == io::ErrorKind::UnexpectedEof {
let eof = self.db_file.seek(io::SeekFrom::End(0))?;
if pos == eof {
return Ok(None);
}
}
}
self.db_file.seek(io::SeekFrom::Start(pos))?;
Err(IterError::Bincode(*e))
}
}
})();
let result = result.transpose();
if let Some(Err(_)) = &result {
self.error_exit = true;
}
result
}
}
impl From<io::Error> for IterError {
fn from(value: io::Error) -> Self {
IterError::Io(value)
}
}
#[cfg(test)]
mod test {
use super::*;
use bdk_chain::{
keychain::{DerivationAdditions, KeychainChangeSet},
TxHeight,
};
use std::{
io::{Read, Write},
vec::Vec,
};
use tempfile::NamedTempFile;
#[derive(
Debug,
Clone,
Copy,
PartialOrd,
Ord,
PartialEq,
Eq,
Hash,
serde::Serialize,
serde::Deserialize,
)]
enum TestKeychain {
External,
Internal,
}
impl core::fmt::Display for TestKeychain {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::External => write!(f, "external"),
Self::Internal => write!(f, "internal"),
}
}
}
#[test]
fn magic_bytes() {
assert_eq!(&MAGIC_BYTES, "bdkfs0000000".as_bytes());
}
#[test]
fn new_fails_if_file_is_too_short() {
let mut file = NamedTempFile::new().unwrap();
file.write_all(&MAGIC_BYTES[..MAGIC_BYTES_LEN - 1])
.expect("should write");
match KeychainStore::<TestKeychain, TxHeight>::new(file.reopen().unwrap()) {
Err(FileError::Io(e)) => assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof),
unexpected => panic!("unexpected result: {:?}", unexpected),
};
}
#[test]
fn new_fails_if_magic_bytes_are_invalid() {
let invalid_magic_bytes = "ldkfs0000000";
let mut file = NamedTempFile::new().unwrap();
file.write_all(invalid_magic_bytes.as_bytes())
.expect("should write");
match KeychainStore::<TestKeychain, TxHeight>::new(file.reopen().unwrap()) {
Err(FileError::InvalidMagicBytes(b)) => {
assert_eq!(b, invalid_magic_bytes.as_bytes())
}
unexpected => panic!("unexpected result: {:?}", unexpected),
};
}
#[test]
fn append_changeset_truncates_invalid_bytes() {
// initial data to write to file (magic bytes + invalid data)
let mut data = [255_u8; 2000];
data[..MAGIC_BYTES_LEN].copy_from_slice(&MAGIC_BYTES);
let changeset = KeychainChangeSet {
derivation_indices: DerivationAdditions(
vec![(TestKeychain::External, 42)].into_iter().collect(),
),
chain_graph: Default::default(),
};
let mut file = NamedTempFile::new().unwrap();
file.write_all(&data).expect("should write");
let mut store = KeychainStore::<TestKeychain, TxHeight>::new(file.reopen().unwrap())
.expect("should open");
match store.iter_changesets().expect("seek should succeed").next() {
Some(Err(IterError::Bincode(_))) => {}
unexpected_res => panic!("unexpected result: {:?}", unexpected_res),
}
store.append_changeset(&changeset).expect("should append");
drop(store);
let got_bytes = {
let mut buf = Vec::new();
file.reopen()
.unwrap()
.read_to_end(&mut buf)
.expect("should read");
buf
};
let expected_bytes = {
let mut buf = MAGIC_BYTES.to_vec();
DefaultOptions::new()
.with_varint_encoding()
.serialize_into(&mut buf, &changeset)
.expect("should encode");
buf
};
assert_eq!(got_bytes, expected_bytes);
}
}
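Putting the pieces above together, a minimal restore-then-persist sketch for `KeychainStore` (the keychain and position types mirror the test module above; the path is hypothetical, and we assume `KeychainTracker` implements `Default` as the crate's examples use):

use bdk_chain::{keychain::KeychainTracker, TxHeight};
use bdk_file_store::KeychainStore;

fn demo() -> Result<(), Box<dyn std::error::Error>> {
    let mut store = KeychainStore::<TestKeychain, TxHeight>::new_from_path("wallet.db")?;
    // Replay every persisted changeset into a fresh tracker.
    let mut tracker = KeychainTracker::<TestKeychain, TxHeight>::default();
    store.load_into_keychain_tracker(&mut tracker)?;
    // ...use the tracker, then persist whatever changed:
    // store.append_changeset(&changeset)?;
    Ok(())
}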

View File

@@ -1,42 +1,32 @@
#![doc = include_str!("../README.md")]
mod entry_iter;
mod store;

use std::io;

use bincode::{DefaultOptions, Options};

pub use entry_iter::*;
pub use store::*;

pub(crate) fn bincode_options() -> impl bincode::Options {
    DefaultOptions::new().with_varint_encoding()
}

/// Error that occurs due to problems encountered with the file.
#[derive(Debug)]
pub enum FileError {
    /// IO error, this may mean that the file is too short.
    Io(io::Error),
    /// Magic bytes do not match what is expected.
    InvalidMagicBytes { got: Vec<u8>, expected: Vec<u8> },
}

impl core::fmt::Display for FileError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            Self::Io(e) => write!(f, "io error trying to read file: {}", e),
            Self::InvalidMagicBytes { got, expected } => write!(
                f,
                "file has invalid magic bytes: expected={:?} got={:?}",
                expected, got,
            ),
        }
    }
}

impl From<io::Error> for FileError {
    fn from(value: io::Error) -> Self {
        Self::Io(value)
    }
}

impl std::error::Error for FileError {}

mod file_store;

use bdk_chain::{
    keychain::{KeychainChangeSet, KeychainTracker, PersistBackend},
    sparse_chain::ChainPosition,
};
pub use file_store::*;

impl<K, P> PersistBackend<K, P> for KeychainStore<K, P>
where
    K: Ord + Clone + core::fmt::Debug,
    P: ChainPosition,
    KeychainChangeSet<K, P>: serde::Serialize + serde::de::DeserializeOwned,
{
    type WriteError = std::io::Error;

    type LoadError = IterError;

    fn append_changeset(
        &mut self,
        changeset: &KeychainChangeSet<K, P>,
    ) -> Result<(), Self::WriteError> {
        KeychainStore::append_changeset(self, changeset)
    }

    fn load_into_keychain_tracker(
        &mut self,
        tracker: &mut KeychainTracker<K, P>,
    ) -> Result<(), Self::LoadError> {
        KeychainStore::load_into_keychain_tracker(self, tracker)
    }
}

View File

@@ -1,443 +0,0 @@
use crate::{bincode_options, EntryIter, FileError, IterError};
use bdk_chain::Merge;
use bincode::Options;
use std::{
fmt::{self, Debug},
fs::{File, OpenOptions},
io::{self, Read, Seek, Write},
marker::PhantomData,
path::Path,
};
/// Persists an append-only list of changesets (`C`) to a single file.
#[derive(Debug)]
pub struct Store<C>
where
C: Sync + Send,
{
magic_len: usize,
db_file: File,
marker: PhantomData<C>,
}
impl<C> Store<C>
where
C: Merge
+ serde::Serialize
+ serde::de::DeserializeOwned
+ core::marker::Send
+ core::marker::Sync,
{
/// Create a new [`Store`] file in write-only mode; error if the file exists.
///
/// `magic` is the prefix bytes written to the start of the new file. This will be checked when opening
/// the `Store` in the future with [`open`].
///
/// [`open`]: Store::open
pub fn create_new<P>(magic: &[u8], file_path: P) -> Result<Self, FileError>
where
P: AsRef<Path>,
{
if file_path.as_ref().exists() {
// `io::Error` is used instead of a variant on `FileError` because there is already a
// nightly-only `File::create_new` method
return Err(FileError::Io(io::Error::new(
io::ErrorKind::Other,
"file already exists",
)));
}
let mut f = OpenOptions::new()
.create(true)
.read(true)
.write(true)
.truncate(true)
.open(file_path)?;
f.write_all(magic)?;
Ok(Self {
magic_len: magic.len(),
db_file: f,
marker: Default::default(),
})
}
/// Open an existing [`Store`].
///
/// Use [`create_new`] to create a new `Store`.
///
/// # Errors
///
/// If the prefix bytes of the opened file do not match the provided `magic`, the
/// [`FileError::InvalidMagicBytes`] error variant will be returned.
///
/// [`create_new`]: Store::create_new
pub fn open<P>(magic: &[u8], file_path: P) -> Result<Self, FileError>
where
P: AsRef<Path>,
{
let mut f = OpenOptions::new().read(true).write(true).open(file_path)?;
let mut magic_buf = vec![0_u8; magic.len()];
f.read_exact(&mut magic_buf)?;
if magic_buf != magic {
return Err(FileError::InvalidMagicBytes {
got: magic_buf,
expected: magic.to_vec(),
});
}
Ok(Self {
magic_len: magic.len(),
db_file: f,
marker: Default::default(),
})
}
/// Attempt to open existing [`Store`] file; create it if the file is non-existent.
///
/// Internally, this calls either [`open`] or [`create_new`].
///
/// [`open`]: Store::open
/// [`create_new`]: Store::create_new
pub fn open_or_create_new<P>(magic: &[u8], file_path: P) -> Result<Self, FileError>
where
P: AsRef<Path>,
{
if file_path.as_ref().exists() {
Self::open(magic, file_path)
} else {
Self::create_new(magic, file_path)
}
}
/// Iterates over the stored changeset from first to last, changing the seek position at each
/// iteration.
///
/// The iterator may fail to read an entry and therefore return an error. However, the first
/// error it returns will also be the last: after that, the iterator will always yield `None`.
///
/// **WARNING**: This method changes the write position in the underlying file. You should
/// always iterate over all entries until `None` is returned if you want your next write to go
/// at the end; otherwise, you will write over existing entries.
pub fn iter_changesets(&mut self) -> EntryIter<C> {
EntryIter::new(self.magic_len as u64, &mut self.db_file)
}
/// Loads all the changesets that have been stored as one giant changeset.
///
/// This function returns the aggregate changeset, or `None` if nothing was persisted.
/// If reading or deserializing any of the entries fails, an error is returned that
/// contains the partial aggregate of all the entries it was able to read.
///
/// You should usually check the error. In many applications, it may make sense to do a full
/// wallet scan with a stop-gap after getting an error, since it is likely that one of the
/// changesets it was unable to read changed the derivation indices of a keychain.
///
/// **WARNING**: This method changes the write position of the underlying file. The next
/// changeset will be written over the erroring entry (or the end of the file if none existed).
pub fn aggregate_changesets(&mut self) -> Result<Option<C>, AggregateChangesetsError<C>> {
let mut changeset = Option::<C>::None;
for next_changeset in self.iter_changesets() {
let next_changeset = match next_changeset {
Ok(next_changeset) => next_changeset,
Err(iter_error) => {
return Err(AggregateChangesetsError {
changeset,
iter_error,
})
}
};
match &mut changeset {
Some(changeset) => changeset.merge(next_changeset),
changeset => *changeset = Some(next_changeset),
}
}
Ok(changeset)
}
/// Append a new changeset to the file and truncate the file to the end of the appended
/// changeset.
///
/// The truncation is to avoid the possibility of having a valid but inconsistent changeset
/// directly after the appended changeset.
pub fn append_changeset(&mut self, changeset: &C) -> Result<(), io::Error> {
// no need to write anything if changeset is empty
if changeset.is_empty() {
return Ok(());
}
bincode_options()
.serialize_into(&mut self.db_file, changeset)
.map_err(|e| match *e {
bincode::ErrorKind::Io(error) => error,
unexpected_err => panic!("unexpected bincode error: {}", unexpected_err),
})?;
// truncate file after this changeset addition
// if this is not done, data after this changeset may represent valid changesets, however
// applying those changesets on top of this one may result in an inconsistent state
let pos = self.db_file.stream_position()?;
self.db_file.set_len(pos)?;
Ok(())
}
}
/// Error type for [`Store::aggregate_changesets`].
#[derive(Debug)]
pub struct AggregateChangesetsError<C> {
/// The partially-aggregated changeset.
pub changeset: Option<C>,
/// The error returned by [`EntryIter`].
pub iter_error: IterError,
}
impl<C> std::fmt::Display for AggregateChangesetsError<C> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(&self.iter_error, f)
}
}
impl<C: fmt::Debug> std::error::Error for AggregateChangesetsError<C> {}
#[cfg(test)]
mod test {
use super::*;
use bincode::DefaultOptions;
use std::{
collections::BTreeSet,
io::{Read, Write},
vec::Vec,
};
use tempfile::NamedTempFile;
const TEST_MAGIC_BYTES_LEN: usize = 12;
const TEST_MAGIC_BYTES: [u8; TEST_MAGIC_BYTES_LEN] =
[98, 100, 107, 102, 115, 49, 49, 49, 49, 49, 49, 49];
type TestChangeSet = BTreeSet<String>;
/// Check behavior of [`Store::create_new`] and [`Store::open`].
#[test]
fn construct_store() {
let temp_dir = tempfile::tempdir().unwrap();
let file_path = temp_dir.path().join("db_file");
let _ = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, &file_path)
.expect_err("must not open as file does not exist yet");
let _ = Store::<TestChangeSet>::create_new(&TEST_MAGIC_BYTES, &file_path)
.expect("must create file");
// cannot create new as file already exists
let _ = Store::<TestChangeSet>::create_new(&TEST_MAGIC_BYTES, &file_path)
.expect_err("must fail as file already exists now");
let _ = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, &file_path)
.expect("must open as file exists now");
}
#[test]
fn open_or_create_new() {
let temp_dir = tempfile::tempdir().unwrap();
let file_path = temp_dir.path().join("db_file");
let changeset = BTreeSet::from(["hello".to_string(), "world".to_string()]);
{
let mut db = Store::<TestChangeSet>::open_or_create_new(&TEST_MAGIC_BYTES, &file_path)
.expect("must create");
assert!(file_path.exists());
db.append_changeset(&changeset).expect("must succeed");
}
{
let mut db = Store::<TestChangeSet>::open_or_create_new(&TEST_MAGIC_BYTES, &file_path)
.expect("must recover");
let recovered_changeset = db.aggregate_changesets().expect("must succeed");
assert_eq!(recovered_changeset, Some(changeset));
}
}
#[test]
fn new_fails_if_file_is_too_short() {
let mut file = NamedTempFile::new().unwrap();
file.write_all(&TEST_MAGIC_BYTES[..TEST_MAGIC_BYTES_LEN - 1])
.expect("should write");
match Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, file.path()) {
Err(FileError::Io(e)) => assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof),
unexpected => panic!("unexpected result: {:?}", unexpected),
};
}
#[test]
fn new_fails_if_magic_bytes_are_invalid() {
let invalid_magic_bytes = "ldkfs0000000";
let mut file = NamedTempFile::new().unwrap();
file.write_all(invalid_magic_bytes.as_bytes())
.expect("should write");
match Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, file.path()) {
Err(FileError::InvalidMagicBytes { got, .. }) => {
assert_eq!(got, invalid_magic_bytes.as_bytes())
}
unexpected => panic!("unexpected result: {:?}", unexpected),
};
}
#[test]
fn append_changeset_truncates_invalid_bytes() {
// initial data to write to file (magic bytes + invalid data)
let mut data = [255_u8; 2000];
data[..TEST_MAGIC_BYTES_LEN].copy_from_slice(&TEST_MAGIC_BYTES);
let changeset = TestChangeSet::from(["one".into(), "two".into(), "three!".into()]);
let mut file = NamedTempFile::new().unwrap();
file.write_all(&data).expect("should write");
let mut store =
Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, file.path()).expect("should open");
match store.iter_changesets().next() {
Some(Err(IterError::Bincode(_))) => {}
unexpected_res => panic!("unexpected result: {:?}", unexpected_res),
}
store.append_changeset(&changeset).expect("should append");
drop(store);
let got_bytes = {
let mut buf = Vec::new();
file.reopen()
.unwrap()
.read_to_end(&mut buf)
.expect("should read");
buf
};
let expected_bytes = {
let mut buf = TEST_MAGIC_BYTES.to_vec();
DefaultOptions::new()
.with_varint_encoding()
.serialize_into(&mut buf, &changeset)
.expect("should encode");
buf
};
assert_eq!(got_bytes, expected_bytes);
}
#[test]
fn last_write_is_short() {
let temp_dir = tempfile::tempdir().unwrap();
let changesets = [
TestChangeSet::from(["1".into()]),
TestChangeSet::from(["2".into(), "3".into()]),
TestChangeSet::from(["4".into(), "5".into(), "6".into()]),
];
let last_changeset = TestChangeSet::from(["7".into(), "8".into(), "9".into()]);
let last_changeset_bytes = bincode_options().serialize(&last_changeset).unwrap();
for short_write_len in 1..last_changeset_bytes.len() - 1 {
let file_path = temp_dir.path().join(format!("{}.dat", short_write_len));
println!("Test file: {:?}", file_path);
// simulate creating a file, writing data where the last write is incomplete
{
let mut db =
Store::<TestChangeSet>::create_new(&TEST_MAGIC_BYTES, &file_path).unwrap();
for changeset in &changesets {
db.append_changeset(changeset).unwrap();
}
// this is the incomplete write
db.db_file
.write_all(&last_changeset_bytes[..short_write_len])
.unwrap();
}
// load file again and aggregate changesets
// write the last changeset again (this time it succeeds)
{
let mut db = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, &file_path).unwrap();
let err = db
.aggregate_changesets()
.expect_err("should return error as last read is short");
assert_eq!(
err.changeset,
changesets.iter().cloned().reduce(|mut acc, cs| {
Merge::merge(&mut acc, cs);
acc
}),
"should recover all changesets that are written in full",
);
db.db_file.write_all(&last_changeset_bytes).unwrap();
}
// load file again - this time we should successfully aggregate all changesets
{
let mut db = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, &file_path).unwrap();
let aggregated_changesets = db
.aggregate_changesets()
.expect("aggregating all changesets should succeed");
assert_eq!(
aggregated_changesets,
changesets
.iter()
.cloned()
.chain(core::iter::once(last_changeset.clone()))
.reduce(|mut acc, cs| {
Merge::merge(&mut acc, cs);
acc
}),
"should recover all changesets",
);
}
}
}
#[test]
fn write_after_short_read() {
let temp_dir = tempfile::tempdir().unwrap();
let changesets = (0..20)
.map(|n| TestChangeSet::from([format!("{}", n)]))
.collect::<Vec<_>>();
let last_changeset = TestChangeSet::from(["last".into()]);
for read_count in 0..changesets.len() {
let file_path = temp_dir.path().join(format!("{}.dat", read_count));
println!("Test file: {:?}", file_path);
// First, we create the file with all the changesets!
let mut db = Store::<TestChangeSet>::create_new(&TEST_MAGIC_BYTES, &file_path).unwrap();
for changeset in &changesets {
db.append_changeset(changeset).unwrap();
}
drop(db);
// We re-open the file and read `read_count` number of changesets.
let mut db = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, &file_path).unwrap();
let mut exp_aggregation = db
.iter_changesets()
.take(read_count)
.map(|r| r.expect("must read valid changeset"))
.fold(TestChangeSet::default(), |mut acc, v| {
Merge::merge(&mut acc, v);
acc
});
// We write after a short read.
db.append_changeset(&last_changeset)
.expect("last write must succeed");
Merge::merge(&mut exp_aggregation, last_changeset.clone());
drop(db);
// We open the file again and check whether aggregate changeset is expected.
let aggregation = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, &file_path)
.unwrap()
.aggregate_changesets()
.expect("must aggregate changesets")
.unwrap_or_default();
assert_eq!(aggregation, exp_aggregation);
}
}
}
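The recovery path that `aggregate_changesets` enables looks like this in practice: apply what was readable, then let the caller decide whether to rescan. A sketch reusing the test module's `TestChangeSet` and magic bytes (the path is hypothetical):

fn load_or_recover() -> Option<TestChangeSet> {
    let mut store = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, "wallet.db")
        .expect("must open");
    match store.aggregate_changesets() {
        Ok(aggregate) => aggregate,
        Err(err) => {
            // Everything read before the bad entry is still usable; the caller
            // can apply it and then decide whether to trigger a full rescan.
            eprintln!("failed to read a changeset entry: {}", err.iter_error);
            err.changeset
        }
    }
}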

View File

@@ -0,0 +1 @@

View File

@@ -1,13 +0,0 @@
[package]
name = "bdk_hwi"
version = "0.4.0"
edition = "2021"
homepage = "https://bitcoindevkit.org"
repository = "https://github.com/bitcoindevkit/bdk"
description = "Utilities to use bdk with hardware wallets"
license = "MIT OR Apache-2.0"
readme = "README.md"
[dependencies]
bdk_wallet = { path = "../wallet", version = "1.0.0-beta.1" }
hwi = { version = "0.9.0", features = [ "miniscript"] }

View File

@@ -1,3 +0,0 @@
# BDK HWI Signer
This crate contains `HWISigner`, an implementation of a `TransactionSigner` to be used with hardware wallets.

View File

@@ -1,39 +0,0 @@
//! HWI Signer
//!
//! This crate contains HWISigner, an implementation of a [`TransactionSigner`] to be
//! used with hardware wallets.
//! ```no_run
//! # use bdk_wallet::bitcoin::Network;
//! # use bdk_wallet::descriptor::Descriptor;
//! # use bdk_wallet::signer::SignerOrdering;
//! # use bdk_hwi::HWISigner;
//! # use bdk_wallet::{KeychainKind, SignOptions, Wallet};
//! # use hwi::HWIClient;
//! # use std::sync::Arc;
//! # use std::str::FromStr;
//! #
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let mut devices = HWIClient::enumerate()?;
//! if devices.is_empty() {
//! panic!("No devices found!");
//! }
//! let first_device = devices.remove(0)?;
//! let custom_signer = HWISigner::from_device(&first_device, Network::Testnet.into())?;
//!
//! # let mut wallet = Wallet::create("", "").network(Network::Testnet).create_wallet_no_persist()?;
//! #
//! // Adding the hardware signer to the BDK wallet
//! wallet.add_signer(
//! KeychainKind::External,
//! SignerOrdering(200),
//! Arc::new(custom_signer),
//! );
//!
//! # Ok(())
//! # }
//! ```
//!
//! [`TransactionSigner`]: bdk_wallet::signer::TransactionSigner
mod signer;
pub use signer::*;
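Once registered, the hardware signer takes part in ordinary PSBT signing; a minimal follow-up sketch (the `psbt` is hypothetical, e.g. produced by the wallet's transaction builder):

// The wallet consults the HWISigner (per its SignerOrdering) like any other signer.
let finalized = wallet.sign(&mut psbt, SignOptions::default())?;
assert!(finalized, "the device should have signed all inputs");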

Some files were not shown because too many files have changed in this diff.