Compare commits

..

1 Commits

Author SHA1 Message Date
dependabot[bot]
4d040b7057 build(deps): bump actions/setup-python from 4 to 5
Bumps [actions/setup-python](https://github.com/actions/setup-python) from 4 to 5.
- [Release notes](https://github.com/actions/setup-python/releases)
- [Commits](https://github.com/actions/setup-python/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/setup-python
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-12-11 06:05:08 +00:00
79 changed files with 2043 additions and 4300 deletions

View File

@@ -33,7 +33,7 @@ jobs:
- name: Run simulator image - name: Run simulator image
run: docker run --name simulator --network=host hwi/ledger_emulator & run: docker run --name simulator --network=host hwi/ledger_emulator &
- name: Install Python - name: Install Python
uses: actions/setup-python@v4 uses: actions/setup-python@v5
with: with:
python-version: '3.9' python-version: '3.9'
- name: Install python dependencies - name: Install python dependencies

View File

@@ -12,7 +12,7 @@ jobs:
rust: rust:
- version: stable - version: stable
clippy: true clippy: true
- version: 1.63.0 # MSRV - version: 1.57.0 # MSRV
features: features:
- --no-default-features - --no-default-features
- --all-features - --all-features
@@ -28,11 +28,28 @@ jobs:
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2.2.1 uses: Swatinem/rust-cache@v2.2.1
- name: Pin dependencies for MSRV - name: Pin dependencies for MSRV
if: matrix.rust.version == '1.63.0' if: matrix.rust.version == '1.57.0'
run: | run: |
cargo update -p zstd-sys --precise "2.0.8+zstd.1.5.5" cargo update -p log --precise "0.4.18"
cargo update -p time --precise "0.3.20" cargo update -p tempfile --precise "3.6.0"
cargo update -p home --precise "0.5.5" cargo update -p reqwest --precise "0.11.18"
cargo update -p hyper-rustls --precise 0.24.0
cargo update -p rustls:0.21.9 --precise "0.21.1"
cargo update -p rustls:0.20.9 --precise "0.20.8"
cargo update -p tokio --precise "1.29.1"
cargo update -p tokio-util --precise "0.7.8"
cargo update -p flate2 --precise "1.0.26"
cargo update -p h2 --precise "0.3.20"
cargo update -p rustls-webpki:0.100.3 --precise "0.100.1"
cargo update -p rustls-webpki:0.101.7 --precise "0.101.1"
cargo update -p zip --precise "0.6.2"
cargo update -p time --precise "0.3.13"
cargo update -p byteorder --precise "1.4.3"
cargo update -p webpki --precise "0.22.2"
cargo update -p os_str_bytes --precise 6.5.1
cargo update -p sct --precise 0.7.0
cargo update -p cc --precise "1.0.81"
cargo update -p jobserver --precise "0.1.26"
- name: Build - name: Build
run: cargo build ${{ matrix.features }} run: cargo build ${{ matrix.features }}
- name: Test - name: Test
@@ -117,7 +134,9 @@ jobs:
- uses: actions/checkout@v1 - uses: actions/checkout@v1
- uses: actions-rs/toolchain@v1 - uses: actions-rs/toolchain@v1
with: with:
toolchain: stable # we pin clippy instead of using "stable" so that our CI doesn't break
# at each new cargo release
toolchain: "1.67.0"
components: clippy components: clippy
override: true override: true
- name: Rust Cache - name: Rust Cache

View File

@@ -17,8 +17,6 @@ jobs:
run: rustup update run: rustup update
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2.2.1 uses: Swatinem/rust-cache@v2.2.1
- name: Pin dependencies for MSRV
run: cargo update -p home --precise "0.5.5"
- name: Build docs - name: Build docs
run: cargo doc --no-deps run: cargo doc --no-deps
env: env:

View File

@@ -7,8 +7,6 @@ members = [
"crates/electrum", "crates/electrum",
"crates/esplora", "crates/esplora",
"crates/bitcoind_rpc", "crates/bitcoind_rpc",
"crates/hwi",
"crates/testenv",
"example-crates/example_cli", "example-crates/example_cli",
"example-crates/example_electrum", "example-crates/example_electrum",
"example-crates/example_esplora", "example-crates/example_esplora",
@@ -16,7 +14,6 @@ members = [
"example-crates/wallet_electrum", "example-crates/wallet_electrum",
"example-crates/wallet_esplora_blocking", "example-crates/wallet_esplora_blocking",
"example-crates/wallet_esplora_async", "example-crates/wallet_esplora_async",
"example-crates/wallet_rpc",
"nursery/tmp_plan", "nursery/tmp_plan",
"nursery/coin_select" "nursery/coin_select"
] ]

View File

@@ -15,7 +15,7 @@
<a href="https://github.com/bitcoindevkit/bdk/actions?query=workflow%3ACI"><img alt="CI Status" src="https://github.com/bitcoindevkit/bdk/workflows/CI/badge.svg"></a> <a href="https://github.com/bitcoindevkit/bdk/actions?query=workflow%3ACI"><img alt="CI Status" src="https://github.com/bitcoindevkit/bdk/workflows/CI/badge.svg"></a>
<a href="https://coveralls.io/github/bitcoindevkit/bdk?branch=master"><img src="https://coveralls.io/repos/github/bitcoindevkit/bdk/badge.svg?branch=master"/></a> <a href="https://coveralls.io/github/bitcoindevkit/bdk?branch=master"><img src="https://coveralls.io/repos/github/bitcoindevkit/bdk/badge.svg?branch=master"/></a>
<a href="https://docs.rs/bdk"><img alt="API Docs" src="https://img.shields.io/badge/docs.rs-bdk-green"/></a> <a href="https://docs.rs/bdk"><img alt="API Docs" src="https://img.shields.io/badge/docs.rs-bdk-green"/></a>
<a href="https://blog.rust-lang.org/2022/08/11/Rust-1.63.0.html"><img alt="Rustc Version 1.63.0+" src="https://img.shields.io/badge/rustc-1.63.0%2B-lightgrey.svg"/></a> <a href="https://blog.rust-lang.org/2021/12/02/Rust-1.57.0.html"><img alt="Rustc Version 1.57.0+" src="https://img.shields.io/badge/rustc-1.57.0%2B-lightgrey.svg"/></a>
<a href="https://discord.gg/d7NkDKm"><img alt="Chat on Discord" src="https://img.shields.io/discord/753336465005608961?logo=discord"></a> <a href="https://discord.gg/d7NkDKm"><img alt="Chat on Discord" src="https://img.shields.io/discord/753336465005608961?logo=discord"></a>
</p> </p>
@@ -48,8 +48,6 @@ The project is split up into several crates in the `/crates` directory:
Fully working examples of how to use these components are in `/example-crates`: Fully working examples of how to use these components are in `/example-crates`:
- [`example_cli`](./example-crates/example_cli): Library used by the `example_*` crates. Provides utilities for syncing, showing the balance, generating addresses and creating transactions without using the bdk `Wallet`. - [`example_cli`](./example-crates/example_cli): Library used by the `example_*` crates. Provides utilities for syncing, showing the balance, generating addresses and creating transactions without using the bdk `Wallet`.
- [`example_electrum`](./example-crates/example_electrum): A command line Bitcoin wallet application built on top of `example_cli` and the `electrum` crate. It shows the power of the bdk tools (`chain` + `file_store` + `electrum`), without depending on the main `bdk` library. - [`example_electrum`](./example-crates/example_electrum): A command line Bitcoin wallet application built on top of `example_cli` and the `electrum` crate. It shows the power of the bdk tools (`chain` + `file_store` + `electrum`), without depending on the main `bdk` library.
- [`example_esplora`](./example-crates/example_esplora): A command line Bitcoin wallet application built on top of `example_cli` and the `esplora` crate. It shows the power of the bdk tools (`chain` + `file_store` + `esplora`), without depending on the main `bdk` library.
- [`example_bitcoind_rpc_polling`](./example-crates/example_bitcoind_rpc_polling): A command line Bitcoin wallet application built on top of `example_cli` and the `bitcoind_rpc` crate. It shows the power of the bdk tools (`chain` + `file_store` + `bitcoind_rpc`), without depending on the main `bdk` library.
- [`wallet_esplora_blocking`](./example-crates/wallet_esplora_blocking): Uses the `Wallet` to sync and spend using the Esplora blocking interface. - [`wallet_esplora_blocking`](./example-crates/wallet_esplora_blocking): Uses the `Wallet` to sync and spend using the Esplora blocking interface.
- [`wallet_esplora_async`](./example-crates/wallet_esplora_async): Uses the `Wallet` to sync and spend using the Esplora asynchronous interface. - [`wallet_esplora_async`](./example-crates/wallet_esplora_async): Uses the `Wallet` to sync and spend using the Esplora asynchronous interface.
- [`wallet_electrum`](./example-crates/wallet_electrum): Uses the `Wallet` to sync and spend using Electrum. - [`wallet_electrum`](./example-crates/wallet_electrum): Uses the `Wallet` to sync and spend using Electrum.
@@ -62,19 +60,51 @@ Fully working examples of how to use these components are in `/example-crates`:
[`bdk_chain`]: https://docs.rs/bdk-chain/ [`bdk_chain`]: https://docs.rs/bdk-chain/
## Minimum Supported Rust Version (MSRV) ## Minimum Supported Rust Version (MSRV)
This library should compile with any combination of features with Rust 1.63.0. This library should compile with any combination of features with Rust 1.57.0.
To build with the MSRV you will need to pin dependencies as follows: To build with the MSRV you will need to pin dependencies as follows:
```shell ```shell
# zip 0.6.3 has MSRV 1.64.0 # log 0.4.19 has MSRV 1.60.0+
cargo update -p log --precise "0.4.18"
# tempfile 3.7.0 has MSRV 1.63.0+
cargo update -p tempfile --precise "3.6.0"
# reqwest 0.11.19 has MSRV 1.63.0+
cargo update -p reqwest --precise "0.11.18"
# hyper-rustls 0.24.1 has MSRV 1.60.0+
cargo update -p hyper-rustls --precise 0.24.0
# rustls 0.21.7 has MSRV 1.60.0+
cargo update -p rustls:0.21.9 --precise "0.21.1"
# rustls 0.20.9 has MSRV 1.60.0+
cargo update -p rustls:0.20.9 --precise "0.20.8"
# tokio 1.33 has MSRV 1.63.0+
cargo update -p tokio --precise "1.29.1"
# tokio-util 0.7.9 doesn't build with MSRV 1.57.0
cargo update -p tokio-util --precise "0.7.8"
# flate2 1.0.27 has MSRV 1.63.0+
cargo update -p flate2 --precise "1.0.26"
# h2 0.3.21 has MSRV 1.63.0+
cargo update -p h2 --precise "0.3.20"
# rustls-webpki 0.100.3 has MSRV 1.60.0+
cargo update -p rustls-webpki:0.100.3 --precise "0.100.1"
# rustls-webpki 0.101.2 has MSRV 1.60.0+
cargo update -p rustls-webpki:0.101.7 --precise "0.101.1"
# zip 0.6.6 has MSRV 1.59.0+
cargo update -p zip --precise "0.6.2" cargo update -p zip --precise "0.6.2"
# time 0.3.21 has MSRV 1.65.0 # time 0.3.14 has MSRV 1.59.0+
cargo update -p time --precise "0.3.20" cargo update -p time --precise "0.3.13"
# jobserver 0.1.27 has MSRV 1.66.0 # byteorder 1.5.0 has MSRV 1.60.0+
cargo update -p byteorder --precise "1.4.3"
# webpki 0.22.4 requires `ring:0.17.2` which has MSRV 1.61.0+
cargo update -p webpki --precise "0.22.2"
# os_str_bytes 6.6.0 has MSRV 1.61.0+
cargo update -p os_str_bytes --precise 6.5.1
# sct 0.7.1 has MSRV 1.61.0+
cargo update -p sct --precise 0.7.0
# cc 1.0.82 has MSRV 1.61.0+
cargo update -p cc --precise "1.0.81"
# jobserver 0.1.27 has MSRV 1.66.0+
cargo update -p jobserver --precise "0.1.26" cargo update -p jobserver --precise "0.1.26"
# home 0.5.9 has MSRV 1.70.0
cargo update -p home --precise "0.5.5"
``` ```
## License ## License

View File

@@ -1 +1 @@
msrv="1.63.0" msrv="1.57.0"

View File

@@ -1,7 +1,7 @@
[package] [package]
name = "bdk" name = "bdk"
homepage = "https://bitcoindevkit.org" homepage = "https://bitcoindevkit.org"
version = "1.0.0-alpha.8" version = "1.0.0-alpha.2"
repository = "https://github.com/bitcoindevkit/bdk" repository = "https://github.com/bitcoindevkit/bdk"
documentation = "https://docs.rs/bdk" documentation = "https://docs.rs/bdk"
description = "A modern, lightweight, descriptor-based wallet library" description = "A modern, lightweight, descriptor-based wallet library"
@@ -10,7 +10,7 @@ readme = "README.md"
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
authors = ["Bitcoin Dev Kit Developers"] authors = ["Bitcoin Dev Kit Developers"]
edition = "2021" edition = "2021"
rust-version = "1.63" rust-version = "1.57"
[dependencies] [dependencies]
rand = "^0.8" rand = "^0.8"
@@ -18,10 +18,11 @@ miniscript = { version = "10.0.0", features = ["serde"], default-features = fals
bitcoin = { version = "0.30.0", features = ["serde", "base64", "rand-std"], default-features = false } bitcoin = { version = "0.30.0", features = ["serde", "base64", "rand-std"], default-features = false }
serde = { version = "^1.0", features = ["derive"] } serde = { version = "^1.0", features = ["derive"] }
serde_json = { version = "^1.0" } serde_json = { version = "^1.0" }
bdk_chain = { path = "../chain", version = "0.11.0", features = ["miniscript", "serde"], default-features = false } bdk_chain = { path = "../chain", version = "0.6.0", features = ["miniscript", "serde"], default-features = false }
# Optional dependencies # Optional dependencies
bip39 = { version = "2.0", optional = true } hwi = { version = "0.7.0", optional = true, features = [ "miniscript"] }
bip39 = { version = "1.0.1", optional = true }
[target.'cfg(target_arch = "wasm32")'.dependencies] [target.'cfg(target_arch = "wasm32")'.dependencies]
getrandom = "0.2" getrandom = "0.2"
@@ -33,6 +34,8 @@ std = ["bitcoin/std", "miniscript/std", "bdk_chain/std"]
compiler = ["miniscript/compiler"] compiler = ["miniscript/compiler"]
all-keys = ["keys-bip39"] all-keys = ["keys-bip39"]
keys-bip39 = ["bip39"] keys-bip39 = ["bip39"]
hardware-signer = ["hwi"]
test-hardware-signer = ["hardware-signer"]
# This feature is used to run `cargo check` in our CI targeting wasm. It's not recommended # This feature is used to run `cargo check` in our CI targeting wasm. It's not recommended
# for libraries to explicitly include the "getrandom/js" feature, so we only do it when # for libraries to explicitly include the "getrandom/js" feature, so we only do it when

View File

@@ -13,7 +13,7 @@
<a href="https://github.com/bitcoindevkit/bdk/actions?query=workflow%3ACI"><img alt="CI Status" src="https://github.com/bitcoindevkit/bdk/workflows/CI/badge.svg"></a> <a href="https://github.com/bitcoindevkit/bdk/actions?query=workflow%3ACI"><img alt="CI Status" src="https://github.com/bitcoindevkit/bdk/workflows/CI/badge.svg"></a>
<a href="https://coveralls.io/github/bitcoindevkit/bdk?branch=master"><img src="https://coveralls.io/repos/github/bitcoindevkit/bdk/badge.svg?branch=master"/></a> <a href="https://coveralls.io/github/bitcoindevkit/bdk?branch=master"><img src="https://coveralls.io/repos/github/bitcoindevkit/bdk/badge.svg?branch=master"/></a>
<a href="https://docs.rs/bdk"><img alt="API Docs" src="https://img.shields.io/badge/docs.rs-bdk-green"/></a> <a href="https://docs.rs/bdk"><img alt="API Docs" src="https://img.shields.io/badge/docs.rs-bdk-green"/></a>
<a href="https://blog.rust-lang.org/2022/08/11/Rust-1.63.0.html"><img alt="Rustc Version 1.63.0+" src="https://img.shields.io/badge/rustc-1.63.0%2B-lightgrey.svg"/></a> <a href="https://blog.rust-lang.org/2021/12/02/Rust-1.57.0.html"><img alt="Rustc Version 1.57.0+" src="https://img.shields.io/badge/rustc-1.57.0%2B-lightgrey.svg"/></a>
<a href="https://discord.gg/d7NkDKm"><img alt="Chat on Discord" src="https://img.shields.io/discord/753336465005608961?logo=discord"></a> <a href="https://discord.gg/d7NkDKm"><img alt="Chat on Discord" src="https://img.shields.io/discord/753336465005608961?logo=discord"></a>
</p> </p>
@@ -26,7 +26,7 @@
## `bdk` ## `bdk`
The `bdk` crate provides the [`Wallet`] type which is a simple, high-level The `bdk` crate provides the [`Wallet`](`crate::Wallet`) type which is a simple, high-level
interface built from the low-level components of [`bdk_chain`]. `Wallet` is a good starting point interface built from the low-level components of [`bdk_chain`]. `Wallet` is a good starting point
for many simple applications as well as a good demonstration of how to use the other mechanisms to for many simple applications as well as a good demonstration of how to use the other mechanisms to
construct a wallet. It has two keychains (external and internal) which are defined by construct a wallet. It has two keychains (external and internal) which are defined by
@@ -34,51 +34,51 @@ construct a wallet. It has two keychains (external and internal) which are defin
chain data it also uses the descriptors to find transaction outputs owned by them. From there, you chain data it also uses the descriptors to find transaction outputs owned by them. From there, you
can create and sign transactions. can create and sign transactions.
For details about the API of `Wallet` see the [module-level documentation][`Wallet`]. For more information, see the [`Wallet`'s documentation](https://docs.rs/bdk/latest/bdk/wallet/struct.Wallet.html).
### Blockchain data ### Blockchain data
In order to get blockchain data for `Wallet` to consume, you should configure a client from In order to get blockchain data for `Wallet` to consume, you have to put it into particular form.
an available chain source. Typically you make a request to the chain source and get a response Right now this is [`KeychainScan`] which is defined in [`bdk_chain`].
that the `Wallet` can use to update its view of the chain.
This can be created manually or from blockchain-scanning crates.
**Blockchain Data Sources** **Blockchain Data Sources**
* [`bdk_esplora`]: Grabs blockchain data from Esplora for updating BDK structures. * [`bdk_esplora`]: Grabs blockchain data from Esplora for updating BDK structures.
* [`bdk_electrum`]: Grabs blockchain data from Electrum for updating BDK structures. * [`bdk_electrum`]: Grabs blockchain data from Electrum for updating BDK structures.
* [`bdk_bitcoind_rpc`]: Grabs blockchain data from Bitcoin Core for updating BDK structures.
**Examples** **Examples**
* [`example-crates/wallet_esplora_async`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora_async) * [`example-crates/wallet_esplora`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora)
* [`example-crates/wallet_esplora_blocking`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora_blocking)
* [`example-crates/wallet_electrum`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_electrum) * [`example-crates/wallet_electrum`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_electrum)
* [`example-crates/wallet_rpc`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_rpc)
### Persistence ### Persistence
To persist the `Wallet` on disk, it must be constructed with a [`PersistBackend`] implementation. To persist the `Wallet` on disk, `Wallet` needs to be constructed with a
[`Persist`](https://docs.rs/bdk_chain/latest/bdk_chain/keychain/struct.KeychainPersist.html) implementation.
**Implementations** **Implementations**
* [`bdk_file_store`]: A simple flat-file implementation of [`PersistBackend`]. * [`bdk_file_store`]: a simple flat-file implementation of `Persist`.
**Example** **Example**
<!-- compile_fail because outpoint and txout are fake variables --> ```rust
```rust,compile_fail use bdk::{bitcoin::Network, wallet::{AddressIndex, Wallet}};
use bdk::{bitcoin::Network, wallet::{ChangeSet, Wallet}};
fn main() { fn main() {
// Create a new file `Store`. // a type that implements `Persist`
let db = bdk_file_store::Store::<ChangeSet>::open_or_create_new(b"magic_bytes", "path/to/my_wallet.db").expect("create store"); let db = ();
let descriptor = "wpkh(tprv8ZgxMBicQKsPdcAqYBpzAFwU5yxBUo88ggoBqu1qPcHUfSbKK1sKMLmC7EAk438btHQrSdu3jGGQa6PA71nvH5nkDexhLteJqkM4dQmWF9g/84'/1'/0'/0/*)"; let descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/0'/0'/0/*)";
let mut wallet = Wallet::new_or_load(descriptor, None, db, Network::Testnet).expect("create or load wallet"); let mut wallet = Wallet::new(descriptor, None, db, Network::Testnet).expect("should create");
// Insert a single `TxOut` at `OutPoint` into the wallet. // get a new address (this increments revealed derivation index)
let _ = wallet.insert_txout(outpoint, txout); println!("revealed address: {}", wallet.get_address(AddressIndex::New));
wallet.commit().expect("must write to database"); println!("staged changes: {:?}", wallet.staged());
// persist changes
wallet.commit().expect("must save");
} }
``` ```
@@ -218,11 +218,9 @@ submitted for inclusion in the work by you, as defined in the Apache-2.0
license, shall be dual licensed as above, without any additional terms or license, shall be dual licensed as above, without any additional terms or
conditions. conditions.
[`Wallet`]: https://docs.rs/bdk/1.0.0-alpha.7/bdk/wallet/struct.Wallet.html
[`PersistBackend`]: https://docs.rs/bdk_chain/latest/bdk_chain/trait.PersistBackend.html
[`bdk_chain`]: https://docs.rs/bdk_chain/latest [`bdk_chain`]: https://docs.rs/bdk_chain/latest
[`bdk_file_store`]: https://docs.rs/bdk_file_store/latest [`bdk_file_store`]: https://docs.rs/bdk_file_store/latest
[`bdk_electrum`]: https://docs.rs/bdk_electrum/latest [`bdk_electrum`]: https://docs.rs/bdk_electrum/latest
[`bdk_esplora`]: https://docs.rs/bdk_esplora/latest [`bdk_esplora`]: https://docs.rs/bdk_esplora/latest
[`bdk_bitcoind_rpc`]: https://docs.rs/bdk_bitcoind_rpc/latest [`KeychainScan`]: https://docs.rs/bdk_chain/latest/bdk_chain/keychain/struct.KeychainScan.html
[`rust-miniscript`]: https://docs.rs/miniscript/latest/miniscript/index.html [`rust-miniscript`]: https://docs.rs/miniscript/latest/miniscript/index.html

View File

@@ -33,8 +33,8 @@ fn main() -> Result<(), anyhow::Error> {
let mnemonic_with_passphrase = (mnemonic, None); let mnemonic_with_passphrase = (mnemonic, None);
// define external and internal derivation key path // define external and internal derivation key path
let external_path = DerivationPath::from_str("m/86h/1h/0h/0").unwrap(); let external_path = DerivationPath::from_str("m/86h/0h/0h/0").unwrap();
let internal_path = DerivationPath::from_str("m/86h/1h/0h/1").unwrap(); let internal_path = DerivationPath::from_str("m/86h/0h/0h/1").unwrap();
// generate external and internal descriptor from mnemonic // generate external and internal descriptor from mnemonic
let (external_descriptor, ext_keymap) = let (external_descriptor, ext_keymap) =

View File

@@ -42,16 +42,22 @@ fn poly_mod(mut c: u64, val: u64) -> u64 {
c c
} }
/// Compute the checksum bytes of a descriptor, excludes any existing checksum in the descriptor string from the calculation /// Computes the checksum bytes of a descriptor.
pub fn calc_checksum_bytes(mut desc: &str) -> Result<[u8; 8], DescriptorError> { /// `exclude_hash = true` ignores all data after the first '#' (inclusive).
pub(crate) fn calc_checksum_bytes_internal(
mut desc: &str,
exclude_hash: bool,
) -> Result<[u8; 8], DescriptorError> {
let mut c = 1; let mut c = 1;
let mut cls = 0; let mut cls = 0;
let mut clscount = 0; let mut clscount = 0;
let mut original_checksum = None; let mut original_checksum = None;
if let Some(split) = desc.split_once('#') { if exclude_hash {
desc = split.0; if let Some(split) = desc.split_once('#') {
original_checksum = Some(split.1); desc = split.0;
original_checksum = Some(split.1);
}
} }
for ch in desc.as_bytes() { for ch in desc.as_bytes() {
@@ -89,10 +95,39 @@ pub fn calc_checksum_bytes(mut desc: &str) -> Result<[u8; 8], DescriptorError> {
Ok(checksum) Ok(checksum)
} }
/// Compute the checksum bytes of a descriptor, excludes any existing checksum in the descriptor string from the calculation
pub fn calc_checksum_bytes(desc: &str) -> Result<[u8; 8], DescriptorError> {
calc_checksum_bytes_internal(desc, true)
}
/// Compute the checksum of a descriptor, excludes any existing checksum in the descriptor string from the calculation /// Compute the checksum of a descriptor, excludes any existing checksum in the descriptor string from the calculation
pub fn calc_checksum(desc: &str) -> Result<String, DescriptorError> { pub fn calc_checksum(desc: &str) -> Result<String, DescriptorError> {
// unsafe is okay here as the checksum only uses bytes in `CHECKSUM_CHARSET` // unsafe is okay here as the checksum only uses bytes in `CHECKSUM_CHARSET`
calc_checksum_bytes(desc).map(|b| unsafe { String::from_utf8_unchecked(b.to_vec()) }) calc_checksum_bytes_internal(desc, true)
.map(|b| unsafe { String::from_utf8_unchecked(b.to_vec()) })
}
// TODO in release 0.25.0, remove get_checksum_bytes and get_checksum
// TODO in release 0.25.0, consolidate calc_checksum_bytes_internal into calc_checksum_bytes
/// Compute the checksum bytes of a descriptor
#[deprecated(
since = "0.24.0",
note = "Use new `calc_checksum_bytes` function which excludes any existing checksum in the descriptor string before calculating the checksum hash bytes. See https://github.com/bitcoindevkit/bdk/pull/765."
)]
pub fn get_checksum_bytes(desc: &str) -> Result<[u8; 8], DescriptorError> {
calc_checksum_bytes_internal(desc, false)
}
/// Compute the checksum of a descriptor
#[deprecated(
since = "0.24.0",
note = "Use new `calc_checksum` function which excludes any existing checksum in the descriptor string before calculating the checksum hash. See https://github.com/bitcoindevkit/bdk/pull/765."
)]
pub fn get_checksum(desc: &str) -> Result<String, DescriptorError> {
// unsafe is okay here as the checksum only uses bytes in `CHECKSUM_CHARSET`
calc_checksum_bytes_internal(desc, false)
.map(|b| unsafe { String::from_utf8_unchecked(b.to_vec()) })
} }
#[cfg(test)] #[cfg(test)]

View File

@@ -575,7 +575,7 @@ mod test {
if let ExtendedDescriptor::Pkh(pkh) = xdesc.0 { if let ExtendedDescriptor::Pkh(pkh) = xdesc.0 {
let path: Vec<ChildNumber> = pkh.into_inner().full_derivation_path().unwrap().into(); let path: Vec<ChildNumber> = pkh.into_inner().full_derivation_path().unwrap().into();
let purpose = path.first().unwrap(); let purpose = path.get(0).unwrap();
assert_matches!(purpose, Hardened { index: 44 }); assert_matches!(purpose, Hardened { index: 44 });
let coin_type = path.get(1).unwrap(); let coin_type = path.get(1).unwrap();
assert_matches!(coin_type, Hardened { index: 0 }); assert_matches!(coin_type, Hardened { index: 0 });
@@ -589,7 +589,7 @@ mod test {
if let ExtendedDescriptor::Pkh(pkh) = tdesc.0 { if let ExtendedDescriptor::Pkh(pkh) = tdesc.0 {
let path: Vec<ChildNumber> = pkh.into_inner().full_derivation_path().unwrap().into(); let path: Vec<ChildNumber> = pkh.into_inner().full_derivation_path().unwrap().into();
let purpose = path.first().unwrap(); let purpose = path.get(0).unwrap();
assert_matches!(purpose, Hardened { index: 44 }); assert_matches!(purpose, Hardened { index: 44 });
let coin_type = path.get(1).unwrap(); let coin_type = path.get(1).unwrap();
assert_matches!(coin_type, Hardened { index: 1 }); assert_matches!(coin_type, Hardened { index: 1 });

View File

@@ -17,6 +17,8 @@ extern crate std;
pub extern crate alloc; pub extern crate alloc;
pub extern crate bitcoin; pub extern crate bitcoin;
#[cfg(feature = "hardware-signer")]
pub extern crate hwi;
pub extern crate miniscript; pub extern crate miniscript;
extern crate serde; extern crate serde;
extern crate serde_json; extern crate serde_json;

View File

@@ -11,10 +11,9 @@
//! Additional functions on the `rust-bitcoin` `PartiallySignedTransaction` structure. //! Additional functions on the `rust-bitcoin` `PartiallySignedTransaction` structure.
use crate::FeeRate;
use alloc::vec::Vec; use alloc::vec::Vec;
use bitcoin::psbt::PartiallySignedTransaction as Psbt; use bitcoin::psbt::PartiallySignedTransaction as Psbt;
use bitcoin::Amount;
use bitcoin::FeeRate;
use bitcoin::TxOut; use bitcoin::TxOut;
// TODO upstream the functions here to `rust-bitcoin`? // TODO upstream the functions here to `rust-bitcoin`?
@@ -36,16 +35,24 @@ pub trait PsbtUtils {
} }
impl PsbtUtils for Psbt { impl PsbtUtils for Psbt {
#[allow(clippy::all)] // We want to allow `manual_map` but it is too new.
fn get_utxo_for(&self, input_index: usize) -> Option<TxOut> { fn get_utxo_for(&self, input_index: usize) -> Option<TxOut> {
let tx = &self.unsigned_tx; let tx = &self.unsigned_tx;
let input = self.inputs.get(input_index)?;
match (&input.witness_utxo, &input.non_witness_utxo) { if input_index >= tx.input.len() {
(Some(_), _) => input.witness_utxo.clone(), return None;
(_, Some(_)) => input.non_witness_utxo.as_ref().map(|in_tx| { }
in_tx.output[tx.input[input_index].previous_output.vout as usize].clone()
}), if let Some(input) = self.inputs.get(input_index) {
_ => None, if let Some(wit_utxo) = &input.witness_utxo {
Some(wit_utxo.clone())
} else if let Some(in_tx) = &input.non_witness_utxo {
Some(in_tx.output[tx.input[input_index].previous_output.vout as usize].clone())
} else {
None
}
} else {
None
} }
} }
@@ -66,7 +73,7 @@ impl PsbtUtils for Psbt {
let fee_amount = self.fee_amount(); let fee_amount = self.fee_amount();
fee_amount.map(|fee| { fee_amount.map(|fee| {
let weight = self.clone().extract_tx().weight(); let weight = self.clone().extract_tx().weight();
Amount::from_sat(fee) / weight FeeRate::from_wu(fee, weight)
}) })
} }
} }

View File

@@ -11,10 +11,11 @@
use alloc::boxed::Box; use alloc::boxed::Box;
use core::convert::AsRef; use core::convert::AsRef;
use core::ops::Sub;
use bdk_chain::ConfirmationTime; use bdk_chain::ConfirmationTime;
use bitcoin::blockdata::transaction::{OutPoint, Sequence, TxOut}; use bitcoin::blockdata::transaction::{OutPoint, TxOut};
use bitcoin::psbt; use bitcoin::{psbt, Weight};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@@ -46,6 +47,116 @@ impl AsRef<[u8]> for KeychainKind {
} }
} }
/// Fee rate
#[derive(Debug, Copy, Clone, PartialEq, PartialOrd)]
// Internally stored as satoshi/vbyte
pub struct FeeRate(f32);
impl FeeRate {
/// Create a new instance checking the value provided
///
/// ## Panics
///
/// Panics if the value is not [normal](https://doc.rust-lang.org/std/primitive.f32.html#method.is_normal) (except if it's a positive zero) or negative.
fn new_checked(value: f32) -> Self {
assert!(value.is_normal() || value == 0.0);
assert!(value.is_sign_positive());
FeeRate(value)
}
/// Create a new instance of [`FeeRate`] given a float fee rate in sats/kwu
pub fn from_sat_per_kwu(sat_per_kwu: f32) -> Self {
FeeRate::new_checked(sat_per_kwu / 250.0_f32)
}
/// Create a new instance of [`FeeRate`] given a float fee rate in sats/kvb
pub fn from_sat_per_kvb(sat_per_kvb: f32) -> Self {
FeeRate::new_checked(sat_per_kvb / 1000.0_f32)
}
/// Create a new instance of [`FeeRate`] given a float fee rate in btc/kvbytes
///
/// ## Panics
///
/// Panics if the value is not [normal](https://doc.rust-lang.org/std/primitive.f32.html#method.is_normal) (except if it's a positive zero) or negative.
pub fn from_btc_per_kvb(btc_per_kvb: f32) -> Self {
FeeRate::new_checked(btc_per_kvb * 1e5)
}
/// Create a new instance of [`FeeRate`] given a float fee rate in satoshi/vbyte
///
/// ## Panics
///
/// Panics if the value is not [normal](https://doc.rust-lang.org/std/primitive.f32.html#method.is_normal) (except if it's a positive zero) or negative.
pub fn from_sat_per_vb(sat_per_vb: f32) -> Self {
FeeRate::new_checked(sat_per_vb)
}
/// Create a new [`FeeRate`] with the default min relay fee value
pub const fn default_min_relay_fee() -> Self {
FeeRate(1.0)
}
/// Calculate fee rate from `fee` and weight units (`wu`).
pub fn from_wu(fee: u64, wu: Weight) -> FeeRate {
Self::from_vb(fee, wu.to_vbytes_ceil() as usize)
}
/// Calculate fee rate from `fee` and `vbytes`.
pub fn from_vb(fee: u64, vbytes: usize) -> FeeRate {
let rate = fee as f32 / vbytes as f32;
Self::from_sat_per_vb(rate)
}
/// Return the value as satoshi/vbyte
pub fn as_sat_per_vb(&self) -> f32 {
self.0
}
/// Return the value as satoshi/kwu
pub fn sat_per_kwu(&self) -> f32 {
self.0 * 250.0_f32
}
/// Calculate absolute fee in Satoshis using size in weight units.
pub fn fee_wu(&self, wu: Weight) -> u64 {
self.fee_vb(wu.to_vbytes_ceil() as usize)
}
/// Calculate absolute fee in Satoshis using size in virtual bytes.
pub fn fee_vb(&self, vbytes: usize) -> u64 {
(self.as_sat_per_vb() * vbytes as f32).ceil() as u64
}
}
impl Default for FeeRate {
fn default() -> Self {
FeeRate::default_min_relay_fee()
}
}
/// Component-wise subtraction of two fee rates (sat/vb).
impl Sub for FeeRate {
    type Output = Self;

    fn sub(self, other: FeeRate) -> Self::Output {
        // NOTE(review): constructs the result directly, bypassing the
        // validation in `new_checked`; if `other > self` the resulting rate
        // is negative, violating the non-negative invariant enforced by the
        // constructors — confirm callers never rely on that.
        FeeRate(self.0 - other.0)
    }
}
/// Trait implemented by types that can be used to measure weight units.
pub trait Vbytes {
    /// Convert weight units to virtual bytes.
    ///
    /// One virtual byte corresponds to 4 weight units; the result is rounded
    /// up, per the BIP-141 transaction size calculations.
    fn vbytes(self) -> usize;
}
impl Vbytes for usize {
    /// Convert a size in weight units to virtual bytes, rounding up.
    fn vbytes(self) -> usize {
        // ref: https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki#transaction-size-calculations
        // Integer ceiling division instead of the previous `f32` round trip:
        // an f32 mantissa has only 24 bits, so `(self as f32 / 4.0).ceil()`
        // returned wrong results for weights above 2^24. This form is exact
        // for all inputs and, unlike `(self + 3) / 4`, cannot overflow.
        self / 4 + usize::from(self % 4 != 0)
    }
}
/// An unspent output owned by a [`Wallet`]. /// An unspent output owned by a [`Wallet`].
/// ///
/// [`Wallet`]: crate::Wallet /// [`Wallet`]: crate::Wallet
@@ -86,8 +197,6 @@ pub enum Utxo {
Foreign { Foreign {
/// The location of the output. /// The location of the output.
outpoint: OutPoint, outpoint: OutPoint,
/// The nSequence value to set for this input.
sequence: Option<Sequence>,
/// The information about the input we require to add it to a PSBT. /// The information about the input we require to add it to a PSBT.
// Box it to stop the type being too big. // Box it to stop the type being too big.
psbt_input: Box<psbt::Input>, psbt_input: Box<psbt::Input>,
@@ -110,7 +219,6 @@ impl Utxo {
Utxo::Foreign { Utxo::Foreign {
outpoint, outpoint,
psbt_input, psbt_input,
..
} => { } => {
if let Some(prev_tx) = &psbt_input.non_witness_utxo { if let Some(prev_tx) = &psbt_input.non_witness_utxo {
return &prev_tx.output[outpoint.vout as usize]; return &prev_tx.output[outpoint.vout as usize];
@@ -124,12 +232,74 @@ impl Utxo {
} }
} }
} }
}
/// Get the sequence number if an explicit sequence number has to be set for this input. #[cfg(test)]
pub fn sequence(&self) -> Option<Sequence> { mod tests {
match self { use super::*;
Utxo::Local(_) => None,
Utxo::Foreign { sequence, .. } => *sequence, #[test]
} fn can_store_feerate_in_const() {
const _MIN_RELAY: FeeRate = FeeRate::default_min_relay_fee();
}
#[test]
#[should_panic]
fn test_invalid_feerate_neg_zero() {
let _ = FeeRate::from_sat_per_vb(-0.0);
}
#[test]
#[should_panic]
fn test_invalid_feerate_neg_value() {
let _ = FeeRate::from_sat_per_vb(-5.0);
}
#[test]
#[should_panic]
fn test_invalid_feerate_nan() {
let _ = FeeRate::from_sat_per_vb(f32::NAN);
}
#[test]
#[should_panic]
fn test_invalid_feerate_inf() {
let _ = FeeRate::from_sat_per_vb(f32::INFINITY);
}
#[test]
fn test_valid_feerate_pos_zero() {
let _ = FeeRate::from_sat_per_vb(0.0);
}
#[test]
fn test_fee_from_btc_per_kvb() {
let fee = FeeRate::from_btc_per_kvb(1e-5);
assert!((fee.as_sat_per_vb() - 1.0).abs() < f32::EPSILON);
}
#[test]
fn test_fee_from_sat_per_vbyte() {
let fee = FeeRate::from_sat_per_vb(1.0);
assert!((fee.as_sat_per_vb() - 1.0).abs() < f32::EPSILON);
}
#[test]
fn test_fee_default_min_relay_fee() {
let fee = FeeRate::default_min_relay_fee();
assert!((fee.as_sat_per_vb() - 1.0).abs() < f32::EPSILON);
}
#[test]
fn test_fee_from_sat_per_kvb() {
let fee = FeeRate::from_sat_per_kvb(1000.0);
assert!((fee.as_sat_per_vb() - 1.0).abs() < f32::EPSILON);
}
#[test]
fn test_fee_from_sat_per_kwu() {
let fee = FeeRate::from_sat_per_kwu(250.0);
assert!((fee.as_sat_per_vb() - 1.0).abs() < f32::EPSILON);
assert_eq!(fee.sat_per_kwu(), 250.0);
} }
} }

View File

@@ -41,7 +41,7 @@
//! &self, //! &self,
//! required_utxos: Vec<WeightedUtxo>, //! required_utxos: Vec<WeightedUtxo>,
//! optional_utxos: Vec<WeightedUtxo>, //! optional_utxos: Vec<WeightedUtxo>,
//! fee_rate: FeeRate, //! fee_rate: bdk::FeeRate,
//! target_amount: u64, //! target_amount: u64,
//! drain_script: &Script, //! drain_script: &Script,
//! ) -> Result<CoinSelectionResult, coin_selection::Error> { //! ) -> Result<CoinSelectionResult, coin_selection::Error> {
@@ -61,7 +61,7 @@
//! }, //! },
//! ) //! )
//! .collect::<Vec<_>>(); //! .collect::<Vec<_>>();
//! let additional_fees = (fee_rate * additional_weight).to_sat(); //! let additional_fees = fee_rate.fee_wu(additional_weight);
//! let amount_needed_with_fees = additional_fees + target_amount; //! let amount_needed_with_fees = additional_fees + target_amount;
//! if selected_amount < amount_needed_with_fees { //! if selected_amount < amount_needed_with_fees {
//! return Err(coin_selection::Error::InsufficientFunds { //! return Err(coin_selection::Error::InsufficientFunds {
@@ -100,15 +100,13 @@
//! # Ok::<(), anyhow::Error>(()) //! # Ok::<(), anyhow::Error>(())
//! ``` //! ```
use crate::chain::collections::HashSet; use crate::types::FeeRate;
use crate::wallet::utils::IsDust; use crate::wallet::utils::IsDust;
use crate::Utxo; use crate::Utxo;
use crate::WeightedUtxo; use crate::WeightedUtxo;
use bitcoin::FeeRate;
use alloc::vec::Vec; use alloc::vec::Vec;
use bitcoin::consensus::encode::serialize; use bitcoin::consensus::encode::serialize;
use bitcoin::OutPoint;
use bitcoin::{Script, Weight}; use bitcoin::{Script, Weight};
use core::convert::TryInto; use core::convert::TryInto;
@@ -313,8 +311,7 @@ impl CoinSelectionAlgorithm for OldestFirstCoinSelection {
pub fn decide_change(remaining_amount: u64, fee_rate: FeeRate, drain_script: &Script) -> Excess { pub fn decide_change(remaining_amount: u64, fee_rate: FeeRate, drain_script: &Script) -> Excess {
// drain_output_len = size(len(script_pubkey)) + len(script_pubkey) + size(output_value) // drain_output_len = size(len(script_pubkey)) + len(script_pubkey) + size(output_value)
let drain_output_len = serialize(drain_script).len() + 8usize; let drain_output_len = serialize(drain_script).len() + 8usize;
let change_fee = let change_fee = fee_rate.fee_vb(drain_output_len);
(fee_rate * Weight::from_vb(drain_output_len as u64).expect("overflow occurred")).to_sat();
let drain_val = remaining_amount.saturating_sub(change_fee); let drain_val = remaining_amount.saturating_sub(change_fee);
if drain_val.is_dust(drain_script) { if drain_val.is_dust(drain_script) {
@@ -345,12 +342,9 @@ fn select_sorted_utxos(
(&mut selected_amount, &mut fee_amount), (&mut selected_amount, &mut fee_amount),
|(selected_amount, fee_amount), (must_use, weighted_utxo)| { |(selected_amount, fee_amount), (must_use, weighted_utxo)| {
if must_use || **selected_amount < target_amount + **fee_amount { if must_use || **selected_amount < target_amount + **fee_amount {
**fee_amount += (fee_rate **fee_amount += fee_rate.fee_wu(Weight::from_wu(
* Weight::from_wu( (TXIN_BASE_WEIGHT + weighted_utxo.satisfaction_weight) as u64,
(TXIN_BASE_WEIGHT + weighted_utxo.satisfaction_weight) as u64, ));
))
.to_sat();
**selected_amount += weighted_utxo.utxo.txout().value; **selected_amount += weighted_utxo.utxo.txout().value;
Some(weighted_utxo.utxo) Some(weighted_utxo.utxo)
} else { } else {
@@ -391,10 +385,9 @@ struct OutputGroup {
impl OutputGroup { impl OutputGroup {
fn new(weighted_utxo: WeightedUtxo, fee_rate: FeeRate) -> Self { fn new(weighted_utxo: WeightedUtxo, fee_rate: FeeRate) -> Self {
let fee = (fee_rate let fee = fee_rate.fee_wu(Weight::from_wu(
* Weight::from_wu((TXIN_BASE_WEIGHT + weighted_utxo.satisfaction_weight) as u64)) (TXIN_BASE_WEIGHT + weighted_utxo.satisfaction_weight) as u64,
.to_sat(); ));
let effective_value = weighted_utxo.utxo.txout().value as i64 - fee as i64; let effective_value = weighted_utxo.utxo.txout().value as i64 - fee as i64;
OutputGroup { OutputGroup {
weighted_utxo, weighted_utxo,
@@ -461,8 +454,7 @@ impl CoinSelectionAlgorithm for BranchAndBoundCoinSelection {
.iter() .iter()
.fold(0, |acc, x| acc + x.effective_value); .fold(0, |acc, x| acc + x.effective_value);
let cost_of_change = let cost_of_change = self.size_of_change as f32 * fee_rate.as_sat_per_vb();
(Weight::from_vb(self.size_of_change).expect("overflow occurred") * fee_rate).to_sat();
// `curr_value` and `curr_available_value` are both the sum of *effective_values* of // `curr_value` and `curr_available_value` are both the sum of *effective_values* of
// the UTXOs. For the optional UTXOs (curr_available_value) we filter out UTXOs with // the UTXOs. For the optional UTXOs (curr_available_value) we filter out UTXOs with
@@ -553,7 +545,7 @@ impl BranchAndBoundCoinSelection {
mut curr_value: i64, mut curr_value: i64,
mut curr_available_value: i64, mut curr_available_value: i64,
target_amount: i64, target_amount: i64,
cost_of_change: u64, cost_of_change: f32,
drain_script: &Script, drain_script: &Script,
fee_rate: FeeRate, fee_rate: FeeRate,
) -> Result<CoinSelectionResult, Error> { ) -> Result<CoinSelectionResult, Error> {
@@ -719,36 +711,17 @@ impl BranchAndBoundCoinSelection {
} }
} }
/// Remove duplicate UTXOs.
///
/// If a UTXO appears in both `required` and `optional`, the appearance in `required` is kept.
pub(crate) fn filter_duplicates<I>(required: I, optional: I) -> (I, I)
where
I: IntoIterator<Item = WeightedUtxo> + FromIterator<WeightedUtxo>,
{
let mut visited = HashSet::<OutPoint>::new();
let required = required
.into_iter()
.filter(|utxo| visited.insert(utxo.utxo.outpoint()))
.collect::<I>();
let optional = optional
.into_iter()
.filter(|utxo| visited.insert(utxo.utxo.outpoint()))
.collect::<I>();
(required, optional)
}
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use assert_matches::assert_matches; use assert_matches::assert_matches;
use core::str::FromStr; use core::str::FromStr;
use bdk_chain::ConfirmationTime; use bdk_chain::ConfirmationTime;
use bitcoin::{Amount, OutPoint, ScriptBuf, TxOut}; use bitcoin::{OutPoint, ScriptBuf, TxOut};
use super::*; use super::*;
use crate::types::*; use crate::types::*;
use crate::wallet::coin_selection::filter_duplicates; use crate::wallet::Vbytes;
use rand::rngs::StdRng; use rand::rngs::StdRng;
use rand::seq::SliceRandom; use rand::seq::SliceRandom;
@@ -826,14 +799,13 @@ mod test {
fn generate_random_utxos(rng: &mut StdRng, utxos_number: usize) -> Vec<WeightedUtxo> { fn generate_random_utxos(rng: &mut StdRng, utxos_number: usize) -> Vec<WeightedUtxo> {
let mut res = Vec::new(); let mut res = Vec::new();
for i in 0..utxos_number { for _ in 0..utxos_number {
res.push(WeightedUtxo { res.push(WeightedUtxo {
satisfaction_weight: P2WPKH_SATISFACTION_SIZE, satisfaction_weight: P2WPKH_SATISFACTION_SIZE,
utxo: Utxo::Local(LocalOutput { utxo: Utxo::Local(LocalOutput {
outpoint: OutPoint::from_str(&format!( outpoint: OutPoint::from_str(
"ebd9813ecebc57ff8f30797de7c205e3c7498ca950ea4341ee51a685ff2fa30a:{}", "ebd9813ecebc57ff8f30797de7c205e3c7498ca950ea4341ee51a685ff2fa30a:0",
i )
))
.unwrap(), .unwrap(),
txout: TxOut { txout: TxOut {
value: rng.gen_range(0..200000000), value: rng.gen_range(0..200000000),
@@ -857,26 +829,24 @@ mod test {
} }
fn generate_same_value_utxos(utxos_value: u64, utxos_number: usize) -> Vec<WeightedUtxo> { fn generate_same_value_utxos(utxos_value: u64, utxos_number: usize) -> Vec<WeightedUtxo> {
(0..utxos_number) let utxo = WeightedUtxo {
.map(|i| WeightedUtxo { satisfaction_weight: P2WPKH_SATISFACTION_SIZE,
satisfaction_weight: P2WPKH_SATISFACTION_SIZE, utxo: Utxo::Local(LocalOutput {
utxo: Utxo::Local(LocalOutput { outpoint: OutPoint::from_str(
outpoint: OutPoint::from_str(&format!( "ebd9813ecebc57ff8f30797de7c205e3c7498ca950ea4341ee51a685ff2fa30a:0",
"ebd9813ecebc57ff8f30797de7c205e3c7498ca950ea4341ee51a685ff2fa30a:{}", )
i .unwrap(),
)) txout: TxOut {
.unwrap(), value: utxos_value,
txout: TxOut { script_pubkey: ScriptBuf::new(),
value: utxos_value, },
script_pubkey: ScriptBuf::new(), keychain: KeychainKind::External,
}, is_spent: false,
keychain: KeychainKind::External, derivation_index: 42,
is_spent: false, confirmation_time: ConfirmationTime::Unconfirmed { last_seen: 0 },
derivation_index: 42, }),
confirmation_time: ConfirmationTime::Unconfirmed { last_seen: 0 }, };
}), vec![utxo; utxos_number]
})
.collect()
} }
fn sum_random_utxos(mut rng: &mut StdRng, utxos: &mut Vec<WeightedUtxo>) -> u64 { fn sum_random_utxos(mut rng: &mut StdRng, utxos: &mut Vec<WeightedUtxo>) -> u64 {
@@ -898,7 +868,7 @@ mod test {
.coin_select( .coin_select(
utxos, utxos,
vec![], vec![],
FeeRate::from_sat_per_vb_unchecked(1), FeeRate::from_sat_per_vb(1.0),
target_amount, target_amount,
&drain_script, &drain_script,
) )
@@ -919,7 +889,7 @@ mod test {
.coin_select( .coin_select(
utxos, utxos,
vec![], vec![],
FeeRate::from_sat_per_vb_unchecked(1), FeeRate::from_sat_per_vb(1.0),
target_amount, target_amount,
&drain_script, &drain_script,
) )
@@ -940,7 +910,7 @@ mod test {
.coin_select( .coin_select(
vec![], vec![],
utxos, utxos,
FeeRate::from_sat_per_vb_unchecked(1), FeeRate::from_sat_per_vb(1.0),
target_amount, target_amount,
&drain_script, &drain_script,
) )
@@ -962,7 +932,7 @@ mod test {
.coin_select( .coin_select(
vec![], vec![],
utxos, utxos,
FeeRate::from_sat_per_vb_unchecked(1), FeeRate::from_sat_per_vb(1.0),
target_amount, target_amount,
&drain_script, &drain_script,
) )
@@ -980,7 +950,7 @@ mod test {
.coin_select( .coin_select(
vec![], vec![],
utxos, utxos,
FeeRate::from_sat_per_vb_unchecked(1000), FeeRate::from_sat_per_vb(1000.0),
target_amount, target_amount,
&drain_script, &drain_script,
) )
@@ -997,7 +967,7 @@ mod test {
.coin_select( .coin_select(
vec![], vec![],
utxos, utxos,
FeeRate::from_sat_per_vb_unchecked(1), FeeRate::from_sat_per_vb(1.0),
target_amount, target_amount,
&drain_script, &drain_script,
) )
@@ -1018,7 +988,7 @@ mod test {
.coin_select( .coin_select(
utxos, utxos,
vec![], vec![],
FeeRate::from_sat_per_vb_unchecked(1), FeeRate::from_sat_per_vb(1.0),
target_amount, target_amount,
&drain_script, &drain_script,
) )
@@ -1039,7 +1009,7 @@ mod test {
.coin_select( .coin_select(
vec![], vec![],
utxos, utxos,
FeeRate::from_sat_per_vb_unchecked(1), FeeRate::from_sat_per_vb(1.0),
target_amount, target_amount,
&drain_script, &drain_script,
) )
@@ -1061,7 +1031,7 @@ mod test {
.coin_select( .coin_select(
vec![], vec![],
utxos, utxos,
FeeRate::from_sat_per_vb_unchecked(1), FeeRate::from_sat_per_vb(1.0),
target_amount, target_amount,
&drain_script, &drain_script,
) )
@@ -1080,7 +1050,7 @@ mod test {
.coin_select( .coin_select(
vec![], vec![],
utxos, utxos,
FeeRate::from_sat_per_vb_unchecked(1000), FeeRate::from_sat_per_vb(1000.0),
target_amount, target_amount,
&drain_script, &drain_script,
) )
@@ -1101,7 +1071,7 @@ mod test {
.coin_select( .coin_select(
vec![], vec![],
utxos, utxos,
FeeRate::from_sat_per_vb_unchecked(1), FeeRate::from_sat_per_vb(1.0),
target_amount, target_amount,
&drain_script, &drain_script,
) )
@@ -1122,7 +1092,7 @@ mod test {
.coin_select( .coin_select(
utxos.clone(), utxos.clone(),
utxos, utxos,
FeeRate::from_sat_per_vb_unchecked(1), FeeRate::from_sat_per_vb(1.0),
target_amount, target_amount,
&drain_script, &drain_script,
) )
@@ -1143,7 +1113,7 @@ mod test {
.coin_select( .coin_select(
vec![], vec![],
utxos, utxos,
FeeRate::from_sat_per_vb_unchecked(1), FeeRate::from_sat_per_vb(1.0),
target_amount, target_amount,
&drain_script, &drain_script,
) )
@@ -1180,7 +1150,7 @@ mod test {
.coin_select( .coin_select(
required, required,
optional, optional,
FeeRate::from_sat_per_vb_unchecked(1), FeeRate::from_sat_per_vb(1.0),
target_amount, target_amount,
&drain_script, &drain_script,
) )
@@ -1202,7 +1172,7 @@ mod test {
.coin_select( .coin_select(
vec![], vec![],
utxos, utxos,
FeeRate::from_sat_per_vb_unchecked(1), FeeRate::from_sat_per_vb(1.0),
target_amount, target_amount,
&drain_script, &drain_script,
) )
@@ -1220,7 +1190,7 @@ mod test {
.coin_select( .coin_select(
vec![], vec![],
utxos, utxos,
FeeRate::from_sat_per_vb_unchecked(1000), FeeRate::from_sat_per_vb(1000.0),
target_amount, target_amount,
&drain_script, &drain_script,
) )
@@ -1232,18 +1202,22 @@ mod test {
let utxos = get_test_utxos(); let utxos = get_test_utxos();
let drain_script = ScriptBuf::default(); let drain_script = ScriptBuf::default();
let target_amount = 99932; // first utxo's effective value let target_amount = 99932; // first utxo's effective value
let feerate = FeeRate::BROADCAST_MIN;
let result = BranchAndBoundCoinSelection::new(0) let result = BranchAndBoundCoinSelection::new(0)
.coin_select(vec![], utxos, feerate, target_amount, &drain_script) .coin_select(
vec![],
utxos,
FeeRate::from_sat_per_vb(1.0),
target_amount,
&drain_script,
)
.unwrap(); .unwrap();
assert_eq!(result.selected.len(), 1); assert_eq!(result.selected.len(), 1);
assert_eq!(result.selected_amount(), 100_000); assert_eq!(result.selected_amount(), 100_000);
let input_weight = (TXIN_BASE_WEIGHT + P2WPKH_SATISFACTION_SIZE) as u64; let input_size = (TXIN_BASE_WEIGHT + P2WPKH_SATISFACTION_SIZE).vbytes();
// the final fee rate should be exactly the same as the fee rate given // the final fee rate should be exactly the same as the fee rate given
let result_feerate = Amount::from_sat(result.fee_amount) / Weight::from_wu(input_weight); assert!((1.0 - (result.fee_amount as f32 / input_size as f32)).abs() < f32::EPSILON);
assert_eq!(result_feerate, feerate);
} }
#[test] #[test]
@@ -1259,7 +1233,7 @@ mod test {
.coin_select( .coin_select(
vec![], vec![],
optional_utxos, optional_utxos,
FeeRate::ZERO, FeeRate::from_sat_per_vb(0.0),
target_amount, target_amount,
&drain_script, &drain_script,
) )
@@ -1271,7 +1245,7 @@ mod test {
#[test] #[test]
#[should_panic(expected = "BnBNoExactMatch")] #[should_panic(expected = "BnBNoExactMatch")]
fn test_bnb_function_no_exact_match() { fn test_bnb_function_no_exact_match() {
let fee_rate = FeeRate::from_sat_per_vb_unchecked(10); let fee_rate = FeeRate::from_sat_per_vb(10.0);
let utxos: Vec<OutputGroup> = get_test_utxos() let utxos: Vec<OutputGroup> = get_test_utxos()
.into_iter() .into_iter()
.map(|u| OutputGroup::new(u, fee_rate)) .map(|u| OutputGroup::new(u, fee_rate))
@@ -1280,7 +1254,7 @@ mod test {
let curr_available_value = utxos.iter().fold(0, |acc, x| acc + x.effective_value); let curr_available_value = utxos.iter().fold(0, |acc, x| acc + x.effective_value);
let size_of_change = 31; let size_of_change = 31;
let cost_of_change = (Weight::from_vb_unchecked(size_of_change) * fee_rate).to_sat(); let cost_of_change = size_of_change as f32 * fee_rate.as_sat_per_vb();
let drain_script = ScriptBuf::default(); let drain_script = ScriptBuf::default();
let target_amount = 20_000 + FEE_AMOUNT; let target_amount = 20_000 + FEE_AMOUNT;
@@ -1301,7 +1275,7 @@ mod test {
#[test] #[test]
#[should_panic(expected = "BnBTotalTriesExceeded")] #[should_panic(expected = "BnBTotalTriesExceeded")]
fn test_bnb_function_tries_exceeded() { fn test_bnb_function_tries_exceeded() {
let fee_rate = FeeRate::from_sat_per_vb_unchecked(10); let fee_rate = FeeRate::from_sat_per_vb(10.0);
let utxos: Vec<OutputGroup> = generate_same_value_utxos(100_000, 100_000) let utxos: Vec<OutputGroup> = generate_same_value_utxos(100_000, 100_000)
.into_iter() .into_iter()
.map(|u| OutputGroup::new(u, fee_rate)) .map(|u| OutputGroup::new(u, fee_rate))
@@ -1310,7 +1284,7 @@ mod test {
let curr_available_value = utxos.iter().fold(0, |acc, x| acc + x.effective_value); let curr_available_value = utxos.iter().fold(0, |acc, x| acc + x.effective_value);
let size_of_change = 31; let size_of_change = 31;
let cost_of_change = (Weight::from_vb_unchecked(size_of_change) * fee_rate).to_sat(); let cost_of_change = size_of_change as f32 * fee_rate.as_sat_per_vb();
let target_amount = 20_000 + FEE_AMOUNT; let target_amount = 20_000 + FEE_AMOUNT;
let drain_script = ScriptBuf::default(); let drain_script = ScriptBuf::default();
@@ -1332,9 +1306,9 @@ mod test {
// The match won't be exact but still in the range // The match won't be exact but still in the range
#[test] #[test]
fn test_bnb_function_almost_exact_match_with_fees() { fn test_bnb_function_almost_exact_match_with_fees() {
let fee_rate = FeeRate::from_sat_per_vb_unchecked(1); let fee_rate = FeeRate::from_sat_per_vb(1.0);
let size_of_change = 31; let size_of_change = 31;
let cost_of_change = (Weight::from_vb_unchecked(size_of_change) * fee_rate).to_sat(); let cost_of_change = size_of_change as f32 * fee_rate.as_sat_per_vb();
let utxos: Vec<_> = generate_same_value_utxos(50_000, 10) let utxos: Vec<_> = generate_same_value_utxos(50_000, 10)
.into_iter() .into_iter()
@@ -1347,7 +1321,7 @@ mod test {
// 2*(value of 1 utxo) - 2*(1 utxo fees with 1.0sat/vbyte fee rate) - // 2*(value of 1 utxo) - 2*(1 utxo fees with 1.0sat/vbyte fee rate) -
// cost_of_change + 5. // cost_of_change + 5.
let target_amount = 2 * 50_000 - 2 * 67 - cost_of_change as i64 + 5; let target_amount = 2 * 50_000 - 2 * 67 - cost_of_change.ceil() as i64 + 5;
let drain_script = ScriptBuf::default(); let drain_script = ScriptBuf::default();
@@ -1372,7 +1346,7 @@ mod test {
fn test_bnb_function_exact_match_more_utxos() { fn test_bnb_function_exact_match_more_utxos() {
let seed = [0; 32]; let seed = [0; 32];
let mut rng: StdRng = SeedableRng::from_seed(seed); let mut rng: StdRng = SeedableRng::from_seed(seed);
let fee_rate = FeeRate::ZERO; let fee_rate = FeeRate::from_sat_per_vb(0.0);
for _ in 0..200 { for _ in 0..200 {
let optional_utxos: Vec<_> = generate_random_utxos(&mut rng, 40) let optional_utxos: Vec<_> = generate_random_utxos(&mut rng, 40)
@@ -1398,7 +1372,7 @@ mod test {
curr_value, curr_value,
curr_available_value, curr_available_value,
target_amount, target_amount,
0, 0.0,
&drain_script, &drain_script,
fee_rate, fee_rate,
) )
@@ -1414,7 +1388,7 @@ mod test {
let mut utxos = generate_random_utxos(&mut rng, 300); let mut utxos = generate_random_utxos(&mut rng, 300);
let target_amount = sum_random_utxos(&mut rng, &mut utxos) + FEE_AMOUNT; let target_amount = sum_random_utxos(&mut rng, &mut utxos) + FEE_AMOUNT;
let fee_rate = FeeRate::from_sat_per_vb_unchecked(1); let fee_rate = FeeRate::from_sat_per_vb(1.0);
let utxos: Vec<OutputGroup> = utxos let utxos: Vec<OutputGroup> = utxos
.into_iter() .into_iter()
.map(|u| OutputGroup::new(u, fee_rate)) .map(|u| OutputGroup::new(u, fee_rate))
@@ -1443,7 +1417,7 @@ mod test {
let selection = BranchAndBoundCoinSelection::default().coin_select( let selection = BranchAndBoundCoinSelection::default().coin_select(
vec![], vec![],
utxos, utxos,
FeeRate::from_sat_per_vb_unchecked(10), FeeRate::from_sat_per_vb(10.0),
500_000, 500_000,
&drain_script, &drain_script,
); );
@@ -1469,7 +1443,7 @@ mod test {
let selection = BranchAndBoundCoinSelection::default().coin_select( let selection = BranchAndBoundCoinSelection::default().coin_select(
required, required,
optional, optional,
FeeRate::from_sat_per_vb_unchecked(10), FeeRate::from_sat_per_vb(10.0),
500_000, 500_000,
&drain_script, &drain_script,
); );
@@ -1491,7 +1465,7 @@ mod test {
let selection = BranchAndBoundCoinSelection::default().coin_select( let selection = BranchAndBoundCoinSelection::default().coin_select(
utxos, utxos,
vec![], vec![],
FeeRate::from_sat_per_vb_unchecked(10_000), FeeRate::from_sat_per_vb(10_000.0),
500_000, 500_000,
&drain_script, &drain_script,
); );
@@ -1504,95 +1478,4 @@ mod test {
}) })
); );
} }
#[test]
fn test_filter_duplicates() {
fn utxo(txid: &str, value: u64) -> WeightedUtxo {
WeightedUtxo {
satisfaction_weight: 0,
utxo: Utxo::Local(LocalOutput {
outpoint: OutPoint::new(bitcoin::hashes::Hash::hash(txid.as_bytes()), 0),
txout: TxOut {
value,
script_pubkey: ScriptBuf::new(),
},
keychain: KeychainKind::External,
is_spent: false,
derivation_index: 0,
confirmation_time: ConfirmationTime::Confirmed {
height: 12345,
time: 12345,
},
}),
}
}
fn to_utxo_vec(utxos: &[(&str, u64)]) -> Vec<WeightedUtxo> {
let mut v = utxos
.iter()
.map(|&(txid, value)| utxo(txid, value))
.collect::<Vec<_>>();
v.sort_by_key(|u| u.utxo.outpoint());
v
}
struct TestCase<'a> {
name: &'a str,
required: &'a [(&'a str, u64)],
optional: &'a [(&'a str, u64)],
exp_required: &'a [(&'a str, u64)],
exp_optional: &'a [(&'a str, u64)],
}
let test_cases = [
TestCase {
name: "no_duplicates",
required: &[("A", 1000), ("B", 2100)],
optional: &[("C", 1000)],
exp_required: &[("A", 1000), ("B", 2100)],
exp_optional: &[("C", 1000)],
},
TestCase {
name: "duplicate_required_utxos",
required: &[("A", 3000), ("B", 1200), ("C", 1234), ("A", 3000)],
optional: &[("D", 2100)],
exp_required: &[("A", 3000), ("B", 1200), ("C", 1234)],
exp_optional: &[("D", 2100)],
},
TestCase {
name: "duplicate_optional_utxos",
required: &[("A", 3000), ("B", 1200)],
optional: &[("C", 5000), ("D", 1300), ("C", 5000)],
exp_required: &[("A", 3000), ("B", 1200)],
exp_optional: &[("C", 5000), ("D", 1300)],
},
TestCase {
name: "duplicate_across_required_and_optional_utxos",
required: &[("A", 3000), ("B", 1200), ("C", 2100)],
optional: &[("A", 3000), ("D", 1200), ("E", 5000)],
exp_required: &[("A", 3000), ("B", 1200), ("C", 2100)],
exp_optional: &[("D", 1200), ("E", 5000)],
},
];
for (i, t) in test_cases.into_iter().enumerate() {
println!("Case {}: {}", i, t.name);
let (required, optional) =
filter_duplicates(to_utxo_vec(t.required), to_utxo_vec(t.optional));
assert_eq!(
required,
to_utxo_vec(t.exp_required),
"[{}:{}] unexpected `required` result",
i,
t.name
);
assert_eq!(
optional,
to_utxo_vec(t.exp_optional),
"[{}:{}] unexpected `optional` result",
i,
t.name
);
}
}
} }

View File

@@ -14,7 +14,7 @@
use crate::descriptor::policy::PolicyError; use crate::descriptor::policy::PolicyError;
use crate::descriptor::DescriptorError; use crate::descriptor::DescriptorError;
use crate::wallet::coin_selection; use crate::wallet::coin_selection;
use crate::{descriptor, KeychainKind}; use crate::{descriptor, FeeRate, KeychainKind};
use alloc::string::String; use alloc::string::String;
use bitcoin::{absolute, psbt, OutPoint, Sequence, Txid}; use bitcoin::{absolute, psbt, OutPoint, Sequence, Txid};
use core::fmt; use core::fmt;
@@ -83,8 +83,8 @@ pub enum CreateTxError<P> {
}, },
/// When bumping a tx the fee rate requested is lower than required /// When bumping a tx the fee rate requested is lower than required
FeeRateTooLow { FeeRateTooLow {
/// Required fee rate /// Required fee rate (satoshi/vbyte)
required: bitcoin::FeeRate, required: FeeRate,
}, },
/// `manually_selected_only` option is selected but no utxo has been passed /// `manually_selected_only` option is selected but no utxo has been passed
NoUtxosSelected, NoUtxosSelected,
@@ -168,10 +168,8 @@ where
CreateTxError::FeeRateTooLow { required } => { CreateTxError::FeeRateTooLow { required } => {
write!( write!(
f, f,
// Note: alternate fmt as sat/vb (ceil) available in bitcoin-0.31 "Fee rate too low: required {} sat/vbyte",
//"Fee rate too low: required {required:#}" required.as_sat_per_vb()
"Fee rate too low: required {} sat/vb",
crate::floating_rate!(required)
) )
} }
CreateTxError::NoUtxosSelected => { CreateTxError::NoUtxosSelected => {

View File

@@ -18,7 +18,7 @@
//! # use bdk::signer::SignerOrdering; //! # use bdk::signer::SignerOrdering;
//! # use bdk::wallet::hardwaresigner::HWISigner; //! # use bdk::wallet::hardwaresigner::HWISigner;
//! # use bdk::wallet::AddressIndex::New; //! # use bdk::wallet::AddressIndex::New;
//! # use bdk::{KeychainKind, SignOptions, Wallet}; //! # use bdk::{FeeRate, KeychainKind, SignOptions, Wallet};
//! # use hwi::HWIClient; //! # use hwi::HWIClient;
//! # use std::sync::Arc; //! # use std::sync::Arc;
//! # //! #

View File

@@ -11,8 +11,8 @@
//! Wallet //! Wallet
//! //!
//! This module defines the [`Wallet`]. //! This module defines the [`Wallet`] structure.
use crate::collections::{BTreeMap, HashMap}; use crate::collections::{BTreeMap, HashMap, HashSet};
use alloc::{ use alloc::{
boxed::Box, boxed::Box,
string::{String, ToString}, string::{String, ToString},
@@ -23,9 +23,7 @@ pub use bdk_chain::keychain::Balance;
use bdk_chain::{ use bdk_chain::{
indexed_tx_graph, indexed_tx_graph,
keychain::{self, KeychainTxOutIndex}, keychain::{self, KeychainTxOutIndex},
local_chain::{ local_chain::{self, CannotConnectError, CheckPoint, CheckPointIter, LocalChain},
self, ApplyHeaderError, CannotConnectError, CheckPoint, CheckPointIter, LocalChain,
},
tx_graph::{CanonicalTx, TxGraph}, tx_graph::{CanonicalTx, TxGraph},
Append, BlockId, ChainPosition, ConfirmationTime, ConfirmationTimeHeightAnchor, FullTxOut, Append, BlockId, ChainPosition, ConfirmationTime, ConfirmationTimeHeightAnchor, FullTxOut,
IndexedTxGraph, Persist, PersistBackend, IndexedTxGraph, Persist, PersistBackend,
@@ -33,8 +31,8 @@ use bdk_chain::{
use bitcoin::secp256k1::{All, Secp256k1}; use bitcoin::secp256k1::{All, Secp256k1};
use bitcoin::sighash::{EcdsaSighashType, TapSighashType}; use bitcoin::sighash::{EcdsaSighashType, TapSighashType};
use bitcoin::{ use bitcoin::{
absolute, Address, Block, FeeRate, Network, OutPoint, Script, ScriptBuf, Sequence, Transaction, absolute, Address, Network, OutPoint, Script, ScriptBuf, Sequence, Transaction, TxOut, Txid,
TxOut, Txid, Weight, Witness, Weight, Witness,
}; };
use bitcoin::{consensus::encode::serialize, BlockHash}; use bitcoin::{consensus::encode::serialize, BlockHash};
use bitcoin::{constants::genesis_block, psbt}; use bitcoin::{constants::genesis_block, psbt};
@@ -52,6 +50,10 @@ pub mod tx_builder;
pub(crate) mod utils; pub(crate) mod utils;
pub mod error; pub mod error;
#[cfg(feature = "hardware-signer")]
#[cfg_attr(docsrs, doc(cfg(feature = "hardware-signer")))]
pub mod hardwaresigner;
pub use utils::IsDust; pub use utils::IsDust;
#[allow(deprecated)] #[allow(deprecated)]
@@ -75,7 +77,7 @@ const COINBASE_MATURITY: u32 = 100;
/// A Bitcoin wallet /// A Bitcoin wallet
/// ///
/// The `Wallet` acts as a way of coherently interfacing with output descriptors and related transactions. /// The `Wallet` struct acts as a way of coherently interfacing with output descriptors and related transactions.
/// Its main components are: /// Its main components are:
/// ///
/// 1. output *descriptors* from which it can derive addresses. /// 1. output *descriptors* from which it can derive addresses.
@@ -235,7 +237,6 @@ impl Wallet {
network: Network, network: Network,
) -> Result<Self, DescriptorError> { ) -> Result<Self, DescriptorError> {
Self::new(descriptor, change_descriptor, (), network).map_err(|e| match e { Self::new(descriptor, change_descriptor, (), network).map_err(|e| match e {
NewError::NonEmptyDatabase => unreachable!("mock-database cannot have data"),
NewError::Descriptor(e) => e, NewError::Descriptor(e) => e,
NewError::Write(_) => unreachable!("mock-write must always succeed"), NewError::Write(_) => unreachable!("mock-write must always succeed"),
}) })
@@ -250,7 +251,6 @@ impl Wallet {
) -> Result<Self, crate::descriptor::DescriptorError> { ) -> Result<Self, crate::descriptor::DescriptorError> {
Self::new_with_genesis_hash(descriptor, change_descriptor, (), network, genesis_hash) Self::new_with_genesis_hash(descriptor, change_descriptor, (), network, genesis_hash)
.map_err(|e| match e { .map_err(|e| match e {
NewError::NonEmptyDatabase => unreachable!("mock-database cannot have data"),
NewError::Descriptor(e) => e, NewError::Descriptor(e) => e,
NewError::Write(_) => unreachable!("mock-write must always succeed"), NewError::Write(_) => unreachable!("mock-write must always succeed"),
}) })
@@ -264,11 +264,6 @@ where
/// Infallibly return a derived address using the external descriptor, see [`AddressIndex`] for /// Infallibly return a derived address using the external descriptor, see [`AddressIndex`] for
/// available address index selection strategies. If none of the keys in the descriptor are derivable /// available address index selection strategies. If none of the keys in the descriptor are derivable
/// (i.e. does not end with /*) then the same address will always be returned for any [`AddressIndex`]. /// (i.e. does not end with /*) then the same address will always be returned for any [`AddressIndex`].
///
/// # Panics
///
/// This panics when the caller requests for an address of derivation index greater than the
/// BIP32 max index.
pub fn get_address(&mut self, address_index: AddressIndex) -> AddressInfo { pub fn get_address(&mut self, address_index: AddressIndex) -> AddressInfo {
self.try_get_address(address_index).unwrap() self.try_get_address(address_index).unwrap()
} }
@@ -280,11 +275,6 @@ where
/// see [`AddressIndex`] for available address index selection strategies. If none of the keys /// see [`AddressIndex`] for available address index selection strategies. If none of the keys
/// in the descriptor are derivable (i.e. does not end with /*) then the same address will always /// in the descriptor are derivable (i.e. does not end with /*) then the same address will always
/// be returned for any [`AddressIndex`]. /// be returned for any [`AddressIndex`].
///
/// # Panics
///
/// This panics when the caller requests for an address of derivation index greater than the
/// BIP32 max index.
pub fn get_internal_address(&mut self, address_index: AddressIndex) -> AddressInfo { pub fn get_internal_address(&mut self, address_index: AddressIndex) -> AddressInfo {
self.try_get_internal_address(address_index).unwrap() self.try_get_internal_address(address_index).unwrap()
} }
@@ -298,8 +288,6 @@ where
/// [`new_with_genesis_hash`]: Wallet::new_with_genesis_hash /// [`new_with_genesis_hash`]: Wallet::new_with_genesis_hash
#[derive(Debug)] #[derive(Debug)]
pub enum NewError<W> { pub enum NewError<W> {
/// Database already has data.
NonEmptyDatabase,
/// There was problem with the passed-in descriptor(s). /// There was problem with the passed-in descriptor(s).
Descriptor(crate::descriptor::DescriptorError), Descriptor(crate::descriptor::DescriptorError),
/// We were unable to write the wallet's data to the persistence backend. /// We were unable to write the wallet's data to the persistence backend.
@@ -312,10 +300,6 @@ where
{ {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self { match self {
NewError::NonEmptyDatabase => write!(
f,
"database already has data - use `load` or `new_or_load` methods instead"
),
NewError::Descriptor(e) => e.fmt(f), NewError::Descriptor(e) => e.fmt(f),
NewError::Write(e) => e.fmt(f), NewError::Write(e) => e.fmt(f),
} }
@@ -364,7 +348,7 @@ where
#[cfg(feature = "std")] #[cfg(feature = "std")]
impl<L> std::error::Error for LoadError<L> where L: core::fmt::Display + core::fmt::Debug {} impl<L> std::error::Error for LoadError<L> where L: core::fmt::Display + core::fmt::Debug {}
/// Error type for when we try load a [`Wallet`] from persistence and creating it if non-existent. /// Error type for when we try load a [`Wallet`] from persistence and creating it if non-existant.
/// ///
/// Methods [`new_or_load`] and [`new_or_load_with_genesis_hash`] may return this error. /// Methods [`new_or_load`] and [`new_or_load_with_genesis_hash`] may return this error.
/// ///
@@ -440,55 +424,6 @@ pub enum InsertTxError {
}, },
} }
impl fmt::Display for InsertTxError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
InsertTxError::ConfirmationHeightCannotBeGreaterThanTip {
tip_height,
tx_height,
} => {
write!(f, "cannot insert tx with confirmation height ({}) higher than internal tip height ({})", tx_height, tip_height)
}
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for InsertTxError {}
/// An error that may occur when applying a block to [`Wallet`].
#[derive(Debug)]
pub enum ApplyBlockError {
/// Occurs when the update chain cannot connect with original chain.
CannotConnect(CannotConnectError),
/// Occurs when the `connected_to` hash does not match the hash derived from `block`.
UnexpectedConnectedToHash {
/// Block hash of `connected_to`.
connected_to_hash: BlockHash,
/// Expected block hash of `connected_to`, as derived from `block`.
expected_hash: BlockHash,
},
}
impl fmt::Display for ApplyBlockError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ApplyBlockError::CannotConnect(err) => err.fmt(f),
ApplyBlockError::UnexpectedConnectedToHash {
expected_hash: block_hash,
connected_to_hash: checkpoint_hash,
} => write!(
f,
"`connected_to` hash {} differs from the expected hash {} (which is derived from `block`)",
checkpoint_hash, block_hash
),
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for ApplyBlockError {}
impl<D> Wallet<D> { impl<D> Wallet<D> {
/// Initialize an empty [`Wallet`]. /// Initialize an empty [`Wallet`].
pub fn new<E: IntoWalletDescriptor>( pub fn new<E: IntoWalletDescriptor>(
@@ -511,18 +446,13 @@ impl<D> Wallet<D> {
pub fn new_with_genesis_hash<E: IntoWalletDescriptor>( pub fn new_with_genesis_hash<E: IntoWalletDescriptor>(
descriptor: E, descriptor: E,
change_descriptor: Option<E>, change_descriptor: Option<E>,
mut db: D, db: D,
network: Network, network: Network,
genesis_hash: BlockHash, genesis_hash: BlockHash,
) -> Result<Self, NewError<D::WriteError>> ) -> Result<Self, NewError<D::WriteError>>
where where
D: PersistBackend<ChangeSet>, D: PersistBackend<ChangeSet>,
{ {
if let Ok(changeset) = db.load_from_persistence() {
if changeset.is_some() {
return Err(NewError::NonEmptyDatabase);
}
}
let secp = Secp256k1::new(); let secp = Secp256k1::new();
let (chain, chain_changeset) = LocalChain::from_genesis_hash(genesis_hash); let (chain, chain_changeset) = LocalChain::from_genesis_hash(genesis_hash);
let mut index = KeychainTxOutIndex::<KeychainKind>::default(); let mut index = KeychainTxOutIndex::<KeychainKind>::default();
@@ -587,9 +517,7 @@ impl<D> Wallet<D> {
create_signers(&mut index, &secp, descriptor, change_descriptor, network) create_signers(&mut index, &secp, descriptor, change_descriptor, network)
.map_err(LoadError::Descriptor)?; .map_err(LoadError::Descriptor)?;
let mut indexed_graph = IndexedTxGraph::new(index); let indexed_graph = IndexedTxGraph::new(index);
indexed_graph.apply_changeset(changeset.indexed_tx_graph);
let persist = Persist::new(db); let persist = Persist::new(db);
Ok(Wallet { Ok(Wallet {
@@ -685,9 +613,6 @@ impl<D> Wallet<D> {
genesis_hash, genesis_hash,
) )
.map_err(|e| match e { .map_err(|e| match e {
NewError::NonEmptyDatabase => {
unreachable!("database is already checked to have no data")
}
NewError::Descriptor(e) => NewOrLoadError::Descriptor(e), NewError::Descriptor(e) => NewOrLoadError::Descriptor(e),
NewError::Write(e) => NewOrLoadError::Write(e), NewError::Write(e) => NewOrLoadError::Write(e),
}), }),
@@ -710,11 +635,6 @@ impl<D> Wallet<D> {
/// ///
/// A `PersistBackend<ChangeSet>::WriteError` will result if unable to persist the new address /// A `PersistBackend<ChangeSet>::WriteError` will result if unable to persist the new address
/// to the `PersistBackend`. /// to the `PersistBackend`.
///
/// # Panics
///
/// This panics when the caller requests for an address of derivation index greater than the
/// BIP32 max index.
pub fn try_get_address( pub fn try_get_address(
&mut self, &mut self,
address_index: AddressIndex, address_index: AddressIndex,
@@ -735,11 +655,6 @@ impl<D> Wallet<D> {
/// see [`AddressIndex`] for available address index selection strategies. If none of the keys /// see [`AddressIndex`] for available address index selection strategies. If none of the keys
/// in the descriptor are derivable (i.e. does not end with /*) then the same address will always /// in the descriptor are derivable (i.e. does not end with /*) then the same address will always
/// be returned for any [`AddressIndex`]. /// be returned for any [`AddressIndex`].
///
/// # Panics
///
/// This panics when the caller requests for an address of derivation index greater than the
/// BIP32 max index.
pub fn try_get_internal_address( pub fn try_get_internal_address(
&mut self, &mut self,
address_index: AddressIndex, address_index: AddressIndex,
@@ -762,11 +677,6 @@ impl<D> Wallet<D> {
/// See [`AddressIndex`] for available address index selection strategies. If none of the keys /// See [`AddressIndex`] for available address index selection strategies. If none of the keys
/// in the descriptor are derivable (i.e. does not end with /*) then the same address will /// in the descriptor are derivable (i.e. does not end with /*) then the same address will
/// always be returned for any [`AddressIndex`]. /// always be returned for any [`AddressIndex`].
///
/// # Panics
///
/// This panics when the caller requests for an address of derivation index greater than the
/// BIP32 max index.
fn _get_address( fn _get_address(
&mut self, &mut self,
keychain: KeychainKind, keychain: KeychainKind,
@@ -786,14 +696,12 @@ impl<D> Wallet<D> {
let ((index, spk), index_changeset) = txout_index.next_unused_spk(&keychain); let ((index, spk), index_changeset) = txout_index.next_unused_spk(&keychain);
(index, spk.into(), Some(index_changeset)) (index, spk.into(), Some(index_changeset))
} }
AddressIndex::Peek(mut peek_index) => { AddressIndex::Peek(index) => {
let mut spk_iter = txout_index.unbounded_spk_iter(&keychain); let (index, spk) = txout_index
if !spk_iter.descriptor().has_wildcard() { .spks_of_keychain(&keychain)
peek_index = 0; .take(index as usize + 1)
} .last()
let (index, spk) = spk_iter .unwrap();
.nth(peek_index as usize)
.expect("derivation index is out of bounds");
(index, spk, None) (index, spk, None)
} }
}; };
@@ -823,7 +731,7 @@ impl<D> Wallet<D> {
/// ///
/// Will only return `Some(_)` if the wallet has given out the spk. /// Will only return `Some(_)` if the wallet has given out the spk.
pub fn derivation_of_spk(&self, spk: &Script) -> Option<(KeychainKind, u32)> { pub fn derivation_of_spk(&self, spk: &Script) -> Option<(KeychainKind, u32)> {
self.indexed_graph.index.index_of_spk(spk) self.indexed_graph.index.index_of_spk(spk).copied()
} }
/// Return the list of unspent outputs of this wallet /// Return the list of unspent outputs of this wallet
@@ -862,7 +770,7 @@ impl<D> Wallet<D> {
self.chain.tip() self.chain.tip()
} }
/// Get unbounded script pubkey iterators for both `Internal` and `External` keychains. /// Returns a iterators of all the script pubkeys for the `Internal` and External` variants in `KeychainKind`.
/// ///
/// This is intended to be used when doing a full scan of your addresses (e.g. after restoring /// This is intended to be used when doing a full scan of your addresses (e.g. after restoring
/// from seed words). You pass the `BTreeMap` of iterators to a blockchain data source (e.g. /// from seed words). You pass the `BTreeMap` of iterators to a blockchain data source (e.g.
@@ -870,36 +778,36 @@ impl<D> Wallet<D> {
/// ///
/// Note carefully that iterators go over **all** script pubkeys on the keychains (not what /// Note carefully that iterators go over **all** script pubkeys on the keychains (not what
/// script pubkeys the wallet is storing internally). /// script pubkeys the wallet is storing internally).
pub fn all_unbounded_spk_iters( pub fn spks_of_all_keychains(
&self, &self,
) -> BTreeMap<KeychainKind, impl Iterator<Item = (u32, ScriptBuf)> + Clone> { ) -> BTreeMap<KeychainKind, impl Iterator<Item = (u32, ScriptBuf)> + Clone> {
self.indexed_graph.index.all_unbounded_spk_iters() self.indexed_graph.index.spks_of_all_keychains()
} }
/// Get an unbounded script pubkey iterator for the given `keychain`. /// Gets an iterator over all the script pubkeys in a single keychain.
/// ///
/// See [`all_unbounded_spk_iters`] for more documentation /// See [`spks_of_all_keychains`] for more documentation
/// ///
/// [`all_unbounded_spk_iters`]: Self::all_unbounded_spk_iters /// [`spks_of_all_keychains`]: Self::spks_of_all_keychains
pub fn unbounded_spk_iter( pub fn spks_of_keychain(
&self, &self,
keychain: KeychainKind, keychain: KeychainKind,
) -> impl Iterator<Item = (u32, ScriptBuf)> + Clone { ) -> impl Iterator<Item = (u32, ScriptBuf)> + Clone {
self.indexed_graph.index.unbounded_spk_iter(&keychain) self.indexed_graph.index.spks_of_keychain(&keychain)
} }
/// Returns the utxo owned by this wallet corresponding to `outpoint` if it exists in the /// Returns the utxo owned by this wallet corresponding to `outpoint` if it exists in the
/// wallet's database. /// wallet's database.
pub fn get_utxo(&self, op: OutPoint) -> Option<LocalOutput> { pub fn get_utxo(&self, op: OutPoint) -> Option<LocalOutput> {
let (keychain, index, _) = self.indexed_graph.index.txout(op)?; let (&spk_i, _) = self.indexed_graph.index.txout(op)?;
self.indexed_graph self.indexed_graph
.graph() .graph()
.filter_chain_unspents( .filter_chain_unspents(
&self.chain, &self.chain,
self.chain.tip().block_id(), self.chain.tip().block_id(),
core::iter::once(((), op)), core::iter::once((spk_i, op)),
) )
.map(|(_, full_txo)| new_local_utxo(keychain, index, full_txo)) .map(|((k, i), full_txo)| new_local_utxo(k, i, full_txo))
.next() .next()
} }
@@ -986,15 +894,18 @@ impl<D> Wallet<D> {
/// ``` /// ```
/// [`insert_txout`]: Self::insert_txout /// [`insert_txout`]: Self::insert_txout
pub fn calculate_fee_rate(&self, tx: &Transaction) -> Result<FeeRate, CalculateFeeError> { pub fn calculate_fee_rate(&self, tx: &Transaction) -> Result<FeeRate, CalculateFeeError> {
self.calculate_fee(tx) self.calculate_fee(tx).map(|fee| {
.map(|fee| bitcoin::Amount::from_sat(fee) / tx.weight()) let weight = tx.weight();
FeeRate::from_wu(fee, weight)
})
} }
/// Compute the `tx`'s sent and received amounts (in satoshis). /// Computes total input value going from script pubkeys in the index (sent) and the total output
/// value going to script pubkeys in the index (received) in `tx`.
/// ///
/// This method returns a tuple `(sent, received)`. Sent is the sum of the txin amounts /// For the `sent` to be computed correctly, the outputs being spent must have already been
/// that spend from previous txouts tracked by this wallet. Received is the summation /// scanned by the index. Calculating received just uses the [`Transaction`] outputs directly,
/// of this tx's outputs that send to script pubkeys tracked by this wallet. /// so it will be correct even if it has not been scanned.
/// ///
/// # Examples /// # Examples
/// ///
@@ -1345,7 +1256,7 @@ impl<D> Wallet<D> {
} }
Some(tx_builder::Version(x)) => x, Some(tx_builder::Version(x)) => x,
None if requirements.csv.is_some() => 2, None if requirements.csv.is_some() => 2,
None => 1, _ => 1,
}; };
// We use a match here instead of a unwrap_or_else as it's way more readable :) // We use a match here instead of a unwrap_or_else as it's way more readable :)
@@ -1398,7 +1309,6 @@ impl<D> Wallet<D> {
} }
}; };
// The nSequence to be by default for inputs unless an explicit sequence is specified.
let n_sequence = match (params.rbf, requirements.csv) { let n_sequence = match (params.rbf, requirements.csv) {
// No RBF or CSV but there's an nLockTime, so the nSequence cannot be final // No RBF or CSV but there's an nLockTime, so the nSequence cannot be final
(None, None) if lock_time != absolute::LockTime::ZERO => { (None, None) if lock_time != absolute::LockTime::ZERO => {
@@ -1430,31 +1340,32 @@ impl<D> Wallet<D> {
(Some(rbf), _) => rbf.get_value(), (Some(rbf), _) => rbf.get_value(),
}; };
let (fee_rate, mut fee_amount) = match params.fee_policy.unwrap_or_default() { let (fee_rate, mut fee_amount) = match params
.fee_policy
.as_ref()
.unwrap_or(&FeePolicy::FeeRate(FeeRate::default()))
{
//FIXME: see https://github.com/bitcoindevkit/bdk/issues/256 //FIXME: see https://github.com/bitcoindevkit/bdk/issues/256
FeePolicy::FeeAmount(fee) => { FeePolicy::FeeAmount(fee) => {
if let Some(previous_fee) = params.bumping_fee { if let Some(previous_fee) = params.bumping_fee {
if fee < previous_fee.absolute { if *fee < previous_fee.absolute {
return Err(CreateTxError::FeeTooLow { return Err(CreateTxError::FeeTooLow {
required: previous_fee.absolute, required: previous_fee.absolute,
}); });
} }
} }
(FeeRate::ZERO, fee) (FeeRate::from_sat_per_vb(0.0), *fee)
} }
FeePolicy::FeeRate(rate) => { FeePolicy::FeeRate(rate) => {
if let Some(previous_fee) = params.bumping_fee { if let Some(previous_fee) = params.bumping_fee {
let required_feerate = FeeRate::from_sat_per_kwu( let required_feerate = FeeRate::from_sat_per_vb(previous_fee.rate + 1.0);
previous_fee.rate.to_sat_per_kwu() if *rate < required_feerate {
+ FeeRate::BROADCAST_MIN.to_sat_per_kwu(), // +1 sat/vb
);
if rate < required_feerate {
return Err(CreateTxError::FeeRateTooLow { return Err(CreateTxError::FeeRateTooLow {
required: required_feerate, required: required_feerate,
}); });
} }
} }
(rate, 0) (*rate, 0)
} }
}; };
@@ -1497,7 +1408,7 @@ impl<D> Wallet<D> {
outgoing += value; outgoing += value;
} }
fee_amount += (fee_rate * tx.weight()).to_sat(); fee_amount += fee_rate.fee_wu(tx.weight());
// Segwit transactions' header is 2WU larger than legacy txs' header, // Segwit transactions' header is 2WU larger than legacy txs' header,
// as they contain a witness marker (1WU) and a witness flag (1WU) (see BIP144). // as they contain a witness marker (1WU) and a witness flag (1WU) (see BIP144).
@@ -1508,7 +1419,7 @@ impl<D> Wallet<D> {
// end up with a transaction with a slightly higher fee rate than the requested one. // end up with a transaction with a slightly higher fee rate than the requested one.
// If, instead, we undershoot, we may end up with a feerate lower than the requested one // If, instead, we undershoot, we may end up with a feerate lower than the requested one
// - we might come up with non broadcastable txs! // - we might come up with non broadcastable txs!
fee_amount += (fee_rate * Weight::from_wu(2)).to_sat(); fee_amount += fee_rate.fee_wu(Weight::from_wu(2));
if params.change_policy != tx_builder::ChangeSpendPolicy::ChangeAllowed if params.change_policy != tx_builder::ChangeSpendPolicy::ChangeAllowed
&& internal_descriptor.is_none() && internal_descriptor.is_none()
@@ -1516,8 +1427,15 @@ impl<D> Wallet<D> {
return Err(CreateTxError::ChangePolicyDescriptor); return Err(CreateTxError::ChangePolicyDescriptor);
} }
let (required_utxos, optional_utxos) = let (required_utxos, optional_utxos) = self.preselect_utxos(
self.preselect_utxos(&params, Some(current_height.to_consensus_u32())); params.change_policy,
&params.unspendable,
params.utxos.clone(),
params.drain_wallet,
params.manually_selected_only,
params.bumping_fee.is_some(), // we mandate confirmed transactions if we're bumping the fee
Some(current_height.to_consensus_u32()),
);
// get drain script // get drain script
let drain_script = match params.drain_to { let drain_script = match params.drain_to {
@@ -1527,7 +1445,7 @@ impl<D> Wallet<D> {
let ((index, spk), index_changeset) = let ((index, spk), index_changeset) =
self.indexed_graph.index.next_unused_spk(&change_keychain); self.indexed_graph.index.next_unused_spk(&change_keychain);
let spk = spk.into(); let spk = spk.into();
self.indexed_graph.index.mark_used(change_keychain, index); self.indexed_graph.index.mark_used(&change_keychain, index);
self.persist self.persist
.stage(ChangeSet::from(indexed_tx_graph::ChangeSet::from( .stage(ChangeSet::from(indexed_tx_graph::ChangeSet::from(
index_changeset, index_changeset,
@@ -1537,9 +1455,6 @@ impl<D> Wallet<D> {
} }
}; };
let (required_utxos, optional_utxos) =
coin_selection::filter_duplicates(required_utxos, optional_utxos);
let coin_selection = coin_selection.coin_select( let coin_selection = coin_selection.coin_select(
required_utxos, required_utxos,
optional_utxos, optional_utxos,
@@ -1556,7 +1471,7 @@ impl<D> Wallet<D> {
.map(|u| bitcoin::TxIn { .map(|u| bitcoin::TxIn {
previous_output: u.outpoint(), previous_output: u.outpoint(),
script_sig: ScriptBuf::default(), script_sig: ScriptBuf::default(),
sequence: u.sequence().unwrap_or(n_sequence), sequence: n_sequence,
witness: Witness::new(), witness: Witness::new(),
}) })
.collect(); .collect();
@@ -1649,7 +1564,7 @@ impl<D> Wallet<D> {
/// let mut psbt = { /// let mut psbt = {
/// let mut builder = wallet.build_fee_bump(tx.txid())?; /// let mut builder = wallet.build_fee_bump(tx.txid())?;
/// builder /// builder
/// .fee_rate(FeeRate::from_sat_per_vb(5).expect("valid feerate")); /// .fee_rate(bdk::FeeRate::from_sat_per_vb(5.0));
/// builder.finish()? /// builder.finish()?
/// }; /// };
/// ///
@@ -1711,7 +1626,7 @@ impl<D> Wallet<D> {
.into(); .into();
let weighted_utxo = match txout_index.index_of_spk(&txout.script_pubkey) { let weighted_utxo = match txout_index.index_of_spk(&txout.script_pubkey) {
Some((keychain, derivation_index)) => { Some(&(keychain, derivation_index)) => {
#[allow(deprecated)] #[allow(deprecated)]
let satisfaction_weight = self let satisfaction_weight = self
.get_descriptor_for_keychain(keychain) .get_descriptor_for_keychain(keychain)
@@ -1736,7 +1651,6 @@ impl<D> Wallet<D> {
satisfaction_weight, satisfaction_weight,
utxo: Utxo::Foreign { utxo: Utxo::Foreign {
outpoint: txin.previous_output, outpoint: txin.previous_output,
sequence: Some(txin.sequence),
psbt_input: Box::new(psbt::Input { psbt_input: Box::new(psbt::Input {
witness_utxo: Some(txout.clone()), witness_utxo: Some(txout.clone()),
non_witness_utxo: Some(prev_tx.clone()), non_witness_utxo: Some(prev_tx.clone()),
@@ -1756,7 +1670,7 @@ impl<D> Wallet<D> {
for (index, txout) in tx.output.iter().enumerate() { for (index, txout) in tx.output.iter().enumerate() {
let change_type = self.map_keychain(KeychainKind::Internal); let change_type = self.map_keychain(KeychainKind::Internal);
match txout_index.index_of_spk(&txout.script_pubkey) { match txout_index.index_of_spk(&txout.script_pubkey) {
Some((keychain, _)) if keychain == change_type => change_index = Some(index), Some(&(keychain, _)) if keychain == change_type => change_index = Some(index),
_ => {} _ => {}
} }
} }
@@ -1777,7 +1691,7 @@ impl<D> Wallet<D> {
utxos: original_utxos, utxos: original_utxos,
bumping_fee: Some(tx_builder::PreviousFee { bumping_fee: Some(tx_builder::PreviousFee {
absolute: fee, absolute: fee,
rate: fee_rate, rate: fee_rate.as_sat_per_vb(),
}), }),
..Default::default() ..Default::default()
}; };
@@ -1971,15 +1885,6 @@ impl<D> Wallet<D> {
if sign_options.remove_partial_sigs { if sign_options.remove_partial_sigs {
psbt_input.partial_sigs.clear(); psbt_input.partial_sigs.clear();
} }
if sign_options.remove_taproot_extras {
// We just constructed the final witness, clear these fields.
psbt_input.tap_key_sig = None;
psbt_input.tap_script_sigs.clear();
psbt_input.tap_scripts.clear();
psbt_input.tap_key_origins.clear();
psbt_input.tap_internal_key = None;
psbt_input.tap_merkle_root = None;
}
} }
Err(_) => finished = false, Err(_) => finished = false,
} }
@@ -1988,12 +1893,6 @@ impl<D> Wallet<D> {
} }
} }
if finished && sign_options.remove_taproot_extras {
for output in &mut psbt.outputs {
output.tap_key_origins.clear();
}
}
Ok(finished) Ok(finished)
} }
@@ -2026,10 +1925,10 @@ impl<D> Wallet<D> {
pub fn cancel_tx(&mut self, tx: &Transaction) { pub fn cancel_tx(&mut self, tx: &Transaction) {
let txout_index = &mut self.indexed_graph.index; let txout_index = &mut self.indexed_graph.index;
for txout in &tx.output { for txout in &tx.output {
if let Some((keychain, index)) = txout_index.index_of_spk(&txout.script_pubkey) { if let Some(&(keychain, index)) = txout_index.index_of_spk(&txout.script_pubkey) {
// NOTE: unmark_used will **not** make something unused if it has actually been used // NOTE: unmark_used will **not** make something unused if it has actually been used
// by a tx in the tracker. It only removes the superficial marking. // by a tx in the tracker. It only removes the superficial marking.
txout_index.unmark_used(keychain, index); txout_index.unmark_used(&keychain, index);
} }
} }
} }
@@ -2045,7 +1944,7 @@ impl<D> Wallet<D> {
} }
fn get_descriptor_for_txout(&self, txout: &TxOut) -> Option<DerivedDescriptor> { fn get_descriptor_for_txout(&self, txout: &TxOut) -> Option<DerivedDescriptor> {
let (keychain, child) = self let &(keychain, child) = self
.indexed_graph .indexed_graph
.index .index
.index_of_spk(&txout.script_pubkey)?; .index_of_spk(&txout.script_pubkey)?;
@@ -2070,26 +1969,17 @@ impl<D> Wallet<D> {
/// Given the options returns the list of utxos that must be used to form the /// Given the options returns the list of utxos that must be used to form the
/// transaction and any further that may be used if needed. /// transaction and any further that may be used if needed.
#[allow(clippy::too_many_arguments)]
fn preselect_utxos( fn preselect_utxos(
&self, &self,
params: &TxParams, change_policy: tx_builder::ChangeSpendPolicy,
unspendable: &HashSet<OutPoint>,
manually_selected: Vec<WeightedUtxo>,
must_use_all_available: bool,
manual_only: bool,
must_only_use_confirmed_tx: bool,
current_height: Option<u32>, current_height: Option<u32>,
) -> (Vec<WeightedUtxo>, Vec<WeightedUtxo>) { ) -> (Vec<WeightedUtxo>, Vec<WeightedUtxo>) {
let TxParams {
change_policy,
unspendable,
utxos,
drain_wallet,
manually_selected_only,
bumping_fee,
..
} = params;
let manually_selected = utxos.clone();
// we mandate confirmed transactions if we're bumping the fee
let must_only_use_confirmed_tx = bumping_fee.is_some();
let must_use_all_available = *drain_wallet;
let chain_tip = self.chain.tip().block_id(); let chain_tip = self.chain.tip().block_id();
// must_spend <- manually selected utxos // must_spend <- manually selected utxos
// may_spend <- all other available utxos // may_spend <- all other available utxos
@@ -2104,7 +1994,7 @@ impl<D> Wallet<D> {
// NOTE: we are intentionally ignoring `unspendable` here. i.e manual // NOTE: we are intentionally ignoring `unspendable` here. i.e manual
// selection overrides unspendable. // selection overrides unspendable.
if *manually_selected_only { if manual_only {
return (must_spend, vec![]); return (must_spend, vec![]);
} }
@@ -2232,9 +2122,8 @@ impl<D> Wallet<D> {
} }
} }
Utxo::Foreign { Utxo::Foreign {
outpoint,
psbt_input: foreign_psbt_input, psbt_input: foreign_psbt_input,
.. outpoint,
} => { } => {
let is_taproot = foreign_psbt_input let is_taproot = foreign_psbt_input
.witness_utxo .witness_utxo
@@ -2269,7 +2158,7 @@ impl<D> Wallet<D> {
{ {
// Try to find the prev_script in our db to figure out if this is internal or external, // Try to find the prev_script in our db to figure out if this is internal or external,
// and the derivation index // and the derivation index
let (keychain, child) = self let &(keychain, child) = self
.indexed_graph .indexed_graph
.index .index
.index_of_spk(&utxo.txout.script_pubkey) .index_of_spk(&utxo.txout.script_pubkey)
@@ -2307,6 +2196,9 @@ impl<D> Wallet<D> {
) -> Result<(), MiniscriptPsbtError> { ) -> Result<(), MiniscriptPsbtError> {
// We need to borrow `psbt` mutably within the loops, so we have to allocate a vec for all // We need to borrow `psbt` mutably within the loops, so we have to allocate a vec for all
// the input utxos and outputs // the input utxos and outputs
//
// Clippy complains that the collect is not required, but that's wrong
#[allow(clippy::needless_collect)]
let utxos = (0..psbt.inputs.len()) let utxos = (0..psbt.inputs.len())
.filter_map(|i| psbt.get_utxo_for(i).map(|utxo| (true, i, utxo))) .filter_map(|i| psbt.get_utxo_for(i).map(|utxo| (true, i, utxo)))
.chain( .chain(
@@ -2320,7 +2212,7 @@ impl<D> Wallet<D> {
// Try to figure out the keychain and derivation for every input and output // Try to figure out the keychain and derivation for every input and output
for (is_input, index, out) in utxos.into_iter() { for (is_input, index, out) in utxos.into_iter() {
if let Some((keychain, child)) = if let Some(&(keychain, child)) =
self.indexed_graph.index.index_of_spk(&out.script_pubkey) self.indexed_graph.index.index_of_spk(&out.script_pubkey)
{ {
let desc = self.get_descriptor_for_keychain(keychain); let desc = self.get_descriptor_for_keychain(keychain);
@@ -2396,7 +2288,7 @@ impl<D> Wallet<D> {
self.persist.commit().map(|c| c.is_some()) self.persist.commit().map(|c| c.is_some())
} }
/// Returns the changes that will be committed with the next call to [`commit`]. /// Returns the changes that will be staged with the next call to [`commit`].
/// ///
/// [`commit`]: Self::commit /// [`commit`]: Self::commit
pub fn staged(&self) -> &ChangeSet pub fn staged(&self) -> &ChangeSet
@@ -2420,86 +2312,6 @@ impl<D> Wallet<D> {
pub fn local_chain(&self) -> &LocalChain { pub fn local_chain(&self) -> &LocalChain {
&self.chain &self.chain
} }
/// Introduces a `block` of `height` to the wallet, and tries to connect it to the
/// `prev_blockhash` of the block's header.
///
/// This is a convenience method that is equivalent to calling [`apply_block_connected_to`]
/// with `prev_blockhash` and `height-1` as the `connected_to` parameter.
///
/// [`apply_block_connected_to`]: Self::apply_block_connected_to
pub fn apply_block(&mut self, block: &Block, height: u32) -> Result<(), CannotConnectError>
where
D: PersistBackend<ChangeSet>,
{
let connected_to = match height.checked_sub(1) {
Some(prev_height) => BlockId {
height: prev_height,
hash: block.header.prev_blockhash,
},
None => BlockId {
height,
hash: block.block_hash(),
},
};
self.apply_block_connected_to(block, height, connected_to)
.map_err(|err| match err {
ApplyHeaderError::InconsistentBlocks => {
unreachable!("connected_to is derived from the block so must be consistent")
}
ApplyHeaderError::CannotConnect(err) => err,
})
}
/// Applies relevant transactions from `block` of `height` to the wallet, and connects the
/// block to the internal chain.
///
/// The `connected_to` parameter informs the wallet how this block connects to the internal
/// [`LocalChain`]. Relevant transactions are filtered from the `block` and inserted into the
/// internal [`TxGraph`].
pub fn apply_block_connected_to(
&mut self,
block: &Block,
height: u32,
connected_to: BlockId,
) -> Result<(), ApplyHeaderError>
where
D: PersistBackend<ChangeSet>,
{
let mut changeset = ChangeSet::default();
changeset.append(
self.chain
.apply_header_connected_to(&block.header, height, connected_to)?
.into(),
);
changeset.append(
self.indexed_graph
.apply_block_relevant(block, height)
.into(),
);
self.persist.stage(changeset);
Ok(())
}
/// Apply relevant unconfirmed transactions to the wallet.
///
/// Transactions that are not relevant are filtered out.
///
/// This method takes in an iterator of `(tx, last_seen)` where `last_seen` is the timestamp of
/// when the transaction was last seen in the mempool. This is used for conflict resolution
/// when there is conflicting unconfirmed transactions. The transaction with the later
/// `last_seen` is prioritized.
pub fn apply_unconfirmed_txs<'t>(
&mut self,
unconfirmed_txs: impl IntoIterator<Item = (&'t Transaction, u64)>,
) where
D: PersistBackend<ChangeSet>,
{
let indexed_graph_changeset = self
.indexed_graph
.batch_insert_relevant_unconfirmed(unconfirmed_txs);
self.persist.stage(ChangeSet::from(indexed_graph_changeset));
}
} }
impl<D> AsRef<bdk_chain::tx_graph::TxGraph<ConfirmationTimeHeightAnchor>> for Wallet<D> { impl<D> AsRef<bdk_chain::tx_graph::TxGraph<ConfirmationTimeHeightAnchor>> for Wallet<D> {
@@ -2578,17 +2390,6 @@ fn create_signers<E: IntoWalletDescriptor>(
Ok((signers, change_signers)) Ok((signers, change_signers))
} }
/// Transforms a [`FeeRate`] to `f64` with unit as sat/vb.
#[macro_export]
#[doc(hidden)]
macro_rules! floating_rate {
($rate:expr) => {{
use $crate::bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR;
// sat_kwu / 250.0 -> sat_vb
$rate.to_sat_per_kwu() as f64 / ((1000 / WITNESS_SCALE_FACTOR) as f64)
}};
}
#[macro_export] #[macro_export]
#[doc(hidden)] #[doc(hidden)]
/// Macro for getting a wallet for use in a doctest /// Macro for getting a wallet for use in a doctest

View File

@@ -80,7 +80,6 @@
//! ``` //! ```
use crate::collections::BTreeMap; use crate::collections::BTreeMap;
use alloc::string::String;
use alloc::sync::Arc; use alloc::sync::Arc;
use alloc::vec::Vec; use alloc::vec::Vec;
use core::cmp::Ordering; use core::cmp::Ordering;
@@ -163,10 +162,16 @@ pub enum SignerError {
SighashError(sighash::Error), SighashError(sighash::Error),
/// Miniscript PSBT error /// Miniscript PSBT error
MiniscriptPsbt(MiniscriptPsbtError), MiniscriptPsbt(MiniscriptPsbtError),
/// To be used only by external libraries implementing [`InputSigner`] or /// Error while signing using hardware wallets
/// [`TransactionSigner`], so that they can return their own custom errors, without having to #[cfg(feature = "hardware-signer")]
/// modify [`SignerError`] in BDK. HWIError(hwi::error::Error),
External(String), }
#[cfg(feature = "hardware-signer")]
impl From<hwi::error::Error> for SignerError {
fn from(e: hwi::error::Error) -> Self {
SignerError::HWIError(e)
}
} }
impl From<sighash::Error> for SignerError { impl From<sighash::Error> for SignerError {
@@ -191,7 +196,8 @@ impl fmt::Display for SignerError {
Self::InvalidSighash => write!(f, "Invalid SIGHASH for the signing context in use"), Self::InvalidSighash => write!(f, "Invalid SIGHASH for the signing context in use"),
Self::SighashError(err) => write!(f, "Error while computing the hash to sign: {}", err), Self::SighashError(err) => write!(f, "Error while computing the hash to sign: {}", err),
Self::MiniscriptPsbt(err) => write!(f, "Miniscript PSBT error: {}", err), Self::MiniscriptPsbt(err) => write!(f, "Miniscript PSBT error: {}", err),
Self::External(err) => write!(f, "{}", err), #[cfg(feature = "hardware-signer")]
Self::HWIError(err) => write!(f, "Error while signing using hardware wallets: {}", err),
} }
} }
} }
@@ -215,7 +221,7 @@ pub enum SignerContext {
}, },
} }
/// Wrapper to pair a signer with its context /// Wrapper structure to pair a signer with its context
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct SignerWrapper<S: Sized + fmt::Debug + Clone> { pub struct SignerWrapper<S: Sized + fmt::Debug + Clone> {
signer: S, signer: S,
@@ -782,16 +788,6 @@ pub struct SignOptions {
/// Defaults to `true` which will remove partial signatures during finalization. /// Defaults to `true` which will remove partial signatures during finalization.
pub remove_partial_sigs: bool, pub remove_partial_sigs: bool,
/// Whether to remove taproot specific fields from the PSBT on finalization.
///
/// For inputs this includes the taproot internal key, merkle root, and individual
/// scripts and signatures. For both inputs and outputs it includes key origin info.
///
/// Defaults to `true` which will remove all of the above mentioned fields when finalizing.
///
/// See [`BIP371`](https://github.com/bitcoin/bips/blob/master/bip-0371.mediawiki) for details.
pub remove_taproot_extras: bool,
/// Whether to try finalizing the PSBT after the inputs are signed. /// Whether to try finalizing the PSBT after the inputs are signed.
/// ///
/// Defaults to `true` which will try finalizing PSBT after inputs are signed. /// Defaults to `true` which will try finalizing PSBT after inputs are signed.
@@ -816,10 +812,9 @@ pub struct SignOptions {
} }
/// Customize which taproot script-path leaves the signer should sign. /// Customize which taproot script-path leaves the signer should sign.
#[derive(Default, Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
pub enum TapLeavesOptions { pub enum TapLeavesOptions {
/// The signer will sign all the leaves it has a key for. /// The signer will sign all the leaves it has a key for.
#[default]
All, All,
/// The signer won't sign leaves other than the ones specified. Note that it could still ignore /// The signer won't sign leaves other than the ones specified. Note that it could still ignore
/// some of the specified leaves, if it doesn't have the right key to sign them. /// some of the specified leaves, if it doesn't have the right key to sign them.
@@ -830,6 +825,13 @@ pub enum TapLeavesOptions {
None, None,
} }
impl Default for TapLeavesOptions {
fn default() -> Self {
TapLeavesOptions::All
}
}
#[allow(clippy::derivable_impls)]
impl Default for SignOptions { impl Default for SignOptions {
fn default() -> Self { fn default() -> Self {
SignOptions { SignOptions {
@@ -837,7 +839,6 @@ impl Default for SignOptions {
assume_height: None, assume_height: None,
allow_all_sighashes: false, allow_all_sighashes: false,
remove_partial_sigs: true, remove_partial_sigs: true,
remove_taproot_extras: true,
try_finalize: true, try_finalize: true,
tap_leaves_options: TapLeavesOptions::default(), tap_leaves_options: TapLeavesOptions::default(),
sign_with_tap_internal_key: true, sign_with_tap_internal_key: true,

View File

@@ -31,7 +31,7 @@
//! // Create a transaction with one output to `to_address` of 50_000 satoshi //! // Create a transaction with one output to `to_address` of 50_000 satoshi
//! .add_recipient(to_address.script_pubkey(), 50_000) //! .add_recipient(to_address.script_pubkey(), 50_000)
//! // With a custom fee rate of 5.0 satoshi/vbyte //! // With a custom fee rate of 5.0 satoshi/vbyte
//! .fee_rate(FeeRate::from_sat_per_vb(5).expect("valid feerate")) //! .fee_rate(bdk::FeeRate::from_sat_per_vb(5.0))
//! // Only spend non-change outputs //! // Only spend non-change outputs
//! .do_not_spend_change() //! .do_not_spend_change()
//! // Turn on RBF signaling //! // Turn on RBF signaling
@@ -40,20 +40,22 @@
//! # Ok::<(), anyhow::Error>(()) //! # Ok::<(), anyhow::Error>(())
//! ``` //! ```
use crate::collections::BTreeMap;
use crate::collections::HashSet;
use alloc::{boxed::Box, rc::Rc, string::String, vec::Vec}; use alloc::{boxed::Box, rc::Rc, string::String, vec::Vec};
use bdk_chain::PersistBackend;
use core::cell::RefCell; use core::cell::RefCell;
use core::fmt; use core::fmt;
use core::marker::PhantomData; use core::marker::PhantomData;
use bdk_chain::PersistBackend;
use bitcoin::psbt::{self, PartiallySignedTransaction as Psbt}; use bitcoin::psbt::{self, PartiallySignedTransaction as Psbt};
use bitcoin::script::PushBytes; use bitcoin::{absolute, script::PushBytes, OutPoint, ScriptBuf, Sequence, Transaction, Txid};
use bitcoin::{absolute, FeeRate, OutPoint, ScriptBuf, Sequence, Transaction, Txid};
use super::coin_selection::{CoinSelectionAlgorithm, DefaultCoinSelectionAlgorithm}; use super::coin_selection::{CoinSelectionAlgorithm, DefaultCoinSelectionAlgorithm};
use super::{ChangeSet, CreateTxError, Wallet}; use super::ChangeSet;
use crate::collections::{BTreeMap, HashSet}; use crate::types::{FeeRate, KeychainKind, LocalOutput, WeightedUtxo};
use crate::{KeychainKind, LocalOutput, Utxo, WeightedUtxo}; use crate::wallet::CreateTxError;
use crate::{Utxo, Wallet};
/// Context in which the [`TxBuilder`] is valid /// Context in which the [`TxBuilder`] is valid
pub trait TxBuilderContext: core::fmt::Debug + Default + Clone {} pub trait TxBuilderContext: core::fmt::Debug + Default + Clone {}
@@ -161,7 +163,7 @@ pub(crate) struct TxParams {
#[derive(Clone, Copy, Debug)] #[derive(Clone, Copy, Debug)]
pub(crate) struct PreviousFee { pub(crate) struct PreviousFee {
pub absolute: u64, pub absolute: u64,
pub rate: FeeRate, pub rate: f32,
} }
#[derive(Debug, Clone, Copy)] #[derive(Debug, Clone, Copy)]
@@ -172,7 +174,7 @@ pub(crate) enum FeePolicy {
impl Default for FeePolicy { impl Default for FeePolicy {
fn default() -> Self { fn default() -> Self {
FeePolicy::FeeRate(FeeRate::BROADCAST_MIN) FeePolicy::FeeRate(FeeRate::default_min_relay_fee())
} }
} }
@@ -188,13 +190,15 @@ impl<'a, D, Cs: Clone, Ctx> Clone for TxBuilder<'a, D, Cs, Ctx> {
} }
// methods supported by both contexts, for any CoinSelectionAlgorithm // methods supported by both contexts, for any CoinSelectionAlgorithm
impl<'a, D, Cs, Ctx> TxBuilder<'a, D, Cs, Ctx> { impl<'a, D, Cs: CoinSelectionAlgorithm, Ctx: TxBuilderContext> TxBuilder<'a, D, Cs, Ctx> {
/// Set a custom fee rate. /// Set a custom fee rate
/// /// The fee_rate method sets the mining fee paid by the transaction as a rate on its size.
/// This method sets the mining fee paid by the transaction as a rate on its size. /// This means that the total fee paid is equal to this rate * size of the transaction in virtual Bytes (vB) or Weight Unit (wu).
/// This means that the total fee paid is equal to `fee_rate` times the size /// This rate is internally expressed in satoshis-per-virtual-bytes (sats/vB) using FeeRate::from_sat_per_vb, but can also be set by:
/// of the transaction. Default is 1 sat/vB in accordance with Bitcoin Core's default /// * sats/kvB (1000 sats/kvB == 1 sats/vB) using FeeRate::from_sat_per_kvb
/// relay policy. /// * btc/kvB (0.00001000 btc/kvB == 1 sats/vB) using FeeRate::from_btc_per_kvb
/// * sats/kwu (250 sats/kwu == 1 sats/vB) using FeeRate::from_sat_per_kwu
/// Default is 1 sat/vB (see min_relay_fee)
/// ///
/// Note that this is really a minimum feerate -- it's possible to /// Note that this is really a minimum feerate -- it's possible to
/// overshoot it slightly since adding a change output to drain the remaining /// overshoot it slightly since adding a change output to drain the remaining
@@ -385,22 +389,6 @@ impl<'a, D, Cs, Ctx> TxBuilder<'a, D, Cs, Ctx> {
outpoint: OutPoint, outpoint: OutPoint,
psbt_input: psbt::Input, psbt_input: psbt::Input,
satisfaction_weight: usize, satisfaction_weight: usize,
) -> Result<&mut Self, AddForeignUtxoError> {
self.add_foreign_utxo_with_sequence(
outpoint,
psbt_input,
satisfaction_weight,
Sequence::MAX,
)
}
/// Same as [add_foreign_utxo](TxBuilder::add_foreign_utxo) but allows to set the nSequence value.
pub fn add_foreign_utxo_with_sequence(
&mut self,
outpoint: OutPoint,
psbt_input: psbt::Input,
satisfaction_weight: usize,
sequence: Sequence,
) -> Result<&mut Self, AddForeignUtxoError> { ) -> Result<&mut Self, AddForeignUtxoError> {
if psbt_input.witness_utxo.is_none() { if psbt_input.witness_utxo.is_none() {
match psbt_input.non_witness_utxo.as_ref() { match psbt_input.non_witness_utxo.as_ref() {
@@ -425,7 +413,6 @@ impl<'a, D, Cs, Ctx> TxBuilder<'a, D, Cs, Ctx> {
satisfaction_weight, satisfaction_weight,
utxo: Utxo::Foreign { utxo: Utxo::Foreign {
outpoint, outpoint,
sequence: Some(sequence),
psbt_input: Box::new(psbt_input), psbt_input: Box::new(psbt_input),
}, },
}); });
@@ -570,6 +557,20 @@ impl<'a, D, Cs, Ctx> TxBuilder<'a, D, Cs, Ctx> {
} }
} }
/// Finish building the transaction.
///
/// Returns a new [`Psbt`] per [`BIP174`].
///
/// [`BIP174`]: https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki
pub fn finish(self) -> Result<Psbt, CreateTxError<D::WriteError>>
where
D: PersistBackend<ChangeSet>,
{
self.wallet
.borrow_mut()
.create_tx(self.coin_selection, self.params)
}
/// Enable signaling RBF /// Enable signaling RBF
/// ///
/// This will use the default nSequence value of `0xFFFFFFFD`. /// This will use the default nSequence value of `0xFFFFFFFD`.
@@ -616,22 +617,6 @@ impl<'a, D, Cs, Ctx> TxBuilder<'a, D, Cs, Ctx> {
} }
} }
impl<'a, D, Cs: CoinSelectionAlgorithm, Ctx> TxBuilder<'a, D, Cs, Ctx> {
/// Finish building the transaction.
///
/// Returns a new [`Psbt`] per [`BIP174`].
///
/// [`BIP174`]: https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki
pub fn finish(self) -> Result<Psbt, CreateTxError<D::WriteError>>
where
D: PersistBackend<ChangeSet>,
{
self.wallet
.borrow_mut()
.create_tx(self.coin_selection, self.params)
}
}
#[derive(Debug)] #[derive(Debug)]
/// Error returned from [`TxBuilder::add_utxo`] and [`TxBuilder::add_utxos`] /// Error returned from [`TxBuilder::add_utxo`] and [`TxBuilder::add_utxos`]
pub enum AddUtxoError { pub enum AddUtxoError {
@@ -777,7 +762,7 @@ impl<'a, D, Cs: CoinSelectionAlgorithm> TxBuilder<'a, D, Cs, CreateTx> {
/// .drain_wallet() /// .drain_wallet()
/// // Send the excess (which is all the coins minus the fee) to this address. /// // Send the excess (which is all the coins minus the fee) to this address.
/// .drain_to(to_address.script_pubkey()) /// .drain_to(to_address.script_pubkey())
/// .fee_rate(FeeRate::from_sat_per_vb(5).expect("valid feerate")) /// .fee_rate(bdk::FeeRate::from_sat_per_vb(5.0))
/// .enable_rbf(); /// .enable_rbf();
/// let psbt = tx_builder.finish()?; /// let psbt = tx_builder.finish()?;
/// # Ok::<(), anyhow::Error>(()) /// # Ok::<(), anyhow::Error>(())
@@ -826,10 +811,9 @@ impl<'a, D> TxBuilder<'a, D, DefaultCoinSelectionAlgorithm, BumpFee> {
} }
/// Ordering of the transaction's inputs and outputs /// Ordering of the transaction's inputs and outputs
#[derive(Default, Debug, Ord, PartialOrd, Eq, PartialEq, Hash, Clone, Copy)] #[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Hash, Clone, Copy)]
pub enum TxOrdering { pub enum TxOrdering {
/// Randomized (default) /// Randomized (default)
#[default]
Shuffle, Shuffle,
/// Unchanged /// Unchanged
Untouched, Untouched,
@@ -837,6 +821,12 @@ pub enum TxOrdering {
Bip69Lexicographic, Bip69Lexicographic,
} }
impl Default for TxOrdering {
fn default() -> Self {
TxOrdering::Shuffle
}
}
impl TxOrdering { impl TxOrdering {
/// Sort transaction inputs and outputs by [`TxOrdering`] variant /// Sort transaction inputs and outputs by [`TxOrdering`] variant
pub fn sort_tx(&self, tx: &mut Transaction) { pub fn sort_tx(&self, tx: &mut Transaction) {
@@ -890,10 +880,9 @@ impl RbfValue {
} }
/// Policy regarding the use of change outputs when creating a transaction /// Policy regarding the use of change outputs when creating a transaction
#[derive(Default, Debug, Ord, PartialOrd, Eq, PartialEq, Hash, Clone, Copy)] #[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Hash, Clone, Copy)]
pub enum ChangeSpendPolicy { pub enum ChangeSpendPolicy {
/// Use both change and non-change outputs (default) /// Use both change and non-change outputs (default)
#[default]
ChangeAllowed, ChangeAllowed,
/// Only use change outputs (see [`TxBuilder::only_spend_change`]) /// Only use change outputs (see [`TxBuilder::only_spend_change`])
OnlyChange, OnlyChange,
@@ -901,6 +890,12 @@ pub enum ChangeSpendPolicy {
ChangeForbidden, ChangeForbidden,
} }
impl Default for ChangeSpendPolicy {
fn default() -> Self {
ChangeSpendPolicy::ChangeAllowed
}
}
impl ChangeSpendPolicy { impl ChangeSpendPolicy {
pub(crate) fn is_satisfied_by(&self, utxo: &LocalOutput) -> bool { pub(crate) fn is_satisfied_by(&self, utxo: &LocalOutput) -> bool {
match self { match self {

View File

@@ -4,7 +4,7 @@ use bdk::{wallet::AddressIndex, KeychainKind, LocalOutput, Wallet};
use bdk_chain::indexed_tx_graph::Indexer; use bdk_chain::indexed_tx_graph::Indexer;
use bdk_chain::{BlockId, ConfirmationTime}; use bdk_chain::{BlockId, ConfirmationTime};
use bitcoin::hashes::Hash; use bitcoin::hashes::Hash;
use bitcoin::{Address, BlockHash, FeeRate, Network, OutPoint, Transaction, TxIn, TxOut, Txid}; use bitcoin::{Address, BlockHash, Network, OutPoint, Transaction, TxIn, TxOut, Txid};
use std::str::FromStr; use std::str::FromStr;
// Return a fake wallet that appears to be funded for testing. // Return a fake wallet that appears to be funded for testing.
@@ -154,16 +154,3 @@ pub fn get_test_tr_with_taptree_xprv() -> &'static str {
pub fn get_test_tr_dup_keys() -> &'static str { pub fn get_test_tr_dup_keys() -> &'static str {
"tr(cNJmN3fH9DDbDt131fQNkVakkpzawJBSeybCUNmP1BovpmGQ45xG,{pk(8aee2b8120a5f157f1223f72b5e62b825831a27a9fdf427db7cc697494d4a642),pk(8aee2b8120a5f157f1223f72b5e62b825831a27a9fdf427db7cc697494d4a642)})" "tr(cNJmN3fH9DDbDt131fQNkVakkpzawJBSeybCUNmP1BovpmGQ45xG,{pk(8aee2b8120a5f157f1223f72b5e62b825831a27a9fdf427db7cc697494d4a642),pk(8aee2b8120a5f157f1223f72b5e62b825831a27a9fdf427db7cc697494d4a642)})"
} }
/// Construct a new [`FeeRate`] from the given raw `sat_vb` feerate. This is
/// useful in cases where we want to create a feerate from a `f64`, as the
/// traditional [`FeeRate::from_sat_per_vb`] method will only accept an integer.
///
/// **Note** this 'quick and dirty' conversion should only be used when the input
/// parameter has units of `satoshis/vbyte` **AND** is not expected to overflow,
/// or else the resulting value will be inaccurate.
pub fn feerate_unchecked(sat_vb: f64) -> FeeRate {
// 1 sat_vb / 4wu_vb * 1000kwu_wu = 250 sat_kwu
let sat_kwu = (sat_vb * 250.0).ceil() as u64;
FeeRate::from_sat_per_kwu(sat_kwu)
}

View File

@@ -1,8 +1,7 @@
use bdk::bitcoin::FeeRate;
use bdk::bitcoin::TxIn; use bdk::bitcoin::TxIn;
use bdk::wallet::AddressIndex; use bdk::wallet::AddressIndex;
use bdk::wallet::AddressIndex::New; use bdk::wallet::AddressIndex::New;
use bdk::{psbt, SignOptions}; use bdk::{psbt, FeeRate, SignOptions};
use bitcoin::psbt::PartiallySignedTransaction as Psbt; use bitcoin::psbt::PartiallySignedTransaction as Psbt;
use core::str::FromStr; use core::str::FromStr;
mod common; mod common;
@@ -83,13 +82,13 @@ fn test_psbt_sign_with_finalized() {
fn test_psbt_fee_rate_with_witness_utxo() { fn test_psbt_fee_rate_with_witness_utxo() {
use psbt::PsbtUtils; use psbt::PsbtUtils;
let expected_fee_rate = FeeRate::from_sat_per_kwu(310); let expected_fee_rate = 1.2345;
let (mut wallet, _) = get_funded_wallet("wpkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)"); let (mut wallet, _) = get_funded_wallet("wpkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)");
let addr = wallet.get_address(New); let addr = wallet.get_address(New);
let mut builder = wallet.build_tx(); let mut builder = wallet.build_tx();
builder.drain_to(addr.script_pubkey()).drain_wallet(); builder.drain_to(addr.script_pubkey()).drain_wallet();
builder.fee_rate(expected_fee_rate); builder.fee_rate(FeeRate::from_sat_per_vb(expected_fee_rate));
let mut psbt = builder.finish().unwrap(); let mut psbt = builder.finish().unwrap();
let fee_amount = psbt.fee_amount(); let fee_amount = psbt.fee_amount();
assert!(fee_amount.is_some()); assert!(fee_amount.is_some());
@@ -100,21 +99,21 @@ fn test_psbt_fee_rate_with_witness_utxo() {
assert!(finalized); assert!(finalized);
let finalized_fee_rate = psbt.fee_rate().unwrap(); let finalized_fee_rate = psbt.fee_rate().unwrap();
assert!(finalized_fee_rate >= expected_fee_rate); assert!(finalized_fee_rate.as_sat_per_vb() >= expected_fee_rate);
assert!(finalized_fee_rate < unfinalized_fee_rate); assert!(finalized_fee_rate.as_sat_per_vb() < unfinalized_fee_rate.as_sat_per_vb());
} }
#[test] #[test]
fn test_psbt_fee_rate_with_nonwitness_utxo() { fn test_psbt_fee_rate_with_nonwitness_utxo() {
use psbt::PsbtUtils; use psbt::PsbtUtils;
let expected_fee_rate = FeeRate::from_sat_per_kwu(310); let expected_fee_rate = 1.2345;
let (mut wallet, _) = get_funded_wallet("pkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)"); let (mut wallet, _) = get_funded_wallet("pkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)");
let addr = wallet.get_address(New); let addr = wallet.get_address(New);
let mut builder = wallet.build_tx(); let mut builder = wallet.build_tx();
builder.drain_to(addr.script_pubkey()).drain_wallet(); builder.drain_to(addr.script_pubkey()).drain_wallet();
builder.fee_rate(expected_fee_rate); builder.fee_rate(FeeRate::from_sat_per_vb(expected_fee_rate));
let mut psbt = builder.finish().unwrap(); let mut psbt = builder.finish().unwrap();
let fee_amount = psbt.fee_amount(); let fee_amount = psbt.fee_amount();
assert!(fee_amount.is_some()); assert!(fee_amount.is_some());
@@ -124,21 +123,21 @@ fn test_psbt_fee_rate_with_nonwitness_utxo() {
assert!(finalized); assert!(finalized);
let finalized_fee_rate = psbt.fee_rate().unwrap(); let finalized_fee_rate = psbt.fee_rate().unwrap();
assert!(finalized_fee_rate >= expected_fee_rate); assert!(finalized_fee_rate.as_sat_per_vb() >= expected_fee_rate);
assert!(finalized_fee_rate < unfinalized_fee_rate); assert!(finalized_fee_rate.as_sat_per_vb() < unfinalized_fee_rate.as_sat_per_vb());
} }
#[test] #[test]
fn test_psbt_fee_rate_with_missing_txout() { fn test_psbt_fee_rate_with_missing_txout() {
use psbt::PsbtUtils; use psbt::PsbtUtils;
let expected_fee_rate = FeeRate::from_sat_per_kwu(310); let expected_fee_rate = 1.2345;
let (mut wpkh_wallet, _) = get_funded_wallet("wpkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)"); let (mut wpkh_wallet, _) = get_funded_wallet("wpkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)");
let addr = wpkh_wallet.get_address(New); let addr = wpkh_wallet.get_address(New);
let mut builder = wpkh_wallet.build_tx(); let mut builder = wpkh_wallet.build_tx();
builder.drain_to(addr.script_pubkey()).drain_wallet(); builder.drain_to(addr.script_pubkey()).drain_wallet();
builder.fee_rate(expected_fee_rate); builder.fee_rate(FeeRate::from_sat_per_vb(expected_fee_rate));
let mut wpkh_psbt = builder.finish().unwrap(); let mut wpkh_psbt = builder.finish().unwrap();
wpkh_psbt.inputs[0].witness_utxo = None; wpkh_psbt.inputs[0].witness_utxo = None;
@@ -150,7 +149,7 @@ fn test_psbt_fee_rate_with_missing_txout() {
let addr = pkh_wallet.get_address(New); let addr = pkh_wallet.get_address(New);
let mut builder = pkh_wallet.build_tx(); let mut builder = pkh_wallet.build_tx();
builder.drain_to(addr.script_pubkey()).drain_wallet(); builder.drain_to(addr.script_pubkey()).drain_wallet();
builder.fee_rate(expected_fee_rate); builder.fee_rate(FeeRate::from_sat_per_vb(expected_fee_rate));
let mut pkh_psbt = builder.finish().unwrap(); let mut pkh_psbt = builder.finish().unwrap();
pkh_psbt.inputs[0].non_witness_utxo = None; pkh_psbt.inputs[0].non_witness_utxo = None;
@@ -162,26 +161,16 @@ fn test_psbt_fee_rate_with_missing_txout() {
fn test_psbt_multiple_internalkey_signers() { fn test_psbt_multiple_internalkey_signers() {
use bdk::signer::{SignerContext, SignerOrdering, SignerWrapper}; use bdk::signer::{SignerContext, SignerOrdering, SignerWrapper};
use bdk::KeychainKind; use bdk::KeychainKind;
use bitcoin::key::TapTweak; use bitcoin::{secp256k1::Secp256k1, PrivateKey};
use bitcoin::secp256k1::{schnorr, KeyPair, Message, Secp256k1, XOnlyPublicKey}; use miniscript::psbt::PsbtExt;
use bitcoin::sighash::{Prevouts, SighashCache, TapSighashType};
use bitcoin::{PrivateKey, TxOut};
use std::sync::Arc; use std::sync::Arc;
let secp = Secp256k1::new(); let secp = Secp256k1::new();
let wif = "cNJmN3fH9DDbDt131fQNkVakkpzawJBSeybCUNmP1BovpmGQ45xG"; let (mut wallet, _) = get_funded_wallet(get_test_tr_single_sig());
let desc = format!("tr({})", wif);
let prv = PrivateKey::from_wif(wif).unwrap();
let keypair = KeyPair::from_secret_key(&secp, &prv.inner);
let (mut wallet, _) = get_funded_wallet(&desc);
let to_spend = wallet.get_balance().total();
let send_to = wallet.get_address(AddressIndex::New); let send_to = wallet.get_address(AddressIndex::New);
let mut builder = wallet.build_tx(); let mut builder = wallet.build_tx();
builder.drain_to(send_to.script_pubkey()).drain_wallet(); builder.add_recipient(send_to.script_pubkey(), 10_000);
let mut psbt = builder.finish().unwrap(); let mut psbt = builder.finish().unwrap();
let unsigned_tx = psbt.unsigned_tx.clone();
// Adds a signer for the wrong internal key, bdk should not use this key to sign // Adds a signer for the wrong internal key, bdk should not use this key to sign
wallet.add_signer( wallet.add_signer(
KeychainKind::External, KeychainKind::External,
@@ -194,32 +183,10 @@ fn test_psbt_multiple_internalkey_signers() {
}, },
)), )),
); );
let finalized = wallet.sign(&mut psbt, SignOptions::default()).unwrap(); let _ = wallet.sign(&mut psbt, SignOptions::default()).unwrap();
assert!(finalized); // Checks that we signed using the right key
assert!(
// To verify, we need the signature, message, and pubkey psbt.finalize_mut(&secp).is_ok(),
let witness = psbt.inputs[0].final_script_witness.as_ref().unwrap(); "The wrong internal key was used"
assert!(!witness.is_empty()); );
let signature = schnorr::Signature::from_slice(witness.iter().next().unwrap()).unwrap();
// the prevout we're spending
let prevouts = &[TxOut {
script_pubkey: send_to.script_pubkey(),
value: to_spend,
}];
let prevouts = Prevouts::All(prevouts);
let input_index = 0;
let mut sighash_cache = SighashCache::new(unsigned_tx);
let sighash = sighash_cache
.taproot_key_spend_signature_hash(input_index, &prevouts, TapSighashType::Default)
.unwrap();
let message = Message::from(sighash);
// add tweak. this was taken from `signer::sign_psbt_schnorr`
let keypair = keypair.tap_tweak(&secp, None).to_inner();
let (xonlykey, _parity) = XOnlyPublicKey::from_keypair(&keypair);
// Must verify if we used the correct key to sign
let verify_res = secp.verify_schnorr(&signature, &message, &xonlykey);
assert!(verify_res.is_ok(), "The wrong internal key was used");
} }

View File

@@ -7,20 +7,20 @@ use bdk::signer::{SignOptions, SignerError};
use bdk::wallet::coin_selection::{self, LargestFirstCoinSelection}; use bdk::wallet::coin_selection::{self, LargestFirstCoinSelection};
use bdk::wallet::error::CreateTxError; use bdk::wallet::error::CreateTxError;
use bdk::wallet::tx_builder::AddForeignUtxoError; use bdk::wallet::tx_builder::AddForeignUtxoError;
use bdk::wallet::AddressIndex::*;
use bdk::wallet::{AddressIndex, AddressInfo, Balance, Wallet}; use bdk::wallet::{AddressIndex, AddressInfo, Balance, Wallet};
use bdk::wallet::{AddressIndex::*, NewError}; use bdk::{FeeRate, KeychainKind};
use bdk::KeychainKind;
use bdk_chain::COINBASE_MATURITY; use bdk_chain::COINBASE_MATURITY;
use bdk_chain::{BlockId, ConfirmationTime}; use bdk_chain::{BlockId, ConfirmationTime};
use bitcoin::hashes::Hash; use bitcoin::hashes::Hash;
use bitcoin::psbt;
use bitcoin::script::PushBytesBuf;
use bitcoin::sighash::{EcdsaSighashType, TapSighashType}; use bitcoin::sighash::{EcdsaSighashType, TapSighashType};
use bitcoin::taproot::TapNodeHash; use bitcoin::ScriptBuf;
use bitcoin::{ use bitcoin::{
absolute, Address, Amount, BlockHash, FeeRate, Network, OutPoint, ScriptBuf, Sequence, absolute, script::PushBytesBuf, taproot::TapNodeHash, Address, OutPoint, Sequence, Transaction,
Transaction, TxIn, TxOut, Txid, Weight, TxIn, TxOut, Weight,
}; };
use bitcoin::{psbt, Network};
use bitcoin::{BlockHash, Txid};
mod common; mod common;
use common::*; use common::*;
@@ -71,33 +71,19 @@ fn load_recovers_wallet() {
let file_path = temp_dir.path().join("store.db"); let file_path = temp_dir.path().join("store.db");
// create new wallet // create new wallet
let wallet_spk_index = { let wallet_keychains = {
let db = bdk_file_store::Store::create_new(DB_MAGIC, &file_path).expect("must create db"); let db = bdk_file_store::Store::create_new(DB_MAGIC, &file_path).expect("must create db");
let mut wallet = Wallet::new(get_test_tr_single_sig_xprv(), None, db, Network::Testnet) let wallet =
.expect("must init wallet"); Wallet::new(get_test_wpkh(), None, db, Network::Testnet).expect("must init wallet");
wallet.keychains().clone()
wallet.try_get_address(New).unwrap();
wallet.spk_index().clone()
}; };
// recover wallet // recover wallet
{ {
let db = bdk_file_store::Store::open(DB_MAGIC, &file_path).expect("must recover db"); let db = bdk_file_store::Store::open(DB_MAGIC, &file_path).expect("must recover db");
let wallet = let wallet = Wallet::load(get_test_wpkh(), None, db).expect("must recover wallet");
Wallet::load(get_test_tr_single_sig_xprv(), None, db).expect("must recover wallet");
assert_eq!(wallet.network(), Network::Testnet); assert_eq!(wallet.network(), Network::Testnet);
assert_eq!(wallet.spk_index().keychains(), wallet_spk_index.keychains()); assert_eq!(wallet.spk_index().keychains(), &wallet_keychains);
assert_eq!(
wallet.spk_index().last_revealed_indices(),
wallet_spk_index.last_revealed_indices()
);
}
// `new` can only be called on empty db
{
let db = bdk_file_store::Store::open(DB_MAGIC, &file_path).expect("must recover db");
let result = Wallet::new(get_test_tr_single_sig_xprv(), None, db, Network::Testnet);
assert!(matches!(result, Err(NewError::NonEmptyDatabase)));
} }
} }
@@ -106,7 +92,7 @@ fn new_or_load() {
let temp_dir = tempfile::tempdir().expect("must create tempdir"); let temp_dir = tempfile::tempdir().expect("must create tempdir");
let file_path = temp_dir.path().join("store.db"); let file_path = temp_dir.path().join("store.db");
// init wallet when non-existent // init wallet when non-existant
let wallet_keychains = { let wallet_keychains = {
let db = bdk_file_store::Store::open_or_create_new(DB_MAGIC, &file_path) let db = bdk_file_store::Store::open_or_create_new(DB_MAGIC, &file_path)
.expect("must create db"); .expect("must create db");
@@ -246,11 +232,9 @@ fn test_get_funded_wallet_tx_fee_rate() {
// to a foreign address and one returning 50_000 back to the wallet as change. The remaining 1000 // to a foreign address and one returning 50_000 back to the wallet as change. The remaining 1000
// sats are the transaction fee. // sats are the transaction fee.
// tx weight = 452 wu, as vbytes = (452 + 3) / 4 = 113 // tx weight = 452 bytes, as vbytes = (452+3)/4 = 113
// fee_rate (sats per kwu) = fee / weight = 1000sat / 0.452kwu = 2212 // fee rate (sats per vbyte) = fee / vbytes = 1000 / 113 = 8.8495575221 rounded to 8.849558
// fee_rate (sats per vbyte ceil) = fee / vsize = 1000sat / 113vb = 9 assert_eq!(tx_fee_rate.as_sat_per_vb(), 8.849558);
assert_eq!(tx_fee_rate.to_sat_per_kwu(), 2212);
assert_eq!(tx_fee_rate.to_sat_per_vb_ceil(), 9);
} }
#[test] #[test]
@@ -304,15 +288,11 @@ macro_rules! assert_fee_rate {
assert_eq!(fee_amount, $fees); assert_eq!(fee_amount, $fees);
let tx_fee_rate = (Amount::from_sat(fee_amount) / tx.weight()) let tx_fee_rate = FeeRate::from_wu($fees, tx.weight());
.to_sat_per_kwu(); let fee_rate = $fee_rate;
let fee_rate = $fee_rate.to_sat_per_kwu();
let half_default = FeeRate::BROADCAST_MIN.checked_div(2)
.unwrap()
.to_sat_per_kwu();
if !dust_change { if !dust_change {
assert!(tx_fee_rate >= fee_rate && tx_fee_rate - fee_rate < half_default, "Expected fee rate of {:?}, the tx has {:?}", fee_rate, tx_fee_rate); assert!(tx_fee_rate >= fee_rate && (tx_fee_rate - fee_rate).as_sat_per_vb().abs() < 0.5, "Expected fee rate of {:?}, the tx has {:?}", fee_rate, tx_fee_rate);
} else { } else {
assert!(tx_fee_rate >= fee_rate, "Expected fee rate of at least {:?}, the tx has {:?}", fee_rate, tx_fee_rate); assert!(tx_fee_rate >= fee_rate, "Expected fee rate of at least {:?}, the tx has {:?}", fee_rate, tx_fee_rate);
} }
@@ -653,7 +633,7 @@ fn test_create_tx_default_fee_rate() {
let psbt = builder.finish().unwrap(); let psbt = builder.finish().unwrap();
let fee = check_fee!(wallet, psbt); let fee = check_fee!(wallet, psbt);
assert_fee_rate!(psbt, fee.unwrap_or(0), FeeRate::BROADCAST_MIN, @add_signature); assert_fee_rate!(psbt, fee.unwrap_or(0), FeeRate::default(), @add_signature);
} }
#[test] #[test]
@@ -663,11 +643,11 @@ fn test_create_tx_custom_fee_rate() {
let mut builder = wallet.build_tx(); let mut builder = wallet.build_tx();
builder builder
.add_recipient(addr.script_pubkey(), 25_000) .add_recipient(addr.script_pubkey(), 25_000)
.fee_rate(FeeRate::from_sat_per_vb_unchecked(5)); .fee_rate(FeeRate::from_sat_per_vb(5.0));
let psbt = builder.finish().unwrap(); let psbt = builder.finish().unwrap();
let fee = check_fee!(wallet, psbt); let fee = check_fee!(wallet, psbt);
assert_fee_rate!(psbt, fee.unwrap_or(0), FeeRate::from_sat_per_vb_unchecked(5), @add_signature); assert_fee_rate!(psbt, fee.unwrap_or(0), FeeRate::from_sat_per_vb(5.0), @add_signature);
} }
#[test] #[test]
@@ -759,7 +739,7 @@ fn test_create_tx_drain_to_dust_amount() {
builder builder
.drain_to(addr.script_pubkey()) .drain_to(addr.script_pubkey())
.drain_wallet() .drain_wallet()
.fee_rate(FeeRate::from_sat_per_vb_unchecked(454)); .fee_rate(FeeRate::from_sat_per_vb(453.0));
builder.finish().unwrap(); builder.finish().unwrap();
} }
@@ -1487,6 +1467,7 @@ fn test_bump_fee_confirmed_tx() {
} }
#[test] #[test]
#[should_panic(expected = "FeeRateTooLow")]
fn test_bump_fee_low_fee_rate() { fn test_bump_fee_low_fee_rate() {
let (mut wallet, _) = get_funded_wallet(get_test_wpkh()); let (mut wallet, _) = get_funded_wallet(get_test_wpkh());
let addr = wallet.get_address(New); let addr = wallet.get_address(New);
@@ -1495,7 +1476,6 @@ fn test_bump_fee_low_fee_rate() {
.add_recipient(addr.script_pubkey(), 25_000) .add_recipient(addr.script_pubkey(), 25_000)
.enable_rbf(); .enable_rbf();
let psbt = builder.finish().unwrap(); let psbt = builder.finish().unwrap();
let feerate = psbt.fee_rate().unwrap();
let tx = psbt.extract_tx(); let tx = psbt.extract_tx();
let txid = tx.txid(); let txid = tx.txid();
@@ -1505,18 +1485,8 @@ fn test_bump_fee_low_fee_rate() {
.unwrap(); .unwrap();
let mut builder = wallet.build_fee_bump(txid).unwrap(); let mut builder = wallet.build_fee_bump(txid).unwrap();
builder.fee_rate(FeeRate::BROADCAST_MIN); builder.fee_rate(FeeRate::from_sat_per_vb(1.0));
let res = builder.finish(); builder.finish().unwrap();
assert_matches!(
res,
Err(CreateTxError::FeeRateTooLow { .. }),
"expected FeeRateTooLow error"
);
let required = feerate.to_sat_per_kwu() + 250; // +1 sat/vb
let sat_vb = required as f64 / 250.0;
let expect = format!("Fee rate too low: required {} sat/vb", sat_vb);
assert_eq!(res.unwrap_err().to_string(), expect);
} }
#[test] #[test]
@@ -1584,9 +1554,8 @@ fn test_bump_fee_reduce_change() {
.insert_tx(tx, ConfirmationTime::Unconfirmed { last_seen: 0 }) .insert_tx(tx, ConfirmationTime::Unconfirmed { last_seen: 0 })
.unwrap(); .unwrap();
let feerate = FeeRate::from_sat_per_kwu(625); // 2.5 sat/vb
let mut builder = wallet.build_fee_bump(txid).unwrap(); let mut builder = wallet.build_fee_bump(txid).unwrap();
builder.fee_rate(feerate).enable_rbf(); builder.fee_rate(FeeRate::from_sat_per_vb(2.5)).enable_rbf();
let psbt = builder.finish().unwrap(); let psbt = builder.finish().unwrap();
let sent_received = wallet.sent_and_received(&psbt.clone().extract_tx()); let sent_received = wallet.sent_and_received(&psbt.clone().extract_tx());
let fee = check_fee!(wallet, psbt); let fee = check_fee!(wallet, psbt);
@@ -1617,7 +1586,7 @@ fn test_bump_fee_reduce_change() {
sent_received.1 sent_received.1
); );
assert_fee_rate!(psbt, fee.unwrap_or(0), feerate, @add_signature); assert_fee_rate!(psbt, fee.unwrap_or(0), FeeRate::from_sat_per_vb(2.5), @add_signature);
let mut builder = wallet.build_fee_bump(txid).unwrap(); let mut builder = wallet.build_fee_bump(txid).unwrap();
builder.fee_absolute(200); builder.fee_absolute(200);
@@ -1680,10 +1649,9 @@ fn test_bump_fee_reduce_single_recipient() {
.insert_tx(tx, ConfirmationTime::Unconfirmed { last_seen: 0 }) .insert_tx(tx, ConfirmationTime::Unconfirmed { last_seen: 0 })
.unwrap(); .unwrap();
let feerate = FeeRate::from_sat_per_kwu(625); // 2.5 sat/vb
let mut builder = wallet.build_fee_bump(txid).unwrap(); let mut builder = wallet.build_fee_bump(txid).unwrap();
builder builder
.fee_rate(feerate) .fee_rate(FeeRate::from_sat_per_vb(2.5))
.allow_shrinking(addr.script_pubkey()) .allow_shrinking(addr.script_pubkey())
.unwrap(); .unwrap();
let psbt = builder.finish().unwrap(); let psbt = builder.finish().unwrap();
@@ -1697,7 +1665,7 @@ fn test_bump_fee_reduce_single_recipient() {
assert_eq!(tx.output.len(), 1); assert_eq!(tx.output.len(), 1);
assert_eq!(tx.output[0].value + fee.unwrap_or(0), sent_received.0); assert_eq!(tx.output[0].value + fee.unwrap_or(0), sent_received.0);
assert_fee_rate!(psbt, fee.unwrap_or(0), feerate, @add_signature); assert_fee_rate!(psbt, fee.unwrap_or(0), FeeRate::from_sat_per_vb(2.5), @add_signature);
} }
#[test] #[test]
@@ -1792,7 +1760,7 @@ fn test_bump_fee_drain_wallet() {
.drain_wallet() .drain_wallet()
.allow_shrinking(addr.script_pubkey()) .allow_shrinking(addr.script_pubkey())
.unwrap() .unwrap()
.fee_rate(FeeRate::from_sat_per_vb_unchecked(5)); .fee_rate(FeeRate::from_sat_per_vb(5.0));
let psbt = builder.finish().unwrap(); let psbt = builder.finish().unwrap();
let sent_received = wallet.sent_and_received(&psbt.extract_tx()); let sent_received = wallet.sent_and_received(&psbt.extract_tx());
@@ -1855,7 +1823,7 @@ fn test_bump_fee_remove_output_manually_selected_only() {
let mut builder = wallet.build_fee_bump(txid).unwrap(); let mut builder = wallet.build_fee_bump(txid).unwrap();
builder builder
.manually_selected_only() .manually_selected_only()
.fee_rate(FeeRate::from_sat_per_vb_unchecked(255)); .fee_rate(FeeRate::from_sat_per_vb(255.0));
builder.finish().unwrap(); builder.finish().unwrap();
} }
@@ -1896,7 +1864,7 @@ fn test_bump_fee_add_input() {
.unwrap(); .unwrap();
let mut builder = wallet.build_fee_bump(txid).unwrap(); let mut builder = wallet.build_fee_bump(txid).unwrap();
builder.fee_rate(FeeRate::from_sat_per_vb_unchecked(50)); builder.fee_rate(FeeRate::from_sat_per_vb(50.0));
let psbt = builder.finish().unwrap(); let psbt = builder.finish().unwrap();
let sent_received = wallet.sent_and_received(&psbt.clone().extract_tx()); let sent_received = wallet.sent_and_received(&psbt.clone().extract_tx());
let fee = check_fee!(wallet, psbt); let fee = check_fee!(wallet, psbt);
@@ -1923,7 +1891,7 @@ fn test_bump_fee_add_input() {
sent_received.1 sent_received.1
); );
assert_fee_rate!(psbt, fee.unwrap_or(0), FeeRate::from_sat_per_vb_unchecked(50), @add_signature); assert_fee_rate!(psbt, fee.unwrap_or(0), FeeRate::from_sat_per_vb(50.0), @add_signature);
} }
#[test] #[test]
@@ -2006,7 +1974,7 @@ fn test_bump_fee_no_change_add_input_and_change() {
// now bump the fees without using `allow_shrinking`. the wallet should add an // now bump the fees without using `allow_shrinking`. the wallet should add an
// extra input and a change output, and leave the original output untouched // extra input and a change output, and leave the original output untouched
let mut builder = wallet.build_fee_bump(txid).unwrap(); let mut builder = wallet.build_fee_bump(txid).unwrap();
builder.fee_rate(FeeRate::from_sat_per_vb_unchecked(50)); builder.fee_rate(FeeRate::from_sat_per_vb(50.0));
let psbt = builder.finish().unwrap(); let psbt = builder.finish().unwrap();
let sent_received = wallet.sent_and_received(&psbt.clone().extract_tx()); let sent_received = wallet.sent_and_received(&psbt.clone().extract_tx());
let fee = check_fee!(wallet, psbt); let fee = check_fee!(wallet, psbt);
@@ -2038,7 +2006,7 @@ fn test_bump_fee_no_change_add_input_and_change() {
75_000 - original_send_all_amount - fee.unwrap_or(0) 75_000 - original_send_all_amount - fee.unwrap_or(0)
); );
assert_fee_rate!(psbt, fee.unwrap_or(0), FeeRate::from_sat_per_vb_unchecked(50), @add_signature); assert_fee_rate!(psbt, fee.unwrap_or(0), FeeRate::from_sat_per_vb(50.0), @add_signature);
} }
#[test] #[test]
@@ -2083,7 +2051,7 @@ fn test_bump_fee_add_input_change_dust() {
// two inputs (50k, 25k) and one output (45k) - epsilon // two inputs (50k, 25k) and one output (45k) - epsilon
// We use epsilon here to avoid asking for a slightly too high feerate // We use epsilon here to avoid asking for a slightly too high feerate
let fee_abs = 50_000 + 25_000 - 45_000 - 10; let fee_abs = 50_000 + 25_000 - 45_000 - 10;
builder.fee_rate(Amount::from_sat(fee_abs) / new_tx_weight); builder.fee_rate(FeeRate::from_wu(fee_abs, new_tx_weight));
let psbt = builder.finish().unwrap(); let psbt = builder.finish().unwrap();
let sent_received = wallet.sent_and_received(&psbt.clone().extract_tx()); let sent_received = wallet.sent_and_received(&psbt.clone().extract_tx());
let fee = check_fee!(wallet, psbt); let fee = check_fee!(wallet, psbt);
@@ -2106,7 +2074,7 @@ fn test_bump_fee_add_input_change_dust() {
45_000 45_000
); );
assert_fee_rate!(psbt, fee.unwrap_or(0), FeeRate::from_sat_per_vb_unchecked(140), @dust_change, @add_signature); assert_fee_rate!(psbt, fee.unwrap_or(0), FeeRate::from_sat_per_vb(140.0), @dust_change, @add_signature);
} }
#[test] #[test]
@@ -2137,7 +2105,7 @@ fn test_bump_fee_force_add_input() {
builder builder
.add_utxo(incoming_op) .add_utxo(incoming_op)
.unwrap() .unwrap()
.fee_rate(FeeRate::from_sat_per_vb_unchecked(5)); .fee_rate(FeeRate::from_sat_per_vb(5.0));
let psbt = builder.finish().unwrap(); let psbt = builder.finish().unwrap();
let sent_received = wallet.sent_and_received(&psbt.clone().extract_tx()); let sent_received = wallet.sent_and_received(&psbt.clone().extract_tx());
let fee = check_fee!(wallet, psbt); let fee = check_fee!(wallet, psbt);
@@ -2165,7 +2133,7 @@ fn test_bump_fee_force_add_input() {
sent_received.1 sent_received.1
); );
assert_fee_rate!(psbt, fee.unwrap_or(0), FeeRate::from_sat_per_vb_unchecked(5), @add_signature); assert_fee_rate!(psbt, fee.unwrap_or(0), FeeRate::from_sat_per_vb(5.0), @add_signature);
} }
#[test] #[test]
@@ -2261,7 +2229,7 @@ fn test_bump_fee_unconfirmed_inputs_only() {
.insert_tx(tx, ConfirmationTime::Unconfirmed { last_seen: 0 }) .insert_tx(tx, ConfirmationTime::Unconfirmed { last_seen: 0 })
.unwrap(); .unwrap();
let mut builder = wallet.build_fee_bump(txid).unwrap(); let mut builder = wallet.build_fee_bump(txid).unwrap();
builder.fee_rate(FeeRate::from_sat_per_vb_unchecked(25)); builder.fee_rate(FeeRate::from_sat_per_vb(25.0));
builder.finish().unwrap(); builder.finish().unwrap();
} }
@@ -2296,7 +2264,7 @@ fn test_bump_fee_unconfirmed_input() {
let mut builder = wallet.build_fee_bump(txid).unwrap(); let mut builder = wallet.build_fee_bump(txid).unwrap();
builder builder
.fee_rate(FeeRate::from_sat_per_vb_unchecked(15)) .fee_rate(FeeRate::from_sat_per_vb(15.0))
.allow_shrinking(addr.script_pubkey()) .allow_shrinking(addr.script_pubkey())
.unwrap(); .unwrap();
builder.finish().unwrap(); builder.finish().unwrap();
@@ -2316,7 +2284,7 @@ fn test_fee_amount_negative_drain_val() {
let send_to = Address::from_str("tb1ql7w62elx9ucw4pj5lgw4l028hmuw80sndtntxt") let send_to = Address::from_str("tb1ql7w62elx9ucw4pj5lgw4l028hmuw80sndtntxt")
.unwrap() .unwrap()
.assume_checked(); .assume_checked();
let fee_rate = FeeRate::from_sat_per_kwu(500); let fee_rate = FeeRate::from_sat_per_vb(2.01);
let incoming_op = receive_output_in_latest_block(&mut wallet, 8859); let incoming_op = receive_output_in_latest_block(&mut wallet, 8859);
let mut builder = wallet.build_tx(); let mut builder = wallet.build_tx();
@@ -2831,32 +2799,6 @@ fn test_get_address_no_reuse_single_descriptor() {
}); });
} }
#[test]
fn test_taproot_remove_tapfields_after_finalize_sign_option() {
let (mut wallet, _) = get_funded_wallet(get_test_tr_with_taptree());
let addr = wallet.get_address(New);
let mut builder = wallet.build_tx();
builder.drain_to(addr.script_pubkey()).drain_wallet();
let mut psbt = builder.finish().unwrap();
let finalized = wallet.sign(&mut psbt, SignOptions::default()).unwrap();
assert!(finalized);
// removes tap_* from inputs
for input in &psbt.inputs {
assert!(input.tap_key_sig.is_none());
assert!(input.tap_script_sigs.is_empty());
assert!(input.tap_scripts.is_empty());
assert!(input.tap_key_origins.is_empty());
assert!(input.tap_internal_key.is_none());
assert!(input.tap_merkle_root.is_none());
}
// removes key origins from outputs
for output in &psbt.outputs {
assert!(output.tap_key_origins.is_empty());
}
}
#[test] #[test]
fn test_taproot_psbt_populate_tap_key_origins() { fn test_taproot_psbt_populate_tap_key_origins() {
let (mut wallet, _) = get_funded_wallet(get_test_tr_single_sig_xprv()); let (mut wallet, _) = get_funded_wallet(get_test_tr_single_sig_xprv());
@@ -3543,7 +3485,7 @@ fn test_fee_rate_sign_no_grinding_high_r() {
// alright. // alright.
let (mut wallet, _) = get_funded_wallet("wpkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)"); let (mut wallet, _) = get_funded_wallet("wpkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)");
let addr = wallet.get_address(New); let addr = wallet.get_address(New);
let fee_rate = FeeRate::from_sat_per_vb_unchecked(1); let fee_rate = FeeRate::from_sat_per_vb(1.0);
let mut builder = wallet.build_tx(); let mut builder = wallet.build_tx();
let mut data = PushBytesBuf::try_from(vec![0]).unwrap(); let mut data = PushBytesBuf::try_from(vec![0]).unwrap();
builder builder
@@ -3609,7 +3551,7 @@ fn test_fee_rate_sign_grinding_low_r() {
// signature is 70 bytes. // signature is 70 bytes.
let (mut wallet, _) = get_funded_wallet("wpkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)"); let (mut wallet, _) = get_funded_wallet("wpkh(tprv8ZgxMBicQKsPd3EupYiPRhaMooHKUHJxNsTfYuScep13go8QFfHdtkG9nRkFGb7busX4isf6X9dURGCoKgitaApQ6MupRhZMcELAxTBRJgS/*)");
let addr = wallet.get_address(New); let addr = wallet.get_address(New);
let fee_rate = FeeRate::from_sat_per_vb_unchecked(1); let fee_rate = FeeRate::from_sat_per_vb(1.0);
let mut builder = wallet.build_tx(); let mut builder = wallet.build_tx();
builder builder
.drain_to(addr.script_pubkey()) .drain_to(addr.script_pubkey())
@@ -3635,6 +3577,41 @@ fn test_fee_rate_sign_grinding_low_r() {
assert_fee_rate!(psbt, fee.unwrap_or(0), fee_rate); assert_fee_rate!(psbt, fee.unwrap_or(0), fee_rate);
} }
// #[cfg(feature = "test-hardware-signer")]
// #[test]
// fn test_hardware_signer() {
// use std::sync::Arc;
//
// use bdk::signer::SignerOrdering;
// use bdk::wallet::hardwaresigner::HWISigner;
// use hwi::types::HWIChain;
// use hwi::HWIClient;
//
// let mut devices = HWIClient::enumerate().unwrap();
// if devices.is_empty() {
// panic!("No devices found!");
// }
// let device = devices.remove(0).unwrap();
// let client = HWIClient::get_client(&device, true, HWIChain::Regtest).unwrap();
// let descriptors = client.get_descriptors::<String>(None).unwrap();
// let custom_signer = HWISigner::from_device(&device, HWIChain::Regtest).unwrap();
//
// let (mut wallet, _) = get_funded_wallet(&descriptors.internal[0]);
// wallet.add_signer(
// KeychainKind::External,
// SignerOrdering(200),
// Arc::new(custom_signer),
// );
//
// let addr = wallet.get_address(LastUnused);
// let mut builder = wallet.build_tx();
// builder.drain_to(addr.script_pubkey()).drain_wallet();
// let (mut psbt, _) = builder.finish().unwrap();
//
// let finalized = wallet.sign(&mut psbt, Default::default()).unwrap();
// assert!(finalized);
// }
#[test] #[test]
fn test_taproot_load_descriptor_duplicated_keys() { fn test_taproot_load_descriptor_duplicated_keys() {
// Added after issue https://github.com/bitcoindevkit/bdk/issues/760 // Added after issue https://github.com/bitcoindevkit/bdk/issues/760

View File

@@ -1,8 +1,8 @@
[package] [package]
name = "bdk_bitcoind_rpc" name = "bdk_bitcoind_rpc"
version = "0.7.0" version = "0.1.0"
edition = "2021" edition = "2021"
rust-version = "1.63" rust-version = "1.57"
homepage = "https://bitcoindevkit.org" homepage = "https://bitcoindevkit.org"
repository = "https://github.com/bitcoindevkit/bdk" repository = "https://github.com/bitcoindevkit/bdk"
documentation = "https://docs.rs/bdk_bitcoind_rpc" documentation = "https://docs.rs/bdk_bitcoind_rpc"
@@ -16,10 +16,10 @@ readme = "README.md"
# For no-std, remember to enable the bitcoin/no-std feature # For no-std, remember to enable the bitcoin/no-std feature
bitcoin = { version = "0.30", default-features = false } bitcoin = { version = "0.30", default-features = false }
bitcoincore-rpc = { version = "0.17" } bitcoincore-rpc = { version = "0.17" }
bdk_chain = { path = "../chain", version = "0.11", default-features = false } bdk_chain = { path = "../chain", version = "0.6", default-features = false }
[dev-dependencies] [dev-dependencies]
bdk_testenv = { path = "../testenv", version = "0.1.0", default_features = false } bitcoind = { version = "0.33", features = ["25_0"] }
anyhow = { version = "1" } anyhow = { version = "1" }
[features] [features]

View File

@@ -14,7 +14,7 @@ use bitcoin::{block::Header, Block, BlockHash, Transaction};
pub use bitcoincore_rpc; pub use bitcoincore_rpc;
use bitcoincore_rpc::bitcoincore_rpc_json; use bitcoincore_rpc::bitcoincore_rpc_json;
/// The [`Emitter`] is used to emit data sourced from [`bitcoincore_rpc::Client`]. /// A structure that emits data sourced from [`bitcoincore_rpc::Client`].
/// ///
/// Refer to [module-level documentation] for more. /// Refer to [module-level documentation] for more.
/// ///
@@ -43,13 +43,11 @@ pub struct Emitter<'c, C> {
} }
impl<'c, C: bitcoincore_rpc::RpcApi> Emitter<'c, C> { impl<'c, C: bitcoincore_rpc::RpcApi> Emitter<'c, C> {
/// Construct a new [`Emitter`]. /// Construct a new [`Emitter`] with the given RPC `client`, `last_cp` and `start_height`.
/// ///
/// `last_cp` informs the emitter of the chain we are starting off with. This way, the emitter /// * `last_cp` is the check point used to find the latest block which is still part of the best
/// can start emission from a block that connects to the original chain. /// chain.
/// /// * `start_height` is the block height to start emitting blocks from.
/// `start_height` starts emission from a given height (if there are no conflicts with the
/// original chain).
pub fn new(client: &'c C, last_cp: CheckPoint, start_height: u32) -> Self { pub fn new(client: &'c C, last_cp: CheckPoint, start_height: u32) -> Self {
Self { Self {
client, client,
@@ -129,58 +127,13 @@ impl<'c, C: bitcoincore_rpc::RpcApi> Emitter<'c, C> {
} }
/// Emit the next block height and header (if any). /// Emit the next block height and header (if any).
pub fn next_header(&mut self) -> Result<Option<BlockEvent<Header>>, bitcoincore_rpc::Error> { pub fn next_header(&mut self) -> Result<Option<(u32, Header)>, bitcoincore_rpc::Error> {
Ok(poll(self, |hash| self.client.get_block_header(hash))? poll(self, |hash| self.client.get_block_header(hash))
.map(|(checkpoint, block)| BlockEvent { block, checkpoint }))
} }
/// Emit the next block height and block (if any). /// Emit the next block height and block (if any).
pub fn next_block(&mut self) -> Result<Option<BlockEvent<Block>>, bitcoincore_rpc::Error> { pub fn next_block(&mut self) -> Result<Option<(u32, Block)>, bitcoincore_rpc::Error> {
Ok(poll(self, |hash| self.client.get_block(hash))? poll(self, |hash| self.client.get_block(hash))
.map(|(checkpoint, block)| BlockEvent { block, checkpoint }))
}
}
/// A newly emitted block from [`Emitter`].
#[derive(Debug)]
pub struct BlockEvent<B> {
/// Either a full [`Block`] or [`Header`] of the new block.
pub block: B,
/// The checkpoint of the new block.
///
/// A [`CheckPoint`] is a node of a linked list of [`BlockId`]s. This checkpoint is linked to
/// all [`BlockId`]s originally passed in [`Emitter::new`] as well as emitted blocks since then.
/// These blocks are guaranteed to be of the same chain.
///
/// This is important as BDK structures require block-to-apply to be connected with another
/// block in the original chain.
pub checkpoint: CheckPoint,
}
impl<B> BlockEvent<B> {
/// The block height of this new block.
pub fn block_height(&self) -> u32 {
self.checkpoint.height()
}
/// The block hash of this new block.
pub fn block_hash(&self) -> BlockHash {
self.checkpoint.hash()
}
/// The [`BlockId`] of a previous block that this block connects to.
///
/// This either returns a [`BlockId`] of a previously emitted block or from the chain we started
/// with (passed in as `last_cp` in [`Emitter::new`]).
///
/// This value is derived from [`BlockEvent::checkpoint`].
pub fn connected_to(&self) -> BlockId {
match self.checkpoint.prev() {
Some(prev_cp) => prev_cp.block_id(),
// there is no previous checkpoint, so just connect with itself
None => self.checkpoint.block_id(),
}
} }
} }
@@ -250,7 +203,7 @@ where
fn poll<C, V, F>( fn poll<C, V, F>(
emitter: &mut Emitter<C>, emitter: &mut Emitter<C>,
get_item: F, get_item: F,
) -> Result<Option<(CheckPoint, V)>, bitcoincore_rpc::Error> ) -> Result<Option<(u32, V)>, bitcoincore_rpc::Error>
where where
C: bitcoincore_rpc::RpcApi, C: bitcoincore_rpc::RpcApi,
F: Fn(&BlockHash) -> Result<V, bitcoincore_rpc::Error>, F: Fn(&BlockHash) -> Result<V, bitcoincore_rpc::Error>,
@@ -262,14 +215,13 @@ where
let hash = res.hash; let hash = res.hash;
let item = get_item(&hash)?; let item = get_item(&hash)?;
let new_cp = emitter emitter.last_cp = emitter
.last_cp .last_cp
.clone() .clone()
.push(BlockId { height, hash }) .push(BlockId { height, hash })
.expect("must push"); .expect("must push");
emitter.last_cp = new_cp.clone();
emitter.last_block = Some(res); emitter.last_block = Some(res);
return Ok(Some((new_cp, item))); return Ok(Some((height, item)));
} }
PollResponse::NoMoreBlocks => { PollResponse::NoMoreBlocks => {
emitter.last_block = None; emitter.last_block = None;

View File

@@ -2,14 +2,182 @@ use std::collections::{BTreeMap, BTreeSet};
use bdk_bitcoind_rpc::Emitter; use bdk_bitcoind_rpc::Emitter;
use bdk_chain::{ use bdk_chain::{
bitcoin::{Address, Amount, Txid}, bitcoin::{Address, Amount, BlockHash, Txid},
keychain::Balance, keychain::Balance,
local_chain::{self, CheckPoint, LocalChain}, local_chain::{self, CheckPoint, LocalChain},
Append, BlockId, IndexedTxGraph, SpkTxOutIndex, Append, BlockId, IndexedTxGraph, SpkTxOutIndex,
}; };
use bdk_testenv::TestEnv; use bitcoin::{
use bitcoin::{hashes::Hash, Block, OutPoint, ScriptBuf, WScriptHash}; address::NetworkChecked, block::Header, hash_types::TxMerkleNode, hashes::Hash,
use bitcoincore_rpc::RpcApi; secp256k1::rand::random, Block, CompactTarget, OutPoint, ScriptBuf, ScriptHash, Transaction,
TxIn, TxOut, WScriptHash,
};
use bitcoincore_rpc::{
bitcoincore_rpc_json::{GetBlockTemplateModes, GetBlockTemplateRules},
RpcApi,
};
struct TestEnv {
#[allow(dead_code)]
daemon: bitcoind::BitcoinD,
client: bitcoincore_rpc::Client,
}
impl TestEnv {
fn new() -> anyhow::Result<Self> {
let daemon = match std::env::var_os("TEST_BITCOIND") {
Some(bitcoind_path) => bitcoind::BitcoinD::new(bitcoind_path),
None => bitcoind::BitcoinD::from_downloaded(),
}?;
let client = bitcoincore_rpc::Client::new(
&daemon.rpc_url(),
bitcoincore_rpc::Auth::CookieFile(daemon.params.cookie_file.clone()),
)?;
Ok(Self { daemon, client })
}
fn mine_blocks(
&self,
count: usize,
address: Option<Address>,
) -> anyhow::Result<Vec<BlockHash>> {
let coinbase_address = match address {
Some(address) => address,
None => self.client.get_new_address(None, None)?.assume_checked(),
};
let block_hashes = self
.client
.generate_to_address(count as _, &coinbase_address)?;
Ok(block_hashes)
}
fn mine_empty_block(&self) -> anyhow::Result<(usize, BlockHash)> {
let bt = self.client.get_block_template(
GetBlockTemplateModes::Template,
&[GetBlockTemplateRules::SegWit],
&[],
)?;
let txdata = vec![Transaction {
version: 1,
lock_time: bitcoin::absolute::LockTime::from_height(0)?,
input: vec![TxIn {
previous_output: bitcoin::OutPoint::default(),
script_sig: ScriptBuf::builder()
.push_int(bt.height as _)
// randomn number so that re-mining creates unique block
.push_int(random())
.into_script(),
sequence: bitcoin::Sequence::default(),
witness: bitcoin::Witness::new(),
}],
output: vec![TxOut {
value: 0,
script_pubkey: ScriptBuf::new_p2sh(&ScriptHash::all_zeros()),
}],
}];
let bits: [u8; 4] = bt
.bits
.clone()
.try_into()
.expect("rpc provided us with invalid bits");
let mut block = Block {
header: Header {
version: bitcoin::block::Version::default(),
prev_blockhash: bt.previous_block_hash,
merkle_root: TxMerkleNode::all_zeros(),
time: Ord::max(bt.min_time, std::time::UNIX_EPOCH.elapsed()?.as_secs()) as u32,
bits: CompactTarget::from_consensus(u32::from_be_bytes(bits)),
nonce: 0,
},
txdata,
};
block.header.merkle_root = block.compute_merkle_root().expect("must compute");
for nonce in 0..=u32::MAX {
block.header.nonce = nonce;
if block.header.target().is_met_by(block.block_hash()) {
break;
}
}
self.client.submit_block(&block)?;
Ok((bt.height as usize, block.block_hash()))
}
fn invalidate_blocks(&self, count: usize) -> anyhow::Result<()> {
let mut hash = self.client.get_best_block_hash()?;
for _ in 0..count {
let prev_hash = self.client.get_block_info(&hash)?.previousblockhash;
self.client.invalidate_block(&hash)?;
match prev_hash {
Some(prev_hash) => hash = prev_hash,
None => break,
}
}
Ok(())
}
fn reorg(&self, count: usize) -> anyhow::Result<Vec<BlockHash>> {
let start_height = self.client.get_block_count()?;
self.invalidate_blocks(count)?;
let res = self.mine_blocks(count, None);
assert_eq!(
self.client.get_block_count()?,
start_height,
"reorg should not result in height change"
);
res
}
fn reorg_empty_blocks(&self, count: usize) -> anyhow::Result<Vec<(usize, BlockHash)>> {
let start_height = self.client.get_block_count()?;
self.invalidate_blocks(count)?;
let res = (0..count)
.map(|_| self.mine_empty_block())
.collect::<Result<Vec<_>, _>>()?;
assert_eq!(
self.client.get_block_count()?,
start_height,
"reorg should not result in height change"
);
Ok(res)
}
fn send(&self, address: &Address<NetworkChecked>, amount: Amount) -> anyhow::Result<Txid> {
let txid = self
.client
.send_to_address(address, amount, None, None, None, None, None, None)?;
Ok(txid)
}
}
fn block_to_chain_update(block: &bitcoin::Block, height: u32) -> local_chain::Update {
let this_id = BlockId {
height,
hash: block.block_hash(),
};
let tip = if block.header.prev_blockhash == BlockHash::all_zeros() {
CheckPoint::new(this_id)
} else {
CheckPoint::new(BlockId {
height: height - 1,
hash: block.header.prev_blockhash,
})
.extend(core::iter::once(this_id))
.expect("must construct checkpoint")
};
local_chain::Update {
tip,
introduce_older_blocks: false,
}
}
/// Ensure that blocks are emitted in order even after reorg. /// Ensure that blocks are emitted in order even after reorg.
/// ///
@@ -20,38 +188,29 @@ use bitcoincore_rpc::RpcApi;
#[test] #[test]
pub fn test_sync_local_chain() -> anyhow::Result<()> { pub fn test_sync_local_chain() -> anyhow::Result<()> {
let env = TestEnv::new()?; let env = TestEnv::new()?;
let network_tip = env.rpc_client().get_block_count()?; let (mut local_chain, _) = LocalChain::from_genesis_hash(env.client.get_block_hash(0)?);
let (mut local_chain, _) = LocalChain::from_genesis_hash(env.rpc_client().get_block_hash(0)?); let mut emitter = Emitter::new(&env.client, local_chain.tip(), 0);
let mut emitter = Emitter::new(env.rpc_client(), local_chain.tip(), 0);
// Mine some blocks and return the actual block hashes. // mine some blocks and returned the actual block hashes
// Because initializing `ElectrsD` already mines some blocks, we must include those too when
// returning block hashes.
let exp_hashes = { let exp_hashes = {
let mut hashes = (0..=network_tip) let mut hashes = vec![env.client.get_block_hash(0)?]; // include genesis block
.map(|height| env.rpc_client().get_block_hash(height)) hashes.extend(env.mine_blocks(101, None)?);
.collect::<Result<Vec<_>, _>>()?;
hashes.extend(env.mine_blocks(101 - network_tip as usize, None)?);
hashes hashes
}; };
// See if the emitter outputs the right blocks. // see if the emitter outputs the right blocks
println!("first sync:"); println!("first sync:");
while let Some(emission) = emitter.next_block()? { while let Some((height, block)) = emitter.next_block()? {
let height = emission.block_height();
let hash = emission.block_hash();
assert_eq!( assert_eq!(
emission.block_hash(), block.block_hash(),
exp_hashes[height as usize], exp_hashes[height as usize],
"emitted block hash is unexpected" "emitted block hash is unexpected"
); );
let chain_update = block_to_chain_update(&block, height);
assert_eq!( assert_eq!(
local_chain.apply_update(local_chain::Update { local_chain.apply_update(chain_update)?,
tip: emission.checkpoint, BTreeMap::from([(height, Some(block.block_hash()))]),
introduce_older_blocks: false,
})?,
BTreeMap::from([(height, Some(hash))]),
"chain update changeset is unexpected", "chain update changeset is unexpected",
); );
} }
@@ -66,7 +225,7 @@ pub fn test_sync_local_chain() -> anyhow::Result<()> {
"final local_chain state is unexpected", "final local_chain state is unexpected",
); );
// Perform reorg. // perform reorg
let reorged_blocks = env.reorg(6)?; let reorged_blocks = env.reorg(6)?;
let exp_hashes = exp_hashes let exp_hashes = exp_hashes
.iter() .iter()
@@ -75,33 +234,30 @@ pub fn test_sync_local_chain() -> anyhow::Result<()> {
.cloned() .cloned()
.collect::<Vec<_>>(); .collect::<Vec<_>>();
// See if the emitter outputs the right blocks. // see if the emitter outputs the right blocks
println!("after reorg:"); println!("after reorg:");
let mut exp_height = exp_hashes.len() - reorged_blocks.len(); let mut exp_height = exp_hashes.len() - reorged_blocks.len();
while let Some(emission) = emitter.next_block()? { while let Some((height, block)) = emitter.next_block()? {
let height = emission.block_height();
let hash = emission.block_hash();
assert_eq!( assert_eq!(
height, exp_height as u32, height, exp_height as u32,
"emitted block has unexpected height" "emitted block has unexpected height"
); );
assert_eq!( assert_eq!(
hash, exp_hashes[height as usize], block.block_hash(),
exp_hashes[height as usize],
"emitted block is unexpected" "emitted block is unexpected"
); );
let chain_update = block_to_chain_update(&block, height);
assert_eq!( assert_eq!(
local_chain.apply_update(local_chain::Update { local_chain.apply_update(chain_update)?,
tip: emission.checkpoint,
introduce_older_blocks: false,
})?,
if exp_height == exp_hashes.len() - reorged_blocks.len() { if exp_height == exp_hashes.len() - reorged_blocks.len() {
core::iter::once((height, Some(hash))) core::iter::once((height, Some(block.block_hash())))
.chain((height + 1..exp_hashes.len() as u32).map(|h| (h, None))) .chain((height + 1..exp_hashes.len() as u32).map(|h| (h, None)))
.collect::<bdk_chain::local_chain::ChangeSet>() .collect::<bdk_chain::local_chain::ChangeSet>()
} else { } else {
BTreeMap::from([(height, Some(hash))]) BTreeMap::from([(height, Some(block.block_hash()))])
}, },
"chain update changeset is unexpected", "chain update changeset is unexpected",
); );
@@ -131,25 +287,16 @@ fn test_into_tx_graph() -> anyhow::Result<()> {
let env = TestEnv::new()?; let env = TestEnv::new()?;
println!("getting new addresses!"); println!("getting new addresses!");
let addr_0 = env let addr_0 = env.client.get_new_address(None, None)?.assume_checked();
.rpc_client() let addr_1 = env.client.get_new_address(None, None)?.assume_checked();
.get_new_address(None, None)? let addr_2 = env.client.get_new_address(None, None)?.assume_checked();
.assume_checked();
let addr_1 = env
.rpc_client()
.get_new_address(None, None)?
.assume_checked();
let addr_2 = env
.rpc_client()
.get_new_address(None, None)?
.assume_checked();
println!("got new addresses!"); println!("got new addresses!");
println!("mining block!"); println!("mining block!");
env.mine_blocks(101, None)?; env.mine_blocks(101, None)?;
println!("mined blocks!"); println!("mined blocks!");
let (mut chain, _) = LocalChain::from_genesis_hash(env.rpc_client().get_block_hash(0)?); let (mut chain, _) = LocalChain::from_genesis_hash(env.client.get_block_hash(0)?);
let mut indexed_tx_graph = IndexedTxGraph::<BlockId, _>::new({ let mut indexed_tx_graph = IndexedTxGraph::<BlockId, _>::new({
let mut index = SpkTxOutIndex::<usize>::default(); let mut index = SpkTxOutIndex::<usize>::default();
index.insert_spk(0, addr_0.script_pubkey()); index.insert_spk(0, addr_0.script_pubkey());
@@ -158,15 +305,11 @@ fn test_into_tx_graph() -> anyhow::Result<()> {
index index
}); });
let emitter = &mut Emitter::new(env.rpc_client(), chain.tip(), 0); let emitter = &mut Emitter::new(&env.client, chain.tip(), 0);
while let Some(emission) = emitter.next_block()? { while let Some((height, block)) = emitter.next_block()? {
let height = emission.block_height(); let _ = chain.apply_update(block_to_chain_update(&block, height))?;
let _ = chain.apply_update(local_chain::Update { let indexed_additions = indexed_tx_graph.apply_block_relevant(block, height);
tip: emission.checkpoint,
introduce_older_blocks: false,
})?;
let indexed_additions = indexed_tx_graph.apply_block_relevant(&emission.block, height);
assert!(indexed_additions.is_empty()); assert!(indexed_additions.is_empty());
} }
@@ -174,7 +317,7 @@ fn test_into_tx_graph() -> anyhow::Result<()> {
let exp_txids = { let exp_txids = {
let mut txids = BTreeSet::new(); let mut txids = BTreeSet::new();
for _ in 0..3 { for _ in 0..3 {
txids.insert(env.rpc_client().send_to_address( txids.insert(env.client.send_to_address(
&addr_0, &addr_0,
Amount::from_sat(10_000), Amount::from_sat(10_000),
None, None,
@@ -210,7 +353,7 @@ fn test_into_tx_graph() -> anyhow::Result<()> {
// mine a block that confirms the 3 txs // mine a block that confirms the 3 txs
let exp_block_hash = env.mine_blocks(1, None)?[0]; let exp_block_hash = env.mine_blocks(1, None)?[0];
let exp_block_height = env.rpc_client().get_block_info(&exp_block_hash)?.height as u32; let exp_block_height = env.client.get_block_info(&exp_block_hash)?.height as u32;
let exp_anchors = exp_txids let exp_anchors = exp_txids
.iter() .iter()
.map({ .map({
@@ -224,13 +367,10 @@ fn test_into_tx_graph() -> anyhow::Result<()> {
// must receive mined block which will confirm the transactions. // must receive mined block which will confirm the transactions.
{ {
let emission = emitter.next_block()?.expect("must get mined block"); let (height, block) = emitter.next_block()?.expect("must get mined block");
let height = emission.block_height(); let _ = chain
let _ = chain.apply_update(local_chain::Update { .apply_update(CheckPoint::from_header(&block.header, height).into_update(false))?;
tip: emission.checkpoint, let indexed_additions = indexed_tx_graph.apply_block_relevant(block, height);
introduce_older_blocks: false,
})?;
let indexed_additions = indexed_tx_graph.apply_block_relevant(&emission.block, height);
assert!(indexed_additions.graph.txs.is_empty()); assert!(indexed_additions.graph.txs.is_empty());
assert!(indexed_additions.graph.txouts.is_empty()); assert!(indexed_additions.graph.txouts.is_empty());
assert_eq!(indexed_additions.graph.anchors, exp_anchors); assert_eq!(indexed_additions.graph.anchors, exp_anchors);
@@ -254,10 +394,10 @@ fn ensure_block_emitted_after_reorg_is_at_reorg_height() -> anyhow::Result<()> {
let env = TestEnv::new()?; let env = TestEnv::new()?;
let mut emitter = Emitter::new( let mut emitter = Emitter::new(
env.rpc_client(), &env.client,
CheckPoint::new(BlockId { CheckPoint::new(BlockId {
height: 0, height: 0,
hash: env.rpc_client().get_block_hash(0)?, hash: env.client.get_block_hash(0)?,
}), }),
EMITTER_START_HEIGHT as _, EMITTER_START_HEIGHT as _,
); );
@@ -267,12 +407,9 @@ fn ensure_block_emitted_after_reorg_is_at_reorg_height() -> anyhow::Result<()> {
for reorg_count in 1..=10 { for reorg_count in 1..=10 {
let replaced_blocks = env.reorg_empty_blocks(reorg_count)?; let replaced_blocks = env.reorg_empty_blocks(reorg_count)?;
let next_emission = emitter.next_header()?.expect("must emit block after reorg"); let (height, next_header) = emitter.next_header()?.expect("must emit block after reorg");
assert_eq!( assert_eq!(
( (height as usize, next_header.block_hash()),
next_emission.block_height() as usize,
next_emission.block_hash()
),
replaced_blocks[0], replaced_blocks[0],
"block emitted after reorg should be at the reorg height" "block emitted after reorg should be at the reorg height"
); );
@@ -302,9 +439,8 @@ fn sync_from_emitter<C>(
where where
C: bitcoincore_rpc::RpcApi, C: bitcoincore_rpc::RpcApi,
{ {
while let Some(emission) = emitter.next_block()? { while let Some((height, block)) = emitter.next_block()? {
let height = emission.block_height(); process_block(recv_chain, recv_graph, block, height)?;
process_block(recv_chain, recv_graph, emission.block, height)?;
} }
Ok(()) Ok(())
} }
@@ -331,24 +467,21 @@ fn tx_can_become_unconfirmed_after_reorg() -> anyhow::Result<()> {
let env = TestEnv::new()?; let env = TestEnv::new()?;
let mut emitter = Emitter::new( let mut emitter = Emitter::new(
env.rpc_client(), &env.client,
CheckPoint::new(BlockId { CheckPoint::new(BlockId {
height: 0, height: 0,
hash: env.rpc_client().get_block_hash(0)?, hash: env.client.get_block_hash(0)?,
}), }),
0, 0,
); );
// setup addresses // setup addresses
let addr_to_mine = env let addr_to_mine = env.client.get_new_address(None, None)?.assume_checked();
.rpc_client()
.get_new_address(None, None)?
.assume_checked();
let spk_to_track = ScriptBuf::new_v0_p2wsh(&WScriptHash::all_zeros()); let spk_to_track = ScriptBuf::new_v0_p2wsh(&WScriptHash::all_zeros());
let addr_to_track = Address::from_script(&spk_to_track, bitcoin::Network::Regtest)?; let addr_to_track = Address::from_script(&spk_to_track, bitcoin::Network::Regtest)?;
// setup receiver // setup receiver
let (mut recv_chain, _) = LocalChain::from_genesis_hash(env.rpc_client().get_block_hash(0)?); let (mut recv_chain, _) = LocalChain::from_genesis_hash(env.client.get_block_hash(0)?);
let mut recv_graph = IndexedTxGraph::<BlockId, _>::new({ let mut recv_graph = IndexedTxGraph::<BlockId, _>::new({
let mut recv_index = SpkTxOutIndex::default(); let mut recv_index = SpkTxOutIndex::default();
recv_index.insert_spk((), spk_to_track.clone()); recv_index.insert_spk((), spk_to_track.clone());
@@ -364,7 +497,7 @@ fn tx_can_become_unconfirmed_after_reorg() -> anyhow::Result<()> {
// lock outputs that send to `addr_to_track` // lock outputs that send to `addr_to_track`
let outpoints_to_lock = env let outpoints_to_lock = env
.rpc_client() .client
.get_transaction(&txid, None)? .get_transaction(&txid, None)?
.transaction()? .transaction()?
.output .output
@@ -373,7 +506,7 @@ fn tx_can_become_unconfirmed_after_reorg() -> anyhow::Result<()> {
.filter(|(_, txo)| txo.script_pubkey == spk_to_track) .filter(|(_, txo)| txo.script_pubkey == spk_to_track)
.map(|(vout, _)| OutPoint::new(txid, vout as _)) .map(|(vout, _)| OutPoint::new(txid, vout as _))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
env.rpc_client().lock_unspent(&outpoints_to_lock)?; env.client.lock_unspent(&outpoints_to_lock)?;
let _ = env.mine_blocks(1, None)?; let _ = env.mine_blocks(1, None)?;
} }
@@ -422,19 +555,16 @@ fn mempool_avoids_re_emission() -> anyhow::Result<()> {
let env = TestEnv::new()?; let env = TestEnv::new()?;
let mut emitter = Emitter::new( let mut emitter = Emitter::new(
env.rpc_client(), &env.client,
CheckPoint::new(BlockId { CheckPoint::new(BlockId {
height: 0, height: 0,
hash: env.rpc_client().get_block_hash(0)?, hash: env.client.get_block_hash(0)?,
}), }),
0, 0,
); );
// mine blocks and sync up emitter // mine blocks and sync up emitter
let addr = env let addr = env.client.get_new_address(None, None)?.assume_checked();
.rpc_client()
.get_new_address(None, None)?
.assume_checked();
env.mine_blocks(BLOCKS_TO_MINE, Some(addr.clone()))?; env.mine_blocks(BLOCKS_TO_MINE, Some(addr.clone()))?;
while emitter.next_header()?.is_some() {} while emitter.next_header()?.is_some() {}
@@ -487,19 +617,16 @@ fn mempool_re_emits_if_tx_introduction_height_not_reached() -> anyhow::Result<()
let env = TestEnv::new()?; let env = TestEnv::new()?;
let mut emitter = Emitter::new( let mut emitter = Emitter::new(
env.rpc_client(), &env.client,
CheckPoint::new(BlockId { CheckPoint::new(BlockId {
height: 0, height: 0,
hash: env.rpc_client().get_block_hash(0)?, hash: env.client.get_block_hash(0)?,
}), }),
0, 0,
); );
// mine blocks to get initial balance, sync emitter up to tip // mine blocks to get initial balance, sync emitter up to tip
let addr = env let addr = env.client.get_new_address(None, None)?.assume_checked();
.rpc_client()
.get_new_address(None, None)?
.assume_checked();
env.mine_blocks(PREMINE_COUNT, Some(addr.clone()))?; env.mine_blocks(PREMINE_COUNT, Some(addr.clone()))?;
while emitter.next_header()?.is_some() {} while emitter.next_header()?.is_some() {}
@@ -533,8 +660,7 @@ fn mempool_re_emits_if_tx_introduction_height_not_reached() -> anyhow::Result<()
// At this point, the emitter has seen all mempool transactions. It should only re-emit those // At this point, the emitter has seen all mempool transactions. It should only re-emit those
// that have introduction heights less than the emitter's last-emitted block tip. // that have introduction heights less than the emitter's last-emitted block tip.
while let Some(emission) = emitter.next_header()? { while let Some((height, _)) = emitter.next_header()? {
let height = emission.block_height();
// We call `mempool()` twice. // We call `mempool()` twice.
// The second call (at height `h`) should skip the tx introduced at height `h`. // The second call (at height `h`) should skip the tx introduced at height `h`.
for try_index in 0..2 { for try_index in 0..2 {
@@ -575,19 +701,16 @@ fn mempool_during_reorg() -> anyhow::Result<()> {
let env = TestEnv::new()?; let env = TestEnv::new()?;
let mut emitter = Emitter::new( let mut emitter = Emitter::new(
env.rpc_client(), &env.client,
CheckPoint::new(BlockId { CheckPoint::new(BlockId {
height: 0, height: 0,
hash: env.rpc_client().get_block_hash(0)?, hash: env.client.get_block_hash(0)?,
}), }),
0, 0,
); );
// mine blocks to get initial balance // mine blocks to get initial balance
let addr = env let addr = env.client.get_new_address(None, None)?.assume_checked();
.rpc_client()
.get_new_address(None, None)?
.assume_checked();
env.mine_blocks(PREMINE_COUNT, Some(addr.clone()))?; env.mine_blocks(PREMINE_COUNT, Some(addr.clone()))?;
// introduce mempool tx at each block extension // introduce mempool tx at each block extension
@@ -605,7 +728,7 @@ fn mempool_during_reorg() -> anyhow::Result<()> {
.into_iter() .into_iter()
.map(|(tx, _)| tx.txid()) .map(|(tx, _)| tx.txid())
.collect::<BTreeSet<_>>(), .collect::<BTreeSet<_>>(),
env.rpc_client() env.client
.get_raw_mempool()? .get_raw_mempool()?
.into_iter() .into_iter()
.collect::<BTreeSet<_>>(), .collect::<BTreeSet<_>>(),
@@ -624,15 +747,14 @@ fn mempool_during_reorg() -> anyhow::Result<()> {
// emission. // emission.
// TODO: How can have have reorg logic in `TestEnv` NOT blacklast old blocks first? // TODO: How can have have reorg logic in `TestEnv` NOT blacklast old blocks first?
let tx_introductions = dbg!(env let tx_introductions = dbg!(env
.rpc_client() .client
.get_raw_mempool_verbose()? .get_raw_mempool_verbose()?
.into_iter() .into_iter()
.map(|(txid, entry)| (txid, entry.height as usize)) .map(|(txid, entry)| (txid, entry.height as usize))
.collect::<BTreeMap<_, _>>()); .collect::<BTreeMap<_, _>>());
// `next_header` emits the replacement block of the reorg // `next_header` emits the replacement block of the reorg
if let Some(emission) = emitter.next_header()? { if let Some((height, _)) = emitter.next_header()? {
let height = emission.block_height();
println!("\t- replacement height: {}", height); println!("\t- replacement height: {}", height);
// the mempool emission (that follows the first block emission after reorg) should only // the mempool emission (that follows the first block emission after reorg) should only
@@ -701,10 +823,10 @@ fn no_agreement_point() -> anyhow::Result<()> {
// start height is 99 // start height is 99
let mut emitter = Emitter::new( let mut emitter = Emitter::new(
env.rpc_client(), &env.client,
CheckPoint::new(BlockId { CheckPoint::new(BlockId {
height: 0, height: 0,
hash: env.rpc_client().get_block_hash(0)?, hash: env.client.get_block_hash(0)?,
}), }),
(PREMINE_COUNT - 2) as u32, (PREMINE_COUNT - 2) as u32,
); );
@@ -713,27 +835,27 @@ fn no_agreement_point() -> anyhow::Result<()> {
env.mine_blocks(PREMINE_COUNT, None)?; env.mine_blocks(PREMINE_COUNT, None)?;
// emit block 99a // emit block 99a
let block_header_99a = emitter.next_header()?.expect("block 99a header").block; let (_, block_header_99a) = emitter.next_header()?.expect("block 99a header");
let block_hash_99a = block_header_99a.block_hash(); let block_hash_99a = block_header_99a.block_hash();
let block_hash_98a = block_header_99a.prev_blockhash; let block_hash_98a = block_header_99a.prev_blockhash;
// emit block 100a // emit block 100a
let block_header_100a = emitter.next_header()?.expect("block 100a header").block; let (_, block_header_100a) = emitter.next_header()?.expect("block 100a header");
let block_hash_100a = block_header_100a.block_hash(); let block_hash_100a = block_header_100a.block_hash();
// get hash for block 101a // get hash for block 101a
let block_hash_101a = env.rpc_client().get_block_hash(101)?; let block_hash_101a = env.client.get_block_hash(101)?;
// invalidate blocks 99a, 100a, 101a // invalidate blocks 99a, 100a, 101a
env.rpc_client().invalidate_block(&block_hash_99a)?; env.client.invalidate_block(&block_hash_99a)?;
env.rpc_client().invalidate_block(&block_hash_100a)?; env.client.invalidate_block(&block_hash_100a)?;
env.rpc_client().invalidate_block(&block_hash_101a)?; env.client.invalidate_block(&block_hash_101a)?;
// mine new blocks 99b, 100b, 101b // mine new blocks 99b, 100b, 101b
env.mine_blocks(3, None)?; env.mine_blocks(3, None)?;
// emit block header 99b // emit block header 99b
let block_header_99b = emitter.next_header()?.expect("block 99b header").block; let (_, block_header_99b) = emitter.next_header()?.expect("block 99b header");
let block_hash_99b = block_header_99b.block_hash(); let block_hash_99b = block_header_99b.block_hash();
let block_hash_98b = block_header_99b.prev_blockhash; let block_hash_98b = block_header_99b.prev_blockhash;

View File

@@ -1,8 +1,8 @@
[package] [package]
name = "bdk_chain" name = "bdk_chain"
version = "0.11.0" version = "0.6.0"
edition = "2021" edition = "2021"
rust-version = "1.63" rust-version = "1.57"
homepage = "https://bitcoindevkit.org" homepage = "https://bitcoindevkit.org"
repository = "https://github.com/bitcoindevkit/bdk" repository = "https://github.com/bitcoindevkit/bdk"
documentation = "https://docs.rs/bdk_chain" documentation = "https://docs.rs/bdk_chain"
@@ -18,6 +18,7 @@ bitcoin = { version = "0.30.0", default-features = false }
serde_crate = { package = "serde", version = "1", optional = true, features = ["derive"] } serde_crate = { package = "serde", version = "1", optional = true, features = ["derive"] }
# Use hashbrown as a feature flag to have HashSet and HashMap from it. # Use hashbrown as a feature flag to have HashSet and HashMap from it.
# note versions > 0.9.1 breaks ours 1.57.0 MSRV.
hashbrown = { version = "0.9.1", optional = true, features = ["serde"] } hashbrown = { version = "0.9.1", optional = true, features = ["serde"] }
miniscript = { version = "10.0.0", optional = true, default-features = false } miniscript = { version = "10.0.0", optional = true, default-features = false }

View File

@@ -9,7 +9,7 @@ use crate::{Anchor, AnchorFromBlockPosition, COINBASE_MATURITY};
pub enum ChainPosition<A> { pub enum ChainPosition<A> {
/// The chain data is seen as confirmed, and in anchored by `A`. /// The chain data is seen as confirmed, and in anchored by `A`.
Confirmed(A), Confirmed(A),
/// The chain data is not confirmed and last seen in the mempool at this timestamp. /// The chain data is seen in mempool at this given timestamp.
Unconfirmed(u64), Unconfirmed(u64),
} }
@@ -48,14 +48,14 @@ impl<A: Anchor> ChainPosition<A> {
serde(crate = "serde_crate") serde(crate = "serde_crate")
)] )]
pub enum ConfirmationTime { pub enum ConfirmationTime {
/// The transaction is confirmed /// The confirmed variant.
Confirmed { Confirmed {
/// Confirmation height. /// Confirmation height.
height: u32, height: u32,
/// Confirmation time in unix seconds. /// Confirmation time in unix seconds.
time: u64, time: u64,
}, },
/// The transaction is unconfirmed /// The unconfirmed variant.
Unconfirmed { Unconfirmed {
/// The last-seen timestamp in unix seconds. /// The last-seen timestamp in unix seconds.
last_seen: u64, last_seen: u64,
@@ -81,7 +81,7 @@ impl From<ChainPosition<ConfirmationTimeHeightAnchor>> for ConfirmationTime {
height: a.confirmation_height, height: a.confirmation_height,
time: a.confirmation_time, time: a.confirmation_time,
}, },
ChainPosition::Unconfirmed(last_seen) => Self::Unconfirmed { last_seen }, ChainPosition::Unconfirmed(_) => Self::Unconfirmed { last_seen: 0 },
} }
} }
} }
@@ -147,8 +147,6 @@ impl From<(&u32, &BlockHash)> for BlockId {
/// An [`Anchor`] implementation that also records the exact confirmation height of the transaction. /// An [`Anchor`] implementation that also records the exact confirmation height of the transaction.
/// ///
/// Note that the confirmation block and the anchor block can be different here.
///
/// Refer to [`Anchor`] for more details. /// Refer to [`Anchor`] for more details.
#[derive(Debug, Default, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)] #[derive(Debug, Default, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)]
#[cfg_attr( #[cfg_attr(
@@ -157,12 +155,13 @@ impl From<(&u32, &BlockHash)> for BlockId {
serde(crate = "serde_crate") serde(crate = "serde_crate")
)] )]
pub struct ConfirmationHeightAnchor { pub struct ConfirmationHeightAnchor {
/// The anchor block.
pub anchor_block: BlockId,
/// The exact confirmation height of the transaction. /// The exact confirmation height of the transaction.
/// ///
/// It is assumed that this value is never larger than the height of the anchor block. /// It is assumed that this value is never larger than the height of the anchor block.
pub confirmation_height: u32, pub confirmation_height: u32,
/// The anchor block.
pub anchor_block: BlockId,
} }
impl Anchor for ConfirmationHeightAnchor { impl Anchor for ConfirmationHeightAnchor {
@@ -187,8 +186,6 @@ impl AnchorFromBlockPosition for ConfirmationHeightAnchor {
/// An [`Anchor`] implementation that also records the exact confirmation time and height of the /// An [`Anchor`] implementation that also records the exact confirmation time and height of the
/// transaction. /// transaction.
/// ///
/// Note that the confirmation block and the anchor block can be different here.
///
/// Refer to [`Anchor`] for more details. /// Refer to [`Anchor`] for more details.
#[derive(Debug, Default, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)] #[derive(Debug, Default, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)]
#[cfg_attr( #[cfg_attr(
@@ -197,12 +194,12 @@ impl AnchorFromBlockPosition for ConfirmationHeightAnchor {
serde(crate = "serde_crate") serde(crate = "serde_crate")
)] )]
pub struct ConfirmationTimeHeightAnchor { pub struct ConfirmationTimeHeightAnchor {
/// The confirmation height of the transaction being anchored.
pub confirmation_height: u32,
/// The confirmation time of the transaction being anchored.
pub confirmation_time: u64,
/// The anchor block. /// The anchor block.
pub anchor_block: BlockId, pub anchor_block: BlockId,
/// The confirmation height of the chain data being anchored.
pub confirmation_height: u32,
/// The confirmation time of the chain data being anchored.
pub confirmation_time: u64,
} }
impl Anchor for ConfirmationTimeHeightAnchor { impl Anchor for ConfirmationTimeHeightAnchor {
@@ -228,12 +225,12 @@ impl AnchorFromBlockPosition for ConfirmationTimeHeightAnchor {
/// A `TxOut` with as much data as we can retrieve about it /// A `TxOut` with as much data as we can retrieve about it
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct FullTxOut<A> { pub struct FullTxOut<A> {
/// The position of the transaction in `outpoint` in the overall chain.
pub chain_position: ChainPosition<A>,
/// The location of the `TxOut`. /// The location of the `TxOut`.
pub outpoint: OutPoint, pub outpoint: OutPoint,
/// The `TxOut`. /// The `TxOut`.
pub txout: TxOut, pub txout: TxOut,
/// The position of the transaction in `outpoint` in the overall chain.
pub chain_position: ChainPosition<A>,
/// The txid and chain position of the transaction (if any) that has spent this output. /// The txid and chain position of the transaction (if any) that has spent this output.
pub spent_by: Option<(ChainPosition<A>, Txid)>, pub spent_by: Option<(ChainPosition<A>, Txid)>,
/// Whether this output is on a coinbase transaction. /// Whether this output is on a coinbase transaction.
@@ -298,35 +295,3 @@ impl<A: Anchor> FullTxOut<A> {
true true
} }
} }
#[cfg(test)]
mod test {
use super::*;
#[test]
fn chain_position_ord() {
let unconf1 = ChainPosition::<ConfirmationHeightAnchor>::Unconfirmed(10);
let unconf2 = ChainPosition::<ConfirmationHeightAnchor>::Unconfirmed(20);
let conf1 = ChainPosition::Confirmed(ConfirmationHeightAnchor {
confirmation_height: 9,
anchor_block: BlockId {
height: 20,
..Default::default()
},
});
let conf2 = ChainPosition::Confirmed(ConfirmationHeightAnchor {
confirmation_height: 12,
anchor_block: BlockId {
height: 15,
..Default::default()
},
});
assert!(unconf2 > unconf1, "higher last_seen means higher ord");
assert!(unconf1 > conf1, "unconfirmed is higher ord than confirmed");
assert!(
conf2 > conf1,
"confirmation_height is higher then it should be higher ord"
);
}
}

View File

@@ -3,7 +3,7 @@ use crate::BlockId;
/// Represents a service that tracks the blockchain. /// Represents a service that tracks the blockchain.
/// ///
/// The main method is [`is_block_in_chain`] which determines whether a given block of [`BlockId`] /// The main method is [`is_block_in_chain`] which determines whether a given block of [`BlockId`]
/// is an ancestor of the `chain_tip`. /// is an ancestor of another "static block".
/// ///
/// [`is_block_in_chain`]: Self::is_block_in_chain /// [`is_block_in_chain`]: Self::is_block_in_chain
pub trait ChainOracle { pub trait ChainOracle {

View File

@@ -1,5 +1,7 @@
//! Contains the [`IndexedTxGraph`] and associated types. Refer to the //! Contains the [`IndexedTxGraph`] structure and associated types.
//! [`IndexedTxGraph`] documentation for more. //!
//! This is essentially a [`TxGraph`] combined with an indexer.
use alloc::vec::Vec; use alloc::vec::Vec;
use bitcoin::{Block, OutPoint, Transaction, TxOut, Txid}; use bitcoin::{Block, OutPoint, Transaction, TxOut, Txid};
@@ -9,9 +11,9 @@ use crate::{
Anchor, AnchorFromBlockPosition, Append, BlockId, Anchor, AnchorFromBlockPosition, Append, BlockId,
}; };
/// The [`IndexedTxGraph`] combines a [`TxGraph`] and an [`Indexer`] implementation. /// A struct that combines [`TxGraph`] and an [`Indexer`] implementation.
/// ///
/// It ensures that [`TxGraph`] and [`Indexer`] are updated atomically. /// This structure ensures that [`TxGraph`] and [`Indexer`] are updated atomically.
#[derive(Debug)] #[derive(Debug)]
pub struct IndexedTxGraph<A, I> { pub struct IndexedTxGraph<A, I> {
/// Transaction index. /// Transaction index.
@@ -224,26 +226,20 @@ where
/// Irrelevant transactions in `txs` will be ignored. /// Irrelevant transactions in `txs` will be ignored.
pub fn apply_block_relevant( pub fn apply_block_relevant(
&mut self, &mut self,
block: &Block, block: Block,
height: u32, height: u32,
) -> ChangeSet<A, I::ChangeSet> { ) -> ChangeSet<A, I::ChangeSet> {
let block_id = BlockId { let block_id = BlockId {
hash: block.block_hash(), hash: block.block_hash(),
height, height,
}; };
let mut changeset = ChangeSet::<A, I::ChangeSet>::default(); let txs = block.txdata.iter().enumerate().map(|(tx_pos, tx)| {
for (tx_pos, tx) in block.txdata.iter().enumerate() { (
changeset.indexer.append(self.index.index_tx(tx)); tx,
if self.index.is_tx_relevant(tx) { core::iter::once(A::from_block_position(&block, block_id, tx_pos)),
let txid = tx.txid(); )
let anchor = A::from_block_position(block, block_id, tx_pos); });
changeset.graph.append(self.graph.insert_tx(tx.clone())); self.batch_insert_relevant(txs)
changeset
.graph
.append(self.graph.insert_anchor(txid, anchor));
}
}
changeset
} }
/// Batch insert all transactions of the given `block` of `height`. /// Batch insert all transactions of the given `block` of `height`.
@@ -270,7 +266,7 @@ where
} }
} }
/// Represents changes to an [`IndexedTxGraph`]. /// A structure that represents changes to an [`IndexedTxGraph`].
#[derive(Clone, Debug, PartialEq)] #[derive(Clone, Debug, PartialEq)]
#[cfg_attr( #[cfg_attr(
feature = "serde", feature = "serde",

View File

@@ -20,7 +20,7 @@ pub use txout_index::*;
/// Represents updates to the derivation index of a [`KeychainTxOutIndex`]. /// Represents updates to the derivation index of a [`KeychainTxOutIndex`].
/// It maps each keychain `K` to its last revealed index. /// It maps each keychain `K` to its last revealed index.
/// ///
/// It can be applied to [`KeychainTxOutIndex`] with [`apply_changeset`]. [`ChangeSet`]s are /// It can be applied to [`KeychainTxOutIndex`] with [`apply_changeset`]. [`ChangeSet] are
/// monotone in that they will never decrease the revealed derivation index. /// monotone in that they will never decrease the revealed derivation index.
/// ///
/// [`KeychainTxOutIndex`]: crate::keychain::KeychainTxOutIndex /// [`KeychainTxOutIndex`]: crate::keychain::KeychainTxOutIndex
@@ -58,9 +58,8 @@ impl<K: Ord> Append for ChangeSet<K> {
*index = other_index.max(*index); *index = other_index.max(*index);
} }
}); });
// We use `extend` instead of `BTreeMap::append` due to performance issues with `append`.
// Refer to https://github.com/rust-lang/rust/issues/34666#issuecomment-675658420 self.0.append(&mut other.0);
self.0.extend(other.0);
} }
/// Returns whether the changeset are empty. /// Returns whether the changeset are empty.

View File

@@ -5,56 +5,23 @@ use crate::{
spk_iter::BIP32_MAX_INDEX, spk_iter::BIP32_MAX_INDEX,
SpkIterator, SpkTxOutIndex, SpkIterator, SpkTxOutIndex,
}; };
use bitcoin::{OutPoint, Script, Transaction, TxOut, Txid}; use alloc::vec::Vec;
use core::{ use bitcoin::{OutPoint, Script, TxOut};
fmt::Debug, use core::{fmt::Debug, ops::Deref};
ops::{Bound, RangeBounds},
};
use crate::Append; use crate::Append;
const DEFAULT_LOOKAHEAD: u32 = 25; /// A convenient wrapper around [`SpkTxOutIndex`] that relates script pubkeys to miniscript public
/// [`Descriptor`]s.
/// [`KeychainTxOutIndex`] controls how script pubkeys are revealed for multiple keychains, and
/// indexes [`TxOut`]s with them.
/// ///
/// A single keychain is a chain of script pubkeys derived from a single [`Descriptor`]. Keychains /// Descriptors are referenced by the provided keychain generic (`K`).
/// are identified using the `K` generic. Script pubkeys are identified by the keychain that they
/// are derived from `K`, as well as the derivation index `u32`.
/// ///
/// # Revealed script pubkeys /// Script pubkeys for a descriptor are revealed chronologically from index 0. I.e., If the last
/// revealed index of a descriptor is 5; scripts of indices 0 to 4 are guaranteed to be already
/// revealed. In addition to revealed scripts, we have a `lookahead` parameter for each keychain,
/// which defines the number of script pubkeys to store ahead of the last revealed index.
/// ///
/// Tracking how script pubkeys are revealed is useful for collecting chain data. For example, if /// Methods that could update the last revealed index will return [`super::ChangeSet`] to report
/// the user has requested 5 script pubkeys (to receive money with), we only need to use those
/// script pubkeys to scan for chain data.
///
/// Call [`reveal_to_target`] or [`reveal_next_spk`] to reveal more script pubkeys.
/// Call [`revealed_keychain_spks`] or [`revealed_spks`] to iterate through revealed script pubkeys.
///
/// # Lookahead script pubkeys
///
/// When an user first recovers a wallet (i.e. from a recovery phrase and/or descriptor), we will
/// NOT have knowledge of which script pubkeys are revealed. So when we index a transaction or
/// txout (using [`index_tx`]/[`index_txout`]) we scan the txouts against script pubkeys derived
/// above the last revealed index. These additionally-derived script pubkeys are called the
/// lookahead.
///
/// The [`KeychainTxOutIndex`] is constructed with the `lookahead` and cannot be altered. The
/// default `lookahead` count is 1000. Use [`new`] to set a custom `lookahead`.
///
/// # Unbounded script pubkey iterator
///
/// For script-pubkey-based chain sources (such as Electrum/Esplora), an initial scan is best done
/// by iterating though derived script pubkeys one by one and requesting transaction histories for
/// each script pubkey. We will stop after x-number of script pubkeys have empty histories. An
/// unbounded script pubkey iterator is useful to pass to such a chain source.
///
/// Call [`unbounded_spk_iter`] to get an unbounded script pubkey iterator for a given keychain.
/// Call [`all_unbounded_spk_iters`] to get unbounded script pubkey iterators for all keychains.
///
/// # Change sets
///
/// Methods that can update the last revealed index will return [`super::ChangeSet`] to report
/// these changes. This can be persisted for future recovery. /// these changes. This can be persisted for future recovery.
/// ///
/// ## Synopsis /// ## Synopsis
@@ -79,7 +46,7 @@ const DEFAULT_LOOKAHEAD: u32 = 25;
/// # let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only(); /// # let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only();
/// # let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap(); /// # let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
/// # let (internal_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/*)").unwrap(); /// # let (internal_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/*)").unwrap();
/// # let (descriptor_for_user_42, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/2/*)").unwrap(); /// # let descriptor_for_user_42 = external_descriptor.clone();
/// txout_index.add_keychain(MyKeychain::External, external_descriptor); /// txout_index.add_keychain(MyKeychain::External, external_descriptor);
/// txout_index.add_keychain(MyKeychain::Internal, internal_descriptor); /// txout_index.add_keychain(MyKeychain::Internal, internal_descriptor);
/// txout_index.add_keychain(MyKeychain::MyAppUser { user_id: 42 }, descriptor_for_user_42); /// txout_index.add_keychain(MyKeychain::MyAppUser { user_id: 42 }, descriptor_for_user_42);
@@ -90,15 +57,6 @@ const DEFAULT_LOOKAHEAD: u32 = 25;
/// [`Ord`]: core::cmp::Ord /// [`Ord`]: core::cmp::Ord
/// [`SpkTxOutIndex`]: crate::spk_txout_index::SpkTxOutIndex /// [`SpkTxOutIndex`]: crate::spk_txout_index::SpkTxOutIndex
/// [`Descriptor`]: crate::miniscript::Descriptor /// [`Descriptor`]: crate::miniscript::Descriptor
/// [`reveal_to_target`]: KeychainTxOutIndex::reveal_to_target
/// [`reveal_next_spk`]: KeychainTxOutIndex::reveal_next_spk
/// [`revealed_keychain_spks`]: KeychainTxOutIndex::revealed_keychain_spks
/// [`revealed_spks`]: KeychainTxOutIndex::revealed_spks
/// [`index_tx`]: KeychainTxOutIndex::index_tx
/// [`index_txout`]: KeychainTxOutIndex::index_txout
/// [`new`]: KeychainTxOutIndex::new
/// [`unbounded_spk_iter`]: KeychainTxOutIndex::unbounded_spk_iter
/// [`all_unbounded_spk_iters`]: KeychainTxOutIndex::all_unbounded_spk_iters
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct KeychainTxOutIndex<K> { pub struct KeychainTxOutIndex<K> {
inner: SpkTxOutIndex<(K, u32)>, inner: SpkTxOutIndex<(K, u32)>,
@@ -107,12 +65,25 @@ pub struct KeychainTxOutIndex<K> {
// last revealed indexes // last revealed indexes
last_revealed: BTreeMap<K, u32>, last_revealed: BTreeMap<K, u32>,
// lookahead settings for each keychain // lookahead settings for each keychain
lookahead: u32, lookahead: BTreeMap<K, u32>,
} }
impl<K> Default for KeychainTxOutIndex<K> { impl<K> Default for KeychainTxOutIndex<K> {
fn default() -> Self { fn default() -> Self {
Self::new(DEFAULT_LOOKAHEAD) Self {
inner: SpkTxOutIndex::default(),
keychains: BTreeMap::default(),
last_revealed: BTreeMap::default(),
lookahead: BTreeMap::default(),
}
}
}
impl<K> Deref for KeychainTxOutIndex<K> {
type Target = SpkTxOutIndex<(K, u32)>;
fn deref(&self) -> &Self::Target {
&self.inner
} }
} }
@@ -143,37 +114,12 @@ impl<K: Clone + Ord + Debug> Indexer for KeychainTxOutIndex<K> {
} }
fn is_tx_relevant(&self, tx: &bitcoin::Transaction) -> bool { fn is_tx_relevant(&self, tx: &bitcoin::Transaction) -> bool {
self.inner.is_relevant(tx) self.is_relevant(tx)
} }
} }
impl<K> KeychainTxOutIndex<K> {
/// Construct a [`KeychainTxOutIndex`] with the given `lookahead`.
///
/// The `lookahead` is the number of script pubkeys to derive and cache from the internal
/// descriptors over and above the last revealed script index. Without a lookahead the index
/// will miss outputs you own when processing transactions whose output script pubkeys lie
/// beyond the last revealed index. In certain situations, such as when performing an initial
/// scan of the blockchain during wallet import, it may be uncertain or unknown what the index
/// of the last revealed script pubkey actually is.
///
/// Refer to [struct-level docs](KeychainTxOutIndex) for more about `lookahead`.
pub fn new(lookahead: u32) -> Self {
Self {
inner: SpkTxOutIndex::default(),
keychains: BTreeMap::new(),
last_revealed: BTreeMap::new(),
lookahead,
}
}
}
/// Methods that are *re-exposed* from the internal [`SpkTxOutIndex`].
impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> { impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
/// Return a reference to the internal [`SpkTxOutIndex`]. /// Return a reference to the internal [`SpkTxOutIndex`].
///
/// **WARNING:** The internal index will contain lookahead spks. Refer to
/// [struct-level docs](KeychainTxOutIndex) for more about `lookahead`.
pub fn inner(&self) -> &SpkTxOutIndex<(K, u32)> { pub fn inner(&self) -> &SpkTxOutIndex<(K, u32)> {
&self.inner &self.inner
} }
@@ -183,116 +129,7 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
self.inner.outpoints() self.inner.outpoints()
} }
/// Iterate over known txouts that spend to tracked script pubkeys. /// Return a reference to the internal map of the keychain to descriptors.
pub fn txouts(
&self,
) -> impl DoubleEndedIterator<Item = (K, u32, OutPoint, &TxOut)> + ExactSizeIterator {
self.inner
.txouts()
.map(|((k, i), op, txo)| (k.clone(), *i, op, txo))
}
/// Finds all txouts on a transaction that has previously been scanned and indexed.
pub fn txouts_in_tx(
&self,
txid: Txid,
) -> impl DoubleEndedIterator<Item = (K, u32, OutPoint, &TxOut)> {
self.inner
.txouts_in_tx(txid)
.map(|((k, i), op, txo)| (k.clone(), *i, op, txo))
}
/// Return the [`TxOut`] of `outpoint` if it has been indexed.
///
/// The associated keychain and keychain index of the txout's spk is also returned.
///
/// This calls [`SpkTxOutIndex::txout`] internally.
pub fn txout(&self, outpoint: OutPoint) -> Option<(K, u32, &TxOut)> {
self.inner
.txout(outpoint)
.map(|((k, i), txo)| (k.clone(), *i, txo))
}
/// Return the script that exists under the given `keychain`'s `index`.
///
/// This calls [`SpkTxOutIndex::spk_at_index`] internally.
pub fn spk_at_index(&self, keychain: K, index: u32) -> Option<&Script> {
self.inner.spk_at_index(&(keychain, index))
}
/// Returns the keychain and keychain index associated with the spk.
///
/// This calls [`SpkTxOutIndex::index_of_spk`] internally.
pub fn index_of_spk(&self, script: &Script) -> Option<(K, u32)> {
self.inner.index_of_spk(script).cloned()
}
/// Returns whether the spk under the `keychain`'s `index` has been used.
///
/// Here, "unused" means that after the script pubkey was stored in the index, the index has
/// never scanned a transaction output with it.
///
/// This calls [`SpkTxOutIndex::is_used`] internally.
pub fn is_used(&self, keychain: K, index: u32) -> bool {
self.inner.is_used(&(keychain, index))
}
/// Marks the script pubkey at `index` as used even though the tracker hasn't seen an output
/// with it.
///
/// This only has an effect when the `index` had been added to `self` already and was unused.
///
/// Returns whether the `index` was initially present as `unused`.
///
/// This is useful when you want to reserve a script pubkey for something but don't want to add
/// the transaction output using it to the index yet. Other callers will consider `index` on
/// `keychain` used until you call [`unmark_used`].
///
/// This calls [`SpkTxOutIndex::mark_used`] internally.
///
/// [`unmark_used`]: Self::unmark_used
pub fn mark_used(&mut self, keychain: K, index: u32) -> bool {
self.inner.mark_used(&(keychain, index))
}
/// Undoes the effect of [`mark_used`]. Returns whether the `index` is inserted back into
/// `unused`.
///
/// Note that if `self` has scanned an output with this script pubkey, then this will have no
/// effect.
///
/// This calls [`SpkTxOutIndex::unmark_used`] internally.
///
/// [`mark_used`]: Self::mark_used
pub fn unmark_used(&mut self, keychain: K, index: u32) -> bool {
self.inner.unmark_used(&(keychain, index))
}
/// Computes total input value going from script pubkeys in the index (sent) and the total output
/// value going to script pubkeys in the index (received) in `tx`. For the `sent` to be computed
/// correctly, the output being spent must have already been scanned by the index. Calculating
/// received just uses the [`Transaction`] outputs directly, so it will be correct even if it has
/// not been scanned.
///
/// This calls [`SpkTxOutIndex::sent_and_received`] internally.
pub fn sent_and_received(&self, tx: &Transaction) -> (u64, u64) {
self.inner.sent_and_received(tx)
}
/// Computes the net value that this transaction gives to the script pubkeys in the index and
/// *takes* from the transaction outputs in the index. Shorthand for calling
/// [`sent_and_received`] and subtracting sent from received.
///
/// This calls [`SpkTxOutIndex::net_value`] internally.
///
/// [`sent_and_received`]: Self::sent_and_received
pub fn net_value(&self, tx: &Transaction) -> i64 {
self.inner.net_value(tx)
}
}
impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
/// Return a reference to the internal map of keychain to descriptors.
pub fn keychains(&self) -> &BTreeMap<K, Descriptor<DescriptorPublicKey>> { pub fn keychains(&self) -> &BTreeMap<K, Descriptor<DescriptorPublicKey>> {
&self.keychains &self.keychains
} }
@@ -308,43 +145,78 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
pub fn add_keychain(&mut self, keychain: K, descriptor: Descriptor<DescriptorPublicKey>) { pub fn add_keychain(&mut self, keychain: K, descriptor: Descriptor<DescriptorPublicKey>) {
let old_descriptor = &*self let old_descriptor = &*self
.keychains .keychains
.entry(keychain.clone()) .entry(keychain)
.or_insert_with(|| descriptor.clone()); .or_insert_with(|| descriptor.clone());
assert_eq!( assert_eq!(
&descriptor, old_descriptor, &descriptor, old_descriptor,
"keychain already contains a different descriptor" "keychain already contains a different descriptor"
); );
self.replenish_lookahead(&keychain, self.lookahead);
} }
/// Get the lookahead setting. /// Return the lookahead setting for each keychain.
/// ///
/// Refer to [`new`] for more information on the `lookahead`. /// Refer to [`set_lookahead`] for a deeper explanation of the `lookahead`.
/// ///
/// [`new`]: Self::new /// [`set_lookahead`]: Self::set_lookahead
pub fn lookahead(&self) -> u32 { pub fn lookaheads(&self) -> &BTreeMap<K, u32> {
self.lookahead &self.lookahead
} }
/// Store lookahead scripts until `target_index` (inclusive). /// Convenience method to call [`set_lookahead`] for all keychains.
/// ///
/// This does not change the global `lookahead` setting. /// [`set_lookahead`]: Self::set_lookahead
pub fn lookahead_to_target(&mut self, keychain: &K, target_index: u32) { pub fn set_lookahead_for_all(&mut self, lookahead: u32) {
let (next_index, _) = self.next_index(keychain); for keychain in &self.keychains.keys().cloned().collect::<Vec<_>>() {
self.set_lookahead(keychain, lookahead);
let temp_lookahead = (target_index + 1)
.checked_sub(next_index)
.filter(|&index| index > 0);
if let Some(temp_lookahead) = temp_lookahead {
self.replenish_lookahead(keychain, temp_lookahead);
} }
} }
fn replenish_lookahead(&mut self, keychain: &K, lookahead: u32) { /// Set the lookahead count for `keychain`.
///
/// The lookahead is the number of scripts to cache ahead of the last revealed script index. This
/// is useful to find outputs you own when processing block data that lie beyond the last revealed
/// index. In certain situations, such as when performing an initial scan of the blockchain during
/// wallet import, it may be uncertain or unknown what the last revealed index is.
///
/// # Panics
///
/// This will panic if the `keychain` does not exist.
pub fn set_lookahead(&mut self, keychain: &K, lookahead: u32) {
self.lookahead.insert(keychain.clone(), lookahead);
self.replenish_lookahead(keychain);
}
/// Convenience method to call [`lookahead_to_target`] for multiple keychains.
///
/// [`lookahead_to_target`]: Self::lookahead_to_target
pub fn lookahead_to_target_multi(&mut self, target_indexes: BTreeMap<K, u32>) {
for (keychain, target_index) in target_indexes {
self.lookahead_to_target(&keychain, target_index)
}
}
/// Store lookahead scripts until `target_index`.
///
/// This does not change the `lookahead` setting.
pub fn lookahead_to_target(&mut self, keychain: &K, target_index: u32) {
let next_index = self.next_store_index(keychain);
if let Some(temp_lookahead) = target_index.checked_sub(next_index).filter(|&v| v > 0) {
let old_lookahead = self.lookahead.insert(keychain.clone(), temp_lookahead);
self.replenish_lookahead(keychain);
// revert
match old_lookahead {
Some(lookahead) => self.lookahead.insert(keychain.clone(), lookahead),
None => self.lookahead.remove(keychain),
};
}
}
fn replenish_lookahead(&mut self, keychain: &K) {
let descriptor = self.keychains.get(keychain).expect("keychain must exist"); let descriptor = self.keychains.get(keychain).expect("keychain must exist");
let next_store_index = self.next_store_index(keychain); let next_store_index = self.next_store_index(keychain);
let next_reveal_index = self.last_revealed.get(keychain).map_or(0, |v| *v + 1); let next_reveal_index = self.last_revealed.get(keychain).map_or(0, |v| *v + 1);
let lookahead = self.lookahead.get(keychain).map_or(0, |v| *v);
for (new_index, new_spk) in for (new_index, new_spk) in
SpkIterator::new_with_range(descriptor, next_store_index..next_reveal_index + lookahead) SpkIterator::new_with_range(descriptor, next_store_index..next_reveal_index + lookahead)
@@ -359,74 +231,64 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
fn next_store_index(&self, keychain: &K) -> u32 { fn next_store_index(&self, keychain: &K) -> u32 {
self.inner() self.inner()
.all_spks() .all_spks()
// This range is filtering out the spks with a keychain different than
// `keychain`. We don't use filter here as range is more optimized.
.range((keychain.clone(), u32::MIN)..(keychain.clone(), u32::MAX)) .range((keychain.clone(), u32::MIN)..(keychain.clone(), u32::MAX))
.last() .last()
.map_or(0, |((_, index), _)| *index + 1) .map_or(0, |((_, v), _)| *v + 1)
} }
/// Get an unbounded spk iterator over a given `keychain`. /// Generates script pubkey iterators for every `keychain`. The iterators iterate over all
/// /// derivable script pubkeys.
/// # Panics pub fn spks_of_all_keychains(
///
/// This will panic if the given `keychain`'s descriptor does not exist.
pub fn unbounded_spk_iter(&self, keychain: &K) -> SpkIterator<Descriptor<DescriptorPublicKey>> {
SpkIterator::new(
self.keychains
.get(keychain)
.expect("keychain does not exist")
.clone(),
)
}
/// Get unbounded spk iterators for all keychains.
pub fn all_unbounded_spk_iters(
&self, &self,
) -> BTreeMap<K, SpkIterator<Descriptor<DescriptorPublicKey>>> { ) -> BTreeMap<K, SpkIterator<Descriptor<DescriptorPublicKey>>> {
self.keychains self.keychains
.iter() .iter()
.map(|(k, descriptor)| (k.clone(), SpkIterator::new(descriptor.clone()))) .map(|(keychain, descriptor)| {
(
keychain.clone(),
SpkIterator::new_with_range(descriptor.clone(), 0..),
)
})
.collect() .collect()
} }
/// Iterate over revealed spks of all keychains. /// Generates a script pubkey iterator for the given `keychain`'s descriptor (if it exists). The
pub fn revealed_spks(&self) -> impl DoubleEndedIterator<Item = (K, u32, &Script)> + Clone { /// iterator iterates over all derivable scripts of the keychain's descriptor.
self.keychains.keys().flat_map(|keychain| { ///
self.revealed_keychain_spks(keychain) /// # Panics
.map(|(i, spk)| (keychain.clone(), i, spk)) ///
}) /// This will panic if the `keychain` does not exist.
pub fn spks_of_keychain(&self, keychain: &K) -> SpkIterator<Descriptor<DescriptorPublicKey>> {
let descriptor = self
.keychains
.get(keychain)
.expect("keychain must exist")
.clone();
SpkIterator::new_with_range(descriptor, 0..)
} }
/// Iterate over revealed spks of the given `keychain`. /// Convenience method to get [`revealed_spks_of_keychain`] of all keychains.
pub fn revealed_keychain_spks( ///
/// [`revealed_spks_of_keychain`]: Self::revealed_spks_of_keychain
pub fn revealed_spks_of_all_keychains(
&self,
) -> BTreeMap<K, impl Iterator<Item = (u32, &Script)> + Clone> {
self.keychains
.keys()
.map(|keychain| (keychain.clone(), self.revealed_spks_of_keychain(keychain)))
.collect()
}
/// Iterates over the script pubkeys revealed by this index under `keychain`.
pub fn revealed_spks_of_keychain(
&self, &self,
keychain: &K, keychain: &K,
) -> impl DoubleEndedIterator<Item = (u32, &Script)> + Clone { ) -> impl DoubleEndedIterator<Item = (u32, &Script)> + Clone {
let next_i = self.last_revealed.get(keychain).map_or(0, |&i| i + 1); let next_index = self.last_revealed.get(keychain).map_or(0, |v| *v + 1);
self.inner self.inner
.all_spks() .all_spks()
.range((keychain.clone(), u32::MIN)..(keychain.clone(), next_i)) .range((keychain.clone(), u32::MIN)..(keychain.clone(), next_index))
.map(|((_, i), spk)| (*i, spk.as_script())) .map(|((_, derivation_index), spk)| (*derivation_index, spk.as_script()))
}
/// Iterate over revealed, but unused, spks of all keychains.
pub fn unused_spks(&self) -> impl DoubleEndedIterator<Item = (K, u32, &Script)> + Clone {
self.keychains.keys().flat_map(|keychain| {
self.unused_keychain_spks(keychain)
.map(|(i, spk)| (keychain.clone(), i, spk))
})
}
/// Iterate over revealed, but unused, spks of the given `keychain`.
pub fn unused_keychain_spks(
&self,
keychain: &K,
) -> impl DoubleEndedIterator<Item = (u32, &Script)> + Clone {
let next_i = self.last_revealed.get(keychain).map_or(0, |&i| i + 1);
self.inner
.unused_spks((keychain.clone(), u32::MIN)..(keychain.clone(), next_i))
.map(|((_, i), spk)| (*i, spk))
} }
/// Get the next derivation index for `keychain`. The next index is the index after the last revealed /// Get the next derivation index for `keychain`. The next index is the index after the last revealed
@@ -525,45 +387,55 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
let has_wildcard = descriptor.has_wildcard(); let has_wildcard = descriptor.has_wildcard();
let target_index = if has_wildcard { target_index } else { 0 }; let target_index = if has_wildcard { target_index } else { 0 };
let next_reveal_index = self let next_reveal_index = self.last_revealed.get(keychain).map_or(0, |v| *v + 1);
.last_revealed let lookahead = self.lookahead.get(keychain).map_or(0, |v| *v);
.get(keychain)
.map_or(0, |index| *index + 1);
debug_assert!(next_reveal_index + self.lookahead >= self.next_store_index(keychain)); debug_assert_eq!(
next_reveal_index + lookahead,
self.next_store_index(keychain)
);
// If the target_index is already revealed, we are done // if we need to reveal new indices, the latest revealed index goes here
if next_reveal_index > target_index { let mut reveal_to_index = None;
return (
// if the target is not yet revealed, but is already stored (due to lookahead), we need to
// set the `reveal_to_index` as target here (as the `for` loop below only updates
// `reveal_to_index` for indexes that are NOT stored)
if next_reveal_index <= target_index && target_index < next_reveal_index + lookahead {
reveal_to_index = Some(target_index);
}
// we range over indexes that are not stored
let range = next_reveal_index + lookahead..=target_index + lookahead;
for (new_index, new_spk) in SpkIterator::new_with_range(descriptor, range) {
let _inserted = self
.inner
.insert_spk((keychain.clone(), new_index), new_spk);
debug_assert!(_inserted, "must not have existing spk",);
// everything after `target_index` is stored for lookahead only
if new_index <= target_index {
reveal_to_index = Some(new_index);
}
}
match reveal_to_index {
Some(index) => {
let _old_index = self.last_revealed.insert(keychain.clone(), index);
debug_assert!(_old_index < Some(index));
(
SpkIterator::new_with_range(descriptor.clone(), next_reveal_index..index + 1),
super::ChangeSet(core::iter::once((keychain.clone(), index)).collect()),
)
}
None => (
SpkIterator::new_with_range( SpkIterator::new_with_range(
descriptor.clone(), descriptor.clone(),
next_reveal_index..next_reveal_index, next_reveal_index..next_reveal_index,
), ),
super::ChangeSet::default(), super::ChangeSet::default(),
); ),
} }
// We range over the indexes that are not stored and insert their spks in the index.
// Indexes from next_reveal_index to next_reveal_index + lookahead are already stored (due
// to lookahead), so we only range from next_reveal_index + lookahead to target + lookahead
let range = next_reveal_index + self.lookahead..=target_index + self.lookahead;
for (new_index, new_spk) in SpkIterator::new_with_range(descriptor, range) {
let _inserted = self
.inner
.insert_spk((keychain.clone(), new_index), new_spk);
debug_assert!(_inserted, "must not have existing spk");
debug_assert!(
has_wildcard || new_index == 0,
"non-wildcard descriptors must not iterate past index 0"
);
}
let _old_index = self.last_revealed.insert(keychain.clone(), target_index);
debug_assert!(_old_index < Some(target_index));
(
SpkIterator::new_with_range(descriptor.clone(), next_reveal_index..target_index + 1),
super::ChangeSet(core::iter::once((keychain.clone(), target_index)).collect()),
)
} }
/// Attempts to reveal the next script pubkey for `keychain`. /// Attempts to reveal the next script pubkey for `keychain`.
@@ -603,13 +475,13 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
/// ///
/// Panics if `keychain` has never been added to the index /// Panics if `keychain` has never been added to the index
pub fn next_unused_spk(&mut self, keychain: &K) -> ((u32, &Script), super::ChangeSet<K>) { pub fn next_unused_spk(&mut self, keychain: &K) -> ((u32, &Script), super::ChangeSet<K>) {
let need_new = self.unused_keychain_spks(keychain).next().is_none(); let need_new = self.unused_spks_of_keychain(keychain).next().is_none();
// this rather strange branch is needed because of some lifetime issues // this rather strange branch is needed because of some lifetime issues
if need_new { if need_new {
self.reveal_next_spk(keychain) self.reveal_next_spk(keychain)
} else { } else {
( (
self.unused_keychain_spks(keychain) self.unused_spks_of_keychain(keychain)
.next() .next()
.expect("we already know next exists"), .expect("we already know next exists"),
super::ChangeSet::default(), super::ChangeSet::default(),
@@ -617,44 +489,58 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
} }
} }
/// Iterate over all [`OutPoint`]s that point to `TxOut`s with script pubkeys derived from /// Marks the script pubkey at `index` as used even though the tracker hasn't seen an output with it.
/// `keychain`. /// This only has an effect when the `index` had been added to `self` already and was unused.
/// ///
/// Use [`keychain_outpoints_in_range`](KeychainTxOutIndex::keychain_outpoints_in_range) to /// Returns whether the `index` was initially present as `unused`.
/// iterate over a specific derivation range. ///
pub fn keychain_outpoints( /// This is useful when you want to reserve a script pubkey for something but don't want to add
&self, /// the transaction output using it to the index yet. Other callers will consider `index` on
keychain: &K, /// `keychain` used until you call [`unmark_used`].
) -> impl DoubleEndedIterator<Item = (u32, OutPoint)> + '_ { ///
self.keychain_outpoints_in_range(keychain, ..) /// [`unmark_used`]: Self::unmark_used
pub fn mark_used(&mut self, keychain: &K, index: u32) -> bool {
self.inner.mark_used(&(keychain.clone(), index))
} }
/// Iterate over [`OutPoint`]s that point to `TxOut`s with script pubkeys derived from /// Undoes the effect of [`mark_used`]. Returns whether the `index` is inserted back into
/// `keychain` in a given derivation `range`. /// `unused`.
pub fn keychain_outpoints_in_range( ///
/// Note that if `self` has scanned an output with this script pubkey, then this will have no
/// effect.
///
/// [`mark_used`]: Self::mark_used
pub fn unmark_used(&mut self, keychain: &K, index: u32) -> bool {
self.inner.unmark_used(&(keychain.clone(), index))
}
/// Iterates over all unused script pubkeys for a `keychain` stored in the index.
pub fn unused_spks_of_keychain(
&self, &self,
keychain: &K, keychain: &K,
range: impl RangeBounds<u32>, ) -> impl DoubleEndedIterator<Item = (u32, &Script)> {
) -> impl DoubleEndedIterator<Item = (u32, OutPoint)> + '_ { let next_index = self.last_revealed.get(keychain).map_or(0, |&v| v + 1);
let start = match range.start_bound() { let range = (keychain.clone(), u32::MIN)..(keychain.clone(), next_index);
Bound::Included(i) => Bound::Included((keychain.clone(), *i)),
Bound::Excluded(i) => Bound::Excluded((keychain.clone(), *i)),
Bound::Unbounded => Bound::Unbounded,
};
let end = match range.end_bound() {
Bound::Included(i) => Bound::Included((keychain.clone(), *i)),
Bound::Excluded(i) => Bound::Excluded((keychain.clone(), *i)),
Bound::Unbounded => Bound::Unbounded,
};
self.inner self.inner
.outputs_in_range((start, end)) .unused_spks(range)
.map(|((_, i), script)| (*i, script))
}
/// Iterates over all the [`OutPoint`] that have a `TxOut` with a script pubkey derived from
/// `keychain`.
pub fn txouts_of_keychain(
&self,
keychain: &K,
) -> impl DoubleEndedIterator<Item = (u32, OutPoint)> + '_ {
self.inner
.outputs_in_range((keychain.clone(), u32::MIN)..(keychain.clone(), u32::MAX))
.map(|((_, i), op)| (*i, op)) .map(|((_, i), op)| (*i, op))
} }
/// Returns the highest derivation index of the `keychain` where [`KeychainTxOutIndex`] has /// Returns the highest derivation index of the `keychain` where [`KeychainTxOutIndex`] has
/// found a [`TxOut`] with it's script pubkey. /// found a [`TxOut`] with it's script pubkey.
pub fn last_used_index(&self, keychain: &K) -> Option<u32> { pub fn last_used_index(&self, keychain: &K) -> Option<u32> {
self.keychain_outpoints(keychain).last().map(|(i, _)| i) self.txouts_of_keychain(keychain).last().map(|(i, _)| i)
} }
/// Returns the highest derivation index of each keychain that [`KeychainTxOutIndex`] has found /// Returns the highest derivation index of each keychain that [`KeychainTxOutIndex`] has found

View File

@@ -1,4 +1,4 @@
//! This crate is a collection of core structures for [Bitcoin Dev Kit]. //! This crate is a collection of core structures for [Bitcoin Dev Kit] (alpha release).
//! //!
//! The goal of this crate is to give wallets the mechanisms needed to: //! The goal of this crate is to give wallets the mechanisms needed to:
//! //!
@@ -12,8 +12,9 @@
//! you do it synchronously or asynchronously. If you know a fact about the blockchain, you can just //! you do it synchronously or asynchronously. If you know a fact about the blockchain, you can just
//! tell `bdk_chain`'s APIs about it, and that information will be integrated, if it can be done //! tell `bdk_chain`'s APIs about it, and that information will be integrated, if it can be done
//! consistently. //! consistently.
//! 2. Data persistence agnostic -- `bdk_chain` does not care where you cache on-chain data, what you //! 2. Error-free APIs.
//! cache or how you retrieve it from persistent storage. //! 3. Data persistence agnostic -- `bdk_chain` does not care where you cache on-chain data, what you
//! cache or how you fetch it.
//! //!
//! [Bitcoin Dev Kit]: https://bitcoindevkit.org/ //! [Bitcoin Dev Kit]: https://bitcoindevkit.org/

View File

@@ -5,10 +5,9 @@ use core::convert::Infallible;
use crate::collections::BTreeMap; use crate::collections::BTreeMap;
use crate::{BlockId, ChainOracle}; use crate::{BlockId, ChainOracle};
use alloc::sync::Arc; use alloc::sync::Arc;
use bitcoin::block::Header;
use bitcoin::BlockHash; use bitcoin::BlockHash;
/// The [`ChangeSet`] represents changes to [`LocalChain`]. /// A structure that represents changes to [`LocalChain`].
/// ///
/// The key represents the block height, and the value either represents added a new [`CheckPoint`] /// The key represents the block height, and the value either represents added a new [`CheckPoint`]
/// (if [`Some`]), or removing a [`CheckPoint`] (if [`None`]). /// (if [`Some`]), or removing a [`CheckPoint`] (if [`None`]).
@@ -40,28 +39,6 @@ impl CheckPoint {
Self(Arc::new(CPInner { block, prev: None })) Self(Arc::new(CPInner { block, prev: None }))
} }
/// Construct a checkpoint from a list of [`BlockId`]s in ascending height order.
///
/// # Errors
///
/// This method will error if any of the follow occurs:
///
/// - The `blocks` iterator is empty, in which case, the error will be `None`.
/// - The `blocks` iterator is not in ascending height order.
/// - The `blocks` iterator contains multiple [`BlockId`]s of the same height.
///
/// The error type is the last successful checkpoint constructed (if any).
pub fn from_block_ids(
block_ids: impl IntoIterator<Item = BlockId>,
) -> Result<Self, Option<Self>> {
let mut blocks = block_ids.into_iter();
let mut acc = CheckPoint::new(blocks.next().ok_or(None)?);
for id in blocks {
acc = acc.push(id).map_err(Some)?;
}
Ok(acc)
}
/// Construct a checkpoint from the given `header` and block `height`. /// Construct a checkpoint from the given `header` and block `height`.
/// ///
/// If `header` is of the genesis block, the checkpoint won't have a [`prev`] node. Otherwise, /// If `header` is of the genesis block, the checkpoint won't have a [`prev`] node. Otherwise,
@@ -150,7 +127,7 @@ impl CheckPoint {
} }
} }
/// Iterates over checkpoints backwards. /// A structure that iterates over checkpoints backwards.
pub struct CheckPointIter { pub struct CheckPointIter {
current: Option<Arc<CPInner>>, current: Option<Arc<CPInner>>,
} }
@@ -176,7 +153,7 @@ impl IntoIterator for CheckPoint {
} }
} }
/// Used to update [`LocalChain`]. /// A struct to update [`LocalChain`].
/// ///
/// This is used as input for [`LocalChain::apply_update`]. It contains the update's chain `tip` and /// This is used as input for [`LocalChain::apply_update`]. It contains the update's chain `tip` and
/// a flag `introduce_older_blocks` which signals whether this update intends to introduce missing /// a flag `introduce_older_blocks` which signals whether this update intends to introduce missing
@@ -370,95 +347,6 @@ impl LocalChain {
Ok(changeset) Ok(changeset)
} }
/// Update the chain with a given [`Header`] at `height` which you claim is connected to a existing block in the chain.
///
/// This is useful when you have a block header that you want to record as part of the chain but
/// don't necessarily know that the `prev_blockhash` is in the chain.
///
/// This will usually insert two new [`BlockId`]s into the chain: the header's block and the
/// header's `prev_blockhash` block. `connected_to` must already be in the chain but is allowed
/// to be `prev_blockhash` (in which case only one new block id will be inserted).
/// To be successful, `connected_to` must be chosen carefully so that `LocalChain`'s [update
/// rules][`apply_update`] are satisfied.
///
/// # Errors
///
/// [`ApplyHeaderError::InconsistentBlocks`] occurs if the `connected_to` block and the
/// [`Header`] is inconsistent. For example, if the `connected_to` block is the same height as
/// `header` or `prev_blockhash`, but has a different block hash. Or if the `connected_to`
/// height is greater than the header's `height`.
///
/// [`ApplyHeaderError::CannotConnect`] occurs if the internal call to [`apply_update`] fails.
///
/// [`apply_update`]: Self::apply_update
pub fn apply_header_connected_to(
&mut self,
header: &Header,
height: u32,
connected_to: BlockId,
) -> Result<ChangeSet, ApplyHeaderError> {
let this = BlockId {
height,
hash: header.block_hash(),
};
let prev = height.checked_sub(1).map(|prev_height| BlockId {
height: prev_height,
hash: header.prev_blockhash,
});
let conn = match connected_to {
// `connected_to` can be ignored if same as `this` or `prev` (duplicate)
conn if conn == this || Some(conn) == prev => None,
// this occurs if:
// - `connected_to` height is the same as `prev`, but different hash
// - `connected_to` height is the same as `this`, but different hash
// - `connected_to` height is greater than `this` (this is not allowed)
conn if conn.height >= height.saturating_sub(1) => {
return Err(ApplyHeaderError::InconsistentBlocks)
}
conn => Some(conn),
};
let update = Update {
tip: CheckPoint::from_block_ids([conn, prev, Some(this)].into_iter().flatten())
.expect("block ids must be in order"),
introduce_older_blocks: false,
};
self.apply_update(update)
.map_err(ApplyHeaderError::CannotConnect)
}
/// Update the chain with a given [`Header`] connecting it with the previous block.
///
/// This is a convenience method to call [`apply_header_connected_to`] with the `connected_to`
/// parameter being `height-1:prev_blockhash`. If there is no previous block (i.e. genesis), we
/// use the current block as `connected_to`.
///
/// [`apply_header_connected_to`]: LocalChain::apply_header_connected_to
pub fn apply_header(
&mut self,
header: &Header,
height: u32,
) -> Result<ChangeSet, CannotConnectError> {
let connected_to = match height.checked_sub(1) {
Some(prev_height) => BlockId {
height: prev_height,
hash: header.prev_blockhash,
},
None => BlockId {
height,
hash: header.block_hash(),
},
};
self.apply_header_connected_to(header, height, connected_to)
.map_err(|err| match err {
ApplyHeaderError::InconsistentBlocks => {
unreachable!("connected_to is derived from the block so is always consistent")
}
ApplyHeaderError::CannotConnect(err) => err,
})
}
/// Apply the given `changeset`. /// Apply the given `changeset`.
pub fn apply_changeset(&mut self, changeset: &ChangeSet) -> Result<(), MissingGenesisError> { pub fn apply_changeset(&mut self, changeset: &ChangeSet) -> Result<(), MissingGenesisError> {
if let Some(start_height) = changeset.keys().next().cloned() { if let Some(start_height) = changeset.keys().next().cloned() {
@@ -532,28 +420,6 @@ impl LocalChain {
Ok(changeset) Ok(changeset)
} }
/// Removes blocks from (and inclusive of) the given `block_id`.
///
/// This will remove blocks with a height equal or greater than `block_id`, but only if
/// `block_id` exists in the chain.
///
/// # Errors
///
/// This will fail with [`MissingGenesisError`] if the caller attempts to disconnect from the
/// genesis block.
pub fn disconnect_from(&mut self, block_id: BlockId) -> Result<ChangeSet, MissingGenesisError> {
if self.index.get(&block_id.height) != Some(&block_id.hash) {
return Ok(ChangeSet::default());
}
let changeset = self
.index
.range(block_id.height..)
.map(|(&height, _)| (height, None))
.collect::<ChangeSet>();
self.apply_changeset(&changeset).map(|_| changeset)
}
/// Reindex the heights in the chain from (and including) `from` height /// Reindex the heights in the chain from (and including) `from` height
fn reindex(&mut self, from: u32) { fn reindex(&mut self, from: u32) {
let _ = self.index.split_off(&from); let _ = self.index.split_off(&from);
@@ -669,30 +535,6 @@ impl core::fmt::Display for CannotConnectError {
#[cfg(feature = "std")] #[cfg(feature = "std")]
impl std::error::Error for CannotConnectError {} impl std::error::Error for CannotConnectError {}
/// The error type for [`LocalChain::apply_header_connected_to`].
#[derive(Debug, Clone, PartialEq)]
pub enum ApplyHeaderError {
/// Occurs when `connected_to` block conflicts with either the current block or previous block.
InconsistentBlocks,
/// Occurs when the update cannot connect with the original chain.
CannotConnect(CannotConnectError),
}
impl core::fmt::Display for ApplyHeaderError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
ApplyHeaderError::InconsistentBlocks => write!(
f,
"the `connected_to` block conflicts with either the current or previous block"
),
ApplyHeaderError::CannotConnect(err) => core::fmt::Display::fmt(err, f),
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for ApplyHeaderError {}
fn merge_chains( fn merge_chains(
original_tip: CheckPoint, original_tip: CheckPoint,
update_tip: CheckPoint, update_tip: CheckPoint,

View File

@@ -55,18 +55,6 @@ where
// if written successfully, take and return `self.stage` // if written successfully, take and return `self.stage`
.map(|_| Some(core::mem::take(&mut self.stage))) .map(|_| Some(core::mem::take(&mut self.stage)))
} }
/// Stages a new changeset and commits it (along with any other previously staged changes) to
/// the persistence backend
///
/// Convenience method for calling [`stage`] and then [`commit`].
///
/// [`stage`]: Self::stage
/// [`commit`]: Self::commit
pub fn stage_and_commit(&mut self, changeset: C) -> Result<Option<C>, B::WriteError> {
self.stage(changeset);
self.commit()
}
} }
/// A persistence backend for [`Persist`]. /// A persistence backend for [`Persist`].

View File

@@ -43,24 +43,18 @@ impl<D> SpkIterator<D>
where where
D: Borrow<Descriptor<DescriptorPublicKey>>, D: Borrow<Descriptor<DescriptorPublicKey>>,
{ {
/// Create a new script pubkey iterator from `descriptor`. /// Creates a new script pubkey iterator starting at 0 from a descriptor.
///
/// This iterates from derivation index 0 and stops at index 0x7FFFFFFF (as specified in
/// BIP-32). Non-wildcard descriptors will only return one script pubkey at derivation index 0.
///
/// Use [`new_with_range`](SpkIterator::new_with_range) to create an iterator with a specified
/// derivation index range.
pub fn new(descriptor: D) -> Self { pub fn new(descriptor: D) -> Self {
SpkIterator::new_with_range(descriptor, 0..=BIP32_MAX_INDEX) SpkIterator::new_with_range(descriptor, 0..=BIP32_MAX_INDEX)
} }
/// Create a new script pubkey iterator from `descriptor` and a given `range`. // Creates a new script pubkey iterator from a descriptor with a given range.
/// // If the descriptor doesn't have a wildcard, we shorten whichever range you pass in
/// Non-wildcard descriptors will only emit a single script pubkey (at derivation index 0). // to have length <= 1. This means that if you pass in 0..0 or 0..1 the range will
/// Wildcard descriptors have an end-bound of 0x7FFFFFFF (inclusive). // remain the same, but if you pass in 0..10, we'll shorten it to 0..1
/// // Also note that if the descriptor doesn't have a wildcard, passing in a range starting
/// Refer to [`new`](SpkIterator::new) for more. // from n > 0, will return an empty iterator.
pub fn new_with_range<R>(descriptor: D, range: R) -> Self pub(crate) fn new_with_range<R>(descriptor: D, range: R) -> Self
where where
R: RangeBounds<u32>, R: RangeBounds<u32>,
{ {
@@ -79,6 +73,13 @@ where
// Because `end` is exclusive, we want the maximum value to be BIP32_MAX_INDEX + 1. // Because `end` is exclusive, we want the maximum value to be BIP32_MAX_INDEX + 1.
end = end.min(BIP32_MAX_INDEX + 1); end = end.min(BIP32_MAX_INDEX + 1);
if !descriptor.borrow().has_wildcard() {
// The length of the range should be at most 1
if end != start {
end = start + 1;
}
}
Self { Self {
next_index: start, next_index: start,
end, end,
@@ -86,11 +87,6 @@ where
secp: Secp256k1::verification_only(), secp: Secp256k1::verification_only(),
} }
} }
/// Get a reference to the internal descriptor.
pub fn descriptor(&self) -> &D {
&self.descriptor
}
} }
impl<D> Iterator for SpkIterator<D> impl<D> Iterator for SpkIterator<D>
@@ -152,7 +148,7 @@ mod test {
Descriptor<DescriptorPublicKey>, Descriptor<DescriptorPublicKey>,
Descriptor<DescriptorPublicKey>, Descriptor<DescriptorPublicKey>,
) { ) {
let mut txout_index = KeychainTxOutIndex::<TestKeychain>::new(0); let mut txout_index = KeychainTxOutIndex::<TestKeychain>::default();
let secp = Secp256k1::signing_only(); let secp = Secp256k1::signing_only();
let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap(); let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
@@ -249,14 +245,6 @@ mod test {
SpkIterator::new_with_range(&no_wildcard_descriptor, 1..=2).next(), SpkIterator::new_with_range(&no_wildcard_descriptor, 1..=2).next(),
None None
); );
assert_eq!(
SpkIterator::new_with_range(&no_wildcard_descriptor, 10..11).next(),
None
);
assert_eq!(
SpkIterator::new_with_range(&no_wildcard_descriptor, 10..=10).next(),
None
);
} }
// The following dummy traits were created to test if SpkIterator is working properly. // The following dummy traits were created to test if SpkIterator is working properly.

View File

@@ -168,7 +168,9 @@ impl<I: Clone + Ord> SpkTxOutIndex<I> {
/// ///
/// Returns `None` if the `TxOut` hasn't been scanned or if nothing matching was found there. /// Returns `None` if the `TxOut` hasn't been scanned or if nothing matching was found there.
pub fn txout(&self, outpoint: OutPoint) -> Option<(&I, &TxOut)> { pub fn txout(&self, outpoint: OutPoint) -> Option<(&I, &TxOut)> {
self.txouts.get(&outpoint).map(|v| (&v.0, &v.1)) self.txouts
.get(&outpoint)
.map(|(spk_i, txout)| (spk_i, txout))
} }
/// Returns the script that has been inserted at the `index`. /// Returns the script that has been inserted at the `index`.
@@ -215,7 +217,7 @@ impl<I: Clone + Ord> SpkTxOutIndex<I> {
/// let unused_change_spks = /// let unused_change_spks =
/// txout_index.unused_spks((change_index, u32::MIN)..(change_index, u32::MAX)); /// txout_index.unused_spks((change_index, u32::MIN)..(change_index, u32::MAX));
/// ``` /// ```
pub fn unused_spks<R>(&self, range: R) -> impl DoubleEndedIterator<Item = (&I, &Script)> + Clone pub fn unused_spks<R>(&self, range: R) -> impl DoubleEndedIterator<Item = (&I, &Script)>
where where
R: RangeBounds<I>, R: RangeBounds<I>,
{ {

View File

@@ -5,25 +5,21 @@ use alloc::vec::Vec;
/// Trait that "anchors" blockchain data to a specific block of height and hash. /// Trait that "anchors" blockchain data to a specific block of height and hash.
/// ///
/// If transaction A is anchored in block B, and block B is in the best chain, we can /// [`Anchor`] implementations must be [`Ord`] by the anchor block's [`BlockId`] first.
///
/// I.e. If transaction A is anchored in block B, then if block B is in the best chain, we can
/// assume that transaction A is also confirmed in the best chain. This does not necessarily mean /// assume that transaction A is also confirmed in the best chain. This does not necessarily mean
/// that transaction A is confirmed in block B. It could also mean transaction A is confirmed in a /// that transaction A is confirmed in block B. It could also mean transaction A is confirmed in a
/// parent block of B. /// parent block of B.
/// ///
/// Every [`Anchor`] implementation must contain a [`BlockId`] parameter, and must implement
/// [`Ord`]. When implementing [`Ord`], the anchors' [`BlockId`]s should take precedence
/// over other elements inside the [`Anchor`]s for comparison purposes, i.e., you should first
/// compare the anchors' [`BlockId`]s and then care about the rest.
///
/// The example shows different types of anchors:
/// ``` /// ```
/// # use bdk_chain::local_chain::LocalChain; /// # use bdk_chain::local_chain::LocalChain;
/// # use bdk_chain::tx_graph::TxGraph; /// # use bdk_chain::tx_graph::TxGraph;
/// # use bdk_chain::BlockId; /// # use bdk_chain::BlockId;
/// # use bdk_chain::ConfirmationHeightAnchor; /// # use bdk_chain::ConfirmationHeightAnchor;
/// # use bdk_chain::ConfirmationTimeHeightAnchor;
/// # use bdk_chain::example_utils::*; /// # use bdk_chain::example_utils::*;
/// # use bitcoin::hashes::Hash; /// # use bitcoin::hashes::Hash;
///
/// // Initialize the local chain with two blocks. /// // Initialize the local chain with two blocks.
/// let chain = LocalChain::from_blocks( /// let chain = LocalChain::from_blocks(
/// [ /// [
@@ -51,7 +47,6 @@ use alloc::vec::Vec;
/// ); /// );
/// ///
/// // Insert `tx` into a `TxGraph` that uses `ConfirmationHeightAnchor` as the anchor type. /// // Insert `tx` into a `TxGraph` that uses `ConfirmationHeightAnchor` as the anchor type.
/// // This anchor records the anchor block and the confirmation height of the transaction.
/// // When a transaction is anchored with `ConfirmationHeightAnchor`, the anchor block and /// // When a transaction is anchored with `ConfirmationHeightAnchor`, the anchor block and
/// // confirmation block can be different. However, the confirmation block cannot be higher than /// // confirmation block can be different. However, the confirmation block cannot be higher than
/// // the anchor block and both blocks must be in the same chain for the anchor to be valid. /// // the anchor block and both blocks must be in the same chain for the anchor to be valid.
@@ -67,25 +62,6 @@ use alloc::vec::Vec;
/// confirmation_height: 1, /// confirmation_height: 1,
/// }, /// },
/// ); /// );
///
/// // Insert `tx` into a `TxGraph` that uses `ConfirmationTimeHeightAnchor` as the anchor type.
/// // This anchor records the anchor block, the confirmation height and time of the transaction.
/// // When a transaction is anchored with `ConfirmationTimeHeightAnchor`, the anchor block and
/// // confirmation block can be different. However, the confirmation block cannot be higher than
/// // the anchor block and both blocks must be in the same chain for the anchor to be valid.
/// let mut graph_c = TxGraph::<ConfirmationTimeHeightAnchor>::default();
/// let _ = graph_c.insert_tx(tx.clone());
/// graph_c.insert_anchor(
/// tx.txid(),
/// ConfirmationTimeHeightAnchor {
/// anchor_block: BlockId {
/// height: 2,
/// hash: Hash::hash("third".as_bytes()),
/// },
/// confirmation_height: 1,
/// confirmation_time: 123,
/// },
/// );
/// ``` /// ```
pub trait Anchor: core::fmt::Debug + Clone + Eq + PartialOrd + Ord + core::hash::Hash { pub trait Anchor: core::fmt::Debug + Clone + Eq + PartialOrd + Ord + core::hash::Hash {
/// Returns the [`BlockId`] that the associated blockchain data is "anchored" in. /// Returns the [`BlockId`] that the associated blockchain data is "anchored" in.
@@ -123,10 +99,8 @@ pub trait Append {
} }
impl<K: Ord, V> Append for BTreeMap<K, V> { impl<K: Ord, V> Append for BTreeMap<K, V> {
fn append(&mut self, other: Self) { fn append(&mut self, mut other: Self) {
// We use `extend` instead of `BTreeMap::append` due to performance issues with `append`. BTreeMap::append(self, &mut other)
// Refer to https://github.com/rust-lang/rust/issues/34666#issuecomment-675658420
BTreeMap::extend(self, other)
} }
fn is_empty(&self) -> bool { fn is_empty(&self) -> bool {
@@ -135,10 +109,8 @@ impl<K: Ord, V> Append for BTreeMap<K, V> {
} }
impl<T: Ord> Append for BTreeSet<T> { impl<T: Ord> Append for BTreeSet<T> {
fn append(&mut self, other: Self) { fn append(&mut self, mut other: Self) {
// We use `extend` instead of `BTreeMap::append` due to performance issues with `append`. BTreeSet::append(self, &mut other)
// Refer to https://github.com/rust-lang/rust/issues/34666#issuecomment-675658420
BTreeSet::extend(self, other)
} }
fn is_empty(&self) -> bool { fn is_empty(&self) -> bool {

View File

@@ -1,32 +1,12 @@
//! Module for structures that store and traverse transactions. //! Module for structures that store and traverse transactions.
//! //!
//! [`TxGraph`] contains transactions and indexes them so you can easily traverse the graph of those transactions. //! [`TxGraph`] is a monotone structure that inserts transactions and indexes the spends. The
//! `TxGraph` is *monotone* in that you can always insert a transaction -- it doesn't care whether that //! [`ChangeSet`] structure reports changes of [`TxGraph`] but can also be applied to a
//! transaction is in the current best chain or whether it conflicts with any of the //! [`TxGraph`] as well. Lastly, [`TxDescendants`] is an [`Iterator`] that traverses descendants of
//! existing transactions or what order you insert the transactions. This means that you can always //! a given transaction.
//! combine two [`TxGraph`]s together, without resulting in inconsistencies.
//! Furthermore, there is currently no way to delete a transaction.
//!
//! Transactions can be either whole or partial (i.e., transactions for which we only
//! know some outputs, which we usually call "floating outputs"; these are usually inserted
//! using the [`insert_txout`] method.).
//!
//! The graph contains transactions in the form of [`TxNode`]s. Each node contains the
//! txid, the transaction (whole or partial), the blocks it's anchored in (see the [`Anchor`]
//! documentation for more details), and the timestamp of the last time we saw
//! the transaction as unconfirmed.
//! //!
//! Conflicting transactions are allowed to coexist within a [`TxGraph`]. This is useful for //! Conflicting transactions are allowed to coexist within a [`TxGraph`]. This is useful for
//! identifying and traversing conflicts and descendants of a given transaction. Some [`TxGraph`] //! identifying and traversing conflicts and descendants of a given transaction.
//! methods only consider "canonical" (i.e., in the best chain or in mempool) transactions,
//! we decide which transactions are canonical based on anchors `last_seen_unconfirmed`;
//! see the [`try_get_chain_position`] documentation for more details.
//!
//! The [`ChangeSet`] reports changes made to a [`TxGraph`]; it can be used to either save to
//! persistent storage, or to be applied to another [`TxGraph`].
//!
//! Lastly, you can use [`TxAncestors`]/[`TxDescendants`] to traverse ancestors and descendants of
//! a given transaction, respectively.
//! //!
//! # Applying changes //! # Applying changes
//! //!
@@ -40,23 +20,20 @@
//! # use bdk_chain::example_utils::*; //! # use bdk_chain::example_utils::*;
//! # use bitcoin::Transaction; //! # use bitcoin::Transaction;
//! # let tx_a = tx_from_hex(RAW_TX_1); //! # let tx_a = tx_from_hex(RAW_TX_1);
//! let mut tx_graph: TxGraph = TxGraph::default(); //! let mut graph: TxGraph = TxGraph::default();
//! let mut another_graph: TxGraph = TxGraph::default();
//! //!
//! // insert a transaction //! // insert a transaction
//! let changeset = tx_graph.insert_tx(tx_a); //! let changeset = graph.insert_tx(tx_a);
//! //!
//! // We can restore the state of the `tx_graph` by applying all //! // the resulting changeset can be applied to another tx graph
//! // the changesets obtained by mutating the original (the order doesn't matter). //! another_graph.apply_changeset(changeset);
//! let mut restored_tx_graph: TxGraph = TxGraph::default();
//! restored_tx_graph.apply_changeset(changeset);
//!
//! assert_eq!(tx_graph, restored_tx_graph);
//! ``` //! ```
//! //!
//! A [`TxGraph`] can also be updated with another [`TxGraph`] which merges them together. //! A [`TxGraph`] can also be updated with another [`TxGraph`].
//! //!
//! ``` //! ```
//! # use bdk_chain::{Append, BlockId}; //! # use bdk_chain::BlockId;
//! # use bdk_chain::tx_graph::TxGraph; //! # use bdk_chain::tx_graph::TxGraph;
//! # use bdk_chain::example_utils::*; //! # use bdk_chain::example_utils::*;
//! # use bitcoin::Transaction; //! # use bitcoin::Transaction;
@@ -72,8 +49,6 @@
//! let changeset = graph.apply_update(update); //! let changeset = graph.apply_update(update);
//! assert!(changeset.is_empty()); //! assert!(changeset.is_empty());
//! ``` //! ```
//! [`try_get_chain_position`]: TxGraph::try_get_chain_position
//! [`insert_txout`]: TxGraph::insert_txout
use crate::{ use crate::{
collections::*, keychain::Balance, local_chain::LocalChain, Anchor, Append, BlockId, collections::*, keychain::Balance, local_chain::LocalChain, Anchor, Append, BlockId,
@@ -116,7 +91,7 @@ impl<A> Default for TxGraph<A> {
} }
} }
/// A transaction node in the [`TxGraph`]. /// An outward-facing view of a (transaction) node in the [`TxGraph`].
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct TxNode<'a, T, A> { pub struct TxNode<'a, T, A> {
/// Txid of the transaction. /// Txid of the transaction.
@@ -153,7 +128,7 @@ impl Default for TxNodeInternal {
} }
} }
/// A transaction that is included in the chain, or is still in mempool. /// An outwards-facing view of a transaction that is part of the *best chain*'s history.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct CanonicalTx<'a, T, A> { pub struct CanonicalTx<'a, T, A> {
/// How the transaction is observed as (confirmed or unconfirmed). /// How the transaction is observed as (confirmed or unconfirmed).
@@ -340,7 +315,7 @@ impl<A> TxGraph<A> {
/// The transactions spending from this output. /// The transactions spending from this output.
/// ///
/// [`TxGraph`] allows conflicting transactions within the graph. Obviously the transactions in /// `TxGraph` allows conflicting transactions within the graph. Obviously the transactions in
/// the returned set will never be in the same active-chain. /// the returned set will never be in the same active-chain.
pub fn outspends(&self, outpoint: OutPoint) -> &HashSet<Txid> { pub fn outspends(&self, outpoint: OutPoint) -> &HashSet<Txid> {
self.spends.get(&outpoint).unwrap_or(&self.empty_outspends) self.spends.get(&outpoint).unwrap_or(&self.empty_outspends)
@@ -454,21 +429,6 @@ impl<A> TxGraph<A> {
} }
} }
impl<A: Clone + Ord> TxGraph<A> {
/// Transform the [`TxGraph`] to have [`Anchor`]s of another type.
///
/// This takes in a closure of signature `FnMut(A) -> A2` which is called for each [`Anchor`] to
/// transform it.
pub fn map_anchors<A2: Clone + Ord, F>(self, f: F) -> TxGraph<A2>
where
F: FnMut(A) -> A2,
{
let mut new_graph = TxGraph::<A2>::default();
new_graph.apply_changeset(self.initial_changeset().map_anchors(f));
new_graph
}
}
impl<A: Clone + Ord> TxGraph<A> { impl<A: Clone + Ord> TxGraph<A> {
/// Construct a new [`TxGraph`] from a list of transactions. /// Construct a new [`TxGraph`] from a list of transactions.
pub fn new(txs: impl IntoIterator<Item = Transaction>) -> Self { pub fn new(txs: impl IntoIterator<Item = Transaction>) -> Self {
@@ -515,7 +475,7 @@ impl<A: Clone + Ord> TxGraph<A> {
/// Batch insert unconfirmed transactions. /// Batch insert unconfirmed transactions.
/// ///
/// Items of `txs` are tuples containing the transaction and a *last seen* timestamp. The /// Items of `txs` are tuples containing the transaction and a *last seen* timestamp. The
/// *last seen* communicates when the transaction is last seen in mempool which is used for /// *last seen* communicates when the transaction is last seen in the mempool which is used for
/// conflict-resolution (refer to [`TxGraph::insert_seen_at`] for details). /// conflict-resolution (refer to [`TxGraph::insert_seen_at`] for details).
pub fn batch_insert_unconfirmed( pub fn batch_insert_unconfirmed(
&mut self, &mut self,
@@ -599,7 +559,10 @@ impl<A: Clone + Ord> TxGraph<A> {
} }
for (outpoint, txout) in changeset.txouts { for (outpoint, txout) in changeset.txouts {
let tx_entry = self.txs.entry(outpoint.txid).or_default(); let tx_entry = self
.txs
.entry(outpoint.txid)
.or_insert_with(Default::default);
match tx_entry { match tx_entry {
(TxNodeInternal::Whole(_), _, _) => { /* do nothing since we already have full tx */ (TxNodeInternal::Whole(_), _, _) => { /* do nothing since we already have full tx */
@@ -612,13 +575,13 @@ impl<A: Clone + Ord> TxGraph<A> {
for (anchor, txid) in changeset.anchors { for (anchor, txid) in changeset.anchors {
if self.anchors.insert((anchor.clone(), txid)) { if self.anchors.insert((anchor.clone(), txid)) {
let (_, anchors, _) = self.txs.entry(txid).or_default(); let (_, anchors, _) = self.txs.entry(txid).or_insert_with(Default::default);
anchors.insert(anchor); anchors.insert(anchor);
} }
} }
for (txid, new_last_seen) in changeset.last_seen { for (txid, new_last_seen) in changeset.last_seen {
let (_, _, last_seen) = self.txs.entry(txid).or_default(); let (_, _, last_seen) = self.txs.entry(txid).or_insert_with(Default::default);
if new_last_seen > *last_seen { if new_last_seen > *last_seen {
*last_seen = new_last_seen; *last_seen = new_last_seen;
} }
@@ -745,20 +708,8 @@ impl<A: Anchor> TxGraph<A> {
/// Get the position of the transaction in `chain` with tip `chain_tip`. /// Get the position of the transaction in `chain` with tip `chain_tip`.
/// ///
/// Chain data is fetched from `chain`, a [`ChainOracle`] implementation. /// If the given transaction of `txid` does not exist in the chain of `chain_tip`, `None` is
/// /// returned.
/// This method returns `Ok(None)` if the transaction is not found in the chain, and no longer
/// belongs in the mempool. The following factors are used to approximate whether an
/// unconfirmed transaction exists in the mempool (not evicted):
///
/// 1. Unconfirmed transactions that conflict with confirmed transactions are evicted.
/// 2. Unconfirmed transactions that spend from transactions that are evicted, are also
/// evicted.
/// 3. Given two conflicting unconfirmed transactions, the transaction with the lower
/// `last_seen_unconfirmed` parameter is evicted. A transaction's `last_seen_unconfirmed`
/// parameter is the max of all it's descendants' `last_seen_unconfirmed` parameters. If the
/// final `last_seen_unconfirmed`s are the same, the transaction with the lower `txid` (by
/// lexicographical order) is evicted.
/// ///
/// # Error /// # Error
/// ///
@@ -784,7 +735,7 @@ impl<A: Anchor> TxGraph<A> {
} }
} }
// The tx is not anchored to a block in the best chain, which means that it // The tx is not anchored to a block which is in the best chain, which means that it
// might be in mempool, or it might have been dropped already. // might be in mempool, or it might have been dropped already.
// Let's check conflicts to find out! // Let's check conflicts to find out!
let tx = match tx_node { let tx = match tx_node {
@@ -994,8 +945,7 @@ impl<A: Anchor> TxGraph<A> {
/// (`OI`) for convenience. If `OI` is not necessary, the caller can use `()`, or /// (`OI`) for convenience. If `OI` is not necessary, the caller can use `()`, or
/// [`Iterator::enumerate`] over a list of [`OutPoint`]s. /// [`Iterator::enumerate`] over a list of [`OutPoint`]s.
/// ///
/// Floating outputs (i.e., outputs for which we don't have the full transaction in the graph) /// Floating outputs are ignored.
/// are ignored.
/// ///
/// # Error /// # Error
/// ///
@@ -1186,9 +1136,9 @@ impl<A: Anchor> TxGraph<A> {
} }
} }
/// The [`ChangeSet`] represents changes to a [`TxGraph`]. /// A structure that represents changes to a [`TxGraph`].
/// ///
/// Since [`TxGraph`] is monotone, the "changeset" can only contain transactions to be added and /// Since [`TxGraph`] is monotone "changeset" can only contain transactions to be added and
/// not removed. /// not removed.
/// ///
/// Refer to [module-level documentation] for more. /// Refer to [module-level documentation] for more.
@@ -1230,6 +1180,11 @@ impl<A> Default for ChangeSet<A> {
} }
impl<A> ChangeSet<A> { impl<A> ChangeSet<A> {
/// Returns true if the [`ChangeSet`] is empty (no transactions or txouts).
pub fn is_empty(&self) -> bool {
self.txs.is_empty() && self.txouts.is_empty()
}
/// Iterates over all outpoints contained within [`ChangeSet`]. /// Iterates over all outpoints contained within [`ChangeSet`].
pub fn txouts(&self) -> impl Iterator<Item = (OutPoint, &TxOut)> { pub fn txouts(&self) -> impl Iterator<Item = (OutPoint, &TxOut)> {
self.txs self.txs
@@ -1284,12 +1239,10 @@ impl<A> ChangeSet<A> {
} }
impl<A: Ord> Append for ChangeSet<A> { impl<A: Ord> Append for ChangeSet<A> {
fn append(&mut self, other: Self) { fn append(&mut self, mut other: Self) {
// We use `extend` instead of `BTreeMap::append` due to performance issues with `append`. self.txs.append(&mut other.txs);
// Refer to https://github.com/rust-lang/rust/issues/34666#issuecomment-675658420 self.txouts.append(&mut other.txouts);
self.txs.extend(other.txs); self.anchors.append(&mut other.anchors);
self.txouts.extend(other.txouts);
self.anchors.extend(other.anchors);
// last_seen timestamps should only increase // last_seen timestamps should only increase
self.last_seen.extend( self.last_seen.extend(
@@ -1309,26 +1262,6 @@ impl<A: Ord> Append for ChangeSet<A> {
} }
} }
impl<A: Ord> ChangeSet<A> {
/// Transform the [`ChangeSet`] to have [`Anchor`]s of another type.
///
/// This takes in a closure of signature `FnMut(A) -> A2` which is called for each [`Anchor`] to
/// transform it.
pub fn map_anchors<A2: Ord, F>(self, mut f: F) -> ChangeSet<A2>
where
F: FnMut(A) -> A2,
{
ChangeSet {
txs: self.txs,
txouts: self.txouts,
anchors: BTreeSet::<(A2, Txid)>::from_iter(
self.anchors.into_iter().map(|(a, txid)| (f(a), txid)),
),
last_seen: self.last_seen,
}
}
}
impl<A> AsRef<TxGraph<A>> for TxGraph<A> { impl<A> AsRef<TxGraph<A>> for TxGraph<A> {
fn as_ref(&self) -> &TxGraph<A> { fn as_ref(&self) -> &TxGraph<A> {
self self
@@ -1339,7 +1272,7 @@ impl<A> AsRef<TxGraph<A>> for TxGraph<A> {
/// ///
/// The iterator excludes partial transactions. /// The iterator excludes partial transactions.
/// ///
/// Returned by the [`walk_ancestors`] method of [`TxGraph`]. /// This `struct` is created by the [`walk_ancestors`] method of [`TxGraph`].
/// ///
/// [`walk_ancestors`]: TxGraph::walk_ancestors /// [`walk_ancestors`]: TxGraph::walk_ancestors
pub struct TxAncestors<'g, A, F> { pub struct TxAncestors<'g, A, F> {
@@ -1457,7 +1390,7 @@ where
/// An iterator that traverses transaction descendants. /// An iterator that traverses transaction descendants.
/// ///
/// Returned by the [`walk_descendants`] method of [`TxGraph`]. /// This `struct` is created by the [`walk_descendants`] method of [`TxGraph`].
/// ///
/// [`walk_descendants`]: TxGraph::walk_descendants /// [`walk_descendants`]: TxGraph::walk_descendants
pub struct TxDescendants<'g, A, F> { pub struct TxDescendants<'g, A, F> {

View File

@@ -1,5 +1,4 @@
mod tx_template; mod tx_template;
#[allow(unused_imports)]
pub use tx_template::*; pub use tx_template::*;
#[allow(unused_macros)] #[allow(unused_macros)]

View File

@@ -1,7 +1,7 @@
use rand::distributions::{Alphanumeric, DistString}; use rand::distributions::{Alphanumeric, DistString};
use std::collections::HashMap; use std::collections::HashMap;
use bdk_chain::{tx_graph::TxGraph, Anchor, SpkTxOutIndex}; use bdk_chain::{tx_graph::TxGraph, BlockId, SpkTxOutIndex};
use bitcoin::{ use bitcoin::{
locktime::absolute::LockTime, secp256k1::Secp256k1, OutPoint, ScriptBuf, Sequence, Transaction, locktime::absolute::LockTime, secp256k1::Secp256k1, OutPoint, ScriptBuf, Sequence, Transaction,
TxIn, TxOut, Txid, Witness, TxIn, TxOut, Txid, Witness,
@@ -49,11 +49,11 @@ impl TxOutTemplate {
} }
#[allow(dead_code)] #[allow(dead_code)]
pub fn init_graph<'a, A: Anchor + Clone + 'a>( pub fn init_graph<'a>(
tx_templates: impl IntoIterator<Item = &'a TxTemplate<'a, A>>, tx_templates: impl IntoIterator<Item = &'a TxTemplate<'a, BlockId>>,
) -> (TxGraph<A>, SpkTxOutIndex<u32>, HashMap<&'a str, Txid>) { ) -> (TxGraph<BlockId>, SpkTxOutIndex<u32>, HashMap<&'a str, Txid>) {
let (descriptor, _) = Descriptor::parse_descriptor(&Secp256k1::signing_only(), "tr(tprv8ZgxMBicQKsPd3krDUsBAmtnRsK3rb8u5yi1zhQgMhF1tR8MW7xfE4rnrbbsrbPR52e7rKapu6ztw1jXveJSCGHEriUGZV7mCe88duLp5pj/86'/1'/0'/0/*)").unwrap(); let (descriptor, _) = Descriptor::parse_descriptor(&Secp256k1::signing_only(), "tr(tprv8ZgxMBicQKsPd3krDUsBAmtnRsK3rb8u5yi1zhQgMhF1tR8MW7xfE4rnrbbsrbPR52e7rKapu6ztw1jXveJSCGHEriUGZV7mCe88duLp5pj/86'/1'/0'/0/*)").unwrap();
let mut graph = TxGraph::<A>::default(); let mut graph = TxGraph::<BlockId>::default();
let mut spk_index = SpkTxOutIndex::default(); let mut spk_index = SpkTxOutIndex::default();
(0..10).for_each(|index| { (0..10).for_each(|index| {
spk_index.insert_spk( spk_index.insert_spk(
@@ -126,7 +126,7 @@ pub fn init_graph<'a, A: Anchor + Clone + 'a>(
spk_index.scan(&tx); spk_index.scan(&tx);
let _ = graph.insert_tx(tx.clone()); let _ = graph.insert_tx(tx.clone());
for anchor in tx_tmp.anchors.iter() { for anchor in tx_tmp.anchors.iter() {
let _ = graph.insert_anchor(tx.txid(), anchor.clone()); let _ = graph.insert_anchor(tx.txid(), *anchor);
} }
if let Some(seen_at) = tx_tmp.last_seen { if let Some(seen_at) = tx_tmp.last_seen {
let _ = graph.insert_seen_at(tx.txid(), seen_at); let _ = graph.insert_seen_at(tx.txid(), seen_at);

View File

@@ -27,10 +27,9 @@ fn insert_relevant_txs() {
let spk_0 = descriptor.at_derivation_index(0).unwrap().script_pubkey(); let spk_0 = descriptor.at_derivation_index(0).unwrap().script_pubkey();
let spk_1 = descriptor.at_derivation_index(9).unwrap().script_pubkey(); let spk_1 = descriptor.at_derivation_index(9).unwrap().script_pubkey();
let mut graph = IndexedTxGraph::<ConfirmationHeightAnchor, KeychainTxOutIndex<()>>::new( let mut graph = IndexedTxGraph::<ConfirmationHeightAnchor, KeychainTxOutIndex<()>>::default();
KeychainTxOutIndex::new(10),
);
graph.index.add_keychain((), descriptor); graph.index.add_keychain((), descriptor);
graph.index.set_lookahead(&(), 10);
let tx_a = Transaction { let tx_a = Transaction {
output: vec![ output: vec![
@@ -119,12 +118,12 @@ fn test_list_owned_txouts() {
let (desc_1, _) = Descriptor::parse_descriptor(&Secp256k1::signing_only(), "tr(tprv8ZgxMBicQKsPd3krDUsBAmtnRsK3rb8u5yi1zhQgMhF1tR8MW7xfE4rnrbbsrbPR52e7rKapu6ztw1jXveJSCGHEriUGZV7mCe88duLp5pj/86'/1'/0'/0/*)").unwrap(); let (desc_1, _) = Descriptor::parse_descriptor(&Secp256k1::signing_only(), "tr(tprv8ZgxMBicQKsPd3krDUsBAmtnRsK3rb8u5yi1zhQgMhF1tR8MW7xfE4rnrbbsrbPR52e7rKapu6ztw1jXveJSCGHEriUGZV7mCe88duLp5pj/86'/1'/0'/0/*)").unwrap();
let (desc_2, _) = Descriptor::parse_descriptor(&Secp256k1::signing_only(), "tr(tprv8ZgxMBicQKsPd3krDUsBAmtnRsK3rb8u5yi1zhQgMhF1tR8MW7xfE4rnrbbsrbPR52e7rKapu6ztw1jXveJSCGHEriUGZV7mCe88duLp5pj/86'/1'/0'/1/*)").unwrap(); let (desc_2, _) = Descriptor::parse_descriptor(&Secp256k1::signing_only(), "tr(tprv8ZgxMBicQKsPd3krDUsBAmtnRsK3rb8u5yi1zhQgMhF1tR8MW7xfE4rnrbbsrbPR52e7rKapu6ztw1jXveJSCGHEriUGZV7mCe88duLp5pj/86'/1'/0'/1/*)").unwrap();
let mut graph = IndexedTxGraph::<ConfirmationHeightAnchor, KeychainTxOutIndex<String>>::new( let mut graph =
KeychainTxOutIndex::new(10), IndexedTxGraph::<ConfirmationHeightAnchor, KeychainTxOutIndex<String>>::default();
);
graph.index.add_keychain("keychain_1".into(), desc_1); graph.index.add_keychain("keychain_1".into(), desc_1);
graph.index.add_keychain("keychain_2".into(), desc_2); graph.index.add_keychain("keychain_2".into(), desc_2);
graph.index.set_lookahead_for_all(10);
// Get trusted and untrusted addresses // Get trusted and untrusted addresses

View File

@@ -18,14 +18,12 @@ enum TestKeychain {
Internal, Internal,
} }
fn init_txout_index( fn init_txout_index() -> (
lookahead: u32,
) -> (
bdk_chain::keychain::KeychainTxOutIndex<TestKeychain>, bdk_chain::keychain::KeychainTxOutIndex<TestKeychain>,
Descriptor<DescriptorPublicKey>, Descriptor<DescriptorPublicKey>,
Descriptor<DescriptorPublicKey>, Descriptor<DescriptorPublicKey>,
) { ) {
let mut txout_index = bdk_chain::keychain::KeychainTxOutIndex::<TestKeychain>::new(lookahead); let mut txout_index = bdk_chain::keychain::KeychainTxOutIndex::<TestKeychain>::default();
let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only(); let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only();
let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap(); let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
@@ -48,7 +46,7 @@ fn spk_at_index(descriptor: &Descriptor<DescriptorPublicKey>, index: u32) -> Scr
fn test_set_all_derivation_indices() { fn test_set_all_derivation_indices() {
use bdk_chain::indexed_tx_graph::Indexer; use bdk_chain::indexed_tx_graph::Indexer;
let (mut txout_index, _, _) = init_txout_index(0); let (mut txout_index, _, _) = init_txout_index();
let derive_to: BTreeMap<_, _> = let derive_to: BTreeMap<_, _> =
[(TestKeychain::External, 12), (TestKeychain::Internal, 24)].into(); [(TestKeychain::External, 12), (TestKeychain::Internal, 24)].into();
assert_eq!( assert_eq!(
@@ -66,10 +64,19 @@ fn test_set_all_derivation_indices() {
#[test] #[test]
fn test_lookahead() { fn test_lookahead() {
let (mut txout_index, external_desc, internal_desc) = init_txout_index(10); let (mut txout_index, external_desc, internal_desc) = init_txout_index();
// ensure it does not break anything if lookahead is set multiple times
(0..=10).for_each(|lookahead| txout_index.set_lookahead(&TestKeychain::External, lookahead));
(0..=20)
.filter(|v| v % 2 == 0)
.for_each(|lookahead| txout_index.set_lookahead(&TestKeychain::Internal, lookahead));
assert_eq!(txout_index.inner().all_spks().len(), 30);
// given: // given:
// - external lookahead set to 10 // - external lookahead set to 10
// - internal lookahead set to 20
// when: // when:
// - set external derivation index to value higher than last, but within the lookahead value // - set external derivation index to value higher than last, but within the lookahead value
// expect: // expect:
@@ -90,37 +97,37 @@ fn test_lookahead() {
assert_eq!( assert_eq!(
txout_index.inner().all_spks().len(), txout_index.inner().all_spks().len(),
10 /* external lookahead */ + 10 /* external lookahead */ +
10 /* internal lookahead */ + 20 /* internal lookahead */ +
index as usize + 1 /* `derived` count */ index as usize + 1 /* `derived` count */
); );
assert_eq!( assert_eq!(
txout_index txout_index
.revealed_keychain_spks(&TestKeychain::External) .revealed_spks_of_keychain(&TestKeychain::External)
.count(), .count(),
index as usize + 1, index as usize + 1,
); );
assert_eq!( assert_eq!(
txout_index txout_index
.revealed_keychain_spks(&TestKeychain::Internal) .revealed_spks_of_keychain(&TestKeychain::Internal)
.count(), .count(),
0, 0,
); );
assert_eq!( assert_eq!(
txout_index txout_index
.unused_keychain_spks(&TestKeychain::External) .unused_spks_of_keychain(&TestKeychain::External)
.count(), .count(),
index as usize + 1, index as usize + 1,
); );
assert_eq!( assert_eq!(
txout_index txout_index
.unused_keychain_spks(&TestKeychain::Internal) .unused_spks_of_keychain(&TestKeychain::Internal)
.count(), .count(),
0, 0,
); );
} }
// given: // given:
// - internal lookahead is 10 // - internal lookahead is 20
// - internal derivation index is `None` // - internal derivation index is `None`
// when: // when:
// - derivation index is set ahead of current derivation index + lookahead // - derivation index is set ahead of current derivation index + lookahead
@@ -141,13 +148,13 @@ fn test_lookahead() {
assert_eq!( assert_eq!(
txout_index.inner().all_spks().len(), txout_index.inner().all_spks().len(),
10 /* external lookahead */ + 10 /* external lookahead */ +
10 /* internal lookahead */ + 20 /* internal lookahead */ +
20 /* external stored index count */ + 20 /* external stored index count */ +
25 /* internal stored index count */ 25 /* internal stored index count */
); );
assert_eq!( assert_eq!(
txout_index txout_index
.revealed_keychain_spks(&TestKeychain::Internal) .revealed_spks_of_keychain(&TestKeychain::Internal)
.count(), .count(),
25, 25,
); );
@@ -199,13 +206,13 @@ fn test_lookahead() {
); );
assert_eq!( assert_eq!(
txout_index txout_index
.revealed_keychain_spks(&TestKeychain::External) .revealed_spks_of_keychain(&TestKeychain::External)
.count(), .count(),
last_external_index as usize + 1, last_external_index as usize + 1,
); );
assert_eq!( assert_eq!(
txout_index txout_index
.revealed_keychain_spks(&TestKeychain::Internal) .revealed_spks_of_keychain(&TestKeychain::Internal)
.count(), .count(),
last_internal_index as usize + 1, last_internal_index as usize + 1,
); );
@@ -219,7 +226,8 @@ fn test_lookahead() {
// - last used index should change as expected // - last used index should change as expected
#[test] #[test]
fn test_scan_with_lookahead() { fn test_scan_with_lookahead() {
let (mut txout_index, external_desc, _) = init_txout_index(10); let (mut txout_index, external_desc, _) = init_txout_index();
txout_index.set_lookahead_for_all(10);
let spks: BTreeMap<u32, ScriptBuf> = [0, 10, 20, 30] let spks: BTreeMap<u32, ScriptBuf> = [0, 10, 20, 30]
.into_iter() .into_iter()
@@ -273,7 +281,7 @@ fn test_scan_with_lookahead() {
#[test] #[test]
#[rustfmt::skip] #[rustfmt::skip]
fn test_wildcard_derivations() { fn test_wildcard_derivations() {
let (mut txout_index, external_desc, _) = init_txout_index(0); let (mut txout_index, external_desc, _) = init_txout_index();
let external_spk_0 = external_desc.at_derivation_index(0).unwrap().script_pubkey(); let external_spk_0 = external_desc.at_derivation_index(0).unwrap().script_pubkey();
let external_spk_16 = external_desc.at_derivation_index(16).unwrap().script_pubkey(); let external_spk_16 = external_desc.at_derivation_index(16).unwrap().script_pubkey();
let external_spk_26 = external_desc.at_derivation_index(26).unwrap().script_pubkey(); let external_spk_26 = external_desc.at_derivation_index(26).unwrap().script_pubkey();
@@ -305,7 +313,7 @@ fn test_wildcard_derivations() {
(0..=15) (0..=15)
.chain([17, 20, 23]) .chain([17, 20, 23])
.for_each(|index| assert!(txout_index.mark_used(TestKeychain::External, index))); .for_each(|index| assert!(txout_index.mark_used(&TestKeychain::External, index)));
assert_eq!(txout_index.next_index(&TestKeychain::External), (26, true)); assert_eq!(txout_index.next_index(&TestKeychain::External), (26, true));
@@ -321,7 +329,7 @@ fn test_wildcard_derivations() {
// - Use all the derived till 26. // - Use all the derived till 26.
// - next_unused() = ((27, <spk>), keychain::ChangeSet) // - next_unused() = ((27, <spk>), keychain::ChangeSet)
(0..=26).for_each(|index| { (0..=26).for_each(|index| {
txout_index.mark_used(TestKeychain::External, index); txout_index.mark_used(&TestKeychain::External, index);
}); });
let (spk, changeset) = txout_index.next_unused_spk(&TestKeychain::External); let (spk, changeset) = txout_index.next_unused_spk(&TestKeychain::External);
@@ -331,7 +339,7 @@ fn test_wildcard_derivations() {
#[test] #[test]
fn test_non_wildcard_derivations() { fn test_non_wildcard_derivations() {
let mut txout_index = KeychainTxOutIndex::<TestKeychain>::new(0); let mut txout_index = KeychainTxOutIndex::<TestKeychain>::default();
let secp = bitcoin::secp256k1::Secp256k1::signing_only(); let secp = bitcoin::secp256k1::Secp256k1::signing_only();
let (no_wildcard_descriptor, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "wpkh([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/0)").unwrap(); let (no_wildcard_descriptor, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "wpkh([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/0)").unwrap();
@@ -364,7 +372,7 @@ fn test_non_wildcard_derivations() {
// - derive new and next unused should return the old script // - derive new and next unused should return the old script
// - store_up_to should not panic and return empty changeset // - store_up_to should not panic and return empty changeset
assert_eq!(txout_index.next_index(&TestKeychain::External), (0, false)); assert_eq!(txout_index.next_index(&TestKeychain::External), (0, false));
txout_index.mark_used(TestKeychain::External, 0); txout_index.mark_used(&TestKeychain::External, 0);
let (spk, changeset) = txout_index.reveal_next_spk(&TestKeychain::External); let (spk, changeset) = txout_index.reveal_next_spk(&TestKeychain::External);
assert_eq!(spk, (0, external_spk.as_script())); assert_eq!(spk, (0, external_spk.as_script()));
@@ -381,108 +389,8 @@ fn test_non_wildcard_derivations() {
// we check that spks_of_keychain returns a SpkIterator with just one element // we check that spks_of_keychain returns a SpkIterator with just one element
assert_eq!( assert_eq!(
txout_index txout_index
.revealed_keychain_spks(&TestKeychain::External) .spks_of_keychain(&TestKeychain::External)
.count(), .count(),
1, 1,
); );
} }
/// Check that calling `lookahead_to_target` stores the expected spks.
#[test]
fn lookahead_to_target() {
#[derive(Default)]
struct TestCase {
/// Global lookahead value.
lookahead: u32,
/// Last revealed index for external keychain.
external_last_revealed: Option<u32>,
/// Last revealed index for internal keychain.
internal_last_revealed: Option<u32>,
/// Call `lookahead_to_target(External, u32)`.
external_target: Option<u32>,
/// Call `lookahead_to_target(Internal, u32)`.
internal_target: Option<u32>,
}
let test_cases = &[
TestCase {
lookahead: 0,
external_target: Some(100),
..Default::default()
},
TestCase {
lookahead: 10,
internal_target: Some(99),
..Default::default()
},
TestCase {
lookahead: 100,
internal_target: Some(9),
external_target: Some(10),
..Default::default()
},
TestCase {
lookahead: 12,
external_last_revealed: Some(2),
internal_last_revealed: Some(2),
internal_target: Some(15),
external_target: Some(13),
},
TestCase {
lookahead: 13,
external_last_revealed: Some(100),
internal_last_revealed: Some(21),
internal_target: Some(120),
external_target: Some(130),
},
];
for t in test_cases {
let (mut index, _, _) = init_txout_index(t.lookahead);
if let Some(last_revealed) = t.external_last_revealed {
let _ = index.reveal_to_target(&TestKeychain::External, last_revealed);
}
if let Some(last_revealed) = t.internal_last_revealed {
let _ = index.reveal_to_target(&TestKeychain::Internal, last_revealed);
}
let keychain_test_cases = [
(
TestKeychain::External,
t.external_last_revealed,
t.external_target,
),
(
TestKeychain::Internal,
t.internal_last_revealed,
t.internal_target,
),
];
for (keychain, last_revealed, target) in keychain_test_cases {
if let Some(target) = target {
let original_last_stored_index = match last_revealed {
Some(last_revealed) => Some(last_revealed + t.lookahead),
None => t.lookahead.checked_sub(1),
};
let exp_last_stored_index = match original_last_stored_index {
Some(original_last_stored_index) => {
Ord::max(target, original_last_stored_index)
}
None => target,
};
index.lookahead_to_target(&keychain, target);
let keys = index
.inner()
.all_spks()
.range((keychain.clone(), 0)..=(keychain.clone(), u32::MAX))
.map(|(k, _)| k.clone())
.collect::<Vec<_>>();
let exp_keys = core::iter::repeat(keychain)
.zip(0_u32..=exp_last_stored_index)
.collect::<Vec<_>>();
assert_eq!(keys, exp_keys);
}
}
}
}

View File

@@ -1,11 +1,7 @@
use bdk_chain::{ use bdk_chain::local_chain::{
local_chain::{ AlterCheckPointError, CannotConnectError, ChangeSet, LocalChain, Update,
AlterCheckPointError, ApplyHeaderError, CannotConnectError, ChangeSet, CheckPoint,
LocalChain, MissingGenesisError, Update,
},
BlockId,
}; };
use bitcoin::{block::Header, hashes::Hash, BlockHash}; use bitcoin::BlockHash;
#[macro_use] #[macro_use]
mod common; mod common;
@@ -292,27 +288,6 @@ fn update_local_chain() {
], ],
}, },
}, },
// Allow update that is shorter than original chain
// | 0 | 1 | 2 | 3 | 4 | 5
// chain | A C D E F
// update | A C D'
TestLocalChain {
name: "allow update that is shorter than original chain",
chain: local_chain![(0, h!("_")), (2, h!("C")), (3, h!("D")), (4, h!("E")), (5, h!("F"))],
update: chain_update![(0, h!("_")), (2, h!("C")), (3, h!("D'"))],
exp: ExpectedResult::Ok {
changeset: &[
(3, Some(h!("D'"))),
(4, None),
(5, None),
],
init_changeset: &[
(0, Some(h!("_"))),
(2, Some(h!("C"))),
(3, Some(h!("D'"))),
],
},
},
] ]
.into_iter() .into_iter()
.for_each(TestLocalChain::run); .for_each(TestLocalChain::run);
@@ -375,307 +350,3 @@ fn local_chain_insert_block() {
assert_eq!(chain, t.expected_final, "[{}] unexpected final chain", i,); assert_eq!(chain, t.expected_final, "[{}] unexpected final chain", i,);
} }
} }
#[test]
fn local_chain_disconnect_from() {
struct TestCase {
name: &'static str,
original: LocalChain,
disconnect_from: (u32, BlockHash),
exp_result: Result<ChangeSet, MissingGenesisError>,
exp_final: LocalChain,
}
let test_cases = [
TestCase {
name: "try_replace_genesis_should_fail",
original: local_chain![(0, h!("_"))],
disconnect_from: (0, h!("_")),
exp_result: Err(MissingGenesisError),
exp_final: local_chain![(0, h!("_"))],
},
TestCase {
name: "try_replace_genesis_should_fail_2",
original: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
disconnect_from: (0, h!("_")),
exp_result: Err(MissingGenesisError),
exp_final: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
},
TestCase {
name: "from_does_not_exist",
original: local_chain![(0, h!("_")), (3, h!("C"))],
disconnect_from: (2, h!("B")),
exp_result: Ok(ChangeSet::default()),
exp_final: local_chain![(0, h!("_")), (3, h!("C"))],
},
TestCase {
name: "from_has_different_blockhash",
original: local_chain![(0, h!("_")), (2, h!("B"))],
disconnect_from: (2, h!("not_B")),
exp_result: Ok(ChangeSet::default()),
exp_final: local_chain![(0, h!("_")), (2, h!("B"))],
},
TestCase {
name: "disconnect_one",
original: local_chain![(0, h!("_")), (2, h!("B"))],
disconnect_from: (2, h!("B")),
exp_result: Ok(ChangeSet::from_iter([(2, None)])),
exp_final: local_chain![(0, h!("_"))],
},
TestCase {
name: "disconnect_three",
original: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C")), (4, h!("D"))],
disconnect_from: (2, h!("B")),
exp_result: Ok(ChangeSet::from_iter([(2, None), (3, None), (4, None)])),
exp_final: local_chain![(0, h!("_"))],
},
];
for (i, t) in test_cases.into_iter().enumerate() {
println!("Case {}: {}", i, t.name);
let mut chain = t.original;
let result = chain.disconnect_from(t.disconnect_from.into());
assert_eq!(
result, t.exp_result,
"[{}:{}] unexpected changeset result",
i, t.name
);
assert_eq!(
chain, t.exp_final,
"[{}:{}] unexpected final chain",
i, t.name
);
}
}
#[test]
fn checkpoint_from_block_ids() {
struct TestCase<'a> {
name: &'a str,
blocks: &'a [(u32, BlockHash)],
exp_result: Result<(), Option<(u32, BlockHash)>>,
}
let test_cases = [
TestCase {
name: "in_order",
blocks: &[(0, h!("A")), (1, h!("B")), (3, h!("D"))],
exp_result: Ok(()),
},
TestCase {
name: "with_duplicates",
blocks: &[(1, h!("B")), (2, h!("C")), (2, h!("C'"))],
exp_result: Err(Some((2, h!("C")))),
},
TestCase {
name: "not_in_order",
blocks: &[(1, h!("B")), (3, h!("D")), (2, h!("C"))],
exp_result: Err(Some((3, h!("D")))),
},
TestCase {
name: "empty",
blocks: &[],
exp_result: Err(None),
},
TestCase {
name: "single",
blocks: &[(21, h!("million"))],
exp_result: Ok(()),
},
];
for (i, t) in test_cases.into_iter().enumerate() {
println!("running test case {}: '{}'", i, t.name);
let result = CheckPoint::from_block_ids(
t.blocks
.iter()
.map(|&(height, hash)| BlockId { height, hash }),
);
match t.exp_result {
Ok(_) => {
assert!(result.is_ok(), "[{}:{}] should be Ok", i, t.name);
let result_vec = {
let mut v = result
.unwrap()
.into_iter()
.map(|cp| (cp.height(), cp.hash()))
.collect::<Vec<_>>();
v.reverse();
v
};
assert_eq!(
&result_vec, t.blocks,
"[{}:{}] not equal to original block ids",
i, t.name
);
}
Err(exp_last) => {
assert!(result.is_err(), "[{}:{}] should be Err", i, t.name);
let err = result.unwrap_err();
assert_eq!(
err.as_ref()
.map(|last_cp| (last_cp.height(), last_cp.hash())),
exp_last,
"[{}:{}] error's last cp height should be {:?}, got {:?}",
i,
t.name,
exp_last,
err
);
}
}
}
}
#[test]
fn local_chain_apply_header_connected_to() {
fn header_from_prev_blockhash(prev_blockhash: BlockHash) -> Header {
Header {
version: bitcoin::block::Version::default(),
prev_blockhash,
merkle_root: bitcoin::hash_types::TxMerkleNode::all_zeros(),
time: 0,
bits: bitcoin::CompactTarget::default(),
nonce: 0,
}
}
struct TestCase {
name: &'static str,
chain: LocalChain,
header: Header,
height: u32,
connected_to: BlockId,
exp_result: Result<Vec<(u32, Option<BlockHash>)>, ApplyHeaderError>,
}
let test_cases = [
{
let header = header_from_prev_blockhash(h!("A"));
let hash = header.block_hash();
let height = 2;
let connected_to = BlockId { height, hash };
TestCase {
name: "connected_to_self_header_applied_to_self",
chain: local_chain![(0, h!("_")), (height, hash)],
header,
height,
connected_to,
exp_result: Ok(vec![]),
}
},
{
let prev_hash = h!("A");
let prev_height = 1;
let header = header_from_prev_blockhash(prev_hash);
let hash = header.block_hash();
let height = prev_height + 1;
let connected_to = BlockId {
height: prev_height,
hash: prev_hash,
};
TestCase {
name: "connected_to_prev_header_applied_to_self",
chain: local_chain![(0, h!("_")), (prev_height, prev_hash)],
header,
height,
connected_to,
exp_result: Ok(vec![(height, Some(hash))]),
}
},
{
let header = header_from_prev_blockhash(BlockHash::all_zeros());
let hash = header.block_hash();
let height = 0;
let connected_to = BlockId { height, hash };
TestCase {
name: "genesis_applied_to_self",
chain: local_chain![(0, hash)],
header,
height,
connected_to,
exp_result: Ok(vec![]),
}
},
{
let header = header_from_prev_blockhash(h!("Z"));
let height = 10;
let hash = header.block_hash();
let prev_height = height - 1;
let prev_hash = header.prev_blockhash;
TestCase {
name: "connect_at_connected_to",
chain: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
header,
height: 10,
connected_to: BlockId {
height: 3,
hash: h!("C"),
},
exp_result: Ok(vec![(prev_height, Some(prev_hash)), (height, Some(hash))]),
}
},
{
let prev_hash = h!("A");
let prev_height = 1;
let header = header_from_prev_blockhash(prev_hash);
let connected_to = BlockId {
height: prev_height,
hash: h!("not_prev_hash"),
};
TestCase {
name: "inconsistent_prev_hash",
chain: local_chain![(0, h!("_")), (prev_height, h!("not_prev_hash"))],
header,
height: prev_height + 1,
connected_to,
exp_result: Err(ApplyHeaderError::InconsistentBlocks),
}
},
{
let prev_hash = h!("A");
let prev_height = 1;
let header = header_from_prev_blockhash(prev_hash);
let height = prev_height + 1;
let connected_to = BlockId {
height,
hash: h!("not_current_hash"),
};
TestCase {
name: "inconsistent_current_block",
chain: local_chain![(0, h!("_")), (height, h!("not_current_hash"))],
header,
height,
connected_to,
exp_result: Err(ApplyHeaderError::InconsistentBlocks),
}
},
{
let header = header_from_prev_blockhash(h!("B"));
let height = 3;
let connected_to = BlockId {
height: 4,
hash: h!("D"),
};
TestCase {
name: "connected_to_is_greater",
chain: local_chain![(0, h!("_")), (2, h!("B"))],
header,
height,
connected_to,
exp_result: Err(ApplyHeaderError::InconsistentBlocks),
}
},
];
for (i, t) in test_cases.into_iter().enumerate() {
println!("running test case {}: '{}'", i, t.name);
let mut chain = t.chain;
let result = chain.apply_header_connected_to(&t.header, t.height, t.connected_to);
let exp_result = t
.exp_result
.map(|cs| cs.iter().cloned().collect::<ChangeSet>());
assert_eq!(result, exp_result, "[{}:{}] unexpected result", i, t.name);
}
}

View File

@@ -10,9 +10,7 @@ use bdk_chain::{
use bitcoin::{ use bitcoin::{
absolute, hashes::Hash, BlockHash, OutPoint, ScriptBuf, Transaction, TxIn, TxOut, Txid, absolute, hashes::Hash, BlockHash, OutPoint, ScriptBuf, Transaction, TxIn, TxOut, Txid,
}; };
use common::*;
use core::iter; use core::iter;
use rand::RngCore;
use std::vec; use std::vec;
#[test] #[test]
@@ -215,8 +213,7 @@ fn insert_tx_graph_doesnt_count_coinbase_as_spent() {
}; };
let mut graph = TxGraph::<()>::default(); let mut graph = TxGraph::<()>::default();
let changeset = graph.insert_tx(tx); let _ = graph.insert_tx(tx);
assert!(!changeset.is_empty());
assert!(graph.outspends(OutPoint::null()).is_empty()); assert!(graph.outspends(OutPoint::null()).is_empty());
assert!(graph.tx_spends(Txid::all_zeros()).next().is_none()); assert!(graph.tx_spends(Txid::all_zeros()).next().is_none());
} }
@@ -292,7 +289,7 @@ fn insert_tx_displaces_txouts() {
}], }],
}; };
let changeset = tx_graph.insert_txout( let _ = tx_graph.insert_txout(
OutPoint { OutPoint {
txid: tx.txid(), txid: tx.txid(),
vout: 0, vout: 0,
@@ -303,8 +300,6 @@ fn insert_tx_displaces_txouts() {
}, },
); );
assert!(!changeset.is_empty());
let _ = tx_graph.insert_txout( let _ = tx_graph.insert_txout(
OutPoint { OutPoint {
txid: tx.txid(), txid: tx.txid(),
@@ -658,8 +653,7 @@ fn test_walk_ancestors() {
]); ]);
[&tx_a0, &tx_b1].iter().for_each(|&tx| { [&tx_a0, &tx_b1].iter().for_each(|&tx| {
let changeset = graph.insert_anchor(tx.txid(), tip.block_id()); let _ = graph.insert_anchor(tx.txid(), tip.block_id());
assert!(!changeset.is_empty());
}); });
let ancestors = [ let ancestors = [
@@ -1033,12 +1027,10 @@ fn test_changeset_last_seen_append() {
last_seen: original_ls.map(|ls| (txid, ls)).into_iter().collect(), last_seen: original_ls.map(|ls| (txid, ls)).into_iter().collect(),
..Default::default() ..Default::default()
}; };
assert!(!original.is_empty() || original_ls.is_none());
let update = ChangeSet::<()> { let update = ChangeSet::<()> {
last_seen: update_ls.map(|ls| (txid, ls)).into_iter().collect(), last_seen: update_ls.map(|ls| (txid, ls)).into_iter().collect(),
..Default::default() ..Default::default()
}; };
assert!(!update.is_empty() || update_ls.is_none());
original.append(update); original.append(update);
assert_eq!( assert_eq!(
@@ -1180,86 +1172,3 @@ fn test_missing_blocks() {
), ),
]); ]);
} }
#[test]
/// The `map_anchors` allow a caller to pass a function to reconstruct the [`TxGraph`] with any [`Anchor`],
/// even though the function is non-deterministic.
fn call_map_anchors_with_non_deterministic_anchor() {
#[derive(Debug, Default, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)]
/// A non-deterministic anchor
pub struct NonDeterministicAnchor {
pub anchor_block: BlockId,
pub non_deterministic_field: u32,
}
let template = [
TxTemplate {
tx_name: "tx1",
inputs: &[TxInTemplate::Bogus],
outputs: &[TxOutTemplate::new(10000, Some(1))],
anchors: &[block_id!(1, "A")],
last_seen: None,
},
TxTemplate {
tx_name: "tx2",
inputs: &[TxInTemplate::PrevTx("tx1", 0)],
outputs: &[TxOutTemplate::new(20000, Some(2))],
anchors: &[block_id!(2, "B")],
..Default::default()
},
TxTemplate {
tx_name: "tx3",
inputs: &[TxInTemplate::PrevTx("tx2", 0)],
outputs: &[TxOutTemplate::new(30000, Some(3))],
anchors: &[block_id!(3, "C"), block_id!(4, "D")],
..Default::default()
},
];
let (graph, _, _) = init_graph(&template);
let new_graph = graph.clone().map_anchors(|a| NonDeterministicAnchor {
anchor_block: a,
// A non-deterministic value
non_deterministic_field: rand::thread_rng().next_u32(),
});
// Check all the details in new_graph reconstruct as well
let mut full_txs_vec: Vec<_> = graph.full_txs().collect();
full_txs_vec.sort();
let mut new_txs_vec: Vec<_> = new_graph.full_txs().collect();
new_txs_vec.sort();
let mut new_txs = new_txs_vec.iter();
for tx_node in full_txs_vec.iter() {
let new_txnode = new_txs.next().unwrap();
assert_eq!(new_txnode.txid, tx_node.txid);
assert_eq!(new_txnode.tx, tx_node.tx);
assert_eq!(
new_txnode.last_seen_unconfirmed,
tx_node.last_seen_unconfirmed
);
assert_eq!(new_txnode.anchors.len(), tx_node.anchors.len());
let mut new_anchors: Vec<_> = new_txnode.anchors.iter().map(|a| a.anchor_block).collect();
new_anchors.sort();
let mut old_anchors: Vec<_> = tx_node.anchors.iter().copied().collect();
old_anchors.sort();
assert_eq!(new_anchors, old_anchors);
}
assert!(new_txs.next().is_none());
let new_graph_anchors: Vec<_> = new_graph
.all_anchors()
.iter()
.map(|i| i.0.anchor_block)
.collect();
assert_eq!(
new_graph_anchors,
vec![
block_id!(1, "A"),
block_id!(2, "B"),
block_id!(3, "C"),
block_id!(4, "D"),
]
);
}

View File

@@ -110,7 +110,6 @@ fn test_tx_conflict_handling() {
..Default::default() ..Default::default()
}, },
], ],
// the txgraph is going to pick tx_conflict_2 because of higher lexicographical txid
exp_chain_txs: HashSet::from(["tx1", "tx_conflict_2"]), exp_chain_txs: HashSet::from(["tx1", "tx_conflict_2"]),
exp_chain_txouts: HashSet::from([("tx1", 0), ("tx_conflict_2", 0)]), exp_chain_txouts: HashSet::from([("tx1", 0), ("tx_conflict_2", 0)]),
exp_unspents: HashSet::from([("tx_conflict_2", 0)]), exp_unspents: HashSet::from([("tx_conflict_2", 0)]),

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "bdk_electrum" name = "bdk_electrum"
version = "0.10.0" version = "0.4.0"
edition = "2021" edition = "2021"
homepage = "https://bitcoindevkit.org" homepage = "https://bitcoindevkit.org"
repository = "https://github.com/bitcoindevkit/bdk" repository = "https://github.com/bitcoindevkit/bdk"
@@ -12,11 +12,6 @@ readme = "README.md"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
bdk_chain = { path = "../chain", version = "0.11.0", default-features = false } bdk_chain = { path = "../chain", version = "0.6.0", default-features = false }
electrum-client = { version = "0.18" } electrum-client = { version = "0.18" }
#rustls = { version = "=0.21.1", optional = true, features = ["dangerous_configuration"] } #rustls = { version = "=0.21.1", optional = true, features = ["dangerous_configuration"] }
[dev-dependencies]
bdk_testenv = { path = "../testenv", version = "0.1.0", default-features = false }
electrsd = { version= "0.25.0", features = ["bitcoind_25_0", "esplora_a33e97e1", "legacy"] }
anyhow = "1"

View File

@@ -1,7 +1,3 @@
# BDK Electrum # BDK Electrum
BDK Electrum extends [`electrum-client`] to update [`bdk_chain`] structures BDK Electrum client library for updating the keychain tracker.
from an Electrum server.
[`electrum-client`]: https://docs.rs/electrum-client/
[`bdk_chain`]: https://docs.rs/bdk-chain/

View File

@@ -56,14 +56,12 @@ impl RelevantTxids {
Ok(graph) Ok(graph)
} }
/// Finalizes the update by fetching `missing` txids from the `client`, where the /// Finalizes [`RelevantTxids`] with `new_txs` and anchors of type
/// resulting [`TxGraph`] has anchors of type [`ConfirmationTimeHeightAnchor`]. /// [`ConfirmationTimeHeightAnchor`].
///
/// Refer to [`RelevantTxids`] for more details.
/// ///
/// **Note:** The confirmation time might not be precisely correct if there has been a reorg. /// **Note:** The confirmation time might not be precisely correct if there has been a reorg.
// Electrum's API intends that we use the merkle proof API, we should change `bdk_electrum` to /// Electrum's API intends that we use the merkle proof API, we should change `bdk_electrum` to
// use it. /// use it.
pub fn into_confirmation_time_tx_graph( pub fn into_confirmation_time_tx_graph(
self, self,
client: &Client, client: &Client,
@@ -136,54 +134,64 @@ pub struct ElectrumUpdate {
/// Trait to extend [`Client`] functionality. /// Trait to extend [`Client`] functionality.
pub trait ElectrumExt { pub trait ElectrumExt {
/// Full scan the keychain scripts specified with the blockchain (via an Electrum client) and /// Scan the blockchain (via electrum) for the data specified and returns updates for
/// returns updates for [`bdk_chain`] data structures. /// [`bdk_chain`] data structures.
/// ///
/// - `prev_tip`: the most recent blockchain tip present locally /// - `prev_tip`: the most recent blockchain tip present locally
/// - `keychain_spks`: keychains that we want to scan transactions for /// - `keychain_spks`: keychains that we want to scan transactions for
/// - `txids`: transactions for which we want updated [`Anchor`]s
/// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
/// want to included in the update
/// ///
/// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated /// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
/// transactions. `batch_size` specifies the max number of script pubkeys to request for in a /// transactions. `batch_size` specifies the max number of script pubkeys to request for in a
/// single batch request. /// single batch request.
fn full_scan<K: Ord + Clone>( fn scan<K: Ord + Clone>(
&self, &self,
prev_tip: CheckPoint, prev_tip: CheckPoint,
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>, keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
stop_gap: usize, stop_gap: usize,
batch_size: usize, batch_size: usize,
) -> Result<(ElectrumUpdate, BTreeMap<K, u32>), Error>; ) -> Result<(ElectrumUpdate, BTreeMap<K, u32>), Error>;
/// Sync a set of scripts with the blockchain (via an Electrum client) for the data specified /// Convenience method to call [`scan`] without requiring a keychain.
/// and returns updates for [`bdk_chain`] data structures.
/// ///
/// - `prev_tip`: the most recent blockchain tip present locally /// [`scan`]: ElectrumExt::scan
/// - `misc_spks`: an iterator of scripts we want to sync transactions for fn scan_without_keychain(
/// - `txids`: transactions for which we want updated [`Anchor`]s
/// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
/// want to include in the update
///
/// `batch_size` specifies the max number of script pubkeys to request for in a single batch
/// request.
///
/// If the scripts to sync are unknown, such as when restoring or importing a keychain that
/// may include scripts that have been used, use [`full_scan`] with the keychain.
///
/// [`full_scan`]: ElectrumExt::full_scan
fn sync(
&self, &self,
prev_tip: CheckPoint, prev_tip: CheckPoint,
misc_spks: impl IntoIterator<Item = ScriptBuf>, misc_spks: impl IntoIterator<Item = ScriptBuf>,
txids: impl IntoIterator<Item = Txid>, txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>, outpoints: impl IntoIterator<Item = OutPoint>,
batch_size: usize, batch_size: usize,
) -> Result<ElectrumUpdate, Error>; ) -> Result<ElectrumUpdate, Error> {
let spk_iter = misc_spks
.into_iter()
.enumerate()
.map(|(i, spk)| (i as u32, spk));
let (electrum_update, _) = self.scan(
prev_tip,
[((), spk_iter)].into(),
txids,
outpoints,
usize::MAX,
batch_size,
)?;
Ok(electrum_update)
}
} }
impl<A: ElectrumApi> ElectrumExt for A { impl ElectrumExt for Client {
fn full_scan<K: Ord + Clone>( fn scan<K: Ord + Clone>(
&self, &self,
prev_tip: CheckPoint, prev_tip: CheckPoint,
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>, keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
stop_gap: usize, stop_gap: usize,
batch_size: usize, batch_size: usize,
) -> Result<(ElectrumUpdate, BTreeMap<K, u32>), Error> { ) -> Result<(ElectrumUpdate, BTreeMap<K, u32>), Error> {
@@ -193,6 +201,9 @@ impl<A: ElectrumApi> ElectrumExt for A {
.collect::<BTreeMap<K, _>>(); .collect::<BTreeMap<K, _>>();
let mut scanned_spks = BTreeMap::<(K, u32), (ScriptBuf, bool)>::new(); let mut scanned_spks = BTreeMap::<(K, u32), (ScriptBuf, bool)>::new();
let txids = txids.into_iter().collect::<Vec<_>>();
let outpoints = outpoints.into_iter().collect::<Vec<_>>();
let (electrum_update, keychain_update) = loop { let (electrum_update, keychain_update) = loop {
let (tip, _) = construct_update_tip(self, prev_tip.clone())?; let (tip, _) = construct_update_tip(self, prev_tip.clone())?;
let mut relevant_txids = RelevantTxids::default(); let mut relevant_txids = RelevantTxids::default();
@@ -231,6 +242,15 @@ impl<A: ElectrumApi> ElectrumExt for A {
} }
} }
populate_with_txids(self, &cps, &mut relevant_txids, &mut txids.iter().cloned())?;
let _txs = populate_with_outpoints(
self,
&cps,
&mut relevant_txids,
&mut outpoints.iter().cloned(),
)?;
// check for reorgs during scan process // check for reorgs during scan process
let server_blockhash = self.block_header(tip.height() as usize)?.block_hash(); let server_blockhash = self.block_header(tip.height() as usize)?.block_hash();
if tip.hash() != server_blockhash { if tip.hash() != server_blockhash {
@@ -264,46 +284,11 @@ impl<A: ElectrumApi> ElectrumExt for A {
Ok((electrum_update, keychain_update)) Ok((electrum_update, keychain_update))
} }
fn sync(
&self,
prev_tip: CheckPoint,
misc_spks: impl IntoIterator<Item = ScriptBuf>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
batch_size: usize,
) -> Result<ElectrumUpdate, Error> {
let spk_iter = misc_spks
.into_iter()
.enumerate()
.map(|(i, spk)| (i as u32, spk));
let (mut electrum_update, _) = self.full_scan(
prev_tip.clone(),
[((), spk_iter)].into(),
usize::MAX,
batch_size,
)?;
let (tip, _) = construct_update_tip(self, prev_tip)?;
let cps = tip
.iter()
.take(10)
.map(|cp| (cp.height(), cp))
.collect::<BTreeMap<u32, CheckPoint>>();
populate_with_txids(self, &cps, &mut electrum_update.relevant_txids, txids)?;
let _txs =
populate_with_outpoints(self, &cps, &mut electrum_update.relevant_txids, outpoints)?;
Ok(electrum_update)
}
} }
/// Return a [`CheckPoint`] of the latest tip, that connects with `prev_tip`. /// Return a [`CheckPoint`] of the latest tip, that connects with `prev_tip`.
fn construct_update_tip( fn construct_update_tip(
client: &impl ElectrumApi, client: &Client,
prev_tip: CheckPoint, prev_tip: CheckPoint,
) -> Result<(CheckPoint, Option<u32>), Error> { ) -> Result<(CheckPoint, Option<u32>), Error> {
let HeaderNotification { height, .. } = client.block_headers_subscribe()?; let HeaderNotification { height, .. } = client.block_headers_subscribe()?;
@@ -417,10 +402,10 @@ fn determine_tx_anchor(
} }
fn populate_with_outpoints( fn populate_with_outpoints(
client: &impl ElectrumApi, client: &Client,
cps: &BTreeMap<u32, CheckPoint>, cps: &BTreeMap<u32, CheckPoint>,
relevant_txids: &mut RelevantTxids, relevant_txids: &mut RelevantTxids,
outpoints: impl IntoIterator<Item = OutPoint>, outpoints: &mut impl Iterator<Item = OutPoint>,
) -> Result<HashMap<Txid, Transaction>, Error> { ) -> Result<HashMap<Txid, Transaction>, Error> {
let mut full_txs = HashMap::new(); let mut full_txs = HashMap::new();
for outpoint in outpoints { for outpoint in outpoints {
@@ -478,10 +463,10 @@ fn populate_with_outpoints(
} }
fn populate_with_txids( fn populate_with_txids(
client: &impl ElectrumApi, client: &Client,
cps: &BTreeMap<u32, CheckPoint>, cps: &BTreeMap<u32, CheckPoint>,
relevant_txids: &mut RelevantTxids, relevant_txids: &mut RelevantTxids,
txids: impl IntoIterator<Item = Txid>, txids: &mut impl Iterator<Item = Txid>,
) -> Result<(), Error> { ) -> Result<(), Error> {
for txid in txids { for txid in txids {
let tx = match client.transaction_get(&txid) { let tx = match client.transaction_get(&txid) {
@@ -492,7 +477,7 @@ fn populate_with_txids(
let spk = tx let spk = tx
.output .output
.first() .get(0)
.map(|txo| &txo.script_pubkey) .map(|txo| &txo.script_pubkey)
.expect("tx must have an output"); .expect("tx must have an output");
@@ -514,7 +499,7 @@ fn populate_with_txids(
} }
fn populate_with_spks<I: Ord + Clone>( fn populate_with_spks<I: Ord + Clone>(
client: &impl ElectrumApi, client: &Client,
cps: &BTreeMap<u32, CheckPoint>, cps: &BTreeMap<u32, CheckPoint>,
relevant_txids: &mut RelevantTxids, relevant_txids: &mut RelevantTxids,
spks: &mut impl Iterator<Item = (I, ScriptBuf)>, spks: &mut impl Iterator<Item = (I, ScriptBuf)>,

View File

@@ -1,26 +1,26 @@
//! This crate is used for updating structures of [`bdk_chain`] with data from an Electrum server. //! This crate is used for updating structures of the [`bdk_chain`] crate with data from electrum.
//! //!
//! The two primary methods are [`ElectrumExt::sync`] and [`ElectrumExt::full_scan`]. In most cases //! The star of the show is the [`ElectrumExt::scan`] method, which scans for relevant blockchain
//! [`ElectrumExt::sync`] is used to sync the transaction histories of scripts that the application //! data (via electrum) and outputs updates for [`bdk_chain`] structures as a tuple of form:
//! cares about, for example the scripts for all the receive addresses of a Wallet's keychain that it
//! has shown a user. [`ElectrumExt::full_scan`] is meant to be used when importing or restoring a
//! keychain where the range of possibly used scripts is not known. In this case it is necessary to
//! scan all keychain scripts until a number (the "stop gap") of unused scripts is discovered. For a
//! sync or full scan the user receives relevant blockchain data and output updates for
//! [`bdk_chain`] including [`RelevantTxids`].
//! //!
//! The [`RelevantTxids`] only includes `txid`s and not full transactions. The caller is responsible //! ([`bdk_chain::local_chain::Update`], [`RelevantTxids`], `keychain_update`)
//! for obtaining full transactions before applying new data to their [`bdk_chain`]. This can be
//! done with these steps:
//! //!
//! 1. Determine which full transactions are missing. Use [`RelevantTxids::missing_full_txs`]. //! An [`RelevantTxids`] only includes `txid`s and no full transactions. The caller is
//! responsible for obtaining full transactions before applying. This can be done with
//! these steps:
//! //!
//! 2. Obtaining the full transactions. To do this via electrum use [`ElectrumApi::batch_transaction_get`]. //! 1. Determine which full transactions are missing. The method [`missing_full_txs`] of
//! [`RelevantTxids`] can be used.
//! //!
//! Refer to [`example_electrum`] for a complete example. //! 2. Obtaining the full transactions. To do this via electrum, the method
//! [`batch_transaction_get`] can be used.
//! //!
//! [`ElectrumApi::batch_transaction_get`]: electrum_client::ElectrumApi::batch_transaction_get //! Refer to [`bdk_electrum_example`] for a complete example.
//! [`example_electrum`]: https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_electrum //!
//! [`ElectrumClient::scan`]: electrum_client::ElectrumClient::scan
//! [`missing_full_txs`]: RelevantTxids::missing_full_txs
//! [`batch_transaction_get`]: electrum_client::ElectrumApi::batch_transaction_get
//! [`bdk_electrum_example`]: https://github.com/LLFourn/bdk_core_staging/tree/master/bdk_electrum_example
#![warn(missing_docs)] #![warn(missing_docs)]

View File

@@ -1,192 +0,0 @@
use anyhow::Result;
use bdk_chain::{
bitcoin::{hashes::Hash, Address, Amount, ScriptBuf, WScriptHash},
keychain::Balance,
local_chain::LocalChain,
ConfirmationTimeHeightAnchor, IndexedTxGraph, SpkTxOutIndex,
};
use bdk_electrum::{ElectrumExt, ElectrumUpdate};
use bdk_testenv::TestEnv;
use electrsd::bitcoind::bitcoincore_rpc::RpcApi;
fn get_balance(
recv_chain: &LocalChain,
recv_graph: &IndexedTxGraph<ConfirmationTimeHeightAnchor, SpkTxOutIndex<()>>,
) -> Result<Balance> {
let chain_tip = recv_chain.tip().block_id();
let outpoints = recv_graph.index.outpoints().clone();
let balance = recv_graph
.graph()
.balance(recv_chain, chain_tip, outpoints, |_, _| true);
Ok(balance)
}
/// Ensure that [`ElectrumExt`] can sync properly.
///
/// 1. Mine 101 blocks.
/// 2. Send a tx.
/// 3. Mine extra block to confirm sent tx.
/// 4. Check [`Balance`] to ensure tx is confirmed.
#[test]
fn scan_detects_confirmed_tx() -> Result<()> {
const SEND_AMOUNT: Amount = Amount::from_sat(10_000);
let env = TestEnv::new()?;
let client = electrum_client::Client::new(env.electrsd.electrum_url.as_str())?;
// Setup addresses.
let addr_to_mine = env
.bitcoind
.client
.get_new_address(None, None)?
.assume_checked();
let spk_to_track = ScriptBuf::new_v0_p2wsh(&WScriptHash::all_zeros());
let addr_to_track = Address::from_script(&spk_to_track, bdk_chain::bitcoin::Network::Regtest)?;
// Setup receiver.
let (mut recv_chain, _) = LocalChain::from_genesis_hash(env.bitcoind.client.get_block_hash(0)?);
let mut recv_graph = IndexedTxGraph::<ConfirmationTimeHeightAnchor, _>::new({
let mut recv_index = SpkTxOutIndex::default();
recv_index.insert_spk((), spk_to_track.clone());
recv_index
});
// Mine some blocks.
env.mine_blocks(101, Some(addr_to_mine))?;
// Create transaction that is tracked by our receiver.
env.send(&addr_to_track, SEND_AMOUNT)?;
// Mine a block to confirm sent tx.
env.mine_blocks(1, None)?;
// Sync up to tip.
env.wait_until_electrum_sees_block()?;
let ElectrumUpdate {
chain_update,
relevant_txids,
} = client.sync(recv_chain.tip(), [spk_to_track], None, None, 5)?;
let missing = relevant_txids.missing_full_txs(recv_graph.graph());
let graph_update = relevant_txids.into_confirmation_time_tx_graph(&client, None, missing)?;
let _ = recv_chain
.apply_update(chain_update)
.map_err(|err| anyhow::anyhow!("LocalChain update error: {:?}", err))?;
let _ = recv_graph.apply_update(graph_update);
// Check to see if tx is confirmed.
assert_eq!(
get_balance(&recv_chain, &recv_graph)?,
Balance {
confirmed: SEND_AMOUNT.to_sat(),
..Balance::default()
},
);
Ok(())
}
/// Ensure that confirmed txs that are reorged become unconfirmed.
///
/// 1. Mine 101 blocks.
/// 2. Mine 8 blocks with a confirmed tx in each.
/// 3. Perform 8 separate reorgs on each block with a confirmed tx.
/// 4. Check [`Balance`] after each reorg to ensure unconfirmed amount is correct.
#[test]
fn tx_can_become_unconfirmed_after_reorg() -> Result<()> {
const REORG_COUNT: usize = 8;
const SEND_AMOUNT: Amount = Amount::from_sat(10_000);
let env = TestEnv::new()?;
let client = electrum_client::Client::new(env.electrsd.electrum_url.as_str())?;
// Setup addresses.
let addr_to_mine = env
.bitcoind
.client
.get_new_address(None, None)?
.assume_checked();
let spk_to_track = ScriptBuf::new_v0_p2wsh(&WScriptHash::all_zeros());
let addr_to_track = Address::from_script(&spk_to_track, bdk_chain::bitcoin::Network::Regtest)?;
// Setup receiver.
let (mut recv_chain, _) = LocalChain::from_genesis_hash(env.bitcoind.client.get_block_hash(0)?);
let mut recv_graph = IndexedTxGraph::<ConfirmationTimeHeightAnchor, _>::new({
let mut recv_index = SpkTxOutIndex::default();
recv_index.insert_spk((), spk_to_track.clone());
recv_index
});
// Mine some blocks.
env.mine_blocks(101, Some(addr_to_mine))?;
// Create transactions that are tracked by our receiver.
for _ in 0..REORG_COUNT {
env.send(&addr_to_track, SEND_AMOUNT)?;
env.mine_blocks(1, None)?;
}
// Sync up to tip.
env.wait_until_electrum_sees_block()?;
let ElectrumUpdate {
chain_update,
relevant_txids,
} = client.sync(recv_chain.tip(), [spk_to_track.clone()], None, None, 5)?;
let missing = relevant_txids.missing_full_txs(recv_graph.graph());
let graph_update = relevant_txids.into_confirmation_time_tx_graph(&client, None, missing)?;
let _ = recv_chain
.apply_update(chain_update)
.map_err(|err| anyhow::anyhow!("LocalChain update error: {:?}", err))?;
let _ = recv_graph.apply_update(graph_update.clone());
// Retain a snapshot of all anchors before reorg process.
let initial_anchors = graph_update.all_anchors();
// Check if initial balance is correct.
assert_eq!(
get_balance(&recv_chain, &recv_graph)?,
Balance {
confirmed: SEND_AMOUNT.to_sat() * REORG_COUNT as u64,
..Balance::default()
},
"initial balance must be correct",
);
// Perform reorgs with different depths.
for depth in 1..=REORG_COUNT {
env.reorg_empty_blocks(depth)?;
env.wait_until_electrum_sees_block()?;
let ElectrumUpdate {
chain_update,
relevant_txids,
} = client.sync(recv_chain.tip(), [spk_to_track.clone()], None, None, 5)?;
let missing = relevant_txids.missing_full_txs(recv_graph.graph());
let graph_update =
relevant_txids.into_confirmation_time_tx_graph(&client, None, missing)?;
let _ = recv_chain
.apply_update(chain_update)
.map_err(|err| anyhow::anyhow!("LocalChain update error: {:?}", err))?;
// Check to see if a new anchor is added during current reorg.
if !initial_anchors.is_superset(graph_update.all_anchors()) {
println!("New anchor added at reorg depth {}", depth);
}
let _ = recv_graph.apply_update(graph_update);
assert_eq!(
get_balance(&recv_chain, &recv_graph)?,
Balance {
confirmed: SEND_AMOUNT.to_sat() * (REORG_COUNT - depth) as u64,
trusted_pending: SEND_AMOUNT.to_sat() * depth as u64,
..Balance::default()
},
"reorg_count: {}",
depth,
);
}
Ok(())
}

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "bdk_esplora" name = "bdk_esplora"
version = "0.10.0" version = "0.4.0"
edition = "2021" edition = "2021"
homepage = "https://bitcoindevkit.org" homepage = "https://bitcoindevkit.org"
repository = "https://github.com/bitcoindevkit/bdk" repository = "https://github.com/bitcoindevkit/bdk"
@@ -12,7 +12,7 @@ readme = "README.md"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
bdk_chain = { path = "../chain", version = "0.11.0", default-features = false } bdk_chain = { path = "../chain", version = "0.6.0", default-features = false }
esplora-client = { version = "0.6.0", default-features = false } esplora-client = { version = "0.6.0", default-features = false }
async-trait = { version = "0.1.66", optional = true } async-trait = { version = "0.1.66", optional = true }
futures = { version = "0.3.26", optional = true } futures = { version = "0.3.26", optional = true }
@@ -21,8 +21,7 @@ futures = { version = "0.3.26", optional = true }
bitcoin = { version = "0.30.0", optional = true, default-features = false } bitcoin = { version = "0.30.0", optional = true, default-features = false }
miniscript = { version = "10.0.0", optional = true, default-features = false } miniscript = { version = "10.0.0", optional = true, default-features = false }
[dev-dependencies] [target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies]
bdk_testenv = { path = "../testenv", version = "0.1.0", default_features = false }
electrsd = { version= "0.25.0", features = ["bitcoind_25_0", "esplora_a33e97e1", "legacy"] } electrsd = { version= "0.25.0", features = ["bitcoind_25_0", "esplora_a33e97e1", "legacy"] }
tokio = { version = "1", features = ["rt", "rt-multi-thread", "macros"] } tokio = { version = "1", features = ["rt", "rt-multi-thread", "macros"] }

View File

@@ -30,7 +30,7 @@ use bdk_esplora::EsploraExt;
// use bdk_esplora::EsploraAsyncExt; // use bdk_esplora::EsploraAsyncExt;
``` ```
For full examples, refer to [`example-crates/wallet_esplora_blocking`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora_blocking) and [`example-crates/wallet_esplora_async`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora_async). For full examples, refer to [`example-crates/wallet_esplora`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora) (blocking) and [`example-crates/wallet_esplora_async`](https://github.com/bitcoindevkit/bdk/tree/master/example-crates/wallet_esplora_async).
[`esplora-client`]: https://docs.rs/esplora-client/ [`esplora-client`]: https://docs.rs/esplora-client/
[`bdk_chain`]: https://docs.rs/bdk-chain/ [`bdk_chain`]: https://docs.rs/bdk-chain/

View File

@@ -1,18 +1,15 @@
use async_trait::async_trait; use async_trait::async_trait;
use bdk_chain::collections::btree_map; use bdk_chain::collections::btree_map;
use bdk_chain::{ use bdk_chain::{
bitcoin::{BlockHash, OutPoint, ScriptBuf, TxOut, Txid}, bitcoin::{BlockHash, OutPoint, ScriptBuf, Txid},
collections::BTreeMap, collections::{BTreeMap, BTreeSet},
local_chain::{self, CheckPoint}, local_chain::{self, CheckPoint},
BlockId, ConfirmationTimeHeightAnchor, TxGraph, BlockId, ConfirmationTimeHeightAnchor, TxGraph,
}; };
use esplora_client::TxStatus; use esplora_client::{Error, TxStatus};
use futures::{stream::FuturesOrdered, TryStreamExt}; use futures::{stream::FuturesOrdered, TryStreamExt};
use crate::anchor_from_status; use crate::{anchor_from_status, ASSUME_FINAL_DEPTH};
/// [`esplora_client::Error`]
type Error = Box<esplora_client::Error>;
/// Trait to extend the functionality of [`esplora_client::AsyncClient`]. /// Trait to extend the functionality of [`esplora_client::AsyncClient`].
/// ///
@@ -22,78 +19,75 @@ type Error = Box<esplora_client::Error>;
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))] #[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)] #[cfg_attr(not(target_arch = "wasm32"), async_trait)]
pub trait EsploraAsyncExt { pub trait EsploraAsyncExt {
/// Prepare a [`LocalChain`] update with blocks fetched from Esplora. /// Prepare an [`LocalChain`] update with blocks fetched from Esplora.
/// ///
/// * `local_tip` is the previous tip of [`LocalChain::tip`]. /// * `local_tip` is the previous tip of [`LocalChain::tip`].
/// * `request_heights` is the block heights that we are interested in fetching from Esplora. /// * `request_heights` is the block heights that we are interested in fetching from Esplora.
/// ///
/// The result of this method can be applied to [`LocalChain::apply_update`]. /// The result of this method can be applied to [`LocalChain::apply_update`].
/// ///
/// ## Consistency
///
/// The chain update returned is guaranteed to be consistent as long as there is not a *large* re-org
/// during the call. The size of re-org we can tollerate is server dependent but will be at
/// least 10.
///
/// [`LocalChain`]: bdk_chain::local_chain::LocalChain /// [`LocalChain`]: bdk_chain::local_chain::LocalChain
/// [`LocalChain::tip`]: bdk_chain::local_chain::LocalChain::tip /// [`LocalChain::tip`]: bdk_chain::local_chain::LocalChain::tip
/// [`LocalChain::apply_update`]: bdk_chain::local_chain::LocalChain::apply_update /// [`LocalChain::apply_update`]: bdk_chain::local_chain::LocalChain::apply_update
#[allow(clippy::result_large_err)]
async fn update_local_chain( async fn update_local_chain(
&self, &self,
local_tip: CheckPoint, local_tip: CheckPoint,
request_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send, request_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send,
) -> Result<local_chain::Update, Error>; ) -> Result<local_chain::Update, Error>;
/// Full scan the keychain scripts specified with the blockchain (via an Esplora client) and /// Scan Esplora for the data specified and return a [`TxGraph`] and a map of last active
/// returns a [`TxGraph`] and a map of last active indices. /// indices.
/// ///
/// * `keychain_spks`: keychains that we want to scan transactions for /// * `keychain_spks`: keychains that we want to scan transactions for
/// * `txids`: transactions for which we want updated [`ConfirmationTimeHeightAnchor`]s
/// * `outpoints`: transactions associated with these outpoints (residing, spending) that we
/// want to include in the update
/// ///
/// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated /// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
/// transactions. `parallel_requests` specifies the max number of HTTP requests to make in /// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
/// parallel. /// parallel.
/// #[allow(clippy::result_large_err)]
/// ## Note async fn scan_txs_with_keychains<K: Ord + Clone + Send>(
///
/// `stop_gap` is defined as "the maximum number of consecutive unused addresses".
/// For example, with a `stop_gap` of 3, `full_scan` will keep scanning
/// until it encounters 3 consecutive script pubkeys with no associated transactions.
///
/// This follows the same approach as other Bitcoin-related software,
/// such as [Electrum](https://electrum.readthedocs.io/en/latest/faq.html#what-is-the-gap-limit),
/// [BTCPay Server](https://docs.btcpayserver.org/FAQ/Wallet/#the-gap-limit-problem),
/// and [Sparrow](https://www.sparrowwallet.com/docs/faq.html#ive-restored-my-wallet-but-some-of-my-funds-are-missing).
///
/// A `stop_gap` of 0 will be treated as a `stop_gap` of 1.
async fn full_scan<K: Ord + Clone + Send>(
&self, &self,
keychain_spks: BTreeMap< keychain_spks: BTreeMap<
K, K,
impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send, impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send,
>, >,
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
stop_gap: usize, stop_gap: usize,
parallel_requests: usize, parallel_requests: usize,
) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error>; ) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error>;
/// Sync a set of scripts with the blockchain (via an Esplora client) for the data /// Convenience method to call [`scan_txs_with_keychains`] without requiring a keychain.
/// specified and return a [`TxGraph`].
/// ///
/// * `misc_spks`: scripts that we want to sync transactions for /// [`scan_txs_with_keychains`]: EsploraAsyncExt::scan_txs_with_keychains
/// * `txids`: transactions for which we want updated [`ConfirmationTimeHeightAnchor`]s #[allow(clippy::result_large_err)]
/// * `outpoints`: transactions associated with these outpoints (residing, spending) that we async fn scan_txs(
/// want to include in the update
///
/// If the scripts to sync are unknown, such as when restoring or importing a keychain that
/// may include scripts that have been used, use [`full_scan`] with the keychain.
///
/// [`full_scan`]: EsploraAsyncExt::full_scan
async fn sync(
&self, &self,
misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send, misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send, txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send, outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
parallel_requests: usize, parallel_requests: usize,
) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error>; ) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
self.scan_txs_with_keychains(
[(
(),
misc_spks
.into_iter()
.enumerate()
.map(|(i, spk)| (i as u32, spk)),
)]
.into(),
txids,
outpoints,
usize::MAX,
parallel_requests,
)
.await
.map(|(g, _)| g)
}
} }
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))] #[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
@@ -104,22 +98,21 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
local_tip: CheckPoint, local_tip: CheckPoint,
request_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send, request_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send,
) -> Result<local_chain::Update, Error> { ) -> Result<local_chain::Update, Error> {
// Fetch latest N (server dependent) blocks from Esplora. The server guarantees these are let request_heights = request_heights.into_iter().collect::<BTreeSet<_>>();
// consistent. let new_tip_height = self.get_height().await?;
let mut fetched_blocks = self
.get_blocks(None)
.await?
.into_iter()
.map(|b| (b.time.height, b.id))
.collect::<BTreeMap<u32, BlockHash>>();
let new_tip_height = fetched_blocks
.keys()
.last()
.copied()
.expect("must have atleast one block");
// Fetch blocks of heights that the caller is interested in, skipping blocks that are // atomically fetch blocks from esplora
// already fetched when constructing `fetched_blocks`. let mut fetched_blocks = {
let heights = (0..=new_tip_height).rev();
let hashes = self
.get_blocks(Some(new_tip_height))
.await?
.into_iter()
.map(|b| b.id);
heights.zip(hashes).collect::<BTreeMap<u32, BlockHash>>()
};
// fetch heights that the caller is interested in
for height in request_heights { for height in request_heights {
// do not fetch blocks higher than remote tip // do not fetch blocks higher than remote tip
if height > new_tip_height { if height > new_tip_height {
@@ -127,47 +120,93 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
} }
// only fetch what is missing // only fetch what is missing
if let btree_map::Entry::Vacant(entry) = fetched_blocks.entry(height) { if let btree_map::Entry::Vacant(entry) = fetched_blocks.entry(height) {
// ❗The return value of `get_block_hash` is not strictly guaranteed to be consistent let hash = self.get_block_hash(height).await?;
// with the chain at the time of `get_blocks` above (there could have been a deep entry.insert(hash);
// re-org). Since `get_blocks` returns 10 (or so) blocks we are assuming that it's
// not possible to have a re-org deeper than that.
entry.insert(self.get_block_hash(height).await?);
} }
} }
// Ensure `fetched_blocks` can create an update that connects with the original chain by // find the earliest point of agreement between local chain and fetched chain
// finding a "Point of Agreement". let earliest_agreement_cp = {
for (height, local_hash) in local_tip.iter().map(|cp| (cp.height(), cp.hash())) { let mut earliest_agreement_cp = Option::<CheckPoint>::None;
if height > new_tip_height {
continue; let local_tip_height = local_tip.height();
for local_cp in local_tip.iter() {
let local_block = local_cp.block_id();
// the updated hash (block hash at this height after the update), can either be:
// 1. a block that already existed in `fetched_blocks`
// 2. a block that exists locally and at least has a depth of ASSUME_FINAL_DEPTH
// 3. otherwise we can freshly fetch the block from remote, which is safe as it
// is guaranteed that this would be at or below ASSUME_FINAL_DEPTH from the
// remote tip
let updated_hash = match fetched_blocks.entry(local_block.height) {
btree_map::Entry::Occupied(entry) => *entry.get(),
btree_map::Entry::Vacant(entry) => *entry.insert(
if local_tip_height - local_block.height >= ASSUME_FINAL_DEPTH {
local_block.hash
} else {
self.get_block_hash(local_block.height).await?
},
),
};
// since we may introduce blocks below the point of agreement, we cannot break
// here unconditionally - we only break if we guarantee there are no new heights
// below our current local checkpoint
if local_block.hash == updated_hash {
earliest_agreement_cp = Some(local_cp);
let first_new_height = *fetched_blocks
.keys()
.next()
.expect("must have at least one new block");
if first_new_height >= local_block.height {
break;
}
}
} }
let fetched_hash = match fetched_blocks.entry(height) { earliest_agreement_cp
btree_map::Entry::Occupied(entry) => *entry.get(), };
btree_map::Entry::Vacant(entry) => {
*entry.insert(self.get_block_hash(height).await?) let tip = {
// first checkpoint to use for the update chain
let first_cp = match earliest_agreement_cp {
Some(cp) => cp,
None => {
let (&height, &hash) = fetched_blocks
.iter()
.next()
.expect("must have at least one new block");
CheckPoint::new(BlockId { height, hash })
} }
}; };
// transform fetched chain into the update chain
// We have found point of agreement so the update will connect! fetched_blocks
if fetched_hash == local_hash { // we exclude anything at or below the first cp of the update chain otherwise
break; // building the chain will fail
} .split_off(&(first_cp.height() + 1))
} .into_iter()
.map(|(height, hash)| BlockId { height, hash })
.fold(first_cp, |prev_cp, block| {
prev_cp.push(block).expect("must extend checkpoint")
})
};
Ok(local_chain::Update { Ok(local_chain::Update {
tip: CheckPoint::from_block_ids(fetched_blocks.into_iter().map(BlockId::from)) tip,
.expect("must be in height order"),
introduce_older_blocks: true, introduce_older_blocks: true,
}) })
} }
async fn full_scan<K: Ord + Clone + Send>( async fn scan_txs_with_keychains<K: Ord + Clone + Send>(
&self, &self,
keychain_spks: BTreeMap< keychain_spks: BTreeMap<
K, K,
impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send, impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send,
>, >,
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
stop_gap: usize, stop_gap: usize,
parallel_requests: usize, parallel_requests: usize,
) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error> { ) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error> {
@@ -175,7 +214,6 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
let parallel_requests = Ord::max(parallel_requests, 1); let parallel_requests = Ord::max(parallel_requests, 1);
let mut graph = TxGraph::<ConfirmationTimeHeightAnchor>::default(); let mut graph = TxGraph::<ConfirmationTimeHeightAnchor>::default();
let mut last_active_indexes = BTreeMap::<K, u32>::new(); let mut last_active_indexes = BTreeMap::<K, u32>::new();
let stop_gap = Ord::max(stop_gap, 1);
for (keychain, spks) in keychain_spks { for (keychain, spks) in keychain_spks {
let mut spks = spks.into_iter(); let mut spks = spks.into_iter();
@@ -218,34 +256,16 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
if let Some(anchor) = anchor_from_status(&tx.status) { if let Some(anchor) = anchor_from_status(&tx.status) {
let _ = graph.insert_anchor(tx.txid, anchor); let _ = graph.insert_anchor(tx.txid, anchor);
} }
let previous_outputs = tx.vin.iter().filter_map(|vin| {
let prevout = vin.prevout.as_ref()?;
Some((
OutPoint {
txid: vin.txid,
vout: vin.vout,
},
TxOut {
script_pubkey: prevout.scriptpubkey.clone(),
value: prevout.value,
},
))
});
for (outpoint, txout) in previous_outputs {
let _ = graph.insert_txout(outpoint, txout);
}
} }
} }
let last_index = last_index.expect("Must be set since handles wasn't empty."); let last_index = last_index.expect("Must be set since handles wasn't empty.");
let gap_limit_reached = if let Some(i) = last_active_index { let past_gap_limit = if let Some(i) = last_active_index {
last_index >= i.saturating_add(stop_gap as u32) last_index > i.saturating_add(stop_gap as u32)
} else { } else {
last_index + 1 >= stop_gap as u32 last_index >= stop_gap as u32
}; };
if gap_limit_reached { if past_gap_limit {
break; break;
} }
} }
@@ -255,32 +275,6 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
} }
} }
Ok((graph, last_active_indexes))
}
async fn sync(
&self,
misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
parallel_requests: usize,
) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
let mut graph = self
.full_scan(
[(
(),
misc_spks
.into_iter()
.enumerate()
.map(|(i, spk)| (i as u32, spk)),
)]
.into(),
usize::MAX,
parallel_requests,
)
.await
.map(|(g, _)| g)?;
let mut txids = txids.into_iter(); let mut txids = txids.into_iter();
loop { loop {
let handles = txids let handles = txids
@@ -329,6 +323,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
} }
} }
} }
Ok(graph)
Ok((graph, last_active_indexes))
} }
} }

View File

@@ -1,18 +1,15 @@
use std::thread::JoinHandle; use std::thread::JoinHandle;
use bdk_chain::collections::btree_map; use bdk_chain::collections::btree_map;
use bdk_chain::collections::BTreeMap; use bdk_chain::collections::{BTreeMap, BTreeSet};
use bdk_chain::{ use bdk_chain::{
bitcoin::{BlockHash, OutPoint, ScriptBuf, TxOut, Txid}, bitcoin::{BlockHash, OutPoint, ScriptBuf, Txid},
local_chain::{self, CheckPoint}, local_chain::{self, CheckPoint},
BlockId, ConfirmationTimeHeightAnchor, TxGraph, BlockId, ConfirmationTimeHeightAnchor, TxGraph,
}; };
use esplora_client::TxStatus; use esplora_client::{Error, TxStatus};
use crate::anchor_from_status; use crate::{anchor_from_status, ASSUME_FINAL_DEPTH};
/// [`esplora_client::Error`]
type Error = Box<esplora_client::Error>;
/// Trait to extend the functionality of [`esplora_client::BlockingClient`]. /// Trait to extend the functionality of [`esplora_client::BlockingClient`].
/// ///
@@ -20,75 +17,71 @@ type Error = Box<esplora_client::Error>;
/// ///
/// [crate-level documentation]: crate /// [crate-level documentation]: crate
pub trait EsploraExt { pub trait EsploraExt {
/// Prepare a [`LocalChain`] update with blocks fetched from Esplora. /// Prepare an [`LocalChain`] update with blocks fetched from Esplora.
/// ///
/// * `local_tip` is the previous tip of [`LocalChain::tip`]. /// * `prev_tip` is the previous tip of [`LocalChain::tip`].
/// * `request_heights` is the block heights that we are interested in fetching from Esplora. /// * `get_heights` is the block heights that we are interested in fetching from Esplora.
/// ///
/// The result of this method can be applied to [`LocalChain::apply_update`]. /// The result of this method can be applied to [`LocalChain::apply_update`].
/// ///
/// ## Consistency
///
/// The chain update returned is guaranteed to be consistent as long as there is not a *large* re-org
/// during the call. The size of re-org we can tollerate is server dependent but will be at
/// least 10.
///
/// [`LocalChain`]: bdk_chain::local_chain::LocalChain /// [`LocalChain`]: bdk_chain::local_chain::LocalChain
/// [`LocalChain::tip`]: bdk_chain::local_chain::LocalChain::tip /// [`LocalChain::tip`]: bdk_chain::local_chain::LocalChain::tip
/// [`LocalChain::apply_update`]: bdk_chain::local_chain::LocalChain::apply_update /// [`LocalChain::apply_update`]: bdk_chain::local_chain::LocalChain::apply_update
#[allow(clippy::result_large_err)]
fn update_local_chain( fn update_local_chain(
&self, &self,
local_tip: CheckPoint, local_tip: CheckPoint,
request_heights: impl IntoIterator<Item = u32>, request_heights: impl IntoIterator<Item = u32>,
) -> Result<local_chain::Update, Error>; ) -> Result<local_chain::Update, Error>;
/// Full scan the keychain scripts specified with the blockchain (via an Esplora client) and /// Scan Esplora for the data specified and return a [`TxGraph`] and a map of last active
/// returns a [`TxGraph`] and a map of last active indices. /// indices.
/// ///
/// * `keychain_spks`: keychains that we want to scan transactions for /// * `keychain_spks`: keychains that we want to scan transactions for
///
/// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
/// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
/// parallel.
///
/// ## Note
///
/// `stop_gap` is defined as "the maximum number of consecutive unused addresses".
/// For example, with a `stop_gap` of 3, `full_scan` will keep scanning
/// until it encounters 3 consecutive script pubkeys with no associated transactions.
///
/// This follows the same approach as other Bitcoin-related software,
/// such as [Electrum](https://electrum.readthedocs.io/en/latest/faq.html#what-is-the-gap-limit),
/// [BTCPay Server](https://docs.btcpayserver.org/FAQ/Wallet/#the-gap-limit-problem),
/// and [Sparrow](https://www.sparrowwallet.com/docs/faq.html#ive-restored-my-wallet-but-some-of-my-funds-are-missing).
///
/// A `stop_gap` of 0 will be treated as a `stop_gap` of 1.
fn full_scan<K: Ord + Clone>(
&self,
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
stop_gap: usize,
parallel_requests: usize,
) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error>;
/// Sync a set of scripts with the blockchain (via an Esplora client) for the data
/// specified and return a [`TxGraph`].
///
/// * `misc_spks`: scripts that we want to sync transactions for
/// * `txids`: transactions for which we want updated [`ConfirmationTimeHeightAnchor`]s /// * `txids`: transactions for which we want updated [`ConfirmationTimeHeightAnchor`]s
/// * `outpoints`: transactions associated with these outpoints (residing, spending) that we /// * `outpoints`: transactions associated with these outpoints (residing, spending) that we
/// want to include in the update /// want to include in the update
/// ///
/// If the scripts to sync are unknown, such as when restoring or importing a keychain that /// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
/// may include scripts that have been used, use [`full_scan`] with the keychain. /// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
/// parallel.
#[allow(clippy::result_large_err)]
fn scan_txs_with_keychains<K: Ord + Clone>(
&self,
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
stop_gap: usize,
parallel_requests: usize,
) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error>;
/// Convenience method to call [`scan_txs_with_keychains`] without requiring a keychain.
/// ///
/// [`full_scan`]: EsploraExt::full_scan /// [`scan_txs_with_keychains`]: EsploraExt::scan_txs_with_keychains
fn sync( #[allow(clippy::result_large_err)]
fn scan_txs(
&self, &self,
misc_spks: impl IntoIterator<Item = ScriptBuf>, misc_spks: impl IntoIterator<Item = ScriptBuf>,
txids: impl IntoIterator<Item = Txid>, txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>, outpoints: impl IntoIterator<Item = OutPoint>,
parallel_requests: usize, parallel_requests: usize,
) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error>; ) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
self.scan_txs_with_keychains(
[(
(),
misc_spks
.into_iter()
.enumerate()
.map(|(i, spk)| (i as u32, spk)),
)]
.into(),
txids,
outpoints,
usize::MAX,
parallel_requests,
)
.map(|(g, _)| g)
}
} }
impl EsploraExt for esplora_client::BlockingClient { impl EsploraExt for esplora_client::BlockingClient {
@@ -97,21 +90,20 @@ impl EsploraExt for esplora_client::BlockingClient {
local_tip: CheckPoint, local_tip: CheckPoint,
request_heights: impl IntoIterator<Item = u32>, request_heights: impl IntoIterator<Item = u32>,
) -> Result<local_chain::Update, Error> { ) -> Result<local_chain::Update, Error> {
// Fetch latest N (server dependent) blocks from Esplora. The server guarantees these are let request_heights = request_heights.into_iter().collect::<BTreeSet<_>>();
// consistent. let new_tip_height = self.get_height()?;
let mut fetched_blocks = self
.get_blocks(None)?
.into_iter()
.map(|b| (b.time.height, b.id))
.collect::<BTreeMap<u32, BlockHash>>();
let new_tip_height = fetched_blocks
.keys()
.last()
.copied()
.expect("must atleast have one block");
// Fetch blocks of heights that the caller is interested in, skipping blocks that are // atomically fetch blocks from esplora
// already fetched when constructing `fetched_blocks`. let mut fetched_blocks = {
let heights = (0..=new_tip_height).rev();
let hashes = self
.get_blocks(Some(new_tip_height))?
.into_iter()
.map(|b| b.id);
heights.zip(hashes).collect::<BTreeMap<u32, BlockHash>>()
};
// fetch heights that the caller is interested in
for height in request_heights { for height in request_heights {
// do not fetch blocks higher than remote tip // do not fetch blocks higher than remote tip
if height > new_tip_height { if height > new_tip_height {
@@ -119,42 +111,90 @@ impl EsploraExt for esplora_client::BlockingClient {
} }
// only fetch what is missing // only fetch what is missing
if let btree_map::Entry::Vacant(entry) = fetched_blocks.entry(height) { if let btree_map::Entry::Vacant(entry) = fetched_blocks.entry(height) {
// ❗The return value of `get_block_hash` is not strictly guaranteed to be consistent let hash = self.get_block_hash(height)?;
// with the chain at the time of `get_blocks` above (there could have been a deep entry.insert(hash);
// re-org). Since `get_blocks` returns 10 (or so) blocks we are assuming that it's
// not possible to have a re-org deeper than that.
entry.insert(self.get_block_hash(height)?);
} }
} }
// Ensure `fetched_blocks` can create an update that connects with the original chain by // find the earliest point of agreement between local chain and fetched chain
// finding a "Point of Agreement". let earliest_agreement_cp = {
for (height, local_hash) in local_tip.iter().map(|cp| (cp.height(), cp.hash())) { let mut earliest_agreement_cp = Option::<CheckPoint>::None;
if height > new_tip_height {
continue; let local_tip_height = local_tip.height();
for local_cp in local_tip.iter() {
let local_block = local_cp.block_id();
// the updated hash (block hash at this height after the update), can either be:
// 1. a block that already existed in `fetched_blocks`
// 2. a block that exists locally and at least has a depth of ASSUME_FINAL_DEPTH
// 3. otherwise we can freshly fetch the block from remote, which is safe as it
// is guaranteed that this would be at or below ASSUME_FINAL_DEPTH from the
// remote tip
let updated_hash = match fetched_blocks.entry(local_block.height) {
btree_map::Entry::Occupied(entry) => *entry.get(),
btree_map::Entry::Vacant(entry) => *entry.insert(
if local_tip_height - local_block.height >= ASSUME_FINAL_DEPTH {
local_block.hash
} else {
self.get_block_hash(local_block.height)?
},
),
};
// since we may introduce blocks below the point of agreement, we cannot break
// here unconditionally - we only break if we guarantee there are no new heights
// below our current local checkpoint
if local_block.hash == updated_hash {
earliest_agreement_cp = Some(local_cp);
let first_new_height = *fetched_blocks
.keys()
.next()
.expect("must have at least one new block");
if first_new_height >= local_block.height {
break;
}
}
} }
let fetched_hash = match fetched_blocks.entry(height) { earliest_agreement_cp
btree_map::Entry::Occupied(entry) => *entry.get(), };
btree_map::Entry::Vacant(entry) => *entry.insert(self.get_block_hash(height)?),
let tip = {
// first checkpoint to use for the update chain
let first_cp = match earliest_agreement_cp {
Some(cp) => cp,
None => {
let (&height, &hash) = fetched_blocks
.iter()
.next()
.expect("must have at least one new block");
CheckPoint::new(BlockId { height, hash })
}
}; };
// transform fetched chain into the update chain
// We have found point of agreement so the update will connect! fetched_blocks
if fetched_hash == local_hash { // we exclude anything at or below the first cp of the update chain otherwise
break; // building the chain will fail
} .split_off(&(first_cp.height() + 1))
} .into_iter()
.map(|(height, hash)| BlockId { height, hash })
.fold(first_cp, |prev_cp, block| {
prev_cp.push(block).expect("must extend checkpoint")
})
};
Ok(local_chain::Update { Ok(local_chain::Update {
tip: CheckPoint::from_block_ids(fetched_blocks.into_iter().map(BlockId::from)) tip,
.expect("must be in height order"),
introduce_older_blocks: true, introduce_older_blocks: true,
}) })
} }
fn full_scan<K: Ord + Clone>( fn scan_txs_with_keychains<K: Ord + Clone>(
&self, &self,
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>, keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
stop_gap: usize, stop_gap: usize,
parallel_requests: usize, parallel_requests: usize,
) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error> { ) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error> {
@@ -162,7 +202,6 @@ impl EsploraExt for esplora_client::BlockingClient {
let parallel_requests = Ord::max(parallel_requests, 1); let parallel_requests = Ord::max(parallel_requests, 1);
let mut graph = TxGraph::<ConfirmationTimeHeightAnchor>::default(); let mut graph = TxGraph::<ConfirmationTimeHeightAnchor>::default();
let mut last_active_indexes = BTreeMap::<K, u32>::new(); let mut last_active_indexes = BTreeMap::<K, u32>::new();
let stop_gap = Ord::max(stop_gap, 1);
for (keychain, spks) in keychain_spks { for (keychain, spks) in keychain_spks {
let mut spks = spks.into_iter(); let mut spks = spks.into_iter();
@@ -208,34 +247,16 @@ impl EsploraExt for esplora_client::BlockingClient {
if let Some(anchor) = anchor_from_status(&tx.status) { if let Some(anchor) = anchor_from_status(&tx.status) {
let _ = graph.insert_anchor(tx.txid, anchor); let _ = graph.insert_anchor(tx.txid, anchor);
} }
let previous_outputs = tx.vin.iter().filter_map(|vin| {
let prevout = vin.prevout.as_ref()?;
Some((
OutPoint {
txid: vin.txid,
vout: vin.vout,
},
TxOut {
script_pubkey: prevout.scriptpubkey.clone(),
value: prevout.value,
},
))
});
for (outpoint, txout) in previous_outputs {
let _ = graph.insert_txout(outpoint, txout);
}
} }
} }
let last_index = last_index.expect("Must be set since handles wasn't empty."); let last_index = last_index.expect("Must be set since handles wasn't empty.");
let gap_limit_reached = if let Some(i) = last_active_index { let past_gap_limit = if let Some(i) = last_active_index {
last_index >= i.saturating_add(stop_gap as u32) last_index > i.saturating_add(stop_gap as u32)
} else { } else {
last_index + 1 >= stop_gap as u32 last_index >= stop_gap as u32
}; };
if gap_limit_reached { if past_gap_limit {
break; break;
} }
} }
@@ -245,31 +266,6 @@ impl EsploraExt for esplora_client::BlockingClient {
} }
} }
Ok((graph, last_active_indexes))
}
fn sync(
&self,
misc_spks: impl IntoIterator<Item = ScriptBuf>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
parallel_requests: usize,
) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
let mut graph = self
.full_scan(
[(
(),
misc_spks
.into_iter()
.enumerate()
.map(|(i, spk)| (i as u32, spk)),
)]
.into(),
usize::MAX,
parallel_requests,
)
.map(|(g, _)| g)?;
let mut txids = txids.into_iter(); let mut txids = txids.into_iter();
loop { loop {
let handles = txids let handles = txids
@@ -279,12 +275,7 @@ impl EsploraExt for esplora_client::BlockingClient {
.map(|txid| { .map(|txid| {
std::thread::spawn({ std::thread::spawn({
let client = self.clone(); let client = self.clone();
move || { move || client.get_tx_status(&txid).map(|s| (txid, s))
client
.get_tx_status(&txid)
.map_err(Box::new)
.map(|s| (txid, s))
}
}) })
}) })
.collect::<Vec<JoinHandle<Result<(Txid, TxStatus), Error>>>>(); .collect::<Vec<JoinHandle<Result<(Txid, TxStatus), Error>>>>();
@@ -301,7 +292,7 @@ impl EsploraExt for esplora_client::BlockingClient {
} }
} }
for op in outpoints { for op in outpoints.into_iter() {
if graph.get_tx(op.txid).is_none() { if graph.get_tx(op.txid).is_none() {
if let Some(tx) = self.get_tx(&op.txid)? { if let Some(tx) = self.get_tx(&op.txid)? {
let _ = graph.insert_tx(tx); let _ = graph.insert_tx(tx);
@@ -326,6 +317,7 @@ impl EsploraExt for esplora_client::BlockingClient {
} }
} }
} }
Ok(graph)
Ok((graph, last_active_indexes))
} }
} }

View File

@@ -1,21 +1,4 @@
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
//! This crate is used for updating structures of [`bdk_chain`] with data from an Esplora server.
//!
//! The two primary methods are [`EsploraExt::sync`] and [`EsploraExt::full_scan`]. In most cases
//! [`EsploraExt::sync`] is used to sync the transaction histories of scripts that the application
//! cares about, for example the scripts for all the receive addresses of a Wallet's keychain that it
//! has shown a user. [`EsploraExt::full_scan`] is meant to be used when importing or restoring a
//! keychain where the range of possibly used scripts is not known. In this case it is necessary to
//! scan all keychain scripts until a number (the "stop gap") of unused scripts is discovered. For a
//! sync or full scan the user receives relevant blockchain data and output updates for [`bdk_chain`]
//! via a new [`TxGraph`] to be appended to any existing [`TxGraph`] data.
//!
//! Refer to [`example_esplora`] for a complete example.
//!
//! [`TxGraph`]: bdk_chain::tx_graph::TxGraph
//! [`example_esplora`]: https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_esplora
use bdk_chain::{BlockId, ConfirmationTimeHeightAnchor}; use bdk_chain::{BlockId, ConfirmationTimeHeightAnchor};
use esplora_client::TxStatus; use esplora_client::TxStatus;
@@ -31,6 +14,8 @@ mod async_ext;
#[cfg(feature = "async")] #[cfg(feature = "async")]
pub use async_ext::*; pub use async_ext::*;
const ASSUME_FINAL_DEPTH: u32 = 15;
fn anchor_from_status(status: &TxStatus) -> Option<ConfirmationTimeHeightAnchor> { fn anchor_from_status(status: &TxStatus) -> Option<ConfirmationTimeHeightAnchor> {
if let TxStatus { if let TxStatus {
block_height: Some(height), block_height: Some(height),

View File

@@ -1,21 +1,68 @@
use bdk_esplora::EsploraAsyncExt; use bdk_esplora::EsploraAsyncExt;
use electrsd::bitcoind::anyhow;
use electrsd::bitcoind::bitcoincore_rpc::RpcApi; use electrsd::bitcoind::bitcoincore_rpc::RpcApi;
use esplora_client::{self, Builder}; use electrsd::bitcoind::{self, anyhow, BitcoinD};
use electrsd::{Conf, ElectrsD};
use esplora_client::{self, AsyncClient, Builder};
use std::collections::{BTreeMap, HashSet}; use std::collections::{BTreeMap, HashSet};
use std::str::FromStr; use std::str::FromStr;
use std::thread::sleep; use std::thread::sleep;
use std::time::Duration; use std::time::Duration;
use bdk_chain::bitcoin::{Address, Amount, Txid}; use bdk_chain::bitcoin::{Address, Amount, BlockHash, Txid};
use bdk_testenv::TestEnv;
struct TestEnv {
bitcoind: BitcoinD,
#[allow(dead_code)]
electrsd: ElectrsD,
client: AsyncClient,
}
impl TestEnv {
fn new() -> Result<Self, anyhow::Error> {
let bitcoind_exe =
bitcoind::downloaded_exe_path().expect("bitcoind version feature must be enabled");
let bitcoind = BitcoinD::new(bitcoind_exe).unwrap();
let mut electrs_conf = Conf::default();
electrs_conf.http_enabled = true;
let electrs_exe =
electrsd::downloaded_exe_path().expect("electrs version feature must be enabled");
let electrsd = ElectrsD::with_conf(electrs_exe, &bitcoind, &electrs_conf)?;
let base_url = format!("http://{}", &electrsd.esplora_url.clone().unwrap());
let client = Builder::new(base_url.as_str()).build_async()?;
Ok(Self {
bitcoind,
electrsd,
client,
})
}
fn mine_blocks(
&self,
count: usize,
address: Option<Address>,
) -> anyhow::Result<Vec<BlockHash>> {
let coinbase_address = match address {
Some(address) => address,
None => self
.bitcoind
.client
.get_new_address(None, None)?
.assume_checked(),
};
let block_hashes = self
.bitcoind
.client
.generate_to_address(count as _, &coinbase_address)?;
Ok(block_hashes)
}
}
#[tokio::test] #[tokio::test]
pub async fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> { pub async fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
let env = TestEnv::new()?; let env = TestEnv::new()?;
let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
let client = Builder::new(base_url.as_str()).build_async()?;
let receive_address0 = let receive_address0 =
Address::from_str("bcrt1qc6fweuf4xjvz4x3gx3t9e0fh4hvqyu2qw4wvxm")?.assume_checked(); Address::from_str("bcrt1qc6fweuf4xjvz4x3gx3t9e0fh4hvqyu2qw4wvxm")?.assume_checked();
let receive_address1 = let receive_address1 =
@@ -48,12 +95,13 @@ pub async fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
None, None,
)?; )?;
let _block_hashes = env.mine_blocks(1, None)?; let _block_hashes = env.mine_blocks(1, None)?;
while client.get_height().await.unwrap() < 102 { while env.client.get_height().await.unwrap() < 102 {
sleep(Duration::from_millis(10)) sleep(Duration::from_millis(10))
} }
let graph_update = client let graph_update = env
.sync( .client
.scan_txs(
misc_spks.into_iter(), misc_spks.into_iter(),
vec![].into_iter(), vec![].into_iter(),
vec![].into_iter(), vec![].into_iter(),
@@ -61,28 +109,6 @@ pub async fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
) )
.await?; .await?;
// Check to see if we have the floating txouts available from our two created transactions'
// previous outputs in order to calculate transaction fees.
for tx in graph_update.full_txs() {
// Retrieve the calculated fee from `TxGraph`, which will panic if we do not have the
// floating txouts available from the transactions' previous outputs.
let fee = graph_update.calculate_fee(tx.tx).expect("Fee must exist");
// Retrieve the fee in the transaction data from `bitcoind`.
let tx_fee = env
.bitcoind
.client
.get_transaction(&tx.txid, None)
.expect("Tx must exist")
.fee
.expect("Fee must exist")
.abs()
.to_sat() as u64;
// Check that the calculated fee matches the fee from the transaction data.
assert_eq!(fee, tx_fee);
}
let mut graph_update_txids: Vec<Txid> = graph_update.full_txs().map(|tx| tx.txid).collect(); let mut graph_update_txids: Vec<Txid> = graph_update.full_txs().map(|tx| tx.txid).collect();
graph_update_txids.sort(); graph_update_txids.sort();
let mut expected_txids = vec![txid1, txid2]; let mut expected_txids = vec![txid1, txid2];
@@ -91,12 +117,10 @@ pub async fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
Ok(()) Ok(())
} }
/// Test the bounds of the address scan depending on the `stop_gap`. /// Test the bounds of the address scan depending on the gap limit.
#[tokio::test] #[tokio::test]
pub async fn test_async_update_tx_graph_stop_gap() -> anyhow::Result<()> { pub async fn test_async_update_tx_graph_gap_limit() -> anyhow::Result<()> {
let env = TestEnv::new()?; let env = TestEnv::new()?;
let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
let client = Builder::new(base_url.as_str()).build_async()?;
let _block_hashes = env.mine_blocks(101, None)?; let _block_hashes = env.mine_blocks(101, None)?;
// Now let's test the gap limit. First of all get a chain of 10 addresses. // Now let's test the gap limit. First of all get a chain of 10 addresses.
@@ -136,16 +160,34 @@ pub async fn test_async_update_tx_graph_stop_gap() -> anyhow::Result<()> {
None, None,
)?; )?;
let _block_hashes = env.mine_blocks(1, None)?; let _block_hashes = env.mine_blocks(1, None)?;
while client.get_height().await.unwrap() < 103 { while env.client.get_height().await.unwrap() < 103 {
sleep(Duration::from_millis(10)) sleep(Duration::from_millis(10))
} }
// A scan with a gap limit of 3 won't find the transaction, but a scan with a gap limit of 4 // A scan with a gap limit of 2 won't find the transaction, but a scan with a gap limit of 3
// will. // will.
let (graph_update, active_indices) = client.full_scan(keychains.clone(), 3, 1).await?; let (graph_update, active_indices) = env
.client
.scan_txs_with_keychains(
keychains.clone(),
vec![].into_iter(),
vec![].into_iter(),
2,
1,
)
.await?;
assert!(graph_update.full_txs().next().is_none()); assert!(graph_update.full_txs().next().is_none());
assert!(active_indices.is_empty()); assert!(active_indices.is_empty());
let (graph_update, active_indices) = client.full_scan(keychains.clone(), 4, 1).await?; let (graph_update, active_indices) = env
.client
.scan_txs_with_keychains(
keychains.clone(),
vec![].into_iter(),
vec![].into_iter(),
3,
1,
)
.await?;
assert_eq!(graph_update.full_txs().next().unwrap().txid, txid_4th_addr); assert_eq!(graph_update.full_txs().next().unwrap().txid, txid_4th_addr);
assert_eq!(active_indices[&0], 3); assert_eq!(active_indices[&0], 3);
@@ -161,18 +203,30 @@ pub async fn test_async_update_tx_graph_stop_gap() -> anyhow::Result<()> {
None, None,
)?; )?;
let _block_hashes = env.mine_blocks(1, None)?; let _block_hashes = env.mine_blocks(1, None)?;
while client.get_height().await.unwrap() < 104 { while env.client.get_height().await.unwrap() < 104 {
sleep(Duration::from_millis(10)) sleep(Duration::from_millis(10))
} }
// A scan with gap limit 5 won't find the second transaction, but a scan with gap limit 6 will. // A scan with gap limit 4 won't find the second transaction, but a scan with gap limit 5 will.
// The last active indice won't be updated in the first case but will in the second one. // The last active indice won't be updated in the first case but will in the second one.
let (graph_update, active_indices) = client.full_scan(keychains.clone(), 5, 1).await?; let (graph_update, active_indices) = env
.client
.scan_txs_with_keychains(
keychains.clone(),
vec![].into_iter(),
vec![].into_iter(),
4,
1,
)
.await?;
let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect(); let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
assert_eq!(txs.len(), 1); assert_eq!(txs.len(), 1);
assert!(txs.contains(&txid_4th_addr)); assert!(txs.contains(&txid_4th_addr));
assert_eq!(active_indices[&0], 3); assert_eq!(active_indices[&0], 3);
let (graph_update, active_indices) = client.full_scan(keychains, 6, 1).await?; let (graph_update, active_indices) = env
.client
.scan_txs_with_keychains(keychains, vec![].into_iter(), vec![].into_iter(), 5, 1)
.await?;
let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect(); let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
assert_eq!(txs.len(), 2); assert_eq!(txs.len(), 2);
assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr)); assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr));

View File

@@ -1,37 +1,68 @@
use bdk_chain::local_chain::LocalChain;
use bdk_chain::BlockId;
use bdk_esplora::EsploraExt; use bdk_esplora::EsploraExt;
use electrsd::bitcoind::anyhow;
use electrsd::bitcoind::bitcoincore_rpc::RpcApi; use electrsd::bitcoind::bitcoincore_rpc::RpcApi;
use esplora_client::{self, Builder}; use electrsd::bitcoind::{self, anyhow, BitcoinD};
use std::collections::{BTreeMap, BTreeSet, HashSet}; use electrsd::{Conf, ElectrsD};
use esplora_client::{self, BlockingClient, Builder};
use std::collections::{BTreeMap, HashSet};
use std::str::FromStr; use std::str::FromStr;
use std::thread::sleep; use std::thread::sleep;
use std::time::Duration; use std::time::Duration;
use bdk_chain::bitcoin::{Address, Amount, Txid}; use bdk_chain::bitcoin::{Address, Amount, BlockHash, Txid};
use bdk_testenv::TestEnv;
macro_rules! h { struct TestEnv {
($index:literal) => {{ bitcoind: BitcoinD,
bdk_chain::bitcoin::hashes::Hash::hash($index.as_bytes()) #[allow(dead_code)]
}}; electrsd: ElectrsD,
client: BlockingClient,
} }
macro_rules! local_chain { impl TestEnv {
[ $(($height:expr, $block_hash:expr)), * ] => {{ fn new() -> Result<Self, anyhow::Error> {
#[allow(unused_mut)] let bitcoind_exe =
bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $block_hash).into()),*].into_iter().collect()) bitcoind::downloaded_exe_path().expect("bitcoind version feature must be enabled");
.expect("chain must have genesis block") let bitcoind = BitcoinD::new(bitcoind_exe).unwrap();
}};
let mut electrs_conf = Conf::default();
electrs_conf.http_enabled = true;
let electrs_exe =
electrsd::downloaded_exe_path().expect("electrs version feature must be enabled");
let electrsd = ElectrsD::with_conf(electrs_exe, &bitcoind, &electrs_conf)?;
let base_url = format!("http://{}", &electrsd.esplora_url.clone().unwrap());
let client = Builder::new(base_url.as_str()).build_blocking()?;
Ok(Self {
bitcoind,
electrsd,
client,
})
}
fn mine_blocks(
&self,
count: usize,
address: Option<Address>,
) -> anyhow::Result<Vec<BlockHash>> {
let coinbase_address = match address {
Some(address) => address,
None => self
.bitcoind
.client
.get_new_address(None, None)?
.assume_checked(),
};
let block_hashes = self
.bitcoind
.client
.generate_to_address(count as _, &coinbase_address)?;
Ok(block_hashes)
}
} }
#[test] #[test]
pub fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> { pub fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
let env = TestEnv::new()?; let env = TestEnv::new()?;
let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
let client = Builder::new(base_url.as_str()).build_blocking()?;
let receive_address0 = let receive_address0 =
Address::from_str("bcrt1qc6fweuf4xjvz4x3gx3t9e0fh4hvqyu2qw4wvxm")?.assume_checked(); Address::from_str("bcrt1qc6fweuf4xjvz4x3gx3t9e0fh4hvqyu2qw4wvxm")?.assume_checked();
let receive_address1 = let receive_address1 =
@@ -64,39 +95,17 @@ pub fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
None, None,
)?; )?;
let _block_hashes = env.mine_blocks(1, None)?; let _block_hashes = env.mine_blocks(1, None)?;
while client.get_height().unwrap() < 102 { while env.client.get_height().unwrap() < 102 {
sleep(Duration::from_millis(10)) sleep(Duration::from_millis(10))
} }
let graph_update = client.sync( let graph_update = env.client.scan_txs(
misc_spks.into_iter(), misc_spks.into_iter(),
vec![].into_iter(), vec![].into_iter(),
vec![].into_iter(), vec![].into_iter(),
1, 1,
)?; )?;
// Check to see if we have the floating txouts available from our two created transactions'
// previous outputs in order to calculate transaction fees.
for tx in graph_update.full_txs() {
// Retrieve the calculated fee from `TxGraph`, which will panic if we do not have the
// floating txouts available from the transactions' previous outputs.
let fee = graph_update.calculate_fee(tx.tx).expect("Fee must exist");
// Retrieve the fee in the transaction data from `bitcoind`.
let tx_fee = env
.bitcoind
.client
.get_transaction(&tx.txid, None)
.expect("Tx must exist")
.fee
.expect("Fee must exist")
.abs()
.to_sat() as u64;
// Check that the calculated fee matches the fee from the transaction data.
assert_eq!(fee, tx_fee);
}
let mut graph_update_txids: Vec<Txid> = graph_update.full_txs().map(|tx| tx.txid).collect(); let mut graph_update_txids: Vec<Txid> = graph_update.full_txs().map(|tx| tx.txid).collect();
graph_update_txids.sort(); graph_update_txids.sort();
let mut expected_txids = vec![txid1, txid2]; let mut expected_txids = vec![txid1, txid2];
@@ -106,12 +115,10 @@ pub fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
Ok(()) Ok(())
} }
/// Test the bounds of the address scan depending on the `stop_gap`. /// Test the bounds of the address scan depending on the gap limit.
#[test] #[test]
pub fn test_update_tx_graph_stop_gap() -> anyhow::Result<()> { pub fn test_update_tx_graph_gap_limit() -> anyhow::Result<()> {
let env = TestEnv::new()?; let env = TestEnv::new()?;
let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
let client = Builder::new(base_url.as_str()).build_blocking()?;
let _block_hashes = env.mine_blocks(101, None)?; let _block_hashes = env.mine_blocks(101, None)?;
// Now let's test the gap limit. First of all get a chain of 10 addresses. // Now let's test the gap limit. First of all get a chain of 10 addresses.
@@ -151,16 +158,28 @@ pub fn test_update_tx_graph_stop_gap() -> anyhow::Result<()> {
None, None,
)?; )?;
let _block_hashes = env.mine_blocks(1, None)?; let _block_hashes = env.mine_blocks(1, None)?;
while client.get_height().unwrap() < 103 { while env.client.get_height().unwrap() < 103 {
sleep(Duration::from_millis(10)) sleep(Duration::from_millis(10))
} }
// A scan with a stop_gap of 3 won't find the transaction, but a scan with a gap limit of 4 // A scan with a gap limit of 2 won't find the transaction, but a scan with a gap limit of 3
// will. // will.
let (graph_update, active_indices) = client.full_scan(keychains.clone(), 3, 1)?; let (graph_update, active_indices) = env.client.scan_txs_with_keychains(
keychains.clone(),
vec![].into_iter(),
vec![].into_iter(),
2,
1,
)?;
assert!(graph_update.full_txs().next().is_none()); assert!(graph_update.full_txs().next().is_none());
assert!(active_indices.is_empty()); assert!(active_indices.is_empty());
let (graph_update, active_indices) = client.full_scan(keychains.clone(), 4, 1)?; let (graph_update, active_indices) = env.client.scan_txs_with_keychains(
keychains.clone(),
vec![].into_iter(),
vec![].into_iter(),
3,
1,
)?;
assert_eq!(graph_update.full_txs().next().unwrap().txid, txid_4th_addr); assert_eq!(graph_update.full_txs().next().unwrap().txid, txid_4th_addr);
assert_eq!(active_indices[&0], 3); assert_eq!(active_indices[&0], 3);
@@ -176,18 +195,30 @@ pub fn test_update_tx_graph_stop_gap() -> anyhow::Result<()> {
None, None,
)?; )?;
let _block_hashes = env.mine_blocks(1, None)?; let _block_hashes = env.mine_blocks(1, None)?;
while client.get_height().unwrap() < 104 { while env.client.get_height().unwrap() < 104 {
sleep(Duration::from_millis(10)) sleep(Duration::from_millis(10))
} }
// A scan with gap limit 5 won't find the second transaction, but a scan with gap limit 6 will. // A scan with gap limit 4 won't find the second transaction, but a scan with gap limit 5 will.
// The last active indice won't be updated in the first case but will in the second one. // The last active indice won't be updated in the first case but will in the second one.
let (graph_update, active_indices) = client.full_scan(keychains.clone(), 5, 1)?; let (graph_update, active_indices) = env.client.scan_txs_with_keychains(
keychains.clone(),
vec![].into_iter(),
vec![].into_iter(),
4,
1,
)?;
let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect(); let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
assert_eq!(txs.len(), 1); assert_eq!(txs.len(), 1);
assert!(txs.contains(&txid_4th_addr)); assert!(txs.contains(&txid_4th_addr));
assert_eq!(active_indices[&0], 3); assert_eq!(active_indices[&0], 3);
let (graph_update, active_indices) = client.full_scan(keychains, 6, 1)?; let (graph_update, active_indices) = env.client.scan_txs_with_keychains(
keychains,
vec![].into_iter(),
vec![].into_iter(),
5,
1,
)?;
let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect(); let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
assert_eq!(txs.len(), 2); assert_eq!(txs.len(), 2);
assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr)); assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr));
@@ -195,181 +226,3 @@ pub fn test_update_tx_graph_stop_gap() -> anyhow::Result<()> {
Ok(()) Ok(())
} }
#[test]
fn update_local_chain() -> anyhow::Result<()> {
const TIP_HEIGHT: u32 = 50;
let env = TestEnv::new()?;
let blocks = {
let bitcoind_client = &env.bitcoind.client;
assert_eq!(bitcoind_client.get_block_count()?, 1);
[
(0, bitcoind_client.get_block_hash(0)?),
(1, bitcoind_client.get_block_hash(1)?),
]
.into_iter()
.chain((2..).zip(env.mine_blocks((TIP_HEIGHT - 1) as usize, None)?))
.collect::<BTreeMap<_, _>>()
};
// so new blocks can be seen by Electrs
let env = env.reset_electrsd()?;
let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
let client = Builder::new(base_url.as_str()).build_blocking()?;
struct TestCase {
name: &'static str,
chain: LocalChain,
request_heights: &'static [u32],
exp_update_heights: &'static [u32],
}
let test_cases = [
TestCase {
name: "request_later_blocks",
chain: local_chain![(0, blocks[&0]), (21, blocks[&21])],
request_heights: &[22, 25, 28],
exp_update_heights: &[21, 22, 25, 28],
},
TestCase {
name: "request_prev_blocks",
chain: local_chain![(0, blocks[&0]), (1, blocks[&1]), (5, blocks[&5])],
request_heights: &[4],
exp_update_heights: &[4, 5],
},
TestCase {
name: "request_prev_blocks_2",
chain: local_chain![(0, blocks[&0]), (1, blocks[&1]), (10, blocks[&10])],
request_heights: &[4, 6],
exp_update_heights: &[4, 6, 10],
},
TestCase {
name: "request_later_and_prev_blocks",
chain: local_chain![(0, blocks[&0]), (7, blocks[&7]), (11, blocks[&11])],
request_heights: &[8, 9, 15],
exp_update_heights: &[8, 9, 11, 15],
},
TestCase {
name: "request_tip_only",
chain: local_chain![(0, blocks[&0]), (5, blocks[&5]), (49, blocks[&49])],
request_heights: &[TIP_HEIGHT],
exp_update_heights: &[49],
},
TestCase {
name: "request_nothing",
chain: local_chain![(0, blocks[&0]), (13, blocks[&13]), (23, blocks[&23])],
request_heights: &[],
exp_update_heights: &[23],
},
TestCase {
name: "request_nothing_during_reorg",
chain: local_chain![(0, blocks[&0]), (13, blocks[&13]), (23, h!("23"))],
request_heights: &[],
exp_update_heights: &[13, 23],
},
TestCase {
name: "request_nothing_during_reorg_2",
chain: local_chain![
(0, blocks[&0]),
(21, blocks[&21]),
(22, h!("22")),
(23, h!("23"))
],
request_heights: &[],
exp_update_heights: &[21, 22, 23],
},
TestCase {
name: "request_prev_blocks_during_reorg",
chain: local_chain![
(0, blocks[&0]),
(21, blocks[&21]),
(22, h!("22")),
(23, h!("23"))
],
request_heights: &[17, 20],
exp_update_heights: &[17, 20, 21, 22, 23],
},
TestCase {
name: "request_later_blocks_during_reorg",
chain: local_chain![
(0, blocks[&0]),
(9, blocks[&9]),
(22, h!("22")),
(23, h!("23"))
],
request_heights: &[25, 27],
exp_update_heights: &[9, 22, 23, 25, 27],
},
TestCase {
name: "request_later_blocks_during_reorg_2",
chain: local_chain![(0, blocks[&0]), (9, h!("9"))],
request_heights: &[10],
exp_update_heights: &[0, 9, 10],
},
TestCase {
name: "request_later_and_prev_blocks_during_reorg",
chain: local_chain![(0, blocks[&0]), (1, blocks[&1]), (9, h!("9"))],
request_heights: &[8, 11],
exp_update_heights: &[1, 8, 9, 11],
},
];
for (i, t) in test_cases.into_iter().enumerate() {
println!("Case {}: {}", i, t.name);
let mut chain = t.chain;
let update = client
.update_local_chain(chain.tip(), t.request_heights.iter().copied())
.map_err(|err| {
anyhow::format_err!("[{}:{}] `update_local_chain` failed: {}", i, t.name, err)
})?;
let update_blocks = update
.tip
.iter()
.map(|cp| cp.block_id())
.collect::<BTreeSet<_>>();
let exp_update_blocks = t
.exp_update_heights
.iter()
.map(|&height| {
let hash = blocks[&height];
BlockId { height, hash }
})
.chain(
// Electrs Esplora `get_block` call fetches 10 blocks which is included in the
// update
blocks
.range(TIP_HEIGHT - 9..)
.map(|(&height, &hash)| BlockId { height, hash }),
)
.collect::<BTreeSet<_>>();
assert_eq!(
update_blocks, exp_update_blocks,
"[{}:{}] unexpected update",
i, t.name
);
let _ = chain
.apply_update(update)
.unwrap_or_else(|err| panic!("[{}:{}] update failed to apply: {}", i, t.name, err));
// all requested heights must exist in the final chain
for height in t.request_heights {
let exp_blockhash = blocks.get(height).expect("block must exist in bitcoind");
assert_eq!(
chain.blocks().get(height),
Some(exp_blockhash),
"[{}:{}] block {}:{} must exist in final chain",
i,
t.name,
height,
exp_blockhash
);
}
}
Ok(())
}

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "bdk_file_store" name = "bdk_file_store"
version = "0.8.0" version = "0.2.0"
edition = "2021" edition = "2021"
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
repository = "https://github.com/bitcoindevkit/bdk" repository = "https://github.com/bitcoindevkit/bdk"
@@ -11,7 +11,7 @@ authors = ["Bitcoin Dev Kit Developers"]
readme = "README.md" readme = "README.md"
[dependencies] [dependencies]
bdk_chain = { path = "../chain", version = "0.11.0", features = [ "serde", "miniscript" ] } bdk_chain = { path = "../chain", version = "0.6.0", features = [ "serde", "miniscript" ] }
bincode = { version = "1" } bincode = { version = "1" }
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }

View File

@@ -1,7 +1,7 @@
use bincode::Options; use bincode::Options;
use std::{ use std::{
fs::File, fs::File,
io::{self, BufReader, Seek}, io::{self, Seek},
marker::PhantomData, marker::PhantomData,
}; };
@@ -14,9 +14,8 @@ use crate::bincode_options;
/// ///
/// [`next`]: Self::next /// [`next`]: Self::next
pub struct EntryIter<'t, T> { pub struct EntryIter<'t, T> {
/// Buffered reader around the file db_file: Option<&'t mut File>,
db_file: BufReader<&'t mut File>,
finished: bool,
/// The file position for the first read of `db_file`. /// The file position for the first read of `db_file`.
start_pos: Option<u64>, start_pos: Option<u64>,
types: PhantomData<T>, types: PhantomData<T>,
@@ -25,9 +24,8 @@ pub struct EntryIter<'t, T> {
impl<'t, T> EntryIter<'t, T> { impl<'t, T> EntryIter<'t, T> {
pub fn new(start_pos: u64, db_file: &'t mut File) -> Self { pub fn new(start_pos: u64, db_file: &'t mut File) -> Self {
Self { Self {
db_file: BufReader::new(db_file), db_file: Some(db_file),
start_pos: Some(start_pos), start_pos: Some(start_pos),
finished: false,
types: PhantomData, types: PhantomData,
} }
} }
@@ -40,44 +38,44 @@ where
type Item = Result<T, IterError>; type Item = Result<T, IterError>;
fn next(&mut self) -> Option<Self::Item> { fn next(&mut self) -> Option<Self::Item> {
if self.finished { // closure which reads a single entry starting from `self.pos`
return None; let read_one = |f: &mut File, start_pos: Option<u64>| -> Result<Option<T>, IterError> {
} let pos = match start_pos {
(|| { Some(pos) => f.seek(io::SeekFrom::Start(pos))?,
if let Some(start) = self.start_pos.take() { None => f.stream_position()?,
self.db_file.seek(io::SeekFrom::Start(start))?; };
}
let pos_before_read = self.db_file.stream_position()?; match bincode_options().deserialize_from(&*f) {
match bincode_options().deserialize_from(&mut self.db_file) { Ok(changeset) => {
Ok(changeset) => Ok(Some(changeset)), f.stream_position()?;
Ok(Some(changeset))
}
Err(e) => { Err(e) => {
self.finished = true;
let pos_after_read = self.db_file.stream_position()?;
// allow unexpected EOF if 0 bytes were read
if let bincode::ErrorKind::Io(inner) = &*e { if let bincode::ErrorKind::Io(inner) = &*e {
if inner.kind() == io::ErrorKind::UnexpectedEof if inner.kind() == io::ErrorKind::UnexpectedEof {
&& pos_after_read == pos_before_read let eof = f.seek(io::SeekFrom::End(0))?;
{ if pos == eof {
return Ok(None); return Ok(None);
}
} }
} }
self.db_file.seek(io::SeekFrom::Start(pos_before_read))?; f.seek(io::SeekFrom::Start(pos))?;
Err(IterError::Bincode(*e)) Err(IterError::Bincode(*e))
} }
} }
})() };
.transpose()
let result = read_one(self.db_file.as_mut()?, self.start_pos.take());
if result.is_err() {
self.db_file = None;
}
result.transpose()
} }
} }
impl<'t, T> Drop for EntryIter<'t, T> { impl From<io::Error> for IterError {
fn drop(&mut self) { fn from(value: io::Error) -> Self {
// This syncs the underlying file's offset with the buffer's position. This way, we IterError::Io(value)
// maintain the correct position to start the next read/write.
if let Ok(pos) = self.db_file.stream_position() {
let _ = self.db_file.get_mut().seek(io::SeekFrom::Start(pos));
}
} }
} }
@@ -99,10 +97,4 @@ impl core::fmt::Display for IterError {
} }
} }
impl From<io::Error> for IterError {
fn from(value: io::Error) -> Self {
IterError::Io(value)
}
}
impl std::error::Error for IterError {} impl std::error::Error for IterError {}

View File

@@ -13,14 +13,14 @@ pub(crate) fn bincode_options() -> impl bincode::Options {
/// Error that occurs due to problems encountered with the file. /// Error that occurs due to problems encountered with the file.
#[derive(Debug)] #[derive(Debug)]
pub enum FileError { pub enum FileError<'a> {
/// IO error, this may mean that the file is too short. /// IO error, this may mean that the file is too short.
Io(io::Error), Io(io::Error),
/// Magic bytes do not match what is expected. /// Magic bytes do not match what is expected.
InvalidMagicBytes { got: Vec<u8>, expected: Vec<u8> }, InvalidMagicBytes { got: Vec<u8>, expected: &'a [u8] },
} }
impl core::fmt::Display for FileError { impl<'a> core::fmt::Display for FileError<'a> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self { match self {
Self::Io(e) => write!(f, "io error trying to read file: {}", e), Self::Io(e) => write!(f, "io error trying to read file: {}", e),
@@ -33,10 +33,10 @@ impl core::fmt::Display for FileError {
} }
} }
impl From<io::Error> for FileError { impl<'a> From<io::Error> for FileError<'a> {
fn from(value: io::Error) -> Self { fn from(value: io::Error) -> Self {
Self::Io(value) Self::Io(value)
} }
} }
impl std::error::Error for FileError {} impl<'a> std::error::Error for FileError<'a> {}

View File

@@ -15,13 +15,13 @@ use crate::{bincode_options, EntryIter, FileError, IterError};
/// ///
/// The changesets are the results of altering a tracker implementation (`T`). /// The changesets are the results of altering a tracker implementation (`T`).
#[derive(Debug)] #[derive(Debug)]
pub struct Store<C> { pub struct Store<'a, C> {
magic_len: usize, magic: &'a [u8],
db_file: File, db_file: File,
marker: PhantomData<C>, marker: PhantomData<C>,
} }
impl<C> PersistBackend<C> for Store<C> impl<'a, C> PersistBackend<C> for Store<'a, C>
where where
C: Append + serde::Serialize + serde::de::DeserializeOwned, C: Append + serde::Serialize + serde::de::DeserializeOwned,
{ {
@@ -38,7 +38,7 @@ where
} }
} }
impl<C> Store<C> impl<'a, C> Store<'a, C>
where where
C: Append + serde::Serialize + serde::de::DeserializeOwned, C: Append + serde::Serialize + serde::de::DeserializeOwned,
{ {
@@ -48,7 +48,7 @@ where
/// the `Store` in the future with [`open`]. /// the `Store` in the future with [`open`].
/// ///
/// [`open`]: Store::open /// [`open`]: Store::open
pub fn create_new<P>(magic: &[u8], file_path: P) -> Result<Self, FileError> pub fn create_new<P>(magic: &'a [u8], file_path: P) -> Result<Self, FileError>
where where
P: AsRef<Path>, P: AsRef<Path>,
{ {
@@ -64,11 +64,10 @@ where
.create(true) .create(true)
.read(true) .read(true)
.write(true) .write(true)
.truncate(true)
.open(file_path)?; .open(file_path)?;
f.write_all(magic)?; f.write_all(magic)?;
Ok(Self { Ok(Self {
magic_len: magic.len(), magic,
db_file: f, db_file: f,
marker: Default::default(), marker: Default::default(),
}) })
@@ -84,7 +83,7 @@ where
/// [`FileError::InvalidMagicBytes`] error variant will be returned. /// [`FileError::InvalidMagicBytes`] error variant will be returned.
/// ///
/// [`create_new`]: Store::create_new /// [`create_new`]: Store::create_new
pub fn open<P>(magic: &[u8], file_path: P) -> Result<Self, FileError> pub fn open<P>(magic: &'a [u8], file_path: P) -> Result<Self, FileError>
where where
P: AsRef<Path>, P: AsRef<Path>,
{ {
@@ -95,24 +94,24 @@ where
if magic_buf != magic { if magic_buf != magic {
return Err(FileError::InvalidMagicBytes { return Err(FileError::InvalidMagicBytes {
got: magic_buf, got: magic_buf,
expected: magic.to_vec(), expected: magic,
}); });
} }
Ok(Self { Ok(Self {
magic_len: magic.len(), magic,
db_file: f, db_file: f,
marker: Default::default(), marker: Default::default(),
}) })
} }
/// Attempt to open existing [`Store`] file; create it if the file is non-existent. /// Attempt to open existing [`Store`] file; create it if the file is non-existant.
/// ///
/// Internally, this calls either [`open`] or [`create_new`]. /// Internally, this calls either [`open`] or [`create_new`].
/// ///
/// [`open`]: Store::open /// [`open`]: Store::open
/// [`create_new`]: Store::create_new /// [`create_new`]: Store::create_new
pub fn open_or_create_new<P>(magic: &[u8], file_path: P) -> Result<Self, FileError> pub fn open_or_create_new<P>(magic: &'a [u8], file_path: P) -> Result<Self, FileError>
where where
P: AsRef<Path>, P: AsRef<Path>,
{ {
@@ -133,14 +132,14 @@ where
/// always iterate over all entries until `None` is returned if you want your next write to go /// always iterate over all entries until `None` is returned if you want your next write to go
/// at the end; otherwise, you will write over existing entries. /// at the end; otherwise, you will write over existing entries.
pub fn iter_changesets(&mut self) -> EntryIter<C> { pub fn iter_changesets(&mut self) -> EntryIter<C> {
EntryIter::new(self.magic_len as u64, &mut self.db_file) EntryIter::new(self.magic.len() as u64, &mut self.db_file)
} }
/// Loads all the changesets that have been stored as one giant changeset. /// Loads all the changesets that have been stored as one giant changeset.
/// ///
/// This function returns the aggregate changeset, or `None` if nothing was persisted. /// This function returns a tuple of the aggregate changeset and a result that indicates
/// If reading or deserializing any of the entries fails, an error is returned that /// whether an error occurred while reading or deserializing one of the entries. If so the
/// consists of all those it was able to read. /// changeset will consist of all of those it was able to read.
/// ///
/// You should usually check the error. In many applications, it may make sense to do a full /// You should usually check the error. In many applications, it may make sense to do a full
/// wallet scan with a stop-gap after getting an error, since it is likely that one of the /// wallet scan with a stop-gap after getting an error, since it is likely that one of the
@@ -220,7 +219,6 @@ mod test {
use bincode::DefaultOptions; use bincode::DefaultOptions;
use std::{ use std::{
collections::BTreeSet,
io::{Read, Write}, io::{Read, Write},
vec::Vec, vec::Vec,
}; };
@@ -230,7 +228,7 @@ mod test {
const TEST_MAGIC_BYTES: [u8; TEST_MAGIC_BYTES_LEN] = const TEST_MAGIC_BYTES: [u8; TEST_MAGIC_BYTES_LEN] =
[98, 100, 107, 102, 115, 49, 49, 49, 49, 49, 49, 49]; [98, 100, 107, 102, 115, 49, 49, 49, 49, 49, 49, 49];
type TestChangeSet = BTreeSet<String>; type TestChangeSet = Vec<String>;
#[derive(Debug)] #[derive(Debug)]
struct TestTracker; struct TestTracker;
@@ -255,7 +253,7 @@ mod test {
fn open_or_create_new() { fn open_or_create_new() {
let temp_dir = tempfile::tempdir().unwrap(); let temp_dir = tempfile::tempdir().unwrap();
let file_path = temp_dir.path().join("db_file"); let file_path = temp_dir.path().join("db_file");
let changeset = BTreeSet::from(["hello".to_string(), "world".to_string()]); let changeset = vec!["hello".to_string(), "world".to_string()];
{ {
let mut db = Store::<TestChangeSet>::open_or_create_new(&TEST_MAGIC_BYTES, &file_path) let mut db = Store::<TestChangeSet>::open_or_create_new(&TEST_MAGIC_BYTES, &file_path)
@@ -306,7 +304,7 @@ mod test {
let mut data = [255_u8; 2000]; let mut data = [255_u8; 2000];
data[..TEST_MAGIC_BYTES_LEN].copy_from_slice(&TEST_MAGIC_BYTES); data[..TEST_MAGIC_BYTES_LEN].copy_from_slice(&TEST_MAGIC_BYTES);
let changeset = TestChangeSet::from(["one".into(), "two".into(), "three!".into()]); let changeset = vec!["one".into(), "two".into(), "three!".into()];
let mut file = NamedTempFile::new().unwrap(); let mut file = NamedTempFile::new().unwrap();
file.write_all(&data).expect("should write"); file.write_all(&data).expect("should write");
@@ -342,119 +340,4 @@ mod test {
assert_eq!(got_bytes, expected_bytes); assert_eq!(got_bytes, expected_bytes);
} }
#[test]
fn last_write_is_short() {
let temp_dir = tempfile::tempdir().unwrap();
let changesets = [
TestChangeSet::from(["1".into()]),
TestChangeSet::from(["2".into(), "3".into()]),
TestChangeSet::from(["4".into(), "5".into(), "6".into()]),
];
let last_changeset = TestChangeSet::from(["7".into(), "8".into(), "9".into()]);
let last_changeset_bytes = bincode_options().serialize(&last_changeset).unwrap();
for short_write_len in 1..last_changeset_bytes.len() - 1 {
let file_path = temp_dir.path().join(format!("{}.dat", short_write_len));
println!("Test file: {:?}", file_path);
// simulate creating a file, writing data where the last write is incomplete
{
let mut db =
Store::<TestChangeSet>::create_new(&TEST_MAGIC_BYTES, &file_path).unwrap();
for changeset in &changesets {
db.append_changeset(changeset).unwrap();
}
// this is the incomplete write
db.db_file
.write_all(&last_changeset_bytes[..short_write_len])
.unwrap();
}
// load file again and aggregate changesets
// write the last changeset again (this time it succeeds)
{
let mut db = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, &file_path).unwrap();
let err = db
.aggregate_changesets()
.expect_err("should return error as last read is short");
assert_eq!(
err.changeset,
changesets.iter().cloned().reduce(|mut acc, cs| {
Append::append(&mut acc, cs);
acc
}),
"should recover all changesets that are written in full",
);
db.db_file.write_all(&last_changeset_bytes).unwrap();
}
// load file again - this time we should successfully aggregate all changesets
{
let mut db = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, &file_path).unwrap();
let aggregated_changesets = db
.aggregate_changesets()
.expect("aggregating all changesets should succeed");
assert_eq!(
aggregated_changesets,
changesets
.iter()
.cloned()
.chain(core::iter::once(last_changeset.clone()))
.reduce(|mut acc, cs| {
Append::append(&mut acc, cs);
acc
}),
"should recover all changesets",
);
}
}
}
#[test]
fn write_after_short_read() {
let temp_dir = tempfile::tempdir().unwrap();
let changesets = (0..20)
.map(|n| TestChangeSet::from([format!("{}", n)]))
.collect::<Vec<_>>();
let last_changeset = TestChangeSet::from(["last".into()]);
for read_count in 0..changesets.len() {
let file_path = temp_dir.path().join(format!("{}.dat", read_count));
println!("Test file: {:?}", file_path);
// First, we create the file with all the changesets!
let mut db = Store::<TestChangeSet>::create_new(&TEST_MAGIC_BYTES, &file_path).unwrap();
for changeset in &changesets {
db.append_changeset(changeset).unwrap();
}
drop(db);
// We re-open the file and read `read_count` number of changesets.
let mut db = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, &file_path).unwrap();
let mut exp_aggregation = db
.iter_changesets()
.take(read_count)
.map(|r| r.expect("must read valid changeset"))
.fold(TestChangeSet::default(), |mut acc, v| {
Append::append(&mut acc, v);
acc
});
// We write after a short read.
db.write_changes(&last_changeset)
.expect("last write must succeed");
Append::append(&mut exp_aggregation, last_changeset.clone());
drop(db);
// We open the file again and check whether aggregate changeset is expected.
let aggregation = Store::<TestChangeSet>::open(&TEST_MAGIC_BYTES, &file_path)
.unwrap()
.aggregate_changesets()
.expect("must aggregate changesets")
.unwrap_or_default();
assert_eq!(aggregation, exp_aggregation);
}
}
} }

View File

@@ -1,13 +0,0 @@
[package]
name = "bdk_hwi"
version = "0.2.0"
edition = "2021"
homepage = "https://bitcoindevkit.org"
repository = "https://github.com/bitcoindevkit/bdk"
description = "Utilities to use bdk with hardware wallets"
license = "MIT OR Apache-2.0"
readme = "README.md"
[dependencies]
bdk = { path = "../bdk" }
hwi = { version = "0.7.0", features = [ "miniscript"] }

View File

@@ -1,42 +0,0 @@
//! HWI Signer
//!
//! This crate contains HWISigner, an implementation of a [`TransactionSigner`] to be
//! used with hardware wallets.
//! ```no_run
//! # use bdk::bitcoin::Network;
//! # use bdk::signer::SignerOrdering;
//! # use bdk_hwi::HWISigner;
//! # use bdk::wallet::AddressIndex::New;
//! # use bdk::{KeychainKind, SignOptions, Wallet};
//! # use hwi::HWIClient;
//! # use std::sync::Arc;
//! #
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let mut devices = HWIClient::enumerate()?;
//! if devices.is_empty() {
//! panic!("No devices found!");
//! }
//! let first_device = devices.remove(0)?;
//! let custom_signer = HWISigner::from_device(&first_device, Network::Testnet.into())?;
//!
//! # let mut wallet = Wallet::new_no_persist(
//! # "",
//! # None,
//! # Network::Testnet,
//! # )?;
//! #
//! // Adding the hardware signer to the BDK wallet
//! wallet.add_signer(
//! KeychainKind::External,
//! SignerOrdering(200),
//! Arc::new(custom_signer),
//! );
//!
//! # Ok(())
//! # }
//! ```
//!
//! [`TransactionSigner`]: bdk::wallet::signer::TransactionSigner
mod signer;
pub use signer::*;

View File

@@ -1,94 +0,0 @@
use bdk::bitcoin::bip32::Fingerprint;
use bdk::bitcoin::psbt::PartiallySignedTransaction;
use bdk::bitcoin::secp256k1::{All, Secp256k1};
use hwi::error::Error;
use hwi::types::{HWIChain, HWIDevice};
use hwi::HWIClient;
use bdk::signer::{SignerCommon, SignerError, SignerId, TransactionSigner};
#[derive(Debug)]
/// Custom signer for Hardware Wallets
///
/// This ignores `sign_options` and leaves the decisions up to the hardware wallet.
pub struct HWISigner {
fingerprint: Fingerprint,
client: HWIClient,
}
impl HWISigner {
/// Create a instance from the specified device and chain
pub fn from_device(device: &HWIDevice, chain: HWIChain) -> Result<HWISigner, Error> {
let client = HWIClient::get_client(device, false, chain)?;
Ok(HWISigner {
fingerprint: device.fingerprint,
client,
})
}
}
impl SignerCommon for HWISigner {
fn id(&self, _secp: &Secp256k1<All>) -> SignerId {
SignerId::Fingerprint(self.fingerprint)
}
}
impl TransactionSigner for HWISigner {
fn sign_transaction(
&self,
psbt: &mut PartiallySignedTransaction,
_sign_options: &bdk::SignOptions,
_secp: &Secp256k1<All>,
) -> Result<(), SignerError> {
psbt.combine(
self.client
.sign_tx(psbt)
.map_err(|e| {
SignerError::External(format!("While signing with hardware wallet: {}", e))
})?
.psbt,
)
.expect("Failed to combine HW signed psbt with passed PSBT");
Ok(())
}
}
// TODO: re-enable this once we have the `get_funded_wallet` test util
// #[cfg(test)]
// mod tests {
// #[test]
// fn test_hardware_signer() {
// use std::sync::Arc;
//
// use bdk::tests::get_funded_wallet;
// use bdk::signer::SignerOrdering;
// use bdk::bitcoin::Network;
// use crate::HWISigner;
// use hwi::HWIClient;
//
// let mut devices = HWIClient::enumerate().unwrap();
// if devices.is_empty() {
// panic!("No devices found!");
// }
// let device = devices.remove(0).unwrap();
// let client = HWIClient::get_client(&device, true, Network::Regtest.into()).unwrap();
// let descriptors = client.get_descriptors::<String>(None).unwrap();
// let custom_signer = HWISigner::from_device(&device, Network::Regtest.into()).unwrap();
//
// let (mut wallet, _) = get_funded_wallet(&descriptors.internal[0]);
// wallet.add_signer(
// bdk::KeychainKind::External,
// SignerOrdering(200),
// Arc::new(custom_signer),
// );
//
// let addr = wallet.get_address(bdk::wallet::AddressIndex::LastUnused);
// let mut builder = wallet.build_tx();
// builder.drain_to(addr.script_pubkey()).drain_wallet();
// let (mut psbt, _) = builder.finish().unwrap();
//
// let finalized = wallet.sign(&mut psbt, Default::default()).unwrap();
// assert!(finalized);
// }
// }

View File

@@ -1,24 +0,0 @@
[package]
name = "bdk_testenv"
version = "0.1.0"
edition = "2021"
rust-version = "1.63"
homepage = "https://bitcoindevkit.org"
repository = "https://github.com/bitcoindevkit/bdk"
documentation = "https://docs.rs/bdk_testenv"
description = "Testing framework for BDK chain sources."
license = "MIT OR Apache-2.0"
readme = "README.md"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bitcoincore-rpc = { version = "0.17" }
bdk_chain = { path = "../chain", version = "0.11", default-features = false }
electrsd = { version= "0.25.0", features = ["bitcoind_25_0", "esplora_a33e97e1", "legacy"] }
anyhow = { version = "1" }
[features]
default = ["std"]
std = ["bdk_chain/std"]
serde = ["bdk_chain/serde"]

View File

@@ -1,6 +0,0 @@
# BDK TestEnv
This crate sets up a regtest environment with a single [`bitcoind`] node
connected to an [`electrs`] instance. This framework provides the infrastructure
for testing chain source crates, e.g., [`bdk_chain`], [`bdk_electrum`],
[`bdk_esplora`], etc.

View File

@@ -1,278 +0,0 @@
use bdk_chain::bitcoin::{
address::NetworkChecked, block::Header, hash_types::TxMerkleNode, hashes::Hash,
secp256k1::rand::random, Address, Amount, Block, BlockHash, CompactTarget, ScriptBuf,
ScriptHash, Transaction, TxIn, TxOut, Txid,
};
use bitcoincore_rpc::{
bitcoincore_rpc_json::{GetBlockTemplateModes, GetBlockTemplateRules},
RpcApi,
};
use electrsd::electrum_client::ElectrumApi;
use std::time::Duration;
/// Struct for running a regtest environment with a single `bitcoind` node with an `electrs`
/// instance connected to it.
pub struct TestEnv {
pub bitcoind: electrsd::bitcoind::BitcoinD,
pub electrsd: electrsd::ElectrsD,
}
impl TestEnv {
/// Construct a new [`TestEnv`] instance with default configurations.
pub fn new() -> anyhow::Result<Self> {
let bitcoind = match std::env::var_os("BITCOIND_EXE") {
Some(bitcoind_path) => electrsd::bitcoind::BitcoinD::new(bitcoind_path),
None => {
let bitcoind_exe = electrsd::bitcoind::downloaded_exe_path()
.expect(
"you need to provide an env var BITCOIND_EXE or specify a bitcoind version feature",
);
electrsd::bitcoind::BitcoinD::with_conf(
bitcoind_exe,
&electrsd::bitcoind::Conf::default(),
)
}
}?;
let mut electrsd_conf = electrsd::Conf::default();
electrsd_conf.http_enabled = true;
let electrsd = match std::env::var_os("ELECTRS_EXE") {
Some(env_electrs_exe) => {
electrsd::ElectrsD::with_conf(env_electrs_exe, &bitcoind, &electrsd_conf)
}
None => {
let electrs_exe = electrsd::downloaded_exe_path()
.expect("electrs version feature must be enabled");
electrsd::ElectrsD::with_conf(electrs_exe, &bitcoind, &electrsd_conf)
}
}?;
Ok(Self { bitcoind, electrsd })
}
/// Exposes the [`ElectrumApi`] calls from the Electrum client.
pub fn electrum_client(&self) -> &impl ElectrumApi {
&self.electrsd.client
}
/// Exposes the [`RpcApi`] calls from [`bitcoincore_rpc`].
pub fn rpc_client(&self) -> &impl RpcApi {
&self.bitcoind.client
}
// Reset `electrsd` so that new blocks can be seen.
pub fn reset_electrsd(mut self) -> anyhow::Result<Self> {
let mut electrsd_conf = electrsd::Conf::default();
electrsd_conf.http_enabled = true;
let electrsd = match std::env::var_os("ELECTRS_EXE") {
Some(env_electrs_exe) => {
electrsd::ElectrsD::with_conf(env_electrs_exe, &self.bitcoind, &electrsd_conf)
}
None => {
let electrs_exe = electrsd::downloaded_exe_path()
.expect("electrs version feature must be enabled");
electrsd::ElectrsD::with_conf(electrs_exe, &self.bitcoind, &electrsd_conf)
}
}?;
self.electrsd = electrsd;
Ok(self)
}
/// Mine a number of blocks of a given size `count`, which may be specified to a given coinbase
/// `address`.
pub fn mine_blocks(
&self,
count: usize,
address: Option<Address>,
) -> anyhow::Result<Vec<BlockHash>> {
let coinbase_address = match address {
Some(address) => address,
None => self
.bitcoind
.client
.get_new_address(None, None)?
.assume_checked(),
};
let block_hashes = self
.bitcoind
.client
.generate_to_address(count as _, &coinbase_address)?;
Ok(block_hashes)
}
/// Mine a block that is guaranteed to be empty even with transactions in the mempool.
pub fn mine_empty_block(&self) -> anyhow::Result<(usize, BlockHash)> {
let bt = self.bitcoind.client.get_block_template(
GetBlockTemplateModes::Template,
&[GetBlockTemplateRules::SegWit],
&[],
)?;
let txdata = vec![Transaction {
version: 1,
lock_time: bdk_chain::bitcoin::absolute::LockTime::from_height(0)?,
input: vec![TxIn {
previous_output: bdk_chain::bitcoin::OutPoint::default(),
script_sig: ScriptBuf::builder()
.push_int(bt.height as _)
// randomn number so that re-mining creates unique block
.push_int(random())
.into_script(),
sequence: bdk_chain::bitcoin::Sequence::default(),
witness: bdk_chain::bitcoin::Witness::new(),
}],
output: vec![TxOut {
value: 0,
script_pubkey: ScriptBuf::new_p2sh(&ScriptHash::all_zeros()),
}],
}];
let bits: [u8; 4] = bt
.bits
.clone()
.try_into()
.expect("rpc provided us with invalid bits");
let mut block = Block {
header: Header {
version: bdk_chain::bitcoin::block::Version::default(),
prev_blockhash: bt.previous_block_hash,
merkle_root: TxMerkleNode::all_zeros(),
time: Ord::max(bt.min_time, std::time::UNIX_EPOCH.elapsed()?.as_secs()) as u32,
bits: CompactTarget::from_consensus(u32::from_be_bytes(bits)),
nonce: 0,
},
txdata,
};
block.header.merkle_root = block.compute_merkle_root().expect("must compute");
for nonce in 0..=u32::MAX {
block.header.nonce = nonce;
if block.header.target().is_met_by(block.block_hash()) {
break;
}
}
self.bitcoind.client.submit_block(&block)?;
Ok((bt.height as usize, block.block_hash()))
}
/// This method waits for the Electrum notification indicating that a new block has been mined.
pub fn wait_until_electrum_sees_block(&self) -> anyhow::Result<()> {
self.electrsd.client.block_headers_subscribe()?;
let mut delay = Duration::from_millis(64);
loop {
self.electrsd.trigger()?;
self.electrsd.client.ping()?;
if self.electrsd.client.block_headers_pop()?.is_some() {
return Ok(());
}
if delay.as_millis() < 512 {
delay = delay.mul_f32(2.0);
}
std::thread::sleep(delay);
}
}
/// Invalidate a number of blocks of a given size `count`.
pub fn invalidate_blocks(&self, count: usize) -> anyhow::Result<()> {
let mut hash = self.bitcoind.client.get_best_block_hash()?;
for _ in 0..count {
let prev_hash = self
.bitcoind
.client
.get_block_info(&hash)?
.previousblockhash;
self.bitcoind.client.invalidate_block(&hash)?;
match prev_hash {
Some(prev_hash) => hash = prev_hash,
None => break,
}
}
Ok(())
}
/// Performs a reorg of depth `count`: invalidates the top `count` blocks and
/// then mines the same number of replacement blocks.
///
/// Refer to [`TestEnv::mine_empty_block`] for more information.
pub fn reorg(&self, count: usize) -> anyhow::Result<Vec<BlockHash>> {
    let height_before = self.bitcoind.client.get_block_count()?;
    self.invalidate_blocks(count)?;
    let mined = self.mine_blocks(count, None);
    // Replacing `count` blocks with `count` new ones must keep the tip height.
    assert_eq!(
        self.bitcoind.client.get_block_count()?,
        height_before,
        "reorg should not result in height change"
    );
    mined
}
/// Performs a reorg of depth `count` using empty replacement blocks.
///
/// Returns the `(height, hash)` pairs of the newly mined empty blocks.
pub fn reorg_empty_blocks(&self, count: usize) -> anyhow::Result<Vec<(usize, BlockHash)>> {
    let height_before = self.bitcoind.client.get_block_count()?;
    self.invalidate_blocks(count)?;
    let mut replacements = Vec::with_capacity(count);
    for _ in 0..count {
        replacements.push(self.mine_empty_block()?);
    }
    // Replacing `count` blocks with `count` new ones must keep the tip height.
    assert_eq!(
        self.bitcoind.client.get_block_count()?,
        height_before,
        "reorg should not result in height change"
    );
    Ok(replacements)
}
/// Sends `amount` to `address` from bitcoind's wallet, returning the txid of
/// the resulting transaction. All optional `sendtoaddress` RPC parameters are
/// left at their defaults.
pub fn send(&self, address: &Address<NetworkChecked>, amount: Amount) -> anyhow::Result<Txid> {
    Ok(self
        .bitcoind
        .client
        .send_to_address(address, amount, None, None, None, None, None, None)?)
}
}
#[cfg(test)]
mod test {
    use crate::TestEnv;
    use anyhow::Result;
    use bitcoincore_rpc::RpcApi;

    /// This checks that reorgs initiated by `bitcoind` are detected by our `electrsd` instance.
    #[test]
    fn test_reorg_is_detected_in_electrsd() -> Result<()> {
        let env = TestEnv::new()?;

        // Mine some blocks.
        env.mine_blocks(101, None)?;
        env.wait_until_electrum_sees_block()?;
        // Snapshot every block hash (genesis through tip) before the reorg.
        let height = env.bitcoind.client.get_block_count()?;
        let blocks = (0..=height)
            .map(|i| env.bitcoind.client.get_block_hash(i))
            .collect::<Result<Vec<_>, _>>()?;

        // Perform reorg on six blocks.
        env.reorg(6)?;
        env.wait_until_electrum_sees_block()?;
        // Snapshot the same height range again after the reorg.
        let reorged_height = env.bitcoind.client.get_block_count()?;
        let reorged_blocks = (0..=height)
            .map(|i| env.bitcoind.client.get_block_hash(i))
            .collect::<Result<Vec<_>, _>>()?;

        // A reorg replaces blocks one-for-one, so the tip height is unchanged.
        assert_eq!(height, reorged_height);

        // Block hashes should not be equal on the six reorged blocks.
        // Heights up to `height - 6` are untouched; the top six must differ.
        for (i, (block, reorged_block)) in blocks.iter().zip(reorged_blocks.iter()).enumerate() {
            match i <= height as usize - 6 {
                true => assert_eq!(block, reorged_block),
                false => assert_ne!(block, reorged_block),
            }
        }

        Ok(())
    }
}

View File

@@ -1,68 +0,0 @@
# Example RPC CLI
### Simple Regtest Test
1. Start local regtest bitcoind.
```
mkdir -p /tmp/regtest/bitcoind
bitcoind -regtest -server -fallbackfee=0.0002 -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> -datadir=/tmp/regtest/bitcoind -daemon
```
2. Create a test bitcoind wallet and set bitcoind env.
```
bitcoin-cli -datadir=/tmp/regtest/bitcoind -regtest -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> -named createwallet wallet_name="test"
export RPC_URL=127.0.0.1:18443
export RPC_USER=<your-rpc-username>
export RPC_PASS=<your-rpc-password>
```
3. Get test bitcoind wallet info.
```
bitcoin-cli -rpcwallet="test" -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> -datadir=/tmp/regtest/bitcoind -regtest getwalletinfo
```
4. Get new test bitcoind wallet address.
```
BITCOIND_ADDRESS=$(bitcoin-cli -rpcwallet="test" -datadir=/tmp/regtest/bitcoind -regtest -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> getnewaddress)
echo $BITCOIND_ADDRESS
```
5. Generate 101 blocks with reward to test bitcoind wallet address.
```
bitcoin-cli -datadir=/tmp/regtest/bitcoind -regtest -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> generatetoaddress 101 $BITCOIND_ADDRESS
```
6. Verify test bitcoind wallet balance.
```
bitcoin-cli -rpcwallet="test" -datadir=/tmp/regtest/bitcoind -regtest -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> getbalances
```
7. Set descriptor env and get address from RPC CLI wallet.
```
export DESCRIPTOR="wpkh(tprv8ZgxMBicQKsPfK9BTf82oQkHhawtZv19CorqQKPFeaHDMA4dXYX6eWsJGNJ7VTQXWmoHdrfjCYuDijcRmNFwSKcVhswzqs4fugE8turndGc/1/*)"
cargo run -- --network regtest address next
```
8. Send 5 test bitcoin to RPC CLI wallet.
```
bitcoin-cli -rpcwallet="test" -datadir=/tmp/regtest/bitcoind -regtest -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> sendtoaddress <address> 5
```
9. Sync blockchain with RPC CLI wallet.
```
cargo run -- --network regtest sync
<CNTRL-C to stop syncing>
```
10. Get RPC CLI wallet unconfirmed balances.
```
cargo run -- --network regtest balance
```
11. Generate 10 more blocks with reward to the test bitcoind wallet address.
```
bitcoin-cli -datadir=/tmp/regtest/bitcoind -rpcuser=<your-rpc-username> -rpcpassword=<your-rpc-password> -regtest generatetoaddress 10 $BITCOIND_ADDRESS
```
12. Sync the blockchain with RPC CLI wallet.
```
cargo run -- --network regtest sync
<CNTRL-C to stop syncing>
```
13. Get RPC CLI wallet confirmed balances.
```
cargo run -- --network regtest balance
```
14. List RPC CLI wallet transaction outputs.
```
cargo run -- --network regtest txout list
```

View File

@@ -12,9 +12,9 @@ use bdk_bitcoind_rpc::{
Emitter, Emitter,
}; };
use bdk_chain::{ use bdk_chain::{
bitcoin::{constants::genesis_block, Block, Transaction}, bitcoin::{Block, Transaction},
indexed_tx_graph, keychain, indexed_tx_graph, keychain,
local_chain::{self, LocalChain}, local_chain::{self, CheckPoint, LocalChain},
ConfirmationTimeHeightAnchor, IndexedTxGraph, ConfirmationTimeHeightAnchor, IndexedTxGraph,
}; };
use example_cli::{ use example_cli::{
@@ -42,7 +42,7 @@ type ChangeSet = (
#[derive(Debug)] #[derive(Debug)]
enum Emission { enum Emission {
Block(bdk_bitcoind_rpc::BlockEvent<Block>), Block { height: u32, block: Block },
Mempool(Vec<(Transaction, u64)>), Mempool(Vec<(Transaction, u64)>),
Tip(u32), Tip(u32),
} }
@@ -64,6 +64,9 @@ struct RpcArgs {
/// Starting block height to fallback to if no point of agreement if found /// Starting block height to fallback to if no point of agreement if found
#[clap(env = "FALLBACK_HEIGHT", long, default_value = "0")] #[clap(env = "FALLBACK_HEIGHT", long, default_value = "0")]
fallback_height: u32, fallback_height: u32,
/// The unused-scripts lookahead will be kept at this size
#[clap(long, default_value = "10")]
lookahead: u32,
} }
impl From<RpcArgs> for Auth { impl From<RpcArgs> for Auth {
@@ -110,22 +113,17 @@ enum RpcCommands {
fn main() -> anyhow::Result<()> { fn main() -> anyhow::Result<()> {
let start = Instant::now(); let start = Instant::now();
let example_cli::Init {
args, let (args, keymap, index, db, init_changeset) =
keymap, example_cli::init::<RpcCommands, RpcArgs, ChangeSet>(DB_MAGIC, DB_PATH)?;
index,
db,
init_changeset,
} = example_cli::init::<RpcCommands, RpcArgs, ChangeSet>(DB_MAGIC, DB_PATH)?;
println!( println!(
"[{:>10}s] loaded initial changeset from db", "[{:>10}s] loaded initial changeset from db",
start.elapsed().as_secs_f32() start.elapsed().as_secs_f32()
); );
let (init_chain_changeset, init_graph_changeset) = init_changeset;
let graph = Mutex::new({ let graph = Mutex::new({
let mut graph = IndexedTxGraph::new(index); let mut graph = IndexedTxGraph::new(index);
graph.apply_changeset(init_graph_changeset); graph.apply_changeset(init_changeset.1);
graph graph
}); });
println!( println!(
@@ -133,16 +131,7 @@ fn main() -> anyhow::Result<()> {
start.elapsed().as_secs_f32() start.elapsed().as_secs_f32()
); );
let chain = Mutex::new(if init_chain_changeset.is_empty() { let chain = Mutex::new(LocalChain::from_changeset(init_changeset.0)?);
let genesis_hash = genesis_block(args.network).block_hash();
let (chain, chain_changeset) = LocalChain::from_genesis_hash(genesis_hash);
let mut db = db.lock().unwrap();
db.stage((chain_changeset, Default::default()));
db.commit()?;
chain
} else {
LocalChain::from_changeset(init_chain_changeset)?
});
println!( println!(
"[{:>10}s] loaded local chain from changeset", "[{:>10}s] loaded local chain from changeset",
start.elapsed().as_secs_f32() start.elapsed().as_secs_f32()
@@ -151,7 +140,7 @@ fn main() -> anyhow::Result<()> {
let rpc_cmd = match args.command { let rpc_cmd = match args.command {
example_cli::Commands::ChainSpecific(rpc_cmd) => rpc_cmd, example_cli::Commands::ChainSpecific(rpc_cmd) => rpc_cmd,
general_cmd => { general_cmd => {
return example_cli::handle_commands( let res = example_cli::handle_commands(
&graph, &graph,
&db, &db,
&chain, &chain,
@@ -164,15 +153,21 @@ fn main() -> anyhow::Result<()> {
}, },
general_cmd, general_cmd,
); );
db.lock().unwrap().commit()?;
return res;
} }
}; };
match rpc_cmd { match rpc_cmd {
RpcCommands::Sync { rpc_args } => { RpcCommands::Sync { rpc_args } => {
let RpcArgs { let RpcArgs {
fallback_height, .. fallback_height,
lookahead,
..
} = rpc_args; } = rpc_args;
graph.lock().unwrap().index.set_lookahead_for_all(lookahead);
let chain_tip = chain.lock().unwrap().tip(); let chain_tip = chain.lock().unwrap().tip();
let rpc_client = rpc_args.new_client()?; let rpc_client = rpc_args.new_client()?;
let mut emitter = Emitter::new(&rpc_client, chain_tip, fallback_height); let mut emitter = Emitter::new(&rpc_client, chain_tip, fallback_height);
@@ -180,20 +175,17 @@ fn main() -> anyhow::Result<()> {
let mut last_db_commit = Instant::now(); let mut last_db_commit = Instant::now();
let mut last_print = Instant::now(); let mut last_print = Instant::now();
while let Some(emission) = emitter.next_block()? { while let Some((height, block)) = emitter.next_block()? {
let height = emission.block_height();
let mut chain = chain.lock().unwrap(); let mut chain = chain.lock().unwrap();
let mut graph = graph.lock().unwrap(); let mut graph = graph.lock().unwrap();
let mut db = db.lock().unwrap(); let mut db = db.lock().unwrap();
let chain_update =
CheckPoint::from_header(&block.header, height).into_update(false);
let chain_changeset = chain let chain_changeset = chain
.apply_update(local_chain::Update { .apply_update(chain_update)
tip: emission.checkpoint,
introduce_older_blocks: false,
})
.expect("must always apply as we receive blocks in order from emitter"); .expect("must always apply as we receive blocks in order from emitter");
let graph_changeset = graph.apply_block_relevant(&emission.block, height); let graph_changeset = graph.apply_block_relevant(block, height);
db.stage((chain_changeset, graph_changeset)); db.stage((chain_changeset, graph_changeset));
// commit staged db changes in intervals // commit staged db changes in intervals
@@ -241,10 +233,13 @@ fn main() -> anyhow::Result<()> {
} }
RpcCommands::Live { rpc_args } => { RpcCommands::Live { rpc_args } => {
let RpcArgs { let RpcArgs {
fallback_height, .. fallback_height,
lookahead,
..
} = rpc_args; } = rpc_args;
let sigterm_flag = start_ctrlc_handler(); let sigterm_flag = start_ctrlc_handler();
graph.lock().unwrap().index.set_lookahead_for_all(lookahead);
let last_cp = chain.lock().unwrap().tip(); let last_cp = chain.lock().unwrap().tip();
println!( println!(
@@ -261,8 +256,7 @@ fn main() -> anyhow::Result<()> {
loop { loop {
match emitter.next_block()? { match emitter.next_block()? {
Some(block_emission) => { Some((height, block)) => {
let height = block_emission.block_height();
if sigterm_flag.load(Ordering::Acquire) { if sigterm_flag.load(Ordering::Acquire) {
break; break;
} }
@@ -270,7 +264,7 @@ fn main() -> anyhow::Result<()> {
block_count = rpc_client.get_block_count()? as u32; block_count = rpc_client.get_block_count()? as u32;
tx.send(Emission::Tip(block_count))?; tx.send(Emission::Tip(block_count))?;
} }
tx.send(Emission::Block(block_emission))?; tx.send(Emission::Block { height, block })?;
} }
None => { None => {
if await_flag(&sigterm_flag, MEMPOOL_EMIT_DELAY) { if await_flag(&sigterm_flag, MEMPOOL_EMIT_DELAY) {
@@ -299,17 +293,13 @@ fn main() -> anyhow::Result<()> {
let mut chain = chain.lock().unwrap(); let mut chain = chain.lock().unwrap();
let changeset = match emission { let changeset = match emission {
Emission::Block(block_emission) => { Emission::Block { height, block } => {
let height = block_emission.block_height(); let chain_update =
let chain_update = local_chain::Update { CheckPoint::from_header(&block.header, height).into_update(false);
tip: block_emission.checkpoint,
introduce_older_blocks: false,
};
let chain_changeset = chain let chain_changeset = chain
.apply_update(chain_update) .apply_update(chain_update)
.expect("must always apply as we receive blocks in order from emitter"); .expect("must always apply as we receive blocks in order from emitter");
let graph_changeset = let graph_changeset = graph.apply_block_relevant(block, height);
graph.apply_block_relevant(&block_emission.block, height);
(chain_changeset, graph_changeset) (chain_changeset, graph_changeset)
} }
Emission::Mempool(mempool_txs) => { Emission::Mempool(mempool_txs) => {

View File

@@ -29,7 +29,7 @@ pub type KeychainChangeSet<A> = (
local_chain::ChangeSet, local_chain::ChangeSet,
indexed_tx_graph::ChangeSet<A, keychain::ChangeSet<Keychain>>, indexed_tx_graph::ChangeSet<A, keychain::ChangeSet<Keychain>>,
); );
pub type Database<C> = Persist<Store<C>, C>; pub type Database<'m, C> = Persist<Store<'m, C>, C>;
#[derive(Parser)] #[derive(Parser)]
#[clap(author, version, about, long_about = None)] #[clap(author, version, about, long_about = None)]
@@ -53,6 +53,7 @@ pub struct Args<CS: clap::Subcommand, S: clap::Args> {
pub command: Commands<CS, S>, pub command: Commands<CS, S>,
} }
#[allow(clippy::almost_swapped)]
#[derive(Subcommand, Debug, Clone)] #[derive(Subcommand, Debug, Clone)]
pub enum Commands<CS: clap::Subcommand, S: clap::Args> { pub enum Commands<CS: clap::Subcommand, S: clap::Args> {
#[clap(flatten)] #[clap(flatten)]
@@ -72,9 +73,7 @@ pub enum Commands<CS: clap::Subcommand, S: clap::Args> {
}, },
/// Send coins to an address. /// Send coins to an address.
Send { Send {
/// Amount to send in satoshis
value: u64, value: u64,
/// Destination address
address: Address<address::NetworkUnchecked>, address: Address<address::NetworkUnchecked>,
#[clap(short, default_value = "bnb")] #[clap(short, default_value = "bnb")]
coin_select: CoinSelectionAlgo, coin_select: CoinSelectionAlgo,
@@ -136,6 +135,7 @@ impl core::fmt::Display for CoinSelectionAlgo {
} }
} }
#[allow(clippy::almost_swapped)]
#[derive(Subcommand, Debug, Clone)] #[derive(Subcommand, Debug, Clone)]
pub enum AddressCmd { pub enum AddressCmd {
/// Get the next unused address. /// Get the next unused address.
@@ -144,17 +144,14 @@ pub enum AddressCmd {
New, New,
/// List all addresses /// List all addresses
List { List {
/// List change addresses
#[clap(long)] #[clap(long)]
change: bool, change: bool,
}, },
/// Get last revealed address index for each keychain.
Index, Index,
} }
#[derive(Subcommand, Debug, Clone)] #[derive(Subcommand, Debug, Clone)]
pub enum TxOutCmd { pub enum TxOutCmd {
/// List transaction outputs.
List { List {
/// Return only spent outputs. /// Return only spent outputs.
#[clap(short, long)] #[clap(short, long)]
@@ -188,12 +185,7 @@ impl core::fmt::Display for Keychain {
} }
} }
pub struct CreateTxChange { #[allow(clippy::type_complexity)]
pub index_changeset: keychain::ChangeSet<Keychain>,
pub change_keychain: Keychain,
pub index: u32,
}
pub fn create_tx<A: Anchor, O: ChainOracle>( pub fn create_tx<A: Anchor, O: ChainOracle>(
graph: &mut KeychainTxGraph<A>, graph: &mut KeychainTxGraph<A>,
chain: &O, chain: &O,
@@ -201,7 +193,10 @@ pub fn create_tx<A: Anchor, O: ChainOracle>(
cs_algorithm: CoinSelectionAlgo, cs_algorithm: CoinSelectionAlgo,
address: Address, address: Address,
value: u64, value: u64,
) -> anyhow::Result<(Transaction, Option<CreateTxChange>)> ) -> anyhow::Result<(
Transaction,
Option<(keychain::ChangeSet<Keychain>, (Keychain, u32))>,
)>
where where
O::Error: std::error::Error + Send + Sync + 'static, O::Error: std::error::Error + Send + Sync + 'static,
{ {
@@ -393,11 +388,7 @@ where
} }
let change_info = if selection_meta.drain_value.is_some() { let change_info = if selection_meta.drain_value.is_some() {
Some(CreateTxChange { Some((changeset, (internal_keychain, change_index)))
index_changeset: changeset,
change_keychain: internal_keychain,
index: change_index,
})
} else { } else {
None None
}; };
@@ -405,34 +396,35 @@ where
Ok((transaction, change_info)) Ok((transaction, change_info))
} }
// Alias the elements of `Result` of `planned_utxos` #[allow(clippy::type_complexity)]
pub type PlannedUtxo<K, A> = (bdk_tmp_plan::Plan<K>, FullTxOut<A>);
pub fn planned_utxos<A: Anchor, O: ChainOracle, K: Clone + bdk_tmp_plan::CanDerive>( pub fn planned_utxos<A: Anchor, O: ChainOracle, K: Clone + bdk_tmp_plan::CanDerive>(
graph: &KeychainTxGraph<A>, graph: &KeychainTxGraph<A>,
chain: &O, chain: &O,
assets: &bdk_tmp_plan::Assets<K>, assets: &bdk_tmp_plan::Assets<K>,
) -> Result<Vec<PlannedUtxo<K, A>>, O::Error> { ) -> Result<Vec<(bdk_tmp_plan::Plan<K>, FullTxOut<A>)>, O::Error> {
let chain_tip = chain.get_chain_tip()?; let chain_tip = chain.get_chain_tip()?;
let outpoints = graph.index.outpoints().iter().cloned(); let outpoints = graph.index.outpoints().iter().cloned();
graph graph
.graph() .graph()
.try_filter_chain_unspents(chain, chain_tip, outpoints) .try_filter_chain_unspents(chain, chain_tip, outpoints)
.filter_map(|r| -> Option<Result<PlannedUtxo<K, A>, _>> { .filter_map(
let (k, i, full_txo) = match r { #[allow(clippy::type_complexity)]
Err(err) => return Some(Err(err)), |r| -> Option<Result<(bdk_tmp_plan::Plan<K>, FullTxOut<A>), _>> {
Ok(((k, i), full_txo)) => (k, i, full_txo), let (k, i, full_txo) = match r {
}; Err(err) => return Some(Err(err)),
let desc = graph Ok(((k, i), full_txo)) => (k, i, full_txo),
.index };
.keychains() let desc = graph
.get(&k) .index
.expect("keychain must exist") .keychains()
.at_derivation_index(i) .get(&k)
.expect("i can't be hardened"); .expect("keychain must exist")
let plan = bdk_tmp_plan::plan_satisfaction(&desc, assets)?; .at_derivation_index(i)
Some(Ok((plan, full_txo))) .expect("i can't be hardened");
}) let plan = bdk_tmp_plan::plan_satisfaction(&desc, assets)?;
Some(Ok((plan, full_txo)))
},
)
.collect() .collect()
} }
@@ -465,10 +457,11 @@ where
let ((spk_i, spk), index_changeset) = spk_chooser(index, &Keychain::External); let ((spk_i, spk), index_changeset) = spk_chooser(index, &Keychain::External);
let db = &mut *db.lock().unwrap(); let db = &mut *db.lock().unwrap();
db.stage_and_commit(C::from(( db.stage(C::from((
local_chain::ChangeSet::default(), local_chain::ChangeSet::default(),
indexed_tx_graph::ChangeSet::from(index_changeset), indexed_tx_graph::ChangeSet::from(index_changeset),
)))?; )));
db.commit()?;
let addr = let addr =
Address::from_script(spk, network).context("failed to derive address")?; Address::from_script(spk, network).context("failed to derive address")?;
println!("[address @ {}] {}", spk_i, addr); println!("[address @ {}] {}", spk_i, addr);
@@ -485,14 +478,14 @@ where
true => Keychain::Internal, true => Keychain::Internal,
false => Keychain::External, false => Keychain::External,
}; };
for (spk_i, spk) in index.revealed_keychain_spks(&target_keychain) { for (spk_i, spk) in index.revealed_spks_of_keychain(&target_keychain) {
let address = Address::from_script(spk, network) let address = Address::from_script(spk, network)
.expect("should always be able to derive address"); .expect("should always be able to derive address");
println!( println!(
"{:?} {} used:{}", "{:?} {} used:{}",
spk_i, spk_i,
address, address,
index.is_used(target_keychain, spk_i) index.is_used(&(target_keychain, spk_i))
); );
} }
Ok(()) Ok(())
@@ -602,27 +595,23 @@ where
let (tx, change_info) = let (tx, change_info) =
create_tx(graph, chain, keymap, coin_select, address, value)?; create_tx(graph, chain, keymap, coin_select, address, value)?;
if let Some(CreateTxChange { if let Some((index_changeset, (change_keychain, index))) = change_info {
index_changeset,
change_keychain,
index,
}) = change_info
{
// We must first persist to disk the fact that we've got a new address from the // We must first persist to disk the fact that we've got a new address from the
// change keychain so future scans will find the tx we're about to broadcast. // change keychain so future scans will find the tx we're about to broadcast.
// If we're unable to persist this, then we don't want to broadcast. // If we're unable to persist this, then we don't want to broadcast.
{ {
let db = &mut *db.lock().unwrap(); let db = &mut *db.lock().unwrap();
db.stage_and_commit(C::from(( db.stage(C::from((
local_chain::ChangeSet::default(), local_chain::ChangeSet::default(),
indexed_tx_graph::ChangeSet::from(index_changeset), indexed_tx_graph::ChangeSet::from(index_changeset),
)))?; )));
db.commit()?;
} }
// We don't want other callers/threads to use this address while we're using it // We don't want other callers/threads to use this address while we're using it
// but we also don't want to scan the tx we just created because it's not // but we also don't want to scan the tx we just created because it's not
// technically in the blockchain yet. // technically in the blockchain yet.
graph.index.mark_used(change_keychain, index); graph.index.mark_used(&change_keychain, index);
(tx, Some((change_keychain, index))) (tx, Some((change_keychain, index)))
} else { } else {
(tx, None) (tx, None)
@@ -638,16 +627,16 @@ where
// We know the tx is at least unconfirmed now. Note if persisting here fails, // We know the tx is at least unconfirmed now. Note if persisting here fails,
// it's not a big deal since we can always find it again form // it's not a big deal since we can always find it again form
// blockchain. // blockchain.
db.lock().unwrap().stage_and_commit(C::from(( db.lock().unwrap().stage(C::from((
local_chain::ChangeSet::default(), local_chain::ChangeSet::default(),
keychain_changeset, keychain_changeset,
)))?; )));
Ok(()) Ok(())
} }
Err(e) => { Err(e) => {
if let Some((keychain, index)) = change_index { if let Some((keychain, index)) = change_index {
// We failed to broadcast, so allow our change address to be used in the future // We failed to broadcast, so allow our change address to be used in the future
graph.lock().unwrap().index.unmark_used(keychain, index); graph.lock().unwrap().index.unmark_used(&keychain, index);
} }
Err(e) Err(e)
} }
@@ -656,26 +645,17 @@ where
} }
} }
/// The initial state returned by [`init`]. #[allow(clippy::type_complexity)]
pub struct Init<CS: clap::Subcommand, S: clap::Args, C> { pub fn init<'m, CS: clap::Subcommand, S: clap::Args, C>(
/// Arguments parsed by the cli. db_magic: &'m [u8],
pub args: Args<CS, S>,
/// Descriptor keymap.
pub keymap: KeyMap,
/// Keychain-txout index.
pub index: KeychainTxOutIndex<Keychain>,
/// Persistence backend.
pub db: Mutex<Database<C>>,
/// Initial changeset.
pub init_changeset: C,
}
/// Parses command line arguments and initializes all components, creating
/// a file store with the given parameters, or loading one if it exists.
pub fn init<CS: clap::Subcommand, S: clap::Args, C>(
db_magic: &[u8],
db_default_path: &str, db_default_path: &str,
) -> anyhow::Result<Init<CS, S, C>> ) -> anyhow::Result<(
Args<CS, S>,
KeyMap,
KeychainTxOutIndex<Keychain>,
Mutex<Database<'m, C>>,
C,
)>
where where
C: Default + Append + Serialize + DeserializeOwned, C: Default + Append + Serialize + DeserializeOwned,
{ {
@@ -701,7 +681,7 @@ where
index.add_keychain(Keychain::Internal, internal_descriptor); index.add_keychain(Keychain::Internal, internal_descriptor);
} }
let mut db_backend = match Store::<C>::open_or_create_new(db_magic, &args.db_path) { let mut db_backend = match Store::<'m, C>::open_or_create_new(db_magic, &args.db_path) {
Ok(db_backend) => db_backend, Ok(db_backend) => db_backend,
// we cannot return `err` directly as it has lifetime `'m` // we cannot return `err` directly as it has lifetime `'m`
Err(err) => return Err(anyhow::anyhow!("failed to init db backend: {:?}", err)), Err(err) => return Err(anyhow::anyhow!("failed to init db backend: {:?}", err)),
@@ -709,11 +689,11 @@ where
let init_changeset = db_backend.load_from_persistence()?.unwrap_or_default(); let init_changeset = db_backend.load_from_persistence()?.unwrap_or_default();
Ok(Init { Ok((
args, args,
keymap, keymap,
index, index,
db: Mutex::new(Database::new(db_backend)), Mutex::new(Database::new(db_backend)),
init_changeset, init_changeset,
}) ))
} }

View File

@@ -5,7 +5,7 @@ use std::{
}; };
use bdk_chain::{ use bdk_chain::{
bitcoin::{constants::genesis_block, Address, Network, OutPoint, Txid}, bitcoin::{Address, Network, OutPoint, ScriptBuf, Txid},
indexed_tx_graph::{self, IndexedTxGraph}, indexed_tx_graph::{self, IndexedTxGraph},
keychain, keychain,
local_chain::{self, LocalChain}, local_chain::{self, LocalChain},
@@ -103,15 +103,8 @@ type ChangeSet = (
); );
fn main() -> anyhow::Result<()> { fn main() -> anyhow::Result<()> {
let example_cli::Init { let (args, keymap, index, db, (disk_local_chain, disk_tx_graph)) =
args, example_cli::init::<ElectrumCommands, ElectrumArgs, ChangeSet>(DB_MAGIC, DB_PATH)?;
keymap,
index,
db,
init_changeset,
} = example_cli::init::<ElectrumCommands, ElectrumArgs, ChangeSet>(DB_MAGIC, DB_PATH)?;
let (disk_local_chain, disk_tx_graph) = init_changeset;
let graph = Mutex::new({ let graph = Mutex::new({
let mut graph = IndexedTxGraph::new(index); let mut graph = IndexedTxGraph::new(index);
@@ -119,17 +112,12 @@ fn main() -> anyhow::Result<()> {
graph graph
}); });
let chain = Mutex::new({ let chain = Mutex::new(LocalChain::from_changeset(disk_local_chain)?);
let genesis_hash = genesis_block(args.network).block_hash();
let (mut chain, _) = LocalChain::from_genesis_hash(genesis_hash);
chain.apply_changeset(&disk_local_chain)?;
chain
});
let electrum_cmd = match &args.command { let electrum_cmd = match &args.command {
example_cli::Commands::ChainSpecific(electrum_cmd) => electrum_cmd, example_cli::Commands::ChainSpecific(electrum_cmd) => electrum_cmd,
general_cmd => { general_cmd => {
return example_cli::handle_commands( let res = example_cli::handle_commands(
&graph, &graph,
&db, &db,
&chain, &chain,
@@ -142,6 +130,9 @@ fn main() -> anyhow::Result<()> {
}, },
general_cmd.clone(), general_cmd.clone(),
); );
db.lock().unwrap().commit()?;
return res;
} }
}; };
@@ -159,7 +150,7 @@ fn main() -> anyhow::Result<()> {
let keychain_spks = graph let keychain_spks = graph
.index .index
.all_unbounded_spk_iters() .spks_of_all_keychains()
.into_iter() .into_iter()
.map(|(keychain, iter)| { .map(|(keychain, iter)| {
let mut first = true; let mut first = true;
@@ -181,7 +172,14 @@ fn main() -> anyhow::Result<()> {
}; };
client client
.full_scan(tip, keychain_spks, stop_gap, scan_options.batch_size) .scan(
tip,
keychain_spks,
core::iter::empty(),
core::iter::empty(),
stop_gap,
scan_options.batch_size,
)
.context("scanning the blockchain")? .context("scanning the blockchain")?
} }
ElectrumCommands::Sync { ElectrumCommands::Sync {
@@ -210,28 +208,29 @@ fn main() -> anyhow::Result<()> {
if all_spks { if all_spks {
let all_spks = graph let all_spks = graph
.index .index
.revealed_spks() .all_spks()
.map(|(k, i, spk)| (k, i, spk.to_owned())) .iter()
.map(|(k, v)| (*k, v.clone()))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
spks = Box::new(spks.chain(all_spks.into_iter().map(|(k, i, spk)| { spks = Box::new(spks.chain(all_spks.into_iter().map(|(index, script)| {
eprintln!("scanning {}:{}", k, i); eprintln!("scanning {:?}", index);
spk script
}))); })));
} }
if unused_spks { if unused_spks {
let unused_spks = graph let unused_spks = graph
.index .index
.unused_spks() .unused_spks(..)
.map(|(k, i, spk)| (k, i, spk.to_owned())) .map(|(k, v)| (*k, ScriptBuf::from(v)))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
spks = Box::new(spks.chain(unused_spks.into_iter().map(|(k, i, spk)| { spks = Box::new(spks.chain(unused_spks.into_iter().map(|(index, script)| {
eprintln!( eprintln!(
"Checking if address {} {}:{} has been used", "Checking if address {} {:?} has been used",
Address::from_script(&spk, args.network).unwrap(), Address::from_script(&script, args.network).unwrap(),
k, index
i,
); );
spk
script
}))); })));
} }
@@ -280,7 +279,7 @@ fn main() -> anyhow::Result<()> {
drop((graph, chain)); drop((graph, chain));
let electrum_update = client let electrum_update = client
.sync(tip, spks, txids, outpoints, scan_options.batch_size) .scan_without_keychain(tip, spks, txids, outpoints, scan_options.batch_size)
.context("scanning the blockchain")?; .context("scanning the blockchain")?;
(electrum_update, BTreeMap::new()) (electrum_update, BTreeMap::new())
} }

View File

@@ -99,13 +99,8 @@ pub struct ScanOptions {
} }
fn main() -> anyhow::Result<()> { fn main() -> anyhow::Result<()> {
let example_cli::Init { let (args, keymap, index, db, init_changeset) =
args, example_cli::init::<EsploraCommands, EsploraArgs, ChangeSet>(DB_MAGIC, DB_PATH)?;
keymap,
index,
db,
init_changeset,
} = example_cli::init::<EsploraCommands, EsploraArgs, ChangeSet>(DB_MAGIC, DB_PATH)?;
let genesis_hash = genesis_block(args.network).block_hash(); let genesis_hash = genesis_block(args.network).block_hash();
@@ -130,7 +125,7 @@ fn main() -> anyhow::Result<()> {
example_cli::Commands::ChainSpecific(esplora_cmd) => esplora_cmd, example_cli::Commands::ChainSpecific(esplora_cmd) => esplora_cmd,
// These are general commands handled by example_cli. Execute the cmd and return. // These are general commands handled by example_cli. Execute the cmd and return.
general_cmd => { general_cmd => {
return example_cli::handle_commands( let res = example_cli::handle_commands(
&graph, &graph,
&db, &db,
&chain, &chain,
@@ -145,6 +140,9 @@ fn main() -> anyhow::Result<()> {
}, },
general_cmd.clone(), general_cmd.clone(),
); );
db.lock().unwrap().commit()?;
return res;
} }
}; };
@@ -167,7 +165,7 @@ fn main() -> anyhow::Result<()> {
.lock() .lock()
.expect("mutex must not be poisoned") .expect("mutex must not be poisoned")
.index .index
.all_unbounded_spk_iters() .spks_of_all_keychains()
.into_iter() .into_iter()
// This `map` is purely for logging. // This `map` is purely for logging.
.map(|(keychain, iter)| { .map(|(keychain, iter)| {
@@ -190,7 +188,13 @@ fn main() -> anyhow::Result<()> {
// represents the last active spk derivation indices of keychains // represents the last active spk derivation indices of keychains
// (`keychain_indices_update`). // (`keychain_indices_update`).
let (graph_update, last_active_indices) = client let (graph_update, last_active_indices) = client
.full_scan(keychain_spks, *stop_gap, scan_options.parallel_requests) .scan_txs_with_keychains(
keychain_spks,
core::iter::empty(),
core::iter::empty(),
*stop_gap,
scan_options.parallel_requests,
)
.context("scanning for transactions")?; .context("scanning for transactions")?;
let mut graph = graph.lock().expect("mutex must not be poisoned"); let mut graph = graph.lock().expect("mutex must not be poisoned");
@@ -237,32 +241,32 @@ fn main() -> anyhow::Result<()> {
if *all_spks { if *all_spks {
let all_spks = graph let all_spks = graph
.index .index
.revealed_spks() .all_spks()
.map(|(k, i, spk)| (k, i, spk.to_owned())) .iter()
.map(|(k, v)| (*k, v.clone()))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
spks = Box::new(spks.chain(all_spks.into_iter().map(|(k, i, spk)| { spks = Box::new(spks.chain(all_spks.into_iter().map(|(index, script)| {
eprintln!("scanning {}:{}", k, i); eprintln!("scanning {:?}", index);
// Flush early to ensure we print at every iteration. // Flush early to ensure we print at every iteration.
let _ = io::stderr().flush(); let _ = io::stderr().flush();
spk script
}))); })));
} }
if unused_spks { if unused_spks {
let unused_spks = graph let unused_spks = graph
.index .index
.unused_spks() .unused_spks(..)
.map(|(k, i, spk)| (k, i, spk.to_owned())) .map(|(k, v)| (*k, v.to_owned()))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
spks = Box::new(spks.chain(unused_spks.into_iter().map(|(k, i, spk)| { spks = Box::new(spks.chain(unused_spks.into_iter().map(|(index, script)| {
eprintln!( eprintln!(
"Checking if address {} {}:{} has been used", "Checking if address {} {:?} has been used",
Address::from_script(&spk, args.network).unwrap(), Address::from_script(&script, args.network).unwrap(),
k, index
i,
); );
// Flush early to ensure we print at every iteration. // Flush early to ensure we print at every iteration.
let _ = io::stderr().flush(); let _ = io::stderr().flush();
spk script
}))); })));
} }
if utxos { if utxos {
@@ -308,7 +312,7 @@ fn main() -> anyhow::Result<()> {
} }
let graph_update = let graph_update =
client.sync(spks, txids, outpoints, scan_options.parallel_requests)?; client.scan_txs(spks, txids, outpoints, scan_options.parallel_requests)?;
graph.lock().unwrap().apply_update(graph_update) graph.lock().unwrap().apply_update(graph_update)
} }

View File

@@ -40,7 +40,7 @@ fn main() -> Result<(), anyhow::Error> {
let prev_tip = wallet.latest_checkpoint(); let prev_tip = wallet.latest_checkpoint();
let keychain_spks = wallet let keychain_spks = wallet
.all_unbounded_spk_iters() .spks_of_all_keychains()
.into_iter() .into_iter()
.map(|(k, k_spks)| { .map(|(k, k_spks)| {
let mut once = Some(()); let mut once = Some(());
@@ -61,7 +61,7 @@ fn main() -> Result<(), anyhow::Error> {
relevant_txids, relevant_txids,
}, },
keychain_update, keychain_update,
) = client.full_scan(prev_tip, keychain_spks, STOP_GAP, BATCH_SIZE)?; ) = client.scan(prev_tip, keychain_spks, None, None, STOP_GAP, BATCH_SIZE)?;
println!(); println!();

View File

@@ -39,7 +39,7 @@ async fn main() -> Result<(), anyhow::Error> {
let prev_tip = wallet.latest_checkpoint(); let prev_tip = wallet.latest_checkpoint();
let keychain_spks = wallet let keychain_spks = wallet
.all_unbounded_spk_iters() .spks_of_all_keychains()
.into_iter() .into_iter()
.map(|(k, k_spks)| { .map(|(k, k_spks)| {
let mut once = Some(()); let mut once = Some(());
@@ -54,7 +54,7 @@ async fn main() -> Result<(), anyhow::Error> {
}) })
.collect(); .collect();
let (update_graph, last_active_indices) = client let (update_graph, last_active_indices) = client
.full_scan(keychain_spks, STOP_GAP, PARALLEL_REQUESTS) .scan_txs_with_keychains(keychain_spks, None, None, STOP_GAP, PARALLEL_REQUESTS)
.await?; .await?;
let missing_heights = update_graph.missing_heights(wallet.local_chain()); let missing_heights = update_graph.missing_heights(wallet.local_chain());
let chain_update = client.update_local_chain(prev_tip, missing_heights).await?; let chain_update = client.update_local_chain(prev_tip, missing_heights).await?;

View File

@@ -38,7 +38,7 @@ fn main() -> Result<(), anyhow::Error> {
let prev_tip = wallet.latest_checkpoint(); let prev_tip = wallet.latest_checkpoint();
let keychain_spks = wallet let keychain_spks = wallet
.all_unbounded_spk_iters() .spks_of_all_keychains()
.into_iter() .into_iter()
.map(|(k, k_spks)| { .map(|(k, k_spks)| {
let mut once = Some(()); let mut once = Some(());
@@ -54,7 +54,7 @@ fn main() -> Result<(), anyhow::Error> {
.collect(); .collect();
let (update_graph, last_active_indices) = let (update_graph, last_active_indices) =
client.full_scan(keychain_spks, STOP_GAP, PARALLEL_REQUESTS)?; client.scan_txs_with_keychains(keychain_spks, None, None, STOP_GAP, PARALLEL_REQUESTS)?;
let missing_heights = update_graph.missing_heights(wallet.local_chain()); let missing_heights = update_graph.missing_heights(wallet.local_chain());
let chain_update = client.update_local_chain(prev_tip, missing_heights)?; let chain_update = client.update_local_chain(prev_tip, missing_heights)?;
let update = Update { let update = Update {

View File

@@ -1,15 +0,0 @@
[package]
name = "wallet_rpc"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bdk = { path = "../../crates/bdk" }
bdk_file_store = { path = "../../crates/file_store" }
bdk_bitcoind_rpc = { path = "../../crates/bitcoind_rpc" }
anyhow = "1"
clap = { version = "3.2.25", features = ["derive", "env"] }
ctrlc = "2.0.1"

View File

@@ -1,45 +0,0 @@
# Wallet RPC Example
```
$ cargo run --bin wallet_rpc -- --help
wallet_rpc 0.1.0
Bitcoind RPC example using `bdk::Wallet`
USAGE:
wallet_rpc [OPTIONS] <DESCRIPTOR> [CHANGE_DESCRIPTOR]
ARGS:
<DESCRIPTOR> Wallet descriptor [env: DESCRIPTOR=]
<CHANGE_DESCRIPTOR> Wallet change descriptor [env: CHANGE_DESCRIPTOR=]
OPTIONS:
--db-path <DB_PATH>
Where to store wallet data [env: BDK_DB_PATH=] [default: .bdk_wallet_rpc_example.db]
-h, --help
Print help information
--network <NETWORK>
Bitcoin network to connect to [env: BITCOIN_NETWORK=] [default: testnet]
--rpc-cookie <RPC_COOKIE>
RPC auth cookie file [env: RPC_COOKIE=]
--rpc-pass <RPC_PASS>
RPC auth password [env: RPC_PASS=]
--rpc-user <RPC_USER>
RPC auth username [env: RPC_USER=]
--start-height <START_HEIGHT>
Earliest block height to start sync from [env: START_HEIGHT=] [default: 481824]
--url <URL>
RPC URL [env: RPC_URL=] [default: 127.0.0.1:8332]
-V, --version
Print version information
```

View File

@@ -1,182 +0,0 @@
use bdk::{
bitcoin::{Block, Network, Transaction},
wallet::Wallet,
};
use bdk_bitcoind_rpc::{
bitcoincore_rpc::{Auth, Client, RpcApi},
Emitter,
};
use bdk_file_store::Store;
use clap::{self, Parser};
use std::{path::PathBuf, sync::mpsc::sync_channel, thread::spawn, time::Instant};
const DB_MAGIC: &str = "bdk-rpc-wallet-example";
/// Bitcoind RPC example using `bdk::Wallet`.
///
/// This syncs the chain block-by-block and prints the current balance, transaction count and UTXO
/// count.
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
#[clap(propagate_version = true)]
pub struct Args {
/// Wallet descriptor
#[clap(env = "DESCRIPTOR")]
pub descriptor: String,
/// Wallet change descriptor
#[clap(env = "CHANGE_DESCRIPTOR")]
pub change_descriptor: Option<String>,
/// Earliest block height to start sync from
#[clap(env = "START_HEIGHT", long, default_value = "481824")]
pub start_height: u32,
/// Bitcoin network to connect to
#[clap(env = "BITCOIN_NETWORK", long, default_value = "testnet")]
pub network: Network,
/// Where to store wallet data
#[clap(
env = "BDK_DB_PATH",
long,
default_value = ".bdk_wallet_rpc_example.db"
)]
pub db_path: PathBuf,
/// RPC URL
#[clap(env = "RPC_URL", long, default_value = "127.0.0.1:8332")]
pub url: String,
/// RPC auth cookie file
#[clap(env = "RPC_COOKIE", long)]
pub rpc_cookie: Option<PathBuf>,
/// RPC auth username
#[clap(env = "RPC_USER", long)]
pub rpc_user: Option<String>,
/// RPC auth password
#[clap(env = "RPC_PASS", long)]
pub rpc_pass: Option<String>,
}
impl Args {
fn client(&self) -> anyhow::Result<Client> {
Ok(Client::new(
&self.url,
match (&self.rpc_cookie, &self.rpc_user, &self.rpc_pass) {
(None, None, None) => Auth::None,
(Some(path), _, _) => Auth::CookieFile(path.clone()),
(_, Some(user), Some(pass)) => Auth::UserPass(user.clone(), pass.clone()),
(_, Some(_), None) => panic!("rpc auth: missing rpc_pass"),
(_, None, Some(_)) => panic!("rpc auth: missing rpc_user"),
},
)?)
}
}
#[derive(Debug)]
enum Emission {
SigTerm,
Block(bdk_bitcoind_rpc::BlockEvent<Block>),
Mempool(Vec<(Transaction, u64)>),
}
fn main() -> anyhow::Result<()> {
let args = Args::parse();
let rpc_client = args.client()?;
println!(
"Connected to Bitcoin Core RPC at {:?}",
rpc_client.get_blockchain_info().unwrap()
);
let start_load_wallet = Instant::now();
let mut wallet = Wallet::new_or_load(
&args.descriptor,
args.change_descriptor.as_ref(),
Store::<bdk::wallet::ChangeSet>::open_or_create_new(DB_MAGIC.as_bytes(), args.db_path)?,
args.network,
)?;
println!(
"Loaded wallet in {}s",
start_load_wallet.elapsed().as_secs_f32()
);
let balance = wallet.get_balance();
println!("Wallet balance before syncing: {} sats", balance.total());
let wallet_tip = wallet.latest_checkpoint();
println!(
"Wallet tip: {} at height {}",
wallet_tip.hash(),
wallet_tip.height()
);
let (sender, receiver) = sync_channel::<Emission>(21);
let signal_sender = sender.clone();
ctrlc::set_handler(move || {
signal_sender
.send(Emission::SigTerm)
.expect("failed to send sigterm")
});
let emitter_tip = wallet_tip.clone();
spawn(move || -> Result<(), anyhow::Error> {
let mut emitter = Emitter::new(&rpc_client, emitter_tip, args.start_height);
while let Some(emission) = emitter.next_block()? {
sender.send(Emission::Block(emission))?;
}
sender.send(Emission::Mempool(emitter.mempool()?))?;
Ok(())
});
let mut blocks_received = 0_usize;
for emission in receiver {
match emission {
Emission::SigTerm => {
println!("Sigterm received, exiting...");
break;
}
Emission::Block(block_emission) => {
blocks_received += 1;
let height = block_emission.block_height();
let hash = block_emission.block_hash();
let connected_to = block_emission.connected_to();
let start_apply_block = Instant::now();
wallet.apply_block_connected_to(&block_emission.block, height, connected_to)?;
wallet.commit()?;
let elapsed = start_apply_block.elapsed().as_secs_f32();
println!(
"Applied block {} at height {} in {}s",
hash, height, elapsed
);
}
Emission::Mempool(mempool_emission) => {
let start_apply_mempool = Instant::now();
wallet.apply_unconfirmed_txs(mempool_emission.iter().map(|(tx, time)| (tx, *time)));
wallet.commit()?;
println!(
"Applied unconfirmed transactions in {}s",
start_apply_mempool.elapsed().as_secs_f32()
);
break;
}
}
}
let wallet_tip_end = wallet.latest_checkpoint();
let balance = wallet.get_balance();
println!(
"Synced {} blocks in {}s",
blocks_received,
start_load_wallet.elapsed().as_secs_f32(),
);
println!(
"Wallet tip is '{}:{}'",
wallet_tip_end.height(),
wallet_tip_end.hash()
);
println!("Wallet balance is {} sats", balance.total());
println!(
"Wallet has {} transactions and {} utxos",
wallet.transactions().count(),
wallet.list_unspent().count()
);
Ok(())
}