Compare commits

..

1 Commit

Author SHA1 Message Date
dependabot[bot]
4d040b7057 build(deps): bump actions/setup-python from 4 to 5
Bumps [actions/setup-python](https://github.com/actions/setup-python) from 4 to 5.
- [Release notes](https://github.com/actions/setup-python/releases)
- [Commits](https://github.com/actions/setup-python/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/setup-python
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-12-11 06:05:08 +00:00
46 changed files with 496 additions and 471 deletions

View File

@@ -33,7 +33,7 @@ jobs:
- name: Run simulator image - name: Run simulator image
run: docker run --name simulator --network=host hwi/ledger_emulator & run: docker run --name simulator --network=host hwi/ledger_emulator &
- name: Install Python - name: Install Python
uses: actions/setup-python@v4 uses: actions/setup-python@v5
with: with:
python-version: '3.9' python-version: '3.9'
- name: Install python dependencies - name: Install python dependencies

View File

@@ -12,7 +12,7 @@ jobs:
rust: rust:
- version: stable - version: stable
clippy: true clippy: true
- version: 1.63.0 # MSRV - version: 1.57.0 # MSRV
features: features:
- --no-default-features - --no-default-features
- --all-features - --all-features
@@ -28,12 +28,28 @@ jobs:
- name: Rust Cache - name: Rust Cache
uses: Swatinem/rust-cache@v2.2.1 uses: Swatinem/rust-cache@v2.2.1
- name: Pin dependencies for MSRV - name: Pin dependencies for MSRV
if: matrix.rust.version == '1.63.0' if: matrix.rust.version == '1.57.0'
run: | run: |
cargo update -p log --precise "0.4.18"
cargo update -p tempfile --precise "3.6.0"
cargo update -p reqwest --precise "0.11.18"
cargo update -p hyper-rustls --precise 0.24.0
cargo update -p rustls:0.21.9 --precise "0.21.1"
cargo update -p rustls:0.20.9 --precise "0.20.8"
cargo update -p tokio --precise "1.29.1"
cargo update -p tokio-util --precise "0.7.8"
cargo update -p flate2 --precise "1.0.26"
cargo update -p h2 --precise "0.3.20"
cargo update -p rustls-webpki:0.100.3 --precise "0.100.1"
cargo update -p rustls-webpki:0.101.7 --precise "0.101.1"
cargo update -p zip --precise "0.6.2" cargo update -p zip --precise "0.6.2"
cargo update -p time --precise "0.3.20" cargo update -p time --precise "0.3.13"
cargo update -p byteorder --precise "1.4.3"
cargo update -p webpki --precise "0.22.2"
cargo update -p os_str_bytes --precise 6.5.1
cargo update -p sct --precise 0.7.0
cargo update -p cc --precise "1.0.81"
cargo update -p jobserver --precise "0.1.26" cargo update -p jobserver --precise "0.1.26"
cargo update -p home --precise "0.5.5"
- name: Build - name: Build
run: cargo build ${{ matrix.features }} run: cargo build ${{ matrix.features }}
- name: Test - name: Test
@@ -118,7 +134,9 @@ jobs:
- uses: actions/checkout@v1 - uses: actions/checkout@v1
- uses: actions-rs/toolchain@v1 - uses: actions-rs/toolchain@v1
with: with:
toolchain: stable # we pin clippy instead of using "stable" so that our CI doesn't break
# at each new cargo release
toolchain: "1.67.0"
components: clippy components: clippy
override: true override: true
- name: Rust Cache - name: Rust Cache

View File

@@ -15,7 +15,7 @@
<a href="https://github.com/bitcoindevkit/bdk/actions?query=workflow%3ACI"><img alt="CI Status" src="https://github.com/bitcoindevkit/bdk/workflows/CI/badge.svg"></a> <a href="https://github.com/bitcoindevkit/bdk/actions?query=workflow%3ACI"><img alt="CI Status" src="https://github.com/bitcoindevkit/bdk/workflows/CI/badge.svg"></a>
<a href="https://coveralls.io/github/bitcoindevkit/bdk?branch=master"><img src="https://coveralls.io/repos/github/bitcoindevkit/bdk/badge.svg?branch=master"/></a> <a href="https://coveralls.io/github/bitcoindevkit/bdk?branch=master"><img src="https://coveralls.io/repos/github/bitcoindevkit/bdk/badge.svg?branch=master"/></a>
<a href="https://docs.rs/bdk"><img alt="API Docs" src="https://img.shields.io/badge/docs.rs-bdk-green"/></a> <a href="https://docs.rs/bdk"><img alt="API Docs" src="https://img.shields.io/badge/docs.rs-bdk-green"/></a>
<a href="https://blog.rust-lang.org/2022/08/11/Rust-1.63.0.html"><img alt="Rustc Version 1.63.0+" src="https://img.shields.io/badge/rustc-1.63.0%2B-lightgrey.svg"/></a> <a href="https://blog.rust-lang.org/2021/12/02/Rust-1.57.0.html"><img alt="Rustc Version 1.57.0+" src="https://img.shields.io/badge/rustc-1.57.0%2B-lightgrey.svg"/></a>
<a href="https://discord.gg/d7NkDKm"><img alt="Chat on Discord" src="https://img.shields.io/discord/753336465005608961?logo=discord"></a> <a href="https://discord.gg/d7NkDKm"><img alt="Chat on Discord" src="https://img.shields.io/discord/753336465005608961?logo=discord"></a>
</p> </p>
@@ -60,19 +60,51 @@ Fully working examples of how to use these components are in `/example-crates`:
[`bdk_chain`]: https://docs.rs/bdk-chain/ [`bdk_chain`]: https://docs.rs/bdk-chain/
## Minimum Supported Rust Version (MSRV) ## Minimum Supported Rust Version (MSRV)
This library should compile with any combination of features with Rust 1.63.0. This library should compile with any combination of features with Rust 1.57.0.
To build with the MSRV you will need to pin dependencies as follows: To build with the MSRV you will need to pin dependencies as follows:
```shell ```shell
# zip 0.6.3 has MSRV 1.64.0 # log 0.4.19 has MSRV 1.60.0+
cargo update -p log --precise "0.4.18"
# tempfile 3.7.0 has MSRV 1.63.0+
cargo update -p tempfile --precise "3.6.0"
# reqwest 0.11.19 has MSRV 1.63.0+
cargo update -p reqwest --precise "0.11.18"
# hyper-rustls 0.24.1 has MSRV 1.60.0+
cargo update -p hyper-rustls --precise 0.24.0
# rustls 0.21.7 has MSRV 1.60.0+
cargo update -p rustls:0.21.9 --precise "0.21.1"
# rustls 0.20.9 has MSRV 1.60.0+
cargo update -p rustls:0.20.9 --precise "0.20.8"
# tokio 1.33 has MSRV 1.63.0+
cargo update -p tokio --precise "1.29.1"
# tokio-util 0.7.9 doesn't build with MSRV 1.57.0
cargo update -p tokio-util --precise "0.7.8"
# flate2 1.0.27 has MSRV 1.63.0+
cargo update -p flate2 --precise "1.0.26"
# h2 0.3.21 has MSRV 1.63.0+
cargo update -p h2 --precise "0.3.20"
# rustls-webpki 0.100.3 has MSRV 1.60.0+
cargo update -p rustls-webpki:0.100.3 --precise "0.100.1"
# rustls-webpki 0.101.2 has MSRV 1.60.0+
cargo update -p rustls-webpki:0.101.7 --precise "0.101.1"
# zip 0.6.6 has MSRV 1.59.0+
cargo update -p zip --precise "0.6.2" cargo update -p zip --precise "0.6.2"
# time 0.3.21 has MSRV 1.65.0 # time 0.3.14 has MSRV 1.59.0+
cargo update -p time --precise "0.3.20" cargo update -p time --precise "0.3.13"
# jobserver 0.1.27 has MSRV 1.66.0 # byteorder 1.5.0 has MSRV 1.60.0+
cargo update -p byteorder --precise "1.4.3"
# webpki 0.22.4 requires `ring:0.17.2` which has MSRV 1.61.0+
cargo update -p webpki --precise "0.22.2"
# os_str_bytes 6.6.0 has MSRV 1.61.0+
cargo update -p os_str_bytes --precise 6.5.1
# sct 0.7.1 has MSRV 1.61.0+
cargo update -p sct --precise 0.7.0
# cc 1.0.82 has MSRV 1.61.0+
cargo update -p cc --precise "1.0.81"
# jobserver 0.1.27 has MSRV 1.66.0+
cargo update -p jobserver --precise "0.1.26" cargo update -p jobserver --precise "0.1.26"
# home 0.5.9 has MSRV 1.70.0
cargo update -p home --precise "0.5.5"
``` ```
## License ## License

View File

@@ -1 +1 @@
msrv="1.63.0" msrv="1.57.0"

View File

@@ -1,7 +1,7 @@
[package] [package]
name = "bdk" name = "bdk"
homepage = "https://bitcoindevkit.org" homepage = "https://bitcoindevkit.org"
version = "1.0.0-alpha.3" version = "1.0.0-alpha.2"
repository = "https://github.com/bitcoindevkit/bdk" repository = "https://github.com/bitcoindevkit/bdk"
documentation = "https://docs.rs/bdk" documentation = "https://docs.rs/bdk"
description = "A modern, lightweight, descriptor-based wallet library" description = "A modern, lightweight, descriptor-based wallet library"
@@ -10,7 +10,7 @@ readme = "README.md"
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
authors = ["Bitcoin Dev Kit Developers"] authors = ["Bitcoin Dev Kit Developers"]
edition = "2021" edition = "2021"
rust-version = "1.63" rust-version = "1.57"
[dependencies] [dependencies]
rand = "^0.8" rand = "^0.8"
@@ -18,11 +18,11 @@ miniscript = { version = "10.0.0", features = ["serde"], default-features = fals
bitcoin = { version = "0.30.0", features = ["serde", "base64", "rand-std"], default-features = false } bitcoin = { version = "0.30.0", features = ["serde", "base64", "rand-std"], default-features = false }
serde = { version = "^1.0", features = ["derive"] } serde = { version = "^1.0", features = ["derive"] }
serde_json = { version = "^1.0" } serde_json = { version = "^1.0" }
bdk_chain = { path = "../chain", version = "0.7.0", features = ["miniscript", "serde"], default-features = false } bdk_chain = { path = "../chain", version = "0.6.0", features = ["miniscript", "serde"], default-features = false }
# Optional dependencies # Optional dependencies
hwi = { version = "0.7.0", optional = true, features = [ "miniscript"] } hwi = { version = "0.7.0", optional = true, features = [ "miniscript"] }
bip39 = { version = "2.0", optional = true } bip39 = { version = "1.0.1", optional = true }
[target.'cfg(target_arch = "wasm32")'.dependencies] [target.'cfg(target_arch = "wasm32")'.dependencies]
getrandom = "0.2" getrandom = "0.2"

View File

@@ -13,7 +13,7 @@
<a href="https://github.com/bitcoindevkit/bdk/actions?query=workflow%3ACI"><img alt="CI Status" src="https://github.com/bitcoindevkit/bdk/workflows/CI/badge.svg"></a> <a href="https://github.com/bitcoindevkit/bdk/actions?query=workflow%3ACI"><img alt="CI Status" src="https://github.com/bitcoindevkit/bdk/workflows/CI/badge.svg"></a>
<a href="https://coveralls.io/github/bitcoindevkit/bdk?branch=master"><img src="https://coveralls.io/repos/github/bitcoindevkit/bdk/badge.svg?branch=master"/></a> <a href="https://coveralls.io/github/bitcoindevkit/bdk?branch=master"><img src="https://coveralls.io/repos/github/bitcoindevkit/bdk/badge.svg?branch=master"/></a>
<a href="https://docs.rs/bdk"><img alt="API Docs" src="https://img.shields.io/badge/docs.rs-bdk-green"/></a> <a href="https://docs.rs/bdk"><img alt="API Docs" src="https://img.shields.io/badge/docs.rs-bdk-green"/></a>
<a href="https://blog.rust-lang.org/2022/08/11/Rust-1.63.0.html"><img alt="Rustc Version 1.63.0+" src="https://img.shields.io/badge/rustc-1.63.0%2B-lightgrey.svg"/></a> <a href="https://blog.rust-lang.org/2021/12/02/Rust-1.57.0.html"><img alt="Rustc Version 1.57.0+" src="https://img.shields.io/badge/rustc-1.57.0%2B-lightgrey.svg"/></a>
<a href="https://discord.gg/d7NkDKm"><img alt="Chat on Discord" src="https://img.shields.io/discord/753336465005608961?logo=discord"></a> <a href="https://discord.gg/d7NkDKm"><img alt="Chat on Discord" src="https://img.shields.io/discord/753336465005608961?logo=discord"></a>
</p> </p>

View File

@@ -575,7 +575,7 @@ mod test {
if let ExtendedDescriptor::Pkh(pkh) = xdesc.0 { if let ExtendedDescriptor::Pkh(pkh) = xdesc.0 {
let path: Vec<ChildNumber> = pkh.into_inner().full_derivation_path().unwrap().into(); let path: Vec<ChildNumber> = pkh.into_inner().full_derivation_path().unwrap().into();
let purpose = path.first().unwrap(); let purpose = path.get(0).unwrap();
assert_matches!(purpose, Hardened { index: 44 }); assert_matches!(purpose, Hardened { index: 44 });
let coin_type = path.get(1).unwrap(); let coin_type = path.get(1).unwrap();
assert_matches!(coin_type, Hardened { index: 0 }); assert_matches!(coin_type, Hardened { index: 0 });
@@ -589,7 +589,7 @@ mod test {
if let ExtendedDescriptor::Pkh(pkh) = tdesc.0 { if let ExtendedDescriptor::Pkh(pkh) = tdesc.0 {
let path: Vec<ChildNumber> = pkh.into_inner().full_derivation_path().unwrap().into(); let path: Vec<ChildNumber> = pkh.into_inner().full_derivation_path().unwrap().into();
let purpose = path.first().unwrap(); let purpose = path.get(0).unwrap();
assert_matches!(purpose, Hardened { index: 44 }); assert_matches!(purpose, Hardened { index: 44 });
let coin_type = path.get(1).unwrap(); let coin_type = path.get(1).unwrap();
assert_matches!(coin_type, Hardened { index: 1 }); assert_matches!(coin_type, Hardened { index: 1 });

View File

@@ -11,7 +11,7 @@
//! Wallet //! Wallet
//! //!
//! This module defines the [`Wallet`]. //! This module defines the [`Wallet`] structure.
use crate::collections::{BTreeMap, HashMap, HashSet}; use crate::collections::{BTreeMap, HashMap, HashSet};
use alloc::{ use alloc::{
boxed::Box, boxed::Box,
@@ -77,7 +77,7 @@ const COINBASE_MATURITY: u32 = 100;
/// A Bitcoin wallet /// A Bitcoin wallet
/// ///
/// The `Wallet` acts as a way of coherently interfacing with output descriptors and related transactions. /// The `Wallet` struct acts as a way of coherently interfacing with output descriptors and related transactions.
/// Its main components are: /// Its main components are:
/// ///
/// 1. output *descriptors* from which it can derive addresses. /// 1. output *descriptors* from which it can derive addresses.
@@ -237,7 +237,6 @@ impl Wallet {
network: Network, network: Network,
) -> Result<Self, DescriptorError> { ) -> Result<Self, DescriptorError> {
Self::new(descriptor, change_descriptor, (), network).map_err(|e| match e { Self::new(descriptor, change_descriptor, (), network).map_err(|e| match e {
NewError::NonEmptyDatabase => unreachable!("mock-database cannot have data"),
NewError::Descriptor(e) => e, NewError::Descriptor(e) => e,
NewError::Write(_) => unreachable!("mock-write must always succeed"), NewError::Write(_) => unreachable!("mock-write must always succeed"),
}) })
@@ -252,7 +251,6 @@ impl Wallet {
) -> Result<Self, crate::descriptor::DescriptorError> { ) -> Result<Self, crate::descriptor::DescriptorError> {
Self::new_with_genesis_hash(descriptor, change_descriptor, (), network, genesis_hash) Self::new_with_genesis_hash(descriptor, change_descriptor, (), network, genesis_hash)
.map_err(|e| match e { .map_err(|e| match e {
NewError::NonEmptyDatabase => unreachable!("mock-database cannot have data"),
NewError::Descriptor(e) => e, NewError::Descriptor(e) => e,
NewError::Write(_) => unreachable!("mock-write must always succeed"), NewError::Write(_) => unreachable!("mock-write must always succeed"),
}) })
@@ -290,8 +288,6 @@ where
/// [`new_with_genesis_hash`]: Wallet::new_with_genesis_hash /// [`new_with_genesis_hash`]: Wallet::new_with_genesis_hash
#[derive(Debug)] #[derive(Debug)]
pub enum NewError<W> { pub enum NewError<W> {
/// Database already has data.
NonEmptyDatabase,
/// There was problem with the passed-in descriptor(s). /// There was problem with the passed-in descriptor(s).
Descriptor(crate::descriptor::DescriptorError), Descriptor(crate::descriptor::DescriptorError),
/// We were unable to write the wallet's data to the persistence backend. /// We were unable to write the wallet's data to the persistence backend.
@@ -304,10 +300,6 @@ where
{ {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self { match self {
NewError::NonEmptyDatabase => write!(
f,
"database already has data - use `load` or `new_or_load` methods instead"
),
NewError::Descriptor(e) => e.fmt(f), NewError::Descriptor(e) => e.fmt(f),
NewError::Write(e) => e.fmt(f), NewError::Write(e) => e.fmt(f),
} }
@@ -356,7 +348,7 @@ where
#[cfg(feature = "std")] #[cfg(feature = "std")]
impl<L> std::error::Error for LoadError<L> where L: core::fmt::Display + core::fmt::Debug {} impl<L> std::error::Error for LoadError<L> where L: core::fmt::Display + core::fmt::Debug {}
/// Error type for when we try load a [`Wallet`] from persistence and creating it if non-existent. /// Error type for when we try load a [`Wallet`] from persistence and creating it if non-existant.
/// ///
/// Methods [`new_or_load`] and [`new_or_load_with_genesis_hash`] may return this error. /// Methods [`new_or_load`] and [`new_or_load_with_genesis_hash`] may return this error.
/// ///
@@ -454,18 +446,13 @@ impl<D> Wallet<D> {
pub fn new_with_genesis_hash<E: IntoWalletDescriptor>( pub fn new_with_genesis_hash<E: IntoWalletDescriptor>(
descriptor: E, descriptor: E,
change_descriptor: Option<E>, change_descriptor: Option<E>,
mut db: D, db: D,
network: Network, network: Network,
genesis_hash: BlockHash, genesis_hash: BlockHash,
) -> Result<Self, NewError<D::WriteError>> ) -> Result<Self, NewError<D::WriteError>>
where where
D: PersistBackend<ChangeSet>, D: PersistBackend<ChangeSet>,
{ {
if let Ok(changeset) = db.load_from_persistence() {
if changeset.is_some() {
return Err(NewError::NonEmptyDatabase);
}
}
let secp = Secp256k1::new(); let secp = Secp256k1::new();
let (chain, chain_changeset) = LocalChain::from_genesis_hash(genesis_hash); let (chain, chain_changeset) = LocalChain::from_genesis_hash(genesis_hash);
let mut index = KeychainTxOutIndex::<KeychainKind>::default(); let mut index = KeychainTxOutIndex::<KeychainKind>::default();
@@ -530,9 +517,7 @@ impl<D> Wallet<D> {
create_signers(&mut index, &secp, descriptor, change_descriptor, network) create_signers(&mut index, &secp, descriptor, change_descriptor, network)
.map_err(LoadError::Descriptor)?; .map_err(LoadError::Descriptor)?;
let mut indexed_graph = IndexedTxGraph::new(index); let indexed_graph = IndexedTxGraph::new(index);
indexed_graph.apply_changeset(changeset.indexed_tx_graph);
let persist = Persist::new(db); let persist = Persist::new(db);
Ok(Wallet { Ok(Wallet {
@@ -628,9 +613,6 @@ impl<D> Wallet<D> {
genesis_hash, genesis_hash,
) )
.map_err(|e| match e { .map_err(|e| match e {
NewError::NonEmptyDatabase => {
unreachable!("database is already checked to have no data")
}
NewError::Descriptor(e) => NewOrLoadError::Descriptor(e), NewError::Descriptor(e) => NewOrLoadError::Descriptor(e),
NewError::Write(e) => NewOrLoadError::Write(e), NewError::Write(e) => NewOrLoadError::Write(e),
}), }),

View File

@@ -221,7 +221,7 @@ pub enum SignerContext {
}, },
} }
/// Wrapper to pair a signer with its context /// Wrapper structure to pair a signer with its context
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct SignerWrapper<S: Sized + fmt::Debug + Clone> { pub struct SignerWrapper<S: Sized + fmt::Debug + Clone> {
signer: S, signer: S,
@@ -812,10 +812,9 @@ pub struct SignOptions {
} }
/// Customize which taproot script-path leaves the signer should sign. /// Customize which taproot script-path leaves the signer should sign.
#[derive(Default, Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
pub enum TapLeavesOptions { pub enum TapLeavesOptions {
/// The signer will sign all the leaves it has a key for. /// The signer will sign all the leaves it has a key for.
#[default]
All, All,
/// The signer won't sign leaves other than the ones specified. Note that it could still ignore /// The signer won't sign leaves other than the ones specified. Note that it could still ignore
/// some of the specified leaves, if it doesn't have the right key to sign them. /// some of the specified leaves, if it doesn't have the right key to sign them.
@@ -826,6 +825,12 @@ pub enum TapLeavesOptions {
None, None,
} }
impl Default for TapLeavesOptions {
fn default() -> Self {
TapLeavesOptions::All
}
}
#[allow(clippy::derivable_impls)] #[allow(clippy::derivable_impls)]
impl Default for SignOptions { impl Default for SignOptions {
fn default() -> Self { fn default() -> Self {

View File

@@ -811,10 +811,9 @@ impl<'a, D> TxBuilder<'a, D, DefaultCoinSelectionAlgorithm, BumpFee> {
} }
/// Ordering of the transaction's inputs and outputs /// Ordering of the transaction's inputs and outputs
#[derive(Default, Debug, Ord, PartialOrd, Eq, PartialEq, Hash, Clone, Copy)] #[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Hash, Clone, Copy)]
pub enum TxOrdering { pub enum TxOrdering {
/// Randomized (default) /// Randomized (default)
#[default]
Shuffle, Shuffle,
/// Unchanged /// Unchanged
Untouched, Untouched,
@@ -822,6 +821,12 @@ pub enum TxOrdering {
Bip69Lexicographic, Bip69Lexicographic,
} }
impl Default for TxOrdering {
fn default() -> Self {
TxOrdering::Shuffle
}
}
impl TxOrdering { impl TxOrdering {
/// Sort transaction inputs and outputs by [`TxOrdering`] variant /// Sort transaction inputs and outputs by [`TxOrdering`] variant
pub fn sort_tx(&self, tx: &mut Transaction) { pub fn sort_tx(&self, tx: &mut Transaction) {
@@ -875,10 +880,9 @@ impl RbfValue {
} }
/// Policy regarding the use of change outputs when creating a transaction /// Policy regarding the use of change outputs when creating a transaction
#[derive(Default, Debug, Ord, PartialOrd, Eq, PartialEq, Hash, Clone, Copy)] #[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Hash, Clone, Copy)]
pub enum ChangeSpendPolicy { pub enum ChangeSpendPolicy {
/// Use both change and non-change outputs (default) /// Use both change and non-change outputs (default)
#[default]
ChangeAllowed, ChangeAllowed,
/// Only use change outputs (see [`TxBuilder::only_spend_change`]) /// Only use change outputs (see [`TxBuilder::only_spend_change`])
OnlyChange, OnlyChange,
@@ -886,6 +890,12 @@ pub enum ChangeSpendPolicy {
ChangeForbidden, ChangeForbidden,
} }
impl Default for ChangeSpendPolicy {
fn default() -> Self {
ChangeSpendPolicy::ChangeAllowed
}
}
impl ChangeSpendPolicy { impl ChangeSpendPolicy {
pub(crate) fn is_satisfied_by(&self, utxo: &LocalOutput) -> bool { pub(crate) fn is_satisfied_by(&self, utxo: &LocalOutput) -> bool {
match self { match self {

View File

@@ -7,8 +7,8 @@ use bdk::signer::{SignOptions, SignerError};
use bdk::wallet::coin_selection::{self, LargestFirstCoinSelection}; use bdk::wallet::coin_selection::{self, LargestFirstCoinSelection};
use bdk::wallet::error::CreateTxError; use bdk::wallet::error::CreateTxError;
use bdk::wallet::tx_builder::AddForeignUtxoError; use bdk::wallet::tx_builder::AddForeignUtxoError;
use bdk::wallet::AddressIndex::*;
use bdk::wallet::{AddressIndex, AddressInfo, Balance, Wallet}; use bdk::wallet::{AddressIndex, AddressInfo, Balance, Wallet};
use bdk::wallet::{AddressIndex::*, NewError};
use bdk::{FeeRate, KeychainKind}; use bdk::{FeeRate, KeychainKind};
use bdk_chain::COINBASE_MATURITY; use bdk_chain::COINBASE_MATURITY;
use bdk_chain::{BlockId, ConfirmationTime}; use bdk_chain::{BlockId, ConfirmationTime};
@@ -71,33 +71,19 @@ fn load_recovers_wallet() {
let file_path = temp_dir.path().join("store.db"); let file_path = temp_dir.path().join("store.db");
// create new wallet // create new wallet
let wallet_spk_index = { let wallet_keychains = {
let db = bdk_file_store::Store::create_new(DB_MAGIC, &file_path).expect("must create db"); let db = bdk_file_store::Store::create_new(DB_MAGIC, &file_path).expect("must create db");
let mut wallet = Wallet::new(get_test_tr_single_sig_xprv(), None, db, Network::Testnet) let wallet =
.expect("must init wallet"); Wallet::new(get_test_wpkh(), None, db, Network::Testnet).expect("must init wallet");
wallet.keychains().clone()
wallet.try_get_address(New).unwrap();
wallet.spk_index().clone()
}; };
// recover wallet // recover wallet
{ {
let db = bdk_file_store::Store::open(DB_MAGIC, &file_path).expect("must recover db"); let db = bdk_file_store::Store::open(DB_MAGIC, &file_path).expect("must recover db");
let wallet = let wallet = Wallet::load(get_test_wpkh(), None, db).expect("must recover wallet");
Wallet::load(get_test_tr_single_sig_xprv(), None, db).expect("must recover wallet");
assert_eq!(wallet.network(), Network::Testnet); assert_eq!(wallet.network(), Network::Testnet);
assert_eq!(wallet.spk_index().keychains(), wallet_spk_index.keychains()); assert_eq!(wallet.spk_index().keychains(), &wallet_keychains);
assert_eq!(
wallet.spk_index().last_revealed_indices(),
wallet_spk_index.last_revealed_indices()
);
}
// `new` can only be called on empty db
{
let db = bdk_file_store::Store::open(DB_MAGIC, &file_path).expect("must recover db");
let result = Wallet::new(get_test_tr_single_sig_xprv(), None, db, Network::Testnet);
assert!(matches!(result, Err(NewError::NonEmptyDatabase)));
} }
} }
@@ -106,7 +92,7 @@ fn new_or_load() {
let temp_dir = tempfile::tempdir().expect("must create tempdir"); let temp_dir = tempfile::tempdir().expect("must create tempdir");
let file_path = temp_dir.path().join("store.db"); let file_path = temp_dir.path().join("store.db");
// init wallet when non-existent // init wallet when non-existant
let wallet_keychains = { let wallet_keychains = {
let db = bdk_file_store::Store::open_or_create_new(DB_MAGIC, &file_path) let db = bdk_file_store::Store::open_or_create_new(DB_MAGIC, &file_path)
.expect("must create db"); .expect("must create db");

View File

@@ -1,8 +1,8 @@
[package] [package]
name = "bdk_bitcoind_rpc" name = "bdk_bitcoind_rpc"
version = "0.2.0" version = "0.1.0"
edition = "2021" edition = "2021"
rust-version = "1.63" rust-version = "1.57"
homepage = "https://bitcoindevkit.org" homepage = "https://bitcoindevkit.org"
repository = "https://github.com/bitcoindevkit/bdk" repository = "https://github.com/bitcoindevkit/bdk"
documentation = "https://docs.rs/bdk_bitcoind_rpc" documentation = "https://docs.rs/bdk_bitcoind_rpc"
@@ -16,7 +16,7 @@ readme = "README.md"
# For no-std, remember to enable the bitcoin/no-std feature # For no-std, remember to enable the bitcoin/no-std feature
bitcoin = { version = "0.30", default-features = false } bitcoin = { version = "0.30", default-features = false }
bitcoincore-rpc = { version = "0.17" } bitcoincore-rpc = { version = "0.17" }
bdk_chain = { path = "../chain", version = "0.7", default-features = false } bdk_chain = { path = "../chain", version = "0.6", default-features = false }
[dev-dependencies] [dev-dependencies]
bitcoind = { version = "0.33", features = ["25_0"] } bitcoind = { version = "0.33", features = ["25_0"] }

View File

@@ -14,7 +14,7 @@ use bitcoin::{block::Header, Block, BlockHash, Transaction};
pub use bitcoincore_rpc; pub use bitcoincore_rpc;
use bitcoincore_rpc::bitcoincore_rpc_json; use bitcoincore_rpc::bitcoincore_rpc_json;
/// The [`Emitter`] is used to emit data sourced from [`bitcoincore_rpc::Client`]. /// A structure that emits data sourced from [`bitcoincore_rpc::Client`].
/// ///
/// Refer to [module-level documentation] for more. /// Refer to [module-level documentation] for more.
/// ///

View File

@@ -1,8 +1,8 @@
[package] [package]
name = "bdk_chain" name = "bdk_chain"
version = "0.7.0" version = "0.6.0"
edition = "2021" edition = "2021"
rust-version = "1.63" rust-version = "1.57"
homepage = "https://bitcoindevkit.org" homepage = "https://bitcoindevkit.org"
repository = "https://github.com/bitcoindevkit/bdk" repository = "https://github.com/bitcoindevkit/bdk"
documentation = "https://docs.rs/bdk_chain" documentation = "https://docs.rs/bdk_chain"

View File

@@ -147,8 +147,6 @@ impl From<(&u32, &BlockHash)> for BlockId {
/// An [`Anchor`] implementation that also records the exact confirmation height of the transaction. /// An [`Anchor`] implementation that also records the exact confirmation height of the transaction.
/// ///
/// Note that the confirmation block and the anchor block can be different here.
///
/// Refer to [`Anchor`] for more details. /// Refer to [`Anchor`] for more details.
#[derive(Debug, Default, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)] #[derive(Debug, Default, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)]
#[cfg_attr( #[cfg_attr(
@@ -188,8 +186,6 @@ impl AnchorFromBlockPosition for ConfirmationHeightAnchor {
/// An [`Anchor`] implementation that also records the exact confirmation time and height of the /// An [`Anchor`] implementation that also records the exact confirmation time and height of the
/// transaction. /// transaction.
/// ///
/// Note that the confirmation block and the anchor block can be different here.
///
/// Refer to [`Anchor`] for more details. /// Refer to [`Anchor`] for more details.
#[derive(Debug, Default, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)] #[derive(Debug, Default, Clone, PartialEq, Eq, Copy, PartialOrd, Ord, core::hash::Hash)]
#[cfg_attr( #[cfg_attr(

View File

@@ -3,7 +3,7 @@ use crate::BlockId;
/// Represents a service that tracks the blockchain. /// Represents a service that tracks the blockchain.
/// ///
/// The main method is [`is_block_in_chain`] which determines whether a given block of [`BlockId`] /// The main method is [`is_block_in_chain`] which determines whether a given block of [`BlockId`]
/// is an ancestor of the `chain_tip`. /// is an ancestor of another "static block".
/// ///
/// [`is_block_in_chain`]: Self::is_block_in_chain /// [`is_block_in_chain`]: Self::is_block_in_chain
pub trait ChainOracle { pub trait ChainOracle {

View File

@@ -1,5 +1,7 @@
//! Contains the [`IndexedTxGraph`] and associated types. Refer to the //! Contains the [`IndexedTxGraph`] structure and associated types.
//! [`IndexedTxGraph`] documentation for more. //!
//! This is essentially a [`TxGraph`] combined with an indexer.
use alloc::vec::Vec; use alloc::vec::Vec;
use bitcoin::{Block, OutPoint, Transaction, TxOut, Txid}; use bitcoin::{Block, OutPoint, Transaction, TxOut, Txid};
@@ -9,9 +11,9 @@ use crate::{
Anchor, AnchorFromBlockPosition, Append, BlockId, Anchor, AnchorFromBlockPosition, Append, BlockId,
}; };
/// The [`IndexedTxGraph`] combines a [`TxGraph`] and an [`Indexer`] implementation. /// A struct that combines [`TxGraph`] and an [`Indexer`] implementation.
/// ///
/// It ensures that [`TxGraph`] and [`Indexer`] are updated atomically. /// This structure ensures that [`TxGraph`] and [`Indexer`] are updated atomically.
#[derive(Debug)] #[derive(Debug)]
pub struct IndexedTxGraph<A, I> { pub struct IndexedTxGraph<A, I> {
/// Transaction index. /// Transaction index.
@@ -264,7 +266,7 @@ where
} }
} }
/// Represents changes to an [`IndexedTxGraph`]. /// A structure that represents changes to an [`IndexedTxGraph`].
#[derive(Clone, Debug, PartialEq)] #[derive(Clone, Debug, PartialEq)]
#[cfg_attr( #[cfg_attr(
feature = "serde", feature = "serde",

View File

@@ -5,13 +5,12 @@ use crate::{
spk_iter::BIP32_MAX_INDEX, spk_iter::BIP32_MAX_INDEX,
SpkIterator, SpkTxOutIndex, SpkIterator, SpkTxOutIndex,
}; };
use alloc::vec::Vec;
use bitcoin::{OutPoint, Script, TxOut}; use bitcoin::{OutPoint, Script, TxOut};
use core::{fmt::Debug, ops::Deref}; use core::{fmt::Debug, ops::Deref};
use crate::Append; use crate::Append;
const DEFAULT_LOOKAHEAD: u32 = 1_000;
/// A convenient wrapper around [`SpkTxOutIndex`] that relates script pubkeys to miniscript public /// A convenient wrapper around [`SpkTxOutIndex`] that relates script pubkeys to miniscript public
/// [`Descriptor`]s. /// [`Descriptor`]s.
/// ///
@@ -47,7 +46,7 @@ const DEFAULT_LOOKAHEAD: u32 = 1_000;
/// # let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only(); /// # let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only();
/// # let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap(); /// # let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
/// # let (internal_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/*)").unwrap(); /// # let (internal_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/*)").unwrap();
/// # let (descriptor_for_user_42, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/2/*)").unwrap(); /// # let descriptor_for_user_42 = external_descriptor.clone();
/// txout_index.add_keychain(MyKeychain::External, external_descriptor); /// txout_index.add_keychain(MyKeychain::External, external_descriptor);
/// txout_index.add_keychain(MyKeychain::Internal, internal_descriptor); /// txout_index.add_keychain(MyKeychain::Internal, internal_descriptor);
/// txout_index.add_keychain(MyKeychain::MyAppUser { user_id: 42 }, descriptor_for_user_42); /// txout_index.add_keychain(MyKeychain::MyAppUser { user_id: 42 }, descriptor_for_user_42);
@@ -66,12 +65,17 @@ pub struct KeychainTxOutIndex<K> {
// last revealed indexes // last revealed indexes
last_revealed: BTreeMap<K, u32>, last_revealed: BTreeMap<K, u32>,
// lookahead settings for each keychain // lookahead settings for each keychain
lookahead: u32, lookahead: BTreeMap<K, u32>,
} }
impl<K> Default for KeychainTxOutIndex<K> { impl<K> Default for KeychainTxOutIndex<K> {
fn default() -> Self { fn default() -> Self {
Self::new(DEFAULT_LOOKAHEAD) Self {
inner: SpkTxOutIndex::default(),
keychains: BTreeMap::default(),
last_revealed: BTreeMap::default(),
lookahead: BTreeMap::default(),
}
} }
} }
@@ -114,25 +118,6 @@ impl<K: Clone + Ord + Debug> Indexer for KeychainTxOutIndex<K> {
} }
} }
impl<K> KeychainTxOutIndex<K> {
/// Construct a [`KeychainTxOutIndex`] with the given `lookahead`.
///
/// The `lookahead` is the number of script pubkeys to derive and cache from the internal
/// descriptors over and above the last revealed script index. Without a lookahead the index
/// will miss outputs you own when processing transactions whose output script pubkeys lie
/// beyond the last revealed index. In certain situations, such as when performing an initial
/// scan of the blockchain during wallet import, it may be uncertain or unknown what the index
/// of the last revealed script pubkey actually is.
pub fn new(lookahead: u32) -> Self {
Self {
inner: SpkTxOutIndex::default(),
keychains: BTreeMap::new(),
last_revealed: BTreeMap::new(),
lookahead,
}
}
}
impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> { impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
/// Return a reference to the internal [`SpkTxOutIndex`]. /// Return a reference to the internal [`SpkTxOutIndex`].
pub fn inner(&self) -> &SpkTxOutIndex<(K, u32)> { pub fn inner(&self) -> &SpkTxOutIndex<(K, u32)> {
@@ -160,22 +145,54 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
pub fn add_keychain(&mut self, keychain: K, descriptor: Descriptor<DescriptorPublicKey>) { pub fn add_keychain(&mut self, keychain: K, descriptor: Descriptor<DescriptorPublicKey>) {
let old_descriptor = &*self let old_descriptor = &*self
.keychains .keychains
.entry(keychain.clone()) .entry(keychain)
.or_insert_with(|| descriptor.clone()); .or_insert_with(|| descriptor.clone());
assert_eq!( assert_eq!(
&descriptor, old_descriptor, &descriptor, old_descriptor,
"keychain already contains a different descriptor" "keychain already contains a different descriptor"
); );
self.replenish_lookahead(&keychain, self.lookahead);
} }
/// Get the lookahead setting. /// Return the lookahead setting for each keychain.
/// ///
/// Refer to [`new`] for more information on the `lookahead`. /// Refer to [`set_lookahead`] for a deeper explanation of the `lookahead`.
/// ///
/// [`new`]: Self::new /// [`set_lookahead`]: Self::set_lookahead
pub fn lookahead(&self) -> u32 { pub fn lookaheads(&self) -> &BTreeMap<K, u32> {
self.lookahead &self.lookahead
}
/// Convenience method to call [`set_lookahead`] for all keychains.
///
/// [`set_lookahead`]: Self::set_lookahead
pub fn set_lookahead_for_all(&mut self, lookahead: u32) {
for keychain in &self.keychains.keys().cloned().collect::<Vec<_>>() {
self.set_lookahead(keychain, lookahead);
}
}
/// Set the lookahead count for `keychain`.
///
/// The lookahead is the number of scripts to cache ahead of the last revealed script index. This
/// is useful to find outputs you own when processing block data that lie beyond the last revealed
/// index. In certain situations, such as when performing an initial scan of the blockchain during
/// wallet import, it may be uncertain or unknown what the last revealed index is.
///
/// # Panics
///
/// This will panic if the `keychain` does not exist.
pub fn set_lookahead(&mut self, keychain: &K, lookahead: u32) {
self.lookahead.insert(keychain.clone(), lookahead);
self.replenish_lookahead(keychain);
}
/// Convenience method to call [`lookahead_to_target`] for multiple keychains.
///
/// [`lookahead_to_target`]: Self::lookahead_to_target
pub fn lookahead_to_target_multi(&mut self, target_indexes: BTreeMap<K, u32>) {
for (keychain, target_index) in target_indexes {
self.lookahead_to_target(&keychain, target_index)
}
} }
/// Store lookahead scripts until `target_index`. /// Store lookahead scripts until `target_index`.
@@ -184,14 +201,22 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
pub fn lookahead_to_target(&mut self, keychain: &K, target_index: u32) { pub fn lookahead_to_target(&mut self, keychain: &K, target_index: u32) {
let next_index = self.next_store_index(keychain); let next_index = self.next_store_index(keychain);
if let Some(temp_lookahead) = target_index.checked_sub(next_index).filter(|&v| v > 0) { if let Some(temp_lookahead) = target_index.checked_sub(next_index).filter(|&v| v > 0) {
self.replenish_lookahead(keychain, temp_lookahead); let old_lookahead = self.lookahead.insert(keychain.clone(), temp_lookahead);
self.replenish_lookahead(keychain);
// revert
match old_lookahead {
Some(lookahead) => self.lookahead.insert(keychain.clone(), lookahead),
None => self.lookahead.remove(keychain),
};
} }
} }
fn replenish_lookahead(&mut self, keychain: &K, lookahead: u32) { fn replenish_lookahead(&mut self, keychain: &K) {
let descriptor = self.keychains.get(keychain).expect("keychain must exist"); let descriptor = self.keychains.get(keychain).expect("keychain must exist");
let next_store_index = self.next_store_index(keychain); let next_store_index = self.next_store_index(keychain);
let next_reveal_index = self.last_revealed.get(keychain).map_or(0, |v| *v + 1); let next_reveal_index = self.last_revealed.get(keychain).map_or(0, |v| *v + 1);
let lookahead = self.lookahead.get(keychain).map_or(0, |v| *v);
for (new_index, new_spk) in for (new_index, new_spk) in
SpkIterator::new_with_range(descriptor, next_store_index..next_reveal_index + lookahead) SpkIterator::new_with_range(descriptor, next_store_index..next_reveal_index + lookahead)
@@ -363,8 +388,12 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
let target_index = if has_wildcard { target_index } else { 0 }; let target_index = if has_wildcard { target_index } else { 0 };
let next_reveal_index = self.last_revealed.get(keychain).map_or(0, |v| *v + 1); let next_reveal_index = self.last_revealed.get(keychain).map_or(0, |v| *v + 1);
let lookahead = self.lookahead.get(keychain).map_or(0, |v| *v);
debug_assert!(next_reveal_index + self.lookahead >= self.next_store_index(keychain)); debug_assert_eq!(
next_reveal_index + lookahead,
self.next_store_index(keychain)
);
// if we need to reveal new indices, the latest revealed index goes here // if we need to reveal new indices, the latest revealed index goes here
let mut reveal_to_index = None; let mut reveal_to_index = None;
@@ -372,12 +401,12 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
// if the target is not yet revealed, but is already stored (due to lookahead), we need to // if the target is not yet revealed, but is already stored (due to lookahead), we need to
// set the `reveal_to_index` as target here (as the `for` loop below only updates // set the `reveal_to_index` as target here (as the `for` loop below only updates
// `reveal_to_index` for indexes that are NOT stored) // `reveal_to_index` for indexes that are NOT stored)
if next_reveal_index <= target_index && target_index < next_reveal_index + self.lookahead { if next_reveal_index <= target_index && target_index < next_reveal_index + lookahead {
reveal_to_index = Some(target_index); reveal_to_index = Some(target_index);
} }
// we range over indexes that are not stored // we range over indexes that are not stored
let range = next_reveal_index + self.lookahead..=target_index + self.lookahead; let range = next_reveal_index + lookahead..=target_index + lookahead;
for (new_index, new_spk) in SpkIterator::new_with_range(descriptor, range) { for (new_index, new_spk) in SpkIterator::new_with_range(descriptor, range) {
let _inserted = self let _inserted = self
.inner .inner

View File

@@ -1,4 +1,4 @@
//! This crate is a collection of core structures for [Bitcoin Dev Kit]. //! This crate is a collection of core structures for [Bitcoin Dev Kit] (alpha release).
//! //!
//! The goal of this crate is to give wallets the mechanisms needed to: //! The goal of this crate is to give wallets the mechanisms needed to:
//! //!
@@ -12,8 +12,9 @@
//! you do it synchronously or asynchronously. If you know a fact about the blockchain, you can just //! you do it synchronously or asynchronously. If you know a fact about the blockchain, you can just
//! tell `bdk_chain`'s APIs about it, and that information will be integrated, if it can be done //! tell `bdk_chain`'s APIs about it, and that information will be integrated, if it can be done
//! consistently. //! consistently.
//! 2. Data persistence agnostic -- `bdk_chain` does not care where you cache on-chain data, what you //! 2. Error-free APIs.
//! cache or how you retrieve it from persistent storage. //! 3. Data persistence agnostic -- `bdk_chain` does not care where you cache on-chain data, what you
//! cache or how you fetch it.
//! //!
//! [Bitcoin Dev Kit]: https://bitcoindevkit.org/ //! [Bitcoin Dev Kit]: https://bitcoindevkit.org/

View File

@@ -7,7 +7,7 @@ use crate::{BlockId, ChainOracle};
use alloc::sync::Arc; use alloc::sync::Arc;
use bitcoin::BlockHash; use bitcoin::BlockHash;
/// The [`ChangeSet`] represents changes to [`LocalChain`]. /// A structure that represents changes to [`LocalChain`].
/// ///
/// The key represents the block height, and the value either represents added a new [`CheckPoint`] /// The key represents the block height, and the value either represents added a new [`CheckPoint`]
/// (if [`Some`]), or removing a [`CheckPoint`] (if [`None`]). /// (if [`Some`]), or removing a [`CheckPoint`] (if [`None`]).
@@ -127,7 +127,7 @@ impl CheckPoint {
} }
} }
/// Iterates over checkpoints backwards. /// A structure that iterates over checkpoints backwards.
pub struct CheckPointIter { pub struct CheckPointIter {
current: Option<Arc<CPInner>>, current: Option<Arc<CPInner>>,
} }
@@ -153,7 +153,7 @@ impl IntoIterator for CheckPoint {
} }
} }
/// Used to update [`LocalChain`]. /// A struct to update [`LocalChain`].
/// ///
/// This is used as input for [`LocalChain::apply_update`]. It contains the update's chain `tip` and /// This is used as input for [`LocalChain::apply_update`]. It contains the update's chain `tip` and
/// a flag `introduce_older_blocks` which signals whether this update intends to introduce missing /// a flag `introduce_older_blocks` which signals whether this update intends to introduce missing

View File

@@ -148,7 +148,7 @@ mod test {
Descriptor<DescriptorPublicKey>, Descriptor<DescriptorPublicKey>,
Descriptor<DescriptorPublicKey>, Descriptor<DescriptorPublicKey>,
) { ) {
let mut txout_index = KeychainTxOutIndex::<TestKeychain>::new(0); let mut txout_index = KeychainTxOutIndex::<TestKeychain>::default();
let secp = Secp256k1::signing_only(); let secp = Secp256k1::signing_only();
let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap(); let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();

View File

@@ -168,7 +168,9 @@ impl<I: Clone + Ord> SpkTxOutIndex<I> {
/// ///
/// Returns `None` if the `TxOut` hasn't been scanned or if nothing matching was found there. /// Returns `None` if the `TxOut` hasn't been scanned or if nothing matching was found there.
pub fn txout(&self, outpoint: OutPoint) -> Option<(&I, &TxOut)> { pub fn txout(&self, outpoint: OutPoint) -> Option<(&I, &TxOut)> {
self.txouts.get(&outpoint).map(|v| (&v.0, &v.1)) self.txouts
.get(&outpoint)
.map(|(spk_i, txout)| (spk_i, txout))
} }
/// Returns the script that has been inserted at the `index`. /// Returns the script that has been inserted at the `index`.

View File

@@ -5,25 +5,21 @@ use alloc::vec::Vec;
/// Trait that "anchors" blockchain data to a specific block of height and hash. /// Trait that "anchors" blockchain data to a specific block of height and hash.
/// ///
/// If transaction A is anchored in block B, and block B is in the best chain, we can /// [`Anchor`] implementations must be [`Ord`] by the anchor block's [`BlockId`] first.
///
/// I.e. If transaction A is anchored in block B, then if block B is in the best chain, we can
/// assume that transaction A is also confirmed in the best chain. This does not necessarily mean /// assume that transaction A is also confirmed in the best chain. This does not necessarily mean
/// that transaction A is confirmed in block B. It could also mean transaction A is confirmed in a /// that transaction A is confirmed in block B. It could also mean transaction A is confirmed in a
/// parent block of B. /// parent block of B.
/// ///
/// Every [`Anchor`] implementation must contain a [`BlockId`] parameter, and must implement
/// [`Ord`]. When implementing [`Ord`], the anchors' [`BlockId`]s should take precedence
/// over other elements inside the [`Anchor`]s for comparison purposes, i.e., you should first
/// compare the anchors' [`BlockId`]s and then care about the rest.
///
/// The example shows different types of anchors:
/// ``` /// ```
/// # use bdk_chain::local_chain::LocalChain; /// # use bdk_chain::local_chain::LocalChain;
/// # use bdk_chain::tx_graph::TxGraph; /// # use bdk_chain::tx_graph::TxGraph;
/// # use bdk_chain::BlockId; /// # use bdk_chain::BlockId;
/// # use bdk_chain::ConfirmationHeightAnchor; /// # use bdk_chain::ConfirmationHeightAnchor;
/// # use bdk_chain::ConfirmationTimeHeightAnchor;
/// # use bdk_chain::example_utils::*; /// # use bdk_chain::example_utils::*;
/// # use bitcoin::hashes::Hash; /// # use bitcoin::hashes::Hash;
///
/// // Initialize the local chain with two blocks. /// // Initialize the local chain with two blocks.
/// let chain = LocalChain::from_blocks( /// let chain = LocalChain::from_blocks(
/// [ /// [
@@ -51,7 +47,6 @@ use alloc::vec::Vec;
/// ); /// );
/// ///
/// // Insert `tx` into a `TxGraph` that uses `ConfirmationHeightAnchor` as the anchor type. /// // Insert `tx` into a `TxGraph` that uses `ConfirmationHeightAnchor` as the anchor type.
/// // This anchor records the anchor block and the confirmation height of the transaction.
/// // When a transaction is anchored with `ConfirmationHeightAnchor`, the anchor block and /// // When a transaction is anchored with `ConfirmationHeightAnchor`, the anchor block and
/// // confirmation block can be different. However, the confirmation block cannot be higher than /// // confirmation block can be different. However, the confirmation block cannot be higher than
/// // the anchor block and both blocks must be in the same chain for the anchor to be valid. /// // the anchor block and both blocks must be in the same chain for the anchor to be valid.
@@ -67,25 +62,6 @@ use alloc::vec::Vec;
/// confirmation_height: 1, /// confirmation_height: 1,
/// }, /// },
/// ); /// );
///
/// // Insert `tx` into a `TxGraph` that uses `ConfirmationTimeHeightAnchor` as the anchor type.
/// // This anchor records the anchor block, the confirmation height and time of the transaction.
/// // When a transaction is anchored with `ConfirmationTimeHeightAnchor`, the anchor block and
/// // confirmation block can be different. However, the confirmation block cannot be higher than
/// // the anchor block and both blocks must be in the same chain for the anchor to be valid.
/// let mut graph_c = TxGraph::<ConfirmationTimeHeightAnchor>::default();
/// let _ = graph_c.insert_tx(tx.clone());
/// graph_c.insert_anchor(
/// tx.txid(),
/// ConfirmationTimeHeightAnchor {
/// anchor_block: BlockId {
/// height: 2,
/// hash: Hash::hash("third".as_bytes()),
/// },
/// confirmation_height: 1,
/// confirmation_time: 123,
/// },
/// );
/// ``` /// ```
pub trait Anchor: core::fmt::Debug + Clone + Eq + PartialOrd + Ord + core::hash::Hash { pub trait Anchor: core::fmt::Debug + Clone + Eq + PartialOrd + Ord + core::hash::Hash {
/// Returns the [`BlockId`] that the associated blockchain data is "anchored" in. /// Returns the [`BlockId`] that the associated blockchain data is "anchored" in.

View File

@@ -1,32 +1,12 @@
//! Module for structures that store and traverse transactions. //! Module for structures that store and traverse transactions.
//! //!
//! [`TxGraph`] contains transactions and indexes them so you can easily traverse the graph of those transactions. //! [`TxGraph`] is a monotone structure that inserts transactions and indexes the spends. The
//! `TxGraph` is *monotone* in that you can always insert a transaction -- it doesn't care whether that //! [`ChangeSet`] structure reports changes of [`TxGraph`] but can also be applied to a
//! transaction is in the current best chain or whether it conflicts with any of the //! [`TxGraph`] as well. Lastly, [`TxDescendants`] is an [`Iterator`] that traverses descendants of
//! existing transactions or what order you insert the transactions. This means that you can always //! a given transaction.
//! combine two [`TxGraph`]s together, without resulting in inconsistencies.
//! Furthermore, there is currently no way to delete a transaction.
//!
//! Transactions can be either whole or partial (i.e., transactions for which we only
//! know some outputs, which we usually call "floating outputs"; these are usually inserted
//! using the [`insert_txout`] method.).
//!
//! The graph contains transactions in the form of [`TxNode`]s. Each node contains the
//! txid, the transaction (whole or partial), the blocks it's anchored in (see the [`Anchor`]
//! documentation for more details), and the timestamp of the last time we saw
//! the transaction as unconfirmed.
//! //!
//! Conflicting transactions are allowed to coexist within a [`TxGraph`]. This is useful for //! Conflicting transactions are allowed to coexist within a [`TxGraph`]. This is useful for
//! identifying and traversing conflicts and descendants of a given transaction. Some [`TxGraph`] //! identifying and traversing conflicts and descendants of a given transaction.
//! methods only consider "canonical" (i.e., in the best chain or in mempool) transactions,
//! we decide which transactions are canonical based on anchors `last_seen_unconfirmed`;
//! see the [`try_get_chain_position`] documentation for more details.
//!
//! The [`ChangeSet`] reports changes made to a [`TxGraph`]; it can be used to either save to
//! persistent storage, or to be applied to another [`TxGraph`].
//!
//! Lastly, you can use [`TxAncestors`]/[`TxDescendants`] to traverse ancestors and descendants of
//! a given transaction, respectively.
//! //!
//! # Applying changes //! # Applying changes
//! //!
@@ -69,8 +49,6 @@
//! let changeset = graph.apply_update(update); //! let changeset = graph.apply_update(update);
//! assert!(changeset.is_empty()); //! assert!(changeset.is_empty());
//! ``` //! ```
//! [`try_get_chain_position`]: TxGraph::try_get_chain_position
//! [`insert_txout`]: TxGraph::insert_txout
use crate::{ use crate::{
collections::*, keychain::Balance, local_chain::LocalChain, Anchor, Append, BlockId, collections::*, keychain::Balance, local_chain::LocalChain, Anchor, Append, BlockId,
@@ -113,7 +91,7 @@ impl<A> Default for TxGraph<A> {
} }
} }
/// A transaction node in the [`TxGraph`]. /// An outward-facing view of a (transaction) node in the [`TxGraph`].
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct TxNode<'a, T, A> { pub struct TxNode<'a, T, A> {
/// Txid of the transaction. /// Txid of the transaction.
@@ -150,7 +128,7 @@ impl Default for TxNodeInternal {
} }
} }
/// A transaction that is included in the chain, or is still in mempool. /// An outwards-facing view of a transaction that is part of the *best chain*'s history.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct CanonicalTx<'a, T, A> { pub struct CanonicalTx<'a, T, A> {
/// How the transaction is observed as (confirmed or unconfirmed). /// How the transaction is observed as (confirmed or unconfirmed).
@@ -497,7 +475,7 @@ impl<A: Clone + Ord> TxGraph<A> {
/// Batch insert unconfirmed transactions. /// Batch insert unconfirmed transactions.
/// ///
/// Items of `txs` are tuples containing the transaction and a *last seen* timestamp. The /// Items of `txs` are tuples containing the transaction and a *last seen* timestamp. The
/// *last seen* communicates when the transaction is last seen in mempool which is used for /// *last seen* communicates when the transaction is last seen in the mempool which is used for
/// conflict-resolution (refer to [`TxGraph::insert_seen_at`] for details). /// conflict-resolution (refer to [`TxGraph::insert_seen_at`] for details).
pub fn batch_insert_unconfirmed( pub fn batch_insert_unconfirmed(
&mut self, &mut self,
@@ -581,7 +559,10 @@ impl<A: Clone + Ord> TxGraph<A> {
} }
for (outpoint, txout) in changeset.txouts { for (outpoint, txout) in changeset.txouts {
let tx_entry = self.txs.entry(outpoint.txid).or_default(); let tx_entry = self
.txs
.entry(outpoint.txid)
.or_insert_with(Default::default);
match tx_entry { match tx_entry {
(TxNodeInternal::Whole(_), _, _) => { /* do nothing since we already have full tx */ (TxNodeInternal::Whole(_), _, _) => { /* do nothing since we already have full tx */
@@ -594,13 +575,13 @@ impl<A: Clone + Ord> TxGraph<A> {
for (anchor, txid) in changeset.anchors { for (anchor, txid) in changeset.anchors {
if self.anchors.insert((anchor.clone(), txid)) { if self.anchors.insert((anchor.clone(), txid)) {
let (_, anchors, _) = self.txs.entry(txid).or_default(); let (_, anchors, _) = self.txs.entry(txid).or_insert_with(Default::default);
anchors.insert(anchor); anchors.insert(anchor);
} }
} }
for (txid, new_last_seen) in changeset.last_seen { for (txid, new_last_seen) in changeset.last_seen {
let (_, _, last_seen) = self.txs.entry(txid).or_default(); let (_, _, last_seen) = self.txs.entry(txid).or_insert_with(Default::default);
if new_last_seen > *last_seen { if new_last_seen > *last_seen {
*last_seen = new_last_seen; *last_seen = new_last_seen;
} }
@@ -727,20 +708,8 @@ impl<A: Anchor> TxGraph<A> {
/// Get the position of the transaction in `chain` with tip `chain_tip`. /// Get the position of the transaction in `chain` with tip `chain_tip`.
/// ///
/// Chain data is fetched from `chain`, a [`ChainOracle`] implementation. /// If the given transaction of `txid` does not exist in the chain of `chain_tip`, `None` is
/// /// returned.
/// This method returns `Ok(None)` if the transaction is not found in the chain, and no longer
/// belongs in the mempool. The following factors are used to approximate whether an
/// unconfirmed transaction exists in the mempool (not evicted):
///
/// 1. Unconfirmed transactions that conflict with confirmed transactions are evicted.
/// 2. Unconfirmed transactions that spend from transactions that are evicted, are also
/// evicted.
/// 3. Given two conflicting unconfirmed transactions, the transaction with the lower
/// `last_seen_unconfirmed` parameter is evicted. A transaction's `last_seen_unconfirmed`
/// parameter is the max of all it's descendants' `last_seen_unconfirmed` parameters. If the
/// final `last_seen_unconfirmed`s are the same, the transaction with the lower `txid` (by
/// lexicographical order) is evicted.
/// ///
/// # Error /// # Error
/// ///
@@ -766,7 +735,7 @@ impl<A: Anchor> TxGraph<A> {
} }
} }
// The tx is not anchored to a block in the best chain, which means that it // The tx is not anchored to a block which is in the best chain, which means that it
// might be in mempool, or it might have been dropped already. // might be in mempool, or it might have been dropped already.
// Let's check conflicts to find out! // Let's check conflicts to find out!
let tx = match tx_node { let tx = match tx_node {
@@ -976,8 +945,7 @@ impl<A: Anchor> TxGraph<A> {
/// (`OI`) for convenience. If `OI` is not necessary, the caller can use `()`, or /// (`OI`) for convenience. If `OI` is not necessary, the caller can use `()`, or
/// [`Iterator::enumerate`] over a list of [`OutPoint`]s. /// [`Iterator::enumerate`] over a list of [`OutPoint`]s.
/// ///
/// Floating outputs (i.e., outputs for which we don't have the full transaction in the graph) /// Floating outputs are ignored.
/// are ignored.
/// ///
/// # Error /// # Error
/// ///
@@ -1168,9 +1136,9 @@ impl<A: Anchor> TxGraph<A> {
} }
} }
/// The [`ChangeSet`] represents changes to a [`TxGraph`]. /// A structure that represents changes to a [`TxGraph`].
/// ///
/// Since [`TxGraph`] is monotone, the "changeset" can only contain transactions to be added and /// Since [`TxGraph`] is monotone "changeset" can only contain transactions to be added and
/// not removed. /// not removed.
/// ///
/// Refer to [module-level documentation] for more. /// Refer to [module-level documentation] for more.
@@ -1304,7 +1272,7 @@ impl<A> AsRef<TxGraph<A>> for TxGraph<A> {
/// ///
/// The iterator excludes partial transactions. /// The iterator excludes partial transactions.
/// ///
/// Returned by the [`walk_ancestors`] method of [`TxGraph`]. /// This `struct` is created by the [`walk_ancestors`] method of [`TxGraph`].
/// ///
/// [`walk_ancestors`]: TxGraph::walk_ancestors /// [`walk_ancestors`]: TxGraph::walk_ancestors
pub struct TxAncestors<'g, A, F> { pub struct TxAncestors<'g, A, F> {
@@ -1422,7 +1390,7 @@ where
/// An iterator that traverses transaction descendants. /// An iterator that traverses transaction descendants.
/// ///
/// Returned by the [`walk_descendants`] method of [`TxGraph`]. /// This `struct` is created by the [`walk_descendants`] method of [`TxGraph`].
/// ///
/// [`walk_descendants`]: TxGraph::walk_descendants /// [`walk_descendants`]: TxGraph::walk_descendants
pub struct TxDescendants<'g, A, F> { pub struct TxDescendants<'g, A, F> {

View File

@@ -1,5 +1,4 @@
mod tx_template; mod tx_template;
#[allow(unused_imports)]
pub use tx_template::*; pub use tx_template::*;
#[allow(unused_macros)] #[allow(unused_macros)]

View File

@@ -27,10 +27,9 @@ fn insert_relevant_txs() {
let spk_0 = descriptor.at_derivation_index(0).unwrap().script_pubkey(); let spk_0 = descriptor.at_derivation_index(0).unwrap().script_pubkey();
let spk_1 = descriptor.at_derivation_index(9).unwrap().script_pubkey(); let spk_1 = descriptor.at_derivation_index(9).unwrap().script_pubkey();
let mut graph = IndexedTxGraph::<ConfirmationHeightAnchor, KeychainTxOutIndex<()>>::new( let mut graph = IndexedTxGraph::<ConfirmationHeightAnchor, KeychainTxOutIndex<()>>::default();
KeychainTxOutIndex::new(10),
);
graph.index.add_keychain((), descriptor); graph.index.add_keychain((), descriptor);
graph.index.set_lookahead(&(), 10);
let tx_a = Transaction { let tx_a = Transaction {
output: vec![ output: vec![
@@ -119,12 +118,12 @@ fn test_list_owned_txouts() {
let (desc_1, _) = Descriptor::parse_descriptor(&Secp256k1::signing_only(), "tr(tprv8ZgxMBicQKsPd3krDUsBAmtnRsK3rb8u5yi1zhQgMhF1tR8MW7xfE4rnrbbsrbPR52e7rKapu6ztw1jXveJSCGHEriUGZV7mCe88duLp5pj/86'/1'/0'/0/*)").unwrap(); let (desc_1, _) = Descriptor::parse_descriptor(&Secp256k1::signing_only(), "tr(tprv8ZgxMBicQKsPd3krDUsBAmtnRsK3rb8u5yi1zhQgMhF1tR8MW7xfE4rnrbbsrbPR52e7rKapu6ztw1jXveJSCGHEriUGZV7mCe88duLp5pj/86'/1'/0'/0/*)").unwrap();
let (desc_2, _) = Descriptor::parse_descriptor(&Secp256k1::signing_only(), "tr(tprv8ZgxMBicQKsPd3krDUsBAmtnRsK3rb8u5yi1zhQgMhF1tR8MW7xfE4rnrbbsrbPR52e7rKapu6ztw1jXveJSCGHEriUGZV7mCe88duLp5pj/86'/1'/0'/1/*)").unwrap(); let (desc_2, _) = Descriptor::parse_descriptor(&Secp256k1::signing_only(), "tr(tprv8ZgxMBicQKsPd3krDUsBAmtnRsK3rb8u5yi1zhQgMhF1tR8MW7xfE4rnrbbsrbPR52e7rKapu6ztw1jXveJSCGHEriUGZV7mCe88duLp5pj/86'/1'/0'/1/*)").unwrap();
let mut graph = IndexedTxGraph::<ConfirmationHeightAnchor, KeychainTxOutIndex<String>>::new( let mut graph =
KeychainTxOutIndex::new(10), IndexedTxGraph::<ConfirmationHeightAnchor, KeychainTxOutIndex<String>>::default();
);
graph.index.add_keychain("keychain_1".into(), desc_1); graph.index.add_keychain("keychain_1".into(), desc_1);
graph.index.add_keychain("keychain_2".into(), desc_2); graph.index.add_keychain("keychain_2".into(), desc_2);
graph.index.set_lookahead_for_all(10);
// Get trusted and untrusted addresses // Get trusted and untrusted addresses

View File

@@ -18,14 +18,12 @@ enum TestKeychain {
Internal, Internal,
} }
fn init_txout_index( fn init_txout_index() -> (
lookahead: u32,
) -> (
bdk_chain::keychain::KeychainTxOutIndex<TestKeychain>, bdk_chain::keychain::KeychainTxOutIndex<TestKeychain>,
Descriptor<DescriptorPublicKey>, Descriptor<DescriptorPublicKey>,
Descriptor<DescriptorPublicKey>, Descriptor<DescriptorPublicKey>,
) { ) {
let mut txout_index = bdk_chain::keychain::KeychainTxOutIndex::<TestKeychain>::new(lookahead); let mut txout_index = bdk_chain::keychain::KeychainTxOutIndex::<TestKeychain>::default();
let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only(); let secp = bdk_chain::bitcoin::secp256k1::Secp256k1::signing_only();
let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap(); let (external_descriptor,_) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "tr([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)").unwrap();
@@ -48,7 +46,7 @@ fn spk_at_index(descriptor: &Descriptor<DescriptorPublicKey>, index: u32) -> Scr
fn test_set_all_derivation_indices() { fn test_set_all_derivation_indices() {
use bdk_chain::indexed_tx_graph::Indexer; use bdk_chain::indexed_tx_graph::Indexer;
let (mut txout_index, _, _) = init_txout_index(0); let (mut txout_index, _, _) = init_txout_index();
let derive_to: BTreeMap<_, _> = let derive_to: BTreeMap<_, _> =
[(TestKeychain::External, 12), (TestKeychain::Internal, 24)].into(); [(TestKeychain::External, 12), (TestKeychain::Internal, 24)].into();
assert_eq!( assert_eq!(
@@ -66,10 +64,19 @@ fn test_set_all_derivation_indices() {
#[test] #[test]
fn test_lookahead() { fn test_lookahead() {
let (mut txout_index, external_desc, internal_desc) = init_txout_index(10); let (mut txout_index, external_desc, internal_desc) = init_txout_index();
// ensure it does not break anything if lookahead is set multiple times
(0..=10).for_each(|lookahead| txout_index.set_lookahead(&TestKeychain::External, lookahead));
(0..=20)
.filter(|v| v % 2 == 0)
.for_each(|lookahead| txout_index.set_lookahead(&TestKeychain::Internal, lookahead));
assert_eq!(txout_index.inner().all_spks().len(), 30);
// given: // given:
// - external lookahead set to 10 // - external lookahead set to 10
// - internal lookahead set to 20
// when: // when:
// - set external derivation index to value higher than last, but within the lookahead value // - set external derivation index to value higher than last, but within the lookahead value
// expect: // expect:
@@ -90,7 +97,7 @@ fn test_lookahead() {
assert_eq!( assert_eq!(
txout_index.inner().all_spks().len(), txout_index.inner().all_spks().len(),
10 /* external lookahead */ + 10 /* external lookahead */ +
10 /* internal lookahead */ + 20 /* internal lookahead */ +
index as usize + 1 /* `derived` count */ index as usize + 1 /* `derived` count */
); );
assert_eq!( assert_eq!(
@@ -120,7 +127,7 @@ fn test_lookahead() {
} }
// given: // given:
// - internal lookahead is 10 // - internal lookahead is 20
// - internal derivation index is `None` // - internal derivation index is `None`
// when: // when:
// - derivation index is set ahead of current derivation index + lookahead // - derivation index is set ahead of current derivation index + lookahead
@@ -141,7 +148,7 @@ fn test_lookahead() {
assert_eq!( assert_eq!(
txout_index.inner().all_spks().len(), txout_index.inner().all_spks().len(),
10 /* external lookahead */ + 10 /* external lookahead */ +
10 /* internal lookahead */ + 20 /* internal lookahead */ +
20 /* external stored index count */ + 20 /* external stored index count */ +
25 /* internal stored index count */ 25 /* internal stored index count */
); );
@@ -219,7 +226,8 @@ fn test_lookahead() {
// - last used index should change as expected // - last used index should change as expected
#[test] #[test]
fn test_scan_with_lookahead() { fn test_scan_with_lookahead() {
let (mut txout_index, external_desc, _) = init_txout_index(10); let (mut txout_index, external_desc, _) = init_txout_index();
txout_index.set_lookahead_for_all(10);
let spks: BTreeMap<u32, ScriptBuf> = [0, 10, 20, 30] let spks: BTreeMap<u32, ScriptBuf> = [0, 10, 20, 30]
.into_iter() .into_iter()
@@ -273,7 +281,7 @@ fn test_scan_with_lookahead() {
#[test] #[test]
#[rustfmt::skip] #[rustfmt::skip]
fn test_wildcard_derivations() { fn test_wildcard_derivations() {
let (mut txout_index, external_desc, _) = init_txout_index(0); let (mut txout_index, external_desc, _) = init_txout_index();
let external_spk_0 = external_desc.at_derivation_index(0).unwrap().script_pubkey(); let external_spk_0 = external_desc.at_derivation_index(0).unwrap().script_pubkey();
let external_spk_16 = external_desc.at_derivation_index(16).unwrap().script_pubkey(); let external_spk_16 = external_desc.at_derivation_index(16).unwrap().script_pubkey();
let external_spk_26 = external_desc.at_derivation_index(26).unwrap().script_pubkey(); let external_spk_26 = external_desc.at_derivation_index(26).unwrap().script_pubkey();
@@ -331,7 +339,7 @@ fn test_wildcard_derivations() {
#[test] #[test]
fn test_non_wildcard_derivations() { fn test_non_wildcard_derivations() {
let mut txout_index = KeychainTxOutIndex::<TestKeychain>::new(0); let mut txout_index = KeychainTxOutIndex::<TestKeychain>::default();
let secp = bitcoin::secp256k1::Secp256k1::signing_only(); let secp = bitcoin::secp256k1::Secp256k1::signing_only();
let (no_wildcard_descriptor, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "wpkh([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/0)").unwrap(); let (no_wildcard_descriptor, _) = Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, "wpkh([73c5da0a/86'/0'/0']xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/0)").unwrap();

View File

@@ -110,7 +110,6 @@ fn test_tx_conflict_handling() {
..Default::default() ..Default::default()
}, },
], ],
// the txgraph is going to pick tx_conflict_2 because of higher lexicographical txid
exp_chain_txs: HashSet::from(["tx1", "tx_conflict_2"]), exp_chain_txs: HashSet::from(["tx1", "tx_conflict_2"]),
exp_chain_txouts: HashSet::from([("tx1", 0), ("tx_conflict_2", 0)]), exp_chain_txouts: HashSet::from([("tx1", 0), ("tx_conflict_2", 0)]),
exp_unspents: HashSet::from([("tx_conflict_2", 0)]), exp_unspents: HashSet::from([("tx_conflict_2", 0)]),

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "bdk_electrum" name = "bdk_electrum"
version = "0.5.0" version = "0.4.0"
edition = "2021" edition = "2021"
homepage = "https://bitcoindevkit.org" homepage = "https://bitcoindevkit.org"
repository = "https://github.com/bitcoindevkit/bdk" repository = "https://github.com/bitcoindevkit/bdk"
@@ -12,6 +12,6 @@ readme = "README.md"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
bdk_chain = { path = "../chain", version = "0.7.0", default-features = false } bdk_chain = { path = "../chain", version = "0.6.0", default-features = false }
electrum-client = { version = "0.18" } electrum-client = { version = "0.18" }
#rustls = { version = "=0.21.1", optional = true, features = ["dangerous_configuration"] } #rustls = { version = "=0.21.1", optional = true, features = ["dangerous_configuration"] }

View File

@@ -1,7 +1,3 @@
# BDK Electrum # BDK Electrum
BDK Electrum extends [`electrum-client`] to update [`bdk_chain`] structures BDK Electrum client library for updating the keychain tracker.
from an Electrum server.
[`electrum-client`]: https://docs.rs/electrum-client/
[`bdk_chain`]: https://docs.rs/bdk-chain/

View File

@@ -134,54 +134,64 @@ pub struct ElectrumUpdate {
/// Trait to extend [`Client`] functionality. /// Trait to extend [`Client`] functionality.
pub trait ElectrumExt { pub trait ElectrumExt {
/// Full scan the keychain scripts specified with the blockchain (via an Electrum client) and /// Scan the blockchain (via electrum) for the data specified and returns updates for
/// returns updates for [`bdk_chain`] data structures. /// [`bdk_chain`] data structures.
/// ///
/// - `prev_tip`: the most recent blockchain tip present locally /// - `prev_tip`: the most recent blockchain tip present locally
/// - `keychain_spks`: keychains that we want to scan transactions for /// - `keychain_spks`: keychains that we want to scan transactions for
/// - `txids`: transactions for which we want updated [`Anchor`]s
/// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
/// want to included in the update
/// ///
/// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated /// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
/// transactions. `batch_size` specifies the max number of script pubkeys to request for in a /// transactions. `batch_size` specifies the max number of script pubkeys to request for in a
/// single batch request. /// single batch request.
fn full_scan<K: Ord + Clone>( fn scan<K: Ord + Clone>(
&self, &self,
prev_tip: CheckPoint, prev_tip: CheckPoint,
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>, keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
stop_gap: usize, stop_gap: usize,
batch_size: usize, batch_size: usize,
) -> Result<(ElectrumUpdate, BTreeMap<K, u32>), Error>; ) -> Result<(ElectrumUpdate, BTreeMap<K, u32>), Error>;
/// Sync a set of scripts with the blockchain (via an Electrum client) for the data specified /// Convenience method to call [`scan`] without requiring a keychain.
/// and returns updates for [`bdk_chain`] data structures.
/// ///
/// - `prev_tip`: the most recent blockchain tip present locally /// [`scan`]: ElectrumExt::scan
/// - `misc_spks`: an iterator of scripts we want to sync transactions for fn scan_without_keychain(
/// - `txids`: transactions for which we want updated [`Anchor`]s
/// - `outpoints`: transactions associated with these outpoints (residing, spending) that we
/// want to include in the update
///
/// `batch_size` specifies the max number of script pubkeys to request for in a single batch
/// request.
///
/// If the scripts to sync are unknown, such as when restoring or importing a keychain that
/// may include scripts that have been used, use [`full_scan`] with the keychain.
///
/// [`full_scan`]: ElectrumExt::full_scan
fn sync(
&self, &self,
prev_tip: CheckPoint, prev_tip: CheckPoint,
misc_spks: impl IntoIterator<Item = ScriptBuf>, misc_spks: impl IntoIterator<Item = ScriptBuf>,
txids: impl IntoIterator<Item = Txid>, txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>, outpoints: impl IntoIterator<Item = OutPoint>,
batch_size: usize, batch_size: usize,
) -> Result<ElectrumUpdate, Error>; ) -> Result<ElectrumUpdate, Error> {
let spk_iter = misc_spks
.into_iter()
.enumerate()
.map(|(i, spk)| (i as u32, spk));
let (electrum_update, _) = self.scan(
prev_tip,
[((), spk_iter)].into(),
txids,
outpoints,
usize::MAX,
batch_size,
)?;
Ok(electrum_update)
}
} }
impl ElectrumExt for Client { impl ElectrumExt for Client {
fn full_scan<K: Ord + Clone>( fn scan<K: Ord + Clone>(
&self, &self,
prev_tip: CheckPoint, prev_tip: CheckPoint,
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>, keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
stop_gap: usize, stop_gap: usize,
batch_size: usize, batch_size: usize,
) -> Result<(ElectrumUpdate, BTreeMap<K, u32>), Error> { ) -> Result<(ElectrumUpdate, BTreeMap<K, u32>), Error> {
@@ -191,6 +201,9 @@ impl ElectrumExt for Client {
.collect::<BTreeMap<K, _>>(); .collect::<BTreeMap<K, _>>();
let mut scanned_spks = BTreeMap::<(K, u32), (ScriptBuf, bool)>::new(); let mut scanned_spks = BTreeMap::<(K, u32), (ScriptBuf, bool)>::new();
let txids = txids.into_iter().collect::<Vec<_>>();
let outpoints = outpoints.into_iter().collect::<Vec<_>>();
let (electrum_update, keychain_update) = loop { let (electrum_update, keychain_update) = loop {
let (tip, _) = construct_update_tip(self, prev_tip.clone())?; let (tip, _) = construct_update_tip(self, prev_tip.clone())?;
let mut relevant_txids = RelevantTxids::default(); let mut relevant_txids = RelevantTxids::default();
@@ -229,6 +242,15 @@ impl ElectrumExt for Client {
} }
} }
populate_with_txids(self, &cps, &mut relevant_txids, &mut txids.iter().cloned())?;
let _txs = populate_with_outpoints(
self,
&cps,
&mut relevant_txids,
&mut outpoints.iter().cloned(),
)?;
// check for reorgs during scan process // check for reorgs during scan process
let server_blockhash = self.block_header(tip.height() as usize)?.block_hash(); let server_blockhash = self.block_header(tip.height() as usize)?.block_hash();
if tip.hash() != server_blockhash { if tip.hash() != server_blockhash {
@@ -262,41 +284,6 @@ impl ElectrumExt for Client {
Ok((electrum_update, keychain_update)) Ok((electrum_update, keychain_update))
} }
fn sync(
&self,
prev_tip: CheckPoint,
misc_spks: impl IntoIterator<Item = ScriptBuf>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
batch_size: usize,
) -> Result<ElectrumUpdate, Error> {
let spk_iter = misc_spks
.into_iter()
.enumerate()
.map(|(i, spk)| (i as u32, spk));
let (mut electrum_update, _) = self.full_scan(
prev_tip.clone(),
[((), spk_iter)].into(),
usize::MAX,
batch_size,
)?;
let (tip, _) = construct_update_tip(self, prev_tip)?;
let cps = tip
.iter()
.take(10)
.map(|cp| (cp.height(), cp))
.collect::<BTreeMap<u32, CheckPoint>>();
populate_with_txids(self, &cps, &mut electrum_update.relevant_txids, txids)?;
let _txs =
populate_with_outpoints(self, &cps, &mut electrum_update.relevant_txids, outpoints)?;
Ok(electrum_update)
}
} }
/// Return a [`CheckPoint`] of the latest tip, that connects with `prev_tip`. /// Return a [`CheckPoint`] of the latest tip, that connects with `prev_tip`.
@@ -418,7 +405,7 @@ fn populate_with_outpoints(
client: &Client, client: &Client,
cps: &BTreeMap<u32, CheckPoint>, cps: &BTreeMap<u32, CheckPoint>,
relevant_txids: &mut RelevantTxids, relevant_txids: &mut RelevantTxids,
outpoints: impl IntoIterator<Item = OutPoint>, outpoints: &mut impl Iterator<Item = OutPoint>,
) -> Result<HashMap<Txid, Transaction>, Error> { ) -> Result<HashMap<Txid, Transaction>, Error> {
let mut full_txs = HashMap::new(); let mut full_txs = HashMap::new();
for outpoint in outpoints { for outpoint in outpoints {
@@ -479,7 +466,7 @@ fn populate_with_txids(
client: &Client, client: &Client,
cps: &BTreeMap<u32, CheckPoint>, cps: &BTreeMap<u32, CheckPoint>,
relevant_txids: &mut RelevantTxids, relevant_txids: &mut RelevantTxids,
txids: impl IntoIterator<Item = Txid>, txids: &mut impl Iterator<Item = Txid>,
) -> Result<(), Error> { ) -> Result<(), Error> {
for txid in txids { for txid in txids {
let tx = match client.transaction_get(&txid) { let tx = match client.transaction_get(&txid) {
@@ -490,7 +477,7 @@ fn populate_with_txids(
let spk = tx let spk = tx
.output .output
.first() .get(0)
.map(|txo| &txo.script_pubkey) .map(|txo| &txo.script_pubkey)
.expect("tx must have an output"); .expect("tx must have an output");

View File

@@ -1,26 +1,26 @@
//! This crate is used for updating structures of [`bdk_chain`] with data from an Electrum server. //! This crate is used for updating structures of the [`bdk_chain`] crate with data from electrum.
//! //!
//! The two primary methods are [`ElectrumExt::sync`] and [`ElectrumExt::full_scan`]. In most cases //! The star of the show is the [`ElectrumExt::scan`] method, which scans for relevant blockchain
//! [`ElectrumExt::sync`] is used to sync the transaction histories of scripts that the application //! data (via electrum) and outputs updates for [`bdk_chain`] structures as a tuple of form:
//! cares about, for example the scripts for all the receive addresses of a Wallet's keychain that it
//! has shown a user. [`ElectrumExt::full_scan`] is meant to be used when importing or restoring a
//! keychain where the range of possibly used scripts is not known. In this case it is necessary to
//! scan all keychain scripts until a number (the "stop gap") of unused scripts is discovered. For a
//! sync or full scan the user receives relevant blockchain data and output updates for
//! [`bdk_chain`] including [`RelevantTxids`].
//! //!
//! The [`RelevantTxids`] only includes `txid`s and not full transactions. The caller is responsible //! ([`bdk_chain::local_chain::Update`], [`RelevantTxids`], `keychain_update`)
//! for obtaining full transactions before applying new data to their [`bdk_chain`]. This can be
//! done with these steps:
//! //!
//! 1. Determine which full transactions are missing. Use [`RelevantTxids::missing_full_txs`]. //! An [`RelevantTxids`] only includes `txid`s and no full transactions. The caller is
//! responsible for obtaining full transactions before applying. This can be done with
//! these steps:
//! //!
//! 2. Obtaining the full transactions. To do this via electrum use [`ElectrumApi::batch_transaction_get`]. //! 1. Determine which full transactions are missing. The method [`missing_full_txs`] of
//! [`RelevantTxids`] can be used.
//! //!
//! Refer to [`example_electrum`] for a complete example. //! 2. Obtaining the full transactions. To do this via electrum, the method
//! [`batch_transaction_get`] can be used.
//! //!
//! [`ElectrumApi::batch_transaction_get`]: electrum_client::ElectrumApi::batch_transaction_get //! Refer to [`bdk_electrum_example`] for a complete example.
//! [`example_electrum`]: https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_electrum //!
//! [`ElectrumClient::scan`]: electrum_client::ElectrumClient::scan
//! [`missing_full_txs`]: RelevantTxids::missing_full_txs
//! [`batch_transaction_get`]: electrum_client::ElectrumApi::batch_transaction_get
//! [`bdk_electrum_example`]: https://github.com/LLFourn/bdk_core_staging/tree/master/bdk_electrum_example
#![warn(missing_docs)] #![warn(missing_docs)]

View File

@@ -12,7 +12,7 @@ readme = "README.md"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
bdk_chain = { path = "../chain", version = "0.7.0", default-features = false } bdk_chain = { path = "../chain", version = "0.6.0", default-features = false }
esplora-client = { version = "0.6.0", default-features = false } esplora-client = { version = "0.6.0", default-features = false }
async-trait = { version = "0.1.66", optional = true } async-trait = { version = "0.1.66", optional = true }
futures = { version = "0.3.26", optional = true } futures = { version = "0.3.26", optional = true }

View File

@@ -36,45 +36,58 @@ pub trait EsploraAsyncExt {
request_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send, request_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send,
) -> Result<local_chain::Update, Error>; ) -> Result<local_chain::Update, Error>;
/// Full scan the keychain scripts specified with the blockchain (via an Esplora client) and /// Scan Esplora for the data specified and return a [`TxGraph`] and a map of last active
/// returns a [`TxGraph`] and a map of last active indices. /// indices.
/// ///
/// * `keychain_spks`: keychains that we want to scan transactions for /// * `keychain_spks`: keychains that we want to scan transactions for
/// * `txids`: transactions for which we want updated [`ConfirmationTimeHeightAnchor`]s
/// * `outpoints`: transactions associated with these outpoints (residing, spending) that we
/// want to include in the update
/// ///
/// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated /// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
/// transactions. `parallel_requests` specifies the max number of HTTP requests to make in /// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
/// parallel. /// parallel.
#[allow(clippy::result_large_err)] #[allow(clippy::result_large_err)]
async fn full_scan<K: Ord + Clone + Send>( async fn scan_txs_with_keychains<K: Ord + Clone + Send>(
&self, &self,
keychain_spks: BTreeMap< keychain_spks: BTreeMap<
K, K,
impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send, impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send,
>, >,
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
stop_gap: usize, stop_gap: usize,
parallel_requests: usize, parallel_requests: usize,
) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error>; ) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error>;
/// Sync a set of scripts with the blockchain (via an Esplora client) for the data /// Convenience method to call [`scan_txs_with_keychains`] without requiring a keychain.
/// specified and return a [`TxGraph`].
/// ///
/// * `misc_spks`: scripts that we want to sync transactions for /// [`scan_txs_with_keychains`]: EsploraAsyncExt::scan_txs_with_keychains
/// * `txids`: transactions for which we want updated [`ConfirmationTimeHeightAnchor`]s
/// * `outpoints`: transactions associated with these outpoints (residing, spending) that we
/// want to include in the update
///
/// If the scripts to sync are unknown, such as when restoring or importing a keychain that
/// may include scripts that have been used, use [`full_scan`] with the keychain.
///
/// [`full_scan`]: EsploraAsyncExt::full_scan
#[allow(clippy::result_large_err)] #[allow(clippy::result_large_err)]
async fn sync( async fn scan_txs(
&self, &self,
misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send, misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send, txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send, outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
parallel_requests: usize, parallel_requests: usize,
) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error>; ) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
self.scan_txs_with_keychains(
[(
(),
misc_spks
.into_iter()
.enumerate()
.map(|(i, spk)| (i as u32, spk)),
)]
.into(),
txids,
outpoints,
usize::MAX,
parallel_requests,
)
.await
.map(|(g, _)| g)
}
} }
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))] #[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
@@ -186,12 +199,14 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
}) })
} }
async fn full_scan<K: Ord + Clone + Send>( async fn scan_txs_with_keychains<K: Ord + Clone + Send>(
&self, &self,
keychain_spks: BTreeMap< keychain_spks: BTreeMap<
K, K,
impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send, impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send,
>, >,
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
stop_gap: usize, stop_gap: usize,
parallel_requests: usize, parallel_requests: usize,
) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error> { ) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error> {
@@ -260,32 +275,6 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
} }
} }
Ok((graph, last_active_indexes))
}
async fn sync(
&self,
misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
parallel_requests: usize,
) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
let mut graph = self
.full_scan(
[(
(),
misc_spks
.into_iter()
.enumerate()
.map(|(i, spk)| (i as u32, spk)),
)]
.into(),
usize::MAX,
parallel_requests,
)
.await
.map(|(g, _)| g)?;
let mut txids = txids.into_iter(); let mut txids = txids.into_iter();
loop { loop {
let handles = txids let handles = txids
@@ -334,6 +323,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
} }
} }
} }
Ok(graph)
Ok((graph, last_active_indexes))
} }
} }

View File

@@ -19,8 +19,8 @@ use crate::{anchor_from_status, ASSUME_FINAL_DEPTH};
pub trait EsploraExt { pub trait EsploraExt {
/// Prepare an [`LocalChain`] update with blocks fetched from Esplora. /// Prepare an [`LocalChain`] update with blocks fetched from Esplora.
/// ///
/// * `local_tip` is the previous tip of [`LocalChain::tip`]. /// * `prev_tip` is the previous tip of [`LocalChain::tip`].
/// * `request_heights` is the block heights that we are interested in fetching from Esplora. /// * `get_heights` is the block heights that we are interested in fetching from Esplora.
/// ///
/// The result of this method can be applied to [`LocalChain::apply_update`]. /// The result of this method can be applied to [`LocalChain::apply_update`].
/// ///
@@ -34,42 +34,54 @@ pub trait EsploraExt {
request_heights: impl IntoIterator<Item = u32>, request_heights: impl IntoIterator<Item = u32>,
) -> Result<local_chain::Update, Error>; ) -> Result<local_chain::Update, Error>;
/// Full scan the keychain scripts specified with the blockchain (via an Esplora client) and /// Scan Esplora for the data specified and return a [`TxGraph`] and a map of last active
/// returns a [`TxGraph`] and a map of last active indices. /// indices.
/// ///
/// * `keychain_spks`: keychains that we want to scan transactions for /// * `keychain_spks`: keychains that we want to scan transactions for
///
/// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
/// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
/// parallel.
#[allow(clippy::result_large_err)]
fn full_scan<K: Ord + Clone>(
&self,
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
stop_gap: usize,
parallel_requests: usize,
) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error>;
/// Sync a set of scripts with the blockchain (via an Esplora client) for the data
/// specified and return a [`TxGraph`].
///
/// * `misc_spks`: scripts that we want to sync transactions for
/// * `txids`: transactions for which we want updated [`ConfirmationTimeHeightAnchor`]s /// * `txids`: transactions for which we want updated [`ConfirmationTimeHeightAnchor`]s
/// * `outpoints`: transactions associated with these outpoints (residing, spending) that we /// * `outpoints`: transactions associated with these outpoints (residing, spending) that we
/// want to include in the update /// want to include in the update
/// ///
/// If the scripts to sync are unknown, such as when restoring or importing a keychain that /// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
/// may include scripts that have been used, use [`full_scan`] with the keychain. /// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
/// /// parallel.
/// [`full_scan`]: EsploraExt::full_scan
#[allow(clippy::result_large_err)] #[allow(clippy::result_large_err)]
fn sync( fn scan_txs_with_keychains<K: Ord + Clone>(
&self,
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
stop_gap: usize,
parallel_requests: usize,
) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error>;
/// Convenience method to call [`scan_txs_with_keychains`] without requiring a keychain.
///
/// [`scan_txs_with_keychains`]: EsploraExt::scan_txs_with_keychains
#[allow(clippy::result_large_err)]
fn scan_txs(
&self, &self,
misc_spks: impl IntoIterator<Item = ScriptBuf>, misc_spks: impl IntoIterator<Item = ScriptBuf>,
txids: impl IntoIterator<Item = Txid>, txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>, outpoints: impl IntoIterator<Item = OutPoint>,
parallel_requests: usize, parallel_requests: usize,
) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error>; ) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
self.scan_txs_with_keychains(
[(
(),
misc_spks
.into_iter()
.enumerate()
.map(|(i, spk)| (i as u32, spk)),
)]
.into(),
txids,
outpoints,
usize::MAX,
parallel_requests,
)
.map(|(g, _)| g)
}
} }
impl EsploraExt for esplora_client::BlockingClient { impl EsploraExt for esplora_client::BlockingClient {
@@ -178,9 +190,11 @@ impl EsploraExt for esplora_client::BlockingClient {
}) })
} }
fn full_scan<K: Ord + Clone>( fn scan_txs_with_keychains<K: Ord + Clone>(
&self, &self,
keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>, keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
stop_gap: usize, stop_gap: usize,
parallel_requests: usize, parallel_requests: usize,
) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error> { ) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error> {
@@ -252,31 +266,6 @@ impl EsploraExt for esplora_client::BlockingClient {
} }
} }
Ok((graph, last_active_indexes))
}
fn sync(
&self,
misc_spks: impl IntoIterator<Item = ScriptBuf>,
txids: impl IntoIterator<Item = Txid>,
outpoints: impl IntoIterator<Item = OutPoint>,
parallel_requests: usize,
) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
let mut graph = self
.full_scan(
[(
(),
misc_spks
.into_iter()
.enumerate()
.map(|(i, spk)| (i as u32, spk)),
)]
.into(),
usize::MAX,
parallel_requests,
)
.map(|(g, _)| g)?;
let mut txids = txids.into_iter(); let mut txids = txids.into_iter();
loop { loop {
let handles = txids let handles = txids
@@ -303,7 +292,7 @@ impl EsploraExt for esplora_client::BlockingClient {
} }
} }
for op in outpoints { for op in outpoints.into_iter() {
if graph.get_tx(op.txid).is_none() { if graph.get_tx(op.txid).is_none() {
if let Some(tx) = self.get_tx(&op.txid)? { if let Some(tx) = self.get_tx(&op.txid)? {
let _ = graph.insert_tx(tx); let _ = graph.insert_tx(tx);
@@ -328,6 +317,7 @@ impl EsploraExt for esplora_client::BlockingClient {
} }
} }
} }
Ok(graph)
Ok((graph, last_active_indexes))
} }
} }

View File

@@ -1,21 +1,4 @@
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
//! This crate is used for updating structures of [`bdk_chain`] with data from an Esplora server.
//!
//! The two primary methods are [`EsploraExt::sync`] and [`EsploraExt::full_scan`]. In most cases
//! [`EsploraExt::sync`] is used to sync the transaction histories of scripts that the application
//! cares about, for example the scripts for all the receive addresses of a Wallet's keychain that it
//! has shown a user. [`EsploraExt::full_scan`] is meant to be used when importing or restoring a
//! keychain where the range of possibly used scripts is not known. In this case it is necessary to
//! scan all keychain scripts until a number (the "stop gap") of unused scripts is discovered. For a
//! sync or full scan the user receives relevant blockchain data and output updates for [`bdk_chain`]
//! via a new [`TxGraph`] to be appended to any existing [`TxGraph`] data.
//!
//! Refer to [`example_esplora`] for a complete example.
//!
//! [`TxGraph`]: bdk_chain::tx_graph::TxGraph
//! [`example_esplora`]: https://github.com/bitcoindevkit/bdk/tree/master/example-crates/example_esplora
use bdk_chain::{BlockId, ConfirmationTimeHeightAnchor}; use bdk_chain::{BlockId, ConfirmationTimeHeightAnchor};
use esplora_client::TxStatus; use esplora_client::TxStatus;

View File

@@ -101,7 +101,7 @@ pub async fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
let graph_update = env let graph_update = env
.client .client
.sync( .scan_txs(
misc_spks.into_iter(), misc_spks.into_iter(),
vec![].into_iter(), vec![].into_iter(),
vec![].into_iter(), vec![].into_iter(),
@@ -166,10 +166,28 @@ pub async fn test_async_update_tx_graph_gap_limit() -> anyhow::Result<()> {
// A scan with a gap limit of 2 won't find the transaction, but a scan with a gap limit of 3 // A scan with a gap limit of 2 won't find the transaction, but a scan with a gap limit of 3
// will. // will.
let (graph_update, active_indices) = env.client.full_scan(keychains.clone(), 2, 1).await?; let (graph_update, active_indices) = env
.client
.scan_txs_with_keychains(
keychains.clone(),
vec![].into_iter(),
vec![].into_iter(),
2,
1,
)
.await?;
assert!(graph_update.full_txs().next().is_none()); assert!(graph_update.full_txs().next().is_none());
assert!(active_indices.is_empty()); assert!(active_indices.is_empty());
let (graph_update, active_indices) = env.client.full_scan(keychains.clone(), 3, 1).await?; let (graph_update, active_indices) = env
.client
.scan_txs_with_keychains(
keychains.clone(),
vec![].into_iter(),
vec![].into_iter(),
3,
1,
)
.await?;
assert_eq!(graph_update.full_txs().next().unwrap().txid, txid_4th_addr); assert_eq!(graph_update.full_txs().next().unwrap().txid, txid_4th_addr);
assert_eq!(active_indices[&0], 3); assert_eq!(active_indices[&0], 3);
@@ -191,12 +209,24 @@ pub async fn test_async_update_tx_graph_gap_limit() -> anyhow::Result<()> {
// A scan with gap limit 4 won't find the second transaction, but a scan with gap limit 5 will. // A scan with gap limit 4 won't find the second transaction, but a scan with gap limit 5 will.
// The last active indice won't be updated in the first case but will in the second one. // The last active indice won't be updated in the first case but will in the second one.
let (graph_update, active_indices) = env.client.full_scan(keychains.clone(), 4, 1).await?; let (graph_update, active_indices) = env
.client
.scan_txs_with_keychains(
keychains.clone(),
vec![].into_iter(),
vec![].into_iter(),
4,
1,
)
.await?;
let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect(); let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
assert_eq!(txs.len(), 1); assert_eq!(txs.len(), 1);
assert!(txs.contains(&txid_4th_addr)); assert!(txs.contains(&txid_4th_addr));
assert_eq!(active_indices[&0], 3); assert_eq!(active_indices[&0], 3);
let (graph_update, active_indices) = env.client.full_scan(keychains, 5, 1).await?; let (graph_update, active_indices) = env
.client
.scan_txs_with_keychains(keychains, vec![].into_iter(), vec![].into_iter(), 5, 1)
.await?;
let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect(); let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
assert_eq!(txs.len(), 2); assert_eq!(txs.len(), 2);
assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr)); assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr));

View File

@@ -99,7 +99,7 @@ pub fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
sleep(Duration::from_millis(10)) sleep(Duration::from_millis(10))
} }
let graph_update = env.client.sync( let graph_update = env.client.scan_txs(
misc_spks.into_iter(), misc_spks.into_iter(),
vec![].into_iter(), vec![].into_iter(),
vec![].into_iter(), vec![].into_iter(),
@@ -164,10 +164,22 @@ pub fn test_update_tx_graph_gap_limit() -> anyhow::Result<()> {
// A scan with a gap limit of 2 won't find the transaction, but a scan with a gap limit of 3 // A scan with a gap limit of 2 won't find the transaction, but a scan with a gap limit of 3
// will. // will.
let (graph_update, active_indices) = env.client.full_scan(keychains.clone(), 2, 1)?; let (graph_update, active_indices) = env.client.scan_txs_with_keychains(
keychains.clone(),
vec![].into_iter(),
vec![].into_iter(),
2,
1,
)?;
assert!(graph_update.full_txs().next().is_none()); assert!(graph_update.full_txs().next().is_none());
assert!(active_indices.is_empty()); assert!(active_indices.is_empty());
let (graph_update, active_indices) = env.client.full_scan(keychains.clone(), 3, 1)?; let (graph_update, active_indices) = env.client.scan_txs_with_keychains(
keychains.clone(),
vec![].into_iter(),
vec![].into_iter(),
3,
1,
)?;
assert_eq!(graph_update.full_txs().next().unwrap().txid, txid_4th_addr); assert_eq!(graph_update.full_txs().next().unwrap().txid, txid_4th_addr);
assert_eq!(active_indices[&0], 3); assert_eq!(active_indices[&0], 3);
@@ -189,12 +201,24 @@ pub fn test_update_tx_graph_gap_limit() -> anyhow::Result<()> {
// A scan with gap limit 4 won't find the second transaction, but a scan with gap limit 5 will. // A scan with gap limit 4 won't find the second transaction, but a scan with gap limit 5 will.
// The last active indice won't be updated in the first case but will in the second one. // The last active indice won't be updated in the first case but will in the second one.
let (graph_update, active_indices) = env.client.full_scan(keychains.clone(), 4, 1)?; let (graph_update, active_indices) = env.client.scan_txs_with_keychains(
keychains.clone(),
vec![].into_iter(),
vec![].into_iter(),
4,
1,
)?;
let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect(); let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
assert_eq!(txs.len(), 1); assert_eq!(txs.len(), 1);
assert!(txs.contains(&txid_4th_addr)); assert!(txs.contains(&txid_4th_addr));
assert_eq!(active_indices[&0], 3); assert_eq!(active_indices[&0], 3);
let (graph_update, active_indices) = env.client.full_scan(keychains, 5, 1)?; let (graph_update, active_indices) = env.client.scan_txs_with_keychains(
keychains,
vec![].into_iter(),
vec![].into_iter(),
5,
1,
)?;
let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect(); let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
assert_eq!(txs.len(), 2); assert_eq!(txs.len(), 2);
assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr)); assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr));

View File

@@ -11,7 +11,7 @@ authors = ["Bitcoin Dev Kit Developers"]
readme = "README.md" readme = "README.md"
[dependencies] [dependencies]
bdk_chain = { path = "../chain", version = "0.7.0", features = [ "serde", "miniscript" ] } bdk_chain = { path = "../chain", version = "0.6.0", features = [ "serde", "miniscript" ] }
bincode = { version = "1" } bincode = { version = "1" }
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }

View File

@@ -105,7 +105,7 @@ where
}) })
} }
/// Attempt to open existing [`Store`] file; create it if the file is non-existent. /// Attempt to open existing [`Store`] file; create it if the file is non-existant.
/// ///
/// Internally, this calls either [`open`] or [`create_new`]. /// Internally, this calls either [`open`] or [`create_new`].
/// ///

View File

@@ -12,7 +12,7 @@ use bdk_bitcoind_rpc::{
Emitter, Emitter,
}; };
use bdk_chain::{ use bdk_chain::{
bitcoin::{constants::genesis_block, Block, Transaction}, bitcoin::{Block, Transaction},
indexed_tx_graph, keychain, indexed_tx_graph, keychain,
local_chain::{self, CheckPoint, LocalChain}, local_chain::{self, CheckPoint, LocalChain},
ConfirmationTimeHeightAnchor, IndexedTxGraph, ConfirmationTimeHeightAnchor, IndexedTxGraph,
@@ -64,6 +64,9 @@ struct RpcArgs {
/// Starting block height to fallback to if no point of agreement if found /// Starting block height to fallback to if no point of agreement if found
#[clap(env = "FALLBACK_HEIGHT", long, default_value = "0")] #[clap(env = "FALLBACK_HEIGHT", long, default_value = "0")]
fallback_height: u32, fallback_height: u32,
/// The unused-scripts lookahead will be kept at this size
#[clap(long, default_value = "10")]
lookahead: u32,
} }
impl From<RpcArgs> for Auth { impl From<RpcArgs> for Auth {
@@ -117,11 +120,10 @@ fn main() -> anyhow::Result<()> {
"[{:>10}s] loaded initial changeset from db", "[{:>10}s] loaded initial changeset from db",
start.elapsed().as_secs_f32() start.elapsed().as_secs_f32()
); );
let (init_chain_changeset, init_graph_changeset) = init_changeset;
let graph = Mutex::new({ let graph = Mutex::new({
let mut graph = IndexedTxGraph::new(index); let mut graph = IndexedTxGraph::new(index);
graph.apply_changeset(init_graph_changeset); graph.apply_changeset(init_changeset.1);
graph graph
}); });
println!( println!(
@@ -129,16 +131,7 @@ fn main() -> anyhow::Result<()> {
start.elapsed().as_secs_f32() start.elapsed().as_secs_f32()
); );
let chain = Mutex::new(if init_chain_changeset.is_empty() { let chain = Mutex::new(LocalChain::from_changeset(init_changeset.0)?);
let genesis_hash = genesis_block(args.network).block_hash();
let (chain, chain_changeset) = LocalChain::from_genesis_hash(genesis_hash);
let mut db = db.lock().unwrap();
db.stage((chain_changeset, Default::default()));
db.commit()?;
chain
} else {
LocalChain::from_changeset(init_chain_changeset)?
});
println!( println!(
"[{:>10}s] loaded local chain from changeset", "[{:>10}s] loaded local chain from changeset",
start.elapsed().as_secs_f32() start.elapsed().as_secs_f32()
@@ -168,9 +161,13 @@ fn main() -> anyhow::Result<()> {
match rpc_cmd { match rpc_cmd {
RpcCommands::Sync { rpc_args } => { RpcCommands::Sync { rpc_args } => {
let RpcArgs { let RpcArgs {
fallback_height, .. fallback_height,
lookahead,
..
} = rpc_args; } = rpc_args;
graph.lock().unwrap().index.set_lookahead_for_all(lookahead);
let chain_tip = chain.lock().unwrap().tip(); let chain_tip = chain.lock().unwrap().tip();
let rpc_client = rpc_args.new_client()?; let rpc_client = rpc_args.new_client()?;
let mut emitter = Emitter::new(&rpc_client, chain_tip, fallback_height); let mut emitter = Emitter::new(&rpc_client, chain_tip, fallback_height);
@@ -236,10 +233,13 @@ fn main() -> anyhow::Result<()> {
} }
RpcCommands::Live { rpc_args } => { RpcCommands::Live { rpc_args } => {
let RpcArgs { let RpcArgs {
fallback_height, .. fallback_height,
lookahead,
..
} = rpc_args; } = rpc_args;
let sigterm_flag = start_ctrlc_handler(); let sigterm_flag = start_ctrlc_handler();
graph.lock().unwrap().index.set_lookahead_for_all(lookahead);
let last_cp = chain.lock().unwrap().tip(); let last_cp = chain.lock().unwrap().tip();
println!( println!(

View File

@@ -172,7 +172,14 @@ fn main() -> anyhow::Result<()> {
}; };
client client
.full_scan(tip, keychain_spks, stop_gap, scan_options.batch_size) .scan(
tip,
keychain_spks,
core::iter::empty(),
core::iter::empty(),
stop_gap,
scan_options.batch_size,
)
.context("scanning the blockchain")? .context("scanning the blockchain")?
} }
ElectrumCommands::Sync { ElectrumCommands::Sync {
@@ -272,7 +279,7 @@ fn main() -> anyhow::Result<()> {
drop((graph, chain)); drop((graph, chain));
let electrum_update = client let electrum_update = client
.sync(tip, spks, txids, outpoints, scan_options.batch_size) .scan_without_keychain(tip, spks, txids, outpoints, scan_options.batch_size)
.context("scanning the blockchain")?; .context("scanning the blockchain")?;
(electrum_update, BTreeMap::new()) (electrum_update, BTreeMap::new())
} }

View File

@@ -188,7 +188,13 @@ fn main() -> anyhow::Result<()> {
// represents the last active spk derivation indices of keychains // represents the last active spk derivation indices of keychains
// (`keychain_indices_update`). // (`keychain_indices_update`).
let (graph_update, last_active_indices) = client let (graph_update, last_active_indices) = client
.full_scan(keychain_spks, *stop_gap, scan_options.parallel_requests) .scan_txs_with_keychains(
keychain_spks,
core::iter::empty(),
core::iter::empty(),
*stop_gap,
scan_options.parallel_requests,
)
.context("scanning for transactions")?; .context("scanning for transactions")?;
let mut graph = graph.lock().expect("mutex must not be poisoned"); let mut graph = graph.lock().expect("mutex must not be poisoned");
@@ -306,7 +312,7 @@ fn main() -> anyhow::Result<()> {
} }
let graph_update = let graph_update =
client.sync(spks, txids, outpoints, scan_options.parallel_requests)?; client.scan_txs(spks, txids, outpoints, scan_options.parallel_requests)?;
graph.lock().unwrap().apply_update(graph_update) graph.lock().unwrap().apply_update(graph_update)
} }

View File

@@ -61,7 +61,7 @@ fn main() -> Result<(), anyhow::Error> {
relevant_txids, relevant_txids,
}, },
keychain_update, keychain_update,
) = client.full_scan(prev_tip, keychain_spks, STOP_GAP, BATCH_SIZE)?; ) = client.scan(prev_tip, keychain_spks, None, None, STOP_GAP, BATCH_SIZE)?;
println!(); println!();

View File

@@ -54,7 +54,7 @@ async fn main() -> Result<(), anyhow::Error> {
}) })
.collect(); .collect();
let (update_graph, last_active_indices) = client let (update_graph, last_active_indices) = client
.full_scan(keychain_spks, STOP_GAP, PARALLEL_REQUESTS) .scan_txs_with_keychains(keychain_spks, None, None, STOP_GAP, PARALLEL_REQUESTS)
.await?; .await?;
let missing_heights = update_graph.missing_heights(wallet.local_chain()); let missing_heights = update_graph.missing_heights(wallet.local_chain());
let chain_update = client.update_local_chain(prev_tip, missing_heights).await?; let chain_update = client.update_local_chain(prev_tip, missing_heights).await?;

View File

@@ -54,7 +54,7 @@ fn main() -> Result<(), anyhow::Error> {
.collect(); .collect();
let (update_graph, last_active_indices) = let (update_graph, last_active_indices) =
client.full_scan(keychain_spks, STOP_GAP, PARALLEL_REQUESTS)?; client.scan_txs_with_keychains(keychain_spks, None, None, STOP_GAP, PARALLEL_REQUESTS)?;
let missing_heights = update_graph.missing_heights(wallet.local_chain()); let missing_heights = update_graph.missing_heights(wallet.local_chain());
let chain_update = client.update_local_chain(prev_tip, missing_heights)?; let chain_update = client.update_local_chain(prev_tip, missing_heights)?;
let update = Update { let update = Update {