Severity: MEDIUM · CWE: CWE-362
Location: orange-sdk/src/trusted_wallet/cashu/cashu_store.rs:833-859
Scanner: llm-code-review · Fingerprint: 5448544ac6ebdb80…
Description
CashuKvDatabase::increment_keyset_counter reads the per-keyset counter, computes current_count + count, then writes the new value back as three independent KV operations with no locking, no compare-and-swap, and no in-memory mutex (lines 839-856). Two concurrent calls observe the same current_count, both compute and store the same new_count, and both return that identical value to their callers.
In cdk's wallet, this counter is the BIP32-style derivation index used to deterministically derive blinding factors / secrets for blinded outputs (mint/swap operations). Two callers that obtain the same "new" counter value will derive the same secrets/blinding factors for their respective batches, which (a) defeats the linkability protections the counter is meant to provide and (b) can cause one of the operations' blinded outputs to be rejected by the mint as duplicates (because the corresponding B_ is already pre-committed by the other in-flight operation) — meaning the second caller's quote can be paid by the user while the resulting proofs are unusable.
The race is exercised today inside cdk::Wallet because mint/swap operations can run concurrently against the same wallet (e.g. parallel mint and melt, or the rebalancer racing a user-triggered receive). I assume cdk's WalletDatabase contract expects increment_keyset_counter to be atomic — every other WalletDatabase backend I'm aware of implements it that way — but I cannot read cdk's source from the worktree, so flagging that assumption.
The attached regression test spawns four concurrent increment_keyset_counter(&id, 1) calls against an in-memory store that uses a tokio::sync::Barrier to force the reads to overlap. An atomic implementation must return the distinct values {1,2,3,4}; the current implementation returns {1,1,1,1}.
Proof of concept (regression test)
--- a/orange-sdk/src/trusted_wallet/cashu/cashu_store.rs
+++ b/orange-sdk/src/trusted_wallet/cashu/cashu_store.rs
@@ -1257,3 +1257,150 @@
.await
.map_err(TrustedError::IOError)
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::future::Future;
+ use std::pin::Pin;
+ use std::sync::Mutex as StdMutex;
+ use std::time::Duration;
+ use tokio::sync::Barrier;
+
+ type StoreData = HashMap<(String, String, String), Vec<u8>>;
+
+ struct InMemoryStore {
+ data: Arc<StdMutex<StoreData>>,
+ read_barrier: Option<Arc<Barrier>>,
+ barrier_secondary: String,
+ }
+
+ impl InMemoryStore {
+ fn with_read_barrier(secondary_ns: &str, count: usize) -> Self {
+ Self {
+ data: Arc::new(StdMutex::new(HashMap::new())),
+ read_barrier: Some(Arc::new(Barrier::new(count))),
+ barrier_secondary: secondary_ns.to_string(),
+ }
+ }
+ }
+
+ impl DynStore for InMemoryStore {
+ fn read_async(
+ &self, p: &str, s: &str, k: &str,
+ ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, io::Error>> + Send + 'static>> {
+ let data = Arc::clone(&self.data);
+ let barrier =
+ if s == self.barrier_secondary { self.read_barrier.clone() } else { None };
+ let key = (p.to_string(), s.to_string(), k.to_string());
+ Box::pin(async move {
+ if let Some(b) = barrier {
+ let _ = tokio::time::timeout(Duration::from_millis(200), b.wait()).await;
+ }
+ let map = data.lock().unwrap();
+ match map.get(&key) {
+ Some(v) => Ok(v.clone()),
+ None => Err(io::Error::new(io::ErrorKind::NotFound, "not found")),
+ }
+ })
+ }
+
+ fn write_async(
+ &self, p: &str, s: &str, k: &str, buf: Vec<u8>,
+ ) -> Pin<Box<dyn Future<Output = Result<(), io::Error>> + Send + 'static>> {
+ let data = Arc::clone(&self.data);
+ let key = (p.to_string(), s.to_string(), k.to_string());
+ Box::pin(async move {
+ data.lock().unwrap().insert(key, buf);
+ Ok(())
+ })
+ }
+
+ fn remove_async(
+ &self, p: &str, s: &str, k: &str, _lazy: bool,
+ ) -> Pin<Box<dyn Future<Output = Result<(), io::Error>> + Send + 'static>> {
+ let data = Arc::clone(&self.data);
+ let key = (p.to_string(), s.to_string(), k.to_string());
+ Box::pin(async move {
+ data.lock().unwrap().remove(&key);
+ Ok(())
+ })
+ }
+
+ fn list_async(
+ &self, p: &str, s: &str,
+ ) -> Pin<Box<dyn Future<Output = Result<Vec<String>, io::Error>> + Send + 'static>> {
+ let data = Arc::clone(&self.data);
+ let p = p.to_string();
+ let s = s.to_string();
+ Box::pin(async move {
+ let map = data.lock().unwrap();
+ let keys: Vec<String> = map
+ .iter()
+ .filter(|((pp, ss, _), _)| pp == &p && ss == &s)
+ .map(|((_, _, k), _)| k.clone())
+ .collect();
+ Ok(keys)
+ })
+ }
+
+ fn read_sync(&self, p: &str, s: &str, k: &str) -> Result<Vec<u8>, io::Error> {
+ let map = self.data.lock().unwrap();
+ match map.get(&(p.to_string(), s.to_string(), k.to_string())) {
+ Some(v) => Ok(v.clone()),
+ None => Err(io::Error::new(io::ErrorKind::NotFound, "not found")),
+ }
+ }
+
+ fn write_sync(&self, p: &str, s: &str, k: &str, buf: Vec<u8>) -> Result<(), io::Error> {
+ self.data.lock().unwrap().insert((p.to_string(), s.to_string(), k.to_string()), buf);
+ Ok(())
+ }
+
+ fn remove_sync(&self, p: &str, s: &str, k: &str, _lazy: bool) -> Result<(), io::Error> {
+ self.data.lock().unwrap().remove(&(p.to_string(), s.to_string(), k.to_string()));
+ Ok(())
+ }
+
+ fn list_sync(&self, p: &str, s: &str) -> Result<Vec<String>, io::Error> {
+ let map = self.data.lock().unwrap();
+ let keys: Vec<String> = map
+ .iter()
+ .filter(|((pp, ss, _), _)| pp == p && ss == s)
+ .map(|((_, _, k), _)| k.clone())
+ .collect();
+ Ok(keys)
+ }
+ }
+
+ #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+ async fn test_increment_keyset_counter_is_atomic() {
+ const N: usize = 4;
+ let store: Arc<dyn DynStore> =
+ Arc::new(InMemoryStore::with_read_barrier(KEYSET_COUNTERS_KEY, N));
+ let db = Arc::new(CashuKvDatabase::new(store).await.unwrap());
+
+ let keyset_id = Id::from_str("009a1f293253e41e").expect("valid cdk keyset id");
+
+ let mut handles = Vec::with_capacity(N);
+ for _ in 0..N {
+ let db = Arc::clone(&db);
+ handles.push(tokio::spawn(async move {
+ db.increment_keyset_counter(&keyset_id, 1).await.unwrap()
+ }));
+ }
+
+ let mut results = Vec::with_capacity(N);
+ for h in handles {
+ results.push(h.await.unwrap());
+ }
+ results.sort();
+
+ let expected: Vec<u32> = (1..=N as u32).collect();
+ assert_eq!(
+ results, expected,
+ "increment_keyset_counter is not atomic: got {:?}, expected {:?}",
+ results, expected,
+ );
+ }
+}
Reported by loupe scan, finding #4 (repo 1, job 3)
Severity: MEDIUM · CWE: CWE-362
Location:
orange-sdk/src/trusted_wallet/cashu/cashu_store.rs:833-859 · Scanner: llm-code-review · Fingerprint:
5448544ac6ebdb80… · Description
CashuKvDatabase::increment_keyset_counter reads the per-keyset counter, computes current_count + count, then writes the new value back as three independent KV operations with no locking, no compare-and-swap, and no in-memory mutex (lines 839-856). Two concurrent calls observe the same current_count, both compute and store the same new_count, and both return that identical value to their callers. In cdk's wallet, this counter is the BIP32-style derivation index used to deterministically derive blinding factors / secrets for blinded outputs (mint/swap operations). Two callers that obtain the same "new" counter value will derive the same secrets/blinding factors for their respective batches, which (a) defeats the linkability protections the counter is meant to provide and (b) can cause one of the operations' blinded outputs to be rejected by the mint as duplicates (because the corresponding
B_ is already pre-committed by the other in-flight operation) — meaning the second caller's quote can be paid by the user while the resulting proofs are unusable. The race is exercised today inside
cdk::Wallet because mint/swap operations can run concurrently against the same wallet (e.g. parallel mint and melt, or the rebalancer racing a user-triggered receive). I assume cdk's WalletDatabase contract expects increment_keyset_counter to be atomic — every other WalletDatabase backend I'm aware of implements it that way — but I cannot read cdk's source from the worktree, so flagging that assumption. The attached regression test spawns four concurrent
increment_keyset_counter(&id, 1) calls against an in-memory store that uses a tokio::sync::Barrier to force the reads to overlap. An atomic implementation must return the distinct values {1,2,3,4}; the current implementation returns {1,1,1,1}. Proof of concept (regression test)
Reported by loupe scan, finding #4 (repo 1, job 3)