[pbs-devel] [PATCH proxmox-backup v2 08/12] local store cache: drop obsolete cacher implementation
Christian Ebner
c.ebner at proxmox.com
Wed Oct 8 17:21:21 CEST 2025
Since the local store cache no longer uses the inner LRU cache on
chunk access, the S3Cacher implementation is obsolete and can be
replaced by the S3Client directly when fetching is required.

Drop all now obsolete code and adapt call sites accordingly.
Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
---
pbs-datastore/src/datastore.rs | 12 -----
pbs-datastore/src/local_chunk_reader.rs | 27 +++++-------
.../src/local_datastore_lru_cache.rs | 44 ++-----------------
src/api2/reader/mod.rs | 34 ++++++--------
4 files changed, 29 insertions(+), 88 deletions(-)
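
In short, call sites switch from constructing an S3Cacher to handing the
Arc<S3Client> straight to the cache. A minimal sketch of the resulting call
pattern, summarizing the hunks below (not part of the applied patch):

    // New signature in local_datastore_lru_cache.rs: the client replaces the cacher.
    pub async fn access(
        &self,
        digest: &[u8; 32],
        client: Arc<S3Client>,
    ) -> Result<Option<DataBlob>, Error>

    // Caller side in a sync context, as in local_chunk_reader.rs:
    let blob = proxmox_async::runtime::block_on(cache.access(digest, s3_client.clone()))?
        .ok_or(format_err!(
            "unable to access chunk with digest {}",
            hex::encode(digest)
        ))?;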
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index acf22e9b0..a6b17e3c3 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -41,7 +41,6 @@ use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
use crate::fixed_index::{FixedIndexReader, FixedIndexWriter};
use crate::hierarchy::{ListGroups, ListGroupsType, ListNamespaces, ListNamespacesRecursive};
use crate::index::IndexFile;
-use crate::local_datastore_lru_cache::S3Cacher;
use crate::s3::S3_CONTENT_PREFIX;
use crate::task_tracking::{self, update_active_operations};
use crate::{DataBlob, LocalDatastoreLruCache};
@@ -291,17 +290,6 @@ impl DataStore {
Ok(())
}
- /// Returns the cacher for datastores backed by S3 object stores.
- /// This allows to fetch chunks to the local cache store on-demand.
- pub fn cacher(&self) -> Result<Option<S3Cacher>, Error> {
- self.backend().map(|backend| match backend {
- DatastoreBackend::S3(s3_client) => {
- Some(S3Cacher::new(s3_client, self.inner.chunk_store.clone()))
- }
- DatastoreBackend::Filesystem => None,
- })
- }
-
pub fn lookup_datastore(
name: &str,
operation: Option<Operation>,
diff --git a/pbs-datastore/src/local_chunk_reader.rs b/pbs-datastore/src/local_chunk_reader.rs
index 36bce1552..c50a63fb7 100644
--- a/pbs-datastore/src/local_chunk_reader.rs
+++ b/pbs-datastore/src/local_chunk_reader.rs
@@ -70,13 +70,11 @@ impl ReadChunk for LocalChunkReader {
DatastoreBackend::S3(s3_client) => match self.store.cache() {
None => proxmox_async::runtime::block_on(fetch(Arc::clone(s3_client), digest))?,
Some(cache) => {
- let mut cacher = self
- .store
- .cacher()?
- .ok_or(format_err!("no cacher for datastore"))?;
- proxmox_async::runtime::block_on(cache.access(digest, &mut cacher))?.ok_or(
- format_err!("unable to access chunk with digest {}", hex::encode(digest)),
- )?
+ proxmox_async::runtime::block_on(cache.access(digest, s3_client.clone()))?
+ .ok_or(format_err!(
+ "unable to access chunk with digest {}",
+ hex::encode(digest)
+ ))?
}
},
};
@@ -109,14 +107,13 @@ impl AsyncReadChunk for LocalChunkReader {
DatastoreBackend::S3(s3_client) => match self.store.cache() {
None => fetch(Arc::clone(s3_client), digest).await?,
Some(cache) => {
- let mut cacher = self
- .store
- .cacher()?
- .ok_or(format_err!("no cacher for datastore"))?;
- cache.access(digest, &mut cacher).await?.ok_or(format_err!(
- "unable to access chunk with digest {}",
- hex::encode(digest)
- ))?
+ cache
+ .access(digest, s3_client.clone())
+ .await?
+ .ok_or(format_err!(
+ "unable to access chunk with digest {}",
+ hex::encode(digest)
+ ))?
}
},
};
diff --git a/pbs-datastore/src/local_datastore_lru_cache.rs b/pbs-datastore/src/local_datastore_lru_cache.rs
index f03265a5b..fe3b51a55 100644
--- a/pbs-datastore/src/local_datastore_lru_cache.rs
+++ b/pbs-datastore/src/local_datastore_lru_cache.rs
@@ -1,53 +1,17 @@
//! Use a local datastore as cache for operations on a datastore attached via
//! a network layer (e.g. via the S3 backend).
-use std::future::Future;
use std::sync::Arc;
use anyhow::{bail, Error};
use http_body_util::BodyExt;
-use pbs_tools::async_lru_cache::{AsyncCacher, AsyncLruCache};
+use pbs_tools::async_lru_cache::AsyncLruCache;
use proxmox_s3_client::S3Client;
use crate::ChunkStore;
use crate::DataBlob;
-#[derive(Clone)]
-/// Cacher to fetch chunks from the S3 object store and insert them in the local cache store.
-pub struct S3Cacher {
- client: Arc<S3Client>,
- store: Arc<ChunkStore>,
-}
-
-impl AsyncCacher<[u8; 32], ()> for S3Cacher {
- fn fetch(
- &self,
- key: [u8; 32],
- ) -> Box<dyn Future<Output = Result<Option<()>, Error>> + Send + 'static> {
- let client = Arc::clone(&self.client);
- let store = Arc::clone(&self.store);
- Box::new(async move {
- let object_key = crate::s3::object_key_from_digest(&key)?;
- match client.get_object(object_key).await? {
- None => bail!("could not fetch object with key {}", hex::encode(key)),
- Some(response) => {
- let bytes = response.content.collect().await?.to_bytes();
- let chunk = DataBlob::from_raw(bytes.to_vec())?;
- store.insert_chunk(&chunk, &key)?;
- Ok(Some(()))
- }
- }
- })
- }
-}
-
-impl S3Cacher {
- pub fn new(client: Arc<S3Client>, store: Arc<ChunkStore>) -> Self {
- Self { client, store }
- }
-}
-
/// LRU cache using local datastore for caching chunks
///
/// Uses a LRU cache, but without storing the values in-memory but rather
@@ -100,7 +64,7 @@ impl LocalDatastoreLruCache {
pub async fn access(
&self,
digest: &[u8; 32],
- cacher: &mut S3Cacher,
+ client: Arc<S3Client>,
) -> Result<Option<DataBlob>, Error> {
let (path, _digest_str) = self.store.chunk_path(digest);
match std::fs::File::open(&path) {
@@ -116,7 +80,7 @@ impl LocalDatastoreLruCache {
use std::io::Seek;
// Check if file is empty marker file, try fetching content if so
if file.seek(std::io::SeekFrom::End(0))? == 0 {
- let chunk = self.fetch_and_insert(cacher.client.clone(), digest).await?;
+ let chunk = self.fetch_and_insert(client.clone(), digest).await?;
Ok(Some(chunk))
} else {
Err(err)
@@ -126,7 +90,7 @@ impl LocalDatastoreLruCache {
Err(err) => {
// Failed to open file, missing
if err.kind() == std::io::ErrorKind::NotFound {
- let chunk = self.fetch_and_insert(cacher.client.clone(), digest).await?;
+ let chunk = self.fetch_and_insert(client.clone(), digest).await?;
Ok(Some(chunk))
} else {
Err(Error::from(err))
diff --git a/src/api2/reader/mod.rs b/src/api2/reader/mod.rs
index 846493c61..155e862c6 100644
--- a/src/api2/reader/mod.rs
+++ b/src/api2/reader/mod.rs
@@ -327,28 +327,20 @@ fn download_chunk(
let body = match &env.backend {
DatastoreBackend::Filesystem => load_from_filesystem(env, &digest)?,
- DatastoreBackend::S3(s3_client) => {
- match env.datastore.cache() {
- None => fetch_from_object_store(s3_client, &digest).await?,
- Some(cache) => {
- let mut cacher = env
- .datastore
- .cacher()?
- .ok_or(format_err!("no cacher for datastore"))?;
- // Download from object store, insert to local cache store and read from
- // file. Can this be optimized?
- let chunk =
- cache
- .access(&digest, &mut cacher)
- .await?
- .ok_or(format_err!(
- "unable to access chunk with digest {}",
- hex::encode(digest)
- ))?;
- Body::from(chunk.raw_data().to_owned())
- }
+ DatastoreBackend::S3(s3_client) => match env.datastore.cache() {
+ None => fetch_from_object_store(s3_client, &digest).await?,
+ Some(cache) => {
+ let chunk =
+ cache
+ .access(&digest, s3_client.clone())
+ .await?
+ .ok_or(format_err!(
+ "unable to access chunk with digest {}",
+ hex::encode(digest)
+ ))?;
+ Body::from(chunk.raw_data().to_owned())
}
- }
+ },
};
// fixme: set other headers ?
--
2.47.3