snix_castore/proto/grpc_blobservice_wrapper.rs

use crate::{B3Digest, blobservice::BlobService};
use core::pin::pin;
use futures::{TryFutureExt, stream::BoxStream};
use std::{collections::VecDeque, ops::Deref};
use tokio_stream::StreamExt;
use tokio_util::io::ReaderStream;
use tonic::{Request, Response, Status, Streaming, async_trait};
use tracing::{Span, instrument, warn};

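/// Exposes an existing [BlobService] as a (tonic) gRPC
/// [super::blob_service_server::BlobService] server.
///
/// A minimal usage sketch, assuming a `blob_service: Arc<dyn BlobService>`
/// handle is already available; the surrounding server setup is illustrative
/// and not part of this module:
///
/// ```ignore
/// let wrapper = GRPCBlobServiceWrapper::new(blob_service);
/// let svc = super::blob_service_server::BlobServiceServer::new(wrapper);
/// // `svc` can now be registered with a tonic Server/Router and served.
/// ```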
pub struct GRPCBlobServiceWrapper<T> {
    blob_service: T,
}

impl<T> GRPCBlobServiceWrapper<T> {
    pub fn new(blob_service: T) -> Self {
        Self { blob_service }
    }
}

#[async_trait]
impl<T> super::blob_service_server::BlobService for GRPCBlobServiceWrapper<T>
where
    T: Deref<Target = dyn BlobService> + Send + Sync + 'static,
{
    type ReadStream = BoxStream<'static, Result<super::BlobChunk, Status>>;

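    /// Reports whether a blob exists and how it is chunked, by asking the
    /// underlying [BlobService] for its list of chunks.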
    #[instrument(skip_all)]
    async fn stat(
        &self,
        request: Request<super::StatBlobRequest>,
    ) -> Result<Response<super::StatBlobResponse>, Status> {
        let rq = request.into_inner();
        let req_digest: B3Digest = rq
            .digest
            .try_into()
            .map_err(|_e| Status::invalid_argument("invalid digest length"))?;

        let span = Span::current();
        span.record("blob.digest", req_digest.to_string());

        match self.blob_service.chunks(&req_digest).await {
            Ok(None) => Err(Status::not_found(format!("blob {} not found", &req_digest))),
            Ok(Some(chunk_metas)) => Ok(Response::new(super::StatBlobResponse {
                chunks: chunk_metas,
                ..Default::default()
            })),
            Err(e) => {
                warn!(err=%e, "failed to request chunks");
                Err(e.into())
            }
        }
    }

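    /// Streams the contents of a blob back to the client as a sequence of
    /// [super::BlobChunk] messages.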
    #[instrument(skip_all)]
    async fn read(
        &self,
        request: Request<super::ReadBlobRequest>,
    ) -> Result<Response<Self::ReadStream>, Status> {
        let rq = request.into_inner();

        let req_digest: B3Digest = rq
            .digest
            .try_into()
            .map_err(|_e| Status::invalid_argument("invalid digest length"))?;

        let span = Span::current();
        span.record("blob.digest", req_digest.to_string());

        match self.blob_service.open_read(&req_digest).await {
            Ok(Some(r)) => {
                let chunks_stream =
                    ReaderStream::new(r).map(|chunk| Ok(super::BlobChunk { data: chunk? }));
                Ok(Response::new(Box::pin(chunks_stream)))
            }
            Ok(None) => Err(Status::not_found(format!("blob {} not found", &req_digest))),
            Err(e) => {
                warn!(err=%e, "failed to call open_read");
                Err(e.into())
            }
        }
    }

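    /// Receives a client-streamed blob, writes it through a blob writer of the
    /// underlying [BlobService], and returns the resulting [B3Digest].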
    #[instrument(skip_all)]
    async fn put(
        &self,
        request: Request<Streaming<super::BlobChunk>>,
    ) -> Result<Response<super::PutBlobResponse>, Status> {
        let req_inner = request.into_inner();

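        // Adapt the incoming stream of BlobChunk messages into an AsyncRead:
        // each chunk payload becomes a buffer for the StreamReader, while
        // transport errors are surfaced as std::io errors.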
        let data_stream = req_inner.map(|x| {
            x.map(|x| VecDeque::from(x.data.to_vec()))
                .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, e))
        });

        let mut data_reader = tokio_util::io::StreamReader::new(data_stream);

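        // Copy everything into a freshly opened blob writer; closing the
        // writer finalizes the blob and yields its digest.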
        let mut blob_writer = pin!(self.blob_service.open_write().await);

        tokio::io::copy(&mut data_reader, &mut blob_writer)
            .await
            .map_err(|e| {
                warn!("error copying: {}", e);
                Status::internal("error copying")
            })?;

        let digest = blob_writer
            .close()
            .map_err(|e| {
                warn!("error closing stream: {}", e);
                Status::internal("error closing stream")
            })
            .await?;

        Ok(Response::new(super::PutBlobResponse {
            digest: digest.into(),
        }))
    }
}