author     bigfoot547 <[email protected]>  2025-01-10 16:43:09 -0600
committer  bigfoot547 <[email protected]>  2025-01-10 16:43:09 -0600
commit     6dc6edc7af928593590a922bc4b8a3166a322c13 (patch)
tree       3e57c04c90558e3fbc59a81d03be7f2885ba1a72 /src/launcher
parent     untested moment (remove reqwest) (diff)
curl attempt (curl-attempt)
Diffstat (limited to 'src/launcher')
-rw-r--r--  src/launcher/download.rs  286
-rw-r--r--  src/launcher/request.rs     2
2 files changed, 265 insertions, 23 deletions
diff --git a/src/launcher/download.rs b/src/launcher/download.rs
index 4294d33..5240d55 100644
--- a/src/launcher/download.rs
+++ b/src/launcher/download.rs
@@ -1,36 +1,278 @@
-use std::path::{Path, PathBuf};
-use sha1_smol::Digest;
+use std::cmp::min;
+use std::collections::VecDeque;
+use std::error::Error;
+use std::future::Future;
+use std::pin::{pin, Pin};
+use std::task::{Context, Poll};
+use curl::easy::{Easy2, Handler, WriteError};
+use curl::multi::{Easy2Handle, EasyHandle, Multi};
+use tokio::task::spawn_blocking;
+use crate::launcher::constants::USER_AGENT;
-pub trait Download {
- fn get_url(&self) -> &str;
- fn get_path(&self) -> &Path;
- fn get_expect_digest(&self) -> Option<Digest>;
- fn get_expect_size(&self) -> Option<usize>;
+trait Download {
+ async fn prepare(&mut self, easy: &EasyHandle) -> Result<bool, Box<dyn Error>>;
+ async fn handle_chunk(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>>;
+ async fn finish(&mut self) -> Result<(), Box<dyn Error>>;
+}
+
+#[derive(Clone, Copy)]
+enum MultiDownloaderState {
+ Primed,
+ Running,
+ Complete
+}
+
+struct MultiDownloader<T>
+where
+ T: Download + Send + Unpin + 'static
+{
+ state: MultiDownloaderState,
+ nhandles: usize,
+ jobs: Option<VecDeque<T>>
+}
- fn always_redownload(&self) -> bool;
+impl<T> MultiDownloader<T>
+where
+ T: Download + Send + Unpin + 'static
+{
+ // TODO: this interface is kind of weird (we have to take ownership of the jobs, but maybe this isn't the best way to do that)
+
+ pub fn new(jobs: Vec<T>, nhandles: usize) -> MultiDownloader<T> {
+ assert!(nhandles > 0);
+
+ MultiDownloader {
+ state: MultiDownloaderState::Primed,
+ nhandles,
+ jobs: Some(jobs.into())
+ }
+ }
}
-pub type DownloadJob = dyn Download + Sync + Send;
+struct MultiDownloadResult {
-pub struct MultiDownloader<'j, 'js> {
- jobs: &'js [&'j DownloadJob],
- nhandles: usize
}
-impl<'j, 'js> MultiDownloader<'j, 'js> {
- pub fn new(jobs: &'js [&'j DownloadJob]) -> MultiDownloader<'j, 'js> {
- Self::with_handles(jobs, 8)
+impl<T> Future for MultiDownloader<T>
+where
+ T: Download + Send + Unpin + 'static
+{
+ type Output = MultiDownloadResult;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let self_mut = self.get_mut();
+ match self_mut.state {
+ MultiDownloaderState::Primed => {
+ self_mut.state = MultiDownloaderState::Running;
+ let jobs = self_mut.jobs.take().unwrap();
+ let nhandles = self_mut.nhandles;
+
+ spawn_blocking(move || {
+ // TODO
+ MultiDownloadBlocking::new(jobs, nhandles).perform();
+ });
+
+ Poll::Pending
+ },
+ MultiDownloaderState::Running => {
+ Poll::Pending // TODO
+ },
+ MultiDownloaderState::Complete => panic!("multi download polled after completion")
+ }
}
+}
+
+struct MultiHandler<T: Download> {
+ job: Option<T>
+}
- pub fn with_handles(jobs: &'js [&'j DownloadJob], nhandles: usize) -> MultiDownloader<'j, 'js> {
+impl<T: Download> Handler for MultiHandler<T> {
+ fn write(&mut self, data: &[u8]) -> Result<usize, WriteError> {
+ todo!()
+ }
+}
+
+struct MultiDownloadBlocking<T: Download> {
+ multi: Multi,
+ easy_handles: Vec<Easy2Handle<MultiHandler<T>>>,
+ jobs: VecDeque<T>
+}
+
+impl<T: Download> MultiDownloadBlocking<T> {
+ fn new(jobs: VecDeque<T>, nhandles: usize) -> MultiDownloadBlocking<T> {
assert!(nhandles > 0);
-
- MultiDownloader {
- jobs, nhandles
+
+ let nhandles = min(nhandles, jobs.len());
+
+ let multi = Multi::new();
+ let mut easy_handles = Vec::with_capacity(nhandles);
+
+ for n in 0..nhandles {
+ let mut easy = Easy2::new(MultiHandler { job: None });
+ easy.useragent(USER_AGENT).expect("setting user agent shouldn't fail");
+
+ let mut handle = multi.add2(easy).expect("adding easy handle cannot fail");
+ handle.set_token(n).expect("setting token cannot fail");
+ easy_handles.push(handle);
+ }
+
+ MultiDownloadBlocking {
+ multi,
+ easy_handles,
+ jobs
}
}
+
+ fn prepare_job(&mut self, easy: &mut Easy2Handle<MultiHandler<T>>, job: T) {
+ let handler = easy.get_mut();
+
+ todo!()
+ }
- fn do_it(&self) {
-
+ fn perform(&mut self) -> MultiDownloadResult {
+ todo!()
}
-}
\ No newline at end of file
+}
+
+//
+// pub struct MultiDownloader<'j> {
+// state: MultiDownloaderState,
+// jobs: VecDeque<DownloadJob>,
+// multi: Multi,
+// handles: Vec<Easy2Handle<EasyHandler<'j>>>
+// }
+//
+// pub struct EasyHandler<'j> {
+// job: Option<&'j DownloadJob>
+// }
+//
+// impl<'j> Handler for EasyHandler<'j> {
+// fn write(&mut self, data: &[u8]) -> Result<usize, WriteError> {
+//
+// }
+// }
+//
+// impl<'j, 'js, T> MultiDownloader<'j, 'js, T>
+// where
+// T: Download + Sync + Send + ?Sized
+// {
+// pub fn new(jobs: &'js [&'j T]) -> MultiDownloader<'j, 'js, T> {
+// Self::with_handles(jobs, 8)
+// }
+//
+// pub fn with_handles(jobs: &'js [&'j T], nhandles: usize) -> MultiDownloader<'j, 'js, T> {
+// assert!(nhandles > 0);
+//
+// let mut handles = Vec::with_capacity(nhandles);
+// let multi = Multi::new();
+//
+// for n in 0..nhandles {
+// let mut easy = multi.add2(Easy2::new(EasyHandler { job: None })).expect("adding easy handle should not fail");
+// easy.set_token(n).expect("setting token should not fail");
+//
+// handles.push(easy);
+// }
+//
+// MultiDownloader {
+// state: MultiDownloaderState::Primed,
+//
+// multi,
+// handles
+// }
+// }
+//
+// fn next_job(&mut self) -> Option<&T> {
+// let oj = self.jobs.get(self.job_idx).cloned();
+// self.job_idx += 1;
+// oj
+// }
+// }
+//
+// pub struct MultiDownloadResult {
+//
+// }
+//
+// impl<'j, 'js, T> Future for MultiDownloader<'j, 'js, T>
+// where
+// T: Download + Sync + Send + ?Sized
+// {
+// type Output = MultiDownloadResult;
+//
+// fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+// let self_mut = self.get_mut();
+// match self_mut.state {
+// MultiDownloaderState::Primed => {
+// self_mut.state = MultiDownloaderState::Running;
+//
+// // TODO: assign download jobs to each
+//
+// Poll::Pending
+// },
+// MultiDownloaderState::Running => {
+// todo!()
+// },
+// MultiDownloaderState::Complete => panic!("multi downloader polled after completion")
+// }
+// }
+// }
+
+// fn sample() {
+// struct AbcdDownloader {
+// file: Option<File>,
+// path: PathBuf,
+// url: String,
+// expect_size: usize,
+// expect_sha1: Digest
+// }
+//
+// impl Download for AbcdDownloader {
+// async fn before_download(&mut self) -> Result<bool, Box<dyn Error>> {
+// let mut file = match File::open(&self.path).await {
+// Ok(f) => f,
+// Err(e) => return match e.kind() {
+// ErrorKind::NotFound => {
+// // TODO: ensure parent dirs exist
+//
+// Ok(true)
+// },
+// _ => Err(e.into())
+// }
+// };
+//
+// let mut buf = [0u8; 4096];
+// let mut sha1 = Sha1::new();
+// let mut tally = 0usize;
+//
+// loop {
+// let n = file.read(&mut buf[..]).await?;
+// if n == 0 {
+// file.seek(SeekFrom::Start(0)).await?;
+// break;
+// }
+//
+// sha1.update(&buf[..n]);
+// tally += n;
+// }
+//
+// Ok(tally != self.expect_size || sha1.digest() != self.expect_sha1)
+// }
+//
+// async fn handle_chunk(&mut self, data: &[u8]) -> Result<usize, WriteError> {
+// self.file.unwrap().write_all(data).await
+// }
+//
+// async fn after_download(&mut self) -> Result<(), Box<dyn Error>> {
+// todo!()
+// }
+// }
+//
+// let mut jobs = Vec::new();
+// jobs.push(AbcdDownloader {
+// file: None,
+// path: PathBuf::from("/home/bigfoot/test.words"),
+// url: String::from("https://github.com/"),
+// expect_size: 10,
+// expect_sha1: Digest::default()
+// });
+//
+// let abcd = MultiDownloader::new(jobs.iter().map(|a| a).collect::<Vec<&AbcdDownloader>>().as_slice());
+// }
diff --git a/src/launcher/request.rs b/src/launcher/request.rs
index df89a8b..ab11b73 100644
--- a/src/launcher/request.rs
+++ b/src/launcher/request.rs
@@ -122,7 +122,7 @@ impl Future for EasyFetch {
response_code: easy.response_code().expect("querying response code should not fail"),
data: out_data,
easy
- })).expect("curl fetch reader hangup (this shouldn't happen)");
+ })).ok(); // suppress this result since we don't really care if the caller hung up
waker.wake();
});