@@ -5,13 +5,12 @@
 pub use asuran_core::manifest::archive::{Archive, ChunkLocation, Extent};
 pub use asuran_core::manifest::listing::{Listing, Node, NodeType};
 
-use async_lock::RwLock;
 use chrono::prelude::*;
 use dashmap::DashMap;
 use futures::future::join_all;
 use serde::{Deserialize, Serialize};
 use serde_cbor::Serializer;
-use smol::Task;
+use smol::lock::RwLock;
 use thiserror::Error;
 
 use std::collections::VecDeque;
@@ -143,12 +142,13 @@
         repository: &mut Repository<impl BackendClone>,
         path: &str,
         from_reader: R,
+        ex: &smol::Executor<'_>,
     ) -> Result<()> {
         // We take advantage of put_sparse_object's behavior of reading past the given end if the
         // given reader is actually longer
         let extent = Extent { start: 0, end: 0 };
         let readers = vec![(extent, from_reader)];
-        self.put_sparse_object(chunker, repository, path, readers)
+        self.put_sparse_object(chunker, repository, path, readers, ex)
             .await
     }
 
@@ -161,6 +161,7 @@
         repository: &mut Repository<impl BackendClone>,
         path: &str,
         from_readers: Vec<(Extent, R)>,
+        ex: &smol::Executor<'_>,
     ) -> Result<()> {
         let mut locations: Vec<ChunkLocation> = Vec::new();
         let path = self.canonical_namespace() + path.trim();
@@ -175,7 +176,7 @@
                 let end = start + (data.len() as u64);
 
                 let mut repository = repository.clone();
-                futs.push_back(Task::spawn(async move {
+                futs.push_back(ex.spawn(async move {
                     let id = repository.write_chunk(data).await?.0;
                     let result: Result<ChunkLocation> = Ok(ChunkLocation {
                         id,
@@ -416,7 +417,12 @@
 
     #[test]
     fn single_add_get() {
-        smol::run(async {
+        let ex = smol::Executor::new();
+        let ex: &'static smol::Executor = Box::leak(Box::new(ex));
+        let (_signal, shutdown) = smol::channel::unbounded::<()>();
+        std::thread::spawn(move || futures::executor::block_on(ex.run(shutdown.recv())));
+
+        smol::block_on(async {
             let seed = 0;
             println!("Seed: {}", seed);
             let chunker = FastCDC::default();
@@ -439,7 +445,7 @@
             let input_file = BufReader::new(fs::File::open(input_file_path).unwrap());
 
             archive
-                .put_object(&chunker, &mut repo, "FileOne", input_file)
+                .put_object(&chunker, &mut repo, "FileOne", input_file, &ex)
                 .await
                 .unwrap();
 
@@ -471,7 +477,11 @@
 
     #[test]
     fn sparse_add_get() {
-        smol::run(async {
+        let ex = smol::Executor::new();
+        let ex: &'static smol::Executor = Box::leak(Box::new(ex));
+        let (_signal, shutdown) = smol::channel::unbounded::<()>();
+        std::thread::spawn(move || futures::executor::block_on(ex.run(shutdown.recv())));
+        smol::block_on(async {
             let seed = 0;
             let chunker: FastCDC = FastCDC::default();
             let key = Key::random(32);
@@ -483,6 +493,7 @@
             // Generate a random number of extents from one to ten
             let mut extents: Vec<Extent> = Vec::new();
             let extent_count: usize = rng.gen_range(1, 10);
+            let ex: &'static smol::Executor = Box::leak(Box::new(ex));
             let mut next_start: u64 = 0;
             let mut final_size: usize = 0;
             for _ in 0..extent_count {
@@ -519,7 +530,7 @@
             // println!("Extent list: {:?}", extent_list);
             // Load data into archive
             archive
-                .put_sparse_object(&chunker, &mut repo, "test", extent_list)
+                .put_sparse_object(&chunker, &mut repo, "test", extent_list, &ex)
                 .await
                 .expect("Archive Put Failed");
 
@@ -575,7 +586,11 @@
 
     #[test]
     fn namespaced_insertions() {
-        smol::run(async {
+        let ex = smol::Executor::new();
+        let ex: &'static smol::Executor = Box::leak(Box::new(ex));
+        let (_signal, shutdown) = smol::channel::unbounded::<()>();
+        std::thread::spawn(move || futures::executor::block_on(ex.run(shutdown.recv())));
+        smol::block_on(async {
             let chunker = FastCDC::default();
             let key = Key::random(32);
 
@@ -588,11 +603,11 @@
             let mut archive_2 = archive_1.clone();
 
             archive_1
-                .put_object(&chunker, &mut repo, "1", obj1.clone())
+                .put_object(&chunker, &mut repo, "1", obj1.clone(), &ex)
                 .await
                 .unwrap();
             archive_2
-                .put_object(&chunker, &mut repo, "2", obj2.clone())
+                .put_object(&chunker, &mut repo, "2", obj2.clone(), &ex)
                 .await
                 .unwrap();
 
@@ -621,7 +636,11 @@
 
     #[test]
     fn commit_and_load() {
-        smol::run(async {
+        let ex = smol::Executor::new();
+        let ex: &'static smol::Executor = Box::leak(Box::new(ex));
+        let (_signal, shutdown) = smol::channel::unbounded::<()>();
+        std::thread::spawn(move || futures::executor::block_on(ex.run(shutdown.recv())));
+        smol::block_on(async {
            let chunker = FastCDC::default();
            let key = Key::random(32);
 
@@ -635,7 +654,7 @@
 
             let mut archive = ActiveArchive::new("test");
             archive
-                .put_object(&chunker, &mut repo, "1", obj1.clone())
+                .put_object(&chunker, &mut repo, "1", obj1.clone(), &ex)
                 .await
                 .expect("Unable to put object in archive");
 

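The hunks above drop the global `smol::run` runtime and the free-standing `Task::spawn` in favor of an explicit `smol::Executor` that callers pass into `put_object`/`put_sparse_object`. Below is a minimal, self-contained sketch of that spawn-and-join pattern; the chunk data and the work done per task are stand-ins, not asuran's actual repository types.

use futures::future::join_all;
use std::collections::VecDeque;

// Spawn one task per "chunk" on the shared executor and wait for all of them,
// mirroring the ex.spawn(...) + join_all flow in put_sparse_object above.
async fn write_chunks(ex: &smol::Executor<'_>, chunks: Vec<Vec<u8>>) -> Vec<usize> {
    let mut futs = VecDeque::new();
    for data in chunks {
        // Placeholder work: a real write would hand `data` to the repository.
        futs.push_back(ex.spawn(async move { data.len() }));
    }
    join_all(futs).await
}

fn main() {
    let ex = smol::Executor::new();
    // Drive the executor on the current thread until the future completes.
    let lens = smol::block_on(ex.run(write_chunks(&ex, vec![vec![0; 4], vec![0; 8]])));
    assert_eq!(lens, vec![4, 8]);
}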
@@ -135,7 +135,7 @@
     // the key we read back is the same
     #[test]
     fn key_store_load() {
-        smol::run(async {
+        smol::block_on(async {
             let (key, enc_key, settings) = setup();
             let directory = tempdir().unwrap();
             let file = directory.path().join("temp.asuran");

@@ -89,7 +89,7 @@
 
     #[test]
     fn chunk_settings_sanity() {
-        smol::run(async {
+        smol::block_on(async {
             let settings = ChunkSettings {
                 encryption: Encryption::NoEncryption,
                 compression: Compression::NoCompression,
@@ -110,7 +110,7 @@
 
     #[test]
     fn new_archive_updates_time() {
-        smol::run(async {
+        smol::block_on(async {
             let settings = ChunkSettings::lightweight();
             let key = Key::random(32);
             let backend = crate::repository::backend::mem::Mem::new(settings, key.clone(), 4);

@@ -514,7 +514,7 @@
     // 5. last_modification works on a new manifest
     #[test]
     fn creation_works() {
-        smol::run(async {
+        smol::block_on(async {
             let (tempdir, path) = setup();
             let settings = ChunkSettings::lightweight();
             let key = Key::random(32);
@@ -552,7 +552,7 @@
     // 2. Creates and locks a second manifest file
     #[test]
     fn double_creation_works() {
-        smol::run(async {
+        smol::block_on(async {
             let (tempdir, path) = setup();
             // Create the first manifest
             let settings = ChunkSettings::lightweight();
@@ -587,7 +587,7 @@
     // completion.
     #[test]
     fn unlock_on_drop() {
-        smol::run(async {
+        smol::block_on(async {
             let (tempdir, path) = setup();
             // Open an manifest and drop it
             let settings = ChunkSettings::lightweight();
@@ -614,7 +614,7 @@
     #[test]
     fn write_drop_read() {
         use async_io::Timer;
-        smol::run(async {
+        smol::block_on(async {
             let (tempdir, path) = setup();
             let settings = ChunkSettings::lightweight();
             let key = Key::random(32);
@@ -631,7 +631,7 @@
                 archives.push(archive.clone());
                 archive_set.insert(archive);
                 // Pause for a bit to make sure the next one has a sufficently differnt timestamp
-                Timer::new(time::Duration::from_millis(5)).await;
+                Timer::after(time::Duration::from_millis(5)).await;
             }
 
             // write them into the manifest
@@ -660,7 +660,7 @@
     // 2. Attempting to create a manifest without chunk settings errors
     #[test]
     fn manifest_errors() {
-        smol::run(async {
+        smol::block_on(async {
             let settings = ChunkSettings::lightweight();
             let key = Key::random(32);
             // First open a tempdir and create a file in

@@ -195,7 +195,7 @@
 
     #[test]
     fn key_store_load() {
-        smol::run(async {
+        smol::block_on(async {
             let key = Key::random(32);
             let (tempdir, mut mf) = setup(&key).await;
             // Encrypt the key and store it
@@ -225,7 +225,7 @@
     // Test to make sure that attempting to open a repository respects an existing global lock
     #[test]
     fn repository_global_lock() {
-        smol::run(async {
+        smol::block_on(async {
             let tempdir = tempdir().unwrap();
             let path = tempdir.path().to_path_buf();
             let key = Key::random(32);
@@ -248,7 +248,7 @@
     // Tests to make sure that readlocks are created and destroyed properly
     #[test]
     fn read_lock_create_destroy() {
-        smol::run(async {
+        smol::block_on(async {
             let key = Key::random(32);
             let (tempdir, mut mf) = setup(&key).await;
             let lock_path: Arc<PathBuf> = mf.read_lock_path.clone();

@@ -158,7 +158,7 @@
     #[test]
     #[should_panic]
     fn bad_key_access() {
-        smol::run(async {
+        smol::block_on(async {
             let key = Key::random(32);
             let backend = Mem::new(ChunkSettings::lightweight(), key, 8);
             backend.read_key().await.unwrap();
@@ -168,7 +168,7 @@
     /// Checks to make sure setting and retriving a key works
     #[test]
     fn key_sanity() {
-        smol::run(async {
+        smol::block_on(async {
             let key = Key::random(32);
             let backend = Mem::new(ChunkSettings::lightweight(), key.clone(), 8);
             let key_key = [0_u8; 128];

@@ -335,7 +335,7 @@
 
     #[test]
     fn repository_add_read() {
-        smol::run(async {
+        smol::block_on(async {
             let key = Key::random(32);
 
             let size = 7 * 10_u64.pow(3);
@@ -365,7 +365,7 @@
 
     #[test]
     fn double_add() {
-        smol::run(async {
+        smol::block_on(async {
             // Adding the same chunk to the repository twice shouldn't result in
             // two chunks in the repository
             let mut repo = get_repo_mem(Key::random(32));
@@ -386,7 +386,7 @@
     // Ensure writing a chunk with an ID works
     #[test]
     fn chunk_with_id() {
-        smol::run(async {
+        smol::block_on(async {
             let mut repo = get_repo_mem(Key::random(32));
             // generate our chunk
             let size = 7 * 10_u64.pow(3);

@@ -309,7 +309,7 @@
     // 4. Locks the initial index file (index/0.lock)
     #[test]
     fn creation_works() {
-        smol::run(async {
+        smol::block_on(async {
             let (tempdir, path) = setup();
             // Create the index
             let index = Index::open(&path, 4).expect("Index creation failed");
@@ -338,7 +338,7 @@
     // 2. Creates and locks a second index file
     #[test]
     fn double_creation_works() {
-        smol::run(async {
+        smol::block_on(async {
             let (tempdir, path) = setup();
             // Create the first index
             let index1 = Index::open(&path, 4).expect("Index 1 creation failed");
@@ -366,7 +366,7 @@
     // completion.
     #[test]
     fn unlock_on_drop() {
-        smol::run(async {
+        smol::block_on(async {
             let (tempdir, path) = setup();
             // Open an index and drop it
             let mut index = Index::open(&path, 4).expect("Index creation failed");
@@ -387,7 +387,7 @@
     // 4. Chunk count increments properly
     #[test]
     fn write_drop_read() {
-        smol::run(async {
+        smol::block_on(async {
             let (tempdir, path) = setup();
             // Get some transactions to write to the repository
             let mut txs = HashMap::new();

@@ -3,9 +3,9 @@
 use crate::manifest::archive::Extent;
 use crate::manifest::driver::{BackupDriver, RestoreDriver};
 
-use async_lock::RwLock;
 use async_trait::async_trait;
 use blocking::unblock;
+use smol::lock::RwLock;
 use walkdir::WalkDir;
 
 use std::collections::HashMap;
@@ -56,7 +56,7 @@
             let metadata = {
                 let path = entry.path().to_owned();
 
-                unblock!(path.metadata().expect("Failed getting file metatdata"))
+                unblock(move || path.metadata().expect("Failed getting file metatdata")).await
             };
             // FIXME: Making an assuming that the object is either a file or a directory
             let node_type = if metadata.is_file() {
@@ -108,7 +108,7 @@
                     let file = {
                         let path = path.clone();
 
-                        unblock!(File::open(&path).expect("Unable to open file"))
+                        unblock(move || File::open(&path).expect("Unable to open file")).await
                     };
                     file_object.direct_add_range(extent.start, extent.end, file);
                 }
@@ -147,7 +147,10 @@
         if node.is_directory() {
             // If the node is a directory, just create it
             let path = path.to_owned();
-            unblock!(create_dir_all(path).expect("Unable to create directory (restore_object)"));
+            unblock(move || {
+                create_dir_all(path).expect("Unable to create directory (restore_object)")
+            })
+            .await;
             output
         } else {
             // Get the parent directory, and create it if it does not exist
@@ -155,13 +158,16 @@
                 .parent()
                 .expect("Unable to get parent(restore_object)")
                 .to_owned();
-            unblock!(create_dir_all(parent_path).expect("Unable to create parent (restore_object)"));
+            unblock(move || {
+                create_dir_all(parent_path).expect("Unable to create parent (restore_object)")
+            })
+            .await;
             // Check to see if we have any extents
             if let Some(extents) = node.extents.as_ref() {
                 // if the extents are empty, just touch the file and leave it
                 if extents.is_empty() {
                     let path = path.to_owned();
-                    unblock!(File::create(path).expect("Unable to open file"));
+                    unblock(|| File::create(path).expect("Unable to open file")).await;
                     output
                 } else {
                     let mut file_object = RestoreObject::new(node.total_length);
@@ -177,7 +183,7 @@
                 }
             } else {
                 let path = path.to_owned();
-                unblock!(File::create(path).expect("Unable to open file"));
+                unblock(|| File::create(path).expect("Unable to open file")).await;
 
                 output
             }
@@ -218,7 +224,7 @@
 
     #[test]
     fn backup_restore_structure() {
-        smol::run(async {
+        smol::block_on(async {
             let input_dir = make_test_directory();
             let root_path = input_dir.path().to_owned();
 

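The hunks above replace the old `unblock!` macro with the `unblock` function from the `blocking` crate, which takes a closure and returns a future to await. A small hedged sketch of that call shape, runnable on its own; the path and expect message here are just example values:

use blocking::unblock;

fn main() {
    smol::block_on(async {
        let path = std::path::PathBuf::from(".");
        // `unblock` runs the closure on a background thread pool; `move` gives it
        // ownership of `path`, and the result comes back through the awaited future.
        let metadata = unblock(move || path.metadata().expect("Failed getting file metadata")).await;
        println!("is_dir: {}", metadata.is_dir());
    });
}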
@@ -32,7 +32,7 @@
 // Specifically addresses gitlab issue #56
 #[test]
 fn create_multifile_noencryption() {
-    smol::run(async {
+    smol::block_on(async {
         create_multifile_repository(
             Encryption::NoEncryption,
             Compression::NoCompression,
@@ -45,7 +45,7 @@
 // Attempts to reproduce gitlab issue #58
 #[test]
 fn create_lzma_9() {
-    smol::run(async {
+    smol::block_on(async {
         create_multifile_repository(
             Encryption::new_aes256ctr(),
             Compression::LZMA { level: 9 },

@@ -43,6 +43,7 @@
         archive: &ActiveArchive,
         node: Node,
         objects: HashMap<String, BackupObject<T>>,
+        ex: &smol::Executor<'_>,
     ) -> Result<()> {
         if node.is_file() {
             for (namespace, backup_object) in objects {
@@ -59,7 +60,7 @@
                     archive.put_empty(path).await;
                 } else if range_count == 1 {
                     let object = ranges.remove(0).object;
-                    archive.put_object(&chunker, repo, path, object).await?;
+                    archive.put_object(&chunker, repo, path, object, ex).await?;
                 } else {
                     let mut readers: Vec<(Extent, T)> = Vec::new();
                     for object in ranges {
@@ -71,7 +72,7 @@
                         readers.push((extent, object));
                     }
                     archive
-                        .put_sparse_object(&chunker, repo, path, readers)
+                        .put_sparse_object(&chunker, repo, path, readers, ex)
                         .await?;
                 }
             }
@@ -87,9 +88,10 @@
         chunker: C,
         archive: &ActiveArchive,
         node: Node,
+        ex: &smol::Executor<'_>,
     ) -> Result<()> {
         let objects = self.backup_object(node.clone()).await;
-        self.raw_store_object(repo, chunker, archive, node, objects)
+        self.raw_store_object(repo, chunker, archive, node, objects, ex)
             .await
     }
 }

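The driver-trait hunks above thread a borrowed executor through store_object -> raw_store_object -> put_object instead of letting the lower layers create their own runtime. A minimal sketch of that forwarding pattern with placeholder payloads, not asuran's actual signatures:

use smol::Executor;

// The inner helper is the only place that spawns; it receives the executor by
// reference, much like raw_store_object above.
async fn raw_store(ex: &Executor<'_>, payload: Vec<u8>) -> usize {
    ex.spawn(async move { payload.len() }).await
}

// The outer method owns no executor of its own; it simply forwards the borrow.
async fn store(ex: &Executor<'_>, payload: Vec<u8>) -> usize {
    raw_store(ex, payload).await
}

fn main() {
    let ex = Executor::new();
    let n = smol::block_on(ex.run(store(&ex, vec![0u8; 16])));
    assert_eq!(n, 16);
}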
@@ -39,14 +39,21 @@
 
 #[cfg(not(tarpaulin_include))]
 fn main() -> Result<()> {
-    smol::run(async {
+    let ex = smol::Executor::new();
+    let ex: &'static smol::Executor = Box::leak(Box::new(ex));
+    let (_signal, shutdown) = smol::channel::unbounded::<()>();
+    for _ in 0..num_cpus::get_physical() {
+        let shutdown = shutdown.clone();
+        std::thread::spawn(move || futures::executor::block_on(ex.run(shutdown.recv())));
+    }
+    smol::block_on(async {
         // Our task in main is dead simple, we only need to parse the options and
         // match on the subcommand
         let options = Opt::from_args();
         let command = options.command.clone();
         match command {
             Command::New { .. } => new::new(options).await,
-            Command::Store { target, name, .. } => store::store(options, target, name).await,
+            Command::Store { target, name, .. } => store::store(options, target, name, ex).await,
             Command::List { .. } => list::list(options).await,
             Command::Extract {
                 target,
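For reference, a standalone, hedged sketch of the bootstrap that main() now performs above: a leaked 'static executor, one worker thread per physical core driving ex.run, and a channel whose receiver keeps those threads running until the sender is dropped. The work submitted to the executor here is only an example, not the CLI's real subcommands.

fn main() {
    // Leak the executor so worker threads can hold a &'static reference to it.
    let ex: &'static smol::Executor = Box::leak(Box::new(smol::Executor::new()));
    // The receiver never yields a message; it only resolves when the sender is
    // dropped, which acts as the shutdown signal for the worker threads.
    let (signal, shutdown) = smol::channel::unbounded::<()>();
    for _ in 0..num_cpus::get_physical() {
        let shutdown = shutdown.clone();
        std::thread::spawn(move || futures::executor::block_on(ex.run(shutdown.recv())));
    }
    // The main thread blocks on the top-level future; spawned work runs on the pool.
    smol::block_on(async {
        let answer = ex.spawn(async { 2 + 2 }).await;
        println!("answer: {}", answer);
    });
    // Dropping the sender closes the channel, so recv() errors and the threads exit.
    drop(signal);
}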
Files                         Coverage
asuran                        76.46%
asuran-chunker/src            79.50%
asuran-core/src               73.78%
asuran-cli/src/main.rs        100.00%
Project Totals (41 files)     76.16%
coverage:
  status:
    project:
      default:
        target: 80%
        threshold: 5%
        informational: True
    patch:
      default:
        target: 0%
        threshold: 0%
        informational: True