1
use crate::chunker::AsyncChunker;
2
use crate::repository::backend::common::manifest::ManifestTransaction;
3
use crate::repository::{BackendClone, ChunkID, Repository};
4

5
pub use asuran_core::manifest::archive::{Archive, ChunkLocation, Extent};
6
pub use asuran_core::manifest::listing::{Listing, Node, NodeType};
7

8
use chrono::prelude::*;
9
use dashmap::DashMap;
10
use futures::future::join_all;
11
use serde::{Deserialize, Serialize};
12
use serde_cbor::Serializer;
13
use smol::lock::RwLock;
14
use thiserror::Error;
15

16
use std::collections::VecDeque;
17
use std::io::{Read, Write};
18
use std::sync::Arc;
19

20
/// Error for all the things that can go wrong with handling Archives
21
#[derive(Error, Debug)]
22
#[non_exhaustive]
23
pub enum ArchiveError {
24
    #[error("Chunker Error")]
25
    Chunker(#[from] crate::chunker::ChunkerError),
26
    #[error("I/O Error")]
27
    IO(#[from] std::io::Error),
28
    #[error("RepositoryError): {0}")]
29
    Repository(#[from] crate::repository::RepositoryError),
30
    #[error("Failed to deserialize archive")]
31
    ArchiveDeserialization,
32
}
33

34
type Result<T> = std::result::Result<T, ArchiveError>;
35

36
/// A 'heavy' pointer to an `Archive` in a repository.
///
/// Contains the `ChunkID` of the chunk the `Archive` is serialized in, as well as
/// its date of creation.
///
/// Currently also contains the name of the `Archive`, but adding this was a mistake
/// as it leaks information that should not be leaked, so it will be removed soon.
#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq, Hash)]
pub struct StoredArchive {
    /// Pointer to the archive metadata in the repository
    pub id: ChunkID,
    /// Time the archive was started at
    ///
    /// Used to prevent replay attacks
    pub timestamp: DateTime<FixedOffset>,
}
52

53
impl StoredArchive {
54
    /// Loads the archive metadata from the repository and unpacks it for use
55 1
    pub async fn load(&self, repo: &mut Repository<impl BackendClone>) -> Result<ActiveArchive> {
56 1
        let bytes = repo.read_chunk(self.id).await?;
57 1
        let dumb_archive: Archive = serde_cbor::de::from_slice(&bytes[..])
58 1
            .map_err(|_| ArchiveError::ArchiveDeserialization)?;
59 1
        let archive = ActiveArchive::from_archive(dumb_archive);
60 1
        Ok(archive)
61
    }
62

63
    /// Constructs a dummy archive object used for testing
64
    #[cfg(test)]
65
    pub fn dummy_archive() -> StoredArchive {
66
        StoredArchive {
67
            id: ChunkID::random_id(),
68
            timestamp: Local::now().with_timezone(Local::now().offset()),
69
        }
70
    }
71

72
    /// Returns the timestamp of the archive
73 1
    pub fn timestamp(&self) -> DateTime<FixedOffset> {
74 1
        self.timestamp
75
    }
76

77
    /// Returns the pointer to the archive
78 1
    pub fn id(&self) -> ChunkID {
79 1
        self.id
80
    }
81
}
82

83
impl From<ManifestTransaction> for StoredArchive {
84 1
    fn from(item: ManifestTransaction) -> Self {
85
        StoredArchive {
86 1
            id: item.pointer(),
87 1
            timestamp: item.timestamp(),
88
        }
89
    }
90
}
91

92
#[derive(Clone, Debug)]
/// A currently open and able to be modified `Archive`
///
/// This is basically the same thing as an `Archive`, but has async/await aware
/// synchronization types wrapping some shared state, allowing the archive to be
/// used in multiple tasks at once.
pub struct ActiveArchive {
    /// The name of this archive
    ///
    /// Can be used to pull this archive from the manifest later.
    ///
    /// Can be any arbitrary string
    name: String,
    /// Locations of all the chunks of the objects in this archive
    ///
    /// Keyed by the object's namespaced path; shared across clones via `Arc`.
    objects: Arc<DashMap<String, Vec<ChunkLocation>>>,
    /// The namespace this archive puts and gets objects in
    ///
    /// A namespace is a colon separated list of strings.
    ///
    /// The default namespace is :
    ///
    /// Namespaces are stored here as a vector of their parts
    namespace: Vec<String>,
    /// Time stamp is set at archive creation; this is different from the one
    /// set in the `StoredArchive` produced by committing this archive
    timestamp: DateTime<FixedOffset>,
    /// The object listing of the archive, behind an async-aware `RwLock` so
    /// concurrent tasks can read/update it
    listing: Arc<RwLock<Listing>>,
}
121

122
impl ActiveArchive {
123
    /// Creates a new, empty `ActiveArchive`
124 1
    pub fn new(name: &str) -> Self {
125
        ActiveArchive {
126 1
            name: name.to_string(),
127 1
            objects: Arc::new(DashMap::new()),
128 1
            namespace: Vec::new(),
129 1
            timestamp: Local::now().with_timezone(Local::now().offset()),
130 1
            listing: Arc::new(RwLock::new(Listing::default())),
131
        }
132
    }
133

134
    /// Places an object into a archive, as a whole, without regard to sparsity
135
    ///
136
    /// Will read holes as 0s
137
    ///
138
    /// This is implemented as a thin wrapper around `put_sparse_object`
139 1
    pub async fn put_object<R: Read + Send + 'static>(
140
        &mut self,
141
        chunker: &impl AsyncChunker,
142
        repository: &mut Repository<impl BackendClone>,
143
        path: &str,
144
        from_reader: R,
145
        ex: &smol::Executor<'_>,
146
    ) -> Result<()> {
147
        // We take advantage of put_sparse_object's behavior of reading past the given end if the
148
        // given reader is actually longer
149 1
        let extent = Extent { start: 0, end: 0 };
150 1
        let readers = vec![(extent, from_reader)];
151 1
        self.put_sparse_object(chunker, repository, path, readers, ex)
152 0
            .await
153
    }
154

155
    /// Inserts a sparse object into the archive
156
    ///
157
    /// Requires that the object be pre-split into extents
158 1
    pub async fn put_sparse_object<R: Read + Send + 'static>(
159
        &mut self,
160
        chunker: &impl AsyncChunker,
161
        repository: &mut Repository<impl BackendClone>,
162
        path: &str,
163
        from_readers: Vec<(Extent, R)>,
164
        ex: &smol::Executor<'_>,
165
    ) -> Result<()> {
166 1
        let mut locations: Vec<ChunkLocation> = Vec::new();
167 1
        let path = self.canonical_namespace() + path.trim();
168

169 1
        for (extent, read) in from_readers {
170 1
            let max_futs = 100;
171 1
            let mut futs = VecDeque::new();
172 1
            let slices = chunker.async_chunk(read, repository.queue_depth);
173 1
            let mut start = extent.start;
174 1
            while let Ok(result) = slices.recv_async().await {
175 1
                let data = result?;
176 1
                let end = start + (data.len() as u64);
177

178 1
                let mut repository = repository.clone();
179 1
                futs.push_back(ex.spawn(async move {
180 1
                    let id = repository.write_chunk(data).await?.0;
181 1
                    let result: Result<ChunkLocation> = Ok(ChunkLocation {
182 1
                        id,
183 1
                        start,
184 1
                        length: end - start + 1,
185
                    });
186 1
                    result
187
                }));
188 1
                while futs.len() >= max_futs {
189
                    // This unwrap is sound, since we can only be here if futs has elements in it
190 0
                    let loc = futs.pop_front().unwrap().await?;
191 0
                    locations.push(loc);
192
                }
193 1
                start = end + 1;
194
            }
195 1
            let locs = join_all(futs).await;
196 1
            for loc in locs {
197 1
                let loc = loc?;
198 1
                locations.push(loc);
199
            }
200
        }
201

202 1
        self.objects.insert(path.to_string(), locations);
203

204 1
        Ok(())
205
    }
206

207
    /// Inserts an object into the archive without writing any bytes
208 1
    pub async fn put_empty(&mut self, path: &str) {
209 1
        let locations: Vec<ChunkLocation> = Vec::new();
210 1
        self.objects.insert(path.to_string(), locations);
211
    }
212

213
    /// Retreives an object from the archive, without regard to sparsity.
214
    ///
215
    /// Will fill in holes with zeros.
216 1
    pub async fn get_object(
217
        &self,
218
        repository: &mut Repository<impl BackendClone>,
219
        path: &str,
220
        mut restore_to: impl Write,
221
    ) -> Result<()> {
222 1
        let path = self.canonical_namespace() + path.trim();
223
        // Get chunk locations
224
        #[allow(clippy::map_clone)]
225 1
        let locations = self.objects.get(&path.to_string()).map(|x| x.clone());
226 1
        let mut locations = if let Some(locations) = locations {
227 1
            locations
228
        } else {
229 0
            return Ok(());
230
        };
231 1
        locations.sort_unstable();
232 1
        let mut last_index = locations[0].start;
233 1
        for location in &locations {
234 1
            let id = location.id;
235
            // If a chunk is not included, fill the space inbween it and the last with zeros
236 1
            let start = location.start;
237 1
            if start > last_index + 1 {
238 0
                let zero = [0_u8];
239 0
                for _ in last_index + 1..start {
240 0
                    restore_to.write_all(&zero)?;
241
                }
242
            }
243 1
            let bytes = repository.read_chunk(id).await?;
244

245 1
            restore_to.write_all(&bytes)?;
246 1
            last_index = start + location.length - 1;
247
        }
248

249 1
        Ok(())
250
    }
251

252
    /// Retrieve a single extent of an object from the repository
253
    ///
254
    /// Will write past the end of the last chunk ends after the extent
255 1
    pub async fn get_extent(
256
        &self,
257
        repository: &mut Repository<impl BackendClone>,
258
        path: &str,
259
        extent: Extent,
260
        mut restore_to: impl Write,
261
    ) -> Result<()> {
262 1
        let path = self.canonical_namespace() + path.trim();
263

264
        #[allow(clippy::map_clone)]
265 1
        let locations = self.objects.get(&path.to_string()).map(|x| x.clone());
266 1
        let mut locations = if let Some(locations) = locations {
267 1
            locations
268
        } else {
269 0
            return Ok(());
270
        };
271 1
        locations.sort_unstable();
272 1
        let locations = locations
273
            .iter()
274 1
            .filter(|x| x.start >= extent.start && x.start <= extent.end);
275
        // If there are any holes in the extent, fill them in with zeros
276 1
        let mut last_index = extent.start;
277 1
        for location in locations {
278 1
            let id = location.id;
279
            // Perform filling if needed
280 1
            let start = location.start;
281 1
            if start > last_index + 1 {
282 0
                let zero = [0_u8];
283 0
                for _ in last_index + 1..start {
284 0
                    restore_to.write_all(&zero)?;
285
                }
286
            }
287 1
            let bytes = repository.read_chunk(id).await?;
288 1
            restore_to.write_all(&bytes)?;
289 1
            last_index = start + location.length - 1;
290
        }
291

292 1
        Ok(())
293
    }
294

295
    /// Retrieves a sparse object from the repository
296
    ///
297
    /// Will skip over holes
298
    ///
299
    /// Will not write to extents that are not specified
300 0
    pub async fn get_sparse_object(
301
        &self,
302
        repository: &mut Repository<impl BackendClone>,
303
        path: &str,
304
        mut to_writers: Vec<(Extent, impl Write)>,
305
    ) -> Result<()> {
306 0
        for (extent, restore_to) in &mut to_writers {
307 0
            self.get_extent(repository, path, *extent, restore_to)
308 0
                .await?;
309
        }
310 0
        Ok(())
311
    }
312

313
    /// Returns the namespace of this archive in string form
314 1
    pub fn canonical_namespace(&self) -> String {
315 1
        self.namespace.join(":") + ":"
316
    }
317

318
    /// Changes namespace by adding the name to the end of the namespace
319
    ///
320
    /// Returns a new archive
321 1
    pub fn namespace_append(&self, name: &str) -> ActiveArchive {
322 1
        let mut new_namespace = self.namespace.clone();
323 1
        new_namespace.push(name.to_string());
324 1
        let mut archive = self.clone();
325 1
        archive.namespace = new_namespace;
326 0
        archive
327
    }
328

329
    /// Stores archive metatdat in the repository, producing a Stored Archive
330
    ///  object, and consuming the Archive in the process.
331
    ///
332
    /// Returns the key of the serialized archive in the repository
333 1
    pub async fn store(self, repo: &mut Repository<impl BackendClone>) -> StoredArchive {
334 1
        let dumb_archive = self.into_archive().await;
335 1
        let mut bytes = Vec::<u8>::new();
336 1
        dumb_archive
337 1
            .serialize(&mut Serializer::new(&mut bytes))
338
            .expect("Unable to serialize archive.");
339

340 1
        let id = repo
341 1
            .write_chunk(bytes)
342 0
            .await
343 0
            .expect("Unable to write archive metatdata to repository.")
344 0
            .0;
345

346 1
        repo.commit_index().await;
347

348
        StoredArchive {
349
            id,
350 1
            timestamp: dumb_archive.timestamp,
351
        }
352
    }
353

354
    #[cfg(not(tarpaulin_include))]
355
    /// Provides the name of the archive
356
    pub fn name(&self) -> &str {
357
        &self.name
358
    }
359

360
    #[cfg(not(tarpaulin_include))]
361
    /// Provides the timestamp of the archive
362
    pub fn timestamp(&self) -> &DateTime<FixedOffset> {
363
        &self.timestamp
364
    }
365

366
    /// Converts an Archive into an `ActiveArchive`
367 1
    pub fn from_archive(archive: Archive) -> ActiveArchive {
368
        ActiveArchive {
369 1
            name: archive.name,
370 1
            objects: Arc::new(archive.objects.into_iter().collect()),
371 1
            namespace: archive.namespace,
372 1
            timestamp: archive.timestamp,
373 1
            listing: Arc::new(RwLock::new(archive.listing)),
374
        }
375
    }
376

377
    /// Converts self into an Archive
378 1
    pub async fn into_archive(self) -> Archive {
379
        Archive {
380 1
            name: self.name,
381 1
            objects: DashMap::clone(&self.objects).into_iter().collect(),
382 1
            namespace: self.namespace,
383 1
            timestamp: self.timestamp,
384 1
            listing: self.listing.read().await.clone(),
385
        }
386
    }
387

388
    /// Gets a copy of the listing from the archive
389 1
    pub async fn listing(&self) -> Listing {
390 1
        self.listing.read().await.clone()
391
    }
392

393
    /// Replaces the listing with the provided value
394 1
    pub async fn set_listing(&self, listing: Listing) {
395 1
        *self.listing.write().await = listing;
396
    }
397
}
398

399
/// Integration-style tests exercising the archive put/get round trip against
/// an in-memory repository backend.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::chunker::*;
    use crate::repository::backend::mem::Mem;
    use crate::repository::ChunkSettings;
    use crate::repository::Key;
    use rand::prelude::*;
    use std::fs;
    use std::io::{BufReader, Cursor, Seek, SeekFrom};
    use std::path::Path;
    use tempfile::tempdir;

    // Builds a small in-memory repository for tests
    fn get_repo_mem(key: Key) -> Repository<impl BackendClone> {
        let settings = ChunkSettings::lightweight();
        let backend = Mem::new(settings, key.clone(), 4);
        Repository::with(backend, settings, key, 2)
    }

    // Round-trips a single non-sparse object through put_object/get_object and
    // verifies the bytes match.
    #[test]
    fn single_add_get() {
        // Leak the executor so background tasks can hold a 'static reference;
        // a dedicated thread drives it until the shutdown channel drops.
        let ex = smol::Executor::new();
        let ex: &'static smol::Executor = Box::leak(Box::new(ex));
        let (_signal, shutdown) = smol::channel::unbounded::<()>();
        std::thread::spawn(move || futures::executor::block_on(ex.run(shutdown.recv())));

        smol::block_on(async {
            // Fixed seed keeps the test deterministic
            let seed = 0;
            println!("Seed: {}", seed);
            let chunker = FastCDC::default();

            let key = Key::random(32);
            let size = 2 * 2_usize.pow(14);
            let mut data = vec![0_u8; size];
            let mut rand = SmallRng::seed_from_u64(seed);
            rand.fill_bytes(&mut data);
            let mut repo = get_repo_mem(key);

            let mut archive = ActiveArchive::new("test");

            // Write the random data through a real file to exercise file I/O
            let testdir = tempdir().unwrap();
            let input_file_path = testdir.path().join(Path::new("file1"));
            {
                let mut input_file = fs::File::create(input_file_path.clone()).unwrap();
                input_file.write_all(&data).unwrap();
            }
            let input_file = BufReader::new(fs::File::open(input_file_path).unwrap());

            archive
                .put_object(&chunker, &mut repo, "FileOne", input_file, &ex)
                .await
                .unwrap();

            let mut buf = Cursor::new(Vec::<u8>::new());
            archive
                .get_object(&mut repo, "FileOne", &mut buf)
                .await
                .unwrap();

            let output = buf.into_inner();
            println!("Input length: {}", data.len());
            println!("Output length: {}", output.len());

            // Report every differing byte before asserting, to aid debugging
            let mut mismatch = false;
            for i in 0..data.len() {
                if data[i] != output[i] {
                    println!(
                        "Byte {} was different in output. Input val: {:X?} Output val {:X?}",
                        i, data[i], output[i]
                    );

                    mismatch = true;
                }
            }

            assert!(!mismatch);
        });
    }

    // Round-trips a randomly generated sparse object extent-by-extent.
    #[test]
    fn sparse_add_get() {
        let ex = smol::Executor::new();
        let ex: &'static smol::Executor = Box::leak(Box::new(ex));
        let (_signal, shutdown) = smol::channel::unbounded::<()>();
        std::thread::spawn(move || futures::executor::block_on(ex.run(shutdown.recv())));
        smol::block_on(async {
            let seed = 0;
            let chunker: FastCDC = FastCDC::default();
            let key = Key::random(32);
            let mut repo = get_repo_mem(key);

            let mut archive = ActiveArchive::new("test");

            let mut rng = SmallRng::seed_from_u64(seed);
            // Generate a random number of extents from one to ten
            let mut extents: Vec<Extent> = Vec::new();
            let extent_count: usize = rng.gen_range(1, 10);
            // NOTE(review): this second Box::leak re-leaks the already-'static
            // executor reference; it appears redundant but is harmless.
            let ex: &'static smol::Executor = Box::leak(Box::new(ex));
            let mut next_start: u64 = 0;
            let mut final_size: usize = 0;
            for _ in 0..extent_count {
                // Each extent can be between 256 bytes and 16384 bytes long
                let extent_length = rng.gen_range(256, 16384);
                let extent = Extent {
                    start: next_start,
                    end: next_start + extent_length,
                };
                // Keep track of final size as we grow
                final_size = (next_start + extent_length) as usize;
                extents.push(extent);
                // Each extent can be between 256 and 16384 bytes apart
                let jump = rng.gen_range(256, 16384);
                next_start = next_start + extent_length + jump;
            }

            // Create the test data
            let mut test_input = vec![0_u8; final_size];
            // Fill the test vector with random data
            for Extent { start, end } in extents.clone() {
                for i in start..end {
                    test_input[i as usize] = rng.gen();
                }
            }

            // Make the extent list
            let mut extent_list = Vec::new();
            for extent in extents.clone() {
                let data = test_input[extent.start as usize..extent.end as usize].to_vec();
                extent_list.push((extent, Cursor::new(data)));
            }

            // println!("Extent list: {:?}", extent_list);
            // Load data into archive
            archive
                .put_sparse_object(&chunker, &mut repo, "test", extent_list, &ex)
                .await
                .expect("Archive Put Failed");

            // Create output vec
            let test_output = Vec::new();
            println!("Output is a buffer of {} bytes.", final_size);
            let mut cursor = Cursor::new(test_output);
            // Restore each extent individually, seeking to its start first
            for (i, extent) in extents.clone().iter().enumerate() {
                println!("Getting extent #{} : {:?}", i, extent);
                cursor
                    .seek(SeekFrom::Start(extent.start))
                    .expect("Out of bounds");
                archive
                    .get_extent(&mut repo, "test", *extent, &mut cursor)
                    .await
                    .expect("Archive Get Failed");
            }
            let test_output = cursor.into_inner();
            println!("Input is now a buffer of {} bytes.", test_input.len());
            println!("Output is now a buffer of {} bytes.", test_output.len());

            // Print the first point of divergence (with context) before asserting
            for i in 0..test_input.len() {
                if test_output[i] != test_input[i] {
                    println!("Difference at {}", i);
                    println!("Orig: {:?}", &test_input[i - 2..i + 3]);
                    println!("New: {:?}", &test_output[i - 2..i + 3]);
                    break;
                }
            }

            std::mem::drop(repo);

            assert_eq!(test_input, test_output);
        });
    }

    // A fresh archive's namespace canonicalizes to just ":"
    #[test]
    fn default_namespace() {
        let archive = ActiveArchive::new("test");
        let namespace = archive.canonical_namespace();
        assert_eq!(namespace, ":");
    }

    // Appending namespace components builds a colon-joined canonical form
    #[test]
    fn namespace_append() {
        let archive = ActiveArchive::new("test");
        let archive = archive.namespace_append("1");
        let archive = archive.namespace_append("2");
        let namespace = archive.canonical_namespace();
        println!("Namespace: {}", namespace);
        assert_eq!(namespace, "1:2:");
    }

    // Clones of an archive share the object map, so objects inserted through
    // one clone are visible through the other.
    #[test]
    fn namespaced_insertions() {
        let ex = smol::Executor::new();
        let ex: &'static smol::Executor = Box::leak(Box::new(ex));
        let (_signal, shutdown) = smol::channel::unbounded::<()>();
        std::thread::spawn(move || futures::executor::block_on(ex.run(shutdown.recv())));
        smol::block_on(async {
            let chunker = FastCDC::default();
            let key = Key::random(32);

            let mut repo = get_repo_mem(key);

            let obj1 = Cursor::new([1_u8; 32]);
            let obj2 = Cursor::new([2_u8; 32]);

            let mut archive_1 = ActiveArchive::new("test");
            let mut archive_2 = archive_1.clone();

            archive_1
                .put_object(&chunker, &mut repo, "1", obj1.clone(), &ex)
                .await
                .unwrap();
            archive_2
                .put_object(&chunker, &mut repo, "2", obj2.clone(), &ex)
                .await
                .unwrap();

            // Cross-read: each clone fetches the object the other inserted
            let mut restore_1 = Cursor::new(Vec::<u8>::new());
            archive_2
                .get_object(&mut repo, "1", &mut restore_1)
                .await
                .unwrap();

            let mut restore_2 = Cursor::new(Vec::<u8>::new());
            archive_1
                .get_object(&mut repo, "2", &mut restore_2)
                .await
                .unwrap();

            let obj1 = obj1.into_inner();
            let obj2 = obj2.into_inner();

            let restore1 = restore_1.into_inner();
            let restore2 = restore_2.into_inner();

            assert_eq!(&obj1[..], &restore1[..]);
            assert_eq!(&obj2[..], &restore2[..]);
        });
    }

    // Full lifecycle: put an object, store the archive, reload it from the
    // repository, and read the object back through the reloaded archive.
    #[test]
    fn commit_and_load() {
        let ex = smol::Executor::new();
        let ex: &'static smol::Executor = Box::leak(Box::new(ex));
        let (_signal, shutdown) = smol::channel::unbounded::<()>();
        std::thread::spawn(move || futures::executor::block_on(ex.run(shutdown.recv())));
        smol::block_on(async {
            let chunker = FastCDC::default();
            let key = Key::random(32);

            let mut repo = get_repo_mem(key);
            let mut obj1 = [0_u8; 32];
            for i in 0..obj1.len() {
                obj1[i] = i as u8;
            }

            let obj1 = Cursor::new(obj1);

            let mut archive = ActiveArchive::new("test");
            archive
                .put_object(&chunker, &mut repo, "1", obj1.clone(), &ex)
                .await
                .expect("Unable to put object in archive");

            let stored_archive = archive.store(&mut repo).await;

            let archive = stored_archive
                .load(&mut repo)
                .await
                .expect("Unable to load archive from repository");

            let mut obj_restore = Cursor::new(Vec::new());
            archive
                .get_object(&mut repo, "1", &mut obj_restore)
                .await
                .expect("Unable to restore object from archive");

            assert_eq!(&obj1.into_inner()[..], &obj_restore.into_inner()[..]);
        });
    }
}

Read our documentation on viewing source code .

Loading