1use super::archive::*;
2use super::consts::*;
3use super::reader::*;
4use super::segmenter::*;
5use crate::ext::io::*;
6use crate::ext::mutex::*;
7use crate::scripts::base::*;
8use crate::types::*;
9use crate::utils::encoding::*;
10use crate::utils::threadpool::ThreadPool;
11use anyhow::Result;
12use sha2::{Digest, Sha256};
13use std::collections::{BTreeMap, HashMap, HashSet};
14use std::io::{Seek, Write};
15use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
16use std::sync::{Arc, Mutex};
17
/// Location and size of a segment that has already been written to the
/// archive body. Stored in `Xp3ArchiveWriter::segments`, keyed by the
/// segment's SHA-256, so identical segments are written only once.
#[derive(Clone)]
struct WrittenSegment {
    // Whether the stored bytes are compressor output (zlib/zstd/zopfli).
    is_compressed: bool,
    // Absolute offset of the segment data within the archive file.
    // 0 is used as a placeholder until the data is actually written.
    start: u64,
    // Uncompressed payload size in bytes.
    original_size: u64,
    // Bytes actually occupied in the archive (== original_size when raw).
    archived_size: u64,
}
25
/// Running counters for a pack operation. All fields are atomics so the
/// packing tasks and compression workers can update them without extra
/// locking; they are read for the report printed by `write_header`.
#[derive(Default)]
struct Stats {
    // Sum of uncompressed segment sizes, duplicates included.
    total_original_size: AtomicU64,
    // Bytes actually written to the archive body for unique segments.
    final_archive_size: AtomicU64,
    // Number of segments processed, duplicates included.
    total_segments: AtomicUsize,
    // Number of distinct segments actually stored.
    unique_segments: AtomicUsize,
    // Archived bytes saved by reusing already-written segments.
    deduplication_savings: AtomicU64,
}
34
35impl std::fmt::Display for Stats {
36 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
37 let total_original_size = self
38 .total_original_size
39 .load(std::sync::atomic::Ordering::Relaxed);
40 let final_archive_size = self
41 .final_archive_size
42 .load(std::sync::atomic::Ordering::Relaxed);
43 let total_segments = self
44 .total_segments
45 .load(std::sync::atomic::Ordering::Relaxed);
46 let unique_segments = self
47 .unique_segments
48 .load(std::sync::atomic::Ordering::Relaxed);
49 let deduplication_savings = self
50 .deduplication_savings
51 .load(std::sync::atomic::Ordering::Relaxed);
52 write!(
53 f,
54 "Total Original Size: {} bytes\nFinal Archive Size: {} bytes\nTotal Segments: {}\nUnique Segments: {}\nDeduplication Savings: {} bytes",
55 total_original_size,
56 final_archive_size,
57 total_segments,
58 unique_segments,
59 deduplication_savings
60 )
61 }
62}
63
/// Streaming XP3 archive writer.
///
/// Files are fed in through `new_file`/`new_file_non_seek`; their contents
/// are optionally segmented, deduplicated by SHA-256, compressed, and
/// appended to `file` by background tasks. `write_header` must be called
/// last to emit the index and patch its offset into the file header.
pub struct Xp3ArchiveWriter<T: Write + Seek> {
    // Output archive, shared with the background packing tasks.
    file: Arc<Mutex<T>>,
    // Content hash -> location of each segment already written (dedup map).
    segments: Arc<Mutex<HashMap<[u8; 32], WrittenSegment>>>,
    // Per-entry metadata, keyed by archive entry name.
    items: Arc<Mutex<BTreeMap<String, ArchiveItem>>>,
    // Pool running one packing task per file being added.
    runner: ThreadPool<Result<()>>,
    compress_files: bool,
    compress_index: bool,
    zlib_compression_level: u32,
    // Optional content segmenter; `None` disables dedup and forces a single
    // sequential writer task.
    segmenter: Option<Arc<Box<dyn Segmenter + Send + Sync>>>,
    stats: Arc<Stats>,
    // Worker count for the per-file compression pool (segmented mode only).
    compress_workers: usize,
    // Hashes whose data is currently being compressed/written; dedup hits
    // poll this set to wait for the final segment location.
    processing_segments: Arc<Mutex<HashSet<[u8; 32]>>>,
    // Use zstd instead of zlib for segment/index compression.
    use_zstd: bool,
    zstd_compression_level: i32,
    // When set, write 0 instead of the file checksum into ADLR chunks.
    no_adler: bool,
    #[cfg(feature = "zopfli")]
    use_zopfli: bool,
    #[cfg(feature = "zopfli")]
    zopfli_iteration_count: std::num::NonZeroU64,
    #[cfg(feature = "zopfli")]
    zopfli_iterations_without_improvement: std::num::NonZeroU64,
    #[cfg(feature = "zopfli")]
    zopfli_maximum_block_splits: u16,
}
88
89impl Xp3ArchiveWriter<std::io::BufWriter<std::fs::File>> {
90 pub fn new(filename: &str, files: &[&str], config: &ExtraConfig) -> Result<Self> {
91 let file = std::fs::File::create(filename)?;
92 let mut file = std::io::BufWriter::new(file);
93 let mut items = BTreeMap::new();
94 for file in files {
95 let item = ArchiveItem {
96 name: file.to_string(),
97 file_hash: 0,
98 original_size: 0,
99 archived_size: 0,
100 segments: Vec::new(),
101 };
102 items.insert(file.to_string(), item);
103 }
104 let segmenter = create_segmenter(config.xp3_segmenter).map(|s| Arc::new(s));
105 file.write_all(XP3_MAGIC)?;
106 file.write_u64(0)?; Ok(Self {
108 file: Arc::new(Mutex::new(file)),
109 segments: Arc::new(Mutex::new(HashMap::new())),
110 items: Arc::new(Mutex::new(items)),
111 runner: ThreadPool::new(
112 if config.xp3_segmenter.is_none() {
113 1
114 } else {
115 config.xp3_pack_workers.max(1)
116 },
117 Some("xp3-writer"),
118 false,
119 )?,
120 compress_files: config.xp3_compress_files,
121 compress_index: config.xp3_compress_index,
122 zlib_compression_level: config.zlib_compression_level,
123 segmenter,
124 stats: Arc::new(Stats::default()),
125 compress_workers: config.xp3_compress_workers.max(1),
126 processing_segments: Arc::new(Mutex::new(HashSet::new())),
127 use_zstd: config.xp3_zstd,
128 zstd_compression_level: config.zstd_compression_level,
129 no_adler: config.xp3_no_adler,
130 #[cfg(feature = "zopfli")]
131 use_zopfli: config.xp3_zopfli,
132 #[cfg(feature = "zopfli")]
133 zopfli_iteration_count: config.zopfli_iteration_count,
134 #[cfg(feature = "zopfli")]
135 zopfli_iterations_without_improvement: config.zopfli_iterations_without_improvement,
136 #[cfg(feature = "zopfli")]
137 zopfli_maximum_block_splits: config.zopfli_maximum_block_splits,
138 })
139 }
140}
141
/// Seekable adapter returned by `Xp3ArchiveWriter::new_file`.
///
/// All writes and seeks go to an in-memory buffer; the buffered bytes are
/// forwarded to the non-seekable `inner` sink only when the `Writer` is
/// dropped.
struct Writer<'a> {
    // Non-seekable destination (the pipe feeding the packing task).
    inner: Box<dyn Write + 'a>,
    // In-memory staging buffer that provides `Seek` support.
    mem: MemWriter,
}
146
147impl std::fmt::Debug for Writer<'_> {
148 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
149 f.debug_struct("Writer").field("mem", &self.mem).finish()
150 }
151}
152
153impl<'a> Write for Writer<'a> {
154 fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
155 self.mem.write(buf)
156 }
157
158 fn flush(&mut self) -> std::io::Result<()> {
159 self.mem.flush()
160 }
161}
162
163impl<'a> Seek for Writer<'a> {
164 fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> {
165 self.mem.seek(pos)
166 }
167
168 fn stream_position(&mut self) -> std::io::Result<u64> {
169 self.mem.stream_position()
170 }
171
172 fn rewind(&mut self) -> std::io::Result<()> {
173 self.mem.rewind()
174 }
175}
176
177impl<'a> Drop for Writer<'a> {
178 fn drop(&mut self) {
179 let _ = self.inner.write_all(&self.mem.data);
180 let _ = self.inner.flush();
181 }
182}
183
184impl<T: Write + Seek + Sync + Send + 'static> Archive for Xp3ArchiveWriter<T> {
185 fn new_file<'a>(
186 &'a mut self,
187 name: &str,
188 size: Option<u64>,
189 ) -> Result<Box<dyn WriteSeek + 'a>> {
190 let inner = self.new_file_non_seek(name, size)?;
191 Ok(Box::new(Writer {
192 inner,
193 mem: MemWriter::new(),
194 }))
195 }
196
    /// Returns a non-seekable sink for the contents of archive entry `name`.
    ///
    /// The returned handle is the write end of an OS pipe. A task submitted
    /// to `self.runner` consumes the read end and performs the actual
    /// packing: with a segmenter configured, the stream is cut into
    /// segments, deduplicated by SHA-256, and optionally compressed on a
    /// dedicated worker pool; without one, the stream is copied into the
    /// archive in a single pass. `name` must have been registered in `new`,
    /// otherwise an error is returned.
    fn new_file_non_seek<'a>(
        &'a mut self,
        name: &str,
        _size: Option<u64>,
    ) -> Result<Box<dyn Write + 'a>> {
        // Without a segmenter the pool is single-threaded and files must not
        // interleave, so finish the previous file before starting this one.
        if self.segmenter.is_none() {
            self.runner.join();
        }
        // Propagate any failure from packing tasks that already completed.
        for err in self.runner.take_results() {
            err?;
        }
        // Shared working copy of this entry's metadata for the packing task.
        let item = {
            let items = self.items.lock_blocking();
            Arc::new(Mutex::new(
                items
                    .get(name)
                    .ok_or_else(|| anyhow::anyhow!("File not found in archive: {}", name))?
                    .clone(),
            ))
        };
        let (reader, writer) = std::io::pipe()?;
        // Wrap the read end; `Reader` also produces the per-file checksum
        // consumed below via `into_checksum`.
        let reader = Reader::new(reader);
        {
            // Clone all shared state the background task captures.
            let file = self.file.clone();
            let segments = self.segments.clone();
            let items = self.items.clone();
            let segmenter = self.segmenter.clone();
            let stats = self.stats.clone();
            let is_compressed = self.compress_files;
            let zlib_compression_level = self.zlib_compression_level;
            // Per-file compression pool; only used in segmented+compressed mode.
            let workers = if self.segmenter.is_some() && is_compressed {
                Some(Arc::new(ThreadPool::<Result<()>>::new(
                    self.compress_workers,
                    Some("xp3-compress"),
                    false,
                )?))
            } else {
                None
            };
            // NOTE(review): "processiong" is a typo for "processing" (local name only).
            let processiong_segments = self.processing_segments.clone();
            let use_zstd = self.use_zstd;
            // NOTE(review): these bindings are feature-gated but their uses
            // further down are not; this likely fails to compile without the
            // "zopfli" feature — confirm the crate always enables it.
            #[cfg(feature = "zopfli")]
            let use_zopfli = self.use_zopfli;
            #[cfg(feature = "zopfli")]
            let zopfli_iteration_count = self.zopfli_iteration_count;
            #[cfg(feature = "zopfli")]
            let zopfli_iterations_without_improvement = self.zopfli_iterations_without_improvement;
            #[cfg(feature = "zopfli")]
            let zopfli_maximum_block_splits = self.zopfli_maximum_block_splits;
            let zstd_compression_level = self.zstd_compression_level;
            self.runner.execute(
                move |_| {
                    let mut reader = reader;
                    // Byte offset of the current segment within the *source* file.
                    let mut offset_in_file = 0u64;
                    if let Some(segmenter) = segmenter {
                        for seg in segmenter.segment(&mut reader) {
                            let seg = seg?;
                            // Content hash used as the deduplication key.
                            let hash: [u8; 32] = Sha256::digest(&seg).into();
                            let seg_offset_in_file = offset_in_file;
                            offset_in_file += seg.len() as u64;
                            // Match on a block expression: Ok(placeholder) when this
                            // task owns writing the segment, Err(existing) on a
                            // dedup hit.
                            let fseg = match {
                                let mut segments = segments.lock_blocking();
                                if let Some(old_seg) = segments.get(&hash) {
                                    Err(old_seg.clone())
                                } else {
                                    // Placeholder with start = 0; the real location
                                    // is published once the data has been written.
                                    let seg_data = WrittenSegment {
                                        is_compressed,
                                        start: 0,
                                        original_size: seg.len() as u64,
                                        archived_size: seg.len() as u64,
                                    };
                                    segments.insert(hash, seg_data.clone());
                                    Ok(seg_data)
                                }
                            } {
                                Ok(mut info) => {
                                    if let Some(workers) = workers.as_ref() {
                                        // Mark the hash as in-flight so dedup hits
                                        // wait for the final location.
                                        // NOTE(review): the placeholder above is
                                        // published *before* this insert; a duplicate
                                        // observed in that window would skip the wait
                                        // loop below and keep start == 0 — looks like
                                        // a race, confirm.
                                        {
                                            let mut processing =
                                                processiong_segments.lock_blocking();
                                            processing.insert(hash);
                                        }
                                        let file = file.clone();
                                        let segments = segments.clone();
                                        let stats = stats.clone();
                                        let item = item.clone();
                                        let processiong_segments = processiong_segments.clone();
                                        // Compress and append on the worker pool so
                                        // segmentation can continue in parallel.
                                        workers.execute(
                                            move |_| {
                                                let data = {
                                                    if use_zopfli {
                                                        let option = zopfli::Options {
                                                            iteration_count: zopfli_iteration_count,
                                                            iterations_without_improvement:
                                                                zopfli_iterations_without_improvement,
                                                            maximum_block_splits:
                                                                zopfli_maximum_block_splits,
                                                        };
                                                        let mut e = zopfli::ZlibEncoder::new(option, zopfli::BlockType::Dynamic, Vec::new())?;
                                                        e.write_all(&seg)?;
                                                        e.finish()?
                                                    } else if use_zstd {
                                                        let mut e = zstd::stream::Encoder::new(
                                                            Vec::new(),
                                                            zstd_compression_level,
                                                        )?;
                                                        e.write_all(&seg)?;
                                                        e.finish()?
                                                    } else {
                                                        let mut e = flate2::write::ZlibEncoder::new(
                                                            Vec::new(),
                                                            flate2::Compression::new(
                                                                zlib_compression_level,
                                                            ),
                                                        );
                                                        e.write_all(&seg)?;
                                                        e.finish()?
                                                    }
                                                };
                                                // Append at EOF while holding the file lock.
                                                let mut file = file.lock_blocking();
                                                let start = file.seek(std::io::SeekFrom::End(0))?;
                                                file.write_all(&data)?;
                                                info.start = start;
                                                info.archived_size = data.len() as u64;
                                                let stats = stats.clone();
                                                stats.total_original_size.fetch_add(
                                                    info.original_size,
                                                    Ordering::Relaxed,
                                                );
                                                stats.final_archive_size.fetch_add(
                                                    info.archived_size,
                                                    Ordering::Relaxed,
                                                );
                                                stats
                                                    .total_segments
                                                    .fetch_add(1, Ordering::Relaxed);
                                                stats
                                                    .unique_segments
                                                    .fetch_add(1, Ordering::Relaxed);
                                                // Publish the final location for dedup hits.
                                                let mut segments = segments.lock_blocking();
                                                segments.insert(hash, info.clone());
                                                let ninfo = Segment {
                                                    is_compressed: info.is_compressed,
                                                    start: info.start,
                                                    offset_in_file: seg_offset_in_file,
                                                    original_size: info.original_size,
                                                    archived_size: info.archived_size,
                                                };
                                                // Attach the segment to this entry; the list is
                                                // re-sorted by offset once the file is done.
                                                let mut item = item.lock_blocking();
                                                item.original_size += ninfo.original_size;
                                                item.archived_size += ninfo.archived_size;
                                                item.segments.push(ninfo);
                                                // Clear the in-flight marker last.
                                                let mut processing =
                                                    processiong_segments.lock_blocking();
                                                processing.remove(&hash);
                                                Ok(())
                                            },
                                            true,
                                        )?;
                                        // The worker attaches the segment itself.
                                        None
                                    } else {
                                        // No compression workers: write the raw segment inline.
                                        {
                                            let mut processing =
                                                processiong_segments.lock_blocking();
                                            processing.insert(hash);
                                        }
                                        let data = seg;
                                        let mut file = file.lock_blocking();
                                        let start = file.seek(std::io::SeekFrom::End(0))?;
                                        file.write_all(&data)?;
                                        info.start = start;
                                        info.archived_size = data.len() as u64;
                                        let stats = stats.clone();
                                        stats
                                            .total_original_size
                                            .fetch_add(info.original_size, Ordering::Relaxed);
                                        stats
                                            .final_archive_size
                                            .fetch_add(info.archived_size, Ordering::Relaxed);
                                        stats.total_segments.fetch_add(1, Ordering::Relaxed);
                                        stats.unique_segments.fetch_add(1, Ordering::Relaxed);
                                        // Publish the final location for dedup hits.
                                        let mut segments = segments.lock_blocking();
                                        segments.insert(hash, info.clone());
                                        let ninfo = Segment {
                                            is_compressed: info.is_compressed,
                                            start: info.start,
                                            offset_in_file: seg_offset_in_file,
                                            original_size: info.original_size,
                                            archived_size: info.archived_size,
                                        };
                                        {
                                            let mut processing =
                                                processiong_segments.lock_blocking();
                                            processing.remove(&hash);
                                        }
                                        Some(ninfo)
                                    }
                                }
                                Err(mut seg_info) => {
                                    // Dedup hit: if another task is still writing this
                                    // segment, poll until it leaves the in-flight set,
                                    // then reload the published location.
                                    let mut need_update = false;
                                    loop {
                                        if {
                                            let processing = processiong_segments.lock_blocking();
                                            !processing.contains(&hash)
                                        } {
                                            break;
                                        }
                                        need_update = true;
                                        std::thread::sleep(std::time::Duration::from_millis(10));
                                    }
                                    if need_update {
                                        seg_info = {
                                            let segments = segments.lock_blocking();
                                            segments
                                                .get(&hash)
                                                .ok_or(anyhow::anyhow!(
                                                    "Failed to get latest segment info."
                                                ))?
                                                .clone()
                                        };
                                    }
                                    let stats = stats.clone();
                                    stats
                                        .total_original_size
                                        .fetch_add(seg_info.original_size, Ordering::Relaxed);
                                    // The segment is reused, so its archived size counts
                                    // as savings rather than new output.
                                    stats
                                        .deduplication_savings
                                        .fetch_add(seg_info.archived_size, Ordering::Relaxed);
                                    stats.total_segments.fetch_add(1, Ordering::Relaxed);
                                    let ninfo = Segment {
                                        is_compressed: seg_info.is_compressed,
                                        start: seg_info.start,
                                        offset_in_file: seg_offset_in_file,
                                        original_size: seg_info.original_size,
                                        archived_size: seg_info.archived_size,
                                    };
                                    Some(ninfo)
                                }
                            };
                            // Segments written inline or deduplicated are attached
                            // here; worker-compressed ones were attached by the worker.
                            if let Some(fseg) = fseg {
                                let mut item = item.lock_blocking();
                                item.original_size += fseg.original_size;
                                item.archived_size += fseg.archived_size;
                                item.segments.push(fseg);
                            }
                        }
                    } else {
                        // Unsegmented mode: stream the whole file as one segment,
                        // optionally through a compressor, directly into the archive.
                        let mut file = file.lock_blocking();
                        let start = file.seek(std::io::SeekFrom::End(0))?;
                        let size = {
                            let mut writer = if is_compressed {
                                if use_zopfli {
                                    let e = zopfli::ZlibEncoder::new(
                                        zopfli::Options {
                                            iteration_count: zopfli_iteration_count,
                                            iterations_without_improvement:
                                                zopfli_iterations_without_improvement,
                                            maximum_block_splits: zopfli_maximum_block_splits,
                                        },
                                        zopfli::BlockType::Dynamic,
                                        &mut *file,
                                    )?;
                                    Box::new(e) as Box<dyn Write>
                                } else if use_zstd {
                                    // NOTE(review): this encoder is dropped at the end
                                    // of the block without `finish()`; unlike flate2,
                                    // zstd's `Encoder` does not finish the frame on
                                    // drop unless `auto_finish()` is used — the output
                                    // may be truncated; confirm.
                                    let e = zstd::stream::Encoder::new(
                                        &mut *file,
                                        zstd_compression_level,
                                    )?;
                                    Box::new(e) as Box<dyn Write>
                                } else {
                                    let e = flate2::write::ZlibEncoder::new(
                                        &mut *file,
                                        flate2::Compression::new(zlib_compression_level),
                                    );
                                    Box::new(e) as Box<dyn Write>
                                }
                            } else {
                                Box::new(&mut *file) as Box<dyn Write>
                            };
                            // `copy` returns the number of *uncompressed* bytes read.
                            std::io::copy(&mut reader, &mut writer)?
                        };
                        let ninfo = Segment {
                            is_compressed,
                            start,
                            offset_in_file: 0,
                            original_size: size,
                            // Compressed size is whatever the encoder emitted
                            // between `start` and the current position.
                            archived_size: if is_compressed {
                                file.stream_position()? - start
                            } else {
                                size
                            },
                        };
                        let mut item = item.lock_blocking();
                        item.original_size += ninfo.original_size;
                        item.archived_size += ninfo.archived_size;
                        let stats = stats.clone();
                        stats
                            .total_original_size
                            .fetch_add(ninfo.original_size, Ordering::Relaxed);
                        stats
                            .final_archive_size
                            .fetch_add(ninfo.archived_size, Ordering::Relaxed);
                        stats.total_segments.fetch_add(1, Ordering::Relaxed);
                        stats.unique_segments.fetch_add(1, Ordering::Relaxed);
                        item.segments.push(ninfo);
                    }
                    // Wait for all compression workers before finalizing the entry.
                    if let Some(workers) = workers {
                        workers.join();
                        for err in workers.take_results() {
                            err?;
                        }
                    }
                    let mut item = item.lock_blocking().to_owned();
                    item.file_hash = reader.into_checksum();
                    // Workers may have pushed segments out of order.
                    item.segments.sort_by_key(|s| s.offset_in_file);
                    let mut items = items.lock_blocking();
                    items.insert(item.name.clone(), item);
                    Ok(())
                },
                true,
            )?;
        }
        Ok(Box::new(writer))
    }
521
    /// Finalizes the archive: joins all packing tasks, then writes the index
    /// (one FILE chunk per entry with INFO/SEGM/ADLR sub-chunks) at the end
    /// of the file and patches its offset into the header placeholder.
    fn write_header(&mut self) -> Result<()> {
        // Make sure every file has been fully packed, surfacing any errors.
        self.runner.join();
        for err in self.runner.take_results() {
            err?;
        }
        let mut file = self.file.lock_blocking();
        // The index goes after all segment data, at the current EOF.
        let index_offset = file.seek(std::io::SeekFrom::End(0))?;
        let mut index_data = MemWriter::new();
        let items = self.items.lock_blocking();
        for (_, item) in items.iter() {
            let mut file_chunk = MemWriter::new();
            // Entry names are stored as UTF-16LE.
            let name = encode_string(Encoding::Utf16LE, &item.name, false)?;
            // INFO payload: u32 flags + 2 * u64 sizes + u16 name length + name
            // bytes; the fixed part is 4 + 8 + 8 + 2 = 22 bytes.
            let info_data_size = name.len() as u64 + 22;
            file_chunk.write_all(CHUNK_INFO)?;
            file_chunk.write_u64(info_data_size)?;
            // Presumably the XP3 "protect" flags field; always 0 here — confirm.
            file_chunk.write_u32(0)?;
            file_chunk.write_u64(item.original_size)?;
            file_chunk.write_u64(item.archived_size)?;
            // Name length in UTF-16 code units, not bytes.
            file_chunk.write_u16(name.len() as u16 / 2)?;
            file_chunk.write_all(&name)?;
            // SEGM payload: 28 bytes per segment (u32 flag + 3 * u64).
            let segm_data_size = item.segments.len() as u64 * 28;
            file_chunk.write_all(CHUNK_SEGM)?;
            file_chunk.write_u64(segm_data_size)?;
            for seg in &item.segments {
                // NOTE(review): zstd-compressed segments are also flagged ZLIB
                // here — presumably the matching reader only treats the flag
                // as "compressed" and detects the codec itself; confirm.
                let flag = if seg.is_compressed {
                    TVP_XP3_SEGM_ENCODE_ZLIB
                } else {
                    TVP_XP3_SEGM_ENCODE_RAW
                };
                file_chunk.write_u32(flag)?;
                file_chunk.write_u64(seg.start)?;
                file_chunk.write_u64(seg.original_size)?;
                file_chunk.write_u64(seg.archived_size)?;
            }
            // ADLR payload: the 4-byte file checksum (0 when disabled).
            let adlr_data_size = 4;
            file_chunk.write_all(CHUNK_ADLR)?;
            file_chunk.write_u64(adlr_data_size)?;
            if self.no_adler {
                file_chunk.write_u32(0)?;
            } else {
                file_chunk.write_u32(item.file_hash)?;
            }
            // Wrap the sub-chunks into a FILE chunk inside the index.
            index_data.write_all(CHUNK_FILE)?;
            let file_chunk = file_chunk.into_inner();
            index_data.write_u64(file_chunk.len() as u64)?;
            index_data.write_all(&file_chunk)?;
        }
        let index_data = index_data.into_inner();
        if self.compress_index {
            // NOTE(review): `use_zopfli` is only declared under the "zopfli"
            // feature, but this use is unguarded; likely needs the feature
            // enabled to compile — confirm.
            let compressed_index = if self.use_zopfli {
                let option = zopfli::Options {
                    iteration_count: self.zopfli_iteration_count,
                    iterations_without_improvement: self.zopfli_iterations_without_improvement,
                    maximum_block_splits: self.zopfli_maximum_block_splits,
                };
                let mut e =
                    zopfli::ZlibEncoder::new(option, zopfli::BlockType::Dynamic, Vec::new())?;
                e.write_all(&index_data)?;
                e.finish()?
            } else if self.use_zstd {
                let mut e = zstd::stream::Encoder::new(Vec::new(), self.zstd_compression_level)?;
                e.write_all(&index_data)?;
                e.finish()?
            } else {
                let mut e = flate2::write::ZlibEncoder::new(
                    Vec::new(),
                    flate2::Compression::new(self.zlib_compression_level),
                );
                e.write_all(&index_data)?;
                e.finish()?
            };
            // NOTE(review): the ZLIB marker is written even for a zstd index;
            // confirm readers accept this.
            file.write_u8(TVP_XP3_INDEX_ENCODE_ZLIB)?;
            // Compressed size, then uncompressed size, then the data itself.
            file.write_u64(compressed_index.len() as u64)?;
            file.write_u64(index_data.len() as u64)?;
            file.write_all(&compressed_index)?;
        } else {
            file.write_u8(TVP_XP3_INDEX_ENCODE_RAW)?;
            file.write_u64(index_data.len() as u64)?;
            file.write_all(&index_data)?;
        }
        // Patch the index offset into the placeholder at byte 11, right after
        // the magic written in `new`.
        file.write_u64_at(11, index_offset)?;
        file.flush()?;
        eprintln!("XP3 Archive Statistics:\n{}", self.stats);
        Ok(())
    }
607}