Mindustry logic execution, map- and schematic-parsing, and rendering
move deflation and inflation into DataRead/DataWrite
| -rw-r--r-- | Cargo.toml            |   2 |
| -rw-r--r-- | src/data/dynamic.rs   |   4 |
| -rw-r--r-- | src/data/mod.rs       | 150 |
| -rw-r--r-- | src/data/schematic.rs | 139 |
| -rw-r--r-- | src/lib.rs            |   1 |
5 files changed, 166 insertions, 130 deletions
@@ -1,6 +1,6 @@ [package] name = "mindus" -version = "1.0.7" +version = "1.0.8" edition = "2021" description = "A library for working with mindustry data formats (eg schematics) (fork of plandustry)" authors = [ diff --git a/src/data/dynamic.rs b/src/data/dynamic.rs index eda0625..b44e68c 100644 --- a/src/data/dynamic.rs +++ b/src/data/dynamic.rs @@ -351,7 +351,7 @@ impl Serializer<DynData> for DynSerializer { } } -#[derive(Clone, Copy, Debug, Eq, PartialEq)] +#[derive(Debug, PartialEq)] pub enum ReadError { Underlying(data::ReadError), Type(u8), @@ -409,7 +409,7 @@ impl Error for ReadError { } } -#[derive(Clone, Copy, Debug, Eq, PartialEq)] +#[derive(Debug, PartialEq)] pub enum WriteError { Underlying(data::WriteError), IntArrayLen(usize), diff --git a/src/data/mod.rs b/src/data/mod.rs index 7653c27..2426f44 100644 --- a/src/data/mod.rs +++ b/src/data/mod.rs @@ -1,4 +1,9 @@ //! all the IO +use flate2::{ + Compress, CompressError, Compression, Decompress, DecompressError, FlushCompress, + FlushDecompress, Status, +}; +use std::collections::HashMap; use std::error::Error; use std::fmt; use std::str::Utf8Error; @@ -9,6 +14,7 @@ pub mod dynamic; pub mod renderer; pub mod schematic; +#[derive(Debug)] pub struct DataRead<'d> { data: &'d [u8], } @@ -59,15 +65,15 @@ impl<'d> DataRead<'d> { have: self.data.len(), }); } - let len = u16::from_be_bytes([self.data[0], self.data[1]]); - let end = 2 + len as usize; + let len = self.read_u16()?; + let end = len as usize; if self.data.len() < end { return Err(ReadError::Underflow { need: end, have: self.data.len(), }); } - let result = std::str::from_utf8(&self.data[2..end])?; + let result = std::str::from_utf8(&self.data[..end])?; self.data = &self.data[end..]; Ok(result) } @@ -95,14 +101,66 @@ impl<'d> DataRead<'d> { self.data = &self.data[len..]; Ok(()) } + + pub fn read_map(&mut self, dst: &mut HashMap<String, String>) -> Result<(), ReadError> { + let n = self.read_u8()?; + for _ in 0..n { + let key = self.read_utf()?; + let 
value = self.read_utf()?; + dst.insert(key.to_owned(), value.to_owned()); + } + Ok(()) + } + + pub fn deflate(&mut self) -> Result<Vec<u8>, ReadError> { + let mut dec = Decompress::new(true); + let mut raw = Vec::<u8>::new(); + raw.reserve(1024); + loop { + let t_in = dec.total_in(); + let t_out = dec.total_out(); + let res = dec.decompress_vec(self.data, &mut raw, FlushDecompress::Finish)?; + if dec.total_in() > t_in { + // we have to advance input every time, decompress_vec only knows the output position + self.data = &self.data[(dec.total_in() - t_in) as usize..]; + } + match res { + // there's no more input (and the flush mode says so), we need to reserve additional space + Status::Ok | Status::BufError => (), + // input was already at the end, so this is referring to the output + Status::StreamEnd => break, + } + if dec.total_in() == t_in && dec.total_out() == t_out { + // protect against looping forever + return Err(ReadError::DecompressStall); + } + raw.reserve(1024); + } + assert_eq!(dec.total_out() as usize, raw.len()); + Ok(raw) + } } -#[derive(Clone, Copy, Debug, Eq, PartialEq)] +#[derive(Debug)] pub enum ReadError { + DecompressStall, + Decompress(DecompressError), Underflow { need: usize, have: usize }, Utf8(Utf8Error), } +impl PartialEq for ReadError { + fn eq(&self, _: &Self) -> bool { + return false; + } +} + +impl From<DecompressError> for ReadError { + fn from(value: DecompressError) -> Self { + Self::Decompress(value) + } +} + impl From<Utf8Error> for ReadError { fn from(err: Utf8Error) -> Self { Self::Utf8(err) @@ -115,6 +173,8 @@ impl fmt::Display for ReadError { Self::Underflow { need, have } => { write!(f, "buffer underflow (expected {need} but got {have})") } + Self::Decompress(..) => f.write_str("zlib decompression failed"), + Self::DecompressStall => f.write_str("decompressor stalled before completion"), Self::Utf8(..) 
=> f.write_str("malformed utf-8 in string"), } } @@ -124,6 +184,7 @@ impl Error for ReadError { fn source(&self) -> Option<&(dyn Error + 'static)> { match self { Self::Utf8(e) => Some(e), + Self::Decompress(e) => Some(e), _ => None, } } @@ -217,6 +278,55 @@ impl<'d> DataWrite<'d> { WriteBuff::Vec(v) => v, } } + + pub fn inflate(self, to: &mut DataWrite) -> Result<(), WriteError> { + // compress into the provided buffer + let WriteBuff::Vec( raw) = self.data else { unreachable!("write buffer not owned") }; + let mut comp = Compress::new(Compression::default(), true); + // compress the immediate buffer into a temp buffer to copy it to buff? no thanks + match to.data { + WriteBuff::Ref { + raw: ref mut dst, + ref mut pos, + } => { + match comp.compress(&raw, &mut dst[*pos..], FlushCompress::Finish)? { + // there's no more input (and the flush mode says so), but we can't resize the output + Status::Ok | Status::BufError => { + return Err(WriteError::CompressEof( + raw.len() - comp.total_in() as usize, + )) + } + Status::StreamEnd => (), + } + } + WriteBuff::Vec(ref mut dst) => { + let mut input = raw.as_ref(); + dst.reserve(1024); + loop { + let t_in = comp.total_in(); + let t_out = comp.total_out(); + let res = comp.compress_vec(input, dst, FlushCompress::Finish)?; + if comp.total_in() > t_in { + // we have to advance input every time, compress_vec only knows the output position + input = &input[(comp.total_in() - t_in) as usize..]; + } + match res { + // there's no more input (and the flush mode says so), we need to reserve additional space + Status::Ok | Status::BufError => (), + // input was already at the end, so this is referring to the output + Status::StreamEnd => break, + } + if comp.total_in() == t_in && comp.total_out() == t_out { + // protect against looping forever + return Err(WriteError::CompressStall); + } + dst.reserve(1024); + } + } + } + assert_eq!(comp.total_in() as usize, raw.len()); + Ok(()) + } } impl Default for DataWrite<'static> { @@ -227,10 
+337,25 @@ impl Default for DataWrite<'static> { } } -#[derive(Clone, Copy, Debug, Eq, PartialEq)] +#[derive(Debug)] pub enum WriteError { Overflow { need: usize, have: usize }, TooLong { len: usize }, + Compress(CompressError), + CompressEof(usize), + CompressStall, +} + +impl From<CompressError> for WriteError { + fn from(value: CompressError) -> Self { + Self::Compress(value) + } +} + +impl PartialEq for WriteError { + fn eq(&self, _: &Self) -> bool { + return false; + } } impl fmt::Display for WriteError { @@ -239,12 +364,25 @@ impl fmt::Display for WriteError { Self::Overflow { need, have } => { write!(f, "buffer overflow (expected {need} but got {have})") } + Self::Compress(..) => f.write_str("zlib compression failed"), + Self::CompressEof(remain) => write!( + f, + "compression overflow with {remain} bytes of input remaining" + ), + Self::CompressStall => f.write_str("compressor stalled before completion"), Self::TooLong { len } => write!(f, "string too long ({len} bytes of {})", u16::MAX), } } } -impl Error for WriteError {} +impl Error for WriteError { + fn source(&self) -> Option<&(dyn Error + 'static)> { + match self { + Self::Compress(e) => Some(e), + _ => None, + } + } +} impl<'d> From<&'d mut [u8]> for DataWrite<'d> { fn from(value: &'d mut [u8]) -> Self { diff --git a/src/data/schematic.rs b/src/data/schematic.rs index 5465498..19026f2 100644 --- a/src/data/schematic.rs +++ b/src/data/schematic.rs @@ -6,10 +6,6 @@ use std::fmt::{self, Write}; use std::iter::FusedIterator; use std::slice::Iter; -use flate2::{ - Compress, CompressError, Compression, Decompress, DecompressError, FlushCompress, - FlushDecompress, Status, -}; use image::RgbaImage; use crate::block::{self, Block, BlockRegistry, Rotation, State}; @@ -1015,72 +1011,45 @@ impl<'l> Serializer<Schematic<'l>> for SchematicSerializer<'l> { if version > 1 { return Err(ReadError::Version(version)); } - let mut dec = Decompress::new(true); - let mut raw = Vec::<u8>::new(); - raw.reserve(1024); - loop 
{ - let t_in = dec.total_in(); - let t_out = dec.total_out(); - let res = dec.decompress_vec(buff.data, &mut raw, FlushDecompress::Finish)?; - if dec.total_in() > t_in { - // we have to advance input every time, decompress_vec only knows the output position - buff.data = &buff.data[(dec.total_in() - t_in) as usize..]; - } - match res { - // there's no more input (and the flush mode says so), we need to reserve additional space - Status::Ok | Status::BufError => (), - // input was already at the end, so this is referring to the output - Status::StreamEnd => break, - } - if dec.total_in() == t_in && dec.total_out() == t_out { - // protect against looping forever - return Err(ReadError::DecompressStall); - } - raw.reserve(1024); - } - assert_eq!(dec.total_out() as usize, raw.len()); - let mut rbuff = DataRead::new(&raw); - let w = rbuff.read_i16()?; - let h = rbuff.read_i16()?; + let mut buff = buff.deflate()?; + let mut buff = DataRead::new(&mut buff); + let w = buff.read_i16()?; + let h = buff.read_i16()?; if w < 0 || h < 0 || w as u16 > MAX_DIMENSION || h as u16 > MAX_DIMENSION { return Err(ReadError::Dimensions(w, h)); } let mut schematic = Schematic::new(w as u16, h as u16); - for _ in 0..rbuff.read_u8()? 
{ - let key = rbuff.read_utf()?; - let value = rbuff.read_utf()?; - schematic.tags.insert(key.to_owned(), value.to_owned()); - } - let num_table = rbuff.read_i8()?; + buff.read_map(&mut schematic.tags)?; + let num_table = buff.read_i8()?; if num_table < 0 { return Err(ReadError::TableSize(num_table)); } - let mut block_table = Vec::<&'l Block>::new(); + let mut block_table = Vec::new(); block_table.reserve(num_table as usize); for _ in 0..num_table { - let name = rbuff.read_utf()?; - match self.0.get(name) { + let name = buff.read_utf()?; + match self.0.get(&name) { None => return Err(ReadError::NoSuchBlock(name.to_owned())), Some(b) => block_table.push(b), } } - let num_blocks = rbuff.read_i32()?; + let num_blocks = buff.read_i32()?; if num_blocks < 0 || num_blocks as u32 > MAX_BLOCKS { return Err(ReadError::BlockCount(num_blocks)); } for _ in 0..num_blocks { - let idx = rbuff.read_i8()?; + let idx = buff.read_i8()?; if idx < 0 || idx as usize >= block_table.len() { return Err(ReadError::BlockIndex(idx, block_table.len())); } - let pos = GridPos::from(rbuff.read_u32()?); + let pos = GridPos::from(buff.read_u32()?); let block = block_table[idx as usize]; let config = if version < 1 { - block.data_from_i32(rbuff.read_i32()?, pos)? + block.data_from_i32(buff.read_i32()?, pos)? } else { - DynSerializer.deserialize(&mut rbuff)? + DynSerializer.deserialize(&mut buff)? 
}; - let rot = Rotation::from(rbuff.read_u8()?); + let rot = Rotation::from(buff.read_u8()?); schematic.set(pos.0, pos.1, block, config, rot)?; } Ok(schematic) @@ -1108,8 +1077,8 @@ impl<'l> Serializer<Schematic<'l>> for SchematicSerializer<'l> { rbuff.write_utf(v)?; } // use string keys here to avoid issues with different block refs with the same name - let mut block_map = HashMap::<&str, u32>::new(); - let mut block_table = Vec::<&str>::new(); + let mut block_map = HashMap::new(); + let mut block_table = Vec::new(); for curr in &data.blocks { if let Entry::Vacant(e) = block_map.entry(curr.block.get_name()) { e.insert(block_table.len() as u32); @@ -1139,52 +1108,7 @@ impl<'l> Serializer<Schematic<'l>> for SchematicSerializer<'l> { num += 1; } assert_eq!(num, data.blocks.len()); - - // compress into the provided buffer - let data::WriteBuff::Vec(raw) = rbuff.data else { unreachable!("write buffer not owned") }; - let mut comp = Compress::new(Compression::default(), true); - // compress the immediate buffer into a temp buffer to copy it to buff? no thanks - match buff.data { - data::WriteBuff::Ref { - raw: ref mut dst, - ref mut pos, - } => { - match comp.compress(&raw, &mut dst[*pos..], FlushCompress::Finish)? 
{ - // there's no more input (and the flush mode says so), but we can't resize the output - Status::Ok | Status::BufError => { - return Err(WriteError::CompressEof( - raw.len() - comp.total_in() as usize, - )) - } - Status::StreamEnd => (), - } - } - data::WriteBuff::Vec(ref mut dst) => { - let mut input = raw.as_ref(); - dst.reserve(1024); - loop { - let t_in = comp.total_in(); - let t_out = comp.total_out(); - let res = comp.compress_vec(input, dst, FlushCompress::Finish)?; - if comp.total_in() > t_in { - // we have to advance input every time, compress_vec only knows the output position - input = &input[(comp.total_in() - t_in) as usize..]; - } - match res { - // there's no more input (and the flush mode says so), we need to reserve additional space - Status::Ok | Status::BufError => (), - // input was already at the end, so this is referring to the output - Status::StreamEnd => break, - } - if comp.total_in() == t_in && comp.total_out() == t_out { - // protect against looping forever - return Err(WriteError::CompressStall); - } - dst.reserve(1024); - } - } - } - assert_eq!(comp.total_in() as usize, raw.len()); + rbuff.inflate(buff)?; Ok(()) } } @@ -1194,8 +1118,6 @@ pub enum ReadError { Read(data::ReadError), Header(u32), Version(u8), - Decompress(DecompressError), - DecompressStall, Dimensions(i16, i16), TableSize(i8), NoSuchBlock(String), @@ -1212,12 +1134,6 @@ impl From<data::ReadError> for ReadError { } } -impl From<DecompressError> for ReadError { - fn from(value: DecompressError) -> Self { - Self::Decompress(value) - } -} - impl From<dynamic::ReadError> for ReadError { fn from(value: dynamic::ReadError) -> Self { Self::ReadState(value) @@ -1242,8 +1158,6 @@ impl fmt::Display for ReadError { Self::Read(..) => f.write_str("failed to read from buffer"), Self::Header(hdr) => write!(f, "incorrect header ({hdr:08X})"), Self::Version(ver) => write!(f, "unsupported version ({ver})"), - Self::Decompress(..) 
=> f.write_str("zlib decompression failed"), - Self::DecompressStall => f.write_str("decompressor stalled before completion"), Self::Dimensions(w, h) => write!(f, "invalid schematic dimensions ({w} * {h})"), Self::TableSize(cnt) => write!(f, "invalid block table size ({cnt})"), Self::NoSuchBlock(name) => write!(f, "unknown block {name:?}"), @@ -1260,7 +1174,6 @@ impl Error for ReadError { fn source(&self) -> Option<&(dyn Error + 'static)> { match self { Self::Read(e) => Some(e), - Self::Decompress(e) => Some(e), Self::BlockConfig(e) => Some(e), Self::ReadState(e) => Some(e), Self::Placement(e) => Some(e), @@ -1276,9 +1189,6 @@ pub enum WriteError { TableSize(usize), StateSerialize(block::SerializeError), WriteState(dynamic::WriteError), - Compress(CompressError), - CompressEof(usize), - CompressStall, } impl From<data::WriteError> for WriteError { @@ -1293,12 +1203,6 @@ impl From<block::SerializeError> for WriteError { } } -impl From<CompressError> for WriteError { - fn from(value: CompressError) -> Self { - Self::Compress(value) - } -} - impl From<dynamic::WriteError> for WriteError { fn from(value: dynamic::WriteError) -> Self { Self::WriteState(value) @@ -1313,12 +1217,6 @@ impl fmt::Display for WriteError { Self::TableSize(len) => write!(f, "block table too long ({len})"), Self::StateSerialize(e) => e.fmt(f), Self::WriteState(..) => f.write_str("failed to write block data"), - Self::Compress(..) 
=> f.write_str("zlib compression failed"), - Self::CompressEof(remain) => write!( - f, - "compression overflow with {remain} bytes of input remaining" - ), - Self::CompressStall => f.write_str("compressor stalled before completion"), } } } @@ -1328,7 +1226,6 @@ impl Error for WriteError { match self { Self::Write(e) => Some(e), Self::StateSerialize(e) => e.source(), - Self::Compress(e) => Some(e), _ => None, } } @@ -15,3 +15,4 @@ pub use block::build_registry; pub use data::dynamic::DynData; pub use data::renderer::Renderer; pub use data::schematic::{Schematic, SchematicSerializer}; +pub use data::Serializer; |