| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139 |
- //go:build windows || linux
- // +build windows linux
- package wim
- import (
- "encoding/binary"
- "io"
- "github.com/Microsoft/go-winio/wim/lzx"
- )
- const chunkSize = 32768 // Compressed resource chunk size
// compressedReader reads a compressed WIM resource, decompressing
// LZX-compressed chunks on demand and passing stored (uncompressed)
// chunks straight through.
type compressedReader struct {
	r            *io.SectionReader // the whole compressed resource, chunk table included
	d            io.ReadCloser     // reader for the current chunk: lzx decompressor, or pass-through for stored chunks
	chunks       []int64           // offset of each chunk within r (chunks[0] is the first byte after the table)
	curChunk     int               // index of the chunk d is currently positioned in
	originalSize int64             // uncompressed size of the whole resource
}
// newCompressedReader returns a reader for the compressed resource in r,
// whose uncompressed size is originalSize, positioned at the given
// uncompressed byte offset.
//
// The resource begins with a chunk-offset table holding one entry per
// chunk except the first, whose offset is implicitly 0. Entries are
// 32-bit when originalSize fits in 32 bits and 64-bit otherwise, and are
// relative to the end of the table.
func newCompressedReader(r *io.SectionReader, originalSize int64, offset int64) (*compressedReader, error) {
	// Number of chunks, rounding the final partial chunk up.
	nchunks := (originalSize + chunkSize - 1) / chunkSize
	var base int64
	chunks := make([]int64, nchunks)
	if originalSize <= 0xffffffff {
		// 32-bit chunk offsets
		base = (nchunks - 1) * 4 // size of the table itself
		chunks32 := make([]uint32, nchunks-1)
		err := binary.Read(r, binary.LittleEndian, chunks32)
		if err != nil {
			return nil, err
		}
		// Widen into chunks[1:]; chunks[0] stays 0 (implicit first chunk).
		for i, n := range chunks32 {
			chunks[i+1] = int64(n)
		}
	} else {
		// 64-bit chunk offsets
		base = (nchunks - 1) * 8 // size of the table itself
		err := binary.Read(r, binary.LittleEndian, chunks[1:])
		if err != nil {
			return nil, err
		}
	}
	// Rebase table-relative offsets to offsets within r by adding the
	// table size; chunks[0] becomes base, the first byte after the table.
	for i, c := range chunks {
		chunks[i] = c + base
	}
	cr := &compressedReader{
		r:            r,
		chunks:       chunks,
		originalSize: originalSize,
	}
	// Open the chunk containing offset...
	err := cr.reset(int(offset / chunkSize))
	if err != nil {
		return nil, err
	}
	// ...then discard decompressed bytes up to the requested position
	// within that chunk.
	suboff := offset % chunkSize
	if suboff != 0 {
		_, err := io.CopyN(io.Discard, cr.d, suboff)
		if err != nil {
			return nil, err
		}
	}
	return cr, nil
}
- func (r *compressedReader) chunkOffset(n int) int64 {
- if n == len(r.chunks) {
- return r.r.Size()
- }
- return r.chunks[n]
- }
- func (r *compressedReader) chunkSize(n int) int {
- return int(r.chunkOffset(n+1) - r.chunkOffset(n))
- }
- func (r *compressedReader) uncompressedSize(n int) int {
- if n < len(r.chunks)-1 {
- return chunkSize
- }
- size := int(r.originalSize % chunkSize)
- if size == 0 {
- size = chunkSize
- }
- return size
- }
- func (r *compressedReader) reset(n int) error {
- if n >= len(r.chunks) {
- return io.EOF
- }
- if r.d != nil {
- r.d.Close()
- }
- r.curChunk = n
- size := r.chunkSize(n)
- uncompressedSize := r.uncompressedSize(n)
- section := io.NewSectionReader(r.r, r.chunkOffset(n), int64(size))
- if size != uncompressedSize {
- d, err := lzx.NewReader(section, uncompressedSize)
- if err != nil {
- return err
- }
- r.d = d
- } else {
- r.d = io.NopCloser(section)
- }
- return nil
- }
- func (r *compressedReader) Read(b []byte) (int, error) {
- for {
- n, err := r.d.Read(b)
- if err != io.EOF { //nolint:errorlint
- return n, err
- }
- err = r.reset(r.curChunk + 1)
- if err != nil {
- return n, err
- }
- }
- }
- func (r *compressedReader) Close() error {
- var err error
- if r.d != nil {
- err = r.d.Close()
- r.d = nil
- }
- return err
- }
|