decompress.go 2.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139
  1. //go:build windows || linux
  2. // +build windows linux
  3. package wim
  4. import (
  5. "encoding/binary"
  6. "io"
  7. "github.com/Microsoft/go-winio/wim/lzx"
  8. )
// chunkSize is the uncompressed size of each chunk in a compressed WIM
// resource; only the final chunk may hold fewer bytes.
const chunkSize = 32768 // Compressed resource chunk size

// compressedReader streams the uncompressed contents of a compressed WIM
// resource, decompressing chunk by chunk as the caller reads.
type compressedReader struct {
	r            *io.SectionReader // raw compressed resource: chunk offset table followed by chunk data
	d            io.ReadCloser     // decompressor (or pass-through) for the current chunk
	chunks       []int64           // absolute offset of each chunk within r; chunks[0] equals the table size
	curChunk     int               // index of the chunk d is currently reading
	originalSize int64             // total uncompressed size of the resource
}
  17. func newCompressedReader(r *io.SectionReader, originalSize int64, offset int64) (*compressedReader, error) {
  18. nchunks := (originalSize + chunkSize - 1) / chunkSize
  19. var base int64
  20. chunks := make([]int64, nchunks)
  21. if originalSize <= 0xffffffff {
  22. // 32-bit chunk offsets
  23. base = (nchunks - 1) * 4
  24. chunks32 := make([]uint32, nchunks-1)
  25. err := binary.Read(r, binary.LittleEndian, chunks32)
  26. if err != nil {
  27. return nil, err
  28. }
  29. for i, n := range chunks32 {
  30. chunks[i+1] = int64(n)
  31. }
  32. } else {
  33. // 64-bit chunk offsets
  34. base = (nchunks - 1) * 8
  35. err := binary.Read(r, binary.LittleEndian, chunks[1:])
  36. if err != nil {
  37. return nil, err
  38. }
  39. }
  40. for i, c := range chunks {
  41. chunks[i] = c + base
  42. }
  43. cr := &compressedReader{
  44. r: r,
  45. chunks: chunks,
  46. originalSize: originalSize,
  47. }
  48. err := cr.reset(int(offset / chunkSize))
  49. if err != nil {
  50. return nil, err
  51. }
  52. suboff := offset % chunkSize
  53. if suboff != 0 {
  54. _, err := io.CopyN(io.Discard, cr.d, suboff)
  55. if err != nil {
  56. return nil, err
  57. }
  58. }
  59. return cr, nil
  60. }
  61. func (r *compressedReader) chunkOffset(n int) int64 {
  62. if n == len(r.chunks) {
  63. return r.r.Size()
  64. }
  65. return r.chunks[n]
  66. }
  67. func (r *compressedReader) chunkSize(n int) int {
  68. return int(r.chunkOffset(n+1) - r.chunkOffset(n))
  69. }
  70. func (r *compressedReader) uncompressedSize(n int) int {
  71. if n < len(r.chunks)-1 {
  72. return chunkSize
  73. }
  74. size := int(r.originalSize % chunkSize)
  75. if size == 0 {
  76. size = chunkSize
  77. }
  78. return size
  79. }
  80. func (r *compressedReader) reset(n int) error {
  81. if n >= len(r.chunks) {
  82. return io.EOF
  83. }
  84. if r.d != nil {
  85. r.d.Close()
  86. }
  87. r.curChunk = n
  88. size := r.chunkSize(n)
  89. uncompressedSize := r.uncompressedSize(n)
  90. section := io.NewSectionReader(r.r, r.chunkOffset(n), int64(size))
  91. if size != uncompressedSize {
  92. d, err := lzx.NewReader(section, uncompressedSize)
  93. if err != nil {
  94. return err
  95. }
  96. r.d = d
  97. } else {
  98. r.d = io.NopCloser(section)
  99. }
  100. return nil
  101. }
  102. func (r *compressedReader) Read(b []byte) (int, error) {
  103. for {
  104. n, err := r.d.Read(b)
  105. if err != io.EOF { //nolint:errorlint
  106. return n, err
  107. }
  108. err = r.reset(r.curChunk + 1)
  109. if err != nil {
  110. return n, err
  111. }
  112. }
  113. }
  114. func (r *compressedReader) Close() error {
  115. var err error
  116. if r.d != nil {
  117. err = r.d.Close()
  118. r.d = nil
  119. }
  120. return err
  121. }