// wrappers.go
  1. package storage
  2. import (
  3. "context"
  4. "io"
  5. "os"
  6. g "github.com/anacrolix/generics"
  7. "github.com/anacrolix/missinggo/v2"
  8. "github.com/anacrolix/torrent/metainfo"
  9. )
  10. type Client struct {
  11. ci ClientImpl
  12. }
  13. func NewClient(cl ClientImpl) *Client {
  14. return &Client{cl}
  15. }
  16. func (cl Client) OpenTorrent(
  17. ctx context.Context,
  18. info *metainfo.Info,
  19. infoHash metainfo.Hash,
  20. ) (*Torrent, error) {
  21. t, err := cl.ci.OpenTorrent(ctx, info, infoHash)
  22. if err != nil {
  23. return nil, err
  24. }
  25. return &Torrent{t}, nil
  26. }
// Torrent wraps a TorrentImpl, adding piece accessors that pair each
// PieceImpl with its metainfo.Piece.
type Torrent struct {
	TorrentImpl
}
// Piece returns the wrapped piece for p, deriving the piece hash from the
// metainfo's v1 piece hashes.
//
// NOTE(review): p.V1Hash() yields an option that is unwrapped unconditionally
// here — presumably this panics for pure v2 torrents, which have no v1 piece
// hashes; hence the deprecation.
//
// Deprecated: Use PieceWithHash, as this doesn't work with pure v2 torrents.
func (t Torrent) Piece(p metainfo.Piece) Piece {
	return t.PieceWithHash(p, g.Some(p.V1Hash().Unwrap().Bytes()))
}
  34. func (t Torrent) PieceWithHash(p metainfo.Piece, pieceHash g.Option[[]byte]) Piece {
  35. var pieceImpl PieceImpl
  36. if t.TorrentImpl.PieceWithHash != nil {
  37. pieceImpl = t.TorrentImpl.PieceWithHash(p, pieceHash)
  38. } else {
  39. pieceImpl = t.TorrentImpl.Piece(p)
  40. }
  41. return Piece{pieceImpl, p}
  42. }
// Piece pairs a PieceImpl with the piece's metainfo, which provides the
// piece length used for bounds clamping in ReadAt and WriteAt.
type Piece struct {
	PieceImpl
	// mip supplies metainfo-derived properties of this piece (notably Length).
	mip metainfo.Piece
}

// Compile-time assertion: Piece implements io.WriterTo (see WriteTo below).
var _ io.WriterTo = Piece{}
  48. // Why do we have this wrapper? Well PieceImpl doesn't implement io.Reader, so we can't let io.Copy
  49. // and friends check for io.WriterTo and fallback for us since they expect an io.Reader.
  50. func (p Piece) WriteTo(w io.Writer) (int64, error) {
  51. if i, ok := p.PieceImpl.(io.WriterTo); ok {
  52. return i.WriteTo(w)
  53. }
  54. n := p.mip.Length()
  55. r := io.NewSectionReader(p, 0, n)
  56. return io.CopyN(w, r, n)
  57. }
  58. func (p Piece) WriteAt(b []byte, off int64) (n int, err error) {
  59. // Callers should not be writing to completed pieces, but it's too
  60. // expensive to be checking this on every single write using uncached
  61. // completions.
  62. // c := p.Completion()
  63. // if c.Ok && c.Complete {
  64. // err = errors.New("piece already completed")
  65. // return
  66. // }
  67. if off+int64(len(b)) > p.mip.Length() {
  68. panic("write overflows piece")
  69. }
  70. b = missinggo.LimitLen(b, p.mip.Length()-off)
  71. return p.PieceImpl.WriteAt(b, off)
  72. }
// ReadAt reads from the piece at offset off, clamping the read to the piece's
// bounds before delegating to the underlying PieceImpl. A short read ending
// in io.EOF before the piece's end marks the piece not complete.
func (p Piece) ReadAt(b []byte, off int64) (n int, err error) {
	// Negative offsets are invalid per the io.ReaderAt contract.
	if off < 0 {
		err = os.ErrInvalid
		return
	}
	// Reads starting at or beyond the end of the piece are EOF.
	if off >= p.mip.Length() {
		err = io.EOF
		return
	}
	// Clamp so the implementation never sees a request extending past the piece.
	b = missinggo.LimitLen(b, p.mip.Length()-off)
	if len(b) == 0 {
		return
	}
	n, err = p.PieceImpl.ReadAt(b, off)
	if n > len(b) {
		// The implementation claimed more bytes than the buffer holds: a bug.
		panic(n)
	}
	if n == 0 && err == nil {
		// io.ReaderAt must not return (0, nil) for a non-empty buffer; io.Copy
		// would loop forever on it.
		panic("io.Copy will get stuck")
	}
	off += int64(n)
	// Doing this here may be inaccurate. There's legitimate reasons we may fail to read while the
	// data is still there, such as too many open files. There should probably be a specific error
	// to return if the data has been lost.
	if off < p.mip.Length() {
		if err == io.EOF {
			// EOF before the piece's end: treat the data as missing so the
			// piece can be fetched again.
			p.MarkNotComplete()
		}
	}
	return
}