2017-10-28 21:01:34 +02:00
|
|
|
package vfs
|
2017-05-02 23:35:07 +02:00
|
|
|
|
|
|
|
import (
|
2019-06-17 10:34:30 +02:00
|
|
|
"context"
|
2021-11-04 11:12:57 +01:00
|
|
|
"errors"
|
|
|
|
"fmt"
|
2017-05-02 23:35:07 +02:00
|
|
|
"io"
|
2017-10-29 22:11:17 +01:00
|
|
|
"os"
|
2017-05-02 23:35:07 +02:00
|
|
|
"sync"
|
2019-11-04 13:16:36 +01:00
|
|
|
"time"
|
2017-05-02 23:35:07 +02:00
|
|
|
|
2019-07-28 19:47:38 +02:00
|
|
|
"github.com/rclone/rclone/fs"
|
|
|
|
"github.com/rclone/rclone/fs/accounting"
|
|
|
|
"github.com/rclone/rclone/fs/chunkedreader"
|
|
|
|
"github.com/rclone/rclone/fs/hash"
|
2017-05-02 23:35:07 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
// ReadFileHandle is an open for read file handle on a File
//
// Reads are routed through an accounting.Account and optionally hashed
// so the download can be verified against the source object on close.
type ReadFileHandle struct {
	baseHandle
	done        func(ctx context.Context, err error) // finishes the accounting transfer on close
	mu          sync.Mutex                           // protects the state below
	cond        sync.Cond                            // cond lock for out of sequence reads
	r           *accounting.Account                  // accounted reader the data is pulled from
	size        int64                                // size of the object (0 for unknown length)
	offset      int64                                // offset of read of o
	roffset     int64                                // offset of Read() calls
	file        *File                                // file this handle reads from
	hash        *hash.MultiHasher                    // accumulates hashes of data read (nil if checksumming disabled)
	remote      string                               // remote path of the object, used for logging
	closed      bool                                 // set if handle has been closed
	readCalled  bool                                 // set if read has been called
	noSeek      bool                                 // set if seeking is disallowed (Seek/seek return ESPIPE)
	sizeUnknown bool                                 // set if size of source is not known
	opened      bool                                 // set once the object has actually been opened
}
|
|
|
|
|
2017-10-27 22:41:34 +02:00
|
|
|
// Check interfaces are satisfied at compile time
var (
	_ io.Reader   = (*ReadFileHandle)(nil)
	_ io.ReaderAt = (*ReadFileHandle)(nil)
	_ io.Seeker   = (*ReadFileHandle)(nil)
	_ io.Closer   = (*ReadFileHandle)(nil)
)
|
|
|
|
|
2018-03-01 16:50:23 +01:00
|
|
|
func newReadFileHandle(f *File) (*ReadFileHandle, error) {
|
2018-01-12 17:30:54 +01:00
|
|
|
var mhash *hash.MultiHasher
|
2017-05-11 09:33:45 +02:00
|
|
|
var err error
|
2018-03-01 16:50:23 +01:00
|
|
|
o := f.getObject()
|
2020-04-14 19:03:45 +02:00
|
|
|
if !f.VFS().Opt.NoChecksum {
|
2019-11-04 11:12:56 +01:00
|
|
|
hashes := hash.NewHashSet(o.Fs().Hashes().GetOne()) // just pick one hash
|
|
|
|
mhash, err = hash.NewMultiHasherTypes(hashes)
|
2017-05-08 18:47:22 +02:00
|
|
|
if err != nil {
|
|
|
|
fs.Errorf(o.Fs(), "newReadFileHandle hash error: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-02 23:35:07 +02:00
|
|
|
fh := &ReadFileHandle{
|
2019-09-14 14:09:07 +02:00
|
|
|
remote: o.Remote(),
|
2020-04-14 19:03:45 +02:00
|
|
|
noSeek: f.VFS().Opt.NoSeek,
|
2019-09-14 14:09:07 +02:00
|
|
|
file: f,
|
|
|
|
hash: mhash,
|
|
|
|
size: nonNegative(o.Size()),
|
|
|
|
sizeUnknown: o.Size() < 0,
|
2017-05-02 23:35:07 +02:00
|
|
|
}
|
2022-07-28 18:35:14 +02:00
|
|
|
fh.cond = sync.Cond{L: &fh.mu}
|
2017-05-02 23:35:07 +02:00
|
|
|
return fh, nil
|
|
|
|
}
|
|
|
|
|
2017-05-11 09:33:45 +02:00
|
|
|
// openPending opens the file if there is a pending open
|
|
|
|
// call with the lock held
|
|
|
|
func (fh *ReadFileHandle) openPending() (err error) {
|
|
|
|
if fh.opened {
|
|
|
|
return nil
|
|
|
|
}
|
2018-03-01 16:50:23 +01:00
|
|
|
o := fh.file.getObject()
|
2020-04-14 19:03:45 +02:00
|
|
|
r, err := chunkedreader.New(context.TODO(), o, int64(fh.file.VFS().Opt.ChunkSize), int64(fh.file.VFS().Opt.ChunkSizeLimit)).Open()
|
2017-05-11 09:33:45 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-07-18 12:13:54 +02:00
|
|
|
tr := accounting.GlobalStats().NewTransfer(o)
|
2019-07-16 13:56:20 +02:00
|
|
|
fh.done = tr.Done
|
2020-06-04 16:09:03 +02:00
|
|
|
fh.r = tr.Account(context.TODO(), r).WithBuffer() // account the transfer
|
2017-05-11 09:33:45 +02:00
|
|
|
fh.opened = true
|
2019-07-16 13:56:20 +02:00
|
|
|
|
2017-05-11 09:33:45 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-05-09 12:29:02 +02:00
|
|
|
// String converts it to printable
|
|
|
|
func (fh *ReadFileHandle) String() string {
|
|
|
|
if fh == nil {
|
|
|
|
return "<nil *ReadFileHandle>"
|
|
|
|
}
|
2017-11-03 17:11:44 +01:00
|
|
|
fh.mu.Lock()
|
|
|
|
defer fh.mu.Unlock()
|
2017-05-09 12:29:02 +02:00
|
|
|
if fh.file == nil {
|
|
|
|
return "<nil *ReadFileHandle.file>"
|
|
|
|
}
|
|
|
|
return fh.file.String() + " (r)"
|
|
|
|
}
|
|
|
|
|
Spelling fixes
Fix spelling of: above, already, anonymous, associated,
authentication, bandwidth, because, between, blocks, calculate,
candidates, cautious, changelog, cleaner, clipboard, command,
completely, concurrently, considered, constructs, corrupt, current,
daemon, dependencies, deprecated, directory, dispatcher, download,
eligible, ellipsis, encrypter, endpoint, entrieslist, essentially,
existing writers, existing, expires, filesystem, flushing, frequently,
hierarchy, however, implementation, implements, inaccurate,
individually, insensitive, longer, maximum, metadata, modified,
multipart, namedirfirst, nextcloud, obscured, opened, optional,
owncloud, pacific, passphrase, password, permanently, persimmon,
positive, potato, protocol, quota, receiving, recommends, referring,
requires, revisited, satisfied, satisfies, satisfy, semver,
serialized, session, storage, strategies, stringlist, successful,
supported, surprise, temporarily, temporary, transactions, unneeded,
update, uploads, wrapped
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-09 02:17:24 +02:00
|
|
|
// Node returns the Node associated with this - satisfies Noder interface
|
2017-05-02 23:35:07 +02:00
|
|
|
func (fh *ReadFileHandle) Node() Node {
|
2017-11-03 17:11:44 +01:00
|
|
|
fh.mu.Lock()
|
|
|
|
defer fh.mu.Unlock()
|
2017-05-02 23:35:07 +02:00
|
|
|
return fh.file
|
|
|
|
}
|
|
|
|
|
|
|
|
// seek to a new offset
//
// It tries progressively cheaper strategies: first discarding buffered
// data, then an fs.RangeSeeker seek on the existing reader, and finally
// closing and re-opening the object at the new offset.
//
// if reopen is true, then we won't attempt to use an io.Seeker interface
//
// Must be called with fh.mu held
func (fh *ReadFileHandle) seek(offset int64, reopen bool) (err error) {
	if fh.noSeek {
		return ESPIPE
	}
	// any seek invalidates the running hash of sequentially read data
	fh.hash = nil
	if !reopen {
		ar := fh.r.GetAsyncReader()
		// try to fulfill the seek with buffer discard
		if ar != nil && ar.SkipBytes(int(offset-fh.offset)) {
			fh.offset = offset
			return nil
		}
	}
	fh.r.StopBuffering() // stop the background reading first
	oldReader := fh.r.GetReader()
	r, ok := oldReader.(*chunkedreader.ChunkedReader)
	if !ok {
		// unexpected reader type - fall back to a full reopen
		fs.Logf(fh.remote, "ReadFileHandle.Read expected reader to be a ChunkedReader, got %T", oldReader)
		reopen = true
	}
	if !reopen {
		// in-place range seek on the existing chunked reader
		fs.Debugf(fh.remote, "ReadFileHandle.seek from %d to %d (fs.RangeSeeker)", fh.offset, offset)
		_, err = r.RangeSeek(context.TODO(), offset, io.SeekStart, -1)
		if err != nil {
			fs.Debugf(fh.remote, "ReadFileHandle.Read fs.RangeSeeker failed: %v", err)
			return err
		}
	} else {
		fs.Debugf(fh.remote, "ReadFileHandle.seek from %d to %d", fh.offset, offset)
		// close old one
		err = oldReader.Close()
		if err != nil {
			// close failure is logged but not fatal - we are replacing the reader anyway
			fs.Debugf(fh.remote, "ReadFileHandle.Read seek close old failed: %v", err)
		}
		// re-open with a seek
		o := fh.file.getObject()
		r = chunkedreader.New(context.TODO(), o, int64(fh.file.VFS().Opt.ChunkSize), int64(fh.file.VFS().Opt.ChunkSizeLimit))
		_, err := r.Seek(offset, 0)
		if err != nil {
			fs.Debugf(fh.remote, "ReadFileHandle.Read seek failed: %v", err)
			return err
		}
		r, err = r.Open()
		if err != nil {
			fs.Debugf(fh.remote, "ReadFileHandle.Read seek failed: %v", err)
			return err
		}
	}
	// swap the new reader into the account and record the new position
	fh.r.UpdateReader(context.TODO(), r)
	fh.offset = offset
	return nil
}
|
|
|
|
|
2017-11-02 19:22:26 +01:00
|
|
|
// Seek the file - returns ESPIPE if seeking isn't possible
|
2017-10-27 22:41:34 +02:00
|
|
|
func (fh *ReadFileHandle) Seek(offset int64, whence int) (n int64, err error) {
|
2017-11-03 17:11:44 +01:00
|
|
|
fh.mu.Lock()
|
|
|
|
defer fh.mu.Unlock()
|
2017-11-02 19:22:26 +01:00
|
|
|
if fh.noSeek {
|
|
|
|
return 0, ESPIPE
|
|
|
|
}
|
2017-11-20 18:57:13 +01:00
|
|
|
size := fh.size
|
2017-10-27 22:41:34 +02:00
|
|
|
switch whence {
|
2018-04-06 20:53:06 +02:00
|
|
|
case io.SeekStart:
|
2017-10-27 22:41:34 +02:00
|
|
|
fh.roffset = 0
|
2018-04-06 20:53:06 +02:00
|
|
|
case io.SeekEnd:
|
2017-10-27 22:41:34 +02:00
|
|
|
fh.roffset = size
|
|
|
|
}
|
|
|
|
fh.roffset += offset
|
|
|
|
// we don't check the offset - the next Read will
|
|
|
|
return fh.roffset, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadAt reads len(p) bytes into p starting at offset off in the
|
|
|
|
// underlying input source. It returns the number of bytes read (0 <=
|
|
|
|
// n <= len(p)) and any error encountered.
|
|
|
|
//
|
|
|
|
// When ReadAt returns n < len(p), it returns a non-nil error
|
|
|
|
// explaining why more bytes were not returned. In this respect,
|
|
|
|
// ReadAt is stricter than Read.
|
|
|
|
//
|
|
|
|
// Even if ReadAt returns n < len(p), it may use all of p as scratch
|
|
|
|
// space during the call. If some data is available but not len(p)
|
|
|
|
// bytes, ReadAt blocks until either all the data is available or an
|
|
|
|
// error occurs. In this respect ReadAt is different from Read.
|
|
|
|
//
|
|
|
|
// If the n = len(p) bytes returned by ReadAt are at the end of the
|
|
|
|
// input source, ReadAt may return either err == EOF or err == nil.
|
|
|
|
//
|
|
|
|
// If ReadAt is reading from an input source with a seek offset,
|
|
|
|
// ReadAt should not affect nor be affected by the underlying seek
|
|
|
|
// offset.
|
|
|
|
//
|
|
|
|
// Clients of ReadAt can execute parallel ReadAt calls on the same
|
|
|
|
// input source.
|
|
|
|
//
|
|
|
|
// Implementations must not retain p.
|
|
|
|
func (fh *ReadFileHandle) ReadAt(p []byte, off int64) (n int, err error) {
|
2017-05-02 23:35:07 +02:00
|
|
|
fh.mu.Lock()
|
|
|
|
defer fh.mu.Unlock()
|
2017-11-03 17:11:44 +01:00
|
|
|
return fh.readAt(p, off)
|
|
|
|
}
|
|
|
|
|
2020-05-18 17:52:56 +02:00
|
|
|
// This waits for *poff to equal off or aborts after the timeout.
//
// It is used to briefly hold back an out-of-sequence read in the hope
// that the in-sequence reads catch up to off first.
//
// Waits here potentially affect all seeks so need to keep them short.
//
// Call with fh.mu Locked
func waitSequential(what string, remote string, cond *sync.Cond, maxWait time.Duration, poff *int64, off int64) {
	var (
		timeout = time.NewTimer(maxWait)
		done    = make(chan struct{})
		abort   = false
	)
	// Watcher goroutine: on timeout it sets abort and wakes all
	// waiters; on done it just exits.
	go func() {
		select {
		case <-timeout.C:
			// take the lock to make sure that cond.Wait() is called before
			// cond.Broadcast. NB cond.L == mu
			cond.L.Lock()
			// set abort flag and give all the waiting goroutines a kick on timeout
			abort = true
			fs.Debugf(remote, "aborting in-sequence %s wait, off=%d", what, off)
			cond.Broadcast()
			cond.L.Unlock()
		case <-done:
		}
	}()
	// cond.Wait releases the lock while blocked, so in-sequence reads
	// can make progress and advance *poff.
	for *poff != off && !abort {
		fs.Debugf(remote, "waiting for in-sequence %s to %d for %v", what, off, maxWait)
		cond.Wait()
	}
	// tidy up end timer
	close(done)
	timeout.Stop()
	if *poff != off {
		fs.Debugf(remote, "failed to wait for in-sequence %s to %d", what, off)
	}
}
|
|
|
|
|
2017-11-03 17:11:44 +01:00
|
|
|
// Implementation of ReadAt - call with lock held
//
// Opens the object if the open is still pending, waits briefly for
// slightly out-of-sequence reads to come into sequence, seeks if
// needed, then reads with low level retries (reopening the source on
// each retry). Data read in sequence is fed to the hasher for the
// end-of-file checksum verification.
func (fh *ReadFileHandle) readAt(p []byte, off int64) (n int, err error) {
	// defer log.Trace(fh.remote, "p[%d], off=%d", len(p), off)("n=%d, err=%v", &n, &err)
	err = fh.openPending() // FIXME pending open could be more efficient in the presence of seek (and retries)
	if err != nil {
		return 0, err
	}
	// fs.Debugf(fh.remote, "ReadFileHandle.Read size %d offset %d", reqSize, off)
	if fh.closed {
		fs.Errorf(fh.remote, "ReadFileHandle.Read error: %v", EBADF)
		return 0, ECLOSED
	}
	// If the read is a small distance ahead of the current offset,
	// wait a short while for the in-sequence reads to catch up rather
	// than seeking immediately.
	maxBuf := 1024 * 1024
	if len(p) < maxBuf {
		maxBuf = len(p)
	}
	if gap := off - fh.offset; gap > 0 && gap < int64(8*maxBuf) {
		waitSequential("read", fh.remote, &fh.cond, fh.file.VFS().Opt.ReadWait, &fh.offset, off)
	}
	doSeek := off != fh.offset
	if doSeek && fh.noSeek {
		return 0, ESPIPE
	}
	var newOffset int64
	retries := 0
	reqSize := len(p)
	doReopen := false
	lowLevelRetries := fs.GetConfig(context.TODO()).LowLevelRetries
	// Retry loop: on failure, force a seek (and a reopen) and try again
	// up to lowLevelRetries times.
	for {
		if doSeek {
			// Are we attempting to seek beyond the end of the
			// file - if so just return EOF leaving the underlying
			// file in an unchanged state.
			if off >= fh.size {
				fs.Debugf(fh.remote, "ReadFileHandle.Read attempt to read beyond end of file: %d > %d", off, fh.size)
				return 0, io.EOF
			}
			// Otherwise do the seek
			err = fh.seek(off, doReopen)
		} else {
			err = nil
		}
		if err == nil {
			if reqSize > 0 {
				fh.readCalled = true
			}
			n, err = io.ReadFull(fh.r, p)
			newOffset = fh.offset + int64(n)
			// if err == nil && rand.Intn(10) == 0 {
			// 	err = errors.New("random error")
			// }
			if err == nil {
				break
			} else if (err == io.ErrUnexpectedEOF || err == io.EOF) && (newOffset == fh.size || fh.sizeUnknown) {
				if fh.sizeUnknown {
					// size is now known since we have read to the end
					fh.sizeUnknown = false
					fh.size = newOffset
				}
				// Have read to end of file - reset error
				err = nil
				break
			}
		}
		if retries >= lowLevelRetries {
			break
		}
		retries++
		fs.Errorf(fh.remote, "ReadFileHandle.Read error: low level retry %d/%d: %v", retries, lowLevelRetries, err)
		doSeek = true
		doReopen = true
	}
	if err != nil {
		fs.Errorf(fh.remote, "ReadFileHandle.Read error: %v", err)
	} else {
		fh.offset = newOffset
		// fs.Debugf(fh.remote, "ReadFileHandle.Read OK")

		// Feed the data just read into the hasher for end-of-file
		// checksum verification.
		if fh.hash != nil {
			_, err = fh.hash.Write(p[:n])
			if err != nil {
				fs.Errorf(fh.remote, "ReadFileHandle.Read HashError: %v", err)
				return 0, err
			}
		}

		// If we have no error and we didn't fill the buffer, must be EOF
		if n != len(p) {
			err = io.EOF
		}
	}
	fh.cond.Broadcast() // wake everyone up waiting for an in-sequence read
	return n, err
}
|
|
|
|
|
2017-05-08 18:47:22 +02:00
|
|
|
func (fh *ReadFileHandle) checkHash() error {
|
2017-11-20 18:57:13 +01:00
|
|
|
if fh.hash == nil || !fh.readCalled || fh.offset < fh.size {
|
2017-05-08 18:47:22 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-03-01 16:50:23 +01:00
|
|
|
o := fh.file.getObject()
|
2017-05-08 18:47:22 +02:00
|
|
|
for hashType, dstSum := range fh.hash.Sums() {
|
2019-06-17 10:34:30 +02:00
|
|
|
srcSum, err := o.Hash(context.TODO(), hashType)
|
2017-05-08 18:47:22 +02:00
|
|
|
if err != nil {
|
2021-11-04 11:12:57 +01:00
|
|
|
if errors.Is(err, os.ErrNotExist) {
|
2020-05-04 13:18:28 +02:00
|
|
|
// if it was file not found then at
|
|
|
|
// this point we don't care any more
|
|
|
|
continue
|
|
|
|
}
|
2017-05-08 18:47:22 +02:00
|
|
|
return err
|
|
|
|
}
|
2018-01-12 17:30:54 +01:00
|
|
|
if !hash.Equals(dstSum, srcSum) {
|
2021-11-04 11:12:57 +01:00
|
|
|
return fmt.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, dstSum, srcSum)
|
2017-05-08 18:47:22 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-10-27 22:41:34 +02:00
|
|
|
// Read reads up to len(p) bytes into p. It returns the number of bytes read (0
|
|
|
|
// <= n <= len(p)) and any error encountered. Even if Read returns n < len(p),
|
|
|
|
// it may use all of p as scratch space during the call. If some data is
|
|
|
|
// available but not len(p) bytes, Read conventionally returns what is
|
|
|
|
// available instead of waiting for more.
|
|
|
|
//
|
|
|
|
// When Read encounters an error or end-of-file condition after successfully
|
|
|
|
// reading n > 0 bytes, it returns the number of bytes read. It may return the
|
|
|
|
// (non-nil) error from the same call or return the error (and n == 0) from a
|
|
|
|
// subsequent call. An instance of this general case is that a Reader returning
|
|
|
|
// a non-zero number of bytes at the end of the input stream may return either
|
|
|
|
// err == EOF or err == nil. The next Read should return 0, EOF.
|
|
|
|
//
|
|
|
|
// Callers should always process the n > 0 bytes returned before considering
|
|
|
|
// the error err. Doing so correctly handles I/O errors that happen after
|
|
|
|
// reading some bytes and also both of the allowed EOF behaviors.
|
|
|
|
//
|
|
|
|
// Implementations of Read are discouraged from returning a zero byte count
|
|
|
|
// with a nil error, except when len(p) == 0. Callers should treat a return of
|
|
|
|
// 0 and nil as indicating that nothing happened; in particular it does not
|
|
|
|
// indicate EOF.
|
|
|
|
//
|
|
|
|
// Implementations must not retain p.
|
|
|
|
func (fh *ReadFileHandle) Read(p []byte) (n int, err error) {
|
2017-11-03 17:11:44 +01:00
|
|
|
fh.mu.Lock()
|
|
|
|
defer fh.mu.Unlock()
|
2019-09-14 14:09:07 +02:00
|
|
|
if fh.roffset >= fh.size && !fh.sizeUnknown {
|
2017-10-27 22:41:34 +02:00
|
|
|
return 0, io.EOF
|
|
|
|
}
|
2017-11-03 17:11:44 +01:00
|
|
|
n, err = fh.readAt(p, fh.roffset)
|
2017-10-27 22:41:34 +02:00
|
|
|
fh.roffset += int64(n)
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
|
2017-05-02 23:35:07 +02:00
|
|
|
// close the file handle returning ECLOSED if it has been
// closed already.
//
// If the handle was actually opened this closes the underlying reader,
// verifies the hash, and completes the accounting transfer with the
// resulting error.
//
// Must be called with fh.mu held
func (fh *ReadFileHandle) close() error {
	if fh.closed {
		return ECLOSED
	}
	fh.closed = true

	if fh.opened {
		var err error
		// Complete the accounting transfer on exit with whatever
		// error err holds at that point.
		defer func() {
			fh.done(context.TODO(), err)
		}()
		// Close first so that we have hashes
		err = fh.r.Close()
		if err != nil {
			return err
		}
		// Now check the hash
		err = fh.checkHash()
		if err != nil {
			return err
		}
	}
	return nil
}
|
2017-05-08 18:47:22 +02:00
|
|
|
|
2017-10-29 22:14:05 +01:00
|
|
|
// Close closes the file
|
|
|
|
func (fh *ReadFileHandle) Close() error {
|
|
|
|
fh.mu.Lock()
|
|
|
|
defer fh.mu.Unlock()
|
|
|
|
return fh.close()
|
2017-05-02 23:35:07 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Flush is called each time the file or directory is closed.
|
|
|
|
// Because there can be multiple file descriptors referring to a
|
|
|
|
// single opened file, Flush can be called multiple times.
|
|
|
|
func (fh *ReadFileHandle) Flush() error {
|
|
|
|
fh.mu.Lock()
|
|
|
|
defer fh.mu.Unlock()
|
2017-05-11 09:33:45 +02:00
|
|
|
if !fh.opened {
|
|
|
|
return nil
|
|
|
|
}
|
2018-03-01 16:50:23 +01:00
|
|
|
// fs.Debugf(fh.remote, "ReadFileHandle.Flush")
|
2017-05-02 23:35:07 +02:00
|
|
|
|
2017-05-08 18:47:22 +02:00
|
|
|
if err := fh.checkHash(); err != nil {
|
2018-03-01 16:50:23 +01:00
|
|
|
fs.Errorf(fh.remote, "ReadFileHandle.Flush error: %v", err)
|
2017-05-08 18:47:22 +02:00
|
|
|
return err
|
2017-05-02 23:35:07 +02:00
|
|
|
}
|
2017-05-08 18:47:22 +02:00
|
|
|
|
2018-03-01 16:50:23 +01:00
|
|
|
// fs.Debugf(fh.remote, "ReadFileHandle.Flush OK")
|
2017-05-02 23:35:07 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Release is called when we are finished with the file handle
|
|
|
|
//
|
|
|
|
// It isn't called directly from userspace so the error is ignored by
|
|
|
|
// the kernel
|
|
|
|
func (fh *ReadFileHandle) Release() error {
|
|
|
|
fh.mu.Lock()
|
|
|
|
defer fh.mu.Unlock()
|
2017-05-11 09:33:45 +02:00
|
|
|
if !fh.opened {
|
|
|
|
return nil
|
|
|
|
}
|
2017-05-02 23:35:07 +02:00
|
|
|
if fh.closed {
|
2018-03-01 16:50:23 +01:00
|
|
|
fs.Debugf(fh.remote, "ReadFileHandle.Release nothing to do")
|
2017-05-02 23:35:07 +02:00
|
|
|
return nil
|
|
|
|
}
|
2018-03-01 16:50:23 +01:00
|
|
|
fs.Debugf(fh.remote, "ReadFileHandle.Release closing")
|
2017-05-02 23:35:07 +02:00
|
|
|
err := fh.close()
|
|
|
|
if err != nil {
|
2018-03-01 16:50:23 +01:00
|
|
|
fs.Errorf(fh.remote, "ReadFileHandle.Release error: %v", err)
|
2022-06-08 22:25:17 +02:00
|
|
|
//} else {
|
2018-03-01 16:50:23 +01:00
|
|
|
// fs.Debugf(fh.remote, "ReadFileHandle.Release OK")
|
2017-05-02 23:35:07 +02:00
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
2017-10-24 22:06:06 +02:00
|
|
|
|
|
|
|
// Size returns the size of the underlying file
|
|
|
|
func (fh *ReadFileHandle) Size() int64 {
|
2017-11-03 17:11:44 +01:00
|
|
|
fh.mu.Lock()
|
|
|
|
defer fh.mu.Unlock()
|
2017-11-20 18:57:13 +01:00
|
|
|
return fh.size
|
2017-10-24 22:06:06 +02:00
|
|
|
}
|
|
|
|
|
2017-10-29 22:11:17 +01:00
|
|
|
// Stat returns info about the file
|
|
|
|
func (fh *ReadFileHandle) Stat() (os.FileInfo, error) {
|
2017-11-03 17:11:44 +01:00
|
|
|
fh.mu.Lock()
|
|
|
|
defer fh.mu.Unlock()
|
2017-10-29 22:11:17 +01:00
|
|
|
return fh.file, nil
|
|
|
|
}
|