2017-10-28 21:01:34 +02:00
|
|
|
package vfs
|
2017-05-02 23:35:07 +02:00
|
|
|
|
|
|
|
import (
|
2019-06-17 10:34:30 +02:00
|
|
|
"context"
|
2017-05-02 23:35:07 +02:00
|
|
|
"io"
|
2017-10-29 22:11:17 +01:00
|
|
|
"os"
|
2017-05-02 23:35:07 +02:00
|
|
|
"sync"
|
2019-11-04 13:16:36 +01:00
|
|
|
"sync/atomic"
|
|
|
|
"time"
|
2017-05-02 23:35:07 +02:00
|
|
|
|
2017-05-08 18:47:22 +02:00
|
|
|
"github.com/pkg/errors"
|
2019-07-28 19:47:38 +02:00
|
|
|
"github.com/rclone/rclone/fs"
|
|
|
|
"github.com/rclone/rclone/fs/accounting"
|
|
|
|
"github.com/rclone/rclone/fs/chunkedreader"
|
|
|
|
"github.com/rclone/rclone/fs/hash"
|
2017-05-02 23:35:07 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
// ReadFileHandle is an open for read file handle on a File
type ReadFileHandle struct {
	baseHandle
	done        func(err error)     // transfer accounting finaliser, set in openPending, called in close
	mu          sync.Mutex          // protects the state below
	cond        *sync.Cond          // cond lock for out of sequence reads
	closed      bool                // set if handle has been closed
	r           *accounting.Account // accounted reader, set in openPending
	readCalled  bool                // set if read has been called
	size        int64               // size of the object (0 for unknown length)
	offset      int64               // offset of read of o
	roffset     int64               // offset of Read() calls
	noSeek      bool                // set if seeking is forbidden - seeks return ESPIPE
	sizeUnknown bool                // set if size of source is not known
	file        *File               // the file this handle is reading
	hash        *hash.MultiHasher   // hash of data read so far, nil if checksumming disabled
	opened      bool                // set once openPending has opened the object
	remote      string              // remote path of the object, used for logging
}
|
|
|
|
|
2017-10-27 22:41:34 +02:00
|
|
|
// Check interfaces
//
// Compile-time assertions that *ReadFileHandle satisfies the io
// interfaces it is expected to implement.
var (
	_ io.Reader   = (*ReadFileHandle)(nil)
	_ io.ReaderAt = (*ReadFileHandle)(nil)
	_ io.Seeker   = (*ReadFileHandle)(nil)
	_ io.Closer   = (*ReadFileHandle)(nil)
)
|
|
|
|
|
2018-03-01 16:50:23 +01:00
|
|
|
func newReadFileHandle(f *File) (*ReadFileHandle, error) {
|
2018-01-12 17:30:54 +01:00
|
|
|
var mhash *hash.MultiHasher
|
2017-05-11 09:33:45 +02:00
|
|
|
var err error
|
2018-03-01 16:50:23 +01:00
|
|
|
o := f.getObject()
|
2017-10-29 12:00:56 +01:00
|
|
|
if !f.d.vfs.Opt.NoChecksum {
|
2019-11-04 11:12:56 +01:00
|
|
|
hashes := hash.NewHashSet(o.Fs().Hashes().GetOne()) // just pick one hash
|
|
|
|
mhash, err = hash.NewMultiHasherTypes(hashes)
|
2017-05-08 18:47:22 +02:00
|
|
|
if err != nil {
|
|
|
|
fs.Errorf(o.Fs(), "newReadFileHandle hash error: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-02 23:35:07 +02:00
|
|
|
fh := &ReadFileHandle{
|
2019-09-14 14:09:07 +02:00
|
|
|
remote: o.Remote(),
|
|
|
|
noSeek: f.d.vfs.Opt.NoSeek,
|
|
|
|
file: f,
|
|
|
|
hash: mhash,
|
|
|
|
size: nonNegative(o.Size()),
|
|
|
|
sizeUnknown: o.Size() < 0,
|
2017-05-02 23:35:07 +02:00
|
|
|
}
|
2019-11-04 13:16:36 +01:00
|
|
|
fh.cond = sync.NewCond(&fh.mu)
|
2017-05-02 23:35:07 +02:00
|
|
|
return fh, nil
|
|
|
|
}
|
|
|
|
|
2017-05-11 09:33:45 +02:00
|
|
|
// openPending opens the file if there is a pending open
|
|
|
|
// call with the lock held
|
|
|
|
func (fh *ReadFileHandle) openPending() (err error) {
|
|
|
|
if fh.opened {
|
|
|
|
return nil
|
|
|
|
}
|
2018-03-01 16:50:23 +01:00
|
|
|
o := fh.file.getObject()
|
2019-06-17 10:34:30 +02:00
|
|
|
r, err := chunkedreader.New(context.TODO(), o, int64(fh.file.d.vfs.Opt.ChunkSize), int64(fh.file.d.vfs.Opt.ChunkSizeLimit)).Open()
|
2017-05-11 09:33:45 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-07-18 12:13:54 +02:00
|
|
|
tr := accounting.GlobalStats().NewTransfer(o)
|
2019-07-16 13:56:20 +02:00
|
|
|
fh.done = tr.Done
|
|
|
|
fh.r = tr.Account(r).WithBuffer() // account the transfer
|
2017-05-11 09:33:45 +02:00
|
|
|
fh.opened = true
|
2019-07-16 13:56:20 +02:00
|
|
|
|
2017-05-11 09:33:45 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-05-09 12:29:02 +02:00
|
|
|
// String converts it to printable
|
|
|
|
func (fh *ReadFileHandle) String() string {
|
|
|
|
if fh == nil {
|
|
|
|
return "<nil *ReadFileHandle>"
|
|
|
|
}
|
2017-11-03 17:11:44 +01:00
|
|
|
fh.mu.Lock()
|
|
|
|
defer fh.mu.Unlock()
|
2017-05-09 12:29:02 +02:00
|
|
|
if fh.file == nil {
|
|
|
|
return "<nil *ReadFileHandle.file>"
|
|
|
|
}
|
|
|
|
return fh.file.String() + " (r)"
|
|
|
|
}
|
|
|
|
|
2017-05-02 23:35:07 +02:00
|
|
|
// Node returns the Node assocuated with this - satisfies Noder interface
|
|
|
|
func (fh *ReadFileHandle) Node() Node {
|
2017-11-03 17:11:44 +01:00
|
|
|
fh.mu.Lock()
|
|
|
|
defer fh.mu.Unlock()
|
2017-05-02 23:35:07 +02:00
|
|
|
return fh.file
|
|
|
|
}
|
|
|
|
|
|
|
|
// seek to a new offset
//
// if reopen is true, then we won't attempt to use an io.Seeker interface
//
// Must be called with fh.mu held
func (fh *ReadFileHandle) seek(offset int64, reopen bool) (err error) {
	if fh.noSeek {
		return ESPIPE
	}
	// Seeking invalidates the whole-file hash check, so drop the hasher
	fh.hash = nil
	if !reopen {
		ar := fh.r.GetAsyncReader()
		// try to fulfil the seek with buffer discard
		if ar != nil && ar.SkipBytes(int(offset-fh.offset)) {
			fh.offset = offset
			return nil
		}
	}
	fh.r.StopBuffering() // stop the background reading first
	oldReader := fh.r.GetReader()
	r, ok := oldReader.(*chunkedreader.ChunkedReader)
	if !ok {
		// Unexpected reader type - fall back to a full reopen
		fs.Logf(fh.remote, "ReadFileHandle.Read expected reader to be a ChunkedReader, got %T", oldReader)
		reopen = true
	}
	if !reopen {
		// Seek in place using the ChunkedReader's RangeSeek
		fs.Debugf(fh.remote, "ReadFileHandle.seek from %d to %d (fs.RangeSeeker)", fh.offset, offset)
		_, err = r.RangeSeek(context.TODO(), offset, io.SeekStart, -1)
		if err != nil {
			fs.Debugf(fh.remote, "ReadFileHandle.Read fs.RangeSeeker failed: %v", err)
			return err
		}
	} else {
		fs.Debugf(fh.remote, "ReadFileHandle.seek from %d to %d", fh.offset, offset)
		// close old one
		err = oldReader.Close()
		if err != nil {
			// Log but carry on - the reopen below may still succeed
			fs.Debugf(fh.remote, "ReadFileHandle.Read seek close old failed: %v", err)
		}
		// re-open with a seek
		o := fh.file.getObject()
		r = chunkedreader.New(context.TODO(), o, int64(fh.file.d.vfs.Opt.ChunkSize), int64(fh.file.d.vfs.Opt.ChunkSizeLimit))
		_, err := r.Seek(offset, 0)
		if err != nil {
			fs.Debugf(fh.remote, "ReadFileHandle.Read seek failed: %v", err)
			return err
		}
		r, err = r.Open()
		if err != nil {
			fs.Debugf(fh.remote, "ReadFileHandle.Read seek failed: %v", err)
			return err
		}
	}
	// Install the new (or repositioned) reader and record the new offset
	fh.r.UpdateReader(r)
	fh.offset = offset
	return nil
}
|
|
|
|
|
2017-11-02 19:22:26 +01:00
|
|
|
// Seek the file - returns ESPIPE if seeking isn't possible
|
2017-10-27 22:41:34 +02:00
|
|
|
func (fh *ReadFileHandle) Seek(offset int64, whence int) (n int64, err error) {
|
2017-11-03 17:11:44 +01:00
|
|
|
fh.mu.Lock()
|
|
|
|
defer fh.mu.Unlock()
|
2017-11-02 19:22:26 +01:00
|
|
|
if fh.noSeek {
|
|
|
|
return 0, ESPIPE
|
|
|
|
}
|
2017-11-20 18:57:13 +01:00
|
|
|
size := fh.size
|
2017-10-27 22:41:34 +02:00
|
|
|
switch whence {
|
2018-04-06 20:53:06 +02:00
|
|
|
case io.SeekStart:
|
2017-10-27 22:41:34 +02:00
|
|
|
fh.roffset = 0
|
2018-04-06 20:53:06 +02:00
|
|
|
case io.SeekEnd:
|
2017-10-27 22:41:34 +02:00
|
|
|
fh.roffset = size
|
|
|
|
}
|
|
|
|
fh.roffset += offset
|
|
|
|
// we don't check the offset - the next Read will
|
|
|
|
return fh.roffset, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadAt reads len(p) bytes into p starting at offset off in the
|
|
|
|
// underlying input source. It returns the number of bytes read (0 <=
|
|
|
|
// n <= len(p)) and any error encountered.
|
|
|
|
//
|
|
|
|
// When ReadAt returns n < len(p), it returns a non-nil error
|
|
|
|
// explaining why more bytes were not returned. In this respect,
|
|
|
|
// ReadAt is stricter than Read.
|
|
|
|
//
|
|
|
|
// Even if ReadAt returns n < len(p), it may use all of p as scratch
|
|
|
|
// space during the call. If some data is available but not len(p)
|
|
|
|
// bytes, ReadAt blocks until either all the data is available or an
|
|
|
|
// error occurs. In this respect ReadAt is different from Read.
|
|
|
|
//
|
|
|
|
// If the n = len(p) bytes returned by ReadAt are at the end of the
|
|
|
|
// input source, ReadAt may return either err == EOF or err == nil.
|
|
|
|
//
|
|
|
|
// If ReadAt is reading from an input source with a seek offset,
|
|
|
|
// ReadAt should not affect nor be affected by the underlying seek
|
|
|
|
// offset.
|
|
|
|
//
|
|
|
|
// Clients of ReadAt can execute parallel ReadAt calls on the same
|
|
|
|
// input source.
|
|
|
|
//
|
|
|
|
// Implementations must not retain p.
|
|
|
|
func (fh *ReadFileHandle) ReadAt(p []byte, off int64) (n int, err error) {
|
2017-05-02 23:35:07 +02:00
|
|
|
fh.mu.Lock()
|
|
|
|
defer fh.mu.Unlock()
|
2017-11-03 17:11:44 +01:00
|
|
|
return fh.readAt(p, off)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Implementation of ReadAt - call with lock held
//
// Reads len(p) bytes at offset off, retrying with fs.Config.LowLevelRetries
// low level retries, seeking (or reopening) the underlying reader as
// necessary.  Slightly out-of-order reads wait briefly for the in-sequence
// read to arrive before forcing a seek.
func (fh *ReadFileHandle) readAt(p []byte, off int64) (n int, err error) {
	// defer log.Trace(fh.remote, "p[%d], off=%d", len(p), off)("n=%d, err=%v", &n, &err)
	err = fh.openPending() // FIXME pending open could be more efficient in the presence of seek (and retries)
	if err != nil {
		return 0, err
	}
	// fs.Debugf(fh.remote, "ReadFileHandle.Read size %d offset %d", reqSize, off)
	if fh.closed {
		fs.Errorf(fh.remote, "ReadFileHandle.Read error: %v", EBADF)
		return 0, ECLOSED
	}
	// Work out the window (up to 8 x 1 MiB, scaled down to the request
	// size) within which an out-of-order read will wait for the
	// in-sequence read rather than seeking immediately.
	maxBuf := 1024 * 1024
	if len(p) < maxBuf {
		maxBuf = len(p)
	}
	if gap := off - fh.offset; gap > 0 && gap < int64(8*maxBuf) {
		// Set a background timer so we don't wait for long
		// Waits here potentially affect all seeks so need to keep them short
		// This time here was made by finding the smallest when mounting a local backend
		// that didn't cause seeks.
		const maxWait = 5 * time.Millisecond
		timeout := time.NewTimer(maxWait)
		done := make(chan struct{})
		abort := int32(0)
		go func() {
			select {
			case <-timeout.C:
				// set abort flag and give all the waiting goroutines a kick on timeout
				atomic.StoreInt32(&abort, 1)
				fs.Debugf(fh.remote, "aborting in-sequence read wait, off=%d", off)
				fh.cond.Broadcast()
			case <-done:
			}
		}()
		// Wait (releasing the lock) until either the in-sequence read
		// catches up with off, or the timer fires.
		for fh.offset != off && atomic.LoadInt32(&abort) == 0 {
			fs.Debugf(fh.remote, "waiting for in-sequence read to %d for %v", off, maxWait)
			fh.cond.Wait()
		}
		// tidy up end timer
		close(done)
		timeout.Stop()
		if fh.offset != off {
			fs.Debugf(fh.remote, "failed to wait for in-sequence read to %d", off)
		}
	}
	doSeek := off != fh.offset
	if doSeek && fh.noSeek {
		return 0, ESPIPE
	}
	var newOffset int64
	retries := 0
	reqSize := len(p)
	doReopen := false
	// Retry loop - each failed attempt forces a seek (and, after the
	// first failure, a reopen) before trying again.
	for {
		if doSeek {
			// Are we attempting to seek beyond the end of the
			// file - if so just return EOF leaving the underlying
			// file in an unchanged state.
			if off >= fh.size {
				fs.Debugf(fh.remote, "ReadFileHandle.Read attempt to read beyond end of file: %d > %d", off, fh.size)
				return 0, io.EOF
			}
			// Otherwise do the seek
			err = fh.seek(off, doReopen)
		} else {
			err = nil
		}
		if err == nil {
			if reqSize > 0 {
				fh.readCalled = true
			}
			n, err = io.ReadFull(fh.r, p)
			newOffset = fh.offset + int64(n)
			// if err == nil && rand.Intn(10) == 0 {
			// 	err = errors.New("random error")
			// }
			if err == nil {
				break
			} else if (err == io.ErrUnexpectedEOF || err == io.EOF) && (newOffset == fh.size || fh.sizeUnknown) {
				if fh.sizeUnknown {
					// size is now known since we have read to the end
					fh.sizeUnknown = false
					fh.size = newOffset
				}
				// Have read to end of file - reset error
				err = nil
				break
			}
		}
		if retries >= fs.Config.LowLevelRetries {
			break
		}
		retries++
		fs.Errorf(fh.remote, "ReadFileHandle.Read error: low level retry %d/%d: %v", retries, fs.Config.LowLevelRetries, err)
		doSeek = true
		doReopen = true
	}
	if err != nil {
		fs.Errorf(fh.remote, "ReadFileHandle.Read error: %v", err)
	} else {
		// Success - advance the offset and feed the data to the hasher
		fh.offset = newOffset
		// fs.Debugf(fh.remote, "ReadFileHandle.Read OK")

		if fh.hash != nil {
			_, err = fh.hash.Write(p[:n])
			if err != nil {
				fs.Errorf(fh.remote, "ReadFileHandle.Read HashError: %v", err)
				return 0, err
			}
		}

		// If we have no error and we didn't fill the buffer, must be EOF
		if n != len(p) {
			err = io.EOF
		}
	}
	fh.cond.Broadcast() // wake everyone up waiting for an in-sequence read
	return n, err
}
|
|
|
|
|
2017-05-08 18:47:22 +02:00
|
|
|
func (fh *ReadFileHandle) checkHash() error {
|
2017-11-20 18:57:13 +01:00
|
|
|
if fh.hash == nil || !fh.readCalled || fh.offset < fh.size {
|
2017-05-08 18:47:22 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-03-01 16:50:23 +01:00
|
|
|
o := fh.file.getObject()
|
2017-05-08 18:47:22 +02:00
|
|
|
for hashType, dstSum := range fh.hash.Sums() {
|
2019-06-17 10:34:30 +02:00
|
|
|
srcSum, err := o.Hash(context.TODO(), hashType)
|
2017-05-08 18:47:22 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-01-12 17:30:54 +01:00
|
|
|
if !hash.Equals(dstSum, srcSum) {
|
2017-05-08 18:47:22 +02:00
|
|
|
return errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, dstSum, srcSum)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-10-27 22:41:34 +02:00
|
|
|
// Read reads up to len(p) bytes into p. It returns the number of bytes read (0
|
|
|
|
// <= n <= len(p)) and any error encountered. Even if Read returns n < len(p),
|
|
|
|
// it may use all of p as scratch space during the call. If some data is
|
|
|
|
// available but not len(p) bytes, Read conventionally returns what is
|
|
|
|
// available instead of waiting for more.
|
|
|
|
//
|
|
|
|
// When Read encounters an error or end-of-file condition after successfully
|
|
|
|
// reading n > 0 bytes, it returns the number of bytes read. It may return the
|
|
|
|
// (non-nil) error from the same call or return the error (and n == 0) from a
|
|
|
|
// subsequent call. An instance of this general case is that a Reader returning
|
|
|
|
// a non-zero number of bytes at the end of the input stream may return either
|
|
|
|
// err == EOF or err == nil. The next Read should return 0, EOF.
|
|
|
|
//
|
|
|
|
// Callers should always process the n > 0 bytes returned before considering
|
|
|
|
// the error err. Doing so correctly handles I/O errors that happen after
|
|
|
|
// reading some bytes and also both of the allowed EOF behaviors.
|
|
|
|
//
|
|
|
|
// Implementations of Read are discouraged from returning a zero byte count
|
|
|
|
// with a nil error, except when len(p) == 0. Callers should treat a return of
|
|
|
|
// 0 and nil as indicating that nothing happened; in particular it does not
|
|
|
|
// indicate EOF.
|
|
|
|
//
|
|
|
|
// Implementations must not retain p.
|
|
|
|
func (fh *ReadFileHandle) Read(p []byte) (n int, err error) {
|
2017-11-03 17:11:44 +01:00
|
|
|
fh.mu.Lock()
|
|
|
|
defer fh.mu.Unlock()
|
2019-09-14 14:09:07 +02:00
|
|
|
if fh.roffset >= fh.size && !fh.sizeUnknown {
|
2017-10-27 22:41:34 +02:00
|
|
|
return 0, io.EOF
|
|
|
|
}
|
2017-11-03 17:11:44 +01:00
|
|
|
n, err = fh.readAt(p, fh.roffset)
|
2017-10-27 22:41:34 +02:00
|
|
|
fh.roffset += int64(n)
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
|
2017-05-02 23:35:07 +02:00
|
|
|
// close the file handle returning ECLOSED if it has been
// closed already.
//
// Must be called with fh.mu held
func (fh *ReadFileHandle) close() error {
	if fh.closed {
		return ECLOSED
	}
	fh.closed = true

	if fh.opened {
		var err error
		// fh.done finishes the transfer accounting.  It is deferred via
		// a closure so it sees the final value of err on any of the
		// return paths below.
		defer func() {
			fh.done(err)
		}()
		// Close first so that we have hashes
		err = fh.r.Close()
		if err != nil {
			return err
		}
		// Now check the hash
		err = fh.checkHash()
		if err != nil {
			return err
		}
	}
	return nil
}
|
2017-05-08 18:47:22 +02:00
|
|
|
|
2017-10-29 22:14:05 +01:00
|
|
|
// Close closes the file
|
|
|
|
func (fh *ReadFileHandle) Close() error {
|
|
|
|
fh.mu.Lock()
|
|
|
|
defer fh.mu.Unlock()
|
|
|
|
return fh.close()
|
2017-05-02 23:35:07 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Flush is called each time the file or directory is closed.
|
|
|
|
// Because there can be multiple file descriptors referring to a
|
|
|
|
// single opened file, Flush can be called multiple times.
|
|
|
|
func (fh *ReadFileHandle) Flush() error {
|
|
|
|
fh.mu.Lock()
|
|
|
|
defer fh.mu.Unlock()
|
2017-05-11 09:33:45 +02:00
|
|
|
if !fh.opened {
|
|
|
|
return nil
|
|
|
|
}
|
2018-03-01 16:50:23 +01:00
|
|
|
// fs.Debugf(fh.remote, "ReadFileHandle.Flush")
|
2017-05-02 23:35:07 +02:00
|
|
|
|
2017-05-08 18:47:22 +02:00
|
|
|
if err := fh.checkHash(); err != nil {
|
2018-03-01 16:50:23 +01:00
|
|
|
fs.Errorf(fh.remote, "ReadFileHandle.Flush error: %v", err)
|
2017-05-08 18:47:22 +02:00
|
|
|
return err
|
2017-05-02 23:35:07 +02:00
|
|
|
}
|
2017-05-08 18:47:22 +02:00
|
|
|
|
2018-03-01 16:50:23 +01:00
|
|
|
// fs.Debugf(fh.remote, "ReadFileHandle.Flush OK")
|
2017-05-02 23:35:07 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Release is called when we are finished with the file handle
|
|
|
|
//
|
|
|
|
// It isn't called directly from userspace so the error is ignored by
|
|
|
|
// the kernel
|
|
|
|
func (fh *ReadFileHandle) Release() error {
|
|
|
|
fh.mu.Lock()
|
|
|
|
defer fh.mu.Unlock()
|
2017-05-11 09:33:45 +02:00
|
|
|
if !fh.opened {
|
|
|
|
return nil
|
|
|
|
}
|
2017-05-02 23:35:07 +02:00
|
|
|
if fh.closed {
|
2018-03-01 16:50:23 +01:00
|
|
|
fs.Debugf(fh.remote, "ReadFileHandle.Release nothing to do")
|
2017-05-02 23:35:07 +02:00
|
|
|
return nil
|
|
|
|
}
|
2018-03-01 16:50:23 +01:00
|
|
|
fs.Debugf(fh.remote, "ReadFileHandle.Release closing")
|
2017-05-02 23:35:07 +02:00
|
|
|
err := fh.close()
|
|
|
|
if err != nil {
|
2018-03-01 16:50:23 +01:00
|
|
|
fs.Errorf(fh.remote, "ReadFileHandle.Release error: %v", err)
|
2017-05-02 23:35:07 +02:00
|
|
|
} else {
|
2018-03-01 16:50:23 +01:00
|
|
|
// fs.Debugf(fh.remote, "ReadFileHandle.Release OK")
|
2017-05-02 23:35:07 +02:00
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
2017-10-24 22:06:06 +02:00
|
|
|
|
|
|
|
// Size returns the size of the underlying file
|
|
|
|
func (fh *ReadFileHandle) Size() int64 {
|
2017-11-03 17:11:44 +01:00
|
|
|
fh.mu.Lock()
|
|
|
|
defer fh.mu.Unlock()
|
2017-11-20 18:57:13 +01:00
|
|
|
return fh.size
|
2017-10-24 22:06:06 +02:00
|
|
|
}
|
|
|
|
|
2017-10-29 22:11:17 +01:00
|
|
|
// Stat returns info about the file
|
|
|
|
func (fh *ReadFileHandle) Stat() (os.FileInfo, error) {
|
2017-11-03 17:11:44 +01:00
|
|
|
fh.mu.Lock()
|
|
|
|
defer fh.mu.Unlock()
|
2017-10-29 22:11:17 +01:00
|
|
|
return fh.file, nil
|
|
|
|
}
|