package fs
import (
"io"
"sync"
"github.com/pkg/errors"
)
const (
	// asyncBufferSize is the full size of each read-ahead buffer.
	asyncBufferSize = 1024 * 1024
	// softStartInitial is the initial read size for the soft start:
	// the first read uses a small slice of the buffer, which is
	// doubled on each read until asyncBufferSize is reached.
	softStartInitial = 4 * 1024
)
2017-02-14 23:28:18 +01:00
var asyncBufferPool = sync.Pool{
New: func() interface{} { return newBuffer() },
}
// asyncReader will do async read-ahead from the input reader
// and make the data available as an io.Reader.
// This should be fully transparent, except that once an error
// has been returned from the Reader, it will not recover.
type asyncReader struct {
in io.ReadCloser // Input reader
ready chan *buffer // Buffers ready to be handed to the reader
2017-02-14 23:28:18 +01:00
token chan struct{} // Tokens which allow a buffer to be taken
exit chan struct{} // Closes when finished
buffers int // Number of buffers
err error // If an error has occurred it is here
cur *buffer // Current buffer being served
exited chan struct{} // Channel is closed been the async reader shuts down
size int // size of buffer to use
}
// newAsyncReader returns a reader that will asynchronously read from
2017-02-14 23:28:18 +01:00
// the supplied Reader into a number of buffers each of size asyncBufferSize
// It will start reading from the input at once, maybe even before this
// function has returned.
// The input can be read from the returned reader.
// When done use Close to release the buffers and close the supplied input.
2017-02-14 23:28:18 +01:00
func newAsyncReader(rd io.ReadCloser, buffers int) (io.ReadCloser, error) {
if buffers <= 0 {
return nil, errors.New("number of buffers too small")
}
if rd == nil {
return nil, errors.New("nil reader supplied")
}
a := &asyncReader{}
2017-02-14 23:28:18 +01:00
a.init(rd, buffers)
return a, nil
}
2017-02-14 23:28:18 +01:00
func (a *asyncReader) init(rd io.ReadCloser, buffers int) {
a.in = rd
a.ready = make(chan *buffer, buffers)
2017-02-14 23:28:18 +01:00
a.token = make(chan struct{}, buffers)
a.exit = make(chan struct{}, 0)
a.exited = make(chan struct{}, 0)
a.buffers = buffers
a.cur = nil
a.size = softStartInitial
2017-02-14 23:28:18 +01:00
// Create tokens
for i := 0; i < buffers; i++ {
2017-02-14 23:28:18 +01:00
a.token <- struct{}{}
}
// Start async reader
go func() {
// Ensure that when we exit this is signalled.
defer close(a.exited)
2017-02-14 23:28:18 +01:00
defer close(a.ready)
for {
select {
2017-02-14 23:28:18 +01:00
case <-a.token:
b := a.getBuffer()
if a.size < asyncBufferSize {
b.buf = b.buf[:a.size]
a.size <<= 1
}
err := b.read(a.in)
a.ready <- b
if err != nil {
return
}
case <-a.exit:
return
}
}
}()
}
2017-02-14 23:28:18 +01:00
// return the buffer to the pool (clearing it)
func (a *asyncReader) putBuffer(b *buffer) {
b.clear()
asyncBufferPool.Put(b)
}
// get a buffer from the pool
func (a *asyncReader) getBuffer() *buffer {
b := asyncBufferPool.Get().(*buffer)
return b
2017-02-14 23:28:18 +01:00
}
// Read will return the next available data.
func (a *asyncReader) fill() (err error) {
if a.cur.isEmpty() {
if a.cur != nil {
2017-02-14 23:28:18 +01:00
a.putBuffer(a.cur)
a.token <- struct{}{}
a.cur = nil
}
b, ok := <-a.ready
if !ok {
return a.err
}
a.cur = b
}
return nil
}
// Read will return the next available data.
func (a *asyncReader) Read(p []byte) (n int, err error) {
	// Swap in the next buffer, bailing out on a stored error
	if err = a.fill(); err != nil {
		return 0, err
	}
	// Serve as much of the current buffer as fits in p
	n = copy(p, a.cur.buffer())
	a.cur.increment(n)
	// If data remains in the buffer there is nothing more to do
	if !a.cur.isEmpty() {
		return n, nil
	}
	// Buffer drained - surface any error it carried
	a.err = a.cur.err
	return n, a.err
}
// WriteTo writes data to w until there's no more data to write or when an
// error occurs. The return value n is the number of bytes written.
// Any error encountered during the write is also returned.
//
// As required by the io.WriterTo contract (and relied upon by
// io.Copy), a normal end of stream is reported as a nil error, not
// io.EOF. The original implementation returned io.EOF here, which
// made io.Copy report a spurious error.
func (a *asyncReader) WriteTo(w io.Writer) (n int64, err error) {
	n = 0
	for {
		err = a.fill()
		if err != nil {
			if err == io.EOF {
				// End of stream is success for WriteTo
				return n, nil
			}
			return n, err
		}
		n2, err := w.Write(a.cur.buffer())
		a.cur.increment(n2)
		n += int64(n2)
		if err != nil {
			return n, err
		}
		if a.cur.err != nil {
			a.err = a.cur.err
			if a.cur.err == io.EOF {
				// End of stream is success for WriteTo
				return n, nil
			}
			return n, a.cur.err
		}
	}
}
// close will ensure that the underlying async reader is shut down.
// If closeIn is set it will also close the input supplied on
// newAsyncReader.
func (a *asyncReader) close(closeIn bool) (err error) {
2017-02-14 23:28:18 +01:00
// Return if already closed
select {
2017-02-14 23:28:18 +01:00
case <-a.exit:
return
default:
}
2017-02-14 23:28:18 +01:00
// Close and wait for go routine
close(a.exit)
<-a.exited
// Return any outstanding buffers to the Pool
if a.cur != nil {
a.putBuffer(a.cur)
}
2017-02-14 23:28:18 +01:00
for b := range a.ready {
a.putBuffer(b)
}
if closeIn {
return a.in.Close()
}
return nil
}
// Close will ensure that the underlying async reader is shut down.
// It will also close the input supplied on newAsyncReader.
func (a *asyncReader) Close() (err error) {
	return a.close(true)
}
// Abandon will ensure that the underlying async reader is shut down.
// It will NOT close the input supplied on newAsyncReader.
func (a *asyncReader) Abandon() {
	// Shutdown errors are deliberately discarded as the input is kept open.
	_ = a.close(false)
}
// Internal buffer
// If an error is present, it must be returned
// once all buffer content has been served.
type buffer struct {
	buf    []byte // data, sliced down to the length actually read
	err    error  // error from the read that filled buf, if any
	offset int    // how much of buf has been consumed so far
}
2017-02-14 23:28:18 +01:00
func newBuffer() *buffer {
return &buffer{
buf: make([]byte, asyncBufferSize),
err: nil,
}
}
// clear makes the buffer ready for reuse: the read position and any
// stored error are reset and the buffer regains its full capacity.
func (b *buffer) clear() {
	b.offset = 0
	b.err = nil
	b.buf = b.buf[:cap(b.buf)]
}
// isEmpty reports whether all the data in the buffer has been served,
// or whether the buffer is nil.
func (b *buffer) isEmpty() bool {
	return b == nil || b.offset >= len(b.buf)
}
// read into start of the buffer from the supplied reader,
// resets the offset and updates the size of the buffer.
// Any error encountered during the read is returned.
func (b *buffer) read(rd io.Reader) error {
var n int
2017-02-14 23:28:18 +01:00
n, b.err = ReadFill(rd, b.buf)
b.buf = b.buf[0:n]
b.offset = 0
return b.err
}
// buffer returns the unread portion of the buffer (from the current offset).
func (b *buffer) buffer() []byte {
	return b.buf[b.offset:]
}
// increment advances the offset past n consumed bytes.
func (b *buffer) increment(n int) {
	b.offset += n
}