package sftp

import (
"bytes"
"encoding/binary"
"io"
"os"
"path"
"sync/atomic"
"syscall"
"time"
2013-11-11 09:57:03 +08:00
"github.com/kr/fs"
"github.com/pkg/errors"
"golang.org/x/crypto/ssh"
)
// InternalInconsistency indicates the packets sent and the data queued to be
// written to the file don't match up. It is an unusual error, usually caused
// by bad behavior on the server side or by connection issues. The error is
// limited in scope to the call where it happened; the client object is still
// OK to use as long as the connection is still open.
var InternalInconsistency = errors.New("internal inconsistency")
// A ClientOption is a function which applies configuration to a Client.
type ClientOption func(*Client) error
// MaxPacketChecked sets the maximum size of the payload, measured in bytes.
// This option only accepts sizes servers should support, i.e. <= 32768 bytes.
//
// If you get the error "failed to send packet header: EOF" when copying a
// large file, try lowering this number.
//
// The default packet size is 32768 bytes.
func MaxPacketChecked(size int) ClientOption {
return func(c *Client) error {
if size < 1 {
return errors.Errorf("size must be greater or equal to 1")
}
if size > 32768 {
return errors.Errorf("sizes larger than 32KB might not work with all servers")
}
c.maxPacket = size
return nil
}
}
// MaxPacketUnchecked sets the maximum size of the payload, measured in bytes.
// It accepts sizes larger than the 32768 bytes all servers should support.
// Only use a setting higher than 32768 if your application always connects to
// the same server or after sufficiently broad testing.
//
// If you get the error "failed to send packet header: EOF" when copying a
// large file, try lowering this number.
//
// The default packet size is 32768 bytes.
func MaxPacketUnchecked(size int) ClientOption {
return func(c *Client) error {
if size < 1 {
return errors.Errorf("size must be greater or equal to 1")
}
c.maxPacket = size
return nil
}
}
// MaxPacket sets the maximum size of the payload, measured in bytes.
// This option only accepts sizes servers should support, i.e. <= 32768 bytes.
// This is a synonym for MaxPacketChecked that provides backward compatibility.
//
// If you get the error "failed to send packet header: EOF" when copying a
// large file, try lowering this number.
//
// The default packet size is 32768 bytes.
func MaxPacket(size int) ClientOption {
return MaxPacketChecked(size)
}
// MaxConcurrentRequestsPerFile sets the maximum concurrent requests allowed for a single file.
//
// The default maximum concurrent requests is 64.
func MaxConcurrentRequestsPerFile(n int) ClientOption {
return func(c *Client) error {
if n < 1 {
return errors.Errorf("n must be greater or equal to 1")
}
c.maxConcurrentRequests = n
return nil
}
}
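// The helper below is an illustrative sketch only and not part of the
// package API: it shows how ClientOptions compose when constructing a
// Client. The helper name and the chosen values are assumptions made
// for the example.
func newTunedClient(conn *ssh.Client) (*Client, error) {
	// 16 KiB payloads stay within the 32768-byte limit all servers
	// should support, and 32 in-flight requests per file trades memory
	// for throughput on high-latency links.
	return NewClient(conn,
		MaxPacketChecked(16*1024),
		MaxConcurrentRequestsPerFile(32),
	)
}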
// NewClient creates a new SFTP client on conn, using zero or more option
// functions.
func NewClient(conn *ssh.Client, opts ...ClientOption) (*Client, error) {
s, err := conn.NewSession()
if err != nil {
return nil, err
}
if err := s.RequestSubsystem("sftp"); err != nil {
return nil, err
}
pw, err := s.StdinPipe()
if err != nil {
return nil, err
}
pr, err := s.StdoutPipe()
if err != nil {
return nil, err
}
return NewClientPipe(pr, pw, opts...)
}
// NewClientPipe creates a new SFTP client given a Reader and a WriteCloser.
// This can be used for connecting to an SFTP server over TCP/TLS or by using
// the system's ssh client program (e.g. via exec.Command).
func NewClientPipe(rd io.Reader, wr io.WriteCloser, opts ...ClientOption) (*Client, error) {
sftp := &Client{
clientConn: clientConn{
conn: conn{
Reader: rd,
WriteCloser: wr,
},
inflight: make(map[uint32]chan<- result),
closed: make(chan struct{}),
},
maxPacket: 1 << 15,
maxConcurrentRequests: 64,
}
if err := sftp.applyOptions(opts...); err != nil {
wr.Close()
return nil, err
}
if err := sftp.sendInit(); err != nil {
wr.Close()
return nil, err
}
if err := sftp.recvVersion(); err != nil {
wr.Close()
return nil, err
}
sftp.clientConn.wg.Add(1)
go sftp.loop()
return sftp, nil
}
// Client represents an SFTP session on a *ssh.Client SSH connection.
// Multiple Clients can be active on a single SSH connection, and a Client
// may be called concurrently from multiple goroutines.
//
// Client implements the github.com/kr/fs.FileSystem interface.
type Client struct {
clientConn
maxPacket int // max packet size read or written.
nextid uint32
maxConcurrentRequests int
}
// Create creates the named file with mode 0666 (before umask), truncating it
// if it already exists. If successful, methods on the returned File can be
// used for I/O; the associated file descriptor has mode O_RDWR. If you need
// more control over the flags/mode used to open the file, see Client.OpenFile.
func (c *Client) Create(path string) (*File, error) {
return c.open(path, flags(os.O_RDWR|os.O_CREATE|os.O_TRUNC))
}
const sftpProtocolVersion = 3 // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02
func (c *Client) sendInit() error {
return c.clientConn.conn.sendPacket(sshFxInitPacket{
Version: sftpProtocolVersion, // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02
})
}
// returns the next value of c.nextid
func (c *Client) nextID() uint32 {
return atomic.AddUint32(&c.nextid, 1)
}
func (c *Client) recvVersion() error {
typ, data, err := c.recvPacket()
if err != nil {
return err
}
if typ != ssh_FXP_VERSION {
return &unexpectedPacketErr{ssh_FXP_VERSION, typ}
}
version, _ := unmarshalUint32(data)
if version != sftpProtocolVersion {
return &unexpectedVersionErr{sftpProtocolVersion, version}
}
return nil
}
// Walk returns a new Walker rooted at root.
func (c *Client) Walk(root string) *fs.Walker {
return fs.WalkFS(root, c)
}
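// listPaths is a hypothetical helper, shown only to illustrate how the
// returned *fs.Walker is consumed: Step advances the walk, while Path and
// Err report the current entry.
func listPaths(c *Client, root string) ([]string, error) {
	var paths []string
	w := c.Walk(root)
	for w.Step() {
		if err := w.Err(); err != nil {
			return paths, err
		}
		paths = append(paths, w.Path())
	}
	return paths, nil
}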
// ReadDir reads the directory named by dirname and returns a list of
// directory entries.
func (c *Client) ReadDir(p string) ([]os.FileInfo, error) {
handle, err := c.opendir(p)
if err != nil {
return nil, err
}
defer c.close(handle) // this has to defer earlier than the lock below
var attrs []os.FileInfo
var done = false
for !done {
id := c.nextID()
typ, data, err1 := c.sendPacket(sshFxpReaddirPacket{
ID: id,
Handle: handle,
})
if err1 != nil {
err = err1
done = true
break
}
switch typ {
case ssh_FXP_NAME:
sid, data := unmarshalUint32(data)
if sid != id {
return nil, &unexpectedIDErr{id, sid}
}
count, data := unmarshalUint32(data)
for i := uint32(0); i < count; i++ {
var filename string
filename, data = unmarshalString(data)
_, data = unmarshalString(data) // discard longname
var attr *FileStat
attr, data = unmarshalAttrs(data)
if filename == "." || filename == ".." {
continue
}
attrs = append(attrs, fileInfoFromStat(attr, path.Base(filename)))
}
case ssh_FXP_STATUS:
// TODO(dfc) scope warning!
err = normaliseError(unmarshalStatus(id, data))
done = true
default:
return nil, unimplementedPacketErr(typ)
}
}
if err == io.EOF {
err = nil
}
return attrs, err
}
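// dirNames is an illustrative sketch, not part of the package API.
// ReadDir already strips the "." and ".." entries, so the result maps
// one-to-one onto entry names.
func dirNames(c *Client, dir string) ([]string, error) {
	infos, err := c.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(infos))
	for _, fi := range infos {
		names = append(names, fi.Name())
	}
	return names, nil
}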
func (c *Client) opendir(path string) (string, error) {
id := c.nextID()
typ, data, err := c.sendPacket(sshFxpOpendirPacket{
ID: id,
Path: path,
})
if err != nil {
return "", err
}
switch typ {
case ssh_FXP_HANDLE:
sid, data := unmarshalUint32(data)
if sid != id {
return "", &unexpectedIDErr{id, sid}
}
handle, _ := unmarshalString(data)
return handle, nil
case ssh_FXP_STATUS:
return "", normaliseError(unmarshalStatus(id, data))
default:
return "", unimplementedPacketErr(typ)
}
}
// Stat returns a FileInfo structure describing the file specified by path 'p'.
// If 'p' is a symbolic link, the returned FileInfo structure describes the referent file.
func (c *Client) Stat(p string) (os.FileInfo, error) {
id := c.nextID()
typ, data, err := c.sendPacket(sshFxpStatPacket{
ID: id,
Path: p,
})
if err != nil {
return nil, err
}
switch typ {
case ssh_FXP_ATTRS:
sid, data := unmarshalUint32(data)
if sid != id {
return nil, &unexpectedIDErr{id, sid}
}
attr, _ := unmarshalAttrs(data)
return fileInfoFromStat(attr, path.Base(p)), nil
case ssh_FXP_STATUS:
return nil, normaliseError(unmarshalStatus(id, data))
default:
return nil, unimplementedPacketErr(typ)
}
}
// Lstat returns a FileInfo structure describing the file specified by path 'p'.
// If 'p' is a symbolic link, the returned FileInfo structure describes the symbolic link.
func (c *Client) Lstat(p string) (os.FileInfo, error) {
id := c.nextID()
typ, data, err := c.sendPacket(sshFxpLstatPacket{
ID: id,
Path: p,
})
if err != nil {
return nil, err
}
switch typ {
case ssh_FXP_ATTRS:
sid, data := unmarshalUint32(data)
if sid != id {
return nil, &unexpectedIDErr{id, sid}
}
attr, _ := unmarshalAttrs(data)
return fileInfoFromStat(attr, path.Base(p)), nil
case ssh_FXP_STATUS:
return nil, normaliseError(unmarshalStatus(id, data))
default:
return nil, unimplementedPacketErr(typ)
}
}
// ReadLink reads the target of a symbolic link.
func (c *Client) ReadLink(p string) (string, error) {
id := c.nextID()
typ, data, err := c.sendPacket(sshFxpReadlinkPacket{
ID: id,
Path: p,
})
if err != nil {
return "", err
}
switch typ {
case ssh_FXP_NAME:
sid, data := unmarshalUint32(data)
if sid != id {
return "", &unexpectedIDErr{id, sid}
}
count, data := unmarshalUint32(data)
if count != 1 {
return "", unexpectedCount(1, count)
}
filename, _ := unmarshalString(data) // ignore dummy attributes
return filename, nil
case ssh_FXP_STATUS:
return "", normaliseError(unmarshalStatus(id, data))
default:
return "", unimplementedPacketErr(typ)
}
}
// Symlink creates a symbolic link at 'newname', pointing at target 'oldname'.
func (c *Client) Symlink(oldname, newname string) error {
id := c.nextID()
typ, data, err := c.sendPacket(sshFxpSymlinkPacket{
ID: id,
Linkpath: newname,
Targetpath: oldname,
})
if err != nil {
return err
}
switch typ {
case ssh_FXP_STATUS:
return normaliseError(unmarshalStatus(id, data))
default:
return unimplementedPacketErr(typ)
}
}
// setstat is a convenience wrapper to allow changing various attributes of the named file.
func (c *Client) setstat(path string, flags uint32, attrs interface{}) error {
id := c.nextID()
typ, data, err := c.sendPacket(sshFxpSetstatPacket{
ID: id,
Path: path,
Flags: flags,
Attrs: attrs,
})
if err != nil {
return err
}
switch typ {
case ssh_FXP_STATUS:
return normaliseError(unmarshalStatus(id, data))
default:
return unimplementedPacketErr(typ)
}
}
// Chtimes changes the access and modification times of the named file.
func (c *Client) Chtimes(path string, atime time.Time, mtime time.Time) error {
type times struct {
Atime uint32
Mtime uint32
}
attrs := times{uint32(atime.Unix()), uint32(mtime.Unix())}
return c.setstat(path, ssh_FILEXFER_ATTR_ACMODTIME, attrs)
}
// Chown changes the user and group owners of the named file.
func (c *Client) Chown(path string, uid, gid int) error {
type owner struct {
UID uint32
GID uint32
}
attrs := owner{uint32(uid), uint32(gid)}
return c.setstat(path, ssh_FILEXFER_ATTR_UIDGID, attrs)
}
// Chmod changes the permissions of the named file.
func (c *Client) Chmod(path string, mode os.FileMode) error {
return c.setstat(path, ssh_FILEXFER_ATTR_PERMISSIONS, uint32(mode))
}
// Truncate sets the size of the named file. Although it may be safely assumed
// that if the size is less than its current size it will be truncated to fit,
// the SFTP protocol does not specify how the server should behave when setting
// a size greater than the current size.
func (c *Client) Truncate(path string, size int64) error {
return c.setstat(path, ssh_FILEXFER_ATTR_SIZE, uint64(size))
}
// Open opens the named file for reading. If successful, methods on the
// returned file can be used for reading; the associated file descriptor
// has mode O_RDONLY.
func (c *Client) Open(path string) (*File, error) {
return c.open(path, flags(os.O_RDONLY))
}
// OpenFile is the generalized open call; most users will use Open or
// Create instead. It opens the named file with specified flag (O_RDONLY
// etc.). If successful, methods on the returned File can be used for I/O.
func (c *Client) OpenFile(path string, f int) (*File, error) {
return c.open(path, flags(f))
}
func (c *Client) open(path string, pflags uint32) (*File, error) {
id := c.nextID()
typ, data, err := c.sendPacket(sshFxpOpenPacket{
ID: id,
Path: path,
Pflags: pflags,
})
if err != nil {
return nil, err
}
switch typ {
case ssh_FXP_HANDLE:
sid, data := unmarshalUint32(data)
if sid != id {
return nil, &unexpectedIDErr{id, sid}
}
handle, _ := unmarshalString(data)
return &File{c: c, path: path, handle: handle}, nil
case ssh_FXP_STATUS:
return nil, normaliseError(unmarshalStatus(id, data))
default:
return nil, unimplementedPacketErr(typ)
}
}
// close closes a handle previously returned in the response
// to SSH_FXP_OPEN or SSH_FXP_OPENDIR. The handle becomes invalid
// immediately after this request has been sent.
func (c *Client) close(handle string) error {
id := c.nextID()
typ, data, err := c.sendPacket(sshFxpClosePacket{
ID: id,
Handle: handle,
})
if err != nil {
return err
}
switch typ {
case ssh_FXP_STATUS:
return normaliseError(unmarshalStatus(id, data))
default:
return unimplementedPacketErr(typ)
}
}
func (c *Client) fstat(handle string) (*FileStat, error) {
id := c.nextID()
typ, data, err := c.sendPacket(sshFxpFstatPacket{
ID: id,
Handle: handle,
})
if err != nil {
return nil, err
}
switch typ {
case ssh_FXP_ATTRS:
sid, data := unmarshalUint32(data)
if sid != id {
return nil, &unexpectedIDErr{id, sid}
}
attr, _ := unmarshalAttrs(data)
return attr, nil
case ssh_FXP_STATUS:
return nil, normaliseError(unmarshalStatus(id, data))
default:
return nil, unimplementedPacketErr(typ)
}
}
// StatVFS retrieves VFS statistics from a remote host.
//
// It implements the statvfs@openssh.com SSH_FXP_EXTENDED feature
// from http://www.opensource.apple.com/source/OpenSSH/OpenSSH-175/openssh/PROTOCOL?txt.
func (c *Client) StatVFS(path string) (*StatVFS, error) {
// send the StatVFS packet to the server
id := c.nextID()
typ, data, err := c.sendPacket(sshFxpStatvfsPacket{
ID: id,
Path: path,
})
if err != nil {
return nil, err
}
switch typ {
// server responded with valid data
case ssh_FXP_EXTENDED_REPLY:
var response StatVFS
err = binary.Read(bytes.NewReader(data), binary.BigEndian, &response)
if err != nil {
return nil, errors.New("can not parse reply")
}
return &response, nil
// the request failed
case ssh_FXP_STATUS:
return nil, errors.New(fxp(ssh_FXP_STATUS).String())
default:
return nil, unimplementedPacketErr(typ)
}
}
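// freeSpace is a hypothetical example based on the statvfs@openssh.com
// reply layout, where free bytes = fragment size (Frsize) * free blocks
// (Bfree); the field names are assumed from that extension's definition.
func freeSpace(c *Client, p string) (uint64, error) {
	st, err := c.StatVFS(p)
	if err != nil {
		return 0, err
	}
	return st.Frsize * st.Bfree, nil
}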
// Join joins any number of path elements into a single path, adding a
// separating slash if necessary. The result is Cleaned; in particular, all
// empty strings are ignored.
func (c *Client) Join(elem ...string) string { return path.Join(elem...) }
// Remove removes the specified file or directory. An error will be returned if no
// file or directory with the specified path exists, or if the specified directory
// is not empty.
func (c *Client) Remove(path string) error {
err := c.removeFile(path)
if err, ok := err.(*StatusError); ok {
switch err.Code {
// some servers, *cough* osx *cough*, return EPERM, not ENODIR.
// serv-u returns ssh_FX_FILE_IS_A_DIRECTORY
case ssh_FX_PERMISSION_DENIED, ssh_FX_FAILURE, ssh_FX_FILE_IS_A_DIRECTORY:
return c.RemoveDirectory(path)
}
}
return err
}
func (c *Client) removeFile(path string) error {
id := c.nextID()
typ, data, err := c.sendPacket(sshFxpRemovePacket{
ID: id,
Filename: path,
})
if err != nil {
return err
}
switch typ {
case ssh_FXP_STATUS:
return normaliseError(unmarshalStatus(id, data))
default:
return unimplementedPacketErr(typ)
}
}
// RemoveDirectory removes a directory path.
func (c *Client) RemoveDirectory(path string) error {
id := c.nextID()
typ, data, err := c.sendPacket(sshFxpRmdirPacket{
ID: id,
Path: path,
})
if err != nil {
return err
}
switch typ {
case ssh_FXP_STATUS:
return normaliseError(unmarshalStatus(id, data))
default:
return unimplementedPacketErr(typ)
}
}
// Rename renames a file.
func (c *Client) Rename(oldname, newname string) error {
id := c.nextID()
typ, data, err := c.sendPacket(sshFxpRenamePacket{
ID: id,
Oldpath: oldname,
Newpath: newname,
})
if err != nil {
return err
}
switch typ {
case ssh_FXP_STATUS:
return normaliseError(unmarshalStatus(id, data))
default:
return unimplementedPacketErr(typ)
}
}
// PosixRename renames a file using the posix-rename@openssh.com extension
// which will replace newname if it already exists.
func (c *Client) PosixRename(oldname, newname string) error {
id := c.nextID()
typ, data, err := c.sendPacket(sshFxpPosixRenamePacket{
ID: id,
Oldpath: oldname,
Newpath: newname,
})
if err != nil {
return err
}
switch typ {
case ssh_FXP_STATUS:
return normaliseError(unmarshalStatus(id, data))
default:
return unimplementedPacketErr(typ)
}
}
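// renameReplacing is an illustrative sketch: it prefers the atomic
// posix-rename@openssh.com extension and falls back to the standard
// SSH_FXP_RENAME when the server rejects the extension. The fallback
// condition is deliberately simplistic for the example.
func renameReplacing(c *Client, oldname, newname string) error {
	if err := c.PosixRename(oldname, newname); err == nil {
		return nil
	}
	return c.Rename(oldname, newname)
}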
func (c *Client) realpath(path string) (string, error) {
id := c.nextID()
typ, data, err := c.sendPacket(sshFxpRealpathPacket{
ID: id,
Path: path,
})
if err != nil {
return "", err
}
switch typ {
case ssh_FXP_NAME:
sid, data := unmarshalUint32(data)
if sid != id {
return "", &unexpectedIDErr{id, sid}
}
count, data := unmarshalUint32(data)
if count != 1 {
return "", unexpectedCount(1, count)
}
filename, _ := unmarshalString(data) // ignore attributes
return filename, nil
case ssh_FXP_STATUS:
return "", normaliseError(unmarshalStatus(id, data))
default:
return "", unimplementedPacketErr(typ)
}
}
// Getwd returns the current working directory of the server. Operations
// involving relative paths will be based at this location.
func (c *Client) Getwd() (string, error) {
return c.realpath(".")
}
// Mkdir creates the specified directory. An error will be returned if a file or
// directory with the specified path already exists, or if the directory's
// parent folder does not exist (the method cannot create complete paths).
func (c *Client) Mkdir(path string) error {
id := c.nextID()
typ, data, err := c.sendPacket(sshFxpMkdirPacket{
ID: id,
Path: path,
})
if err != nil {
return err
}
switch typ {
case ssh_FXP_STATUS:
return normaliseError(unmarshalStatus(id, data))
default:
return unimplementedPacketErr(typ)
}
}
// MkdirAll creates a directory named path, along with any necessary parents,
// and returns nil, or else returns an error.
// If path is already a directory, MkdirAll does nothing and returns nil.
// If path contains a regular file, an error is returned.
func (c *Client) MkdirAll(path string) error {
// Most of this code mimics https://golang.org/src/os/path.go?s=514:561#L13
// Fast path: if we can tell whether path is a directory or file, stop with success or error.
dir, err := c.Stat(path)
if err == nil {
if dir.IsDir() {
return nil
}
return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
}
// Slow path: make sure parent exists and then call Mkdir for path.
i := len(path)
for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
i--
}
j := i
for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
j--
}
if j > 1 {
// Create parent
err = c.MkdirAll(path[0 : j-1])
if err != nil {
return err
}
}
// Parent now exists; invoke Mkdir and use its result.
err = c.Mkdir(path)
if err != nil {
// Handle arguments like "foo/." by
// double-checking that directory doesn't exist.
dir, err1 := c.Lstat(path)
if err1 == nil && dir.IsDir() {
return nil
}
return err
}
return nil
}
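// createWithParents is a hypothetical helper combining MkdirAll and
// Create: since MkdirAll is a no-op for directories that already exist,
// it can be called unconditionally before creating the file.
func createWithParents(c *Client, p string) (*File, error) {
	if err := c.MkdirAll(path.Dir(p)); err != nil {
		return nil, err
	}
	return c.Create(p)
}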
// applyOptions applies options functions to the Client.
// If an error is encountered, option processing ceases.
func (c *Client) applyOptions(opts ...ClientOption) error {
for _, f := range opts {
if err := f(c); err != nil {
return err
}
}
return nil
}
// File represents a remote file.
type File struct {
c *Client
path string
handle string
offset uint64 // current offset within remote file
}
// Close closes the File, rendering it unusable for I/O. It returns an
// error, if any.
func (f *File) Close() error {
return f.c.close(f.handle)
}
// Name returns the name of the file as presented to Open or Create.
func (f *File) Name() string {
return f.path
}
// Read reads up to len(b) bytes from the File. It returns the number of bytes
// read and an error, if any. Read follows io.Reader semantics, so when Read
// encounters an error or EOF condition after successfully reading n > 0 bytes,
// it returns the number of bytes read.
//
// To maximise throughput for transferring the entire file (especially
// over high latency links) it is recommended to use WriteTo rather
// than calling Read multiple times. io.Copy will do this
// automatically.
func (f *File) Read(b []byte) (int, error) {
// Split the read into multiple maxPacket sized concurrent reads
// bounded by maxConcurrentRequests. This allows reads with a suitably
// large buffer to transfer data at a much faster rate due to
// overlapping round trip times.
inFlight := 0
desiredInFlight := 1
offset := f.offset
// The channel is buffered with maxConcurrentRequests slots to absorb
// broadcastErr() floods; it must also hold the maximum value of
// (desiredInFlight - inFlight).
ch := make(chan result, f.c.maxConcurrentRequests+1)
type inflightRead struct {
b []byte
offset uint64
}
reqs := map[uint32]inflightRead{}
type offsetErr struct {
offset uint64
err error
}
var firstErr offsetErr
sendReq := func(b []byte, offset uint64) {
reqID := f.c.nextID()
f.c.dispatchRequest(ch, sshFxpReadPacket{
ID: reqID,
Handle: f.handle,
Offset: offset,
Len: uint32(len(b)),
})
inFlight++
reqs[reqID] = inflightRead{b: b, offset: offset}
}
var read int
for len(b) > 0 || inFlight > 0 {
for inFlight < desiredInFlight && len(b) > 0 && firstErr.err == nil {
l := min(len(b), f.c.maxPacket)
rb := b[:l]
sendReq(rb, offset)
offset += uint64(l)
b = b[l:]
}
if inFlight == 0 {
break
}
res := <-ch
inFlight--
if res.err != nil {
firstErr = offsetErr{offset: 0, err: res.err}
continue
}
reqID, data := unmarshalUint32(res.data)
req, ok := reqs[reqID]
if !ok {
firstErr = offsetErr{offset: 0, err: errors.Errorf("sid: %v not found", reqID)}
continue
}
delete(reqs, reqID)
switch res.typ {
case ssh_FXP_STATUS:
if firstErr.err == nil || req.offset < firstErr.offset {
firstErr = offsetErr{
offset: req.offset,
err: normaliseError(unmarshalStatus(reqID, res.data)),
}
}
case ssh_FXP_DATA:
l, data := unmarshalUint32(data)
n := copy(req.b, data[:l])
read += n
if n < len(req.b) {
sendReq(req.b[l:], req.offset+uint64(l))
}
if desiredInFlight < f.c.maxConcurrentRequests {
desiredInFlight++
}
default:
firstErr = offsetErr{offset: 0, err: unimplementedPacketErr(res.typ)}
}
}
// If the error is anything other than EOF, then there
// may be gaps in the data copied to the buffer so it's
// best to return 0 so the caller can't make any
// incorrect assumptions about the state of the buffer.
if firstErr.err != nil && firstErr.err != io.EOF {
read = 0
}
f.offset += uint64(read)
return read, firstErr.err
}
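// downloadTo is an illustrative sketch of the recommended transfer path:
// io.Copy detects that *File implements io.WriterTo and calls WriteTo,
// which issues concurrent reads instead of many small Read calls.
func downloadTo(c *Client, remote string, w io.Writer) (int64, error) {
	f, err := c.Open(remote)
	if err != nil {
		return 0, err
	}
	defer f.Close()
	return io.Copy(w, f)
}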
// WriteTo writes the file to w. The return value is the number of bytes
// written. Any error encountered during the write is also returned.
//
// This method is preferred over calling Read multiple times to
// maximise throughput for transferring the entire file (especially
// over high latency links).
func (f *File) WriteTo(w io.Writer) (int64, error) {
fi, err := f.Stat()
if err != nil {
return 0, err
}
inFlight := 0
desiredInFlight := 1
offset := f.offset
writeOffset := offset
fileSize := uint64(fi.Size())
// see comment on same line in Read() above
ch := make(chan result, f.c.maxConcurrentRequests+1)
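// inflightRead tracks one outstanding READ request: the buffer it will
// fill and the offset within the remote file that it covers.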
type inflightRead struct {
b []byte
offset uint64
}
reqs := map[uint32]inflightRead{}
pendingWrites := map[uint64][]byte{}
type offsetErr struct {
offset uint64
err error
}
var firstErr offsetErr
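// sendReq dispatches a single concurrent READ request for the given
// buffer and offset, and records it in reqs so that the response can be
// matched back to its offset.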
sendReq := func(b []byte, offset uint64) {
reqID := f.c.nextID()
f.c.dispatchRequest(ch, sshFxpReadPacket{
ID: reqID,
Handle: f.handle,
Offset: offset,
Len: uint32(len(b)),
})
inFlight++
reqs[reqID] = inflightRead{b: b, offset: offset}
}
var copied int64
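// Pump READ requests and drain responses. Once firstErr is set no new
// requests are issued, so the loop runs only until the remaining
// in-flight responses have been collected.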
for firstErr.err == nil || inFlight > 0 {
if firstErr.err == nil {
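// Issue more READs, counting buffered out-of-order responses
// against the window so that memory use stays bounded.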
for inFlight+len(pendingWrites) < desiredInFlight {
b := make([]byte, f.c.maxPacket)
sendReq(b, offset)
offset += uint64(f.c.maxPacket)
if offset > fileSize {
desiredInFlight = 1
}
}
}
if inFlight == 0 {
if firstErr.err == nil && len(pendingWrites) > 0 {
return copied, InternalInconsistency
}
break
}
res := <-ch
inFlight--
if res.err != nil {
firstErr = offsetErr{offset: 0, err: res.err}
continue
}
reqID, data := unmarshalUint32(res.data)
req, ok := reqs[reqID]
if !ok {
firstErr = offsetErr{offset: 0, err: errors.Errorf("sid: %v not found", reqID)}
continue
}
delete(reqs, reqID)
switch res.typ {
case ssh_FXP_STATUS:
if firstErr.err == nil || req.offset < firstErr.offset {
firstErr = offsetErr{offset: req.offset, err: normaliseError(unmarshalStatus(reqID, res.data))}
}
case ssh_FXP_DATA:
l, data := unmarshalUint32(data)
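// If this response is the next segment in order, write it straight
// through, then flush any buffered out-of-order segments that now
// line up behind it.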
if req.offset == writeOffset {
nbytes, err := w.Write(data)
copied += int64(nbytes)
if err != nil {
// We will never receive another DATA with offset==writeOffset, so
// the loop will drain inFlight and then exit.
firstErr = offsetErr{offset: req.offset + uint64(nbytes), err: err}
break
}
if nbytes < int(l) {
firstErr = offsetErr{offset: req.offset + uint64(nbytes), err: io.ErrShortWrite}
break
}
switch {
case offset > fileSize:
desiredInFlight = 1
case desiredInFlight < f.c.maxConcurrentRequests:
desiredInFlight++
}
writeOffset += uint64(nbytes)
for {
pendingData, ok := pendingWrites[writeOffset]
if !ok {
break
}
// Give Go a chance to free the memory.
delete(pendingWrites, writeOffset)
nbytes, err := w.Write(pendingData)
// Do not move writeOffset on error so subsequent iterations won't trigger
// any writes.
if err != nil {
firstErr = offsetErr{offset: writeOffset + uint64(nbytes), err: err}
break
}
if nbytes < len(pendingData) {
firstErr = offsetErr{offset: writeOffset + uint64(nbytes), err: io.ErrShortWrite}
break
}
writeOffset += uint64(nbytes)
}
} else {
// Don't write the data yet because
// this response came in out of order
// and we need to wait for responses
// for earlier segments of the file.
pendingWrites[req.offset] = data
}
default:
firstErr = offsetErr{offset: 0, err: unimplementedPacketErr(res.typ)}
}
}
if firstErr.err != io.EOF {
return copied, firstErr.err
}
return copied, nil
}
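// copyDownSketch is an editorial sketch, not part of the original source:
// it shows the intended fast path for downloads. io.Copy detects that
// *File implements io.WriterTo and delegates to WriteTo above, which
// pipelines concurrent READ requests instead of issuing one at a time.
func copyDownSketch(client *Client, remotePath, localPath string) (int64, error) {
	src, err := client.Open(remotePath)
	if err != nil {
		return 0, err
	}
	defer src.Close()

	dst, err := os.Create(localPath)
	if err != nil {
		return 0, err
	}
	defer dst.Close()

	// Delegates to src.WriteTo(dst).
	return io.Copy(dst, src)
}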
// Stat returns the FileInfo structure describing file. If there is an
// error, it is also returned.
func (f *File) Stat() (os.FileInfo, error) {
fs, err := f.c.fstat(f.handle)
if err != nil {
return nil, err
}
return fileInfoFromStat(fs, path.Base(f.path)), nil
}
// Write writes len(b) bytes to the File. It returns the number of bytes
// written and an error, if any. Write returns a non-nil error when n !=
// len(b).
//
// To maximise throughput for transferring the entire file (especially
// over high latency links) it is recommended to use ReadFrom rather
// than calling Write multiple times. io.Copy will do this
// automatically.
func (f *File) Write(b []byte) (int, error) {
// Split the write into multiple maxPacket sized concurrent writes
// bounded by maxConcurrentRequests. This allows writes with a suitably
// large buffer to transfer data at a much faster rate due to
// overlapping round trip times.
inFlight := 0
desiredInFlight := 1
offset := f.offset
// see comment on same line in Read() above
ch := make(chan result, f.c.maxConcurrentRequests+1)
var firstErr error
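// written optimistically assumes the whole buffer will be sent; it is
// zeroed below if any chunk fails.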
written := len(b)
for len(b) > 0 || inFlight > 0 {
for inFlight < desiredInFlight && len(b) > 0 && firstErr == nil {
l := min(len(b), f.c.maxPacket)
rb := b[:l]
f.c.dispatchRequest(ch, sshFxpWritePacket{
ID: f.c.nextID(),
Handle: f.handle,
Offset: offset,
Length: uint32(len(rb)),
Data: rb,
})
inFlight++
offset += uint64(l)
b = b[l:]
}
if inFlight == 0 {
break
}
res := <-ch
inFlight--
if res.err != nil {
firstErr = res.err
continue
}
switch res.typ {
case ssh_FXP_STATUS:
id, _ := unmarshalUint32(res.data)
err := normaliseError(unmarshalStatus(id, res.data))
if err != nil && firstErr == nil {
firstErr = err
break
}
if desiredInFlight < f.c.maxConcurrentRequests {
desiredInFlight++
}
default:
firstErr = unimplementedPacketErr(res.typ)
}
}
// If error is non-nil, then there may be gaps in the data written to
// the file so it's best to return 0 so the caller can't make any
// incorrect assumptions about the state of the file.
if firstErr != nil {
written = 0
}
f.offset += uint64(written)
return written, firstErr
}
// ReadFrom reads data from r until EOF and writes it to the file. The return
// value is the number of bytes read. Any error except io.EOF encountered
// during the read is also returned.
//
// This method is preferred over calling Write multiple times to
// maximise throughput for transferring the entire file (especially
// over high latency links).
func (f *File) ReadFrom(r io.Reader) (int64, error) {
inFlight := 0
desiredInFlight := 1
offset := f.offset
// see comment on same line in Read() above
ch := make(chan result, f.c.maxConcurrentRequests+1)
var firstErr error
read := int64(0)
b := make([]byte, f.c.maxPacket)
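// b is reused for every chunk; this assumes dispatchRequest finishes
// marshalling and sending the packet before returning, so the next
// r.Read cannot clobber bytes that are still being transmitted.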
for inFlight > 0 || firstErr == nil {
for inFlight < desiredInFlight && firstErr == nil {
n, err := r.Read(b)
if err != nil {
firstErr = err
}
f.c.dispatchRequest(ch, sshFxpWritePacket{
ID: f.c.nextID(),
Handle: f.handle,
Offset: offset,
Length: uint32(n),
Data: b[:n],
})
inFlight++
offset += uint64(n)
read += int64(n)
}
if inFlight == 0 {
break
}
res := <-ch
inFlight--
if res.err != nil {
firstErr = res.err
continue
}
switch res.typ {
case ssh_FXP_STATUS:
id, _ := unmarshalUint32(res.data)
err := normaliseError(unmarshalStatus(id, res.data))
if err != nil && firstErr == nil {
firstErr = err
break
}
if desiredInFlight < f.c.maxConcurrentRequests {
desiredInFlight++
}
default:
firstErr = unimplementedPacketErr(res.typ)
}
}
	// io.EOF from the source reader marks the normal end of the input and
	// is not an error to report to the caller.
	if firstErr == io.EOF {
firstErr = nil
}
	// If the error is non-nil, there may be gaps in the data written to
	// the file, so return 0 to keep the caller from making incorrect
	// assumptions about the state of the file.
if firstErr != nil {
read = 0
}
f.offset += uint64(read)
return read, firstErr
}
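
// exampleUpload is an illustrative sketch, not part of the API: it shows how
// io.Copy exercises the ReadFrom fast path above. The local and remote paths
// are hypothetical, and Client.Create is assumed to behave like os.Create.
func exampleUpload(c *Client) error {
	src, err := os.Open("/tmp/local.dat") // hypothetical local file
	if err != nil {
		return err
	}
	defer src.Close()

	dst, err := c.Create("upload.dat") // hypothetical remote path
	if err != nil {
		return err
	}
	defer dst.Close()

	// io.Copy detects that *File implements io.ReaderFrom and delegates to
	// dst.ReadFrom(src), which splits the transfer into concurrent writes.
	_, err = io.Copy(dst, src)
	return err
}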
// Seek implements io.Seeker by setting the client offset for the next Read or
// Write. It returns the new offset. Seeking before the start of the file or
// past its end is undefined. Seeking relative to the end requires a Stat
// round trip to determine the file size.
func (f *File) Seek(offset int64, whence int) (int64, error) {
switch whence {
case io.SeekStart:
f.offset = uint64(offset)
case io.SeekCurrent:
f.offset = uint64(int64(f.offset) + offset)
case io.SeekEnd:
fi, err := f.Stat()
if err != nil {
return int64(f.offset), err
}
f.offset = uint64(fi.Size() + offset)
default:
return int64(f.offset), unimplementedSeekWhence(whence)
}
return int64(f.offset), nil
}
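
// exampleReadTail is an illustrative sketch: it uses Seek with io.SeekEnd
// (which incurs the Stat round trip noted above) to read the final n bytes
// of a remote file. It assumes n is no larger than the file size; a larger
// n would wrap the unsigned offset.
func exampleReadTail(f *File, n int64) ([]byte, error) {
	if _, err := f.Seek(-n, io.SeekEnd); err != nil {
		return nil, err
	}
	buf := make([]byte, n)
	if _, err := io.ReadFull(f, buf); err != nil {
		return nil, err
	}
	return buf, nil
}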
// Chown changes the uid/gid of the current file.
func (f *File) Chown(uid, gid int) error {
return f.c.Chown(f.path, uid, gid)
}
// Chmod changes the permissions of the current file.
func (f *File) Chmod(mode os.FileMode) error {
return f.c.Chmod(f.path, mode)
}
// Truncate sets the size of the current file. Although it may be safely
// assumed that a size smaller than the current size truncates the file to
// fit, the SFTP protocol does not specify how the server should behave when
// the requested size is greater than the current size.
func (f *File) Truncate(size int64) error {
return f.c.Truncate(f.path, size)
}
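
// exampleTruncateChecked is an illustrative sketch: because growing a file is
// server-dependent (see Truncate above), it verifies the resulting size with
// a follow-up Stat and reports a mismatch as an error.
func exampleTruncateChecked(f *File, size int64) error {
	if err := f.Truncate(size); err != nil {
		return err
	}
	fi, err := f.Stat()
	if err != nil {
		return err
	}
	if fi.Size() != size {
		return errors.Errorf("server set size %d, want %d", fi.Size(), size)
	}
	return nil
}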
func min(a, b int) int {
if a > b {
return b
}
return a
}
// normaliseError normalises an error into a more standard form that can be
// checked against stdlib errors like io.EOF or os.ErrNotExist.
func normaliseError(err error) error {
switch err := err.(type) {
case *StatusError:
switch err.Code {
case ssh_FX_EOF:
return io.EOF
case ssh_FX_NO_SUCH_FILE:
return os.ErrNotExist
case ssh_FX_OK:
return nil
default:
return err
}
default:
return err
}
}
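
// exampleStatExists is an illustrative sketch: because normaliseError maps
// ssh_FX_NO_SUCH_FILE to os.ErrNotExist, a missing remote path can be
// detected with os.IsNotExist. The remotePath argument is hypothetical, and
// Client.Stat is assumed to return errors passed through normaliseError.
func exampleStatExists(c *Client, remotePath string) (bool, error) {
	_, err := c.Stat(remotePath)
	if os.IsNotExist(err) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}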
func unmarshalStatus(id uint32, data []byte) error {
sid, data := unmarshalUint32(data)
if sid != id {
return &unexpectedIDErr{id, sid}
}
	code, data := unmarshalUint32(data)
	// The message and language tag may be absent on the wire, so parse
	// errors for these trailing fields are deliberately ignored.
	msg, data, _ := unmarshalStringSafe(data)
	lang, _, _ := unmarshalStringSafe(data)
return &StatusError{
Code: code,
msg: msg,
lang: lang,
}
}
func marshalStatus(b []byte, err StatusError) []byte {
b = marshalUint32(b, err.Code)
b = marshalString(b, err.msg)
b = marshalString(b, err.lang)
return b
}
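
// exampleStatusRoundTrip is an illustrative sketch of the status wire layout
// handled above: a request id, followed by the code, message, and language
// tag written by marshalStatus. It assumes the ssh_FX_FAILURE constant and
// always returns the reconstructed *StatusError; it exists only to document
// the framing.
func exampleStatusRoundTrip() error {
	const id = 42
	b := marshalUint32(nil, id) // the request id precedes the status fields
	b = marshalStatus(b, StatusError{Code: ssh_FX_FAILURE, msg: "boom", lang: "en"})
	return unmarshalStatus(id, b)
}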
// flags converts the flags passed to OpenFile into ssh flags.
// Unsupported flags are ignored.
func flags(f int) uint32 {
var out uint32
	// os.O_RDONLY is zero, so the access mode is isolated by masking with
	// os.O_WRONLY rather than switching on f directly.
	switch f & os.O_WRONLY {
case os.O_WRONLY:
out |= ssh_FXF_WRITE
case os.O_RDONLY:
out |= ssh_FXF_READ
}
if f&os.O_RDWR == os.O_RDWR {
out |= ssh_FXF_READ | ssh_FXF_WRITE
}
if f&os.O_APPEND == os.O_APPEND {
out |= ssh_FXF_APPEND
}
if f&os.O_CREATE == os.O_CREATE {
out |= ssh_FXF_CREAT
}
if f&os.O_TRUNC == os.O_TRUNC {
out |= ssh_FXF_TRUNC
}
if f&os.O_EXCL == os.O_EXCL {
out |= ssh_FXF_EXCL
}
return out
}
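
// exampleFlags is an illustrative sketch of the mapping performed by flags
// above: a typical create-for-writing combination becomes the corresponding
// ssh_FXF_* bits.
func exampleFlags() bool {
	got := flags(os.O_WRONLY | os.O_CREATE | os.O_TRUNC)
	want := uint32(ssh_FXF_WRITE | ssh_FXF_CREAT | ssh_FXF_TRUNC)
	return got == want
}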