~bzr-pqm/bzr/bzr.dev


Viewing changes to bzrlib/transport/sftp.py

  • Committer: John Arbash Meinel
  • Date: 2006-07-26 19:35:23 UTC
  • mto: This revision was merged to the branch mainline in revision 1888.
  • Revision ID: john@arbash-meinel.com-20060726193523-8eb718ca3c9eb9e4
Some small cleanups.


@@ -325,7 +325,12 @@
     # 8KiB had good performance for both local and remote network operations
     _bytes_to_read_before_seek = 8192
 
-    _max_chunk_size = 32768 # All that is guaranteed by the sftp spec
+    # The sftp spec says that implementations SHOULD allow reads
+    # to be at least 32K. paramiko.readv() does an async request
+    # for the chunks. So we need to keep it within a single request
+    # size for paramiko <= 1.6.1. paramiko 1.6.2 will probably chop
+    # up the request itself, rather than us having to worry about it
+    _max_request_size = 32768
 
     def __init__(self, base, clone_from=None):
         assert base.startswith('sftp://')
@@ -491,7 +496,7 @@
         #
         # TODO: jam 20060725 This could be optimized one step further, by
         #       attempting to yield whatever data we have read, even before
-        #       the first section has been fully processed.
+        #       the first coallesced section has been fully processed.
 
         # When coalescing for use with readv(), we don't really need to
         # use any fudge factor, because the requests are made asynchronously
@@ -506,7 +511,7 @@
 
             # We need to break this up into multiple requests
             while size > 0:
-                next_size = min(size, self._max_chunk_size)
+                next_size = min(size, self._max_request_size)
                 requests.append((start, next_size))
                 size -= next_size
                 start += next_size
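
The loop in the hunk above caps each request at _max_request_size so that no single paramiko read exceeds the 32K the sftp spec guarantees. A minimal standalone sketch of that splitting logic follows; the function name split_for_readv and the 100,000-byte example are illustrative only, not part of bzrlib:

def split_for_readv(start, size, max_request_size=32768):
    """Split one coalesced (start, size) read into spec-sized requests."""
    requests = []
    while size > 0:
        next_size = min(size, max_request_size)
        requests.append((start, next_size))
        size -= next_size
        start += next_size
    return requests

# A 100,000 byte read at offset 0 becomes four requests, none larger than 32KiB:
# [(0, 32768), (32768, 32768), (65536, 32768), (98304, 1696)]
print(split_for_readv(0, 100000))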
@@ -535,13 +540,13 @@
             assert cur_data_len == cur_coalesced.length, \
                 "Somehow we read too much: %s != %s" % (cur_data_len,
                                                         cur_coalesced.length)
-            data = ''.join(cur_data)
+            all_data = ''.join(cur_data)
             cur_data = []
             cur_data_len = 0
 
             for suboffset, subsize in cur_coalesced.ranges:
                 key = (cur_coalesced.start+suboffset, subsize)
-                data_map[key] = data[suboffset:suboffset+subsize]
+                data_map[key] = all_data[suboffset:suboffset+subsize]
 
             # Now that we've read some data, see if we can yield anything back
             while cur_offset_and_size in data_map:
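
The renamed all_data buffer holds everything read for one coalesced range; the inner loop then slices the originally requested sub-ranges back out of it and files them in data_map. A rough, self-contained sketch of that bookkeeping, assuming a simple stand-in object with a start offset and (suboffset, subsize) pairs (the CoalescedRange class and the sample data here are illustrative, not bzrlib's own types):

class CoalescedRange(object):
    """Stand-in for a coalesced read: absolute start plus relative sub-ranges."""
    def __init__(self, start, ranges):
        self.start = start
        self.ranges = ranges          # list of (suboffset, subsize)
        self.length = max(off + size for off, size in ranges)

# Pretend the transport read the whole coalesced range as a list of chunks.
cur_data = ['abcde', 'fghij']
all_data = ''.join(cur_data)

cur_coalesced = CoalescedRange(start=100, ranges=[(0, 4), (6, 4)])
assert len(all_data) == cur_coalesced.length

data_map = {}
for suboffset, subsize in cur_coalesced.ranges:
    # Keys are (absolute offset, size); values are exactly the bytes requested.
    key = (cur_coalesced.start + suboffset, subsize)
    data_map[key] = all_data[suboffset:suboffset + subsize]

print(data_map)   # -> {(100, 4): 'abcd', (106, 4): 'ghij'}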