~bzr-pqm/bzr/bzr.dev


Viewing changes to bzrlib/transport/sftp.py

  • Committer: John Arbash Meinel
  • Date: 2006-07-25 17:25:26 UTC
  • mto: This revision was merged to the branch mainline in revision 1888.
  • Revision ID: john@arbash-meinel.com-20060725172526-e669e5b1a25d78e3
Try using paramiko.readv()

=== modified file 'bzrlib/transport/sftp.py'
@@ -456,9 +456,89 @@
         # We are going to iterate multiple times, we need a list
         offsets = list(offsets)
 
-        for offset, data in itertools.izip(offsets, fp.readv(offsets)):
-            yield offset[0], data
-
+        #return self._combine_and_readv(fp, offsets)
+        # paramiko.readv() doesn't support reads > 65536 bytes yet
+        # Check if any requests are > 64K, if so, we need to switch to
+        # the old seek + read method
+        big_requests = False
+        for start, length in offsets:
+            if length >= 65536:
+                big_requests = True
+                break
+        if big_requests:
+            return self._seek_and_read(fp, offsets)
+
+        return self._yield_simple_chunks(fp, offsets)
+
+    def _yield_simple_chunks(self, fp, offsets):
+        mutter('using plain paramiko.readv() for %d offsets' % (len(offsets),))
+        for (start, length), data in itertools.izip(offsets, fp.readv(offsets)):
+            assert length == len(data), \
+                'Incorrect length of data chunk: %s != %s' % (length, len(data))
+            yield start, data
+
+    def _combine_and_readv(self, fp, offsets):
+        """This tries to combine requests into larger requests.
+
+        And then read them using paramiko.readv(). paramiko.readv()
+        does not support ranges > 64K, so it caps the request size, and
+        just reads until it gets all the stuff it wants
+        """
+        sorted_offsets = sorted(offsets)
+
+        # turn the list of offsets into a stack
+        offset_stack = iter(offsets)
+        cur_offset_and_size = offset_stack.next()
+        coalesced = list(self._coalesce_offsets(sorted_offsets,
+                               limit=self._max_readv_combine,
+                               fudge_factor=self._bytes_to_read_before_seek,
+                               ))
+
+        requests = []
+        for c_offset in coalesced:
+            start = c_offset.start
+            length = c_offset.length
+            offset = 0
+
+            # We need to break this up into multiple requests
+            while offset < length:
+                next_size = min(length-offset, 60000)
+                requests.append((start+offset, next_size))
+                offset += next_size
+
+        # Cache the results, but only until they have been fulfilled
+        data_map = {}
+        cur_data = []
+        cur_data_len = 0
+        cur_coalesced_stack = iter(coalesced)
+        cur_coalesced = cur_coalesced_stack.next()
+
+        for data in fp.readv(requests):
+            cur_data += data
+            cur_data_len += len(data)
+
+            if cur_data_len < cur_coalesced.length:
+                continue
+            assert cur_data_len == cur_coalesced.length, \
+                "Somehow we read too much: %s != %s" % (cur_data_len,
+                                                        cur_coalesced.length)
+            data = ''.join(cur_data)
+            cur_data = []
+            cur_data_len = 0
+
+            for suboffset, subsize in cur_coalesced.ranges:
+                key = (cur_coalesced.start+suboffset, subsize)
+                data_map[key] = data[suboffset:suboffset+subsize]
+
+            # Now that we've read some data, see if we can yield anything back
+            while cur_offset_and_size in data_map:
+                this_data = data_map.pop(cur_offset_and_size)
+                yield cur_offset_and_size[0], this_data
+                cur_offset_and_size = offset_stack.next()
+
+            # Now that we've read all of the data for this coalesced section
+            # on to the next
+            cur_coalesced = cur_coalesced_stack.next()
 
     def put(self, relpath, f, mode=None):
         """