def ruin_bloom(bloomfilename):
rbloomfilename = git.repo_rel(bloomfilename)
if not os.path.exists(bloomfilename):
- log("%s\n" % bloomfilename)
- add_error("bloom: %s not found to ruin\n" % rbloomfilename)
+ log(bloomfilename)
+ add_error("bloom: %s not found to ruin" % rbloomfilename)
return
b = bloom.ShaBloom(bloomfilename, readwrite=True, expected=1)
b.map[16:16+2**b.bits] = '\0' * 2**b.bits
rbloomfilename = git.repo_rel(bloomfilename)
ridx = git.repo_rel(idx)
if not os.path.exists(bloomfilename):
- log("bloom: %s: does not exist.\n" % rbloomfilename)
+ log("bloom: %s: does not exist." % rbloomfilename)
return
b = bloom.ShaBloom(bloomfilename)
if not b.valid():
- add_error("bloom: %r is invalid.\n" % rbloomfilename)
+ add_error("bloom: %r is invalid." % rbloomfilename)
return
base = os.path.basename(idx)
if base not in b.idxnames:
- log("bloom: %s does not contain the idx.\n" % rbloomfilename)
+ log("bloom: %s does not contain the idx." % rbloomfilename)
return
if base == idx:
idx = os.path.join(path, idx)
- log("bloom: bloom file: %s\n" % rbloomfilename)
- log("bloom: checking %s\n" % ridx)
+ log("bloom: bloom file: %s" % rbloomfilename)
+ log("bloom: checking %s" % ridx)
for objsha in git.open_idx(idx):
if not b.exists(objsha):
add_error("bloom: ERROR: object %s missing"
if os.path.exists(outfilename) and not opt.force:
b = bloom.ShaBloom(outfilename)
if not b.valid():
- debug1("bloom: Existing invalid bloom found, regenerating.\n")
+ debug1("bloom: Existing invalid bloom found, regenerating.")
b = None
add = []
add_count = 0
rest_count = 0
for i,name in enumerate(glob.glob('%s/*.idx' % path)):
- progress('bloom: counting: %d\r' % i)
+ progress_update('bloom: counting (%d) ...' % i)
ix = git.open_idx(name)
ixbase = os.path.basename(name)
if b and (ixbase in b.idxnames):
total = add_count + rest_count
if not add:
- debug1("bloom: nothing to do.\n")
+ debug1("bloom: nothing to do.")
return
if b:
if len(b) != rest_count:
- debug1("bloom: size %d != idx total %d, regenerating\n"
+ debug1("bloom: size %d != idx total %d, regenerating"
% (len(b), rest_count))
b = None
elif (b.bits < bloom.MAX_BLOOM_BITS and
msg = b is None and 'creating from' or 'adding'
if not _first: _first = path
dirprefix = (_first != path) and git.repo_rel(path)+': ' or ''
- progress('bloom: %s%s %d file%s (%d object%s).\n'
+ progress_update('bloom: %s%s %d file%s (%d object%s) ...'
% (dirprefix, msg,
len(add), len(add)!=1 and 's' or '',
add_count, add_count!=1 and 's' or ''))
icount = 0
for name in add:
ix = git.open_idx(name)
- qprogress('bloom: writing %.2f%% (%d/%d objects)\r'
- % (icount*100.0/add_count, icount, add_count))
+ progress_update('bloom: writing %.2f%% (%d/%d objects) ...'
+ % (icount*100.0/add_count, icount, add_count), False)
b.add_idx(ix)
count += 1
if tfname:
os.rename(tfname, outfilename)
+ progress_end('bloom: %s%s %d file%s (%d object%s), done.'
+ % (dirprefix, msg,
+ len(add), len(add)!=1 and 's' or '',
+ add_count, add_count!=1 and 's' or ''))
+
handle_ctrl_c()
paths = opt.dir and [opt.dir] or git.all_packdirs()
for path in paths:
- debug1('bloom: scanning %s\n' % path)
+ debug1('bloom: scanning %s' % path)
outfilename = opt.output or os.path.join(path, 'bup.bloom')
if opt.check:
check_bloom(path, outfilename, opt.check)
do_bloom(path, outfilename)
if saved_errors:
- log('WARNING: %d errors encountered during bloom.\n' % len(saved_errors))
+ log('WARNING: %d errors encountered during bloom.' % len(saved_errors))
sys.exit(1)
elif opt.check:
- log('All tests passed.\n')
+ log('All tests passed.')
o.fatal('%r is not a plain file' % target)
if saved_errors:
- log('warning: %d errors encountered\n' % len(saved_errors))
+ log('warning: %d errors encountered' % len(saved_errors))
sys.exit(1)
continue
try:
if af == socket.AF_INET6:
- log("bup daemon: listening on [%s]:%s\n" % sa[:2])
+ log("bup daemon: listening on [%s]:%s" % sa[:2])
else:
- log("bup daemon: listening on %s:%s\n" % sa[:2])
+ log("bup daemon: listening on %s:%s" % sa[:2])
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(sa)
s.listen(1)
socks.append(s)
if not socks:
- log('bup daemon: listen socket: %s\n' % e.args[1])
+ log('bup daemon: listen socket: %s' % e.args[1])
sys.exit(1)
try:
for l in rl:
s, src = l.accept()
try:
- log("Socket accepted connection from %s\n" % (src,))
+ log("Socket accepted connection from %s" % (src,))
fd1 = os.dup(s.fileno())
fd2 = os.dup(s.fileno())
s.close()
random.seed(opt.seed)
for name in extra:
- log('Damaging "%s"...\n' % name)
+ log('Damaging "%s"...' % name)
f = open(name, 'r+b')
st = os.fstat(f.fileno())
size = st.st_size
ofs = r*chunksize
else:
ofs = random.randrange(0, size - sz + 1)
- log(' %6d bytes at %d\n' % (sz, ofs))
+ log(' %6d bytes at %d' % (sz, ofs))
f.seek(ofs)
f.write(randblock(sz))
f.close()
print name
if saved_errors:
- log('WARNING: %d errors encountered.\n' % len(saved_errors))
+ log('WARNING: %d errors encountered.' % len(saved_errors))
sys.exit(1)
stdout=nullf, stderr=nullf, stdin=nullf)
rv = p.wait()
except OSError:
- log('fsck: warning: par2 not found; disabling recovery features.\n')
+ log('fsck: warning: par2 not found; disabling recovery features.')
else:
par2_ok = 1
try:
quick_verify(base)
except Exception, e:
- debug('error: %s\n' % e)
+ debug('error: %s' % e)
return 1
return 0
else:
rresult = par2_repair(base)
if rresult != 0:
action_result = 'failed'
- log('%s par2 repair: failed (%d)\n' % (last, rresult))
+ log('%s par2 repair: failed (%d)' % (last, rresult))
code = rresult
else:
action_result = 'repaired'
- log('%s par2 repair: succeeded (0)\n' % last)
+ log('%s par2 repair: succeeded (0)' % last)
code = 100
else:
action_result = 'failed'
- log('%s par2 verify: failed (%d)\n' % (last, vresult))
+ log('%s par2 verify: failed (%d)' % (last, vresult))
code = vresult
else:
action_result = 'ok'
gresult = git_verify(base)
if gresult != 0:
action_result = 'failed'
- log('%s git verify: failed (%d)\n' % (last, gresult))
+ log('%s git verify: failed (%d)' % (last, gresult))
code = gresult
else:
if par2_ok and opt.generate:
presult = par2_generate(base)
if presult != 0:
action_result = 'failed'
- log('%s par2 create: failed (%d)\n' % (last, presult))
+ log('%s par2 create: failed (%d)' % (last, presult))
code = presult
else:
action_result = 'generated'
git.check_repo_or_die()
if not extra:
- debug('fsck: No filenames given: checking all packs.\n')
+ debug('fsck: No filenames given: checking all packs.')
extra = glob.glob(git.repo('objects/pack/*.pack'))
code = 0
if par2_exists and os.stat(base + '.par2').st_size == 0:
par2_exists = 0
sys.stdout.flush()
- debug('fsck: checking %s (%s)\n'
+ debug('fsck: checking %s (%s)'
% (last, par2_ok and par2_exists and 'par2' or 'git'))
if not opt.verbose:
- progress('fsck (%d/%d)\r' % (count, len(extra)))
-
+ progress_update('fsck (%d/%d) ...' % (count + 1, len(extra)))
+
if not opt.jobs:
nc = do_pack(base, last, par2_exists)
code = code or nc
try:
sys.exit(do_pack(base, last, par2_exists))
except Exception, e:
- log('exception: %r\n' % e)
+ log('exception: %r' % e)
sys.exit(99)
while len(outstanding):
code = code or nc
count += 1
if not opt.verbose:
- progress('fsck (%d/%d)\r' % (count, len(extra)))
+ progress_update('fsck (%d/%d) ...' % (count + 1, len(extra)))
-if istty2:
- debug('fsck done. \n')
+progress_end('fsck done.')
sys.exit(code)
def _completer_get_subs(line):
(qtype, lastword) = shquote.unfinished_word(line)
(dir,name) = os.path.split(lastword)
- #log('\ncompleter: %r %r %r\n' % (qtype, lastword, text))
+ #log('completer: %r %r %r' % (qtype, lastword, text))
try:
n = pwd.resolve(dir)
subs = list(filter(lambda x: x.name.startswith(name),
terminate=True) + ' '
return text + ret
except Exception, e:
- log('\n')
+ log('')
try:
import traceback
traceback.print_tb(sys.exc_traceback)
except Exception, e2:
- log('Error printing traceback: %s\n' % e2)
- log('\nError in completion: %s\n' % e)
+ log('Error printing traceback: %s' % e2)
+ log('Error in completion: %s' % e)
optspec = """
try:
import readline
except ImportError:
- log('* readline module not available: line editing disabled.\n')
+ log('* readline module not available: line editing disabled.')
readline = None
if readline:
continue
words = [word for (wordstart,word) in shquote.quotesplit(line)]
cmd = words[0].lower()
- #log('execute: %r %r\n' % (cmd, parm))
+ #log('execute: %r %r' % (cmd, parm))
try:
if cmd == 'ls':
do_ls(words[1:])
(dir,base) = os.path.split(rname)
lname = len(words)>2 and words[2] or base
inf = pwd.resolve(rname).open()
- log('Saving %r\n' % lname)
+ log('Saving %r' % lname)
write_to_file(inf, open(lname, 'wb'))
elif cmd == 'mget':
for parm in words[1:]:
for n in pwd.resolve(dir).subs():
if fnmatch.fnmatch(n.name, base):
try:
- log('Saving %r\n' % n.name)
+ log('Saving %r' % n.name)
inf = n.open()
outf = open(n.name, 'wb')
write_to_file(inf, outf)
outf.close()
except Exception, e:
rv = 1
- log(' error: %s\n' % e)
+ log(' error: %s' % e)
elif cmd == 'help' or cmd == '?':
- log('Commands: ls cd pwd cat get mget help quit\n')
+ log('Commands: ls cd pwd cat get mget help quit')
elif cmd == 'quit' or cmd == 'exit' or cmd == 'bye':
break
else:
raise Exception('no such command %r' % cmd)
except Exception, e:
rv = 1
- log('error: %s\n' % e)
+ log('error: %s' % e)
#raise
sys.exit(rv)
try:
import fuse
except ImportError:
- log('error: cannot find the python "fuse" module; please install it\n')
+ log('error: cannot find the python "fuse" module; please install it')
sys.exit(1)
cache[('',)] = top
c = None
max = len(parts)
- #log('cache: %r\n' % cache.keys())
+ #log('cache: %r' % cache.keys())
for i in range(max):
pre = parts[:max-i]
- #log('cache trying: %r\n' % pre)
+ #log('cache trying: %r' % pre)
c = cache.get(tuple(pre))
if c:
rest = parts[max-i:]
for r in rest:
- #log('resolving %r from %r\n' % (r, c.fullname()))
+ #log('resolving %r from %r' % (r, c.fullname()))
c = c.lresolve(r)
key = tuple(pre + [r])
- #log('saving: %r\n' % (key,))
+ #log('saving: %r' % (key,))
cache[key] = c
break
assert(c)
self.meta = meta
def getattr(self, path):
- log('--getattr(%r)\n' % path)
+ log('--getattr(%r)' % path)
try:
node = cache_get(self.top, path)
st = Stat()
return -errno.ENOENT
def readdir(self, path, offset):
- log('--readdir(%r)\n' % path)
+ log('--readdir(%r)' % path)
node = cache_get(self.top, path)
yield fuse.Direntry('.')
yield fuse.Direntry('..')
yield fuse.Direntry(sub.name)
def readlink(self, path):
- log('--readlink(%r)\n' % path)
+ log('--readlink(%r)' % path)
node = cache_get(self.top, path)
return node.readlink()
def open(self, path, flags):
- log('--open(%r)\n' % path)
+ log('--open(%r)' % path)
node = cache_get(self.top, path)
accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
if (flags & accmode) != os.O_RDONLY:
node.open()
def release(self, path, flags):
- log('--release(%r)\n' % path)
+ log('--release(%r)' % path)
def read(self, path, size, offset):
- log('--read(%r)\n' % path)
+ log('--read(%r)' % path)
n = cache_get(self.top, path)
o = n.open()
o.seek(offset)
def check_index(reader):
try:
- log('check: checking forward iteration...\n')
+ log('check: checking forward iteration...')
e = None
d = {}
for e in reader.forward_iter():
if e.children_n:
if opt.verbose:
- log('%08x+%-4d %r\n' % (e.children_ofs, e.children_n,
+ log('%08x+%-4d %r' % (e.children_ofs, e.children_n,
e.name))
assert(e.children_ofs)
assert(e.name.endswith('/'))
assert(e.sha != index.EMPTY_SHA)
assert(e.gitmode)
assert(not e or e.name == '/') # last entry is *always* /
- log('check: checking normal iteration...\n')
+ log('check: checking normal iteration...')
last = None
for e in reader:
if last:
assert(last > e.name)
last = e.name
except:
- log('index error! at %r\n' % e)
+ log('index error! at %r' % e)
raise
- log('check: passed.\n')
+ log('check: passed.')
def clear_index(indexfile):
try:
os.remove(path)
if opt.verbose:
- log('clear: removed %s\n' % path)
+ log('clear: removed %s' % path)
except OSError, e:
if e.errno != errno.ENOENT:
raise
sys.stdout.flush()
elapsed = time.time() - index_start
paths_per_sec = total / elapsed if elapsed else 0
- qprogress('Indexing: %d (%d paths/s)\r' % (total, paths_per_sec))
+ progress_update('Indexing: %d (%d paths/s) ...'
+ % (total, paths_per_sec), False)
elif not (total % 128):
elapsed = time.time() - index_start
paths_per_sec = total / elapsed if elapsed else 0
- qprogress('Indexing: %d (%d paths/s)\r' % (total, paths_per_sec))
+ progress_update('Indexing: %d (%d paths/s) ...'
+ % (total, paths_per_sec), False)
total += 1
while rig.cur and rig.cur.name > path: # deleted paths
if rig.cur.exists():
elapsed = time.time() - index_start
paths_per_sec = total / elapsed if elapsed else 0
- progress('Indexing: %d, done (%d paths/s).\n' % (total, paths_per_sec))
+ progress_end('Indexing: %d, done (%d paths/s).' % (total, paths_per_sec))
hlinks.prepare_save()
if wi.count:
wr = wi.new_reader()
if opt.check:
- log('check: before merging: oldfile\n')
+ log('check: before merging: oldfile')
check_index(ri)
- log('check: before merging: newfile\n')
+ log('check: before merging: newfile')
check_index(wr)
mi = index.Writer(indexfile, msw, tmax)
handle_ctrl_c()
if opt.check:
- log('check: starting initial check.\n')
+ log('check: starting initial check.')
check_index(index.Reader(indexfile))
if opt.clear:
- log('clear: clearing index.\n')
+ log('clear: clearing index.')
clear_index(indexfile)
excluded_paths = parse_excludes(flags, o.fatal)
print line + (name or './')
if opt.check and (opt['print'] or opt.status or opt.modified or opt.update):
- log('check: starting final check.\n')
+ log('check: starting final check.')
check_index(index.Reader(indexfile))
if saved_errors:
- log('WARNING: %d errors encountered.\n' % len(saved_errors))
+ log('WARNING: %d errors encountered.' % len(saved_errors))
sys.exit(1)
outfile.write(blob)
except KeyError, e:
outfile.flush()
- log('error: %s\n' % e)
+ log('error: %s' % e)
ret = 1
sys.exit(ret)
i = str(_i).encode('hex')
if i.startswith(find):
print name, i
- qprogress('Searching: %d\r' % count)
+ progress_update('Searching: %d ...' % count, False)
count += 1
+ progress_end('Searching done.')
if saved_errors:
- log('WARNING: %d errors encountered while saving.\n' % len(saved_errors))
+ log('WARNING: %d errors encountered.' % len(saved_errors))
sys.exit(1)
expected = prefix * total / (1<<64)
diff = count - expected
maxdiff = max(maxdiff, abs(diff))
- print '%d of %d (%.3f%%) ' % (maxdiff, len(ix), maxdiff*100.0/len(ix))
- sys.stdout.flush()
+ log('%d of %d (%.3f%%)' % (maxdiff, len(ix), maxdiff*100.0/len(ix)))
assert(count+1 == len(ix))
if opt.predict:
pm = _helpers.bitmatch(last, i)
longmatch = max(longmatch, pm)
last = i
- print longmatch
- log('%d matching prefix bits\n' % longmatch)
+ log('')
+ log('%d matching prefix bits' % longmatch)
doublings = math.log(len(mi), 2)
bpd = longmatch / doublings
- log('%.2f bits per doubling\n' % bpd)
+ log('%.2f bits per doubling' % bpd)
remain = 160 - longmatch
rdoublings = remain / bpd
- log('%d bits (%.2f doublings) remaining\n' % (remain, rdoublings))
+ log('%d bits (%.2f doublings) remaining' % (remain, rdoublings))
larger = 2**rdoublings
- log('%g times larger is possible\n' % larger)
+ log('%g times larger is possible' % larger)
perperson = larger/POPULATION_OF_EARTH
- log('\nEveryone on earth could have %d data sets like yours, all in one\n'
- 'repository, and we would expect 1 object collision.\n'
- % int(perperson))
+ log('')
+ log('Everyone on earth could have %d data sets like yours,' % int(perperson))
+ log('all in one repository, and we would expect 1 object collision.')
f = open('/proc/self/status')
except IOError, e:
if not _linux_warned:
- log('Warning: %s\n' % e)
+ log('Warning: %s' % e)
_linux_warned = 1
return {}
for line in f:
if saved_errors:
- log('WARNING: %d errors encountered.\n' % len(saved_errors))
+ log('WARNING: %d errors encountered.' % len(saved_errors))
sys.exit(1)
else:
sys.exit(0)
def check_midx(name):
nicename = git.repo_rel(name)
- log('Checking %s.\n' % nicename)
+ log('Checking %s.' % nicename)
try:
ix = git.open_idx(name)
except git.GitError, e:
sub = git.open_idx(os.path.join(os.path.dirname(name), subname))
for ecount,e in enumerate(sub):
if not (ecount % 1234):
- qprogress(' %d/%d: %s %d/%d\r'
+ progress_update(' %d/%d: %s %d/%d ...'
% (count, len(ix.idxnames),
- git.shorten_hash(subname), ecount, len(sub)))
+ git.shorten_hash(subname), ecount, len(sub)), False)
if not sub.exists(e):
add_error("%s: %s: %s missing from idx"
% (nicename, git.shorten_hash(subname),
prev = None
for ecount,e in enumerate(ix):
if not (ecount % 1234):
- qprogress(' Ordering: %d/%d\r' % (ecount, len(ix)))
+ progress_update(' Ordering: %d/%d ...' % (ecount, len(ix)), False)
if not e >= prev:
add_error('%s: ordering error: %s < %s'
% (nicename,
if not _first: _first = outdir
dirprefix = (_first != outdir) and git.repo_rel(outdir)+': ' or ''
- debug1('midx: %s%screating from %d files (%d objects).\n'
+ debug1('midx: %s%screating from %d files (%d objects).'
% (dirprefix, prefixstr, len(infilenames), total))
if (opt.auto and (total < 1024 and len(infilenames) < 3)) \
or ((opt.auto or opt.force) and len(infilenames) < 2) \
or (opt.force and not total):
- debug1('midx: nothing to do.\n')
+ debug1('midx: nothing to do.')
return
pages = int(total/SHA_PER_PAGE) or 1
bits = int(math.ceil(math.log(pages, 2)))
entries = 2**bits
- debug1('midx: table size: %d (%d bits)\n' % (entries*4, bits))
+ debug1('midx: table size: %d (%d bits)' % (entries*4, bits))
unlink(outfilename)
with atomically_replaced_file(outfilename, 'wb') as f:
already[iname] = 1
any = 1
if not any:
- debug1('%r is redundant\n' % mname)
+ debug1('%r is redundant' % mname)
unlink(mname)
already[mname] = 1
DESIRED_HWM = opt.force and 1 or 5
DESIRED_LWM = opt.force and 1 or 2
existed = dict((name,1) for sz,name in all)
- debug1('midx: %d indexes; want no more than %d.\n'
+ debug1('midx: %d indexes; want no more than %d.'
% (len(all), DESIRED_HWM))
if len(all) <= DESIRED_HWM:
- debug1('midx: nothing to do.\n')
+ debug1('midx: nothing to do.')
while len(all) > DESIRED_HWM:
all.sort()
part1 = [name for sz,name in all[:len(all)-DESIRED_LWM+1]]
part2 = all[len(all)-DESIRED_LWM+1:]
all = list(do_midx_group(path, part1)) + part2
if len(all) > DESIRED_HWM:
- debug1('\nStill too many indexes (%d > %d). Merging again.\n'
+ debug1('Still too many indexes (%d > %d). Merging again.'
% (len(all), DESIRED_HWM))
if opt['print']:
midxes = []
paths = opt.dir and [opt.dir] or git.all_packdirs()
for path in paths:
- debug1('midx: scanning %s\n' % path)
+ debug1('midx: scanning %s' % path)
midxes += glob.glob(os.path.join(path, '*.midx'))
for name in midxes:
check_midx(name)
if not saved_errors:
- log('All tests passed.\n')
+ log('All tests passed.')
else:
if extra:
do_midx(git.repo('objects/pack'), opt.output, extra, '')
elif opt.auto or opt.force:
paths = opt.dir and [opt.dir] or git.all_packdirs()
for path in paths:
- debug1('midx: scanning %s\n' % path)
+ debug1('midx: scanning %s' % path)
do_midx_dir(path)
else:
o.fatal("you must use -f or -a or provide input filenames")
if saved_errors:
- log('WARNING: %d errors encountered.\n' % len(saved_errors))
+ log('WARNING: %d errors encountered.' % len(saved_errors))
sys.exit(1)
subcmd = extra
-debug2('bup mux: starting %r\n' % (extra,))
+debug2('bup mux: starting %r' % (extra,))
outr, outw = os.pipe()
errr, errw = os.pipe()
prv = p.wait()
if prv:
- debug1('%s exited with code %d\n' % (extra[0], prv))
+ debug1('%s exited with code %d' % (extra[0], prv))
-debug1('bup mux: done\n')
+debug1('bup mux: done')
sys.exit(prv)
sp.wait()
break
except SigException, e:
- log('\nbup on: %s\n' % e)
+ log('bup on: %s' % e)
os.kill(p.pid, e.signum)
ret = 84
sys.exit(ret)
_helpers.write_random(sys.stdout.fileno(), total, opt.seed,
opt.verbose and 1 or 0)
else:
- log('error: not writing binary data to a terminal. Use -f to force.\n')
+ log('error: not writing binary data to a terminal. Use -f to force.')
sys.exit(1)
print s
-def plog(s):
- if opt.quiet:
- return
- qprogress(s)
-
-
def valid_restore_path(path):
path = os.path.normpath(path)
if path.startswith('/'):
root_meta = metadata.Metadata.read(meta_stream)
print_info(n, '.')
total_restored += 1
- plog('Restoring: %d\r' % total_restored)
+ if not opt.quiet:
+ progress_update('Restoring: %d ...' % total_restored, False)
for sub in n:
m = None
# Don't get metadata if this is a dir -- handled in sub do_node().
write_content(fullname, n)
total_restored += 1
- plog('Restoring: %d\r' % total_restored)
+ if not opt.quiet:
+ progress_update('Restoring: %d ...' % total_restored, False)
for sub in n:
m = None
# Don't get metadata if this is a dir -- handled in sub do_node().
do_node(n.parent, n, opt.sparse, owner_map, meta = meta)
if not opt.quiet:
- progress('Restoring: %d, done.\n' % total_restored)
+ progress_end('Restoring: %d, done.' % total_restored)
if saved_errors:
- log('WARNING: %d errors encountered while restoring.\n' % len(saved_errors))
+ log('WARNING: %d errors encountered while restoring.' % len(saved_errors))
sys.exit(1)
remainstr = ''
kpsstr = ''
else:
- kpsstr = '%dk/s' % kps
+ kpsstr = '%dk/s ' % kps
if hours:
- remainstr = '%dh%dm' % (hours, mins)
+ remainstr = '%dh%dm ' % (hours, mins)
elif mins:
- remainstr = '%dm%d' % (mins, secs)
+ remainstr = '%dm%d ' % (mins, secs)
else:
- remainstr = '%ds' % secs
- qprogress('Saving: %.2f%% (%d/%dk, %d/%d files) %s %s\r'
+ remainstr = '%ds ' % secs
+ progress_update('Saving: %.2f%% (%d/%dk, %d/%d files) %s%s...'
% (pct, cc/1024, total/1024, fcount, ftotal,
- remainstr, kpsstr))
+ remainstr, kpsstr), False)
indexfile = opt.indexfile or git.repo('bupindex')
if opt.progress:
for (transname,ent) in r.filter(extra, wantrecurse=wantrecurse_pre):
if not (ftotal % 10024):
- qprogress('Reading index: %d\r' % ftotal)
+ progress_update('Reading index: %d ...' % ftotal, False)
exists = ent.exists()
hashvalid = already_saved(ent)
ent.set_sha_missing(not hashvalid)
if exists and not hashvalid:
total += ent.size
ftotal += 1
- progress('Reading index: %d, done.\n' % ftotal)
+ progress_end('Reading index: %d, done.' % ftotal)
hashsplit.progress_callback = progress_report
# Root collisions occur when strip or graft options map more than one
else:
status = ' '
if opt.verbose >= 2:
- log('%s %-70s\n' % (status, ent.name))
+ log('%s %-70s' % (status, ent.name))
elif not stat.S_ISDIR(ent.mode) and lastdir != dir:
if not lastdir.startswith(dir):
- log('%s %-70s\n' % (status, os.path.join(dir, '')))
+ log('%s %-70s' % (status, os.path.join(dir, '')))
lastdir = dir
if opt.progress:
if opt.smaller and ent.size >= opt.smaller:
if exists and not hashvalid:
if opt.verbose:
- log('skipping large file "%s"\n' % ent.name)
+ log('skipping large file "%s"' % ent.name)
lastskip_name = ent.name
continue
if opt.progress:
pct = total and count*100.0/total or 100
- progress('Saving: %.2f%% (%d/%dk, %d/%d files), done. \n'
+ progress_end('Saving: %.2f%% (%d/%dk, %d/%d files), done.'
% (pct, count/1024, total/1024, fcount, ftotal))
while len(parts) > 1: # _pop() all the parts above the root
cli.close()
if saved_errors:
- log('WARNING: %d errors encountered while saving.\n' % len(saved_errors))
+ log('WARNING: %d errors encountered while saving.' % len(saved_errors))
sys.exit(1)
def _set_mode():
global dumb_server_mode
dumb_server_mode = os.path.exists(git.repo('bup-dumb-server'))
- debug1('bup server: serving in %s mode\n'
+ debug1('bup server: serving in %s mode'
% (dumb_server_mode and 'dumb' or 'smart'))
# OK. we now know the path is a proper repository. Record this path in the
# environment so that subprocesses inherit it and know where to operate.
os.environ['BUP_DIR'] = git.repodir
- debug1('bup server: bupdir is %r\n' % git.repodir)
+ debug1('bup server: bupdir is %r' % git.repodir)
_set_mode()
def init_dir(conn, arg):
git.init_repo(arg)
- debug1('bup server: bupdir initialized: %r\n' % git.repodir)
+ debug1('bup server: bupdir initialized: %r' % git.repodir)
_init_session(arg)
conn.ok()
w.abort()
raise Exception('object read: expected length header, got EOF\n')
n = struct.unpack('!I', ns)[0]
- #debug2('expecting %d bytes\n' % n)
+ #debug2('expecting %d bytes' % n)
if not n:
- debug1('bup server: received %d object%s.\n'
+ debug1('bup server: received %d object%s.'
% (w.count, w.count!=1 and "s" or ''))
fullpath = w.close(run_midx=not dumb_server_mode)
if fullpath:
conn.ok()
return
elif n == 0xffffffff:
- debug2('bup server: receive-objects suspended.\n')
+ debug2('bup server: receive-objects suspended.')
suspended_w = w
conn.ok()
return
crcr = struct.unpack('!I', conn.read(4))[0]
n -= 20 + 4
buf = conn.read(n) # object sizes in bup are reasonably small
- #debug2('read %d bytes\n' % n)
+ #debug2('read %d bytes' % n)
_check(w, n, len(buf), 'object read: expected %d bytes, got %d\n')
if not dumb_server_mode:
oldpack = w.exists(shar, want_source=True)
assert(oldpack.endswith('.idx'))
(dir,name) = os.path.split(oldpack)
if not (name in suggested):
- debug1("bup server: suggesting index %s\n"
+ debug1("bup server: suggesting index %s"
% git.shorten_hash(name))
- debug1("bup server: because of object %s\n"
+ debug1("bup server: because of object %s"
% shar.encode('hex'))
conn.write('index %s\n' % name)
suggested.add(name)
conn.write(struct.pack('!I', len(blob)))
conn.write(blob)
except KeyError, e:
- log('server: error: %s\n' % e)
+ log('server: error: %s' % e)
conn.write('\0\0\0\0')
conn.error(e)
else:
if extra:
o.fatal('no arguments expected')
-debug2('bup server: reading from stdin.\n')
+debug2('bup server: reading from stdin.')
commands = {
'quit': None,
line = _line.strip()
if not line:
continue
- debug1('bup server: command: %r\n' % line)
+ debug1('bup server: command: %r' % line)
words = line.split(' ', 1)
cmd = words[0]
rest = len(words)>1 and words[1] or ''
else:
raise Exception('unknown server command: %r\n' % line)
-debug1('bup server: done\n')
+debug1('bup server: done')
global total_bytes
total_bytes += nbytes
if filenum > 0:
- qprogress('Splitting: file #%d, %d kbytes\r'
- % (filenum+1, total_bytes/1024))
+ progress_update('Splitting: file #%d, %d kbytes ...'
+ % (filenum+1, total_bytes/1024), False)
else:
- qprogress('Splitting: %d kbytes\r' % (total_bytes/1024))
+ progress_update('Splitting: %d kbytes ...'
+ % (total_bytes/1024), False)
is_reverse = os.environ.get('BUP_SERVER_REVERSE')
if not opt.quiet and last != megs:
last = megs
-if opt.verbose:
- log('\n')
if opt.tree:
print tree.encode('hex')
if opt.commit or opt.name:
secs = time.time() - start_time
size = hashsplit.total_split
if opt.bench:
- log('bup: %.2fkbytes in %.2f secs = %.2f kbytes/sec\n'
+ log('bup: %.2fkbytes in %.2f secs = %.2f kbytes/sec'
% (size/1024., secs, size/1024./secs))
if saved_errors:
- log('WARNING: %d errors encountered while saving.\n' % len(saved_errors))
+ log('WARNING: %d errors encountered while saving.' % len(saved_errors))
sys.exit(1)
# contents of the tag file and pass the hash, and we already know
# about the tag's existance via "tags".
if not opt.force and opt.delete not in tags:
- log("error: tag '%s' doesn't exist\n" % opt.delete)
+ log("error: tag '%s' doesn't exist" % opt.delete)
sys.exit(1)
tag_file = 'refs/tags/%s' % opt.delete
git.delete_ref(tag_file)
(tag_name, commit) = extra[:2]
if not tag_name:
o.fatal("tag name must not be empty.")
-debug1("args: tag name = %s; commit = %s\n" % (tag_name, commit))
+debug1("args: tag name = %s; commit = %s" % (tag_name, commit))
if tag_name in tags and not opt.force:
- log("bup: error: tag '%s' already exists\n" % tag_name)
+ log("bup: error: tag '%s' already exists" % tag_name)
sys.exit(1)
if tag_name.startswith('.'):
sys.exit(2)
if not hash:
- log("bup: error: commit %s not found.\n" % commit)
+ log("bup: error: commit %s not found." % commit)
sys.exit(2)
pL = git.PackIdxList(git.repo('objects/pack'))
if not pL.exists(hash):
- log("bup: error: commit %s not found.\n" % commit)
+ log("bup: error: commit %s not found." % commit)
sys.exit(2)
tag_file = git.repo('refs/tags/%s' % tag_name)
import tornado.ioloop
import tornado.web
except ImportError:
- log('error: cannot find the python "tornado" module; please install it\n')
+ log('error: cannot find the python "tornado" module; please install it')
sys.exit(1)
handle_ctrl_c()
first_path = False
if saved_errors:
- log('WARNING: %d errors encountered.\n' % len(saved_errors))
+ log('WARNING: %d errors encountered.' % len(saved_errors))
sys.exit(1)
else:
sys.exit(0)
# flush it as one big lump during close().
pages = os.fstat(f.fileno()).st_size / 4096 * 5 # assume k=5
self.delaywrite = expected > pages
- debug1('bloom: delaywrite=%r\n' % self.delaywrite)
+ debug1('bloom: delaywrite=%r' % self.delaywrite)
if self.delaywrite:
self.map = mmap_readwrite_private(self.rwfile, close=False)
else:
self.map = mmap_read(f)
got = str(self.map[0:4])
if got != 'BLOM':
- log('Warning: invalid BLOM header (%r) in %r\n' % (got, filename))
+ log('Warning: invalid BLOM header (%r) in %r' % (got, filename))
return self._init_failed()
ver = struct.unpack('!I', self.map[4:8])[0]
if ver < BLOOM_VERSION:
- log('Warning: ignoring old-style (v%d) bloom %r\n'
+ log('Warning: ignoring old-style (v%d) bloom %r'
% (ver, filename))
return self._init_failed()
if ver > BLOOM_VERSION:
- log('Warning: ignoring too-new (v%d) bloom %r\n'
+ log('Warning: ignoring too-new (v%d) bloom %r'
% (ver, filename))
return self._init_failed()
def close(self):
if self.map and self.rwfile:
- debug2("bloom: closing with %d entries\n" % self.entries)
+ debug2("bloom: closing with %d entries" % self.entries)
self.map[12:16] = struct.pack('!I', self.entries)
if self.delaywrite:
self.rwfile.seek(0)
bits = int(math.floor(math.log(expected*MAX_BITS_EACH/8,2)))
k = k or ((bits <= MAX_BLOOM_BITS[5]) and 5 or 4)
if bits > MAX_BLOOM_BITS[k]:
- log('bloom: warning, max bits exceeded, non-optimal\n')
+ log('bloom: warning, max bits exceeded, non-optimal')
bits = MAX_BLOOM_BITS[k]
- debug1('bloom: using 2^%d bytes and %d hash functions\n' % (bits, k))
+ debug1('bloom: using 2^%d bytes and %d hash functions' % (bits, k))
f = f or open(name, 'w+b')
f.write('BLOM')
f.write(struct.pack('!IHHI', BLOOM_VERSION, bits, k, 0))
# All cached idxs are extra until proven otherwise
extra = set()
for f in os.listdir(self.cachedir):
- debug1('%s\n' % f)
+ debug1('%s' % f)
if f.endswith('.idx'):
extra.add(f)
needed = set()
extra.discard(idx)
self.check_ok()
- debug1('client: removing extra indexes: %s\n' % extra)
+ debug1('client: removing extra indexes: %s' % extra)
for idx in extra:
os.unlink(os.path.join(self.cachedir, idx))
- debug1('client: server requested load of: %s\n' % needed)
+ debug1('client: server requested load of: %s' % needed)
for idx in needed:
self.sync_index(idx)
git.auto_midx(self.cachedir)
def sync_index(self, name):
- #debug1('requesting %r\n' % name)
+ #debug1('requesting %r' % name)
self.check_busy()
mkdirp(self.cachedir)
fn = os.path.join(self.cachedir, name)
assert(n)
with atomically_replaced_file(fn, 'w') as f:
count = 0
- progress('Receiving index from server: %d/%d\r' % (count, n))
+ progress_update('Receiving index from server: %d/%d ...' % (count, n))
for b in chunkyreader(self.conn, n):
f.write(b)
count += len(b)
- qprogress('Receiving index from server: %d/%d\r' % (count, n))
- progress('Receiving index from server: %d/%d, done.\n' % (count, n))
+ progress_update('Receiving index from server: %d/%d ...' % (count, n), False)
+ progress_end('Receiving index from server: %d/%d, done.' % (count, n))
self.check_ok()
def _make_objcache(self):
for line in linereader(self.conn):
if not line:
break
- debug2('%s\n' % line)
+ debug2('%s' % line)
if line.startswith('index '):
idx = line[6:]
- debug1('client: received index suggestion: %s\n'
+ debug1('client: received index suggestion: %s'
% git.shorten_hash(idx))
suggested.append(idx)
else:
assert(line.endswith('.idx'))
- debug1('client: completed writing pack, idx: %s\n'
+ debug1('client: completed writing pack, idx: %s'
% git.shorten_hash(line))
suggested.append(line)
self.check_ok()
path = prepend + name
if excluded_paths:
if os.path.normpath(path) in excluded_paths:
- debug1('Skipping %r: excluded.\n' % path)
+ debug1('Skipping %r: excluded.' % path)
continue
if exclude_rxs and should_rx_exclude_path(path, exclude_rxs):
continue
if name.endswith('/'):
if bup_dir != None:
if os.path.normpath(path) == bup_dir:
- debug1('Skipping BUP_DIR.\n')
+ debug1('Skipping BUP_DIR.')
continue
if xdev != None and pst.st_dev != xdev:
- debug1('Skipping contents of %r: different filesystem.\n' % path)
+ debug1('Skipping contents of %r: different filesystem.' % path)
else:
try:
OsFile(name).fchdir()
broken = False
for n in mx.idxnames:
if not os.path.exists(os.path.join(mxd, n)):
- log(('warning: index %s missing\n' +
- ' used by %s\n') % (n, mxf))
+ log('warning: index %s missing!' % n)
+ log(' used by %s' % mxf)
broken = True
if broken:
mx.close()
for name in ix.idxnames:
d[os.path.join(self.dir, name)] = ix
elif not ix.force_keep:
- debug1('midx: removing redundant: %s\n'
+ debug1('midx: removing redundant: %s'
% os.path.basename(ix.name))
ix.close()
unlink(ix.name)
self.do_bloom = True
else:
self.bloom = None
- debug1('PackIdxList: using %d index%s.\n'
+ debug1('PackIdxList: using %d index%s.'
% (len(self.packs), len(self.packs)!=1 and 'es' or ''))
def add(self, hash):
def idxmerge(idxlist, final_progress=True):
"""Generate a list of all the objects reachable in a PackIdxList."""
def pfunc(count, total):
- qprogress('Reading indexes: %.2f%% (%d/%d)\r'
- % (count*100.0/total, count, total))
+ progress_update('Reading indexes: %.2f%% (%d/%d) ...'
+ % (count*100.0/total, count, total), False)
def pfinal(count, total):
if final_progress:
- progress('Reading indexes: %.2f%% (%d/%d), done.\n'
+ progress_end('Reading indexes: %.2f%% (%d/%d), done.'
% (100, total, total))
return merge_iter(idxlist, 10024, pfunc, pfinal)
"""
head = read_ref(committish, repo_dir=repo_dir)
if head:
- debug2("resolved from ref: commit = %s\n" % head.encode('hex'))
+ debug2("resolved from ref: commit = %s" % head.encode('hex'))
return head
pL = PackIdxList(repo('objects/pack', repo_dir=repo_dir))
os.stat(repo('objects/pack/.'))
except OSError, e:
if e.errno == errno.ENOENT:
- log('error: %r is not a bup repository; run "bup init"\n'
+ log('error: %r is not a bup repository; run "bup init"'
% repo())
sys.exit(15)
else:
- log('error: %s\n' % e)
+ log('error: %s' % e)
sys.exit(14)
wanted = ('1','5','6')
if ver() < wanted:
if not _ver_warned:
- log('warning: git version < %s; bup will be slow.\n'
+ log('warning: git version < %s; bup will be slow.'
% '.'.join(wanted))
_ver_warned = 1
self.get = self._slow_get
poll_result = self.p.poll()
assert(poll_result == None)
if self.inprogress:
- log('_fast_get: opening %r while %r is open\n'
+ log('_fast_get: opening %r while %r is open'
% (id, self.inprogress))
assert(not self.inprogress)
assert(id.find('\n') < 0)
for d in self._join(self.get(id)):
yield d
except StopIteration:
- log('booger!\n')
+ log('booger!')
_cp = {}
for (sha,size,level) in sl:
stacks[0].append((GIT_MODE_FILE, sha, size))
_squish(maketree, stacks, level)
- #log('stacks: %r\n' % [len(i) for i in stacks])
+ #log('stacks: %r' % [len(i) for i in stacks])
_squish(maketree, stacks, len(stacks)-1)
- #log('stacks: %r\n' % [len(i) for i in stacks])
+ #log('stacks: %r' % [len(i) for i in stacks])
return _make_shalist(stacks[-1])[0]
buf = buf[sz:]
+istty1 = os.isatty(1) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 1)
+istty2 = os.isatty(2) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 2)
+
_last_prog = 0
-def log(s):
+
+
+def log(s, is_prog=False, is_prog_end=False):
"""Print a log message to stderr."""
global _last_prog
+ ceol = "\033[K"
sys.stdout.flush()
+ if is_prog:
+ if is_prog_end:
+ s = s + ceol + "\n"
+ else:
+ s = s + ceol + "\r"
+ else:
+ if _last_prog > 0:
+ s = s + ceol + "\n"
+ else:
+ s = s + "\n"
_hard_write(sys.stderr.fileno(), s)
_last_prog = 0
def debug1(s):
+ """Write a level 1 debug message to stderr"""
if buglvl >= 1:
log(s)
def debug2(s):
+ """Write a level 2 debug message to stderr"""
if buglvl >= 2:
log(s)
-istty1 = os.isatty(1) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 1)
-istty2 = os.isatty(2) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 2)
-def progress(s):
- """Calls log() if stderr is a TTY. Does nothing otherwise."""
- if istty2:
- log(s)
-
+def progress_update(s, always=True):
+ """Update progress messages on stderr.
-def qprogress(s):
- """Calls progress() only if we haven't printed progress in a while.
-
- This avoids overloading the stderr buffer with excess junk.
- We do the TTY check here, too, to minimize overhead of the time() function.
+ Calls log() if stderr is connected to a TTY and we haven't printed
+ progress messages in a while. This avoids overloading the stderr buffer
+ with excess junk.
"""
global _last_prog
if istty2:
now = time.time()
- if now - _last_prog > 0.1:
- progress(s)
+ if always or now - _last_prog > 0.1:
+ log(s, True)
_last_prog = now
+def progress_end(s):
+ """End progress messages on stderr.
+
+ Calls log() if stderr is connected to a TTY and makes sure that this
+ message is kept even if more/other messages follow using progress_update().
+ """
+ if istty2:
+ log(s, True, True)
+
+
def mkdirp(d, mode=None):
"""Recursively create directories on path 'd'.
out = os.path.join(dir, name)
else:
out = os.path.realpath(p)
- #log('realpathing:%r,%r\n' % (p, out))
+ #log('realpathing:%r,%r' % (p, out))
return out
def write(self, data):
"""Write 'data' to output stream."""
- #log('%d writing: %d bytes\n' % (os.getpid(), len(data)))
+ #log('%d writing: %d bytes' % (os.getpid(), len(data)))
self.outp.write(data)
def has_input(self):
self.outp.flush()
rl = ''
for rl in linereader(self):
- #log('%d got line: %r\n' % (os.getpid(), rl))
+ #log('%d got line: %r' % (os.getpid(), rl))
if not rl: # empty line
continue
elif rl == 'ok':
return None
elif rl.startswith('error '):
- #log('client: error: %s\n' % rl[6:])
+ #log('client: error: %s' % rl[6:])
return NotOk(rl[6:])
else:
onempty(rl)
sys.stderr.write(buf)
elif fdw == 3:
self.closed = True
- debug2("DemuxConn: marked closed\n")
+ debug2("DemuxConn: marked closed")
return True
def _load_buf(self, timeout):
accessible in the module variable helpers.saved_errors.
"""
saved_errors.append(e)
- log('%-70s\n' % e)
+ log('%-70s' % e)
def clear_errors():
oldhook = sys.excepthook
def newhook(exctype, value, traceback):
if exctype == KeyboardInterrupt:
- log('\nInterrupted.\n')
+ log('Interrupted.')
else:
return oldhook(exctype, value, traceback)
sys.excepthook = newhook
"""Return True if path matches a regular expression in exclude_rxs."""
for rx in exclude_rxs:
if rx.search(path):
- debug1('Skipping %r: excluded by rx pattern %r.\n'
+ debug1('Skipping %r: excluded by rx pattern %r.'
% (path, rx.pattern))
return True
return False
(ofs,n) = (f.tell(), len(self.list))
if self.list:
count = len(self.list)
- #log('popping %r with %d entries\n'
+ #log('popping %r with %d entries'
# % (''.join(self.ename), count))
for e in self.list:
e.write(f)
self.children_ofs, self.children_n,
self.meta_ofs)
except (DeprecationWarning, struct.error), e:
- log('pack error: %s (%r)\n' % (e, self))
+ log('pack error: %s (%r)' % (e, self))
raise
def from_stat(self, st, meta_ofs, tstart, check_device=True):
if f:
b = f.read(len(INDEX_HDR))
if b != INDEX_HDR:
- log('warning: %s: header: expected %r, got %r\n'
+ log('warning: %s: header: expected %r, got %r'
% (filename, INDEX_HDR, b))
else:
st = os.fstat(f.fileno())
endswith = name.endswith('/')
ename = pathsplit(name)
basename = ename[-1]
- #log('add: %r %r\n' % (basename, name))
+ #log('add: %r %r' % (basename, name))
flags = IX_EXISTS
sha = None
if hashgen:
def merge(*iters):
def pfunc(count, total):
- qprogress('bup: merging indexes (%d/%d)\r' % (count, total))
+ progress_update('bup: merging indexes (%d/%d) ...' % (count, total), False)
def pfinal(count, total):
- progress('bup: merging indexes (%d/%d), done.\n' % (count, total))
+ progress_end('bup: merging indexes (%d/%d), done.' % (count, total))
return merge_iter(iters, 1024, pfunc, pfinal, key='name')
else:
output_node_info(n, os.path.normpath(path))
except vfs.NodeError, e:
- log('error: %s\n' % e)
+ log('error: %s' % e)
ret = 1
if L:
try:
import xattr
except ImportError:
- log('Warning: Linux xattr support missing; install python-pyxattr.\n')
+ log('Warning: Linux xattr support missing; install python-pyxattr.')
if xattr:
try:
xattr.get_all
try:
import posix1e
except ImportError:
- log('Warning: POSIX ACL support missing; install python-pylibacl.\n')
+ log('Warning: POSIX ACL support missing; install python-pylibacl.')
try:
from bup._helpers import get_linux_file_attr, set_linux_file_attr
if not (get_linux_file_attr or set_linux_file_attr):
return
if _suppress_linux_file_attr:
- log('Warning: Linux attr support disabled (see "bup help index").\n')
+ log('Warning: Linux attr support disabled (see "bup help index").')
get_linux_file_attr = set_linux_file_attr = None
# FIXME: S_ISDOOR, S_IFMPB, S_IFCMP, S_IFNWK, ... see stat(2).
else:
assert(not self._recognized_file_type())
- add_error('not creating "%s" with unrecognized mode "0x%x"\n'
+ add_error('not creating "%s" with unrecognized mode "0x%x"'
% (path, self.mode))
def _apply_common_rec(self, path, restore_numeric_ids=False):
if not posix1e:
if self.posix1e_acl:
- add_error("%s: can't restore ACLs; posix1e support missing.\n"
+ add_error("%s: can't restore ACLs; posix1e support missing."
% path)
return
if self.posix1e_acl:
def _apply_linux_xattr_rec(self, path, restore_numeric_ids=False):
if not xattr:
if self.linux_xattr:
- add_error("%s: can't restore xattr; xattr support missing.\n"
+ add_error("%s: can't restore xattr; xattr support missing."
% path)
return
if not self.linux_xattr:
for path in paths:
safe_path = _clean_up_path_for_archive(path)
if safe_path != path:
- log('archiving "%s" as "%s"\n' % (path, safe_path))
+ log('archiving "%s" as "%s"' % (path, safe_path))
if not recurse:
for p in paths:
assert(filename.endswith('.midx'))
self.map = mmap_read(open(filename))
if str(self.map[0:4]) != 'MIDX':
- log('Warning: skipping: invalid MIDX header in %r\n' % filename)
+ log('Warning: skipping: invalid MIDX header in %r' % filename)
self.force_keep = True
return self._init_failed()
ver = struct.unpack('!I', self.map[4:8])[0]
if ver < MIDX_VERSION:
- log('Warning: ignoring old-style (v%d) midx %r\n'
+ log('Warning: ignoring old-style (v%d) midx %r'
% (ver, filename))
self.force_keep = False # old stuff is boring
return self._init_failed()
if ver > MIDX_VERSION:
- log('Warning: ignoring too-new (v%d) midx %r\n'
+ log('Warning: ignoring too-new (v%d) midx %r'
% (ver, filename))
self.force_keep = True # new stuff is exciting
return self._init_failed()
if port:
argv.extend(('-p', port))
argv.extend((rhost, '--', cmd.strip()))
- #helpers.log('argv is: %r\n' % argv)
+ #helpers.log('argv is: %r' % argv)
def setup():
# runs in the child process
if not rhost:
self.blob = self.blob[want:]
if not self.it:
break
- debug2('next(%d) returned %d\n' % (size, len(out)))
+ debug2('next(%d) returned %d' % (size, len(out)))
self.ofs += len(out)
return out
return self
def _lresolve(self, parts):
- #debug2('_lresolve %r in %r\n' % (parts, self.name))
+ #debug2('_lresolve %r in %r' % (parts, self.name))
if not parts:
return self
(first, rest) = (parts[0], parts[1:])
parts = re.split(r'/+', path or '.')
if not parts[-1]:
parts[-1] = '.'
- #debug2('parts: %r %r\n' % (path, parts))
+ #debug2('parts: %r %r' % (path, parts))
return start._lresolve(parts)
def resolve(self, path = ''):
def size(self):
"""Get this file's size."""
if self._cached_size == None:
- debug1('<<<<File.size() is calculating (for %r)...\n' % self.name)
+ debug1('<<<<File.size() is calculating (for %r)...' % self.name)
if self.bupmode == git.BUP_CHUNKED:
self._cached_size = _total_size(self.hash,
repo_dir = self._repo_dir)
else:
self._cached_size = _chunk_len(self.hash,
repo_dir = self._repo_dir)
- debug1('<<<<File.size() done.\n')
+ debug1('<<<<File.size() done.')
return self._cached_size
self._subs = {}
refs = git.list_refs(repo_dir = self._repo_dir)
for ref in refs:
- #debug2('ref name: %s\n' % ref[0])
+ #debug2('ref name: %s' % ref[0])
revs = git.rev_list(ref[1].encode('hex'), repo_dir = self._repo_dir)
for (date, commit) in revs:
- #debug2('commit: %s date: %s\n' % (commit.encode('hex'), date))
+ #debug2('commit: %s date: %s' % (commit.encode('hex'), date))
commithex = commit.encode('hex')
containername = commithex[:2]
dirname = commithex[2:]