#!/usr/bin/env python
#
# rdiff-backup -- Mirror files while keeping incremental changes
# Version 0.4.0 released October 30, 2001
# Copyright (C) 2001 Ben Escoto <bescoto@stanford.edu>
#
# This program is licensed under the GNU General Public License (GPL).
# See http://www.gnu.org/copyleft/gpl.html for details.
#
# Please send me mail if you find bugs or have any suggestions.

from __future__ import nested_scopes, generators
import os, stat, time, sys, tempfile, getopt, re, cPickle, types, shutil, sha, marshal, traceback


#######################################################################
#
# globals - aggregate some configuration options
#

class Globals:
	"""Hold process-wide configuration for rdiff-backup

	All attributes are class-level; no instances are made.  The
	classmethods below propagate changes to every connection listed
	in Globals.connections, so settings stay consistent between the
	local and remote sides.

	"""

	# The current version of rdiff-backup
	version = "0.4.0"
	
	# This determines how many bytes to read at a time when copying
	blocksize = 32768

	# True if script is running as a server
	server = None

	# If true, when copying attributes, also change target's uid/gid
	change_ownership = None

	# If true, change the permissions of unwriteable mirror files
	# (such as directories) so that they can be written, and then
	# change them back.
	change_mirror_perms = 1

	# If true, temporarily change permissions of unreadable files in
	# the source directory to make sure we can read all files.
	change_source_perms = None

	# If true, try to reset the atimes of the source partition.
	preserve_atime = None

	# This is a list of compiled regular expressions.  If one of them
	# matches a file in the source area, do not process that file.
	exclude_regexps = []

	# Another list of compiled regexps; this time the file is excluded
	# if it matches something in the destination area.
	exclude_mirror_regexps = []

	# This can be set to Rdiff.copy or RPath.copy depending on
	# preferences.  It should be set in the Main() class.
	copy_func = None

	# This will be set as soon as the LocalConnection class loads
	local_connection = None

	# All connections should be added to the following list, so
	# further global changes can be propagated to the remote systems.
	connections = []

	def get(cls, name):
		"""Return the value of something in this class"""
		return cls.__dict__[name]
	get = classmethod(get)

	def add_regexp(cls, regstr, mirror=None):
		"""Add a regular expression to the exclude list

		The regexp is compiled and added on every known connection.
		If mirror is true it goes on the mirror-side exclude list.

		"""
		for conn in Globals.connections:
			conn.Globals.add_regexp_local(regstr, mirror)
	add_regexp = classmethod(add_regexp)

	def add_regexp_local(cls, regstr, mirror):
		"""Add the regex only to the local Globals class"""
		compiled = re.compile(regstr)
		if mirror: Globals.exclude_mirror_regexps.append(compiled)
		else: Globals.exclude_regexps.append(compiled)
	add_regexp_local = classmethod(add_regexp_local)


#######################################################################
#
# static - MakeStatic and MakeClass
#
# These functions are used to make all the instance methods in a class
# into static or class methods.
#

class StaticMethodsError(Exception):
	"""Error raised by the MakeStatic/MakeClass conversion functions"""
	pass

def MakeStatic(cls):
	"""Turn cls's public instance methods into static methods

	Every attribute of cls whose name does not begin with an
	underscore is wrapped in staticmethod().  Iterate cls.__dict__
	rather than dir(cls): dir() also reports inherited names, which
	are absent from cls.__dict__ and would raise KeyError.  (The old
	docstring talked about "subclassing"; this is a plain function
	applied to a finished class.)

	"""
	# list() makes a snapshot so rebinding attributes mid-loop is safe
	for name in list(cls.__dict__.keys()):
		if name[0] != "_":
			setattr(cls, name, staticmethod(cls.__dict__[name]))


def MakeClass(cls):
	"""Turn instance methods into classmethods.  Ignore _ like above"""
	for name in dir(cls):
		if name[0] != "_":
			cls.__dict__[name] = classmethod(cls.__dict__[name])


#######################################################################
#
# lazy - Define some lazy data structures and functions acting on them
#

class Iter:
	"""Hold static methods for the manipulation of lazy iterators"""

	def filter(predicate, iterator):
		"""Like filter in a lazy functional programming language"""
		for i in iterator:
			if predicate(i): yield i

	def map(function, iterator):
		"""Like map in a lazy functional programming language"""
		for i in iterator: yield function(i)

	def foreach(function, iterator):
		"""Run function on each element in iterator"""
		for i in iterator: function(i)

	def cat(*iters):
		"""Lazily concatenate iterators"""
		for iter in iters:
			for i in iter: yield i

	def cat2(iter_of_iters):
		"""Lazily concatenate iterators, iterated by big iterator"""
		for iter in iter_of_iters:
			for i in iter: yield i

	def empty(iter):
		"""True if iterator has length 0"""
		for i in iter: return None
		return 1

	def equal(iter1, iter2, verbose = None, operator = lambda x, y: x == y):
		"""True if iterator 1 has same elements as iterator 2

		Use equality operator, or == if it is unspecified.

		"""
		for i1 in iter1:
			try: i2 = iter2.next()
			except StopIteration:
				if verbose: print "End when i1 = %s" % i1
				return None
			if not operator(i1, i2):
				if verbose: print "%s not equal to %s" % (i1, i2)
				return None
		try: i2 = iter2.next()
		except StopIteration: return 1
		if verbose: print "End when i2 = %s" % i2
		return None

	def Or(iter):
		"""True if any element in iterator is true.  Short circuiting"""
		i = None
		for i in iter:
			if i: return i
		return i

	def And(iter):
		"""True if all elements in iterator are true.  Short circuiting"""
		i = 1
		for i in iter:
			if not i: return i
		return i

	def len(iter):
		"""Return length of iterator"""
		i = 0
		while 1:
			try: iter.next()
			except StopIteration: return i
			i = i+1

	def foldr(f, default, iter):
		"""foldr the "fundamental list recursion operator"?"""
		try: next = iter.next()
		except StopIteration: return default
		return f(next, Iter.foldr(f, default, iter))

	def foldl(f, default, iter):
		"""the fundamental list iteration operator.."""
		while 1:
			try: next = iter.next()
			except StopIteration: return default
			default = f(default, next)

	def multiplex(iter, num_of_forks, final_func = lambda i: None,
				  closing_func = lambda: None):
		"""Split a single iterater into a number of streams

		The return val will be a list with length num_of_forks, each
		of which will be an iterator like iter.  final_func is the
		function that will be called on each element in iter just as
		it is being removed from the buffer.  closing_func is called
		when all the streams are finished.

		"""
		# buffer is a list of elements that some iterators need and others
		# don't
		buffer = []

		# buffer[forkposition[i]] is the next element yieled by iterator
		# i.  If it is -1, yield from the original iter
		starting_forkposition = [-1] * num_of_forks
		forkposition = starting_forkposition[:]
		called_closing_func = [None]

		def get_next(fork_num):
			"""Return the next element requested by fork_num"""
			if forkposition[fork_num] == -1:
				try:  buffer.insert(0, iter.next())
				except StopIteration:
					# call closing_func if necessary
					if (forkposition == starting_forkposition and
						not called_closing_func[0]):
						closing_func()
						called_closing_func[0] = None
					raise StopIteration
				for i in range(num_of_forks): forkposition[i] += 1

			return_val = buffer[forkposition[fork_num]]
			forkposition[fork_num] -= 1

			blen = len(buffer)
			if not (blen-1) in forkposition:
				# Last position in buffer no longer needed
				assert forkposition[fork_num] == blen-2
				final_func(buffer[blen-1])
				del buffer[blen-1]
			return return_val

		def make_iterator(fork_num):
			while(1): yield get_next(fork_num)

		return tuple(map(make_iterator, range(num_of_forks)))

MakeStatic(Iter)



class IterTreeReducer:
	"""Tree style reducer object for iterator

	The indices of a RORPIter form a tree type structure.  This class
	can be used on each element of an iter in sequence and the result
	will be as if the corresponding tree was reduced.  This tries to
	bridge the gap between the tree nature of directories, and the
	iterator nature of the connection between hosts and the temporal
	order in which the files are processed.

	The elements of the iterator are required to have a tuple-style
	.index, called "indexed elem" below.

	"""
	def __init__(self, base_init, branch_reducer, branch_base, base_final):
		"""ITR initializer

		base_init is a function of one argument, an indexed elem.  It
		is called immediately on any elem in the iterator.  It should
		return some value type A.

		branch_reducer and branch_base are used to form a value on a
		bunch of reduced branches, in the way that a linked list of
		type C can be folded to form a value type B.

		base_final is called when leaving a tree.  It takes three
		arguments, the indexed elem, the output (type A) of base_init,
		the output of branch_reducer on all the branches (type B) and
		returns a value type C.

		"""
		self.base_init = base_init
		self.branch_reducer = branch_reducer
		self.branch_base = branch_base
		self.base_final = base_final

		# current_index tracks the last index fed in; None means no
		# element has been processed yet (we are "before" the base).
		self.current_index = None
		# Running fold of the finished subbranches, seeded with branch_base
		self.branch_val = branch_base
		# Nested IterTreeReducer handling the subtree currently being read
		self.subreducer = None
		# Set once calculate_final_val has run; guards getresult
		self.calculated = None
		# NOTE(review): base_index/base_elem/base_init_val are set
		# lazily by the first __call__, so getresult/intree must not
		# be used before at least one element has been processed.

	def getresult(self):
		"""Return results of calculation"""
		if not self.calculated: self.calculate_final_val()
		return self.final_val

	def intree(self, index):
		"""Return true if index is still in current tree"""
		# True when base_index is a prefix of index (tuple indices)
		return self.base_index == index[:len(self.base_index)]

	def newinstance(self):
		"""Return reducer of same type as self"""
		return self.__class__(self.base_init, self.branch_reducer,
							  self.branch_base, self.base_final)

	def process_w_subreducer(self, indexed_elem):
		"""Give object to subreducer, if necessary update branch_val"""
		if not self.subreducer: self.subreducer = self.newinstance()
		if not self.subreducer(indexed_elem):
			# Subreducer's tree ended: fold its result into branch_val
			# and start a fresh subreducer on this element
			self.branch_val = self.branch_reducer(self.branch_val,
												  self.subreducer.getresult())
			self.subreducer = self.newinstance()
			assert self.subreducer(indexed_elem)

	def calculate_final_val(self):
		"""Set final value"""
		if self.subreducer:
			# Fold in the still-open subtree before finishing
			self.branch_val = self.branch_reducer(self.branch_val,
												  self.subreducer.getresult())
		self.final_val = self.base_final(self.base_elem, self.base_init_val,
										 self.branch_val)
		self.calculated = 1

	def __call__(self, indexed_elem):
		"""Process elem, current position in iterator

		Returns true if elem successfully processed, false if elem is
		not in the current tree and thus the final result is
		available.

		"""
		index = indexed_elem.index
		assert type(index) is types.TupleType
		oldindex = self.current_index
		self.current_index = index

		if oldindex is None: # must be at base
			self.base_index = index
			self.base_elem = indexed_elem
			self.base_init_val = self.base_init(indexed_elem)
			return 1
		# Elements must arrive in strictly increasing index order
		else: assert index > oldindex, (index, oldindex)

		if not self.intree(index):
			self.calculate_final_val()
			return None
		else:
			self.process_w_subreducer(indexed_elem)
			return 1


#######################################################################
#
# log - Manage logging 
#

class Logger:
	"""Handle all logging, both to a log file and to the terminal"""

	def __init__(self):
		self.logrp = None
		self.logfp = None
		self.verbosity = 3
		self.term_verbosity = 3
		# true once setterm_verbosity() has been called explicitly
		self.termverbset = None

	def setverbosity(self, verbosity):
		"""Set verbosity levels"""
		self.verbosity = verbosity
		# terminal verbosity tracks the file verbosity unless pinned
		if self.termverbset: return
		self.term_verbosity = verbosity

	def setterm_verbosity(self, termverb):
		"""Set verbosity to terminal"""
		self.termverbset = 1
		self.term_verbosity = termverb

	def open_logfile(self, rpath):
		"""Sets rpath to be the logfile.  May be on remote side."""
		self.logrp = rpath
		self.logfp = rpath.open("a")

	def close_logfile(self):
		"""Close the log file, if one has been opened"""
		if not self.logfp: return
		self.logfp.close()

	def format(self, message, verbosity):
		"""Format the message, possibly adding date information"""
		if verbosity >= 9:
			now = time.asctime(time.localtime(time.time()))
			return "%s  %s\n" % (now, message)
		return message + "\n"

	def __call__(self, message, verbosity):
		"""Log message that has verbosity importance"""
		if verbosity <= self.verbosity: self.log_to_file(message)
		if verbosity <= self.term_verbosity:
			self.log_to_term(message, verbosity)

	def log_to_file(self, message):
		"""Write the message to the log file, if possible"""
		if not self.logfp: return
		self.logfp.write(self.format(message, self.verbosity))

	def log_to_term(self, message, verbosity):
		"""Write message to stdout/stderr"""
		# important (low-verbosity) messages and server mode go to stderr
		if verbosity <= 2 or Globals.server: termfp = sys.stderr
		else: termfp = sys.stdout
		termfp.write(self.format(message, self.term_verbosity))

	def conn(self, message):
		"""Log a message about connection - only to terminal"""
		if self.term_verbosity >= 9: self.log_to_term(message, 9)

	def FatalError(self, message):
		"""Log a fatal error and exit with status 1"""
		self("Fatal Error: " + message, 1)
		sys.exit(1)

Log = Logger()


#######################################################################
#
# ttime - Provide Time class, which contains time related functions.
#

class TimeException(Exception): pass

class Time:
	"""Functions which act on the time

	The methods are made into classmethods by the MakeClass call
	after the class body; several of them broadcast their effect to
	every connection in Globals.connections.

	"""
	# Seconds per unit for the suffixes accepted by intstringtoseconds
	_interval_conv_dict = {"s": 1, "m": 60, "h": 3600,
						   "D": 86400, "M": 30*86400, "Y": 365*86400}

	def setcurtime(cls):
		"""Sets the current time in curtime and curtimestr on all systems"""
		t = time.time()
		for conn in Globals.connections:
			conn.Time.setcurtime_local(t, cls.timetostring(t))

	def setcurtime_local(cls, timeinseconds, timestr):
		"""Only set the current time locally"""
		cls.curtime = timeinseconds
		cls.curtimestr = timestr

	def setprevtime(cls, timeinseconds):
		"""Sets the previous inc time in prevtime and prevtimestr"""
		assert timeinseconds > 0, timeinseconds
		for conn in Globals.connections:
			conn.Time.setprevtime_local(timeinseconds,
										cls.timetostring(timeinseconds))

	def setprevtime_local(cls, timeinseconds, timestr):
		"""Like setprevtime but only set the local version"""
		cls.prevtime = timeinseconds
		cls.prevtimestr = timestr

	def timetostring(cls, timeinseconds):
		"""Return w3 datetime compliant listing of timeinseconds"""
		return time.strftime("%Y-%m-%dT%H:%M:%S",
							 time.localtime(timeinseconds)) + cls.gettzd()

	def stringtotime(cls, timestring):
		"""Return time in seconds from w3 timestring

		If there is an error parsing the string, or it doesn't look
		like a w3 datetime string, return None.

		"""
		try:
			date, daytime = timestring[:19].split("T")
			year, month, day = map(int, date.split("-"))
			hour, minute, second = map(int, daytime.split(":"))
			assert 1900 < year < 2100, year
			assert 1 <= month <= 12
			assert 1 <= day <= 31
			assert 0 <= hour <= 23
			assert 0 <= minute <= 59
			assert 0 <= second <= 61  # leap seconds
			timetuple = (year, month, day, hour, minute, second, -1, -1, -1)
			# mktime interprets the tuple as local time; subtract the
			# local UTC offset to get UTC before applying the string's
			# own timezone designator
			if time.daylight:
				utc_in_secs = time.mktime(timetuple) - time.altzone
			else: utc_in_secs = time.mktime(timetuple) - time.timezone

			return utc_in_secs + cls.tzdtoseconds(timestring[19:])
		except (TypeError, ValueError, AssertionError): return None

	def timetopretty(cls, timeinseconds):
		"""Return pretty version of time"""
		return time.asctime(time.localtime(timeinseconds))

	def stringtopretty(cls, timestring):
		"""Return pretty version of time given w3 time string"""
		return cls.timetopretty(cls.stringtotime(timestring))

	def intstringtoseconds(cls, interval_string):
		"""Convert a string expressing an interval to seconds

		The string is a nonnegative integer followed by one of the
		suffixes in _interval_conv_dict, e.g. "2h" or "10D".  Raises
		TimeException on any malformed input.

		"""
		def error():
			raise TimeException('Bad interval string "%s"' % interval_string)
		if len(interval_string) < 2: error()
		try: num, ext = int(interval_string[:-1]), interval_string[-1]
		except ValueError: error()
		if not ext in cls._interval_conv_dict or num < 0: error()
		return num*cls._interval_conv_dict[ext]

	def gettzd(cls):
		"""Return w3's timezone identification string.

		Expressed as [+/-]hh:mm.  For instance, PST is -08:00.  Zone
		coincides with what localtime(), etc., use.

		"""
		if time.daylight: offset = -1 * time.altzone/60
		else: offset = -1 * time.timezone/60
		if offset > 0: prefix = "+"
		elif offset < 0: prefix = "-"
		else: return "Z" # time is already in UTC

		# Take abs() before divmod: divmod floors, so for a negative
		# half-hour zone divmod(-90, 60) == (-2, 30), and the old
		# map(abs, divmod(offset, 60)) rendered -01:30 as "-02:30".
		hours, minutes = divmod(abs(offset), 60)
		assert 0 <= hours <= 23
		assert 0 <= minutes <= 59
		return prefix + "%02d:%02d" % (hours, minutes)

	def tzdtoseconds(cls, tzd):
		"""Given w3 compliant TZD, return how far ahead UTC is"""
		if tzd == "Z": return 0
		assert len(tzd) == 6 # only accept forms like +08:00 for now
		assert (tzd[0] == "-" or tzd[0] == "+") and tzd[3] == ":"
		# int(tzd[:3]) carries the sign, so minutes inherit it too
		return -60 * (60 * int(tzd[:3]) + int(tzd[4:]))

	def cmp(cls, time1, time2):
		"""Compare time1 and time2 and return -1, 0, or 1

		Arguments may be either seconds or w3 datetime strings;
		strings are converted first.

		"""
		if type(time1) is types.StringType:
			time1 = cls.stringtotime(time1)
			assert time1 is not None
		if type(time2) is types.StringType:
			time2 = cls.stringtotime(time2)
			assert time2 is not None
		
		if time1 < time2: return -1
		elif time1 == time2: return 0
		else: return 1

MakeClass(Time)


#######################################################################
#
# iterfile - Convert an iterator to a file object and vice-versa
#

class IterFileException(Exception): pass

class UnwrapFile:
	"""Contains some basic methods for parsing a file containing an iter

	The file is a stream of records as produced by FileWrappingIter:
	an 8 byte header (1 type character + 7 byte big-endian length)
	followed by the payload.

	"""
	def __init__(self, file):
		# file is any object with a read() method yielding the stream
		self.file = file

	def _s2l(self, s):
		"""Convert string to long int

		Inverse of FileWrappingIter._l2s: s is a 7 byte big-endian
		unsigned integer.

		"""
		assert len(s) == 7
		l = 0L
		for i in range(7): l = l*256 + ord(s[i])
		return l

	def _get(self):
		"""Return pair (type, data) next in line on the file

		type is a single character which is either "o" for object, "f"
		for file, "c" for a continuation of a file, or None if no more
		data can be read.  Data is either the file's data, if type is
		"c" or "f", or the actual object if the type is "o".

		"""
		header = self.file.read(8)
		# Empty read means clean end of stream
		if not header: return None, None
		assert len(header) == 8, "Header is only %d bytes" % len(header)
		type, length = header[0], self._s2l(header[1:])
		buf = self.file.read(length)
		# Objects were pickled by FileWrappingIter; file data is raw
		if type == "o": return type, cPickle.loads(buf)
		else: return type, buf


class IterWrappingFile(UnwrapFile):
	"""An iterator generated from a file.

	Initialize with a file type object, and then it will return the
	elements of the file in order.

	"""
	def __init__(self, file):
		UnwrapFile.__init__(self, file)
		# set when the last yielded element was an embedded file that
		# may still have unread data in the stream
		self.currently_in_file = None

	def __iter__(self): return self

	def next(self):
		"""Return the next object or embedded file in the stream"""
		if self.currently_in_file:
			# no error checking by this point
			self.currently_in_file.close()
		type, data = self._get()
		if not type: raise StopIteration
		if type == "o": return data
		if type != "f":
			raise IterFileException("Bad file type %s" % type)
		# Embedded file: hand back a virtual file; an empty first
		# block means the file is already finished
		file = IterVirtualFile(self, data)
		self.currently_in_file = data and file or None
		return file


class IterVirtualFile(UnwrapFile):
	"""Another version of a pretend file

	This is returned by IterWrappingFile when a file is embedded in
	the main file that the IterWrappingFile is based around.

	"""
	def __init__(self, iwf, initial_data):
		"""Initializer

		initial_data is the data from the first block of the file.
		iwf is the iter wrapping file that spawned this
		IterVirtualFile.

		"""
		UnwrapFile.__init__(self, iwf.file)
		self.iwf = iwf
		# bufferlist holds read-but-unreturned chunks; bufferlen is
		# the total number of buffered bytes
		self.bufferlist = [initial_data]
		self.bufferlen = len(initial_data)
		self.closed = None

	def check_consistency(self):
		"""Assert that bufferlen matches the actual buffered data"""
		l = len("".join(self.bufferlist))
		assert l == self.bufferlen, \
			   "Length of IVF bufferlist doesn't match (%s, %s)" % \
			   (l, self.bufferlen)

	def read(self, length):
		"""Return up to length bytes of the embedded file's data"""
		assert not self.closed
		if self.iwf.currently_in_file:
			# Pull continuation chunks until the request can be
			# satisfied or the embedded file ends.  The original
			# condition ("length <= self.bufferlen") was inverted, so
			# it never refilled a short buffer and returned truncated
			# reads while data remained in the stream.
			while self.bufferlen < length:
				if not self.addtobuffer(): break

		real_len = min(length, self.bufferlen)
		combined_buffer = "".join(self.bufferlist)
		assert len(combined_buffer) == self.bufferlen, \
			   (len(combined_buffer), self.bufferlen)
		self.bufferlist = [combined_buffer[real_len:]]
		self.bufferlen = self.bufferlen - real_len
		return combined_buffer[:real_len]
			
	def addtobuffer(self):
		"""Read a chunk from the file and add it to the buffer

		Return 1 if data was added, None if the embedded file ended
		(an empty continuation record marks end-of-file).

		"""
		assert self.iwf.currently_in_file
		type, data = self._get()
		assert type == "c", "Type is %s instead of c" % type
		if data:
			self.bufferlen = self.bufferlen + len(data)
			self.bufferlist.append(data)
			return 1
		else:
			self.iwf.currently_in_file = None
			return None

	def close(self):
		"""Currently just reads whats left and discards it"""
		while self.iwf.currently_in_file:
			self.addtobuffer()
			# discard immediately; we only need to advance the stream
			self.bufferlist = []
			self.bufferlen = 0
		self.closed = 1


class FileWrappingIter:
	"""A file interface wrapping around an iterator

	This is initialized with an iterator, and then converts it into a
	stream of characters.  The object will evaluate as little of the
	iterator as is necessary to provide the requested bytes.

	The actual file is a sequence of marshaled objects, each preceded
	by 8 bytes which identifies the following the type of object, and
	specifies its length.  File objects are not marshalled, but the
	data is written in chunks of Globals.blocksize, and the following
	blocks can identify themselves as continuations.

	"""
	def __init__(self, iter):
		"""Initialize with iter"""
		self.iter = iter
		# bufferlist/bufferlen hold bytes generated but not yet read
		self.bufferlist = []
		self.bufferlen = 0
		# set while a file element is being copied out chunk by chunk
		self.currently_in_file = None
		self.closed = None

	def read(self, length):
		"""Return next length bytes in file"""
		assert not self.closed
		while self.bufferlen < length:
			if not self.addtobuffer(): break

		combined_buffer = "".join(self.bufferlist)
		assert len(combined_buffer) == self.bufferlen
		real_len = min(self.bufferlen, length)
		self.bufferlen = self.bufferlen - real_len
		self.bufferlist = [combined_buffer[real_len:]]
		return combined_buffer[:real_len]

	def addtobuffer(self):
		"""Updates self.bufferlist and self.bufferlen, adding on a chunk

		Returns None if we have reached the end of the iterator,
		otherwise return true.

		"""
		if self.currently_in_file:
			buf = "c" + self.addfromfile()
		else:
			try: currentobj = self.iter.next()
			except StopIteration: return None
			if hasattr(currentobj, "read") and hasattr(currentobj, "close"):
				# file-like element: emit it chunkwise as f/c records
				self.currently_in_file = currentobj
				buf = "f" + self.addfromfile()
			else:
				pickle = cPickle.dumps(currentobj, 1)
				buf = "o" + self._l2s(len(pickle)) + pickle
				
		self.bufferlist.append(buf)
		self.bufferlen = self.bufferlen + len(buf)
		return 1

	def addfromfile(self):
		"""Read a chunk from the current file and return it

		An empty chunk marks end-of-file and closes the source file.

		"""
		buf = self.currently_in_file.read(Globals.blocksize)
		if not buf:
			# Close outside of an assert so the file is still closed
			# when asserts are stripped by python -O
			result = self.currently_in_file.close()
			assert not result
			self.currently_in_file = None
		return self._l2s(len(buf)) + buf

	def _l2s(self, l):
		"""Convert nonnegative int to 7 byte big-endian string"""
		s = ""
		for i in range(7):
			l, remainder = divmod(l, 256)
			s = chr(remainder) + s
		# If anything is left, l did not fit in 7 bytes.  (The old
		# check asserted remainder == 0, which wrongly limited values
		# to 2^48 and missed genuine overflows past 2^56.)
		assert l == 0
		return s

	def close(self): self.closed = 1


#######################################################################
#
# rlist - Define the CachingIter, and sig/diff/patch ops on iterators
#

class CachingIter:
	"""Cache parts of an iter using a list

	Turn an iter into something that you can prepend elements into,
	and also read from without apparently changing the state.

	"""
	def __init__(self, iter_or_list):
		if type(iter_or_list) is types.ListType:
			self.iter = iter(iter_or_list)
		else: self.iter = iter_or_list
		# self.next is rebound to self._next whenever self.head holds
		# elements waiting in front of the underlying iterator
		self.next = self.iter.next
		self.head = []

	def __iter__(self): return self

	def _next(self):
		"""Take elements from the head list

		When there are elements waiting before the main iterator, this
		is the next function.  If not, iter.next returns to being next.

		"""
		head = self.head
		a = head[0]
		del head[0]
		if not head: self.next = self.iter.next
		return a

	def nextrange(self, m):
		"""Return next m elements in list

		Take what is available from the head list first, then draw
		the remainder from the underlying iterator.  (This previously
		referenced an undefined local "head" and raised NameError.)

		"""
		l = self.head[:m]
		del self.head[:m]
		# keep the self.next invariant consistent with _next/push
		if not self.head: self.next = self.iter.next
		for i in xrange(m - len(l)): l.append(self.iter.next())
		return l

	def peek(self):
		"""Return next element without removing it from iterator"""
		n = self.next()
		self.push(n)
		return n

	def push(self, elem):
		"""Insert an element into the iterator at the beginning"""
		if not self.head: self.next = self._next
		self.head.insert(0, elem)

	def pushrange(self, elem_list):
		"""Insert list of multiple elements at the beginning"""
		if not self.head: self.next = self._next
		self.head[:0] = elem_list

	def cache(self, m):
		"""Move next m elements from iter to internal list

		If m is None, append the entire rest of the iterator.

		"""
		h, it = self.head, self.iter
		if m is None:
			for i in it: h.append(i)
		else:
			for i in xrange(m): h.append(it.next())

	def __getitem__(self, key):
		"""Support a[i:j] style notation.  Non destructive"""
		if type(key) is types.SliceType:
			if key.stop > len(self.head): self.cache(key.stop - len(self.head))
			# slice the head list (the old code indexed with a tuple,
			# self.head[key.start, key.stop], which raised TypeError)
			return self.head[key.start:key.stop]
		else:
			if key >= len(self.head): self.cache(key + 1 - len(self.head))
			return self.head[key]



class RListDelta:
	"""Note a difference from one iterator (A) to another (B)

	The min, max pairs are indices which stand for the half-open
	interval (min, max], and elemlist is a list of all the elements in
	A which fall within this interval.

	These are produced by the function RList.Deltas(...)

	"""
	def __init__(self, (min, max), elemlist):
		# (min, max] bounds the affected index range; elemlist holds
		# the replacement elements for that range
		self.min, self.max = min, max
		self.elemlist = elemlist



class RList:
	"""Tools for signatures, diffing, and patching an iterator

	This class requires that the iterators involved are yielding
	objects that have .index and .data attributes.  Two objects with
	the same .data attribute are supposed to be equivalent.  The
	iterator must also yield the objects in increasing order with
	respect to the .index attribute.

	"""
	# Number of elements hashed together into one signature block
	blocksize = 100

	def Signatures(iter):
		"""Return iterator of signatures from stream of pairs

		Each signature is an ordered pair (last index sig applies to,
		SHA digest of data)

		"""
		i, s = 0, sha.new()
		for iter_elem in iter:
			s.update(marshal.dumps(iter_elem.data))
			i = i+1
			if i == RList.blocksize:
				yield (iter_elem.index, s.digest())
				i, s = 0, sha.new()
		# emit the final, possibly short, block
		if i != 0: yield (iter_elem.index, s.digest())

	def sig_one_block(iter_or_list):
		"""Return the digest portion of a signature on given list"""
		s = sha.new()
		for iter_elem in iter_or_list: s.update(marshal.dumps(iter_elem.data))
		return s.digest()

	def Deltas(remote_sigs, iter):
		"""Return iterator of Delta objects that bring iter to remote"""
		def get_before(index, iter):
			"""Return elements in iter whose index is before or equal index
			iter needs to be pushable
			"""
			l = []
			while 1:
				try: iter_elem = iter.next()
				except StopIteration: return l
				if iter_elem.index > index: break
				l.append(iter_elem)
			# went one element too far; put it back for the next block
			iter.push(iter_elem)
			return l

		if not isinstance(iter, CachingIter): iter = CachingIter(iter)
		oldindex = None
		for (rs_index, rs_digest) in remote_sigs:
			l = get_before(rs_index, iter)
			# only emit a delta when the local block differs
			if rs_digest != RList.sig_one_block(l):
				yield RListDelta((oldindex, rs_index), l)
			oldindex = rs_index

	def patch_once(basis, delta):
		"""Apply one delta to basis to return original iterator

		This returns original iterator up to and including the max range
		of delta, then stop.  basis should be pushable.

		"""
		# Return elements of basis until start of delta range
		for basis_elem in basis:
			if basis_elem.index > delta.min:
				basis.push(basis_elem)
				break
			yield basis_elem

		# Yield elements of delta...
		for elem in delta.elemlist: yield elem

		# Finally, discard basis until end of delta range
		for basis_elem in basis:
			if basis_elem.index > delta.max:
				basis.push(basis_elem)
				break

	def Patch(basis, deltas):
		"""Apply a delta stream to basis iterator, yielding original"""
		if not isinstance(basis, CachingIter): basis = CachingIter(basis)
		for d in deltas:
			for elem in RList.patch_once(basis, d): yield elem
		# after the last delta, the remainder of basis is unchanged
		for elem in basis: yield elem

	def get_difference_once(basis, delta):
		"""From one delta, find differences from basis

		Will return pairs (basis_elem, new_elem) where basis_elem is
		the element from the basis iterator and new_elem is the
		element from the other iterator.  If either is missing None
		will take its place.  Both are present iff the two elements
		have the same index.

		"""
		# Discard any elements of basis before delta starts
		for basis_elem in basis:
			if basis_elem.index > delta.min:
				basis.push(basis_elem)
				break

		# In range compare each one by one
		di, boverflow, doverflow = 0, None, None
		while 1:
			# Set indicies and data, or mark if at end of range already
			try:
				basis_elem = basis.next()
				if basis_elem.index > delta.max:
					basis.push(basis_elem)
					boverflow = 1
			except StopIteration: boverflow = 1
			if di >= len(delta.elemlist): doverflow = 1
			else: delta_elem = delta.elemlist[di]

			if boverflow and doverflow: break
			elif boverflow:
				yield (None, delta_elem)
				di = di+1
			elif doverflow: yield (basis_elem, None)

			# Now can assume that everything is in range
			elif basis_elem.index > delta_elem.index:
				yield (None, delta_elem)
				basis.push(basis_elem)
				di = di+1
			elif basis_elem.index == delta_elem.index:
				if basis_elem.data != delta_elem.data:
					yield (basis_elem, delta_elem)
				di = di+1
			else: yield (basis_elem, None)

	def Dissimilar(basis, deltas):
		"""Return iter of differences from delta iter and basis iter"""
		if not isinstance(basis, CachingIter): basis = CachingIter(basis)
		for d in deltas:
			for triple in RList.get_difference_once(basis, d): yield triple

MakeStatic(RList)


#######################################################################
#
# rdiff - Invoke rdiff utility to make signatures, deltas, or patch
#

class RdiffException(Exception): pass

class Rdiff:
	"""Contains static methods for rdiff operations

	Commands are run via the relevant connection's os module, so they
	execute on whichever host actually holds the file.  NOTE(review):
	paths are interpolated into shell command lines; this relies on
	rp.quote() (defined elsewhere) doing proper shell escaping --
	confirm that before passing untrusted paths.

	"""
	def get_signature(rp):
		"""Take signature of rpin file and return in file object"""
		Log("Getting signature of %s" % rp.path, 7)
		return rp.conn.os.popen("rdiff signature %s" % rp.quote())

	def get_delta_sigfileobj(sig_fileobj, rp_new):
		"""Like get_delta but signature is in a file object"""
		# NOTE(review): tempfile.mktemp is race-prone; kept for
		# compatibility with the rest of the file
		sig_rp = RPath(rp_new.conn, rp_new.conn.tempfile.mktemp(), ())
		sig_rp.write_from_fileobj(sig_fileobj)
		return Rdiff.get_delta(sig_rp, rp_new)

	def get_delta(rp_signature, rp_new):
		"""Take signature rp and new rp, return delta file object"""
		assert rp_signature.conn is rp_new.conn
		Log("Getting delta of %s with signature %s" %
			(rp_new.path, rp_signature.path), 7)
		return rp_new.conn.os.popen("rdiff delta %s %s" %
									(rp_signature.quote(), rp_new.quote()))

	def write_delta(basis, new, delta):
		"""Write deltafile delta which brings basis to new"""
		Log("Writing delta %s from %s -> %s" %
			(basis.path, new.path, delta.path), 7)
		sig = RPath(new.conn, new.conn.tempfile.mktemp())
		sig.write_from_fileobj(Rdiff.get_signature(basis))
		delta.write_from_fileobj(Rdiff.get_delta(sig, new))
		sig.delete()

	def patch(rp_basis, rp_delta, rp_new = None):
		"""Patch rp_basis with rp_delta

		If rp_new is specified, leave rp_basis the way it is,
		otherwise replace rp_basis with rp_new.

		"""
		assert rp_basis.conn is rp_delta.conn
		if rp_new:
			assert rp_new.conn is rp_basis.conn
			out_rp = rp_new
		else: out_rp = RPath(rp_basis.conn, rp_basis.conn.tempfile.mktemp())

		Log("Patching %s using %s to %s" % (rp_basis.path, rp_delta.path,
											out_rp.path), 7)
		cmdline = "rdiff patch %s %s %s" % (rp_basis.quote(),
											rp_delta.quote(), out_rp.quote())
		if rp_basis.conn.os.system(cmdline):
			# The exception used to be constructed but never raised,
			# so failures of the rdiff command went unnoticed
			raise RdiffException("Error running %s" % cmdline)
		out_rp.setdata()

		if not rp_new: # Copy over rp_basis
			rp_basis.delete()
			RPath.move(out_rp, rp_basis)

	def patch_fileobj(rp_basis, delta_fileobj, rp_new = None):
		"""Like patch but diff is given in fileobj form"""
		delta_rp = RPath(rp_basis.conn, tempfile.mktemp(), ())
		delta_rp.write_from_fileobj(delta_fileobj)
		Rdiff.patch(rp_basis, delta_rp, rp_new)

	def patch_fileobj_returnnew(rp_basis, delta_fileobj):
		"""Like patch_fileobj but put new in tempfile and return rp"""
		rp_new = RPath(rp_basis.conn, tempfile.mktemp(), ())
		Rdiff.patch_fileobj(rp_basis, delta_fileobj, rp_new)
		return rp_new

	def copy(rpin, rpout):
		"""Use rdiff to copy rpin to rpout, conserving bandwidth"""
		if not rpin.isreg() or not rpout.isreg() or rpin.conn is rpout.conn:
			RPath.copy(rpin, rpout)  # fallback to regular copying
		else:
			Log("Rdiff copying %s to %s" % (rpin.path, rpout.path), 6)
			rp_delta = RPath(rpout.conn, rpout.conn.tempfile.mktemp(), ())
			Rdiff.write_delta(rpout, rpin, rp_delta)
			Rdiff.patch(rpout, rp_delta)
			rp_delta.delete()


MakeStatic(Rdiff)


#######################################################################
#
# connection - Code that deals with remote execution
#

class ConnectionError(Exception):
	"""Raised when the connection protocol breaks down"""
	pass

class ConnectionQuit(Exception):
	"""Raised when the remote side sends the quit signal"""
	pass


class Connection:
	"""Connection class - represent remote execution

	The idea is that, if c is an instance of this class, c.foo will
	return the object on the remote side.  For functions, c.foo will
	return a function that, when called, executes foo on the remote
	side, sending over the arguments and sending back the result.

	Subclasses (LocalConnection, PipeConnection) supply the actual
	behavior; this base class only marks the common interface.

	"""
	pass


class LocalConnection(Connection):
	"""Local connection

	This is a dummy connection class, so that LC.foo just evaluates to
	foo using global scope.

	"""
	def __init__(self):
		"""Purpose is to prevent two instances of LocalConnection"""
		assert not Globals.local_connection

	def __getattr__(self, name):
		try: return globals()[name]
		except KeyError:
			builtins = globals()["__builtins__"]
			try:
				if type(builtins) is types.ModuleType:
					return builtins.__dict__[name]
				else: return builtins[name]
			except KeyError: raise NameError, name

	def __setattr__(self, name, value):
		globals()[name] = value

	def __delattr__(self, name):
		del globals()[name]

	def rexec(self, command):
		exec command in globals()

	def quit(self): pass

# Create the singleton LocalConnection and register it in the global
# connection list so later changes to Globals can be propagated to it
# along with the remote connections.
Globals.local_connection = LocalConnection()
Globals.connections.append(Globals.local_connection)


class ConnectionRequest:
	"""Describe one remote call: a function string and argument count

	An instance travels down the pipe ahead of its arguments; the
	receiving side uses it to know what to evaluate and how many
	argument objects follow.

	"""
	def __init__(self, function_string, num_args):
		self.function_string = function_string
		self.num_args = num_args

	def __str__(self):
		description = (self.function_string, self.num_args)
		return "ConnectionRequest: %s with %d arguments" % description


class LowLevelPipeConnection(Connection):
	"""Routines for just sending objects from one side of pipe to another

	Each thing sent down the pipe is paired with a request number,
	currently limited to be between 0 and 255.  The size of each thing
	should be less than 2^56.

	Each thing also has a type, indicated by one of the following
	characters:

	o - generic object
	i - iterator/generator of RORPs
	f - file object
	b - string
	q - quit signal
	R - RPath
	r - RORPath only

	"""
	def __init__(self, inpipe, outpipe):
		"""inpipe is a file-type open for reading, outpipe for writing"""
		self.inpipe = inpipe
		self.outpipe = outpipe

	def _put(self, obj, req_num):
		"""Put an object into the pipe (will send raw if string)"""
		if Log.verbosity >= 9 or Log.term_verbosity >= 9:
			self._log_transfer("sending", obj, req_num)

		if type(obj) is types.StringType: self._putbuf(obj, req_num)
		elif ((hasattr(obj, "read") or hasattr(obj, "write"))
			  and hasattr(obj, "close")): self._putfile(obj, req_num)
		elif isinstance(obj, RPath): self._putrpath(obj, req_num)
		elif isinstance(obj, RORPath): self._putrorpath(obj, req_num)
		elif hasattr(obj, "next"): self._putiter(obj, req_num)
		else: self._putobj(obj, req_num)

	def _putobj(self, obj, req_num):
		"""Send a generic python obj down the outpipe"""
		self._write("o", cPickle.dumps(obj, 1), req_num)

	def _putbuf(self, buf, req_num):
		"""Send buffer buf down the outpipe"""
		self._write("b", buf, req_num)

	def _putfile(self, fp, req_num):
		"""Send a file to the client using virtual files"""
		self._write("f", str(VirtualFile.new(fp)), req_num)

	def _putiter(self, iterator, req_num):
		"""Put an iterator through the pipe"""
		self._write("i", str(VirtualFile.new(RORPIter.ToFile(iterator))),
					req_num)

	def _putrpath(self, rpath, req_num):
		"""Put an rpath into the pipe

		If the RPath is from the remote side, it will emerge as being
		local, and vice-versa if it's from the local side.

		"""
		if rpath.conn is self: conn_type = "remote"
		else:
			assert rpath.conn is Globals.local_connection, \
				   (rpath.path, rpath.conn)
			conn_type = "local"
		rpath_repr = (conn_type, rpath.base, rpath.index, rpath.data)
		self._write("R", cPickle.dumps(rpath_repr, 1), req_num)

	def _putrorpath(self, rorpath, req_num):
		"""Put an rorpath into the pipe

		This is only necessary because if there is a .file attached,
		it must be excluded from the pickling

		"""
		rorpath_repr = (rorpath.index, rorpath.data)
		self._write("r", cPickle.dumps(rorpath_repr, 1), req_num)

	def _putquit(self):
		"""Send a string that takes down server"""
		self._write("q", "", 255)

	def _write(self, headerchar, data, req_num):
		"""Write header and then data to the pipe"""
		self.outpipe.write(headerchar + chr(req_num) + self._l2s(len(data)))
		self.outpipe.write(data)
		self.outpipe.flush()

	def _read(self, length):
		"""Read length bytes from inpipe, returning result"""
		return self.inpipe.read(length)

	def _s2l(self, s):
		"""Convert string to long int"""
		assert len(s) == 7
		l = 0L
		for i in range(7): l = l*256 + ord(s[i])
		return l

	def _l2s(self, l):
		"""Convert long int to string"""
		s = ""
		for i in range(7):
			l, remainder = divmod(l, 256)
			s = chr(remainder) + s
		assert remainder == 0
		return s

	def _get(self):
		"""Read an object from the pipe and return (req_num, value)"""
		header_string = self.inpipe.read(9)
		assert len(header_string) == 9, len(header_string)
		try:
			format_string, req_num, length = (header_string[0],
											  ord(header_string[1]),
											  self._s2l(header_string[2:]))
		except IndexError: raise ConnectionError()
		if format_string == "o": result = cPickle.loads(self._read(length))
		elif format_string == "b": result = self._read(length)
		elif format_string == "f":
			result = VirtualFile(self, int(self._read(length)))
		elif format_string == "i":
			result = RORPIter.FromFile(VirtualFile(self,
												   int(self._read(length))))
		elif format_string == "r":
			result = self._getrorpath(self._read(length))
		elif format_string == "R": result = self._getrpath(self._read(length))
		else:
#			if not format_string == "q":
#				print header_string
#				while 1: print self.inpipe.read(1),

			assert format_string == "q", header_string
			raise ConnectionQuit("Received quit signal")
		if Log.verbosity >= 9 or Log.term_verbosity >= 9:
			self._log_transfer("received", result, req_num)
		return (req_num, result)

	def _log_transfer(self, direction, result, req_num):
		"""Log statistics on network objects"""
		if type(result) is types.StringType: result_repr = repr(result)
		else: result_repr = result
		if Globals.server: Log("Server %s (%d): %s" % (direction, req_num,
													   result_repr), 9)
		else: Log("Client %s (%d): %s" % (direction, req_num, result_repr), 9)

	def _getrorpath(self, raw_rorpath_buf):
		"""Reconstruct RORPath object from raw data"""
		index, data = cPickle.loads(raw_rorpath_buf)
		return RORPath(index, data)

	def _getrpath(self, raw_rpath_buf):
		"""Return RPath object indicated by raw_rpath_buf"""
		conn_type, base, index, data = cPickle.loads(raw_rpath_buf)
		if conn_type == "local": conn = self
		else:
			assert conn_type == "remote", conn_type
			conn = Globals.local_connection
		return RPath(conn, base, index, data)

	def _close(self):
		"""Close the pipes associated with the connection"""
		self.outpipe.close()
		self.inpipe.close()


class PipeConnection(LowLevelPipeConnection):
	"""Provide server and client functions for a Pipe Connection

	Both sides act as modules that allows for remote execution.  For
	instance, self.conn.pow(2,8) will execute the operation on the
	server side.

	The only difference between the client and server is that the
	client makes the first request, and the server listens first.

	"""
	def __init__(self, *args):
		apply(LowLevelPipeConnection.__init__, (self,) + args)
		# Dict used as a set of free request numbers (0-255); the
		# values are always None.
		self.unused_request_numbers = {}
		for i in range(256): self.unused_request_numbers[i] = None

	def get_response(self, desired_req_num):
		"""Read from pipe, responding to requests until req_num.

		Sometimes after a request is sent, the other side will make
		another request before responding to the original one.  In
		that case, respond to the request.  But return once the right
		response is given.

		"""
		while 1:
			try: req_num, object = self._get()
			except ConnectionQuit:
				# Peer is shutting down; acknowledge and stop waiting.
				self._put("quitting", self.get_new_req_num())
				return
			if req_num == desired_req_num: return object
			else:
				# Not our answer -- must be a fresh request from the
				# peer, so service it before continuing to wait.
				assert isinstance(object, ConnectionRequest)
				self.answer_request(object, req_num)

	def answer_request(self, request, req_num):
		"""Put the object requested by request down the pipe"""
		# Reserve req_num while the arguments and reply are in flight.
		del self.unused_request_numbers[req_num]
		argument_list = []
		for i in range(request.num_args):
			arg_req_num, arg = self._get()
			assert arg_req_num == req_num
			argument_list.append(arg)
		# SECURITY NOTE: this eval()s a function string received from
		# the peer; the protocol assumes both ends trust each other
		# completely.  Any exception is captured and sent back instead
		# of a result.
		try: result = apply(eval(request.function_string), argument_list)
		except: result = self.extract_exception()
		self._put(result, req_num)
		self.unused_request_numbers[req_num] = None

	def extract_exception(self):
		"""Return active exception"""
		if Log.verbosity >= 8 or Log.term_verbosity >= 8:
			Log("Sending back exception: \n" +
				"".join(traceback.format_tb(sys.exc_info()[2])), 8)
		return sys.exc_info()[1]

	def Server(self):
		"""Start server's read eval return loop"""
		Globals.server = 1
		Log("Starting server", 6)
		# -1 never matches a real request number, so this keeps
		# answering requests until a quit signal arrives.
		self.get_response(-1)

	def reval(self, function_string, *args):
		"""Execute command on remote side

		The first argument should be a string that evaluates to a
		function, like "pow", and the remaining are arguments to that
		function.

		"""
		req_num = self.get_new_req_num()
		self._put(ConnectionRequest(function_string, len(args)), req_num)
		for arg in args: self._put(arg, req_num)
		result = self.get_response(req_num)
		self.unused_request_numbers[req_num] = None
		# Remote exceptions come back as objects; re-raise them here.
		if isinstance(result, Exception): raise result
		else: return result

	def get_new_req_num(self):
		"""Allot a new request number and return it"""
		if not self.unused_request_numbers:
			raise ConnectionError("Exhaused possible connection numbers")
		req_num = self.unused_request_numbers.keys()[0]
		del self.unused_request_numbers[req_num]
		return req_num

	def quit(self):
		"""Close the associated pipes and tell server side to quit"""
		assert not Globals.server
		self._putquit()
		self._get()
		self._close()

	def __getattr__(self, name):
		"""Intercept attributes to allow for . invocation"""
		return EmulateCallable(self, name)


class EmulateCallable:
	"""Stand-in for a remote object in calls like conn.os.chmod(foo)

	Attribute access extends the dotted name; calling the object sends
	the accumulated name and the arguments through the connection.

	"""
	def __init__(self, connection, name):
		self.connection = connection
		self.name = name

	def __call__(self, *args):
		return self.connection.reval(self.name, *args)

	def __getattr__(self, attr_name):
		compound_name = "%s.%s" % (self.name, attr_name)
		return EmulateCallable(self.connection, compound_name)


class VirtualFile:
	"""When the client asks for a file over the connection, it gets this

	The returned instance then forwards requests over the connection.
	The class's dictionary is used by the server to associate each
	with a unique file number.

	"""
	#### The following are used by the server
	vfiles = {}
	counter = 0

	def getbyid(cls, id):
		"""Return the file object registered under id"""
		return cls.vfiles[id]
	getbyid = classmethod(getbyid)

	def readfromid(cls, id, length):
		"""Read length bytes from the file registered under id"""
		return cls.vfiles[id].read(length)
	readfromid = classmethod(readfromid)

	def writetoid(cls, id, buffer):
		"""Write buffer to the file registered under id"""
		return cls.vfiles[id].write(buffer)
	writetoid = classmethod(writetoid)

	def closebyid(cls, id):
		"""Deregister the file under id and close it"""
		fileobj = cls.vfiles[id]
		del cls.vfiles[id]
		return fileobj.close()
	closebyid = classmethod(closebyid)

	def new(cls, fileobj):
		"""Associate a new VirtualFile with a read fileobject, return id"""
		new_id = cls.counter
		cls.vfiles[new_id] = fileobj
		cls.counter = new_id + 1
		return new_id
	new = classmethod(new)


	#### And these are used by the client
	def __init__(self, connection, id):
		self.connection = connection
		self.id = id

	def read(self, length = -1):
		"""Forward a read request over the connection"""
		return self.connection.VirtualFile.readfromid(self.id, length)

	def write(self, buf):
		"""Forward a write request over the connection"""
		return self.connection.VirtualFile.writetoid(self.id, buf)

	def close(self):
		"""Forward a close request over the connection"""
		return self.connection.VirtualFile.closebyid(self.id)


#######################################################################
#
# rpath - Wrapper class around a real path like "/usr/bin/env"
#
# The RPath and associated classes make some function calls more
# convenient (e.g. RPath.getperms()) and also make working with files
# on remote systems transparent.
#

class RPathException(Exception):
	"""Raised on errors in the RPath routines below"""
	pass

class RPathStatic:
	"""Contains static methods for use with RPaths"""
	def copyfileobj(inputfp, outputfp):
		"""Copies file inputfp to outputfp in blocksize intervals"""
		blocksize = Globals.blocksize
		while 1:
			inbuf = inputfp.read(blocksize)
			if not inbuf: break
			outputfp.write(inbuf)

	def cmpfileobj(fp1, fp2):
		"""True if file objects fp1 and fp2 contain same data"""
		blocksize = Globals.blocksize
		while 1:
			buf1 = fp1.read(blocksize)
			buf2 = fp2.read(blocksize)
			if buf1 != buf2: return None
			elif not buf1: return 1

	def check_for_files(*rps):
		"""Make sure that all the rps exist, raise error if not"""
		for rp in rps:
			if not rp.lstat():
				raise RPathException("File %s does not exist" % rp.path)

	def move(rpin, rpout):
		"""Move rpin to rpout, renaming if possible

		Falls back to copy-and-delete when rename raises os.error
		(e.g. when the two paths are on different filesystems).

		"""
		try: RPath.rename(rpin, rpout)
		except os.error:
			RPath.copy(rpin, rpout)
			rpin.delete()

	def copy(rpin, rpout):
		"""Copy RPath rpin to rpout.  Works for symlinks, dirs, etc."""
		Log("Regular copying %s to %s" % (rpin.index, rpout.path), 6)
		if not rpin.lstat():
			raise RPathException("File %s does not exist" % rpin.index)

		if rpout.lstat():
			if rpin.isreg() or not RPath.cmp(rpin, rpout):
				rpout.delete()   # easier to write than to compare
			else: return

		# Dispatch on the source file's type
		if rpin.isreg(): RPath.copy_reg_file(rpin, rpout)
		elif rpin.isdir(): rpout.mkdir()
		elif rpin.issym(): rpout.symlink(rpin.readlink())
		elif rpin.ischardev():
			major, minor = rpin.getdevnums()
			rpout.makedev("c", major, minor)
		elif rpin.isblkdev():
			major, minor = rpin.getdevnums()
			rpout.makedev("b", major, minor)
		elif rpin.isfifo(): rpout.mkfifo()
		elif rpin.issock(): sys.stderr.write("Found socket %s, ignoring\n" %
											 rpin.path)
		else: raise RPathException("File %s has unknown type" % rpin.path)

	def copy_reg_file(rpin, rpout):
		"""Copy regular file rpin to rpout, possibly avoiding connection"""
		try:
			# Fast path: both ends on the same connection, so the copy
			# can be done there without piping data through us.
			if rpout.conn is rpin.conn:
				rpout.conn.shutil.copyfile(rpin.path, rpout.path)
				rpout.data = {'type': rpin.data['type']}
				return
		except AttributeError: pass
		rpout.write_from_fileobj(rpin.open("rb"))

	def cmp(rpin, rpout):
		"""True if rpin has the same data as rpout

		cmp does not compare file ownership, permissions, or times, or
		examine the contents of a directory.

		"""
		RPath.check_for_files(rpin, rpout)
		if rpin.isreg():
			if not rpout.isreg(): return None
			fp1, fp2 = rpin.open("rb"), rpout.open("rb")
			result = RPathStatic.cmpfileobj(fp1, fp2)
			if fp1.close() or fp2.close():
				raise RPathException("Error closing file")
			return result
		elif rpin.isdir(): return rpout.isdir()
		elif rpin.issym():
			return rpout.issym() and (rpin.readlink() == rpout.readlink())
		elif rpin.ischardev():
			return rpout.ischardev() and \
				   (rpin.getdevnums() == rpout.getdevnums())
		elif rpin.isblkdev():
			return rpout.isblkdev() and \
				   (rpin.getdevnums() == rpout.getdevnums())
		elif rpin.isfifo(): return rpout.isfifo()
		elif rpin.issock(): return rpout.issock()
		else: raise RPathException("File %s has unknown type" % rpin.path)

	def copy_attribs(rpin, rpout):
		"""Change file attributes of rpout to match rpin

		Only changes the chmoddable bits, uid/gid ownership, and
		timestamps, so both must already exist.

		"""
		RPath.check_for_files(rpin, rpout)
		if rpin.issym(): return # symlinks have no valid attributes
		if Globals.change_ownership: apply(rpout.chown, rpin.getuidgid())
		rpout.chmod(rpin.getperms())
		rpout.setmtime(rpin.getmtime())

	def cmp_attribs(rp1, rp2):
		"""True if rp1 has the same file attributes as rp2

		Does not compare file access times.  If not changing
		ownership, do not check user/group id.

		"""
		RPath.check_for_files(rp1, rp2)
		result = ((not Globals.change_ownership
				   or rp1.getuidgid() == rp2.getuidgid())
				  and ((rp1.issym() and rp2.issym())
					   or (not rp1.issym() and not rp2.issym()
						   and rp1.getmtime() == rp2.getmtime()))
				  and rp1.getperms() == rp2.getperms())
		Log("Compare attribs %s and %s: %s" % (rp1.path, rp2.path, result), 7)
		return result

	def copy_with_attribs(rpin, rpout):
		"""Copy file and then copy over attributes"""
		RPath.copy(rpin, rpout)
		RPath.copy_attribs(rpin, rpout)

	def quick_cmp_with_attribs(rp1, rp2):
		"""Quicker version of cmp_with_attribs

		Instead of reading all of each file, assume that regular files
		are the same if the attributes compare.

		"""
		if not RPath.cmp_attribs(rp1, rp2): return None
		if rp1.isreg() and rp2.isreg() and (rp1.getlen() == rp2.getlen()):
			return 1
		return RPath.cmp(rp1, rp2)

	def cmp_with_attribs(rp1, rp2):
		"""Combine cmp and cmp_attribs"""
		return RPath.cmp(rp1, rp2) and RPath.cmp_attribs(rp1, rp2)

	def rename(rp_source, rp_dest):
		"""Rename rp_source to rp_dest"""
		assert rp_source.conn is rp_dest.conn
		rp_source.conn.os.rename(rp_source.path, rp_dest.path)
		# The cached data follows the file; the source becomes empty.
		rp_dest.data = rp_source.data
		rp_source.data = {'type': None}

	def gettemp(connection):
		"""Return an RPath that can be used as a temporary file"""
		return RPath(connection, connection.tempfile.mktemp(), ())

	def tupled_lstat(filename):
		"""Like os.lstat, but return only a tuple

		Later versions of os.lstat return a special lstat object,
		which can confuse the pickler and cause errors in remote
		operations.

		"""
		return tuple(os.lstat(filename))

	def cmp_recursive(rp1, rp2):
		"""True if rp1 and rp2 are at the base of same directories

		Includes only attributes, no file data.  This function may not
		be used in rdiff-backup but it comes in handy in the unit
		tests.

		"""
		rp1.setdata()
		rp2.setdata()
		dsiter1, dsiter2 = map(DestructiveStepping.Iterate_with_Finalizer,
							   [rp1, rp2], [1, None])
		result = Iter.equal(dsiter1, dsiter2, 1)
		for i in dsiter1: pass # make sure all files processed anyway
		for i in dsiter2: pass
		return result

MakeStatic(RPathStatic)


class RORPath(RPathStatic):
	"""Read Only RPath - carry information about a path

	These contain information about a file, and possible the file's
	data, but do not have a connection and cannot be written to or
	changed.  The advantage of these objects is that they can be
	communicated by incoding their index and data dictionary.

	"""
	def __init__(self, index, data = None):
		self.index = index
		if data: self.data = data
		else: self.data = {'type':None} # signify empty file
		self.file = None

	def __eq__(self, other):
		"""Signal two files equivalent"""
		return self.index == other.index and self.data == other.data

	def __str__(self):
		"""Pretty print file statistics"""
		return "Index: %s\nData: %s" % (self.index, self.data)

	def lstat(self):
		"""Returns type of file

		The allowable types are None if the file doesn't exist, 'reg'
		for a regular file, 'dir' for a directory, 'dev' for a device
		file, 'fifo' for a fifo, 'sock' for a socket, and 'sym' for a
		symlink.
		
		"""
		return self.data['type']
	gettype = lstat

	def isdir(self):
		"""True if self is a dir"""
		return self.data['type'] == 'dir'

	def isreg(self):
		"""True if self is a regular file"""
		return self.data['type'] == 'reg'

	def issym(self):
		"""True if path is of a symlink"""
		return self.data['type'] == 'sym'

	def isfifo(self):
		"""True if path is a fifo"""
		return self.data['type'] == 'fifo'

	def ischardev(self):
		"""True if path is a character device file"""
		return self.data['type'] == 'dev' and self.data['devnums'][0] == 'c'

	def isblkdev(self):
		"""True if path is a block device file"""
		return self.data['type'] == 'dev' and self.data['devnums'][0] == 'b'

	def issock(self):
		"""True if path is a socket"""
		return self.data['sock'] == 'sock'

	def getperms(self):
		"""Return permission block of file"""
		return self.data['perms']

	def hasfullperms(self):
		"""Return true if owner has full permissions on the file"""
		return self.getperms() % 01000 >= 0700

	def readable(self):
		"""Return true if owner has read permissions on the file"""
		return self.getperms() % 01000 >= 0400

	def executable(self):
		"""Return true if owner has execute permissions"""
		return self.getperms() % 0200 >= 0100

	def getsize(self):
		"""Return length of file in bytes"""
		return self.data['size']

	def getuidgid(self):
		"""Return userid/groupid of file"""
		return self.data['uid'], self.data['gid']

	def getatime(self):
		"""Return access time in seconds"""
		return self.data['atime']

	def getmtime(self):
		"""Return modification time in seconds"""
		return self.data['mtime']
	
	def readlink(self):
		"""Wrapper around os.readlink()"""
		return self.data['linkname']

	def getdevnums(self):
		"""Return a devices major/minor numbers from dictionary"""
		return self.data['devnums'][1:]

	def setfile(self, file):
		"""Right now just set self.file to be the already opened file"""
		assert file and not self.file
		self.file = file
		self.file_already_open = None

	def get_attached_filetype(self):
		"""If there is a file attached, say what it is

		Currently the choices are 'snapshot' meaning an exact copy of
		something, and 'diff' for an rdiff style diff.

		"""
		return self.data['filetype']
	
	def set_attached_filetype(self, type):
		"""Set the type of the attached file"""
		self.data['filetype'] = type

	def open(self, mode):
		"""Return file type object if any was given using self.setfile"""
		if mode != "rb": raise RPathException("Bad mode %s" % mode)
		if self.file_already_open:
			raise RPathException("Attempt to open same file twice")
		self.file_already_open = 1
		return self.file

	def close_if_necessary(self):
		"""Close file if one is present"""
		if self.file:
			assert not self.file.close(), \
		   "Error closing file\ndata = %s\nindex = \n" % (self.data,
														  self.index)


class RPath(RORPath):
	"""Remote Path class - wrapper around a possibly non-local pathname

	This class contains a dictionary called "data" which should
	contain all the information about the file sufficient for
	identification (i.e. if two files have the same (==) data
	dictionary, they are the same file).

	"""
	# Characters that must be backslash-escaped inside the double
	# quotes added by quote() before the path reaches a shell.
	regex_chars_to_quote = re.compile("[\\\"\$`]")

	def __init__(self, connection, base, index = (), data = None):
		"""RPath constructor

		connection = self.conn is the Connection the RPath will use to
		make system calls, and index is the name of the rpath used for
		comparison, and should be a tuple consisting of the parts of
		the rpath split up.  For instance ("foo", "bar") for
		"foo/bar", and ("/", "usr", "local", "bin") for
		"/usr/local/bin".

		"""
		self.conn = connection
		self.index = index
		self.base = base
		self.path = apply(os.path.join, (base,) + self.index)
		self.file = None
		# If no cached data was supplied, stat the file now.
		if data: self.data = data
		else: self.setdata()

	def __str__(self):
		return "Path: %s\nIndex: %s\nData: %s" % (self.path, self.index,
												  self.data)

	def setdata(self):
		"""Create the data dictionary by lstat'ing the file"""
		try: statblock = self.conn.RPathStatic.tupled_lstat(self.path)
		except os.error:
			# A failed lstat means the file does not exist
			self.data = {'type':None}
			return
		data = {}
		mode = statblock[stat.ST_MODE]

		# Determine the file type and the type-specific entries
		if stat.S_ISDIR(mode): type = 'dir'
		elif stat.S_ISCHR(mode):
			type = 'dev'
			data['devnums'] = ('c',) + self._getdevnums()
		elif stat.S_ISBLK(mode):
			type = 'dev'
			data['devnums'] = ('b',) + self._getdevnums()
		elif stat.S_ISREG(mode):
			type = 'reg'
			data['size'] = statblock[stat.ST_SIZE]
		elif stat.S_ISFIFO(mode): type = 'fifo'
		elif stat.S_ISLNK(mode):
			type = 'sym'
			data['linkname'] = self.conn.os.readlink(self.path)
		elif stat.S_ISSOCK(mode): type = 'sock'
		else: raise RPathException("Unknown type for %s" % self.path)
		data['type'] = type
		data['perms'] = stat.S_IMODE(mode)

		if not type == 'sym': # Cannot set times of symbolic links
			data['mtime'] = long(statblock[stat.ST_MTIME])

		# uid/gid and atime are only tracked when the corresponding
		# global options are enabled
		if Globals.change_ownership:
			data['uid'] = statblock[stat.ST_UID]
			data['gid'] = statblock[stat.ST_GID]
		if Globals.preserve_atime and not type == 'sym':
			data['atime'] = long(statblock[stat.ST_ATIME])
		self.data = data

	def check_consistency(self):
		"""Raise an error if consistency of rp broken

		This is useful for debugging when the cache and disk get out
		of sync and you need to find out where it happened.

		"""
		temptype = self.data['type']
		self.setdata()
		assert temptype == self.data['type'], \
			   "\nName: %s\nOld: %s --> New: %s\n" % \
			   (self.path, temptype, self.data['type'])

	def _getdevnums(self):
		"""Return tuple for special file (major, minor)

		Parses fields 9 and 10 of "stat -t" output, which are the
		major and minor device numbers in hexadecimal.

		"""
		cmdline = "stat -t %s" % self.quote()
		statpipe = self.conn.os.popen(cmdline)
		statoutputlist = statpipe.read().split()
		if statpipe.close(): raise RPathException("Error running " + cmdline)
		return tuple(map(lambda x: int(x, 16), statoutputlist[9:11]))

	def chmod(self, permissions):
		"""Wrapper around os.chmod"""
		self.conn.os.chmod(self.path, permissions)
		self.data['perms'] = permissions

	def settime(self, accesstime, modtime):
		"""Change file modification times"""
		Log("Setting time of %s to %d" % (self.path, modtime), 7)
		self.conn.os.utime(self.path, (accesstime, modtime))
		self.data['atime'] = accesstime
		self.data['mtime'] = modtime

	def setmtime(self, modtime):
		"""Set only modtime (access time to present)"""
		Log("Setting time of %s to %d" % (self.path, modtime), 7)
		self.conn.os.utime(self.path, (time.time(), modtime))
		self.data['mtime'] = modtime

	def chown(self, uid, gid):
		"""Set file's uid and gid"""
		self.conn.os.chown(self.path, uid, gid)
		self.data['uid'] = uid
		self.data['gid'] = gid

	def mkdir(self):
		"""Make a directory at self.path"""
		Log("Making directory " + self.path, 6)
		self.conn.os.mkdir(self.path)
		self.setdata()

	def rmdir(self):
		"""Remove the directory at self.path"""
		Log("Removing directory " + self.path, 6)
		self.conn.os.rmdir(self.path)
		self.data = {'type': None}

	def listdir(self):
		"""Return list of string paths returned by os.listdir"""
		return self.conn.os.listdir(self.path)

	def symlink(self, linktext):
		"""Make symlink at self.path pointing to linktext"""
		self.conn.os.symlink(linktext, self.path)
		self.setdata()
		assert self.issym()

	def mkfifo(self):
		"""Make a fifo at self.path"""
		self.conn.os.mkfifo(self.path)
		self.setdata()
		assert self.isfifo()

	def touch(self):
		"""Make sure file at self.path exists"""
		Log("Touching " + self.path, 7)
		# open() is called on the connection so this works remotely
		self.conn.open(self.path, "w").close()
		self.setdata()
		assert self.isreg()

	def delete(self):
		"""Delete file at self.path

		The destructive stepping allows this function to delete
		directories even if they have files and we lack permissions.

		"""
		Log("Deleting %s" % self.path, 7)
		assert self.lstat()
		if self.isdir():
			# Walk the tree, removing each entry as it is finalized
			def helper(dsrp, base_init_output, branch_reduction):
				if dsrp.isdir(): dsrp.rmdir()
				else: dsrp.delete()
			dsiter = DestructiveStepping.Iterate_from(self, None)
			itm = IterTreeReducer(lambda x: None, lambda x,y: None, None,
								  helper)
			for dsrp in dsiter: itm(dsrp)
			itm.getresult()
		else: self.conn.os.unlink(self.path)
		self.setdata()

	def quote(self):
		"""Return quoted self.path for use with os.system()"""
		return '"%s"' % self.regex_chars_to_quote.sub(
			lambda m: "\\"+m.group(0), self.path)

	def normalize(self):
		"""Return RPath canonical version of self.path

		This just means that redundant /'s will be removed, including
		the trailing one, even for directories.  ".." components will
		be retained.

		"""
		newpath = "/".join(filter(lambda x: x and x != ".",
								  self.path.split("/")))
		if self.path[0] == "/": newpath = "/" + newpath
		elif not newpath: newpath = "."
		return self.__class__(self.conn, newpath, ())

	def dirsplit(self):
		"""Returns a tuple of strings (dirname, basename)

		Basename is never '' unless self is root, so it is unlike
		os.path.basename.  If path is just above root (so dirname is
		root), then dirname is ''.  In all other cases dirname is not
		the empty string.  Also, dirsplit depends on the format of
		self, so basename could be ".." and dirname could be a
		subdirectory.  For an atomic relative path, dirname will be
		'.'.

		"""
		normed = self.normalize()
		if normed.path.find("/") == -1: return (".", normed.path)
		comps = normed.path.split("/")
		return "/".join(comps[:-1]), comps[-1]

	def append(self, ext):
		"""Return new RPath with same connection by adjoing ext"""
		return self.__class__(self.conn, self.base, self.index + (ext,))

	def new_index(self, index):
		"""Return similar RPath but with new index"""
		return self.__class__(self.conn, self.base, index)

	def add_inc_ext(self, typestr):
		"""Return new RPath with same connection and inc extension

		The增 extension appended is ".<prevtimestr>.<typestr>", added
		to the last index component (or to the base if no index).

		"""
		addtostr = lambda s: "%s.%s.%s" % (s, Time.prevtimestr, typestr)
		if self.index:
			return self.__class__(self.conn, self.base, self.index[:-1] +
								  (addtostr(self.index[-1]),))
		else: return self.__class__(self.conn, addtostr(self.base),
									self.index)

	def open(self, mode):
		"""Return open file.  Supports modes "w" and "r"."""
		return self.conn.open(self.path, mode)

	def write_from_fileobj(self, fp):
		"""Reads fp and writes to self.path.  Closes both when done"""
		Log("Writing file object to " + self.path, 7)
		assert not self.lstat(), "File %s already exists" % self.path
		outfp = self.open("wb")
		RPath.copyfileobj(fp, outfp)
		if fp.close() or outfp.close():
			raise RPathException("Error closing file")
		self.data['type'] = 'reg'

	def isincfile(self):
		"""Return true if path looks like an increment file

		An increment file name ends in ".<timestring>.<ext>" where ext
		is one of snapshot/dir/missing/diff.

		"""
		dotsplit = self.path.split(".")
		if len(dotsplit) < 3: return None
		timestring, ext = dotsplit[-2:]
		if Time.stringtotime(timestring) is None: return None
		return (ext == "snapshot" or ext == "dir" or
				ext == "missing" or ext == "diff")

	def getinctype(self):
		"""Return type of an increment file"""
		return self.path.split(".")[-1]

	def getinctime(self):
		"""Return timestring of an increment file"""
		return self.path.split(".")[-2]

	def getincbase(self):
		"""Return the base filename of an increment file in rp form"""
		if self.index:
			return self.__class__(self.conn, self.base, self.index[:-1] +
							 ((".".join(self.index[-1].split(".")[:-2])),))
		else: return self.__class__(self.conn,
									".".join(self.base.split(".")[:-2]), ())

	def getincbase_str(self):
		"""Return the base filename string of an increment file"""
		return self.getincbase().dirsplit()[1]

	def makedev(self, type, major, minor):
		"""Make a special file with specified type, and major/minor nums"""
		self.conn.os.system("mknod %s %s %d %d" %
							(self.quote(), type, major, minor))
		# NOTE(review): setdata() records device files with type 'dev'
		# (which is what ischardev()/isblkdev() test for), but here
		# the type is stored as 'chr'/'blk' -- confirm whether this
		# mismatch is intentional.
		if type == 'c': datatype = 'chr'
		elif type == 'b': datatype = 'blk'
		else: raise RPathException
		self.data = {'type': datatype, 'devnums': (type, major, minor)}

	def getRORPath(self, include_contents = None):
		"""Return read only version of self"""
		rorp = RORPath(self.index, self.data)
		if include_contents: rorp.setfile(self.open("rb"))
		return rorp


#######################################################################
#
# rorpiter - Operations on Iterators of Read Only Remote Paths
#

class RORPIterException(Exception):
	"""Raised on errors in the RORPIter routines below"""
	pass

class RORPIter:
	"""Functions relating to iterators of Read Only RPaths

	The main structure will be an iterator that yields RORPaths.
	Every RORPath has a "raw" form that makes it more amenable to
	being turned into a file.  The raw form of the iterator yields
	each RORPath in the form of the tuple (index, data_dictionary,
	files), where files is the number of files attached (usually 1 or
	0).  After that, if a file is attached, it yields that file.

	"""
	def ToRaw(rorp_iter):
		"""Convert a rorp iterator to raw form"""
		for rorp in rorp_iter:
			if rorp.file:
				yield (rorp.index, rorp.data, 1)
				yield rorp.file
			else: yield (rorp.index, rorp.data, 0)

	def FromRaw(raw_iter):
		"""Convert raw rorp iter back to standard form"""
		for index, data, num_files in raw_iter:
			rorp = RORPath(index, data)
			if num_files:
				assert num_files == 1, "Only one file accepted right now"
				# The attached file is the very next element of the stream
				rorp.setfile(RORPIter.getnext(raw_iter))
			yield rorp

	def ToFile(rorp_iter):
		"""Return file version of iterator"""
		return FileWrappingIter(RORPIter.ToRaw(rorp_iter))

	def FromFile(fileobj):
		"""Recover rorp iterator from file interface"""
		return RORPIter.FromRaw(IterWrappingFile(fileobj))

	def IterateRPaths(base_rp):
		"""Return an iterator yielding RPaths with given base rp

		Directory listings are sorted so the iteration comes out in
		index order, which CollateIterators below depends on (and
		which matches the sorting done by
		DestructiveStepping.Iterate_from).

		"""
		yield base_rp
		if base_rp.isdir():
			dir_listing = base_rp.listdir()
			dir_listing.sort()
			for filename in dir_listing:
				for rp in RORPIter.IterateRPaths(base_rp.append(filename)):
					yield rp

	def Signatures(rp_iter):
		"""Yield signatures of rpaths in given rp_iter

		Regular files get an rdiff signature attached; other file
		types are passed through as plain RORPaths.

		"""
		for rp in rp_iter:
			rorp = rp.getRORPath()
			if rp.isreg(): rorp.setfile(Rdiff.get_signature(rp))
			yield rorp

	def GetSignatureIter(base_rp):
		"""Return a signature iterator recurring over the base_rp"""
		return RORPIter.Signatures(RORPIter.IterateRPaths(base_rp))

	def CollateIterators(*rorp_iters):
		"""Collate RORPath iterators by index

		So it takes two or more iterators of rorps and returns an
		iterator yielding tuples like (rorp1, rorp2) with the same
		index.  If one or the other lacks that index, it will be None

		Each input iterator must already be sorted by index.

		"""
		# overflow[i] means that iter[i] has been exhausted
		# rorps[i] is None means that it is time to replenish it.
		iter_num = len(rorp_iters)
		overflow = [None] * iter_num
		rorps = overflow[:]

		def setrorps(overflow, rorps):
			"""Set the overflow and rorps list"""
			for i in range(iter_num):
				if not overflow[i] and rorps[i] is None:
					try: rorps[i] = rorp_iters[i].next()
					except StopIteration:
						overflow[i] = 1
						rorps[i] = None

		def getleastindex(rorps):
			"""Return the first index in rorps, assuming rorps isn't empty"""
			return min(map(lambda rorp: rorp.index,
						   filter(lambda x: x, rorps)))

		while 1:
			setrorps(overflow, rorps)
			# Stop only once every input iterator is exhausted
			if not None in overflow: break

			index = getleastindex(rorps)
			yieldval = []
			for i in range(iter_num):
				if rorps[i] and rorps[i].index == index:
					yieldval.append(rorps[i])
					rorps[i] = None
				else: yieldval.append(None)
			yield IndexedTuple(index, yieldval)

	def getnext(iter):
		"""Return the next element of an iterator, raising error if none"""
		try: next = iter.next()
		except StopIteration: raise RORPIterException("Unexpected end to iter")
		return next

	def GetDiffIter(sig_iter, new_iter):
		"""Return delta iterator from sig_iter to new_iter

		The accompanying file for each will be a delta as produced by
		rdiff, unless the destination file does not exist, in which
		case it will be the file in its entirety.

		sig_iter may be composed of rorps, but new_iter should have
		full RPaths.

		"""
		collated_iter = RORPIter.CollateIterators(sig_iter, new_iter)
		for rorp, rp in collated_iter: yield RORPIter.diffonce(rorp, rp)

	def diffonce(sig_rorp, new_rp):
		"""Return one diff rorp, based from signature rorp and orig rp

		Either argument may be None (CollateIterators yields None for
		a missing index), but not both at once.

		"""
		if sig_rorp and sig_rorp.isreg() and new_rp and new_rp.isreg():
			diff_rorp = new_rp.getRORPath()
			diff_rorp.setfile(Rdiff.get_delta_sigfileobj(sig_rorp.open("rb"),
														 new_rp))
			diff_rorp.set_attached_filetype('diff')
			return diff_rorp
		else:
			# Just send over original if diff isn't appropriate.
			# sig_rorp may be None here (file is new on the source
			# side), so guard before touching it.
			if sig_rorp: sig_rorp.close_if_necessary()
			if not new_rp: return RORPath(sig_rorp.index)
			elif new_rp.isreg():
				diff_rorp = new_rp.getRORPath(1)
				diff_rorp.set_attached_filetype('snapshot')
				return diff_rorp
			else: return new_rp.getRORPath()

	def PatchIter(base_rp, diff_iter):
		"""Patch the appropriate rps in basis_iter using diff_iter"""
		basis_iter = RORPIter.IterateRPaths(base_rp)
		collated_iter = RORPIter.CollateIterators(basis_iter, diff_iter)
		for basisrp, diff_rorp in collated_iter:
			RORPIter.patchonce(base_rp, basisrp, diff_rorp)

	def patchonce(base_rp, basisrp, diff_rorp):
		"""Patch basisrp using diff_rorp

		A diff_rorp with no lstat data means deletion; a 'diff'
		attachment is applied with rdiff; a 'snapshot' attachment (or
		a non-regular diff_rorp) replaces basisrp outright.

		"""
		assert diff_rorp, "Missing diff index %s" % basisrp.index
		if not diff_rorp.lstat():
			assert basisrp.lstat()
			basisrp.delete()
			return

		if basisrp and basisrp.isreg() and diff_rorp.isreg():
			assert diff_rorp.get_attached_filetype() == 'diff'
			Rdiff.patch_fileobj(basisrp, diff_rorp.open("rb"))
		else:
			# Diff contains whole file, just copy it over
			if not basisrp: basisrp = base_rp.new_index(diff_rorp.index)
			if diff_rorp.isreg():
				assert diff_rorp.get_attached_filetype() == 'snapshot'
				if basisrp.lstat(): basisrp.delete()
				basisrp.write_from_fileobj(diff_rorp.open("rb"))
			else: RPath.copy(diff_rorp, basisrp)
		RPath.copy_attribs(diff_rorp, basisrp)

MakeStatic(RORPIter)



class IndexedTuple:
	"""Like a tuple, but has .index

	This is used by CollateIterator above, and can be passed to the
	IterTreeReducer.

	"""
	def __init__(self, index, sequence):
		self.index = index
		self.data = tuple(sequence)

	def __len__(self):
		return len(self.data)

	def __getitem__(self, key):
		return self.data.__getitem__(key)

	def __cmp__(self, other):
		# Ordering considers only the index, not the payload
		assert isinstance(other, IndexedTuple)
		if self.index == other.index: return 0
		if self.index < other.index: return -1
		return 1

	def __eq__(self, other):
		# Equality against another IndexedTuple compares index and
		# payload; against a plain tuple, only the payload.
		if isinstance(other, IndexedTuple):
			return self.index == other.index and self.data == other.data
		elif type(other) is types.TupleType:
			return self.data == other
		else: return None

	def __str__(self):
		assert len(self.data) == 2
		return "(%s, %s).%s" % (str(self.data[0]), str(self.data[1]),
								str(self.index))


#######################################################################
#
# destructive-stepping - Deal with side effects from traversing trees
#

class DSRPath(RPath):
	"""Destructive Stepping RPath

	Sometimes when we traverse the directory tree, even when we just
	want to read files, we have to change things, like the permissions
	of a file or directory in order to read it, or the file's access
	times.  This class is like an RPath, but the permission and time
	modifications are delayed, so that they can be done at the very
	end when they won't be disturbed later.

	Two flag pairs drive this: perms_delayed/times_delayed say that
	chmod/settime calls should be recorded in self.data instead of
	applied, and newperms/newtimes say whether a delayed change was
	actually requested, so write_changes() knows what to flush.

	"""
	def __init__(self, *args):
		# Start in pass-through mode; delaying is switched on
		# explicitly by delay_perm_writes()/delay_time_changes().
		self.perms_delayed = self.times_delayed = None
		RPath.__init__(self, *args)

	def delay_perm_writes(self):
		"""Signal that permission writing should be delayed until the end"""
		self.perms_delayed = 1
		self.newperms = None

	def delay_time_changes(self):
		"""Signal that time changes should also be delayed until the end"""
		self.times_delayed = 1
		self.newtimes = None

	def chmod(self, permissions):
		"""Change permissions, delaying if self.perms_delayed is set"""
		if self.perms_delayed:
			# Record the desired perms in the data dict only;
			# write_changes() applies them later.
			self.newperms = 1
			self.data['perms'] = permissions
		else: RPath.chmod(self, permissions)

	def chmod_bypass(self, permissions):
		"""Change permissions without updating the data dictionary

		Because self.data is untouched, write_changes() will later
		write back the permissions recorded there.

		"""
		self.conn.os.chmod(self.path, permissions)
		self.perms_delayed = self.newperms = 1

	def remember_times(self):
		"""Mark times as changed so they can be restored later"""
		self.times_delayed = self.newtimes = 1

	def settime(self, accesstime, modtime):
		"""Change times, delaying if self.times_delayed is set"""
		if self.times_delayed:
			self.newtimes = 1
			self.data['atime'] = accesstime
			self.data['mtime'] = modtime
		else: RPath.settime(self, accesstime, modtime)

	def settime_bypass(self, accesstime, modtime):
		"""Change times without updating data dictionary"""
		self.conn.os.utime(self.path, (accesstime, modtime))

	def setmtime(self, modtime):
		"""Change mtime, delaying if self.times_delayed is set"""
		if self.times_delayed:
			self.newtimes = 1
			self.data['mtime'] = modtime
		else: RPath.setmtime(self, modtime)

	def setmtime_bypass(self, modtime):
		"""Change mtime without updating data dictionary

		The atime is set to the current time as a side effect.

		"""
		self.conn.os.utime(self.path, (time.time(), modtime))

	def restoretimes(self):
		"""Write times in self.data back to file"""
		RPath.settime(self, self.data['atime'], self.data['mtime'])

	def restoreperms(self):
		"""Write permissions in self.data back to file"""
		RPath.chmod(self, self.data['perms'])

	def write_changes(self):
		"""Write saved up permission/time changes"""
		if not self.lstat(): return # File has been deleted in meantime
		
		if self.perms_delayed and self.newperms:
			self.conn.os.chmod(self.path, self.getperms())
		# Directories get their times rewritten even without an
		# explicit newtimes (processing their contents changes them);
		# symlinks are excluded entirely.
		if (self.times_delayed and (self.newtimes or self.isdir())
			and not self.issym()):
			if self.data.has_key('atime'):
				self.settime_bypass(self.getatime(), self.getmtime())
			else: self.setmtime_bypass(self.getmtime())


class DestructiveStepping:
	"""Destructive stepping

	Helpers for traversing a tree of DSRPaths: set up their delayed
	permission/time handling, iterate them in sorted (index) order,
	and flush the delayed changes afterwards.

	"""
	def initialize(dsrpath, source):
		"""Change permissions of dsrpath, possibly delay writes

		source is true when dsrpath is in the source area being read,
		false/None when it is in the mirror area being written.
		Unreadable files and directories are temporarily opened up
		with chmod_bypass; write_changes() undoes that at the end.

		"""
		if not source or Globals.change_source_perms:
			dsrpath.delay_perm_writes()

		# Deal with missing permissions on regular files and directories
		if source and Globals.change_source_perms:
			if dsrpath.isreg() and not dsrpath.readable():
				dsrpath.chmod_bypass(0400)
			elif dsrpath.isdir() and (not dsrpath.readable() or
									  not dsrpath.executable()):
				dsrpath.chmod_bypass(0500)
		elif not source and Globals.change_mirror_perms:
			# Mirror side needs write access too, hence 0600/0700
			if dsrpath.isreg() and not dsrpath.readable():
				dsrpath.chmod_bypass(0600)
			elif dsrpath.isdir() and not dsrpath.hasfullperms():
				dsrpath.chmod_bypass(0700)

		# Try to preserve access times if necessary
		if source and Globals.preserve_atime: dsrpath.remember_times()
		elif not source: dsrpath.delay_time_changes()

	def Finalizer():
		"""Return a finalizer that can work on an iterator of dsrpaths

		The reason we have to use an IterTreeReducer is that some files
		should be updated immediately, but for directories we sometimes
		need to update all the files in the directory before finally
		coming back to it.

		"""
		# Only the final (post-order) function does real work: flush
		# each dsrpath's delayed permission/time changes.
		return IterTreeReducer(lambda x: None, lambda x,y: None, None,
							   lambda dsrpath, x, y: dsrpath.write_changes())

	def Iterate_from(baserp, source):
		"""Iterate dsrps from baserp, skipping any matching exclude_regexps"""
		if source: exclude_regexps = Globals.exclude_regexps
		else: exclude_regexps = Globals.exclude_mirror_regexps

		def isexcluded(pathstring):
			"""True if pathstring is excluded"""
			for regexp in exclude_regexps:
				if regexp.match(pathstring):
					Log("Excluding %s, %s" % (pathstring, exclude_regexps), 6)
					return 1
			Log("Not excluding %s, %s" % (pathstring, exclude_regexps), 6)
			return None

		def helper(dsrpath):
			# Pre-order traversal; the sorted listing keeps the
			# stream in index order, as collation requires.
			if not isexcluded(dsrpath.path):
				DestructiveStepping.initialize(dsrpath, source)
				yield dsrpath
				if dsrpath.isdir():
					dir_listing = dsrpath.listdir()
					dir_listing.sort()
					for filename in dir_listing:
						for dsrp in helper(dsrpath.append(filename)):
							yield dsrp

		base_dsrpath = DSRPath(baserp.conn, baserp.base,
							   baserp.index, baserp.data)
		return helper(base_dsrpath)

	def Iterate_with_Finalizer(baserp, source):
		"""Like Iterate_from, but finalize each dsrp afterwards"""
		finalize = DestructiveStepping.Finalizer()
		for dsrp in DestructiveStepping.Iterate_from(baserp, source):
			yield dsrp
			finalize(dsrp)
		finalize.getresult()
		

MakeStatic(DestructiveStepping)


#######################################################################
#
# increment - Provides Inc class, which writes increment files
#
# This code is what writes files ending in .diff, .snapshot, etc.
#

class Inc:
	"""Class containing increment functions

	These write the .diff/.snapshot/.dir/.missing files that record
	how to get from the new mirror state back to the old one.

	"""
	def Increment(new, mirror, incpref):
		"""Main file incrementing function

		new is the file on the active partition,
		mirror is the mirrored file from the last backup,
		incpref is the prefix of the increment file.

		This function basically moves mirror -> incpref.

		"""
		Log("Incrementing mirror file " + mirror.path, 5)
		assert new.lstat() or mirror.lstat()
		# Directories need an increment directory to hold the
		# increments of the files inside them
		if (new.isdir() or mirror.isdir()) and not incpref.isdir():
			incpref.mkdir()

		# Pick the increment type from the old/new file types
		if not mirror.lstat(): Inc.makemissing(incpref)
		elif mirror.isdir(): Inc.makedir(mirror, incpref)
		elif new.isreg() and mirror.isreg():
			Inc.makediff(new, mirror, incpref)
		else: Inc.makesnapshot(mirror, incpref)

	def makemissing(incpref):
		"""Signify that mirror file was missing"""
		incpref.add_inc_ext("missing").touch()
		
	def makesnapshot(mirror, incpref):
		"""Copy mirror to incfile, since new is quite different"""
		snapshotrp = incpref.add_inc_ext("snapshot")
		RPath.copy_with_attribs(mirror, snapshotrp)

	def makediff(new, mirror, incpref):
		"""Make incfile which is a diff new -> mirror"""
		diff = incpref.add_inc_ext("diff")
		Rdiff.write_delta(new, mirror, diff)
		# The diff carries the mirror's attributes so they can be
		# restored along with its data
		RPath.copy_attribs(mirror, diff)

	def makedir(mirrordir, incpref):
		"""Make file indicating directory mirrordir has changed"""
		dirsign = incpref.add_inc_ext("dir")
		dirsign.touch()
		RPath.copy_attribs(mirrordir, dirsign)

	def make_patch_increment_ITR(inc_rpath):
		"""Return IterTreeReducer that patches and increments

		This has to be an ITR because directories that have files in
		them changed are flagged with an increment marker.  There are
		four possibilities as to the order:

		1.  Normal file -> Normal file:  right away
		2.  Directory -> Directory:  wait until files in the directory
		    are processed, as we won't know whether to add a marker
		    until the end.
		3.  Normal file -> Directory:  right away, so later files will
		    have a directory to go into.
		4.  Directory -> Normal file:  Wait until the end, so we can
		    process all the files in the directory.

		"""
		def base_init(indexed_tuple):
			"""Patch if appropriate, return (a,b) tuple

			a is true if found directory and thus didn't take action
			
			if a is false, b is true if some changes were made

			if a is true, b is the rp of a temporary file used to hold
			the diff_rorp's data (for dir -> normal file change), and
			false if none was necessary.

			"""
			diff_rorp, dsrp = indexed_tuple
			incpref = inc_rpath.new_index(indexed_tuple.index)
			if dsrp.isdir(): return init_dir(dsrp, diff_rorp, incpref)
			else: return init_non_dir(dsrp, diff_rorp, incpref)

		def init_dir(dsrp, diff_rorp, incpref):
			"""Initial processing of a directory

			Make the corresponding directory right away, but wait
			until the end to write the replacement.  However, if the
			diff_rorp contains data, we must write it locally before
			continuing, or else that data will be lost in the stream.

			"""
			if not (incpref.lstat() and incpref.isdir()): incpref.mkdir()
			if diff_rorp and diff_rorp.isreg() and diff_rorp.file:
				# Save attached data in a temp file for base_final
				temprp = RPathStatic.gettemp(dsrp.conn)
				RPathStatic.copy_with_attribs(diff_rorp, temprp)
				temprp.set_attached_filetype(
					diff_rorp.get_attached_filetype())
				return (1, temprp)
			else: return (1, None)

		def init_non_dir(dsrp, diff_rorp, incpref):
			"""Initial processing of non-directory

			If a reverse diff is called for it is generated by apply
			the forwards diff first on a temporary file.

			"""
			if diff_rorp:
				if dsrp.isreg() and diff_rorp.isreg():
					# Build the new version first, increment against
					# it, then move it into place
					tempnew = Rdiff.patch_fileobj_returnnew(dsrp,
												   diff_rorp.open("rb"))
					Inc.Increment(tempnew, dsrp, incpref)
					RPath.move(tempnew, dsrp)
					RPath.copy_attribs(diff_rorp, dsrp)
				else:
					Inc.Increment(diff_rorp, dsrp, incpref)
					RORPIter.patchonce(None, dsrp, diff_rorp)
				return (None, 1)
			return (None, None)

		def base_final(base_tuple, base_init_tuple, changed):
			"""Patch directory if not done, return true iff made change"""
			if base_init_tuple[0]: # was directory
				diff_rorp, dsrp = base_tuple
				if changed or diff_rorp:
					# Use the temp copy saved by init_dir, if any
					if base_init_tuple[1]: diff_rorp = base_init_tuple[1]
					Inc.Increment(diff_rorp, dsrp,
								  inc_rpath.new_index(base_tuple.index))
					RORPIter.patchonce(None, dsrp, diff_rorp)
					return 1
				return None
			else: # changed iff base_init_tuple says it was
				return base_init_tuple[1]

		return IterTreeReducer(base_init, lambda x,y: x or y, None, base_final)

MakeStatic(Inc)


#######################################################################
#
# restore - Read increment files and restore to original
#

# Raised when an increment sequence cannot be applied to the target
# (bad sequence, conflicting file, or unknown increment type).
class RestoreError(Exception): pass

class Restore:
	"""Functions for restoring a file or tree from its increments"""
	def RestoreFile(rest_time, rpbase, inclist, rptarget):
		"""Non-recursive restore function

		rest_time is the time in seconds to restore to,
		rpbase is the base name of the file being restored,
		inclist is a list of rpaths containing all the relevant increments,
		and rptarget is the rpath that will be written with the restored file.

		"""
		inclist = Restore.sortincseq(rest_time, inclist)
		if not inclist and not (rpbase and rpbase.lstat()):
			return # no increments were applicable
		Log("Restoring %s with increments %s to %s" %
			(rpbase and rpbase.path,
			 Restore.inclist2str(inclist), rptarget.path), 5)
		# A leading diff must be applied on top of the current mirror
		# copy; otherwise the first increment stands on its own.
		if not inclist or inclist[0].getinctype() == "diff":
			assert rpbase and rpbase.lstat(), \
				   "No base to go with incs %s" % Restore.inclist2str(inclist)
			RPath.copy_with_attribs(rpbase, rptarget)
		for inc in inclist: Restore.applyinc(inc, rptarget)

	def inclist2str(inclist):
		"""Return string version of inclist for logging"""
		return ",".join(map(lambda x: x.path, inclist))

	def sortincseq(rest_time, inclist):
		"""Sort the inc sequence, and throw away irrelevant increments

		Returns the increments to apply, newest first: a run of diffs
		possibly ending in one snapshot/dir/missing increment.

		"""
		incpairs = map(lambda rp: (Time.stringtotime(rp.getinctime()), rp),
					   inclist)
		# Only consider increments at or after the time being restored
		incpairs = filter(lambda pair: pair[0] >= rest_time, incpairs)

		# Now throw away older unnecessary increments
		incpairs.sort()
		i = 0
		while(i < len(incpairs)):
			# Only diff type increments require later versions
			if incpairs[i][1].getinctype() != "diff": break
			i = i+1
		incpairs = incpairs[:i+1]

		# Return increments in reversed order
		incpairs.reverse()
		return map(lambda pair: pair[1], incpairs)

	def applyinc(inc, target):
		"""Apply increment rp inc to targetrp target

		Raises RestoreError when the increment cannot be applied
		(missing base for a diff, a file in the way of a directory,
		or an unknown increment type).

		"""
		Log("Applying increment %s to %s" % (inc.path, target.path), 6)
		inctype = inc.getinctype()
		if inctype == "diff":
			if not target.lstat():
				raise RestoreError("Bad increment sequence at " + inc.path)
			Rdiff.patch(target, inc)
		elif inctype == "dir":
			if not target.isdir():
				if target.lstat():
					raise RestoreError("File %s already exists" % target.path)
				target.mkdir()
		elif inctype == "missing": return
		elif inctype == "snapshot": RPath.copy(inc, target)
		else: raise RestoreError("Unknown inctype %s" % inctype)
		RPath.copy_attribs(inc, target)

	def RestoreRecursive(rest_time, mirror_base, baseinc_tup, target_base):
		"""Recursive restore function.

		rest_time is the time in seconds to restore to;
		mirror_base is an rpath of the mirror directory corresponding
		to the one to be restored;
		baseinc_tup is the inc tuple (incdir, list of incs) to be
		restored;
		and target_base in the dsrp of the target directory.

		"""
		assert isinstance(target_base, DSRPath)
		collated = RORPIter.CollateIterators(
			DestructiveStepping.Iterate_from(mirror_base, None),
			Restore.yield_inc_tuples(baseinc_tup))
		mirror_finalizer = DestructiveStepping.Finalizer()
		target_finalizer = DestructiveStepping.Finalizer()

		for mirror, inc_tup in collated:
			if not inc_tup:
				# No increments for this file; restore from mirror only
				inclist = []
				target = target_base.new_index(mirror.index)
			else:
				inclist = inc_tup[1]
				target = target_base.new_index(inc_tup.index)
			DestructiveStepping.initialize(target, None)
			Restore.RestoreFile(rest_time, mirror, inclist, target)
			target_finalizer(target)
			if mirror: mirror_finalizer(mirror)
		target_finalizer.getresult()
		mirror_finalizer.getresult()

	def yield_inc_tuples(inc_tuple):
		"""Iterate increment tuples starting with inc_tuple

		An increment tuple is an IndexedTuple (pair).  The first will
		be the rpath of a directory, and the second is a list of all
		the increments associated with that directory.  If there are
		increments that do not correspond to a directory, the first
		element will be None.  All the rpaths involved correspond to
		files in the increment directory.

		"""
		oldindex, rpath = inc_tuple.index, inc_tuple[0]
		yield inc_tuple
		if not rpath or not rpath.isdir(): return

		inc_list_dict = {} # Index tuple lists by index
		dirlist = rpath.listdir()

		def affirm_dict_indexed(index):
			"""Make sure the inc_list_dict has given index"""
			if not inc_list_dict.has_key(index):
				inc_list_dict[index] = [None, []]

		def add_to_dict(filename):
			"""Add filename to the inc tuple dictionary"""
			rp = rpath.append(filename)
			if rp.isincfile():
				# Group increments under their base filename
				basename = rp.getincbase_str()
				affirm_dict_indexed(basename)
				inc_list_dict[basename][1].append(rp)
			elif rp.isdir():
				affirm_dict_indexed(filename)
				inc_list_dict[filename][0] = rp

		def list2tuple(index):
			"""Return inc_tuple version of dictionary entry by index"""
			inclist = inc_list_dict[index]
			if not inclist[1]: return None # no increments, so ignore
			return IndexedTuple(oldindex + (index,), inclist)

		for filename in dirlist: add_to_dict(filename)
		# Sorted keys keep the yielded tuples in index order
		keys = inc_list_dict.keys()
		keys.sort()
		for index in keys:
			new_inc_tuple = list2tuple(index)
			if not new_inc_tuple: continue
			elif new_inc_tuple[0]: # corresponds to directory
				for i in Restore.yield_inc_tuples(new_inc_tuple): yield i
			else: yield new_inc_tuple

MakeStatic(Restore)


#######################################################################
#
# manage - list, delete, and otherwise manage increments
#

# Raised when increment management is handed an invalid increment
# file (see IncObj.__init__ below).
class ManageException(Exception): pass

class Manage:
	"""List, describe, and delete increments"""
	def get_incobjs(datadir):
		"""Return Increments objects given the rdiff-backup data directory"""
		incrps = Manage.find_incrps_with_base(datadir, "increments")
		return [IncObj(incrp) for incrp in incrps]

	def find_incrps_with_base(dir_rp, basename):
		"""Return list of incfiles with given basename in dir_rp"""
		matches = []
		for filename in dir_rp.listdir():
			rp = dir_rp.append(filename)
			if rp.isincfile() and rp.getincbase_str() == basename:
				matches.append(rp)
		Log("find_incrps_with_base: found %d incs" % len(matches), 6)
		return matches
	
	def describe_root_incs(datadir):
		"""Return a string describing all the root increments"""
		lines = []
		currentrps = Manage.find_incrps_with_base(datadir, "current_mirror")
		if not currentrps:
			Log("Warning: no current mirror marker found", 1)
		elif len(currentrps) > 1:
			Log("Warning: multiple mirror markers found", 1)
		for rp in currentrps:
			lines.append("Found mirror marker %s" % rp.path)
			lines.append("Indicating latest mirror taken at %s" %
						 Time.stringtopretty(rp.getinctime()))
		lines.append("---------------------------------------------"
					 "-------------")

		# Key on negated time so the plain sort puts newest first
		pairs = [(-incobj.time, incobj)
				 for incobj in Manage.get_incobjs(datadir)]
		pairs.sort()
		incobjs = [pair[1] for pair in pairs]
		lines.append("Found %d increments:" % len(incobjs))
		lines.append("\n------------------------------------------\n".join(
			[IncObj.full_description(incobj) for incobj in incobjs]))
		return "\n".join(lines)

	def delete_earlier_than(rptree, time):
		"""Traverse an rptree, deleting increments older than time

		time is in seconds.  It will then delete any empty directories
		in the tree.  To process the entire backup area, the
		rdiff-backup-data directory should be the root of the tree.

		"""
		def process_node(noderp):
			# Delete old increment files, and prune empty directories
			if (noderp.isincfile() and
				Time.stringtotime(noderp.getinctime()) < time):
				Log("Deleting increment file %s" % noderp.path, 5)
				noderp.delete()
			elif noderp.isdir() and not noderp.listdir():
				Log("Deleting increment file %s" % noderp.path, 5)
				noderp.delete()
		rptree.for_node_after(process_node)

MakeStatic(Manage)


class IncObj:
	"""Increment object - represent a completed increment"""
	def __init__(self, incrp):
		"""IncObj initializer

		incrp is an RPath of a path like increments.TIMESTR.dir
		standing for the root of the increment.

		"""
		if not incrp.isincfile():
			raise ManageException("%s is not an inc file" % incrp.path)
		self.incrp = incrp
		self.time = Time.stringtotime(incrp.getinctime())

	def getbaserp(self):
		"""Return rp of the incrp without extensions"""
		baserp = self.incrp.getincbase()
		return baserp

	def pretty_time(self):
		"""Return a formatted version of inc's time"""
		pretty = Time.timetopretty(self.time)
		return pretty

	def full_description(self):
		"""Return string describing increment"""
		description_lines = ["Increment file %s" % self.incrp.path,
							 "Date: %s" % self.pretty_time()]
		return "\n".join(description_lines)


#######################################################################
#
# highlevel - High level functions for mirroring, mirror & inc, etc.
#

class HighLevel:
	"""High level static functions

	The design of some of these functions is represented on the
	accompanying diagram.

	"""
	def Mirror(src_rpath, dest_rpath):
		"""Turn dest_rpath into a copy of src_rpath"""
		source_struct = src_rpath.conn.HLSourceStruct
		dest_struct = dest_rpath.conn.HLDestinationStruct
		source_struct.set_basic_info(src_rpath, dest_rpath)
		dest_struct.set_basic_info(src_rpath, dest_rpath)

		source_struct.split_initial_dsiter(src_rpath)
		dest_struct.split_initial_dsiter(dest_rpath)
		dest_struct.set_dissimilar()
		dest_struct.set_sigs()
		source_struct.set_diffs_and_finalize()
		dest_struct.patch_and_finalize()

		dest_rpath.setdata()

	def Mirror_and_increment(src_rpath, dest_rpath, inc_rpath):
		"""Mirror + put increments in tree based at inc_rpath"""
		source_struct = src_rpath.conn.HLSourceStruct
		dest_struct = dest_rpath.conn.HLDestinationStruct
		source_struct.set_basic_info(src_rpath, dest_rpath)
		dest_struct.set_basic_info(src_rpath, dest_rpath)

		source_struct.split_initial_dsiter(src_rpath)
		dest_struct.split_initial_dsiter(dest_rpath)
		dest_struct.set_dissimilar()
		dest_struct.set_sigs()
		source_struct.set_diffs_and_finalize()
		dest_struct.patch_increment_and_finalize(inc_rpath)

		dest_rpath.setdata()
		inc_rpath.setdata()

	def Restore(rest_time, mirror_base, baseinc_tup, target_base):
		"""Like Restore.RestoreRecursive but check arguments"""
		if isinstance(target_base, DSRPath): dsrp_target = target_base
		else: dsrp_target = DSRPath(target_base.conn, target_base.base,
									target_base.index, target_base.data)
		Restore.RestoreRecursive(rest_time, mirror_base,
								 baseinc_tup, dsrp_target)

MakeStatic(HighLevel)


class HLSourceStruct:
	"""Hold info used by HL on the source side

	State is kept in class attributes; MakeClass below apparently
	converts these functions so they receive the class as cls.

	"""
	def get(cls, name):
		"""Fetch a previously stored class attribute by name

		Used by the destination struct to pull data (e.g. "diffs")
		from this side, possibly over a connection.

		"""
		return cls.__dict__[name]

	def set_basic_info(cls, src_rpath, dest_rpath):
		# Remember the destination connection/struct and our own root
		cls.dest_conn = dest_rpath.conn
		cls.dest_struct = cls.dest_conn.HLDestinationStruct
		cls.src_rpath = src_rpath

	def split_initial_dsiter(cls, rpath):
		"""Set initial_dsiters (iteration of all dsrps from rpath)"""
		dsiter = DestructiveStepping.Iterate_from(rpath, 1)
		cls.initial_dsiter1, cls.initial_dsiter2 = Iter.multiplex(dsiter, 2)

	def set_diffs_and_finalize(cls):
		"""Set diffs and finalize any dsrp changes remaining

		Set cls.diffs, which is a rorpiterator with files included of
		signatures of dissimilar files.  This is the last operation
		run on the local filestream, so finalize dsrp writes.

		"""
		collated = RORPIter.CollateIterators(cls.initial_dsiter2,
											 cls.dest_struct.get("sigs"))
		finalizer = DestructiveStepping.Finalizer()
		def diffs():
			# Lazily produce diffs, finalizing each dsrp as it passes
			for dsrp, dest_sig in collated:
				if dest_sig: yield RORPIter.diffonce(dest_sig, dsrp)
				if dsrp: finalizer(dsrp)
			finalizer.getresult()
		cls.diffs = diffs()

MakeClass(HLSourceStruct)


class HLDestinationStruct:
	"""Hold info used by HL on the destination side

	State is kept in class attributes; MakeClass below apparently
	converts these functions so they receive the class as cls.

	"""
	def get(cls, name):
		"""Fetch a previously stored class attribute by name

		Used by the source struct to pull data (e.g. "sigs") from
		this side, possibly over a connection.

		"""
		return cls.__dict__[name]

	def set_basic_info(cls, src_rpath, dest_rpath):
		# Remember the source connection/struct and our own root
		cls.src_conn = src_rpath.conn
		cls.src_struct = cls.src_conn.HLSourceStruct
		cls.dest_rpath = dest_rpath

	def split_initial_dsiter(cls, rpath):
		"""Set initial_dsiters (iteration of all dsrps from rpath)"""
		dsiter = DestructiveStepping.Iterate_from(rpath, None)
		cls.initial_dsiter1, cls.initial_dsiter2 = Iter.multiplex(dsiter, 2)

	def set_dissimilar(cls):
		"""Set dissimilars

		cls.dissimilars becomes a pair of two equivalent iters.  Each
		enumerates the dsrps which are different on the source and
		destination ends.  These do not necessarily exist on the dest
		end.

		"""
		collated = RORPIter.CollateIterators(
			cls.src_struct.get("initial_dsiter1"), cls.initial_dsiter1)
		baserp = cls.dest_rpath
		def generate_dissimilar():
			for src_rorp, dest_dsrp in collated:
				if not dest_dsrp:
					# File exists only on the source side
					dsrp = DSRPath(baserp.conn, baserp.base, src_rorp.index)
					assert not dsrp.lstat()
					yield dsrp
				elif not src_rorp or not src_rorp == dest_dsrp:
					# Deleted on source, or attributes differ
					yield dest_dsrp

		cls.dissimilars = Iter.multiplex(generate_dissimilar(), 2)

	def set_sigs(cls):
		"""Set cls.sigs to be the signatures of all dissimilar files"""
		cls.sigs = RORPIter.Signatures(cls.dissimilars[0])

	def get_dsrp(cls, dest_rpath, index):
		"""Return initialized dsrp based on dest_rpath with given index"""
		dsrp = DSRPath(dest_rpath.conn, dest_rpath.base, index)
		DestructiveStepping.initialize(dsrp, None)
		return dsrp

	def patch_and_finalize(cls):
		"""Apply diffs and finalize"""
		collated = RORPIter.CollateIterators(cls.src_struct.get("diffs"),
											 cls.initial_dsiter2)
		finalizer = DestructiveStepping.Finalizer()
		for diff_rorp, dsrp in collated:
			# dsrp is None for files that don't exist here yet
			if not dsrp: dsrp = cls.get_dsrp(cls.dest_rpath, diff_rorp.index)
			if diff_rorp: RORPIter.patchonce(None, dsrp, diff_rorp)
			finalizer(dsrp)
		finalizer.getresult()

	def patch_increment_and_finalize(cls, inc_rpath):
		"""Apply diffs, write increment if necessary, and finalize"""
		collated = RORPIter.CollateIterators(cls.src_struct.get("diffs"),
											 cls.initial_dsiter2)
		finalizer = DestructiveStepping.Finalizer()
		ITR = Inc.make_patch_increment_ITR(inc_rpath)

		for indexed_tuple in collated:
			Log("Processing %s" % indexed_tuple, 7)
			diff_rorp, dsrp = indexed_tuple
			if not dsrp:
				# Create a dsrp for files new to the destination, and
				# rebuild the tuple so the ITR sees it
				dsrp = cls.get_dsrp(cls.dest_rpath, indexed_tuple.index)
				indexed_tuple = IndexedTuple(indexed_tuple.index,
											 (diff_rorp, dsrp))
			ITR(indexed_tuple)
			finalizer(dsrp)
		ITR.getresult()
		finalizer.getresult()

MakeClass(HLDestinationStruct)


#######################################################################
#
# main - Start here: Read arguments, set global settings, etc.
#

class Main:
	def __init__(self):
		self.action = None
		self.remote_cmd = None
		self.force = None
		self.exclude_regstrs = ["/proc"]
		self.exclude_mirror_regstrs = []

	def parse_cmdlineoptions(self):
		"""Parse argument list and set global preferences"""
		try: optlist, self.args = getopt.getopt(sys.argv[1:], "blmv:Vs",
			 ["backup-mode", "version", "verbosity=", "exclude=",
			  "exclude-mirror=", "server", "test-server", "remote-cmd=",
			  "mirror-only", "no-rdiff-copy", "force", "change-source-perms",
			  "list-increments", "remove-older-than="])
		except getopt.error:
			self.commandline_error("Error parsing commandline options")

		Globals.copy_func = staticmethod(Rdiff.copy)
		for opt, arg in optlist:
			if opt == "-b" or opt == "--backup-mode": self.action = "backup"
			elif opt == "-s" or opt == "--server": self.action = "server"
			elif opt == "-m" or opt == "--mirror-only": self.action = "mirror"
			elif opt == "--remote-cmd": self.remote_cmd = arg
			elif opt == "-v" or opt == "--verbosity":
				Log.setverbosity(int(arg))
			elif opt == "--terminal-verbosity":
				Log.setterm_verbosity(int(arg))
			elif opt == "--exclude": self.exclude_regstrs.append(arg)
			elif opt == "--exclude-mirror":
				self.exclude_mirror_regstrs.append(arg)
			elif opt == "--force": self.force = 1
			elif opt == "-V" or opt == "--version":
				print "rdiff-backup version " + Globals.version
				sys.exit(0)
			elif opt == "--no-rdiff-copy":
				Globals.copy_func = staticmethod(RPath.copy)
			elif opt == "--test-server": self.action = "test-server"
			elif opt == "--change-source-perms":
				Globals.change_source_perms = 1
			elif opt == "-l" or opt == "--list-increments":
				self.action = "list-increments"
			elif opt == "--remove-older-than":
				self.remove_older_than_string = arg
				self.action = "remove-older-than"
			else: Log.FatalError("Unknown option %s" % opt)

	def set_action(self):
		"""Check arguments and try to set self.action"""
		l = len(self.args)
		if not self.action:
			if l == 0: self.commandline_error("No arguments given")
			elif l == 1: self.action = "restore"
			elif l == 2:
				if RPath(Globals.local_connection, self.args[0]).isincfile():
					self.action = "restore"
				else: self.action = "backup"
			else: self.commandline_error("Too many arguments given")

		if l == 0 and self.action != "server" and self.action != "test-server":
			self.commandline_error("No arguments given")
		if l > 0 and (self.action == "server" or
					  self.action == "test-server"):
			self.commandline_error("Too many arguments given")
		if l < 2 and (self.action == "backup" or self.action == "mirror"):
			self.commandline_error("Two arguments are required "
								   "(source, destination).")
		if l == 2 and (self.action == "list-increments" or
					   self.action == "remove-older-than"):
			self.commandline_error("Only use one argument, "
								   "the root of the backup directory")
		if l > 2: self.commandline_error("Too many arguments given")

	def commandline_error(self, message):
		sys.stderr.write("Error: %s\n" % message)
		sys.stderr.write("See the rdiff-backup manual page for instructions\n")
		sys.exit(1)

	def init_connection(self):
		"""Start the connection if necessary"""
		self.sourceconn = Globals.local_connection
		if self.remote_cmd is None:
			self.destconn = self.sourceconn
			return

		stdin, stdout = os.popen2(self.remote_cmd)
		self.destconn = PipeConnection(stdout, stdin)
		Globals.connections.append(self.destconn)
		self.destconn.Log.setverbosity(Log.verbosity)
		self.destconn.Log.setterm_verbosity(Log.term_verbosity)

	def misc_setup(self):
		"""Set default change ownership flag, umask, excludes"""
		Globals.change_ownership = (self.sourceconn.os.getuid() == 0 and
									self.destconn.os.getuid() == 0)
		os.umask(077)
		for regex_string in self.exclude_regstrs:
			Globals.add_regexp(regex_string, None)
		for regex_string in self.exclude_mirror_regstrs:
			Globals.add_regexp(regex_string, 1)

	def take_action(self):
		"""Do whatever self.action says"""
		if self.action == "server":
			PipeConnection(sys.stdin, sys.stdout).Server()
		elif self.action == "backup":
			self.Backup(self.args[0], self.args[1])
		elif self.action == "restore":
			if len(self.args) == 1: self.Restore(self.args[0])
			else: apply(self.Restore, self.args)
		elif self.action == "mirror":
			self.Mirror(self.args[0], self.args[1])
		elif self.action == "test-server": self.TestServer()
		elif self.action == "list-increments":
			self.ListIncrements(self.args[0])
		elif self.action == "remove-older-than":
			self.RemoveOlderThan(self.args[0])
		else: raise AssertionError("Unknown action " + self.action)

	def cleanup(self):
		"""Do any last minute cleaning before exiting"""
		Log("Cleaning up", 6)
		Log.close_logfile()
		self.destconn.quit()

	def Main(self):
		"""Start everything up!"""
		self.parse_cmdlineoptions()
		self.set_action()
		self.init_connection()
		self.misc_setup()
		self.take_action()
		self.cleanup()


	def TestServer(self):
		"""Run a couple simple tests of the remote connection"""
		if not isinstance(self.destconn, PipeConnection):
			Log.FatalError("No remote server specified "
						   "(the --remote-cmd option is required)")
		try:
			assert self.destconn.pow(2,3) == 8
			assert self.destconn.os.path.join("a", "b") == "a/b"
			version = self.destconn.reval("lambda: Globals.version")
		except:
			sys.stderr.write("Server tests failed\n")
			raise
		if not version == Globals.version:
			print """Server may work, but there is a version mismatch:
Local version: %s
Remote version: %s""" % (Globals.version, version)
		else: print "Server OK"


	def Mirror(self, src_path, dest_path):
		"""Turn dest_path into a copy of src_path"""
		Log("Mirroring %s to %s" % (src_path, dest_path), 5)
		rpin, rpout = self.mirror_check_paths(src_path, dest_path)
		HighLevel.Mirror(rpin, rpout)

	def mirror_check_paths(self, src_path, dest_path):
		"""Check paths and return rpin, rpout"""
		rpin = RPath(self.sourceconn, src_path)
		rpout = RPath(self.destconn, dest_path)
		if not rpin.lstat():
			Log.FatalError("Source directory %s does not exist" % rpin.path)
		if rpout.lstat() and not self.force:
			Log.FatalError(
"""Destination %s exists so continuing could mess it up.  Run
rdiff-backup with the --force option if you want to mirror anyway.""" %
			rpout.path)
		return rpin, rpout


	def Backup(self, src_path, dest_path):
		"""Backup, possibly incrementally, src_path to dest_path."""
		rpin = RPath(self.sourceconn, src_path)
		rpout = RPath(self.destconn, dest_path)
		self.backup_init_dirs(rpin, rpout)
		Time.setcurtime()
		if self.prevtime:
			Time.setprevtime(self.prevtime)
			HighLevel.Mirror_and_increment(rpin, rpout, self.incdir)
		else: HighLevel.Mirror(rpin, rpout)
		self.backup_touch_curmirror(rpin, rpout)

	def backup_init_dirs(self, rpin, rpout):
		"""Make sure rpin and rpout are valid, init data dir and logging"""
		if rpout.lstat() and not rpout.isdir():
			if not self.force:
				Log.FatalError("Destination %s exists and is not a "
							   "directory" % rpout.path)
			else:
				Log("Deleting %s" % rpout.path, 3)
				rpout.delete()
			
		if not rpin.lstat():
			Log.FatalError("Source directory %s does not exist" % rpin.path)
		elif not rpin.isdir():
			Log.FatalError("Source %s is not a directory" % rpin.path)

		self.datadir = rpout.append("rdiff-backup-data")
		self.incdir = RPath(rpout.conn, os.path.join(self.datadir.path,
													 "increments"))
		self.prevtime = self.backup_get_mirrortime()

		if rpout.lstat() and not self.prevtime and not self.force:
			Log.FatalError(
"""Destination directory %s exists, but does not look like a
rdiff-backup directory.  Running rdiff-backup like this could mess up
what is currently in it.  If you want to overwrite it, run
rdiff-backup with the --force option.""" % rpout.path)

		if not rpout.lstat():
			try: rpout.mkdir()
			except os.error:
				Log.FatalError("Unable to create directory %s" % rpout.path)
		if not self.datadir.lstat(): self.datadir.mkdir()
		Globals.add_regexp(self.datadir.path, 1)
		if Log.verbosity > 0:
			Log.open_logfile(self.datadir.append("backup.log"))

	def backup_get_mirrorrps(self):
		"""Return list of current_mirror rps"""
		if not self.datadir.isdir(): return []
		mirrorfiles = filter(lambda f: f.startswith("current_mirror."),
							 self.datadir.listdir())
		mirrorrps = map(lambda x: self.datadir.append(x), mirrorfiles)
		return filter(lambda rp: rp.isincfile(), mirrorrps)

	def backup_get_mirrortime(self):
		"""Return time in seconds of previous mirror, or None if cannot"""
		mirrorrps = self.backup_get_mirrorrps()
		if not mirrorrps: return None
		if len(mirrorrps) > 1:
			Log(
"""Warning: duplicate current_mirror files found.  Perhaps something
went wrong during your last backup?  Using """ + mirrorrps[-1].path, 2)

		timestr = self.datadir.append(mirrorrps[-1].path).getinctime()
		return Time.stringtotime(timestr)
	
	def backup_touch_curmirror(self, rpin, rpout):
		"""Make a file like current_mirror.time.snapshot to record time

		Then update rpout to leave no trace...
		"""
		map(RPath.delete, self.backup_get_mirrorrps())
		mirrorrp = self.datadir.append("current_mirror.%s.%s" %
										  (Time.curtimestr, "snapshot"))
		Log("Touching mirror marker %s" % mirrorrp.path, 6)
		mirrorrp.touch()
		RPath.copy_attribs(rpin, rpout)


	def Restore(self, src_path, dest_path = None):
		"""Main restoring function - take src_path to dest_path"""
		Log("Starting Restore", 5)
		rpin, rpout = self.restore_check_paths(src_path, dest_path)
		inc_tup = self.restore_get_inctup(rpin)
		mirror_base = self.restore_get_mirror(rpin)
		rtime = Time.stringtotime(rpin.getinctime())
		Log.open_logfile(self.datadir.append("restore.log"))
		HighLevel.Restore(rtime, mirror_base, inc_tup, rpout)

	def restore_check_paths(self, src_path, dest_path):
		"""Check paths and return pair of corresponding rps"""
		rpin = RPath(self.sourceconn, src_path)
		if not rpin.lstat():
			Log.FatalError("Increment file %s does not exist" % src_path)
		if not rpin.isincfile():
			Log.FatalError("""File %s does not look like an increment file.

Try restoring from an increment file (the filenames look like
"foobar.2001-09-01T04:49:04-07:00.diff").""")

		if dest_path: rpout = RPath(self.destconn, dest_path)
		else: rpout = rpin.getincbase()
		if rpout.lstat():
			Log.FatalError("Restore target %s already exists.  "
						   "Will not overwrite." % rpout.path)
		return rpin, rpout

	def restore_get_inctup(self, rpin):
		"""Return increment tuple (incrp, list of incs"""
		rpin_dir = rpin.dirsplit()[0]
		if not rpin_dir: rpin_dir = "/"
		rpin_dir_rp = RPath(self.sourceconn, rpin_dir)
		incbase = rpin.getincbase()
		incbasename = incbase.dirsplit()[1]
		inclist = filter(lambda rp: rp.isincfile() and
						 rp.getincbase_str() == incbasename,
						 map(rpin_dir_rp.append, rpin_dir_rp.listdir()))
		return IndexedTuple((), (incbase, inclist))

	def restore_get_mirror(self, rpin):
		"""Return mirror file and set the data dir

		The idea here is to keep backing up on the path until we find
		something named "rdiff-backup-data".  Then use that as a
		reference to calculate the oldfile.  This could fail if the
		increment file is pointed to in a funny way, using symlinks or
		somesuch.

		"""
		pathcomps = os.path.join(rpin.conn.os.getcwd(),
								 rpin.getincbase().path).split("/")
		for i in range(1, len(pathcomps)):
			datadirrp = RPath(rpin.conn, "/".join(pathcomps[:i+1]))
			if pathcomps[i] == "rdiff-backup-data" and datadirrp.isdir():
				break
		else: Log.FatalError("Unable to find rdiff-backup-data dir")

		self.datadir = datadirrp
		Globals.exclude_mirror_regexps.append(re.compile(self.datadir.path))
		rootrp = RPath(rpin.conn, "/".join(pathcomps[:i]))
		if not rootrp.lstat():
			Log.FatalError("Root of mirror area %s does not exist" %
						   rootrp.path)
		else: Log("Using root mirror %s" % rootrp.path, 6)

		from_datadir = pathcomps[i+1:]
		if len(from_datadir) == 1: result = rootrp
		elif len(from_datadir) > 1:
			result = RPath(rootrp.conn, apply(os.path.join,
									      [rootrp.path] + from_datadir[1:]))
		else: raise RestoreError("Problem finding mirror file")

		Log("Using mirror file %s" % result.path, 6)
		return result


	def ListIncrements(self, rootpath):
		"""Print out a summary of the increments and their times"""
		datadir = self.li_getdatadir(rootpath,
			 """Unable to open rdiff-backup-data dir.

The argument to rdiff-backup -l or rdiff-backup --list-increments
should be the root of the target backup directory, of which
rdiff-backup-data is a subdirectory.  So, if you ran

rdiff-backup /home/foo /mnt/back/bar

earlier, try:

rdiff-backup -l /mnt/back/bar
""")
		print Manage.describe_root_incs(datadir)

	def li_getdatadir(self, rootpath, errormsg):
		"""Return data dir if can find it, otherwise use errormsg"""
		rootrp = RPath(self.sourceconn, rootpath)
		datadir = rootrp.append("rdiff-backup-data")
		if not datadir.lstat() or not datadir.isdir():
			Log.FatalError(errormsg)
		return datadir


	def RemoveOlderThan(self, rootpath):
		"""Remove all increment files older than a certain time"""
		datadir = self.li_getdatadir(rootpath, 
									 """Unable to open rdiff-backup-data dir.

Try finding the increments first using --list-increments.""")
		time = self.rot_get_earliest_time()
		timep = Time.timetopretty(time)
		Log("Deleting increment(s) before %s" % timep, 4)
		incobjs = filter(lambda x: x.time < time, Manage.get_incobjs(datadir))
		incobjs_time = ", ".join(map(IncObj.pretty_time, incobjs))
		if not incobjs:
			Log.FatalError("No increments older than %s found" % timep)
		elif len(incobjs) > 1 and not self.force:
			Log.FatalError("Found %d relevant increments, dated %s.\n"
				"If you want to delete multiple increments in this way, "
				"use the --force." % (len(incobjs), incobjs_time))


		Log("Deleting increment%sat %s" % (len(incobjs) == 1 and " " or "s ",
										   incobjs_time), 3)
		Manage.delete_earlier_than(RPTree(datadir), time)
		
	def rot_get_earliest_time(self):
		"""Return earliest time in seconds that will not be deleted"""
		seconds = Time.intstringtoseconds(self.remove_older_than_string)
		return time.time() - seconds



# Script entry point: build the driver object and run the whole program.
if __name__ == "__main__": Main().Main()


